diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 09:22:09 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 09:22:09 +0000 |
commit | 43a97878ce14b72f0981164f87f2e35e14151312 (patch) | |
tree | 620249daf56c0258faa40cbdcf9cfba06de2a846 /third_party/rust/futures-0.1.31 | |
parent | Initial commit. (diff) | |
download | firefox-43a97878ce14b72f0981164f87f2e35e14151312.tar.xz firefox-43a97878ce14b72f0981164f87f2e35e14151312.zip |
Adding upstream version 110.0.1.upstream/110.0.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
144 files changed, 21749 insertions, 0 deletions
diff --git a/third_party/rust/futures-0.1.31/.cargo-checksum.json b/third_party/rust/futures-0.1.31/.cargo-checksum.json new file mode 100644 index 0000000000..4a15273133 --- /dev/null +++ b/third_party/rust/futures-0.1.31/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"081044d6883e82c3c5a288e0cf0e839acfffbc329c6170cecbf436d163b3390c","Cargo.toml":"b4b975265a565070b59e3a4224efdd58ec458d585b4a50fbbe917fc94283b852","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"453b187f7ca8e04f4a11e0a1a57ac13019823161a4ba72835ed16d1539bc2bbd","benches/bilock.rs":"60b9e0814b8396e0320d299273c6f91c2ccc09a2bb59eec92df74a1f0919e54f","benches/futures_unordered.rs":"fa2d3b5e6cdfe1e941d78c119a696fb583341fa0a0895ec2692e6d374ceb9a0e","benches/poll.rs":"ca369079c4db366a180be22f406eaf8e94e2e771c02568eb35d89e63093006cf","benches/sync_mpsc.rs":"8d4dbf78afcdf61fc72da326c4810bc797462771707d079f95a7f75aa2ec0ec0","benches/thread_notify.rs":"1992b1e2b352fbc15a611d1318ac1bf6f19318d769086d55c80e6863f1b0e106","src/executor.rs":"80466c075daf030e07cc0d053618837cb73c07f5399b3d65016925f4488adb73","src/future/and_then.rs":"15653d392d331a1fc4619129f737acc28525c88d1675b7fcea6ed27c5b1bf302","src/future/catch_unwind.rs":"dfef6b6a66c09574338046cf23b0c6aacd8200872d512b831d6dc12038f05298","src/future/chain.rs":"4d712e989e079f4164d5d9fe3bb522d521094b0d8083ee639350570444e5bb93","src/future/either.rs":"898813aa84c19946203dd076050035d899014a6b0749ba50f41ae148580f7169","src/future/empty.rs":"b549a1ca0f21bc6d1a26d9063a9a60deb9235ff7eff5db915050115fed91a9c7","src/future/flatten.rs":"7eb15429fcc749326371fe571e1f7d294d7b83f7557e6e1971e2206180253d65","src/future/flatten_stream.rs":"cf914425c3606b61c046df5c43d64266d6f2328693e4122441f9bbcf7cb0a4e1","src/future/from_err.rs":"a1f42d95f7b52e80c2e5a03b44cbce0efbe5fc486dfe33d799b74ab9ba9057ab","src/future/fuse.rs":"3920c819b850c8f04b3868eae70
dc0d3e6802ff0b517501f3aa5057a3b632102","src/future/inspect.rs":"89c362d8402dddd784bcc54e62ca27657ca8108e1ae8de5a7237e08650e10636","src/future/into_stream.rs":"0fa6bc4d70e8b4d75cf45fba53b39f033b87574103fffea4090b78f049bf43d0","src/future/join.rs":"b1dcefb03b1cb4e609ad2e79ba9a6cfab24235d7a4fff7fb9daf2c8fbf0f3d70","src/future/join_all.rs":"30fc27cbc1248046937b441a165a911e9ed1cd887ad6f3aeeb573b59c43e9cbf","src/future/lazy.rs":"1a2025bae3675fb682cefbf8a88bbb7a7519cfdee42dd6b3049a4d2b7ab8b5b1","src/future/loop_fn.rs":"5bd952247ae4e9d31dff77386bbd3700f596da136ea53e9e9944266af3f08688","src/future/map.rs":"91e148d9adaea929b85ede63c71fb07ef9b5611db906a13eedad2cf551745b47","src/future/map_err.rs":"2c8e87fa8ff56061722db6c69aaba588e6df6835a4e2fe84826f0bd4fed2e007","src/future/mod.rs":"a0a2f09ffdcbe20e0b210be3fe3d0d6863b8472ea84cd677af57074a9a618166","src/future/option.rs":"93270226cadcfa349250023e2070e687cf595831f427904ca744f7bc50342ded","src/future/or_else.rs":"444567101c4c437b184aa2e2eec0cf4363af442c0afc58d6508d3d2ac86489a9","src/future/poll_fn.rs":"817bfb75e7c43ca96a53e8cc9f48606c92c3c6742b07a732ce79a8f9b7bf8808","src/future/result.rs":"cc62c2377defb7b53aa859bf05c41c52a9cf8583378b7072bb2b45232d5fc9c5","src/future/select.rs":"73efd98004d5d8c46607bf770ff07a810bcdbe05cce0e8e4f41f5e659fd44203","src/future/select2.rs":"cfbbf3a9794109c56a3703456fae6111826bc25f98f2f36b234d483eeeeab482","src/future/select_all.rs":"b009e57ac241a3aba78db0bb751432cb99c1e91b8bae1b3baf225921f0daa441","src/future/select_ok.rs":"4884896914d8903edbfa12b5e255d35d5b2c91a9182ce6f774978db636617905","src/future/shared.rs":"4d53dc900b6e68a9854da2f1d391c8e724790d06d67ba1b3fb65b0ad73347f14","src/future/then.rs":"c49b388ab3c78979ad9ae40f6e859ee98e9351bdb11e3c3f1ad4ceca77651a56","src/lib.rs":"34cd577dc7d92d1922c679903c092d3d818a824148076ff0eec3ff5f296b099c","src/lock.rs":"fe4c8185f9774a134d4ce27af4a9c8b25f30f7dcc6990473210d66b6b8936ce4","src/poll.rs":"df74c3a8169d7895f3c46dd6de99edd77bd024b85e26b1d0644d2b8e5ef515b9","
src/resultstream.rs":"365bc127c0410badb58ea2beb2abae546968ba3ac91abe2140e93e0c3620228f","src/sink/buffer.rs":"17e6bad2434f31630494a9a98e40a287da8a603515885ab8a17199ab0e5f8e46","src/sink/fanout.rs":"1fbcabdb1d22a43919417790082dc27ac65e2a100263504b6664a0b5e0657ae1","src/sink/flush.rs":"6c9a3bb9705c740e601ca6101cf6e6a87f2568661cff39a3576ef55986e3cb60","src/sink/from_err.rs":"b6d6e43c1f90c70bc1576ac2c9f1a7777fc07eef419721850962d896ac6cc3de","src/sink/map_err.rs":"b34a60880336b536666c1047f1919dd90eeed10b869e9c679fa928a3d5321112","src/sink/mod.rs":"4b4d80d008bfa8d0abc83cd640dc9c107423c7920795678c079c544c037ab632","src/sink/send.rs":"019f3f8ab450edc0adb864e4b819f5b0d4cfe9dc33a53093c2aa18e1eb6270dc","src/sink/send_all.rs":"b05047459faceecf0dfd5e6280014c31f5a2a1058974785db8ede497c10a1e79","src/sink/wait.rs":"9c70fdd54c642e4ecf7d9b0ff1fbb2df9c89349dfd60b5482748cd93c6dc301e","src/sink/with.rs":"a122cc26108cb3396db12cb2107c576d366c61191f656acedd5ff6c65165fcfc","src/sink/with_flat_map.rs":"7b0f367d98a99d297c3ce097e9858ad7b0dfdafbb66516cba0767b62beb01af3","src/stream/and_then.rs":"9f0f6ee06343ab03eebcb71257963e76d8e7208e4015b402cc8a58f793e37d79","src/stream/buffer_unordered.rs":"057c3dec32baf451ef02f44ef849086637e4d2cbb2d65907cc15ed9398fe131b","src/stream/buffered.rs":"4ced19e37e47182d5f9c7f852a7906c35b71ac4a5b2774a9101859defbecb190","src/stream/catch_unwind.rs":"957b935645f1744a4741962772c15e94370153f33e0db356309bf98ebb599c37","src/stream/chain.rs":"0b6b06cf5aaf0c2f665c61c65766d6113e24f690ebd9ad3a89abfa521e2ce9b2","src/stream/channel.rs":"f728402228fea0be01ec5cf1d02e49e52666c0c9ea986708d18e24f30376f6de","src/stream/chunks.rs":"455481ae5bc83cde009dc73a5ba9d1fc6281dd5750591e7d08eb25f149cd822b","src/stream/collect.rs":"e770850c7ed2d458b521c12af4ee76adf2303919849d2f95fa93fdf574c86d37","src/stream/concat.rs":"747723d73dc8edfe807834835362abc305704fc9cd9c1faf7b387b3d02cd397e","src/stream/empty.rs":"e8e2820fd3b2329a6987a11c3b3f28849f49427d1a745f2bdc7a4982476514e7","src/stream/filter.rs"
:"4abaf6c7bd3ecbccf7deac7920cc6bdc1b17875bedf7c6acd7e702254b3b83ba","src/stream/filter_map.rs":"573079f98efc38bbc68746084702b952ccb035bd8238c3c30fa103979865ed0e","src/stream/flatten.rs":"f2edce326745373c9c524bb574ce18584be95c7fd1a0ef875256b39891219b18","src/stream/fold.rs":"7f397373ed66560ff1eb0cffc5dafaf1569d3c8155fe418cc2bf6fc33faec230","src/stream/for_each.rs":"bd7f96bf551a829e37a54fd529e0b68a8868480797df039c75e1f226639cf096","src/stream/forward.rs":"ad33478f18e830ce8d85c3d5555d030a3f599c2546ad5554710c5ed844607a93","src/stream/from_err.rs":"bde1791790030c480aa88c6f7b235703d5b400249c841c8b045ea2203728b96c","src/stream/fuse.rs":"5d544151de7e5a3ce8a47bdeabe5cc9beaf0937b1eeed67e8d76842f54dea65d","src/stream/future.rs":"8f72146483c0423cbc11d45c76ee219ed12d940164c83199bb85cd6d5d64c22d","src/stream/futures_ordered.rs":"82e46576eb5201bd00470831353ea4dd9fcd6bc2d194452bda385806f7a25586","src/stream/futures_unordered.rs":"e7287b90083f4122522b721269b655ae37ebfcd16256d6bbbad349465eed6812","src/stream/inspect.rs":"4a1e7d7bbb0842a7021c5145bb1b64dbc213cfdccff51fe8399e3120c123eab5","src/stream/inspect_err.rs":"b4f2bc6a139df8f8eb403aafbca91c05b3093d3a6e13cef034a639fbe3ebe01e","src/stream/iter.rs":"cfff6b28759ccf390e8367f9f63209133c16e7fa53c7ae71167f318ba3ec624b","src/stream/iter_ok.rs":"5165cb02972776515734e0f343e626fbb448b65b38cdeacffbd86116f3c3cd37","src/stream/iter_result.rs":"9db38b1066d9adc1ece496432127049d36fb4b9895660c2af2b7ac28510c9084","src/stream/map.rs":"ba16b1469e519377939cf3bd073b258ac41e6349aab1c59393e3b30178a56496","src/stream/map_err.rs":"5ce9a279fde1f4f0887435856e1efa4fdeda749d43f4bab658b0abd216bc0a6f","src/stream/merge.rs":"63bb60ca386e280985cee8e16ae8b07f02d57aa8a0fa877ae01fb8b4678366d0","src/stream/mod.rs":"5fe336a9e59e6e92f2fef56b95757f0fdd9a350de89b38e09e6e4a31a8ada299","src/stream/once.rs":"277c960dc4bfa09fcc6112efa4e38a9fe937dc31fff440405e60bfd843f3c1ab","src/stream/or_else.rs":"c11ea499d85d6204ad083058eeca9dbf29873c49ee21bf01f9fe53e9ec3bba52","src/stream/p
eek.rs":"25d78baa0b3e30d2d1c72d1f3b1aa2a28811522d345dceefec587beb18b70fe2","src/stream/poll_fn.rs":"1dffbe60bd50c19efb71de2f768eecf70fa280b0d9c9cb889d16bb43b1619c8b","src/stream/repeat.rs":"807f2be5c9c1e7d54954f73ee38a373e71177aca43be8866712798f29ab541c2","src/stream/select.rs":"027873d9142e896272f7471cccaaccb133bf9f696a3f7510f3fb1aa4253a7c09","src/stream/skip.rs":"d7c839ca15f830709ebedd9526bb9ebd64ee22cb944e44213ce850a1383b71fa","src/stream/skip_while.rs":"aeb9bd64530bfaa631f4ca9500861c62fbf32849b09383eb26904bedd8b8b269","src/stream/split.rs":"c9b391fcbf3d1762bde442fd3549bd4739d2f9f486e88063650d42fea33c6af3","src/stream/take.rs":"9872429dd89cb34755b514abde9b6a876da076aea0449fcadfcc48e982507f21","src/stream/take_while.rs":"36bc2a33850ba2b58fb0da3866c96c8f4dfbd81133e615fda031518e71d425b5","src/stream/then.rs":"c7c66e27180cf2d98694de27504283a32444a0d0d6919ab25b3621fa6169408d","src/stream/unfold.rs":"5e69718714cc38c5ca6d0a6f5243ab28e392bdc97d96e8ab9059d9f0e772120c","src/stream/wait.rs":"936a15df4499d188f210cb0133bc8ad25e33e5b674a96105b4da549f32e92b40","src/stream/zip.rs":"33f1401683a29ce194927533c40bdbbc0783c552cf0b666f268fa7109e593853","src/sync/bilock.rs":"def09b26f9d66f2be0a8885ad6cf7106c3a073493bad591fc4a068212f0d739f","src/sync/mod.rs":"27ad26777f600f7054215fccdff07f4303182af2a6e0998d4229d62b090b7aac","src/sync/mpsc/mod.rs":"97542ef9fcbe338f2ac0ce982a9af11883aded33d3b4ce34a788cf98e00a7d3f","src/sync/mpsc/queue.rs":"b39889f1b2000a3de995a50f46243f97a98d3cce7c6de4b95c4d8ffeb42af918","src/sync/oneshot.rs":"5d41f1d19b78ada7d5587d0fb5751de5886281cf59889ba1b77cbde399975f1f","src/task.rs":"38e6bff1ec9ba6d62825793865185b68a7b4688f373adb039f54374e564b41d0","src/task_impl/atomic_task.rs":"027606777e948f37a5931a152f9e755e5a235da0131a396f71e39945f04c7961","src/task_impl/core.rs":"e3aff0cc6f0604a24950c2a5f9076e338b33f30581c5f503aa3ce230d42c44f4","src/task_impl/mod.rs":"a026baea5e1ba8157f65979726060182ebcae17c4a7bdd63b5e946ea3c6f5101","src/task_impl/std/data.rs":"9b6210811c095c4
d0ec0f59a566bb8f5bc4b6ba544c72a4565dc47f3b7fbfab9","src/task_impl/std/mod.rs":"f67d06c30d11e4b33e9432e7b292f8a5888f6f094b48f781a23272e2bd218847","src/task_impl/std/task_rc.rs":"a6e46e79fecb1497d603c016f4f1b14523346f74af800c9c27c069229d62dc25","src/task_impl/std/unpark_mutex.rs":"7a53b7209ff00880bce9d912c249b077870625ca87fe9ab7b0f441d3af430302","src/unsync/mod.rs":"e5da32f78212646f0161fec2e7193cda830f541bc9ae37361fbcf82e99cc1d86","src/unsync/mpsc.rs":"ef63328496eeaa6575a17525193c6093e7803df3a64355a40f0187119ca1d731","src/unsync/oneshot.rs":"89661388a87d4ac83befc31df9ad11e6a8c6104e2dde7be9e3585d7549cfe8c4","tests/all.rs":"483cfb9ed40cbffc9c9c84cc8e19711fad2fdace74afea95c57c12ecd46d8524","tests/bilock.rs":"ad0a2b79a135d653c3dd5fe6ec7b2fc90c8ea287ba7e54a1a6140b0ea03ade97","tests/buffer_unordered.rs":"50ceb305da08fa095ee40a8f145fa9d95db59372cca949d77f011bbabc072152","tests/channel.rs":"cf5b59e84722b54e37aed7edaef5e2d1036b3ff7c278167228157dd4e9dae323","tests/eager_drop.rs":"e0a615c39f1fb9baae543212e72a165f68e7576f6b8c6db1809149d819bd546b","tests/eventual.rs":"73cbd3836a598175439b5dc5597f7e464dfbc6d77379aaae1172c6c7f85220e5","tests/fuse.rs":"feba43c51cbeeb383f6ebba4a4c75107de69a3cdb3eadb3e673fbeb5a91f9ac4","tests/future_flatten_stream.rs":"133b91a9e2170849ed7dbcb4024675873a781bf2dd190cfcaa9c41418c3ccb97","tests/futures_ordered.rs":"ad1d83d09a0600dda9d084616d3cef03bbc89aa2da6a6b44898839df1de32e95","tests/futures_unordered.rs":"b5e665e6921670dd88983432b7c33f760dabf75711f3e754358cefc47f5f17c1","tests/inspect.rs":"d7706a175be9ed6ecc09d7a45e1559160e00da85fa8a9a7caec4c53918999842","tests/mpsc-close.rs":"62c1d2acaf60e3e896471fef6a507a125b336c04781237de8dc9d13e59cfa9fc","tests/mpsc.rs":"082f05481952a08a5e33fd6655b17f4f7b4461ad78412323be248073c8ee3542","tests/oneshot.rs":"a8773b3a65e79944045118f36bfd81fceb826d4e2846b46f86db37a02d7ae1f4","tests/ready_queue.rs":"3d50c4e71e3954c5b8e2672255b6af33abaebc16172c038e64c3323d633693c0","tests/recurse.rs":"16bd311747c6d00035febb4b6c30e0fb6d03f
3398b9a175cbb46f46ac5466beb","tests/select_all.rs":"3666e95ea94da17abb1899101e51b294af576bc446119fbc8aea5bb2991f439a","tests/select_ok.rs":"7a740e5b2d70c7776202ed1495b016f6e63ae1de06ca0f12ab21fcb3117450a9","tests/shared.rs":"5916ecfd2af1ac74843ee1a85da2a8f61513568ff334242a60b1ee9459f2eed8","tests/sink.rs":"a6241723f306183900873c9f7cfe9e2c3096d880c90bef5a5a6d53f2a3237882","tests/split.rs":"24dd293f049a37bfaabb02ae558c81e9fef9298a2ce43ecb544450b045c15f5c","tests/stream.rs":"9cc5fc74be8e299491927f04008665da108aa47ba68d42256a07c79030da171e","tests/stream_catch_unwind.rs":"6cee77f455a671d038aac24cf2f79636f1c0a5d8900957a2fed0ee3ed99832b8","tests/support/local_executor.rs":"d082081397d0cbe9a3e0e20fdd23df6364016dd8f6c6a0db3441a9cde4268f35","tests/support/mod.rs":"1961189f57851a468e518327da0b7893eee990e477b82a278e0015f25b5e5a1c","tests/unfold.rs":"27ff8c3c83b333094bbffe6aebadf3730f0e35d1367b7b602a3df4e233d934d8","tests/unsync-oneshot.rs":"e676b37a64e1d6c0816d55cf443d86249ec2ff8180f1fc0d009de51e6842dac8","tests/unsync.rs":"ec7d4c7e46b8af1a1719f818e6b1777553fd6b7cc5c6e4df1a98efb19d52e933"},"package":"3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678"}
\ No newline at end of file diff --git a/third_party/rust/futures-0.1.31/CHANGELOG.md b/third_party/rust/futures-0.1.31/CHANGELOG.md new file mode 100644 index 0000000000..616282329e --- /dev/null +++ b/third_party/rust/futures-0.1.31/CHANGELOG.md @@ -0,0 +1,294 @@ +**Note**: This CHANGELOG is no longer maintained for newer 0.1.x releases. +See instead the github release tags and individual git commits. + +----- + +# 0.1.17 - 2017-10-31 + +* Add a `close` method on `sink::Wait` +* Undeprecate `stream::iter` as `stream::iter_result` +* Improve performance of wait-related methods +* Tweak buffered sinks with a 0 capacity to forward directly to the underlying + sink. +* Add `FromIterator` implementation for `FuturesOrdered` and `FuturesUnordered`. + +# 0.1.16 - 2017-09-15 + +* A `prelude` module has been added to glob import from and pick up a whole + bunch of useful types +* `sync::mpsc::Sender::poll_ready` has been added as an API +* `sync::mpsc::Sender::try_send` has been added as an API + +# 0.1.15 - 2017-08-24 + +* Improve performance of `BiLock` methods +* Implement `Clone` for `FutureResult` +* Forward `Stream` trait through `SinkMapErr` +* Add `stream::futures_ordered` next to `futures_unordered` +* Reimplement `Stream::buffered` on top of `stream::futures_ordered` (much more + efficient at scale). +* Add a `with_notify` function for abstractions which previously required + `UnparkEvent`. +* Add `get_ref`/`get_mut`/`into_inner` functions for stream take/skip methods +* Add a `Clone` implementation for `SharedItem` and `SharedError` +* Add a `mpsc::spawn` function to spawn a `Stream` into an `Executor` +* Add a `reunite` function for `BiLock` and the split stream/sink types to + rejoin two halves and reclaim the original item. 
+* Add `stream::poll_fn` to behave similarly to `future::poll_fn` +* Add `Sink::with_flat_map` like `Iterator::flat_map` +* Bump the minimum Rust version to 1.13.0 +* Expose `AtomicTask` in the public API for managing synchronization around task + notifications. +* Unify the `Canceled` type of the `sync` and `unsync` modules. +* Deprecate the `boxed` methods. These methods have caused more confusion than + they've solved historically, so it's recommended to use a local extension + trait or a local helper instead of the trait-based methods. +* Deprecate the `Stream::merge` method as it's less ergonomic than `select`. +* Add `oneshot::Sender::is_canceled` to test if a oneshot is canceled off a + task. +* Deprecates `UnboundedSender::send` in favor of a method named `unbounded_send` + to avoid a conflict with `Sink::send`. +* Deprecate the `stream::iter` function in favor of an `stream::iter_ok` adaptor + to avoid the need to deal with `Result` manually. +* Add an `inspect` function to the `Future` and `Stream` traits along the lines + of `Iterator::inspect` + +# 0.1.14 - 2017-05-30 + +This is a relatively large release of the `futures` crate, although much of it +is from reworking internals rather than new APIs. The banner feature of this +release is that the `futures::{task, executor}` modules are now available in +`no_std` contexts! A large refactoring of the task system was performed in +PR #436 to accommodate custom memory allocation schemes and otherwise remove +all dependencies on `std` for the task module. More details about this change +can be found on the PR itself. + +Other API additions in this release are: + +* A `FuturesUnordered::push` method was added and the `FuturesUnordered` type + itself was completely rewritten to efficiently track a large number of + futures. 
+* A `Task::will_notify_current` method was added with a slightly different + implementation than `Task::is_current` but with stronger guarantees and + documentation wording about its purpose. +* Many combinators now have `get_ref`, `get_mut`, and `into_inner` methods for + accessing internal futures and state. +* A `Stream::concat2` method was added which should be considered the "fixed" + version of `concat`, this one doesn't panic on empty streams. +* An `Executor` trait has been added to represent abstracting over the concept + of spawning a new task. Crates which only need the ability to spawn a future + can now be generic over `Executor` rather than requiring a + `tokio_core::reactor::Handle`. + +As with all 0.1.x releases this PR is intended to be 100% backwards compatible. +All code that previously compiled should continue to do so with these changes. +As with other changes, though, there are also some updates to be aware of: + +* The `task::park` function has been renamed to `task::current`. +* The `Task::unpark` function has been renamed to `Task::notify`, and in general + terminology around "unpark" has shifted to terminology around "notify" +* The `Unpark` trait has been deprecated in favor of the `Notify` trait + mentioned above. +* The `UnparkEvent` structure has been deprecated. It currently should perform + the same as it used to, but it's planned that in a future 0.1.x release the + performance will regress for crates that have not transitioned away. The + primary primitive to replace this is the addition of a `push` function on the + `FuturesUnordered` type. If this does not help implement your use case though, + please let us know! +* The `Task::is_current` method is now deprecated, and you likely want to use + `Task::will_notify_current` instead, but let us know if this doesn't suffice! 
+ +# 0.1.13 - 2017-04-05 + +* Add forwarding sink/stream impls for `stream::FromErr` and `sink::SinkFromErr` +* Add `PartialEq` and `Eq` to `mpsc::SendError` +* Reimplement `Shared` with `spawn` instead of `UnparkEvent` + +# 0.1.12 - 2017-04-03 + +* Add `Stream::from_err` and `Sink::from_err` +* Allow `SendError` to be `Clone` when possible + +# 0.1.11 - 2017-03-13 + +The major highlight of this release is the addition of a new "default" method on +the `Sink` trait, `Sink::close`. This method is used to indicate to a sink that +no new values will ever need to get pushed into it. This can be used to +implement graceful shutdown of protocols and otherwise simply indicates to a +sink that it can start freeing up resources. + +Currently this method is **not** a default method to preserve backwards +compatibility, but it's intended to become a default method in the 0.2 series of +the `futures` crate. It's highly recommended to audit implementations of `Sink` +to implement the `close` method as is fit. + +Other changes in this release are: + +* A new select combinator, `Future::select2` was added for a heterogeneous + select. +* A `Shared::peek` method was added to check to see if it's done. +* `Sink::map_err` was implemented +* The `log` dependency was removed +* Implementations of the `Debug` trait are now generally available. +* The `stream::IterStream` type was renamed to `stream::Iter` (with a reexport + for the old name). +* Add a `Sink::wait` method which returns an adapter to use an arbitrary `Sink` + synchronously. +* A `Stream::concat` method was added to concatenate a sequence of lists. +* The `oneshot::Sender::complete` method was renamed to `send` and now returns a + `Result` indicating successful transmission of a message or not. Note that the + `complete` method still exists, it's just deprecated. 
+ +# 0.1.10 - 2017-01-30 + +* Add a new `unsync` module which mirrors `sync` to the extent that it can but + is intended to not perform cross-thread synchronization (only usable within + one thread). +* Tweak `Shared` to work when handles may not get poll'd again. + +# 0.1.9 - 2017-01-18 + +* Fix `Send/Sync` of a few types +* Add `future::tail_fn` for more easily writing loops +* Export SharedItem/SharedError +* Remove an unused type parameter in `from_err` + +# 0.1.8 - 2017-01-11 + +* Fix some race conditions in the `Shared` implementation +* Add `Stream::take_while` +* Fix an unwrap in `stream::futures_unordered` +* Generalize `Stream::for_each` +* Add `Stream::chain` +* Add `stream::repeat` +* Relax `&mut self` to `&self` in `UnboundedSender::send` + +# 0.1.7 - 2016-12-18 + +* Add a `Future::shared` method for creating a future that can be shared + amongst threads by cloning the future itself. All derivative futures + will resolve to the same value once the original future has been + resolved. +* Add a `FutureFrom` trait for future-based conversion +* Fix a wakeup bug in `Receiver::close` +* Add `future::poll_fn` for quickly adapting a `Poll`-based function to + a future. +* Add an `Either` enum with two branches to easily create one future + type based on two different futures created on two branches of control + flow. +* Remove the `'static` bound on `Unpark` +* Optimize `send_all` and `forward` to send as many items as possible + before calling `poll_complete`. +* Unify the return types of the `ok`, `err`, and `result` future to + assist returning different varieties in different branches of a function. +* Add `CpuFuture::forget` to allow the computation to continue running + after a drop. +* Add a `stream::futures_unordered` combinator to turn a list of futures + into a stream representing their order of completion. 
+ +# 0.1.6 - 2016-11-22 + +* Fix `Clone` bound on the type parameter on `UnboundedSender` + +# 0.1.5 - 2016-11-22 + +* Fix `#![no_std]` support + +# 0.1.4 - 2016-11-22 + +This is quite a large release relative to the previous point releases! As +with all 0.1 releases, this release should be fully compatible with the 0.1.3 +release. If any incompatibilities are discovered please file an issue! + +The largest changes in 0.1.4 are the addition of a `Sink` trait coupled with a +reorganization of this crate. Note that all old locations for types/traits +still exist, they're just deprecated and tagged with `#[doc(hidden)]`. + +The new `Sink` trait is used to represent types which can periodically over +time accept items, but may take some time to fully process the item before +another can be accepted. Essentially, a sink is the opposite of a stream. This +trait will then be used in the tokio-core crate to implement simple framing by +modeling I/O streams as both a stream and a sink of frames. + +The organization of this crate is to now have three primary submodules, +`future`, `stream`, and `sink`. The traits as well as all combinator types are +defined in these submodules. The traits and types like `Async` and `Poll` are +then reexported at the top of the crate for convenient usage. It should be a +relatively rare occasion that the modules themselves are reached into. + +Finally, the 0.1.4 release comes with a new module, `sync`, in the futures +crate. This is intended to be the home of a suite of futures-aware +synchronization primitives. Currently this is inhabited with a `oneshot` module +(the old `oneshot` function), a `mpsc` module for a new multi-producer +single-consumer channel, and a `BiLock` type which represents sharing ownership +of one value between two consumers. This module may expand over time with more +types like a mutex, rwlock, spsc channel, etc. 
+ +Notable deprecations in the 0.1.4 release that will be deleted in an eventual +0.2 release: + +* The `TaskRc` type is now deprecated in favor of `BiLock` or otherwise `Arc` + sharing. +* All future combinators should be accessed through the `future` module, not + the top-level of the crate. +* The `Oneshot` and `Complete` types are now replaced with the `sync::oneshot` + module. +* Some old names like `collect` are deprecated in favor of more appropriately + named versions like `join_all` +* The `finished` constructor is now `ok`. +* The `failed` constructor is now `err`. +* The `done` constructor is now `result`. + +As always, please report bugs to https://github.com/rust-lang-nursery/futures-rs and +we always love feedback! If you've got situations we don't cover, combinators +you'd like to see, or slow code, please let us know! + +Full changelog: + +* Improve scalability of `buffer_unordered` combinator +* Fix a memory ordering bug in oneshot +* Add a new trait, `Sink` +* Reorganize the crate into three primary modules +* Add a new `sync` module for synchronization primitives +* Add a `BiLock` sync primitive for two-way sharing +* Deprecate `TaskRc` +* Rename `collect` to `join_all` +* Use a small vec in `Events` for improved clone performance +* Add `Stream::select` for selecting items from two streams like `merge` but + requiring the same types. +* Add `stream::unfold` constructor +* Add a `sync::mpsc` module with a futures-aware multi-producer single-consumer + queue. Both bounded (with backpressure) and unbounded (no backpressure) + variants are provided. +* Renamed `failed`, `finished`, and `done` combinators to `err`, `ok`, and + `result`. 
+* Add `Stream::forward` to send all items to a sink, like `Sink::send_all` +* Add `Stream::split` for streams which are both sinks and streams to have + separate ownership of the stream/sink halves +* Improve `join_all` with concurrency + +# 0.1.3 - 2016-10-24 + +* Rewrite `oneshot` for efficiency and removing allocations on send/recv +* Errors are passed through in `Stream::take` and `Stream::skip` +* Add a `select_ok` combinator to pick the first of a list that succeeds +* Remove the unnecessary `SelectAllNext` typedef +* Add `Stream::chunks` for receiving chunks of data +* Rewrite `stream::channel` for efficiency, correctness, and removing + allocations +* Remove `Send + 'static` bounds on the `stream::Empty` type + +# 0.1.2 - 2016-10-04 + +* Fixed a bug in drop of `FutureSender` +* Expose the channel `SendError` type +* Add `Future::into_stream` to convert to a single-element stream +* Add `Future::flatten_to_stream` to convert a future of a stream to a stream +* impl Debug for SendError +* Add stream::once for a one element stream +* Accept IntoIterator in stream::iter +* Add `Stream::catch_unwind` + +# 0.1.1 - 2016-09-09 + +Initial release! diff --git a/third_party/rust/futures-0.1.31/Cargo.toml b/third_party/rust/futures-0.1.31/Cargo.toml new file mode 100644 index 0000000000..23e9c4b225 --- /dev/null +++ b/third_party/rust/futures-0.1.31/Cargo.toml @@ -0,0 +1,32 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "futures" +version = "0.1.31" +authors = ["Alex Crichton <alex@alexcrichton.com>"] +description = "An implementation of futures and streams featuring zero allocations,\ncomposability, and iterator-like interfaces.\n" +homepage = "https://github.com/rust-lang-nursery/futures-rs" +documentation = "https://docs.rs/futures" +readme = "README.md" +keywords = ["futures", "async", "future"] +categories = ["asynchronous"] +license = "MIT/Apache-2.0" +repository = "https://github.com/rust-lang-nursery/futures-rs" + +[dependencies] + +[features] +default = ["use_std", "with-deprecated"] +nightly = [] +use_std = [] +with-deprecated = [] diff --git a/third_party/rust/futures-0.1.31/LICENSE-APACHE b/third_party/rust/futures-0.1.31/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/third_party/rust/futures-0.1.31/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/third_party/rust/futures-0.1.31/LICENSE-MIT b/third_party/rust/futures-0.1.31/LICENSE-MIT new file mode 100644 index 0000000000..28e630cf40 --- /dev/null +++ b/third_party/rust/futures-0.1.31/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/third_party/rust/futures-0.1.31/README.md b/third_party/rust/futures-0.1.31/README.md new file mode 100644 index 0000000000..1c33d5ddbb --- /dev/null +++ b/third_party/rust/futures-0.1.31/README.md @@ -0,0 +1,59 @@ +# futures-rs + +This library is an implementation of **zero-cost futures** in Rust. + +[![Build Status](https://img.shields.io/github/workflow/status/rust-lang/futures-rs/CI/master)](https://github.com/rust-lang/futures-rs/actions) +[![Crates.io](https://img.shields.io/crates/v/futures.svg?maxAge=2592000)](https://crates.io/crates/futures) + +[Documentation](https://docs.rs/futures) + +[Tutorial](https://tokio.rs/docs/getting-started/futures/) + +## Usage + +First, add this to your `Cargo.toml`: + +```toml +[dependencies] +futures = "0.1.26" +``` + +Next, add this to your crate: + +```rust +extern crate futures; + +use futures::Future; +``` + +For more information about how you can use futures with async I/O you can take a +look at [https://tokio.rs](https://tokio.rs) which is an introduction to both +the Tokio stack and also futures. + +### Feature `use_std` + +`futures-rs` works without the standard library, such as in bare metal environments. +However, it has a significantly reduced API surface. To use `futures-rs` in +a `#[no_std]` environment, use: + +```toml +[dependencies] +futures = { version = "0.1.26", default-features = false } +``` + +# License + +This project is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Futures by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
diff --git a/third_party/rust/futures-0.1.31/benches/bilock.rs b/third_party/rust/futures-0.1.31/benches/bilock.rs new file mode 100644 index 0000000000..0f840289ab --- /dev/null +++ b/third_party/rust/futures-0.1.31/benches/bilock.rs @@ -0,0 +1,121 @@ +#![feature(test)] + +extern crate futures; +extern crate test; + +use futures::{Async, Poll}; +use futures::executor; +use futures::executor::{Notify, NotifyHandle}; +use futures::sync::BiLock; +use futures::sync::BiLockAcquire; +use futures::sync::BiLockAcquired; +use futures::future::Future; +use futures::stream::Stream; + + +use test::Bencher; + +fn notify_noop() -> NotifyHandle { + struct Noop; + + impl Notify for Noop { + fn notify(&self, _id: usize) {} + } + + const NOOP : &'static Noop = &Noop; + + NotifyHandle::from(NOOP) +} + + +/// Pseudo-stream which simply calls `lock.poll()` on `poll` +struct LockStream { + lock: BiLockAcquire<u32>, +} + +impl LockStream { + fn new(lock: BiLock<u32>) -> LockStream { + LockStream { + lock: lock.lock() + } + } + + /// Release a lock after it was acquired in `poll`, + /// so `poll` could be called again. 
+ fn release_lock(&mut self, guard: BiLockAcquired<u32>) { + self.lock = guard.unlock().lock() + } +} + +impl Stream for LockStream { + type Item = BiLockAcquired<u32>; + type Error = (); + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + self.lock.poll().map(|a| match a { + Async::Ready(a) => Async::Ready(Some(a)), + Async::NotReady => Async::NotReady, + }) + } +} + + +#[bench] +fn contended(b: &mut Bencher) { + b.iter(|| { + let (x, y) = BiLock::new(1); + + let mut x = executor::spawn(LockStream::new(x)); + let mut y = executor::spawn(LockStream::new(y)); + + for _ in 0..1000 { + let x_guard = match x.poll_stream_notify(¬ify_noop(), 11) { + Ok(Async::Ready(Some(guard))) => guard, + _ => panic!(), + }; + + // Try poll second lock while first lock still holds the lock + match y.poll_stream_notify(¬ify_noop(), 11) { + Ok(Async::NotReady) => (), + _ => panic!(), + }; + + x.get_mut().release_lock(x_guard); + + let y_guard = match y.poll_stream_notify(¬ify_noop(), 11) { + Ok(Async::Ready(Some(guard))) => guard, + _ => panic!(), + }; + + y.get_mut().release_lock(y_guard); + } + (x, y) + }); +} + +#[bench] +fn lock_unlock(b: &mut Bencher) { + b.iter(|| { + let (x, y) = BiLock::new(1); + + let mut x = executor::spawn(LockStream::new(x)); + let mut y = executor::spawn(LockStream::new(y)); + + for _ in 0..1000 { + let x_guard = match x.poll_stream_notify(¬ify_noop(), 11) { + Ok(Async::Ready(Some(guard))) => guard, + _ => panic!(), + }; + + x.get_mut().release_lock(x_guard); + + let y_guard = match y.poll_stream_notify(¬ify_noop(), 11) { + Ok(Async::Ready(Some(guard))) => guard, + _ => panic!(), + }; + + y.get_mut().release_lock(y_guard); + } + (x, y) + }) +} diff --git a/third_party/rust/futures-0.1.31/benches/futures_unordered.rs b/third_party/rust/futures-0.1.31/benches/futures_unordered.rs new file mode 100644 index 0000000000..c922df5541 --- /dev/null +++ b/third_party/rust/futures-0.1.31/benches/futures_unordered.rs @@ -0,0 +1,43 @@ +#![feature(test)] 
+ +extern crate futures; +extern crate test; + +use futures::*; +use futures::stream::FuturesUnordered; +use futures::sync::oneshot; + +use test::Bencher; + +use std::collections::VecDeque; +use std::thread; + +#[bench] +fn oneshots(b: &mut Bencher) { + const NUM: usize = 10_000; + + b.iter(|| { + let mut txs = VecDeque::with_capacity(NUM); + let mut rxs = FuturesUnordered::new(); + + for _ in 0..NUM { + let (tx, rx) = oneshot::channel(); + txs.push_back(tx); + rxs.push(rx); + } + + thread::spawn(move || { + while let Some(tx) = txs.pop_front() { + let _ = tx.send("hello"); + } + }); + + future::lazy(move || { + loop { + if let Ok(Async::Ready(None)) = rxs.poll() { + return Ok::<(), ()>(()); + } + } + }).wait().unwrap(); + }); +} diff --git a/third_party/rust/futures-0.1.31/benches/poll.rs b/third_party/rust/futures-0.1.31/benches/poll.rs new file mode 100644 index 0000000000..1fec653fa6 --- /dev/null +++ b/third_party/rust/futures-0.1.31/benches/poll.rs @@ -0,0 +1,72 @@ +#![feature(test)] + +extern crate futures; +extern crate test; + +use futures::*; +use futures::executor::{Notify, NotifyHandle}; +use futures::task::Task; + +use test::Bencher; + +fn notify_noop() -> NotifyHandle { + struct Noop; + + impl Notify for Noop { + fn notify(&self, _id: usize) {} + } + + const NOOP : &'static Noop = &Noop; + + NotifyHandle::from(NOOP) +} + +#[bench] +fn task_init(b: &mut Bencher) { + const NUM: u32 = 100_000; + + struct MyFuture { + num: u32, + task: Option<Task>, + }; + + impl Future for MyFuture { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + if self.num == NUM { + Ok(Async::Ready(())) + } else { + self.num += 1; + + if let Some(ref t) = self.task { + if t.will_notify_current() { + t.notify(); + return Ok(Async::NotReady); + } + } + + let t = task::current(); + t.notify(); + self.task = Some(t); + + Ok(Async::NotReady) + } + } + } + + let notify = notify_noop(); + + let mut fut = executor::spawn(MyFuture { + num: 0, + task: None, + 
}); + + b.iter(|| { + fut.get_mut().num = 0; + + while let Ok(Async::NotReady) = fut.poll_future_notify(¬ify, 0) { + } + }); +} diff --git a/third_party/rust/futures-0.1.31/benches/sync_mpsc.rs b/third_party/rust/futures-0.1.31/benches/sync_mpsc.rs new file mode 100644 index 0000000000..c0365c5fed --- /dev/null +++ b/third_party/rust/futures-0.1.31/benches/sync_mpsc.rs @@ -0,0 +1,168 @@ +#![feature(test)] + +#[macro_use] +extern crate futures; +extern crate test; + +use futures::{Async, Poll, AsyncSink}; +use futures::executor; +use futures::executor::{Notify, NotifyHandle}; + +use futures::sink::Sink; +use futures::stream::Stream; + +use futures::sync::mpsc::unbounded; +use futures::sync::mpsc::channel; +use futures::sync::mpsc::Sender; +use futures::sync::mpsc::UnboundedSender; + + +use test::Bencher; + +fn notify_noop() -> NotifyHandle { + struct Noop; + + impl Notify for Noop { + fn notify(&self, _id: usize) {} + } + + const NOOP : &'static Noop = &Noop; + + NotifyHandle::from(NOOP) +} + +/// Single producer, single consumer +#[bench] +fn unbounded_1_tx(b: &mut Bencher) { + b.iter(|| { + let (tx, rx) = unbounded(); + + let mut rx = executor::spawn(rx); + + // 1000 iterations to avoid measuring overhead of initialization + // Result should be divided by 1000 + for i in 0..1000 { + + // Poll, not ready, park + assert_eq!(Ok(Async::NotReady), rx.poll_stream_notify(¬ify_noop(), 1)); + + UnboundedSender::unbounded_send(&tx, i).unwrap(); + + // Now poll ready + assert_eq!(Ok(Async::Ready(Some(i))), rx.poll_stream_notify(¬ify_noop(), 1)); + } + }) +} + +/// 100 producers, single consumer +#[bench] +fn unbounded_100_tx(b: &mut Bencher) { + b.iter(|| { + let (tx, rx) = unbounded(); + + let mut rx = executor::spawn(rx); + + let tx: Vec<_> = (0..100).map(|_| tx.clone()).collect(); + + // 1000 send/recv operations total, result should be divided by 1000 + for _ in 0..10 { + for i in 0..tx.len() { + assert_eq!(Ok(Async::NotReady), rx.poll_stream_notify(¬ify_noop(), 1)); + + 
UnboundedSender::unbounded_send(&tx[i], i).unwrap(); + + assert_eq!(Ok(Async::Ready(Some(i))), rx.poll_stream_notify(¬ify_noop(), 1)); + } + } + }) +} + +#[bench] +fn unbounded_uncontended(b: &mut Bencher) { + b.iter(|| { + let (tx, mut rx) = unbounded(); + + for i in 0..1000 { + UnboundedSender::unbounded_send(&tx, i).expect("send"); + // No need to create a task, because poll is not going to park. + assert_eq!(Ok(Async::Ready(Some(i))), rx.poll()); + } + }) +} + + +/// A Stream that continuously sends incrementing number of the queue +struct TestSender { + tx: Sender<u32>, + last: u32, // Last number sent +} + +// Could be a Future, it doesn't matter +impl Stream for TestSender { + type Item = u32; + type Error = (); + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + match self.tx.start_send(self.last + 1) { + Err(_) => panic!(), + Ok(AsyncSink::Ready) => { + self.last += 1; + Ok(Async::Ready(Some(self.last))) + } + Ok(AsyncSink::NotReady(_)) => { + Ok(Async::NotReady) + } + } + } +} + + +/// Single producers, single consumer +#[bench] +fn bounded_1_tx(b: &mut Bencher) { + b.iter(|| { + let (tx, rx) = channel(0); + + let mut tx = executor::spawn(TestSender { + tx: tx, + last: 0, + }); + + let mut rx = executor::spawn(rx); + + for i in 0..1000 { + assert_eq!(Ok(Async::Ready(Some(i + 1))), tx.poll_stream_notify(¬ify_noop(), 1)); + assert_eq!(Ok(Async::NotReady), tx.poll_stream_notify(¬ify_noop(), 1)); + assert_eq!(Ok(Async::Ready(Some(i + 1))), rx.poll_stream_notify(¬ify_noop(), 1)); + } + }) +} + +/// 100 producers, single consumer +#[bench] +fn bounded_100_tx(b: &mut Bencher) { + b.iter(|| { + // Each sender can send one item after specified capacity + let (tx, rx) = channel(0); + + let mut tx: Vec<_> = (0..100).map(|_| { + executor::spawn(TestSender { + tx: tx.clone(), + last: 0 + }) + }).collect(); + + let mut rx = executor::spawn(rx); + + for i in 0..10 { + for j in 0..tx.len() { + // Send an item + assert_eq!(Ok(Async::Ready(Some(i + 1))), 
tx[j].poll_stream_notify(¬ify_noop(), 1)); + // Then block + assert_eq!(Ok(Async::NotReady), tx[j].poll_stream_notify(¬ify_noop(), 1)); + // Recv the item + assert_eq!(Ok(Async::Ready(Some(i + 1))), rx.poll_stream_notify(¬ify_noop(), 1)); + } + } + }) +} diff --git a/third_party/rust/futures-0.1.31/benches/thread_notify.rs b/third_party/rust/futures-0.1.31/benches/thread_notify.rs new file mode 100644 index 0000000000..92932353d8 --- /dev/null +++ b/third_party/rust/futures-0.1.31/benches/thread_notify.rs @@ -0,0 +1,114 @@ +#![feature(test)] + +extern crate futures; +extern crate test; + +use futures::{Future, Poll, Async}; +use futures::task::{self, Task}; + +use test::Bencher; + +#[bench] +fn thread_yield_single_thread_one_wait(b: &mut Bencher) { + const NUM: usize = 10_000; + + struct Yield { + rem: usize, + } + + impl Future for Yield { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + if self.rem == 0 { + Ok(Async::Ready(())) + } else { + self.rem -= 1; + task::current().notify(); + Ok(Async::NotReady) + } + } + } + + b.iter(|| { + let y = Yield { rem: NUM }; + y.wait().unwrap(); + }); +} + +#[bench] +fn thread_yield_single_thread_many_wait(b: &mut Bencher) { + const NUM: usize = 10_000; + + struct Yield { + rem: usize, + } + + impl Future for Yield { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + if self.rem == 0 { + Ok(Async::Ready(())) + } else { + self.rem -= 1; + task::current().notify(); + Ok(Async::NotReady) + } + } + } + + b.iter(|| { + for _ in 0..NUM { + let y = Yield { rem: 1 }; + y.wait().unwrap(); + } + }); +} + +#[bench] +fn thread_yield_multi_thread(b: &mut Bencher) { + use std::sync::mpsc; + use std::thread; + + const NUM: usize = 1_000; + + let (tx, rx) = mpsc::sync_channel::<Task>(10_000); + + struct Yield { + rem: usize, + tx: mpsc::SyncSender<Task>, + } + + impl Future for Yield { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + if self.rem == 0 { 
+ Ok(Async::Ready(())) + } else { + self.rem -= 1; + self.tx.send(task::current()).unwrap(); + Ok(Async::NotReady) + } + } + } + + thread::spawn(move || { + while let Ok(task) = rx.recv() { + task.notify(); + } + }); + + b.iter(move || { + let y = Yield { + rem: NUM, + tx: tx.clone(), + }; + + y.wait().unwrap(); + }); +} diff --git a/third_party/rust/futures-0.1.31/src/executor.rs b/third_party/rust/futures-0.1.31/src/executor.rs new file mode 100644 index 0000000000..365642f770 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/executor.rs @@ -0,0 +1,17 @@ +//! Executors +//! +//! This module contains tools for managing the raw execution of futures, +//! which is needed when building *executors* (places where futures can run). +//! +//! More information about executors can be [found online at tokio.rs][online]. +//! +//! [online]: https://tokio.rs/docs/going-deeper-futures/tasks/ + +#[allow(deprecated)] +#[doc(hidden)] +#[cfg(feature = "use_std")] +pub use task_impl::{Unpark, Executor, Run}; + +pub use task_impl::{Spawn, spawn, Notify, with_notify}; + +pub use task_impl::{UnsafeNotify, NotifyHandle}; diff --git a/third_party/rust/futures-0.1.31/src/future/and_then.rs b/third_party/rust/futures-0.1.31/src/future/and_then.rs new file mode 100644 index 0000000000..2e5b6aa16e --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/and_then.rs @@ -0,0 +1,38 @@ +use {Future, IntoFuture, Poll}; +use super::chain::Chain; + +/// Future for the `and_then` combinator, chaining a computation onto the end of +/// another future which completes successfully. +/// +/// This is created by the `Future::and_then` method. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct AndThen<A, B, F> where A: Future, B: IntoFuture { + state: Chain<A, B::Future, F>, +} + +pub fn new<A, B, F>(future: A, f: F) -> AndThen<A, B, F> + where A: Future, + B: IntoFuture, +{ + AndThen { + state: Chain::new(future, f), + } +} + +impl<A, B, F> Future for AndThen<A, B, F> + where A: Future, + B: IntoFuture<Error=A::Error>, + F: FnOnce(A::Item) -> B, +{ + type Item = B::Item; + type Error = B::Error; + + fn poll(&mut self) -> Poll<B::Item, B::Error> { + self.state.poll(|result, f| { + result.map(|e| { + Err(f(e).into_future()) + }) + }) + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/catch_unwind.rs b/third_party/rust/futures-0.1.31/src/future/catch_unwind.rs new file mode 100644 index 0000000000..f87f118185 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/catch_unwind.rs @@ -0,0 +1,51 @@ +use std::prelude::v1::*; +use std::any::Any; +use std::panic::{catch_unwind, UnwindSafe, AssertUnwindSafe}; + +use {Future, Poll, Async}; + +/// Future for the `catch_unwind` combinator. +/// +/// This is created by the `Future::catch_unwind` method. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct CatchUnwind<F> where F: Future { + future: Option<F>, +} + +pub fn new<F>(future: F) -> CatchUnwind<F> + where F: Future + UnwindSafe, +{ + CatchUnwind { + future: Some(future), + } +} + +impl<F> Future for CatchUnwind<F> + where F: Future + UnwindSafe, +{ + type Item = Result<F::Item, F::Error>; + type Error = Box<Any + Send>; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + let mut future = self.future.take().expect("cannot poll twice"); + let (res, future) = catch_unwind(|| (future.poll(), future))?; + match res { + Ok(Async::NotReady) => { + self.future = Some(future); + Ok(Async::NotReady) + } + Ok(Async::Ready(t)) => Ok(Async::Ready(Ok(t))), + Err(e) => Ok(Async::Ready(Err(e))), + } + } +} + +impl<F: Future> Future for AssertUnwindSafe<F> { + type Item = F::Item; + type Error = F::Error; + + fn poll(&mut self) -> Poll<F::Item, F::Error> { + self.0.poll() + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/chain.rs b/third_party/rust/futures-0.1.31/src/future/chain.rs new file mode 100644 index 0000000000..1bf5cd639c --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/chain.rs @@ -0,0 +1,48 @@ +use core::mem; + +use {Future, Poll, Async}; + +#[derive(Debug)] +pub enum Chain<A, B, C> where A: Future { + First(A, C), + Second(B), + Done, +} + +impl<A, B, C> Chain<A, B, C> + where A: Future, + B: Future, +{ + pub fn new(a: A, c: C) -> Chain<A, B, C> { + Chain::First(a, c) + } + + pub fn poll<F>(&mut self, f: F) -> Poll<B::Item, B::Error> + where F: FnOnce(Result<A::Item, A::Error>, C) + -> Result<Result<B::Item, B>, B::Error>, + { + let a_result = match *self { + Chain::First(ref mut a, _) => { + match a.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::Ready(t)) => Ok(t), + Err(e) => Err(e), + } + } + Chain::Second(ref mut b) => return b.poll(), + Chain::Done => panic!("cannot poll a chained future twice"), + }; + let data = 
match mem::replace(self, Chain::Done) { + Chain::First(_, c) => c, + _ => panic!(), + }; + match f(a_result, data)? { + Ok(e) => Ok(Async::Ready(e)), + Err(mut b) => { + let ret = b.poll(); + *self = Chain::Second(b); + ret + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/either.rs b/third_party/rust/futures-0.1.31/src/future/either.rs new file mode 100644 index 0000000000..253f26784c --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/either.rs @@ -0,0 +1,54 @@ +use {Future, Poll, Stream}; + +/// Combines two different futures yielding the same item and error +/// types into a single type. +#[derive(Debug)] +pub enum Either<A, B> { + /// First branch of the type + A(A), + /// Second branch of the type + B(B), +} + +impl<T, A, B> Either<(T, A), (T, B)> { + /// Splits out the homogeneous type from an either of tuples. + /// + /// This method is typically useful when combined with the `Future::select2` + /// combinator. + pub fn split(self) -> (T, Either<A, B>) { + match self { + Either::A((a, b)) => (a, Either::A(b)), + Either::B((a, b)) => (a, Either::B(b)), + } + } +} + +impl<A, B> Future for Either<A, B> + where A: Future, + B: Future<Item = A::Item, Error = A::Error> +{ + type Item = A::Item; + type Error = A::Error; + + fn poll(&mut self) -> Poll<A::Item, A::Error> { + match *self { + Either::A(ref mut a) => a.poll(), + Either::B(ref mut b) => b.poll(), + } + } +} + +impl<A, B> Stream for Either<A, B> + where A: Stream, + B: Stream<Item = A::Item, Error = A::Error> +{ + type Item = A::Item; + type Error = A::Error; + + fn poll(&mut self) -> Poll<Option<A::Item>, A::Error> { + match *self { + Either::A(ref mut a) => a.poll(), + Either::B(ref mut b) => b.poll(), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/empty.rs b/third_party/rust/futures-0.1.31/src/future/empty.rs new file mode 100644 index 0000000000..fbb56b26fd --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/empty.rs @@ -0,0 +1,31 @@ +//! 
Definition of the Empty combinator, a future that's never ready. + +use core::marker; + +use {Future, Poll, Async}; + +/// A future which is never resolved. +/// +/// This future can be created with the `empty` function. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Empty<T, E> { + _data: marker::PhantomData<(T, E)>, +} + +/// Creates a future which never resolves, representing a computation that never +/// finishes. +/// +/// The returned future will forever return `Async::NotReady`. +pub fn empty<T, E>() -> Empty<T, E> { + Empty { _data: marker::PhantomData } +} + +impl<T, E> Future for Empty<T, E> { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<T, E> { + Ok(Async::NotReady) + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/flatten.rs b/third_party/rust/futures-0.1.31/src/future/flatten.rs new file mode 100644 index 0000000000..bfe286975c --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/flatten.rs @@ -0,0 +1,49 @@ +use {Future, IntoFuture, Poll}; +use core::fmt; +use super::chain::Chain; + +/// Future for the `flatten` combinator, flattening a future-of-a-future to get just +/// the result of the final future. +/// +/// This is created by the `Future::flatten` method. 
+#[must_use = "futures do nothing unless polled"] +pub struct Flatten<A> where A: Future, A::Item: IntoFuture { + state: Chain<A, <A::Item as IntoFuture>::Future, ()>, +} + +impl<A> fmt::Debug for Flatten<A> + where A: Future + fmt::Debug, + A::Item: IntoFuture, + <<A as IntoFuture>::Item as IntoFuture>::Future: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Flatten") + .field("state", &self.state) + .finish() + } +} + +pub fn new<A>(future: A) -> Flatten<A> + where A: Future, + A::Item: IntoFuture, +{ + Flatten { + state: Chain::new(future, ()), + } +} + +impl<A> Future for Flatten<A> + where A: Future, + A::Item: IntoFuture, + <<A as Future>::Item as IntoFuture>::Error: From<<A as Future>::Error> +{ + type Item = <<A as Future>::Item as IntoFuture>::Item; + type Error = <<A as Future>::Item as IntoFuture>::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + self.state.poll(|a, ()| { + let future = a?.into_future(); + Ok(Err(future)) + }) + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/flatten_stream.rs b/third_party/rust/futures-0.1.31/src/future/flatten_stream.rs new file mode 100644 index 0000000000..7bf3b9ca79 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/flatten_stream.rs @@ -0,0 +1,99 @@ +use {Async, Future, Poll}; +use core::fmt; +use stream::Stream; + +/// Future for the `flatten_stream` combinator, flattening a +/// future-of-a-stream to get just the result of the final stream as a stream. +/// +/// This is created by the `Future::flatten_stream` method. 
+#[must_use = "streams do nothing unless polled"] +pub struct FlattenStream<F> + where F: Future, + <F as Future>::Item: Stream<Error=F::Error>, +{ + state: State<F> +} + +impl<F> fmt::Debug for FlattenStream<F> + where F: Future + fmt::Debug, + <F as Future>::Item: Stream<Error=F::Error> + fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("FlattenStream") + .field("state", &self.state) + .finish() + } +} + +pub fn new<F>(f: F) -> FlattenStream<F> + where F: Future, + <F as Future>::Item: Stream<Error=F::Error>, +{ + FlattenStream { + state: State::Future(f) + } +} + +#[derive(Debug)] +enum State<F> + where F: Future, + <F as Future>::Item: Stream<Error=F::Error>, +{ + // future is not yet called or called and not ready + Future(F), + // future resolved to Stream + Stream(F::Item), + // EOF after future resolved to error + Eof, + // after EOF after future resolved to error + Done, +} + +impl<F> Stream for FlattenStream<F> + where F: Future, + <F as Future>::Item: Stream<Error=F::Error>, +{ + type Item = <F::Item as Stream>::Item; + type Error = <F::Item as Stream>::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + loop { + let (next_state, ret_opt) = match self.state { + State::Future(ref mut f) => { + match f.poll() { + Ok(Async::NotReady) => { + // State is not changed, early return. + return Ok(Async::NotReady) + }, + Ok(Async::Ready(stream)) => { + // Future resolved to stream. + // We do not return, but poll that + // stream in the next loop iteration. + (State::Stream(stream), None) + } + Err(e) => { + (State::Eof, Some(Err(e))) + } + } + } + State::Stream(ref mut s) => { + // Just forward call to the stream, + // do not track its state. 
+ return s.poll(); + } + State::Eof => { + (State::Done, Some(Ok(Async::Ready(None)))) + } + State::Done => { + panic!("poll called after eof"); + } + }; + + self.state = next_state; + if let Some(ret) = ret_opt { + return ret; + } + } + } +} + diff --git a/third_party/rust/futures-0.1.31/src/future/from_err.rs b/third_party/rust/futures-0.1.31/src/future/from_err.rs new file mode 100644 index 0000000000..97e35d7cc7 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/from_err.rs @@ -0,0 +1,35 @@ +use core::marker::PhantomData; + +use {Future, Poll, Async}; + +/// Future for the `from_err` combinator, changing the error type of a future. +/// +/// This is created by the `Future::from_err` method. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct FromErr<A, E> where A: Future { + future: A, + f: PhantomData<E> +} + +pub fn new<A, E>(future: A) -> FromErr<A, E> + where A: Future +{ + FromErr { + future: future, + f: PhantomData + } +} + +impl<A:Future, E:From<A::Error>> Future for FromErr<A, E> { + type Item = A::Item; + type Error = E; + + fn poll(&mut self) -> Poll<A::Item, E> { + let e = match self.future.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + other => other, + }; + e.map_err(From::from) + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/fuse.rs b/third_party/rust/futures-0.1.31/src/future/fuse.rs new file mode 100644 index 0000000000..05ad3d5afa --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/fuse.rs @@ -0,0 +1,49 @@ +use {Future, Poll, Async}; + +/// A future which "fuses" a future once it's been resolved. +/// +/// Normally futures can behave unpredictable once they're used after a future +/// has been resolved, but `Fuse` is always defined to return `Async::NotReady` +/// from `poll` after it has resolved successfully or returned an error. +/// +/// This is created by the `Future::fuse` method. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Fuse<A: Future> { + future: Option<A>, +} + +pub fn new<A: Future>(f: A) -> Fuse<A> { + Fuse { + future: Some(f), + } +} + +impl<A: Future> Fuse<A> { + /// Returns whether the underlying future has finished or not. + /// + /// If this method returns `true`, then all future calls to `poll` + /// are guaranteed to return `Ok(Async::NotReady)`. If this returns + /// false, then the underlying future has not been driven to + /// completion. + pub fn is_done(&self) -> bool { + self.future.is_none() + } +} + +impl<A: Future> Future for Fuse<A> { + type Item = A::Item; + type Error = A::Error; + + fn poll(&mut self) -> Poll<A::Item, A::Error> { + let res = self.future.as_mut().map(|f| f.poll()); + match res.unwrap_or(Ok(Async::NotReady)) { + res @ Ok(Async::Ready(_)) | + res @ Err(_) => { + self.future = None; + res + } + Ok(Async::NotReady) => Ok(Async::NotReady) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/inspect.rs b/third_party/rust/futures-0.1.31/src/future/inspect.rs new file mode 100644 index 0000000000..59fcd78638 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/inspect.rs @@ -0,0 +1,40 @@ +use {Future, Poll, Async}; + +/// Do something with the item of a future, passing it on. +/// +/// This is created by the `Future::inspect` method. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Inspect<A, F> where A: Future { + future: A, + f: Option<F>, +} + +pub fn new<A, F>(future: A, f: F) -> Inspect<A, F> + where A: Future, + F: FnOnce(&A::Item), +{ + Inspect { + future: future, + f: Some(f), + } +} + +impl<A, F> Future for Inspect<A, F> + where A: Future, + F: FnOnce(&A::Item), +{ + type Item = A::Item; + type Error = A::Error; + + fn poll(&mut self) -> Poll<A::Item, A::Error> { + match self.future.poll() { + Ok(Async::NotReady) => Ok(Async::NotReady), + Ok(Async::Ready(e)) => { + (self.f.take().expect("cannot poll Inspect twice"))(&e); + Ok(Async::Ready(e)) + }, + Err(e) => Err(e), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/into_stream.rs b/third_party/rust/futures-0.1.31/src/future/into_stream.rs new file mode 100644 index 0000000000..6e299e6a21 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/into_stream.rs @@ -0,0 +1,36 @@ +use {Async, Poll}; +use Future; +use stream::Stream; + +/// Future that forwards one element from the underlying future +/// (whether it is success of error) and emits EOF after that. 
+#[derive(Debug)] +pub struct IntoStream<F: Future> { + future: Option<F> +} + +pub fn new<F: Future>(future: F) -> IntoStream<F> { + IntoStream { + future: Some(future) + } +} + +impl<F: Future> Stream for IntoStream<F> { + type Item = F::Item; + type Error = F::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + let ret = match self.future { + None => return Ok(Async::Ready(None)), + Some(ref mut future) => { + match future.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => Err(e), + Ok(Async::Ready(r)) => Ok(r), + } + } + }; + self.future = None; + ret.map(|r| Async::Ready(Some(r))) + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/join.rs b/third_party/rust/futures-0.1.31/src/future/join.rs new file mode 100644 index 0000000000..452121200b --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/join.rs @@ -0,0 +1,172 @@ +#![allow(non_snake_case)] + +use core::fmt; +use core::mem; + +use {Future, Poll, IntoFuture, Async}; + +macro_rules! 
generate { + ($( + $(#[$doc:meta])* + ($Join:ident, $new:ident, <A, $($B:ident),*>), + )*) => ($( + $(#[$doc])* + #[must_use = "futures do nothing unless polled"] + pub struct $Join<A, $($B),*> + where A: Future, + $($B: Future<Error=A::Error>),* + { + a: MaybeDone<A>, + $($B: MaybeDone<$B>,)* + } + + impl<A, $($B),*> fmt::Debug for $Join<A, $($B),*> + where A: Future + fmt::Debug, + A::Item: fmt::Debug, + $( + $B: Future<Error=A::Error> + fmt::Debug, + $B::Item: fmt::Debug + ),* + { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct(stringify!($Join)) + .field("a", &self.a) + $(.field(stringify!($B), &self.$B))* + .finish() + } + } + + pub fn $new<A, $($B),*>(a: A, $($B: $B),*) -> $Join<A, $($B),*> + where A: Future, + $($B: Future<Error=A::Error>),* + { + $Join { + a: MaybeDone::NotYet(a), + $($B: MaybeDone::NotYet($B)),* + } + } + + impl<A, $($B),*> $Join<A, $($B),*> + where A: Future, + $($B: Future<Error=A::Error>),* + { + fn erase(&mut self) { + self.a = MaybeDone::Gone; + $(self.$B = MaybeDone::Gone;)* + } + } + + impl<A, $($B),*> Future for $Join<A, $($B),*> + where A: Future, + $($B: Future<Error=A::Error>),* + { + type Item = (A::Item, $($B::Item),*); + type Error = A::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + let mut all_done = match self.a.poll() { + Ok(done) => done, + Err(e) => { + self.erase(); + return Err(e) + } + }; + $( + all_done = match self.$B.poll() { + Ok(done) => all_done && done, + Err(e) => { + self.erase(); + return Err(e) + } + }; + )* + + if all_done { + Ok(Async::Ready((self.a.take(), $(self.$B.take()),*))) + } else { + Ok(Async::NotReady) + } + } + } + + impl<A, $($B),*> IntoFuture for (A, $($B),*) + where A: IntoFuture, + $( + $B: IntoFuture<Error=A::Error> + ),* + { + type Future = $Join<A::Future, $($B::Future),*>; + type Item = (A::Item, $($B::Item),*); + type Error = A::Error; + + fn into_future(self) -> Self::Future { + match self { + (a, $($B),+) => { + $new( + 
IntoFuture::into_future(a), + $(IntoFuture::into_future($B)),+ + ) + } + } + } + } + + )*) +} + +generate! { + /// Future for the `join` combinator, waiting for two futures to + /// complete. + /// + /// This is created by the `Future::join` method. + (Join, new, <A, B>), + + /// Future for the `join3` combinator, waiting for three futures to + /// complete. + /// + /// This is created by the `Future::join3` method. + (Join3, new3, <A, B, C>), + + /// Future for the `join4` combinator, waiting for four futures to + /// complete. + /// + /// This is created by the `Future::join4` method. + (Join4, new4, <A, B, C, D>), + + /// Future for the `join5` combinator, waiting for five futures to + /// complete. + /// + /// This is created by the `Future::join5` method. + (Join5, new5, <A, B, C, D, E>), +} + +#[derive(Debug)] +enum MaybeDone<A: Future> { + NotYet(A), + Done(A::Item), + Gone, +} + +impl<A: Future> MaybeDone<A> { + fn poll(&mut self) -> Result<bool, A::Error> { + let res = match *self { + MaybeDone::NotYet(ref mut a) => a.poll()?, + MaybeDone::Done(_) => return Ok(true), + MaybeDone::Gone => panic!("cannot poll Join twice"), + }; + match res { + Async::Ready(res) => { + *self = MaybeDone::Done(res); + Ok(true) + } + Async::NotReady => Ok(false), + } + } + + fn take(&mut self) -> A::Item { + match mem::replace(self, MaybeDone::Gone) { + MaybeDone::Done(a) => a, + _ => panic!(), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/join_all.rs b/third_party/rust/futures-0.1.31/src/future/join_all.rs new file mode 100644 index 0000000000..398a7a4736 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/join_all.rs @@ -0,0 +1,136 @@ +//! Definition of the `JoinAll` combinator, waiting for all of a list of futures +//! to finish. 
+ +use std::prelude::v1::*; + +use std::fmt; +use std::mem; + +use {Future, IntoFuture, Poll, Async}; + +#[derive(Debug)] +enum ElemState<T> where T: Future { + Pending(T), + Done(T::Item), +} + +/// A future which takes a list of futures and resolves with a vector of the +/// completed values. +/// +/// This future is created with the `join_all` method. +#[must_use = "futures do nothing unless polled"] +pub struct JoinAll<I> + where I: IntoIterator, + I::Item: IntoFuture, +{ + elems: Vec<ElemState<<I::Item as IntoFuture>::Future>>, +} + +impl<I> fmt::Debug for JoinAll<I> + where I: IntoIterator, + I::Item: IntoFuture, + <<I as IntoIterator>::Item as IntoFuture>::Future: fmt::Debug, + <<I as IntoIterator>::Item as IntoFuture>::Item: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("JoinAll") + .field("elems", &self.elems) + .finish() + } +} + +/// Creates a future which represents a collection of the results of the futures +/// given. +/// +/// The returned future will drive execution for all of its underlying futures, +/// collecting the results into a destination `Vec<T>` in the same order as they +/// were provided. If any future returns an error then all other futures will be +/// canceled and an error will be returned immediately. If all futures complete +/// successfully, however, then the returned future will succeed with a `Vec` of +/// all the successful results. 
+/// +/// # Examples +/// +/// ``` +/// use futures::future::*; +/// +/// let f = join_all(vec![ +/// ok::<u32, u32>(1), +/// ok::<u32, u32>(2), +/// ok::<u32, u32>(3), +/// ]); +/// let f = f.map(|x| { +/// assert_eq!(x, [1, 2, 3]); +/// }); +/// +/// let f = join_all(vec![ +/// Box::new(ok::<u32, u32>(1)), +/// Box::new(err::<u32, u32>(2)), +/// Box::new(ok::<u32, u32>(3)), +/// ]); +/// let f = f.then(|x| { +/// assert_eq!(x, Err(2)); +/// x +/// }); +/// ``` +pub fn join_all<I>(i: I) -> JoinAll<I> + where I: IntoIterator, + I::Item: IntoFuture, +{ + let elems = i.into_iter().map(|f| { + ElemState::Pending(f.into_future()) + }).collect(); + JoinAll { elems: elems } +} + +impl<I> Future for JoinAll<I> + where I: IntoIterator, + I::Item: IntoFuture, +{ + type Item = Vec<<I::Item as IntoFuture>::Item>; + type Error = <I::Item as IntoFuture>::Error; + + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + let mut all_done = true; + + for idx in 0 .. self.elems.len() { + let done_val = match self.elems[idx] { + ElemState::Pending(ref mut t) => { + match t.poll() { + Ok(Async::Ready(v)) => Ok(v), + Ok(Async::NotReady) => { + all_done = false; + continue + } + Err(e) => Err(e), + } + } + ElemState::Done(ref mut _v) => continue, + }; + + match done_val { + Ok(v) => self.elems[idx] = ElemState::Done(v), + Err(e) => { + // On completion drop all our associated resources + // ASAP. + self.elems = Vec::new(); + return Err(e) + } + } + } + + if all_done { + let elems = mem::replace(&mut self.elems, Vec::new()); + let result = elems.into_iter().map(|e| { + match e { + ElemState::Done(t) => t, + _ => unreachable!(), + } + }).collect(); + Ok(Async::Ready(result)) + } else { + Ok(Async::NotReady) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/lazy.rs b/third_party/rust/futures-0.1.31/src/future/lazy.rs new file mode 100644 index 0000000000..2f310337b6 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/lazy.rs @@ -0,0 +1,84 @@ +//! 
Definition of the Lazy combinator, deferring execution of a function until +//! the future is polled. + +use core::mem; + +use {Future, IntoFuture, Poll}; + +/// A future which defers creation of the actual future until a callback is +/// scheduled. +/// +/// This is created by the `lazy` function. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Lazy<F, R: IntoFuture> { + inner: _Lazy<F, R::Future>, +} + +#[derive(Debug)] +enum _Lazy<F, R> { + First(F), + Second(R), + Moved, +} + +/// Creates a new future which will eventually be the same as the one created +/// by the closure provided. +/// +/// The provided closure is only run once the future has a callback scheduled +/// on it, otherwise the callback never runs. Once run, however, this future is +/// the same as the one the closure creates. +/// +/// # Examples +/// +/// ``` +/// use futures::future::*; +/// +/// let a = lazy(|| ok::<u32, u32>(1)); +/// +/// let b = lazy(|| -> FutureResult<u32, u32> { +/// panic!("oh no!") +/// }); +/// drop(b); // closure is never run +/// ``` +pub fn lazy<F, R>(f: F) -> Lazy<F, R> + where F: FnOnce() -> R, + R: IntoFuture +{ + Lazy { + inner: _Lazy::First(f), + } +} + +impl<F, R> Lazy<F, R> + where F: FnOnce() -> R, + R: IntoFuture, +{ + fn get(&mut self) -> &mut R::Future { + match self.inner { + _Lazy::First(_) => {} + _Lazy::Second(ref mut f) => return f, + _Lazy::Moved => panic!(), // can only happen if `f()` panics + } + match mem::replace(&mut self.inner, _Lazy::Moved) { + _Lazy::First(f) => self.inner = _Lazy::Second(f().into_future()), + _ => panic!(), // we already found First + } + match self.inner { + _Lazy::Second(ref mut f) => f, + _ => panic!(), // we just stored Second + } + } +} + +impl<F, R> Future for Lazy<F, R> + where F: FnOnce() -> R, + R: IntoFuture, +{ + type Item = R::Item; + type Error = R::Error; + + fn poll(&mut self) -> Poll<R::Item, R::Error> { + self.get().poll() + } +} diff --git 
a/third_party/rust/futures-0.1.31/src/future/loop_fn.rs b/third_party/rust/futures-0.1.31/src/future/loop_fn.rs new file mode 100644 index 0000000000..299a0383c2 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/loop_fn.rs @@ -0,0 +1,99 @@ +//! Definition of the `LoopFn` combinator, implementing `Future` loops. + +use {Async, Future, IntoFuture, Poll}; + +/// The status of a `loop_fn` loop. +#[derive(Debug)] +pub enum Loop<T, S> { + /// Indicates that the loop has completed with output `T`. + Break(T), + + /// Indicates that the loop function should be called again with input + /// state `S`. + Continue(S), +} + +/// A future implementing a tail-recursive loop. +/// +/// Created by the `loop_fn` function. +#[derive(Debug)] +pub struct LoopFn<A, F> where A: IntoFuture { + future: A::Future, + func: F, +} + +/// Creates a new future implementing a tail-recursive loop. +/// +/// The loop function is immediately called with `initial_state` and should +/// return a value that can be converted to a future. On successful completion, +/// this future should output a `Loop<T, S>` to indicate the status of the +/// loop. +/// +/// `Loop::Break(T)` halts the loop and completes the future with output `T`. +/// +/// `Loop::Continue(S)` reinvokes the loop function with state `S`. The returned +/// future will be subsequently polled for a new `Loop<T, S>` value. 
+/// +/// # Examples +/// +/// ``` +/// use futures::future::{ok, loop_fn, Future, FutureResult, Loop}; +/// use std::io::Error; +/// +/// struct Client { +/// ping_count: u8, +/// } +/// +/// impl Client { +/// fn new() -> Self { +/// Client { ping_count: 0 } +/// } +/// +/// fn send_ping(self) -> FutureResult<Self, Error> { +/// ok(Client { ping_count: self.ping_count + 1 }) +/// } +/// +/// fn receive_pong(self) -> FutureResult<(Self, bool), Error> { +/// let done = self.ping_count >= 5; +/// ok((self, done)) +/// } +/// } +/// +/// let ping_til_done = loop_fn(Client::new(), |client| { +/// client.send_ping() +/// .and_then(|client| client.receive_pong()) +/// .and_then(|(client, done)| { +/// if done { +/// Ok(Loop::Break(client)) +/// } else { +/// Ok(Loop::Continue(client)) +/// } +/// }) +/// }); +/// ``` +pub fn loop_fn<S, T, A, F>(initial_state: S, mut func: F) -> LoopFn<A, F> + where F: FnMut(S) -> A, + A: IntoFuture<Item = Loop<T, S>>, +{ + LoopFn { + future: func(initial_state).into_future(), + func: func, + } +} + +impl<S, T, A, F> Future for LoopFn<A, F> + where F: FnMut(S) -> A, + A: IntoFuture<Item = Loop<T, S>>, +{ + type Item = T; + type Error = A::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + loop { + match try_ready!(self.future.poll()) { + Loop::Break(x) => return Ok(Async::Ready(x)), + Loop::Continue(s) => self.future = (self.func)(s).into_future(), + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/map.rs b/third_party/rust/futures-0.1.31/src/future/map.rs new file mode 100644 index 0000000000..4b1f4cd7d4 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/map.rs @@ -0,0 +1,38 @@ +use {Future, Poll, Async}; + +/// Future for the `map` combinator, changing the type of a future. +/// +/// This is created by the `Future::map` method. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Map<A, F> where A: Future { + future: A, + f: Option<F>, +} + +pub fn new<A, F>(future: A, f: F) -> Map<A, F> + where A: Future, +{ + Map { + future: future, + f: Some(f), + } +} + +impl<U, A, F> Future for Map<A, F> + where A: Future, + F: FnOnce(A::Item) -> U, +{ + type Item = U; + type Error = A::Error; + + fn poll(&mut self) -> Poll<U, A::Error> { + let e = match self.future.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::Ready(e)) => Ok(e), + Err(e) => Err(e), + }; + e.map(self.f.take().expect("cannot poll Map twice")) + .map(Async::Ready) + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/map_err.rs b/third_party/rust/futures-0.1.31/src/future/map_err.rs new file mode 100644 index 0000000000..4ea12f4586 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/map_err.rs @@ -0,0 +1,36 @@ +use {Future, Poll, Async}; + +/// Future for the `map_err` combinator, changing the error type of a future. +/// +/// This is created by the `Future::map_err` method. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct MapErr<A, F> where A: Future { + future: A, + f: Option<F>, +} + +pub fn new<A, F>(future: A, f: F) -> MapErr<A, F> + where A: Future +{ + MapErr { + future: future, + f: Some(f), + } +} + +impl<U, A, F> Future for MapErr<A, F> + where A: Future, + F: FnOnce(A::Error) -> U, +{ + type Item = A::Item; + type Error = U; + + fn poll(&mut self) -> Poll<A::Item, U> { + let e = match self.future.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + other => other, + }; + e.map_err(self.f.take().expect("cannot poll MapErr twice")) + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/mod.rs b/third_party/rust/futures-0.1.31/src/future/mod.rs new file mode 100644 index 0000000000..9867765902 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/mod.rs @@ -0,0 +1,1171 @@ +//! Futures +//! +//! 
This module contains the `Future` trait and a number of adaptors for this +//! trait. See the crate docs, and the docs for `Future`, for full detail. + +use core::fmt; +use core::result; + +// Primitive futures +mod empty; +mod lazy; +mod poll_fn; +#[path = "result.rs"] +mod result_; +mod loop_fn; +mod option; +pub use self::empty::{empty, Empty}; +pub use self::lazy::{lazy, Lazy}; +pub use self::poll_fn::{poll_fn, PollFn}; +pub use self::result_::{result, ok, err, FutureResult}; +pub use self::loop_fn::{loop_fn, Loop, LoopFn}; + +#[doc(hidden)] +#[deprecated(since = "0.1.4", note = "use `ok` instead")] +#[cfg(feature = "with-deprecated")] +pub use self::{ok as finished, Ok as Finished}; +#[doc(hidden)] +#[deprecated(since = "0.1.4", note = "use `err` instead")] +#[cfg(feature = "with-deprecated")] +pub use self::{err as failed, Err as Failed}; +#[doc(hidden)] +#[deprecated(since = "0.1.4", note = "use `result` instead")] +#[cfg(feature = "with-deprecated")] +pub use self::{result as done, FutureResult as Done}; +#[doc(hidden)] +#[deprecated(since = "0.1.7", note = "use `FutureResult` instead")] +#[cfg(feature = "with-deprecated")] +pub use self::{FutureResult as Ok}; +#[doc(hidden)] +#[deprecated(since = "0.1.7", note = "use `FutureResult` instead")] +#[cfg(feature = "with-deprecated")] +pub use self::{FutureResult as Err}; + +// combinators +mod and_then; +mod flatten; +mod flatten_stream; +mod fuse; +mod into_stream; +mod join; +mod map; +mod map_err; +mod from_err; +mod or_else; +mod select; +mod select2; +mod then; +mod either; +mod inspect; + +// impl details +mod chain; + +pub use self::and_then::AndThen; +pub use self::flatten::Flatten; +pub use self::flatten_stream::FlattenStream; +pub use self::fuse::Fuse; +pub use self::into_stream::IntoStream; +pub use self::join::{Join, Join3, Join4, Join5}; +pub use self::map::Map; +pub use self::map_err::MapErr; +pub use self::from_err::FromErr; +pub use self::or_else::OrElse; +pub use self::select::{Select, 
SelectNext}; +pub use self::select2::Select2; +pub use self::then::Then; +pub use self::either::Either; +pub use self::inspect::Inspect; + +if_std! { + mod catch_unwind; + mod join_all; + mod select_all; + mod select_ok; + mod shared; + pub use self::catch_unwind::CatchUnwind; + pub use self::join_all::{join_all, JoinAll}; + pub use self::select_all::{SelectAll, SelectAllNext, select_all}; + pub use self::select_ok::{SelectOk, select_ok}; + pub use self::shared::{Shared, SharedItem, SharedError}; + + #[doc(hidden)] + #[deprecated(since = "0.1.4", note = "use join_all instead")] + #[cfg(feature = "with-deprecated")] + pub use self::join_all::join_all as collect; + #[doc(hidden)] + #[deprecated(since = "0.1.4", note = "use JoinAll instead")] + #[cfg(feature = "with-deprecated")] + pub use self::join_all::JoinAll as Collect; + + /// A type alias for `Box<Future + Send>` + #[doc(hidden)] + #[deprecated(note = "removed without replacement, recommended to use a \ + local extension trait or function if needed, more \ + details in https://github.com/rust-lang-nursery/futures-rs/issues/228")] + pub type BoxFuture<T, E> = ::std::boxed::Box<Future<Item = T, Error = E> + Send>; + + impl<F: ?Sized + Future> Future for ::std::boxed::Box<F> { + type Item = F::Item; + type Error = F::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + (**self).poll() + } + } +} + +use {Poll, stream}; + +/// Trait for types which are a placeholder of a value that may become +/// available at some later point in time. +/// +/// In addition to the documentation here you can also find more information +/// about futures [online] at [https://tokio.rs](https://tokio.rs) +/// +/// [online]: https://tokio.rs/docs/getting-started/futures/ +/// +/// Futures are used to provide a sentinel through which a value can be +/// referenced. They crucially allow chaining and composing operations through +/// consumption which allows expressing entire trees of computation as one +/// sentinel value. 
+/// +/// The ergonomics and implementation of the `Future` trait are very similar to +/// the `Iterator` trait in that there is just one methods you need +/// to implement, but you get a whole lot of others for free as a result. +/// +/// # The `poll` method +/// +/// The core method of future, `poll`, is used to attempt to generate the value +/// of a `Future`. This method *does not block* but is allowed to inform the +/// caller that the value is not ready yet. Implementations of `poll` may +/// themselves do work to generate the value, but it's guaranteed that this will +/// never block the calling thread. +/// +/// A key aspect of this method is that if the value is not yet available the +/// current task is scheduled to receive a notification when it's later ready to +/// be made available. This follows what's typically known as a "readiness" or +/// "pull" model where values are pulled out of futures on demand, and +/// otherwise a task is notified when a value might be ready to get pulled out. +/// +/// The `poll` method is not intended to be called in general, but rather is +/// typically called in the context of a "task" which drives a future to +/// completion. For more information on this see the `task` module. +/// +/// More information about the details of `poll` and the nitty-gritty of tasks +/// can be [found online at tokio.rs][poll-dox]. +/// +/// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/ +/// +/// # Combinators +/// +/// Like iterators, futures provide a large number of combinators to work with +/// futures to express computations in a much more natural method than +/// scheduling a number of callbacks. For example the `map` method can change +/// a `Future<Item=T>` to a `Future<Item=U>` or an `and_then` combinator could +/// create a future after the first one is done and only be resolved when the +/// second is done. 
+/// +/// Combinators act very similarly to the methods on the `Iterator` trait itself +/// or those on `Option` and `Result`. Like with iterators, the combinators are +/// zero-cost and don't impose any extra layers of indirection you wouldn't +/// otherwise have to write down. +/// +/// More information about combinators can be found [on tokio.rs]. +/// +/// [on tokio.rs]: https://tokio.rs/docs/going-deeper-futures/futures-mechanics/ +#[must_use = "futures do nothing unless polled"] +pub trait Future { + /// The type of value that this future will resolved with if it is + /// successful. + type Item; + + /// The type of error that this future will resolve with if it fails in a + /// normal fashion. + type Error; + + /// Query this future to see if its value has become available, registering + /// interest if it is not. + /// + /// This function will check the internal state of the future and assess + /// whether the value is ready to be produced. Implementers of this function + /// should ensure that a call to this **never blocks** as event loops may + /// not work properly otherwise. + /// + /// When a future is not ready yet, the `Async::NotReady` value will be + /// returned. In this situation the future will *also* register interest of + /// the current task in the value being produced. This is done by calling + /// `task::park` to retrieve a handle to the current `Task`. When the future + /// is then ready to make progress (e.g. it should be `poll`ed again) the + /// `unpark` method is called on the `Task`. + /// + /// More information about the details of `poll` and the nitty-gritty of + /// tasks can be [found online at tokio.rs][poll-dox]. + /// + /// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/ + /// + /// # Runtime characteristics + /// + /// This function, `poll`, is the primary method for 'making progress' + /// within a tree of futures. 
For example this method will be called + /// repeatedly as the internal state machine makes its various transitions. + /// Executors are responsible for ensuring that this function is called in + /// the right location (e.g. always on an I/O thread or not). Unless it is + /// otherwise arranged to be so, it should be ensured that **implementations + /// of this function finish very quickly**. + /// + /// Returning quickly prevents unnecessarily clogging up threads and/or + /// event loops while a `poll` function call, for example, takes up compute + /// resources to perform some expensive computation. If it is known ahead + /// of time that a call to `poll` may end up taking awhile, the work should + /// be offloaded to a thread pool (or something similar) to ensure that + /// `poll` can return quickly. + /// + /// Note that the `poll` function is not called repeatedly in a loop for + /// futures typically, but only whenever the future itself is ready. If + /// you're familiar with the `poll(2)` or `select(2)` syscalls on Unix + /// it's worth noting that futures typically do *not* suffer the same + /// problems of "all wakeups must poll all events". Futures have enough + /// support for only polling futures which cause a wakeup. + /// + /// # Return value + /// + /// This function returns `Async::NotReady` if the future is not ready yet, + /// `Err` if the future is finished but resolved to an error, or + /// `Async::Ready` with the result of this future if it's finished + /// successfully. Once a future has finished it is considered a contract + /// error to continue polling the future. + /// + /// If `NotReady` is returned, then the future will internally register + /// interest in the value being produced for the current task (through + /// `task::park`). In other words, the current task will receive a + /// notification (through the `unpark` method) once the value is ready to be + /// produced or the future can make progress. 
+ /// + /// Note that if `NotReady` is returned it only means that *this* task will + /// receive a notification. Historical calls to `poll` with different tasks + /// will not receive notifications. In other words, implementers of the + /// `Future` trait need not store a queue of tasks to notify, but only the + /// last task that called this method. Alternatively callers of this method + /// can only rely on the most recent task which call `poll` being notified + /// when a future is ready. + /// + /// # Panics + /// + /// Once a future has completed (returned `Ready` or `Err` from `poll`), + /// then any future calls to `poll` may panic, block forever, or otherwise + /// cause wrong behavior. The `Future` trait itself provides no guarantees + /// about the behavior of `poll` after a future has completed. + /// + /// Callers who may call `poll` too many times may want to consider using + /// the `fuse` adaptor which defines the behavior of `poll`, but comes with + /// a little bit of extra cost. + /// + /// Additionally, calls to `poll` must always be made from within the + /// context of a task. If a current task is not set then this method will + /// likely panic. + /// + /// # Errors + /// + /// This future may have failed to finish the computation, in which case + /// the `Err` variant will be returned with an appropriate payload of an + /// error. + fn poll(&mut self) -> Poll<Self::Item, Self::Error>; + + /// Block the current thread until this future is resolved. + /// + /// This method will consume ownership of this future, driving it to + /// completion via `poll` and blocking the current thread while it's waiting + /// for the value to become available. Once the future is resolved the + /// result of this future is returned. + /// + /// > **Note:** This method is not appropriate to call on event loops or + /// > similar I/O situations because it will prevent the event + /// > loop from making progress (this blocks the thread). 
This + /// > method should only be called when it's guaranteed that the + /// > blocking work associated with this future will be completed + /// > by another thread. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + /// + /// # Panics + /// + /// This function does not attempt to catch panics. If the `poll` function + /// of this future panics, panics will be propagated to the caller. + #[cfg(feature = "use_std")] + fn wait(self) -> result::Result<Self::Item, Self::Error> + where Self: Sized + { + ::executor::spawn(self).wait_future() + } + + /// Convenience function for turning this future into a trait object which + /// is also `Send`. + /// + /// This simply avoids the need to write `Box::new` and can often help with + /// type inference as well by always returning a trait object. Note that + /// this method requires the `Send` bound and returns a `BoxFuture`, which + /// also encodes this. If you'd like to create a `Box<Future>` without the + /// `Send` bound, then the `Box::new` function can be used instead. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future::{BoxFuture, result}; + /// + /// let a: BoxFuture<i32, i32> = result(Ok(1)).boxed(); + /// ``` + #[cfg(feature = "use_std")] + #[doc(hidden)] + #[deprecated(note = "removed without replacement, recommended to use a \ + local extension trait or function if needed, more \ + details in https://github.com/rust-lang-nursery/futures-rs/issues/228")] + #[allow(deprecated)] + fn boxed(self) -> BoxFuture<Self::Item, Self::Error> + where Self: Sized + Send + 'static + { + ::std::boxed::Box::new(self) + } + + /// Map this future's result to a different type, returning a new future of + /// the resulting type. 
+ /// + /// This function is similar to the `Option::map` or `Iterator::map` where + /// it will change the type of the underlying future. This is useful to + /// chain along a computation once a future has been resolved. + /// + /// The closure provided will only be called if this future is resolved + /// successfully. If this future returns an error, panics, or is dropped, + /// then the closure provided will never be invoked. + /// + /// Note that this function consumes the receiving future and returns a + /// wrapped version of it, similar to the existing `map` methods in the + /// standard library. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let future = future::ok::<u32, u32>(1); + /// let new_future = future.map(|x| x + 3); + /// assert_eq!(new_future.wait(), Ok(4)); + /// ``` + /// + /// Calling `map` on an errored `Future` has no effect: + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let future = future::err::<u32, u32>(1); + /// let new_future = future.map(|x| x + 3); + /// assert_eq!(new_future.wait(), Err(1)); + /// ``` + fn map<F, U>(self, f: F) -> Map<Self, F> + where F: FnOnce(Self::Item) -> U, + Self: Sized, + { + assert_future::<U, Self::Error, _>(map::new(self, f)) + } + + /// Map this future's error to a different error, returning a new future. + /// + /// This function is similar to the `Result::map_err` where it will change + /// the error type of the underlying future. This is useful for example to + /// ensure that futures have the same error type when used with combinators + /// like `select` and `join`. + /// + /// The closure provided will only be called if this future is resolved + /// with an error. If this future returns a success, panics, or is + /// dropped, then the closure provided will never be invoked. + /// + /// Note that this function consumes the receiving future and returns a + /// wrapped version of it. 
+ /// + /// # Examples + /// + /// ``` + /// use futures::future::*; + /// + /// let future = err::<u32, u32>(1); + /// let new_future = future.map_err(|x| x + 3); + /// assert_eq!(new_future.wait(), Err(4)); + /// ``` + /// + /// Calling `map_err` on a successful `Future` has no effect: + /// + /// ``` + /// use futures::future::*; + /// + /// let future = ok::<u32, u32>(1); + /// let new_future = future.map_err(|x| x + 3); + /// assert_eq!(new_future.wait(), Ok(1)); + /// ``` + fn map_err<F, E>(self, f: F) -> MapErr<Self, F> + where F: FnOnce(Self::Error) -> E, + Self: Sized, + { + assert_future::<Self::Item, E, _>(map_err::new(self, f)) + } + + + + /// Map this future's error to any error implementing `From` for + /// this future's `Error`, returning a new future. + /// + /// This function does for futures what `try!` does for `Result`, + /// by letting the compiler infer the type of the resulting error. + /// Just as `map_err` above, this is useful for example to ensure + /// that futures have the same error type when used with + /// combinators like `select` and `join`. + /// + /// Note that this function consumes the receiving future and returns a + /// wrapped version of it. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let future_with_err_u8 = future::err::<(), u8>(1); + /// let future_with_err_u32 = future_with_err_u8.from_err::<u32>(); + /// ``` + fn from_err<E:From<Self::Error>>(self) -> FromErr<Self, E> + where Self: Sized, + { + assert_future::<Self::Item, E, _>(from_err::new(self)) + } + + /// Chain on a computation for when a future finished, passing the result of + /// the future to the provided closure `f`. + /// + /// This function can be used to ensure a computation runs regardless of + /// the conclusion of the future. The closure provided will be yielded a + /// `Result` once the future is complete. 
+ /// + /// The returned value of the closure must implement the `IntoFuture` trait + /// and can represent some more work to be done before the composed future + /// is finished. Note that the `Result` type implements the `IntoFuture` + /// trait so it is possible to simply alter the `Result` yielded to the + /// closure and return it. + /// + /// If this future is dropped or panics then the closure `f` will not be + /// run. + /// + /// Note that this function consumes the receiving future and returns a + /// wrapped version of it. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let future_of_1 = future::ok::<u32, u32>(1); + /// let future_of_4 = future_of_1.then(|x| { + /// x.map(|y| y + 3) + /// }); + /// + /// let future_of_err_1 = future::err::<u32, u32>(1); + /// let future_of_4 = future_of_err_1.then(|x| { + /// match x { + /// Ok(_) => panic!("expected an error"), + /// Err(y) => future::ok::<u32, u32>(y + 3), + /// } + /// }); + /// ``` + fn then<F, B>(self, f: F) -> Then<Self, B, F> + where F: FnOnce(result::Result<Self::Item, Self::Error>) -> B, + B: IntoFuture, + Self: Sized, + { + assert_future::<B::Item, B::Error, _>(then::new(self, f)) + } + + /// Execute another future after this one has resolved successfully. + /// + /// This function can be used to chain two futures together and ensure that + /// the final future isn't resolved until both have finished. The closure + /// provided is yielded the successful result of this future and returns + /// another value which can be converted into a future. + /// + /// Note that because `Result` implements the `IntoFuture` trait this method + /// can also be useful for chaining fallible and serial computations onto + /// the end of one future. + /// + /// If this future is dropped, panics, or completes with an error then the + /// provided closure `f` is never called. 
+ /// + /// Note that this function consumes the receiving future and returns a + /// wrapped version of it. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future::{self, FutureResult}; + /// + /// let future_of_1 = future::ok::<u32, u32>(1); + /// let future_of_4 = future_of_1.and_then(|x| { + /// Ok(x + 3) + /// }); + /// + /// let future_of_err_1 = future::err::<u32, u32>(1); + /// future_of_err_1.and_then(|_| -> FutureResult<u32, u32> { + /// panic!("should not be called in case of an error"); + /// }); + /// ``` + fn and_then<F, B>(self, f: F) -> AndThen<Self, B, F> + where F: FnOnce(Self::Item) -> B, + B: IntoFuture<Error = Self::Error>, + Self: Sized, + { + assert_future::<B::Item, Self::Error, _>(and_then::new(self, f)) + } + + /// Execute another future if this one resolves with an error. + /// + /// Return a future that passes along this future's value if it succeeds, + /// and otherwise passes the error to the closure `f` and waits for the + /// future it returns. The closure may also simply return a value that can + /// be converted into a future. + /// + /// Note that because `Result` implements the `IntoFuture` trait this method + /// can also be useful for chaining together fallback computations, where + /// when one fails, the next is attempted. + /// + /// If this future is dropped, panics, or completes successfully then the + /// provided closure `f` is never called. + /// + /// Note that this function consumes the receiving future and returns a + /// wrapped version of it. 
+ /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future::{self, FutureResult}; + /// + /// let future_of_err_1 = future::err::<u32, u32>(1); + /// let future_of_4 = future_of_err_1.or_else(|x| -> Result<u32, u32> { + /// Ok(x + 3) + /// }); + /// + /// let future_of_1 = future::ok::<u32, u32>(1); + /// future_of_1.or_else(|_| -> FutureResult<u32, u32> { + /// panic!("should not be called in case of success"); + /// }); + /// ``` + fn or_else<F, B>(self, f: F) -> OrElse<Self, B, F> + where F: FnOnce(Self::Error) -> B, + B: IntoFuture<Item = Self::Item>, + Self: Sized, + { + assert_future::<Self::Item, B::Error, _>(or_else::new(self, f)) + } + + /// Waits for either one of two futures to complete. + /// + /// This function will return a new future which awaits for either this or + /// the `other` future to complete. The returned future will finish with + /// both the value resolved and a future representing the completion of the + /// other work. Both futures must have the same item and error type. + /// + /// Note that this function consumes the receiving futures and returns a + /// wrapped version of them. 
+ /// + /// # Examples + /// + /// ```no_run + /// use futures::prelude::*; + /// use futures::future; + /// use std::thread; + /// use std::time; + /// + /// let future1 = future::lazy(|| { + /// thread::sleep(time::Duration::from_secs(5)); + /// future::ok::<char, ()>('a') + /// }); + /// + /// let future2 = future::lazy(|| { + /// thread::sleep(time::Duration::from_secs(3)); + /// future::ok::<char, ()>('b') + /// }); + /// + /// let (value, last_future) = future1.select(future2).wait().ok().unwrap(); + /// assert_eq!(value, 'a'); + /// assert_eq!(last_future.wait().unwrap(), 'b'); + /// ``` + /// + /// A poor-man's `join` implemented on top of `select`: + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// fn join<A>(a: A, b: A) -> Box<Future<Item=(u32, u32), Error=u32>> + /// where A: Future<Item = u32, Error = u32> + 'static, + /// { + /// Box::new(a.select(b).then(|res| -> Box<Future<Item=_, Error=_>> { + /// match res { + /// Ok((a, b)) => Box::new(b.map(move |b| (a, b))), + /// Err((a, _)) => Box::new(future::err(a)), + /// } + /// })) + /// } + /// ``` + fn select<B>(self, other: B) -> Select<Self, B::Future> + where B: IntoFuture<Item=Self::Item, Error=Self::Error>, + Self: Sized, + { + let f = select::new(self, other.into_future()); + assert_future::<(Self::Item, SelectNext<Self, B::Future>), + (Self::Error, SelectNext<Self, B::Future>), _>(f) + } + + /// Waits for either one of two differently-typed futures to complete. + /// + /// This function will return a new future which awaits for either this or + /// the `other` future to complete. The returned future will finish with + /// both the value resolved and a future representing the completion of the + /// other work. + /// + /// Note that this function consumes the receiving futures and returns a + /// wrapped version of them. 
+ /// + /// Also note that if both this and the second future have the same + /// success/error type you can use the `Either::split` method to + /// conveniently extract out the value at the end. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future::{self, Either}; + /// + /// // A poor-man's join implemented on top of select2 + /// + /// fn join<A, B, E>(a: A, b: B) -> Box<Future<Item=(A::Item, B::Item), Error=E>> + /// where A: Future<Error = E> + 'static, + /// B: Future<Error = E> + 'static, + /// E: 'static, + /// { + /// Box::new(a.select2(b).then(|res| -> Box<Future<Item=_, Error=_>> { + /// match res { + /// Ok(Either::A((x, b))) => Box::new(b.map(move |y| (x, y))), + /// Ok(Either::B((y, a))) => Box::new(a.map(move |x| (x, y))), + /// Err(Either::A((e, _))) => Box::new(future::err(e)), + /// Err(Either::B((e, _))) => Box::new(future::err(e)), + /// } + /// })) + /// } + /// ``` + fn select2<B>(self, other: B) -> Select2<Self, B::Future> + where B: IntoFuture, Self: Sized + { + select2::new(self, other.into_future()) + } + + /// Joins the result of two futures, waiting for them both to complete. + /// + /// This function will return a new future which awaits both this and the + /// `other` future to complete. The returned future will finish with a tuple + /// of both results. + /// + /// Both futures must have the same error type, and if either finishes with + /// an error then the other will be dropped and that error will be + /// returned. + /// + /// Note that this function consumes the receiving future and returns a + /// wrapped version of it. 
+ /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let a = future::ok::<u32, u32>(1); + /// let b = future::ok::<u32, u32>(2); + /// let pair = a.join(b); + /// + /// assert_eq!(pair.wait(), Ok((1, 2))); + /// ``` + /// + /// If one or both of the joined `Future`s is errored, the resulting + /// `Future` will be errored: + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let a = future::ok::<u32, u32>(1); + /// let b = future::err::<u32, u32>(2); + /// let pair = a.join(b); + /// + /// assert_eq!(pair.wait(), Err(2)); + /// ``` + fn join<B>(self, other: B) -> Join<Self, B::Future> + where B: IntoFuture<Error=Self::Error>, + Self: Sized, + { + let f = join::new(self, other.into_future()); + assert_future::<(Self::Item, B::Item), Self::Error, _>(f) + } + + /// Same as `join`, but with more futures. + fn join3<B, C>(self, b: B, c: C) -> Join3<Self, B::Future, C::Future> + where B: IntoFuture<Error=Self::Error>, + C: IntoFuture<Error=Self::Error>, + Self: Sized, + { + join::new3(self, b.into_future(), c.into_future()) + } + + /// Same as `join`, but with more futures. + fn join4<B, C, D>(self, b: B, c: C, d: D) + -> Join4<Self, B::Future, C::Future, D::Future> + where B: IntoFuture<Error=Self::Error>, + C: IntoFuture<Error=Self::Error>, + D: IntoFuture<Error=Self::Error>, + Self: Sized, + { + join::new4(self, b.into_future(), c.into_future(), d.into_future()) + } + + /// Same as `join`, but with more futures. + fn join5<B, C, D, E>(self, b: B, c: C, d: D, e: E) + -> Join5<Self, B::Future, C::Future, D::Future, E::Future> + where B: IntoFuture<Error=Self::Error>, + C: IntoFuture<Error=Self::Error>, + D: IntoFuture<Error=Self::Error>, + E: IntoFuture<Error=Self::Error>, + Self: Sized, + { + join::new5(self, b.into_future(), c.into_future(), d.into_future(), + e.into_future()) + } + + /// Convert this future into a single element stream. 
+ /// + /// The returned stream contains single success if this future resolves to + /// success or single error if this future resolves into error. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let future = future::ok::<_, bool>(17); + /// let mut stream = future.into_stream(); + /// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll()); + /// assert_eq!(Ok(Async::Ready(None)), stream.poll()); + /// + /// let future = future::err::<bool, _>(19); + /// let mut stream = future.into_stream(); + /// assert_eq!(Err(19), stream.poll()); + /// assert_eq!(Ok(Async::Ready(None)), stream.poll()); + /// ``` + fn into_stream(self) -> IntoStream<Self> + where Self: Sized + { + into_stream::new(self) + } + + /// Flatten the execution of this future when the successful result of this + /// future is itself another future. + /// + /// This can be useful when combining futures together to flatten the + /// computation out the final result. This method can only be called + /// when the successful result of this future itself implements the + /// `IntoFuture` trait and the error can be created from this future's error + /// type. + /// + /// This method is roughly equivalent to `self.and_then(|x| x)`. + /// + /// Note that this function consumes the receiving future and returns a + /// wrapped version of it. 
+ /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let nested_future = future::ok::<_, u32>(future::ok::<u32, u32>(1)); + /// let future = nested_future.flatten(); + /// assert_eq!(future.wait(), Ok(1)); + /// ``` + /// + /// Calling `flatten` on an errored `Future`, or if the inner `Future` is + /// errored, will result in an errored `Future`: + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let nested_future = future::ok::<_, u32>(future::err::<u32, u32>(1)); + /// let future = nested_future.flatten(); + /// assert_eq!(future.wait(), Err(1)); + /// ``` + fn flatten(self) -> Flatten<Self> + where Self::Item: IntoFuture, + <<Self as Future>::Item as IntoFuture>::Error: + From<<Self as Future>::Error>, + Self: Sized + { + let f = flatten::new(self); + assert_future::<<<Self as Future>::Item as IntoFuture>::Item, + <<Self as Future>::Item as IntoFuture>::Error, + _>(f) + } + + /// Flatten the execution of this future when the successful result of this + /// future is a stream. + /// + /// This can be useful when stream initialization is deferred, and it is + /// convenient to work with that stream as if stream was available at the + /// call site. + /// + /// Note that this function consumes this future and returns a wrapped + /// version of it. 
+ /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// use futures::stream; + /// + /// let stream_items = vec![17, 18, 19]; + /// let future_of_a_stream = future::ok::<_, bool>(stream::iter_ok(stream_items)); + /// + /// let stream = future_of_a_stream.flatten_stream(); + /// + /// let mut iter = stream.wait(); + /// assert_eq!(Ok(17), iter.next().unwrap()); + /// assert_eq!(Ok(18), iter.next().unwrap()); + /// assert_eq!(Ok(19), iter.next().unwrap()); + /// assert_eq!(None, iter.next()); + /// ``` + fn flatten_stream(self) -> FlattenStream<Self> + where <Self as Future>::Item: stream::Stream<Error=Self::Error>, + Self: Sized + { + flatten_stream::new(self) + } + + /// Fuse a future such that `poll` will never again be called once it has + /// completed. + /// + /// Currently once a future has returned `Ready` or `Err` from + /// `poll` any further calls could exhibit bad behavior such as blocking + /// forever, panicking, never returning, etc. If it is known that `poll` + /// may be called too often then this method can be used to ensure that it + /// has defined semantics. + /// + /// Once a future has been `fuse`d and it returns a completion from `poll`, + /// then it will forever return `NotReady` from `poll` again (never + /// resolve). This, unlike the trait's `poll` method, is guaranteed. + /// + /// This combinator will drop this future as soon as it's been completed to + /// ensure resources are reclaimed as soon as possible. 
+ /// + /// # Examples + /// + /// ```rust + /// use futures::prelude::*; + /// use futures::future; + /// + /// let mut future = future::ok::<i32, u32>(2); + /// assert_eq!(future.poll(), Ok(Async::Ready(2))); + /// + /// // Normally, a call such as this would panic: + /// //future.poll(); + /// + /// // This, however, is guaranteed to not panic + /// let mut future = future::ok::<i32, u32>(2).fuse(); + /// assert_eq!(future.poll(), Ok(Async::Ready(2))); + /// assert_eq!(future.poll(), Ok(Async::NotReady)); + /// ``` + fn fuse(self) -> Fuse<Self> + where Self: Sized + { + let f = fuse::new(self); + assert_future::<Self::Item, Self::Error, _>(f) + } + + /// Do something with the item of a future, passing it on. + /// + /// When using futures, you'll often chain several of them together. + /// While working on such code, you might want to check out what's happening at + /// various parts in the pipeline. To do that, insert a call to inspect(). + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let future = future::ok::<u32, u32>(1); + /// let new_future = future.inspect(|&x| println!("about to resolve: {}", x)); + /// assert_eq!(new_future.wait(), Ok(1)); + /// ``` + fn inspect<F>(self, f: F) -> Inspect<Self, F> + where F: FnOnce(&Self::Item) -> (), + Self: Sized, + { + assert_future::<Self::Item, Self::Error, _>(inspect::new(self, f)) + } + + /// Catches unwinding panics while polling the future. + /// + /// In general, panics within a future can propagate all the way out to the + /// task level. This combinator makes it possible to halt unwinding within + /// the future itself. It's most commonly used within task executors. It's + /// not recommended to use this for error handling. + /// + /// Note that this method requires the `UnwindSafe` bound from the standard + /// library. 
This isn't always applied automatically, and the standard + /// library provides an `AssertUnwindSafe` wrapper type to apply it + /// after-the fact. To assist using this method, the `Future` trait is also + /// implemented for `AssertUnwindSafe<F>` where `F` implements `Future`. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + /// + /// # Examples + /// + /// ```rust + /// use futures::prelude::*; + /// use futures::future::{self, FutureResult}; + /// + /// let mut future = future::ok::<i32, u32>(2); + /// assert!(future.catch_unwind().wait().is_ok()); + /// + /// let mut future = future::lazy(|| -> FutureResult<i32, u32> { + /// panic!(); + /// future::ok::<i32, u32>(2) + /// }); + /// assert!(future.catch_unwind().wait().is_err()); + /// ``` + #[cfg(feature = "use_std")] + fn catch_unwind(self) -> CatchUnwind<Self> + where Self: Sized + ::std::panic::UnwindSafe + { + catch_unwind::new(self) + } + + /// Create a cloneable handle to this future where all handles will resolve + /// to the same result. + /// + /// The shared() method provides a method to convert any future into a + /// cloneable future. It enables a future to be polled by multiple threads. + /// + /// The returned `Shared` future resolves successfully with + /// `SharedItem<Self::Item>` or erroneously with `SharedError<Self::Error>`. + /// Both `SharedItem` and `SharedError` implements `Deref` to allow shared + /// access to the underlying result. Ownership of `Self::Item` and + /// `Self::Error` cannot currently be reclaimed. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. 
+ /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::future; + /// + /// let future = future::ok::<_, bool>(6); + /// let shared1 = future.shared(); + /// let shared2 = shared1.clone(); + /// assert_eq!(6, *shared1.wait().unwrap()); + /// assert_eq!(6, *shared2.wait().unwrap()); + /// ``` + /// + /// ``` + /// use std::thread; + /// use futures::prelude::*; + /// use futures::future; + /// + /// let future = future::ok::<_, bool>(6); + /// let shared1 = future.shared(); + /// let shared2 = shared1.clone(); + /// let join_handle = thread::spawn(move || { + /// assert_eq!(6, *shared2.wait().unwrap()); + /// }); + /// assert_eq!(6, *shared1.wait().unwrap()); + /// join_handle.join().unwrap(); + /// ``` + #[cfg(feature = "use_std")] + fn shared(self) -> Shared<Self> + where Self: Sized + { + shared::new(self) + } +} + +impl<'a, F: ?Sized + Future> Future for &'a mut F { + type Item = F::Item; + type Error = F::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + (**self).poll() + } +} + +// Just a helper function to ensure the futures we're returning all have the +// right implementations. +fn assert_future<A, B, F>(t: F) -> F + where F: Future<Item=A, Error=B>, +{ + t +} + +/// Class of types which can be converted into a future. +/// +/// This trait is very similar to the `IntoIterator` trait and is intended to be +/// used in a very similar fashion. +pub trait IntoFuture { + /// The future that this type can be converted into. + type Future: Future<Item=Self::Item, Error=Self::Error>; + + /// The item that the future may resolve with. + type Item; + /// The error that the future may resolve with. + type Error; + + /// Consumes this object and produces a future. 
+ fn into_future(self) -> Self::Future; +} + +impl<F: Future> IntoFuture for F { + type Future = F; + type Item = F::Item; + type Error = F::Error; + + fn into_future(self) -> F { + self + } +} + +impl<T, E> IntoFuture for result::Result<T, E> { + type Future = FutureResult<T, E>; + type Item = T; + type Error = E; + + fn into_future(self) -> FutureResult<T, E> { + result(self) + } +} + +/// Asynchronous conversion from a type `T`. +/// +/// This trait is analogous to `std::convert::From`, adapted to asynchronous +/// computation. +pub trait FutureFrom<T>: Sized { + /// The future for the conversion. + type Future: Future<Item=Self, Error=Self::Error>; + + /// Possible errors during conversion. + type Error; + + /// Consume the given value, beginning the conversion. + fn future_from(T) -> Self::Future; +} + +/// A trait for types which can spawn fresh futures. +/// +/// This trait is typically implemented for "executors", or those types which +/// can execute futures to completion. Futures passed to `Spawn::spawn` +/// typically get turned into a *task* and are then driven to completion. +/// +/// On spawn, the executor takes ownership of the future and becomes responsible +/// to call `Future::poll()` whenever a readiness notification is raised. +pub trait Executor<F: Future<Item = (), Error = ()>> { + /// Spawns a future to run on this `Executor`, typically in the + /// "background". + /// + /// This function will return immediately, and schedule the future `future` + /// to run on `self`. The details of scheduling and execution are left to + /// the implementations of `Executor`, but this is typically a primary point + /// for injecting concurrency in a futures-based system. Futures spawned + /// through this `execute` function tend to run concurrently while they're + /// waiting on events. + /// + /// # Errors + /// + /// Implementers of this trait are allowed to reject accepting this future + /// as well. 
This can happen for various reason such as: + /// + /// * The executor is shut down + /// * The executor has run out of capacity to execute futures + /// + /// The decision is left to the caller how to work with this form of error. + /// The error returned transfers ownership of the future back to the caller. + fn execute(&self, future: F) -> Result<(), ExecuteError<F>>; +} + +/// Errors returned from the `Spawn::spawn` function. +pub struct ExecuteError<F> { + future: F, + kind: ExecuteErrorKind, +} + +/// Kinds of errors that can be returned from the `Execute::spawn` function. +/// +/// Executors which may not always be able to accept a future may return one of +/// these errors, indicating why it was unable to spawn a future. +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum ExecuteErrorKind { + /// This executor has shut down and will no longer accept new futures to + /// spawn. + Shutdown, + + /// This executor has no more capacity to run more futures. Other futures + /// need to finish before this executor can accept another. + NoCapacity, + + #[doc(hidden)] + __Nonexhaustive, +} + +impl<F> ExecuteError<F> { + /// Create a new `ExecuteError` + pub fn new(kind: ExecuteErrorKind, future: F) -> ExecuteError<F> { + ExecuteError { + future: future, + kind: kind, + } + } + + /// Returns the associated reason for the error + pub fn kind(&self) -> ExecuteErrorKind { + self.kind + } + + /// Consumes self and returns the original future that was spawned. 
+ pub fn into_future(self) -> F { + self.future + } +} + +impl<F> fmt::Debug for ExecuteError<F> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.kind { + ExecuteErrorKind::Shutdown => "executor has shut down".fmt(f), + ExecuteErrorKind::NoCapacity => "executor has no more capacity".fmt(f), + ExecuteErrorKind::__Nonexhaustive => panic!(), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/option.rs b/third_party/rust/futures-0.1.31/src/future/option.rs new file mode 100644 index 0000000000..1b204d376a --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/option.rs @@ -0,0 +1,15 @@ +//! Definition of the `Option` (optional step) combinator + +use {Future, Poll, Async}; + +impl<F, T, E> Future for Option<F> where F: Future<Item=T, Error=E> { + type Item = Option<T>; + type Error = E; + + fn poll(&mut self) -> Poll<Option<T>, E> { + match *self { + None => Ok(Async::Ready(None)), + Some(ref mut x) => x.poll().map(|x| x.map(Some)), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/or_else.rs b/third_party/rust/futures-0.1.31/src/future/or_else.rs new file mode 100644 index 0000000000..bc134137af --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/or_else.rs @@ -0,0 +1,39 @@ +use {Future, IntoFuture, Poll}; +use super::chain::Chain; + +/// Future for the `or_else` combinator, chaining a computation onto the end of +/// a future which fails with an error. +/// +/// This is created by the `Future::or_else` method. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct OrElse<A, B, F> where A: Future, B: IntoFuture { + state: Chain<A, B::Future, F>, +} + +pub fn new<A, B, F>(future: A, f: F) -> OrElse<A, B, F> + where A: Future, + B: IntoFuture<Item=A::Item>, +{ + OrElse { + state: Chain::new(future, f), + } +} + +impl<A, B, F> Future for OrElse<A, B, F> + where A: Future, + B: IntoFuture<Item=A::Item>, + F: FnOnce(A::Error) -> B, +{ + type Item = B::Item; + type Error = B::Error; + + fn poll(&mut self) -> Poll<B::Item, B::Error> { + self.state.poll(|a, f| { + match a { + Ok(item) => Ok(Ok(item)), + Err(e) => Ok(Err(f(e).into_future())) + } + }) + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/poll_fn.rs b/third_party/rust/futures-0.1.31/src/future/poll_fn.rs new file mode 100644 index 0000000000..d96bf2f98d --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/poll_fn.rs @@ -0,0 +1,45 @@ +//! Definition of the `PollFn` adapter combinator + +use {Future, Poll}; + +/// A future which adapts a function returning `Poll`. +/// +/// Created by the `poll_fn` function. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct PollFn<F> { + inner: F, +} + +/// Creates a new future wrapping around a function returning `Poll`. +/// +/// Polling the returned future delegates to the wrapped function. 
+/// +/// # Examples +/// +/// ``` +/// use futures::future::poll_fn; +/// use futures::{Async, Poll}; +/// +/// fn read_line() -> Poll<String, std::io::Error> { +/// Ok(Async::Ready("Hello, World!".into())) +/// } +/// +/// let read_future = poll_fn(read_line); +/// ``` +pub fn poll_fn<T, E, F>(f: F) -> PollFn<F> + where F: FnMut() -> ::Poll<T, E> +{ + PollFn { inner: f } +} + +impl<T, E, F> Future for PollFn<F> + where F: FnMut() -> Poll<T, E> +{ + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<T, E> { + (self.inner)() + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/result.rs b/third_party/rust/futures-0.1.31/src/future/result.rs new file mode 100644 index 0000000000..5c44a63e1f --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/result.rs @@ -0,0 +1,81 @@ +//! Definition of the `Result` (immediately finished) combinator + +use core::result; + +use {Future, Poll, Async}; + +/// A future representing a value that is immediately ready. +/// +/// Created by the `result` function. +#[derive(Debug, Clone)] +#[must_use = "futures do nothing unless polled"] +// TODO: rename this to `Result` on the next major version +pub struct FutureResult<T, E> { + inner: Option<result::Result<T, E>>, +} + +/// Creates a new "leaf future" which will resolve with the given result. +/// +/// The returned future represents a computation which is finished immediately. +/// This can be useful with the `finished` and `failed` base future types to +/// convert an immediate value to a future to interoperate elsewhere. +/// +/// # Examples +/// +/// ``` +/// use futures::future::*; +/// +/// let future_of_1 = result::<u32, u32>(Ok(1)); +/// let future_of_err_2 = result::<u32, u32>(Err(2)); +/// ``` +pub fn result<T, E>(r: result::Result<T, E>) -> FutureResult<T, E> { + FutureResult { inner: Some(r) } +} + +/// Creates a "leaf future" from an immediate value of a finished and +/// successful computation. 
+/// +/// The returned future is similar to `result` where it will immediately run a +/// scheduled callback with the provided value. +/// +/// # Examples +/// +/// ``` +/// use futures::future::*; +/// +/// let future_of_1 = ok::<u32, u32>(1); +/// ``` +pub fn ok<T, E>(t: T) -> FutureResult<T, E> { + result(Ok(t)) +} + +/// Creates a "leaf future" from an immediate value of a failed computation. +/// +/// The returned future is similar to `result` where it will immediately run a +/// scheduled callback with the provided value. +/// +/// # Examples +/// +/// ``` +/// use futures::future::*; +/// +/// let future_of_err_1 = err::<u32, u32>(1); +/// ``` +pub fn err<T, E>(e: E) -> FutureResult<T, E> { + result(Err(e)) +} + +impl<T, E> Future for FutureResult<T, E> { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<T, E> { + self.inner.take().expect("cannot poll Result twice").map(Async::Ready) + } +} + +impl<T, E> From<Result<T, E>> for FutureResult<T, E> { + fn from(r: Result<T, E>) -> Self { + result(r) + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/select.rs b/third_party/rust/futures-0.1.31/src/future/select.rs new file mode 100644 index 0000000000..c48e1c0a1e --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/select.rs @@ -0,0 +1,86 @@ +use {Future, Poll, Async}; + +/// Future for the `select` combinator, waiting for one of two futures to +/// complete. +/// +/// This is created by the `Future::select` method. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Select<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> { + inner: Option<(A, B)>, +} + +/// Future yielded as the second result in a `Select` future. +/// +/// This sentinel future represents the completion of the second future to a +/// `select` which finished second. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct SelectNext<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> { + inner: OneOf<A, B>, +} + +#[derive(Debug)] +enum OneOf<A, B> where A: Future, B: Future { + A(A), + B(B), +} + +pub fn new<A, B>(a: A, b: B) -> Select<A, B> + where A: Future, + B: Future<Item=A::Item, Error=A::Error> +{ + Select { + inner: Some((a, b)), + } +} + +impl<A, B> Future for Select<A, B> + where A: Future, + B: Future<Item=A::Item, Error=A::Error>, +{ + type Item = (A::Item, SelectNext<A, B>); + type Error = (A::Error, SelectNext<A, B>); + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + let (ret, is_a) = match self.inner { + Some((ref mut a, ref mut b)) => { + match a.poll() { + Err(a) => (Err(a), true), + Ok(Async::Ready(a)) => (Ok(a), true), + Ok(Async::NotReady) => { + match b.poll() { + Err(a) => (Err(a), false), + Ok(Async::Ready(a)) => (Ok(a), false), + Ok(Async::NotReady) => return Ok(Async::NotReady), + } + } + } + } + None => panic!("cannot poll select twice"), + }; + + let (a, b) = self.inner.take().unwrap(); + let next = if is_a {OneOf::B(b)} else {OneOf::A(a)}; + let next = SelectNext { inner: next }; + match ret { + Ok(a) => Ok(Async::Ready((a, next))), + Err(e) => Err((e, next)), + } + } +} + +impl<A, B> Future for SelectNext<A, B> + where A: Future, + B: Future<Item=A::Item, Error=A::Error>, +{ + type Item = A::Item; + type Error = A::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + match self.inner { + OneOf::A(ref mut a) => a.poll(), + OneOf::B(ref mut b) => b.poll(), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/select2.rs b/third_party/rust/futures-0.1.31/src/future/select2.rs new file mode 100644 index 0000000000..073f67be4a --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/select2.rs @@ -0,0 +1,39 @@ +use {Future, Poll, Async}; +use future::Either; + +/// Future for the `select2` combinator, waiting for one of two 
differently-typed +/// futures to complete. +/// +/// This is created by the [`Future::select2`] method. +/// +/// [`Future::select2`]: trait.Future.html#method.select2 +#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] +pub struct Select2<A, B> { + inner: Option<(A, B)>, +} + +pub fn new<A, B>(a: A, b: B) -> Select2<A, B> { + Select2 { inner: Some((a, b)) } +} + +impl<A, B> Future for Select2<A, B> where A: Future, B: Future { + type Item = Either<(A::Item, B), (B::Item, A)>; + type Error = Either<(A::Error, B), (B::Error, A)>; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + let (mut a, mut b) = self.inner.take().expect("cannot poll Select2 twice"); + match a.poll() { + Err(e) => Err(Either::A((e, b))), + Ok(Async::Ready(x)) => Ok(Async::Ready(Either::A((x, b)))), + Ok(Async::NotReady) => match b.poll() { + Err(e) => Err(Either::B((e, a))), + Ok(Async::Ready(x)) => Ok(Async::Ready(Either::B((x, a)))), + Ok(Async::NotReady) => { + self.inner = Some((a, b)); + Ok(Async::NotReady) + } + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/select_all.rs b/third_party/rust/futures-0.1.31/src/future/select_all.rs new file mode 100644 index 0000000000..1fbc98693b --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/select_all.rs @@ -0,0 +1,71 @@ +//! Definition of the `SelectAll`, finding the first future in a list that +//! finishes. + +use std::mem; +use std::prelude::v1::*; + +use {Future, IntoFuture, Poll, Async}; + +/// Future for the `select_all` combinator, waiting for one of any of a list of +/// futures to complete. +/// +/// This is created by the `select_all` function. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct SelectAll<A> where A: Future { + inner: Vec<A>, +} + +#[doc(hidden)] +pub type SelectAllNext<A> = A; + +/// Creates a new future which will select over a list of futures. +/// +/// The returned future will wait for any future within `iter` to be ready. 
Upon +/// completion or failure the item resolved will be returned, along with the +/// index of the future that was ready and the list of all the remaining +/// futures. +/// +/// # Panics +/// +/// This function will panic if the iterator specified contains no items. +pub fn select_all<I>(iter: I) -> SelectAll<<I::Item as IntoFuture>::Future> + where I: IntoIterator, + I::Item: IntoFuture, +{ + let ret = SelectAll { + inner: iter.into_iter() + .map(|a| a.into_future()) + .collect(), + }; + assert!(ret.inner.len() > 0); + ret +} + +impl<A> Future for SelectAll<A> + where A: Future, +{ + type Item = (A::Item, usize, Vec<A>); + type Error = (A::Error, usize, Vec<A>); + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + let item = self.inner.iter_mut().enumerate().filter_map(|(i, f)| { + match f.poll() { + Ok(Async::NotReady) => None, + Ok(Async::Ready(e)) => Some((i, Ok(e))), + Err(e) => Some((i, Err(e))), + } + }).next(); + match item { + Some((idx, res)) => { + self.inner.remove(idx); + let rest = mem::replace(&mut self.inner, Vec::new()); + match res { + Ok(e) => Ok(Async::Ready((e, idx, rest))), + Err(e) => Err((e, idx, rest)), + } + } + None => Ok(Async::NotReady), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/select_ok.rs b/third_party/rust/futures-0.1.31/src/future/select_ok.rs new file mode 100644 index 0000000000..f122a0ea30 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/select_ok.rs @@ -0,0 +1,81 @@ +//! Definition of the `SelectOk` combinator, finding the first successful future +//! in a list. + +use std::mem; +use std::prelude::v1::*; + +use {Future, IntoFuture, Poll, Async}; + +/// Future for the `select_ok` combinator, waiting for one of any of a list of +/// futures to successfully complete. Unlike `select_all`, this future ignores all +/// but the last error, if there are any. +/// +/// This is created by the `select_ok` function. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct SelectOk<A> where A: Future { + inner: Vec<A>, +} + +/// Creates a new future which will select the first successful future over a list of futures. +/// +/// The returned future will wait for any future within `iter` to be ready and Ok. Unlike +/// `select_all`, this will only return the first successful completion, or the last +/// failure. This is useful in contexts where any success is desired and failures +/// are ignored, unless all the futures fail. +/// +/// # Panics +/// +/// This function will panic if the iterator specified contains no items. +pub fn select_ok<I>(iter: I) -> SelectOk<<I::Item as IntoFuture>::Future> + where I: IntoIterator, + I::Item: IntoFuture, +{ + let ret = SelectOk { + inner: iter.into_iter() + .map(|a| a.into_future()) + .collect(), + }; + assert!(ret.inner.len() > 0); + ret +} + +impl<A> Future for SelectOk<A> where A: Future { + type Item = (A::Item, Vec<A>); + type Error = A::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + // loop until we've either exhausted all errors, a success was hit, or nothing is ready + loop { + let item = self.inner.iter_mut().enumerate().filter_map(|(i, f)| { + match f.poll() { + Ok(Async::NotReady) => None, + Ok(Async::Ready(e)) => Some((i, Ok(e))), + Err(e) => Some((i, Err(e))), + } + }).next(); + + match item { + Some((idx, res)) => { + // always remove Ok or Err, if it's not the last Err continue looping + drop(self.inner.remove(idx)); + match res { + Ok(e) => { + let rest = mem::replace(&mut self.inner, Vec::new()); + return Ok(Async::Ready((e, rest))) + }, + Err(e) => { + if self.inner.is_empty() { + return Err(e) + } + }, + } + } + None => { + // based on the filter above, nothing is ready, return + return Ok(Async::NotReady) + }, + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/shared.rs b/third_party/rust/futures-0.1.31/src/future/shared.rs new file mode 100644 index 
0000000000..e3b6d2fca7 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/shared.rs @@ -0,0 +1,300 @@ +//! Definition of the Shared combinator, a future that is cloneable, +//! and can be polled in multiple threads. +//! +//! # Examples +//! +//! ``` +//! use futures::future::*; +//! +//! let future = ok::<_, bool>(6); +//! let shared1 = future.shared(); +//! let shared2 = shared1.clone(); +//! assert_eq!(6, *shared1.wait().unwrap()); +//! assert_eq!(6, *shared2.wait().unwrap()); +//! ``` + +use {Future, Poll, Async}; +use task::{self, Task}; +use executor::{self, Notify, Spawn}; + +use std::{error, fmt, mem, ops}; +use std::cell::UnsafeCell; +use std::sync::{Arc, Mutex}; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; +use std::collections::HashMap; + +/// A future that is cloneable and can be polled in multiple threads. +/// Use `Future::shared()` method to convert any future into a `Shared` future. +#[must_use = "futures do nothing unless polled"] +pub struct Shared<F: Future> { + inner: Arc<Inner<F>>, + waiter: usize, +} + +impl<F> fmt::Debug for Shared<F> + where F: Future + fmt::Debug, + F::Item: fmt::Debug, + F::Error: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Shared") + .field("inner", &self.inner) + .field("waiter", &self.waiter) + .finish() + } +} + +struct Inner<F: Future> { + next_clone_id: AtomicUsize, + future: UnsafeCell<Option<Spawn<F>>>, + result: UnsafeCell<Option<Result<SharedItem<F::Item>, SharedError<F::Error>>>>, + notifier: Arc<Notifier>, +} + +struct Notifier { + state: AtomicUsize, + waiters: Mutex<HashMap<usize, Task>>, +} + +const IDLE: usize = 0; +const POLLING: usize = 1; +const COMPLETE: usize = 2; +const POISONED: usize = 3; + +pub fn new<F: Future>(future: F) -> Shared<F> { + Shared { + inner: Arc::new(Inner { + next_clone_id: AtomicUsize::new(1), + notifier: Arc::new(Notifier { + state: AtomicUsize::new(IDLE), + waiters: 
Mutex::new(HashMap::new()), + }), + future: UnsafeCell::new(Some(executor::spawn(future))), + result: UnsafeCell::new(None), + }), + waiter: 0, + } +} + +impl<F> Shared<F> where F: Future { + // TODO: make this private + #[deprecated(since = "0.1.12", note = "use `Future::shared` instead")] + #[cfg(feature = "with-deprecated")] + #[doc(hidden)] + pub fn new(future: F) -> Self { + new(future) + } + + /// If any clone of this `Shared` has completed execution, returns its result immediately + /// without blocking. Otherwise, returns None without triggering the work represented by + /// this `Shared`. + pub fn peek(&self) -> Option<Result<SharedItem<F::Item>, SharedError<F::Error>>> { + match self.inner.notifier.state.load(SeqCst) { + COMPLETE => { + Some(unsafe { self.clone_result() }) + } + POISONED => panic!("inner future panicked during poll"), + _ => None, + } + } + + fn set_waiter(&mut self) { + let mut waiters = self.inner.notifier.waiters.lock().unwrap(); + waiters.insert(self.waiter, task::current()); + } + + unsafe fn clone_result(&self) -> Result<SharedItem<F::Item>, SharedError<F::Error>> { + match *self.inner.result.get() { + Some(Ok(ref item)) => Ok(SharedItem { item: item.item.clone() }), + Some(Err(ref e)) => Err(SharedError { error: e.error.clone() }), + _ => unreachable!(), + } + } + + fn complete(&self) { + unsafe { *self.inner.future.get() = None }; + self.inner.notifier.state.store(COMPLETE, SeqCst); + self.inner.notifier.notify(0); + } +} + +impl<F> Future for Shared<F> + where F: Future +{ + type Item = SharedItem<F::Item>; + type Error = SharedError<F::Error>; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + self.set_waiter(); + + match self.inner.notifier.state.compare_and_swap(IDLE, POLLING, SeqCst) { + IDLE => { + // Lock acquired, fall through + } + POLLING => { + // Another task is currently polling, at this point we just want + // to ensure that our task handle is currently registered + + return Ok(Async::NotReady); + } + 
COMPLETE => { + return unsafe { self.clone_result().map(Async::Ready) }; + } + POISONED => panic!("inner future panicked during poll"), + _ => unreachable!(), + } + + struct Reset<'a>(&'a AtomicUsize); + + impl<'a> Drop for Reset<'a> { + fn drop(&mut self) { + use std::thread; + + if thread::panicking() { + self.0.store(POISONED, SeqCst); + } + } + } + + let _reset = Reset(&self.inner.notifier.state); + + // Poll the future + let res = unsafe { + (*self.inner.future.get()).as_mut().unwrap() + .poll_future_notify(&self.inner.notifier, 0) + }; + match res { + Ok(Async::NotReady) => { + // Not ready, try to release the handle + match self.inner.notifier.state.compare_and_swap(POLLING, IDLE, SeqCst) { + POLLING => { + // Success + return Ok(Async::NotReady); + } + _ => unreachable!(), + } + + } + Ok(Async::Ready(i)) => { + unsafe { + (*self.inner.result.get()) = Some(Ok(SharedItem { item: Arc::new(i) })); + } + } + Err(e) => { + unsafe { + (*self.inner.result.get()) = Some(Err(SharedError { error: Arc::new(e) })); + } + } + } + + self.complete(); + unsafe { self.clone_result().map(Async::Ready) } + } +} + +impl<F> Clone for Shared<F> where F: Future { + fn clone(&self) -> Self { + let next_clone_id = self.inner.next_clone_id.fetch_add(1, SeqCst); + + Shared { + inner: self.inner.clone(), + waiter: next_clone_id, + } + } +} + +impl<F> Drop for Shared<F> where F: Future { + fn drop(&mut self) { + let mut waiters = self.inner.notifier.waiters.lock().unwrap(); + waiters.remove(&self.waiter); + } +} + +impl Notify for Notifier { + fn notify(&self, _id: usize) { + let waiters = mem::replace(&mut *self.waiters.lock().unwrap(), HashMap::new()); + + for (_, waiter) in waiters { + waiter.notify(); + } + } +} + +// The `F` is synchronized by a lock, so `F` doesn't need +// to be `Sync`. However, its `Item` or `Error` are exposed +// through an `Arc` but not lock, so they must be `Send + Sync`. 
+unsafe impl<F> Send for Inner<F> + where F: Future + Send, + F::Item: Send + Sync, + F::Error: Send + Sync, +{} + +unsafe impl<F> Sync for Inner<F> + where F: Future + Send, + F::Item: Send + Sync, + F::Error: Send + Sync, +{} + +impl<F> fmt::Debug for Inner<F> + where F: Future + fmt::Debug, + F::Item: fmt::Debug, + F::Error: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Inner") + .finish() + } +} + +/// A wrapped item of the original future that is cloneable and implements Deref +/// for ease of use. +#[derive(Clone, Debug)] +pub struct SharedItem<T> { + item: Arc<T>, +} + +impl<T> ops::Deref for SharedItem<T> { + type Target = T; + + fn deref(&self) -> &T { + &self.item.as_ref() + } +} + +/// A wrapped error of the original future that is cloneable and implements Deref +/// for ease of use. +#[derive(Clone, Debug)] +pub struct SharedError<E> { + error: Arc<E>, +} + +impl<E> ops::Deref for SharedError<E> { + type Target = E; + + fn deref(&self) -> &E { + &self.error.as_ref() + } +} + +impl<E> fmt::Display for SharedError<E> + where E: fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.error.fmt(f) + } +} + +impl<E> error::Error for SharedError<E> + where E: error::Error, +{ + #[allow(deprecated)] + fn description(&self) -> &str { + self.error.description() + } + + #[allow(deprecated)] + fn cause(&self) -> Option<&error::Error> { + self.error.cause() + } +} diff --git a/third_party/rust/futures-0.1.31/src/future/then.rs b/third_party/rust/futures-0.1.31/src/future/then.rs new file mode 100644 index 0000000000..188fb8fa80 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/future/then.rs @@ -0,0 +1,36 @@ +use {Future, IntoFuture, Poll}; +use super::chain::Chain; + +/// Future for the `then` combinator, chaining computations on the end of +/// another future regardless of its outcome. +/// +/// This is created by the `Future::then` method. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Then<A, B, F> where A: Future, B: IntoFuture { + state: Chain<A, B::Future, F>, +} + +pub fn new<A, B, F>(future: A, f: F) -> Then<A, B, F> + where A: Future, + B: IntoFuture, +{ + Then { + state: Chain::new(future, f), + } +} + +impl<A, B, F> Future for Then<A, B, F> + where A: Future, + B: IntoFuture, + F: FnOnce(Result<A::Item, A::Error>) -> B, +{ + type Item = B::Item; + type Error = B::Error; + + fn poll(&mut self) -> Poll<B::Item, B::Error> { + self.state.poll(|a, f| { + Ok(Err(f(a).into_future())) + }) + } +} diff --git a/third_party/rust/futures-0.1.31/src/lib.rs b/third_party/rust/futures-0.1.31/src/lib.rs new file mode 100644 index 0000000000..ccadb6777f --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/lib.rs @@ -0,0 +1,266 @@ +//! Zero-cost Futures in Rust +//! +//! This library is an implementation of futures in Rust which aims to provide +//! a robust implementation of handling asynchronous computations, ergonomic +//! composition and usage, and zero-cost abstractions over what would otherwise +//! be written by hand. +//! +//! Futures are a concept for an object which is a proxy for another value that +//! may not be ready yet. For example issuing an HTTP request may return a +//! future for the HTTP response, as it probably hasn't arrived yet. With an +//! object representing a value that will eventually be available, futures allow +//! for powerful composition of tasks through basic combinators that can perform +//! operations like chaining computations, changing the types of futures, or +//! waiting for two futures to complete at the same time. +//! +//! You can find extensive tutorials and documentations at [https://tokio.rs] +//! for both this crate (asynchronous programming in general) as well as the +//! Tokio stack to perform async I/O with. +//! +//! [https://tokio.rs]: https://tokio.rs +//! +//! ## Installation +//! +//! Add this to your `Cargo.toml`: +//! 
+//! ```toml +//! [dependencies] +//! futures = "0.1" +//! ``` +//! +//! ## Examples +//! +//! Let's take a look at a few examples of how futures might be used: +//! +//! ``` +//! extern crate futures; +//! +//! use std::io; +//! use std::time::Duration; +//! use futures::prelude::*; +//! use futures::future::Map; +//! +//! // A future is actually a trait implementation, so we can generically take a +//! // future of any integer and return back a future that will resolve to that +//! // value plus 10 more. +//! // +//! // Note here that like iterators, we're returning the `Map` combinator in +//! // the futures crate, not a boxed abstraction. This is a zero-cost +//! // construction of a future. +//! fn add_ten<F>(future: F) -> Map<F, fn(i32) -> i32> +//! where F: Future<Item=i32>, +//! { +//! fn add(a: i32) -> i32 { a + 10 } +//! future.map(add) +//! } +//! +//! // Not only can we modify one future, but we can even compose them together! +//! // Here we have a function which takes two futures as input, and returns a +//! // future that will calculate the sum of their two values. +//! // +//! // Above we saw a direct return value of the `Map` combinator, but +//! // performance isn't always critical and sometimes it's more ergonomic to +//! // return a trait object like we do here. Note though that there's only one +//! // allocation here, not any for the intermediate futures. +//! fn add<'a, A, B>(a: A, b: B) -> Box<Future<Item=i32, Error=A::Error> + 'a> +//! where A: Future<Item=i32> + 'a, +//! B: Future<Item=i32, Error=A::Error> + 'a, +//! { +//! Box::new(a.join(b).map(|(a, b)| a + b)) +//! } +//! +//! // Futures also allow chaining computations together, starting another after +//! // the previous finishes. Here we wait for the first computation to finish, +//! // and then decide what to do depending on the result. +//! fn download_timeout(url: &str, +//! timeout_dur: Duration) +//! -> Box<Future<Item=Vec<u8>, Error=io::Error>> { +//! use std::io; +//! 
use std::net::{SocketAddr, TcpStream}; +//! +//! type IoFuture<T> = Box<Future<Item=T, Error=io::Error>>; +//! +//! // First thing to do is we need to resolve our URL to an address. This +//! // will likely perform a DNS lookup which may take some time. +//! let addr = resolve(url); +//! +//! // After we acquire the address, we next want to open up a TCP +//! // connection. +//! let tcp = addr.and_then(|addr| connect(&addr)); +//! +//! // After the TCP connection is established and ready to go, we're off to +//! // the races! +//! let data = tcp.and_then(|conn| download(conn)); +//! +//! // That all might take awhile, though, so let's not wait too long for it +//! // to all come back. The `select` combinator here returns a future which +//! // resolves to the first value that's ready plus the next future. +//! // +//! // Note we can also use the `then` combinator which is similar to +//! // `and_then` above except that it receives the result of the +//! // computation, not just the successful value. +//! // +//! // Again note that all the above calls to `and_then` and the below calls +//! // to `map` and such require no allocations. We only ever allocate once +//! // we hit the `Box::new()` call at the end here, which means we've built +//! // up a relatively involved computation with only one box, and even that +//! // was optional! +//! +//! let data = data.map(Ok); +//! let timeout = timeout(timeout_dur).map(Err); +//! +//! let ret = data.select(timeout).then(|result| { +//! match result { +//! // One future succeeded, and it was the one which was +//! // downloading data from the connection. +//! Ok((Ok(data), _other_future)) => Ok(data), +//! +//! // The timeout fired, and otherwise no error was found, so +//! // we translate this to an error. +//! Ok((Err(_timeout), _other_future)) => { +//! Err(io::Error::new(io::ErrorKind::Other, "timeout")) +//! } +//! +//! // A normal I/O error happened, so we pass that on through. +//! 
Err((e, _other_future)) => Err(e), +//! } +//! }); +//! return Box::new(ret); +//! +//! fn resolve(url: &str) -> IoFuture<SocketAddr> { +//! // ... +//! # panic!("unimplemented"); +//! } +//! +//! fn connect(hostname: &SocketAddr) -> IoFuture<TcpStream> { +//! // ... +//! # panic!("unimplemented"); +//! } +//! +//! fn download(stream: TcpStream) -> IoFuture<Vec<u8>> { +//! // ... +//! # panic!("unimplemented"); +//! } +//! +//! fn timeout(stream: Duration) -> IoFuture<()> { +//! // ... +//! # panic!("unimplemented"); +//! } +//! } +//! # fn main() {} +//! ``` +//! +//! Some more information can also be found in the [README] for now, but +//! otherwise feel free to jump in to the docs below! +//! +//! [README]: https://github.com/rust-lang-nursery/futures-rs#futures-rs + +#![no_std] +#![deny(missing_docs, missing_debug_implementations)] +#![allow(bare_trait_objects, unknown_lints)] +#![doc(html_root_url = "https://docs.rs/futures/0.1")] + +#[macro_use] +#[cfg(feature = "use_std")] +extern crate std; + +macro_rules! 
if_std { + ($($i:item)*) => ($( + #[cfg(feature = "use_std")] + $i + )*) +} + +#[macro_use] +mod poll; +pub use poll::{Poll, Async, AsyncSink, StartSend}; + +pub mod future; +pub use future::{Future, IntoFuture}; + +pub mod stream; +pub use stream::Stream; + +pub mod sink; +pub use sink::Sink; + +#[deprecated(since = "0.1.4", note = "import through the future module instead")] +#[cfg(feature = "with-deprecated")] +#[doc(hidden)] +pub use future::{done, empty, failed, finished, lazy}; + +#[doc(hidden)] +#[cfg(feature = "with-deprecated")] +#[deprecated(since = "0.1.4", note = "import through the future module instead")] +pub use future::{ + Done, Empty, Failed, Finished, Lazy, AndThen, Flatten, FlattenStream, Fuse, IntoStream, + Join, Join3, Join4, Join5, Map, MapErr, OrElse, Select, + SelectNext, Then +}; + +#[cfg(feature = "use_std")] +mod lock; +mod task_impl; + +mod resultstream; + +pub mod task; +pub mod executor; +#[cfg(feature = "use_std")] +pub mod sync; +#[cfg(feature = "use_std")] +pub mod unsync; + + +if_std! 
{ + #[doc(hidden)] + #[deprecated(since = "0.1.4", note = "use sync::oneshot::channel instead")] + #[cfg(feature = "with-deprecated")] + pub use sync::oneshot::channel as oneshot; + + #[doc(hidden)] + #[deprecated(since = "0.1.4", note = "use sync::oneshot::Receiver instead")] + #[cfg(feature = "with-deprecated")] + pub use sync::oneshot::Receiver as Oneshot; + + #[doc(hidden)] + #[deprecated(since = "0.1.4", note = "use sync::oneshot::Sender instead")] + #[cfg(feature = "with-deprecated")] + pub use sync::oneshot::Sender as Complete; + + #[doc(hidden)] + #[deprecated(since = "0.1.4", note = "use sync::oneshot::Canceled instead")] + #[cfg(feature = "with-deprecated")] + pub use sync::oneshot::Canceled; + + #[doc(hidden)] + #[deprecated(since = "0.1.4", note = "import through the future module instead")] + #[cfg(feature = "with-deprecated")] + #[allow(deprecated)] + pub use future::{BoxFuture, collect, select_all, select_ok}; + + #[doc(hidden)] + #[deprecated(since = "0.1.4", note = "import through the future module instead")] + #[cfg(feature = "with-deprecated")] + pub use future::{SelectAll, SelectAllNext, Collect, SelectOk}; +} + +/// A "prelude" for crates using the `futures` crate. +/// +/// This prelude is similar to the standard library's prelude in that you'll +/// almost always want to import its entire contents, but unlike the standard +/// library's prelude you'll have to do so manually. An example of using this is: +/// +/// ``` +/// use futures::prelude::*; +/// ``` +/// +/// We may add items to this over time as they become ubiquitous as well, but +/// otherwise this should help cut down on futures-related imports when you're +/// working with the `futures` crate! 
+pub mod prelude { + #[doc(no_inline)] + pub use {Future, Stream, Sink, Async, AsyncSink, Poll, StartSend}; + #[doc(no_inline)] + pub use IntoFuture; +} diff --git a/third_party/rust/futures-0.1.31/src/lock.rs b/third_party/rust/futures-0.1.31/src/lock.rs new file mode 100644 index 0000000000..627c524949 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/lock.rs @@ -0,0 +1,107 @@ +//! A "mutex" which only supports `try_lock` +//! +//! As a futures library the eventual call to an event loop should be the only +//! thing that ever blocks, so this is assisted with a fast user-space +//! implementation of a lock that can only have a `try_lock` operation. + +extern crate core; + +use self::core::cell::UnsafeCell; +use self::core::ops::{Deref, DerefMut}; +use self::core::sync::atomic::Ordering::SeqCst; +use self::core::sync::atomic::AtomicBool; + +/// A "mutex" around a value, similar to `std::sync::Mutex<T>`. +/// +/// This lock only supports the `try_lock` operation, however, and does not +/// implement poisoning. +#[derive(Debug)] +pub struct Lock<T> { + locked: AtomicBool, + data: UnsafeCell<T>, +} + +/// Sentinel representing an acquired lock through which the data can be +/// accessed. +pub struct TryLock<'a, T: 'a> { + __ptr: &'a Lock<T>, +} + +// The `Lock` structure is basically just a `Mutex<T>`, and these two impls are +// intended to mirror the standard library's corresponding impls for `Mutex<T>`. +// +// If a `T` is sendable across threads, so is the lock, and `T` must be sendable +// across threads to be `Sync` because it allows mutable access from multiple +// threads. +unsafe impl<T: Send> Send for Lock<T> {} +unsafe impl<T: Send> Sync for Lock<T> {} + +impl<T> Lock<T> { + /// Creates a new lock around the given value. + pub fn new(t: T) -> Lock<T> { + Lock { + locked: AtomicBool::new(false), + data: UnsafeCell::new(t), + } + } + + /// Attempts to acquire this lock, returning whether the lock was acquired or + /// not. 
+ /// + /// If `Some` is returned then the data this lock protects can be accessed + /// through the sentinel. This sentinel allows both mutable and immutable + /// access. + /// + /// If `None` is returned then the lock is already locked, either elsewhere + /// on this thread or on another thread. + pub fn try_lock(&self) -> Option<TryLock<T>> { + if !self.locked.swap(true, SeqCst) { + Some(TryLock { __ptr: self }) + } else { + None + } + } +} + +impl<'a, T> Deref for TryLock<'a, T> { + type Target = T; + fn deref(&self) -> &T { + // The existence of `TryLock` represents that we own the lock, so we + // can safely access the data here. + unsafe { &*self.__ptr.data.get() } + } +} + +impl<'a, T> DerefMut for TryLock<'a, T> { + fn deref_mut(&mut self) -> &mut T { + // The existence of `TryLock` represents that we own the lock, so we + // can safely access the data here. + // + // Additionally, we're the *only* `TryLock` in existence so mutable + // access should be ok. + unsafe { &mut *self.__ptr.data.get() } + } +} + +impl<'a, T> Drop for TryLock<'a, T> { + fn drop(&mut self) { + self.__ptr.locked.store(false, SeqCst); + } +} + +#[cfg(test)] +mod tests { + use super::Lock; + + #[test] + fn smoke() { + let a = Lock::new(1); + let mut a1 = a.try_lock().unwrap(); + assert!(a.try_lock().is_none()); + assert_eq!(*a1, 1); + *a1 = 2; + drop(a1); + assert_eq!(*a.try_lock().unwrap(), 2); + assert_eq!(*a.try_lock().unwrap(), 2); + } +} diff --git a/third_party/rust/futures-0.1.31/src/poll.rs b/third_party/rust/futures-0.1.31/src/poll.rs new file mode 100644 index 0000000000..c568e726c2 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/poll.rs @@ -0,0 +1,105 @@ +/// A macro for extracting the successful type of a `Poll<T, E>`. +/// +/// This macro bakes propagation of both errors and `NotReady` signals by +/// returning early. +#[macro_export] +macro_rules! 
try_ready { + ($e:expr) => (match $e { + Ok($crate::Async::Ready(t)) => t, + Ok($crate::Async::NotReady) => return Ok($crate::Async::NotReady), + Err(e) => return Err(From::from(e)), + }) +} + +/// Return type of the `Future::poll` method, indicates whether a future's value +/// is ready or not. +/// +/// * `Ok(Async::Ready(t))` means that a future has successfully resolved +/// * `Ok(Async::NotReady)` means that a future is not ready to complete yet +/// * `Err(e)` means that a future has completed with the given failure +pub type Poll<T, E> = Result<Async<T>, E>; + +/// Return type of future, indicating whether a value is ready or not. +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum Async<T> { + /// Represents that a value is immediately ready. + Ready(T), + + /// Represents that a value is not ready yet, but may be so later. + NotReady, +} + +impl<T> Async<T> { + /// Change the success value of this `Async` with the closure provided + pub fn map<F, U>(self, f: F) -> Async<U> + where F: FnOnce(T) -> U + { + match self { + Async::Ready(t) => Async::Ready(f(t)), + Async::NotReady => Async::NotReady, + } + } + + /// Returns whether this is `Async::Ready` + pub fn is_ready(&self) -> bool { + match *self { + Async::Ready(_) => true, + Async::NotReady => false, + } + } + + /// Returns whether this is `Async::NotReady` + pub fn is_not_ready(&self) -> bool { + !self.is_ready() + } +} + +impl<T> From<T> for Async<T> { + fn from(t: T) -> Async<T> { + Async::Ready(t) + } +} + +/// The result of an asynchronous attempt to send a value to a sink. +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum AsyncSink<T> { + /// The `start_send` attempt succeeded, so the sending process has + /// *started*; you must use `Sink::poll_complete` to drive the send + /// to completion. + Ready, + + /// The `start_send` attempt failed due to the sink being full. The value + /// being sent is returned, and the current `Task` will be automatically + /// notified again once the sink has room. 
+ NotReady(T), +} + +impl<T> AsyncSink<T> { + /// Change the NotReady value of this `AsyncSink` with the closure provided + pub fn map<F, U>(self, f: F) -> AsyncSink<U> + where F: FnOnce(T) -> U, + { + match self { + AsyncSink::Ready => AsyncSink::Ready, + AsyncSink::NotReady(t) => AsyncSink::NotReady(f(t)), + } + } + + /// Returns whether this is `AsyncSink::Ready` + pub fn is_ready(&self) -> bool { + match *self { + AsyncSink::Ready => true, + AsyncSink::NotReady(_) => false, + } + } + + /// Returns whether this is `AsyncSink::NotReady` + pub fn is_not_ready(&self) -> bool { + !self.is_ready() + } +} + + +/// Return type of the `Sink::start_send` method, indicating the outcome of a +/// send attempt. See `AsyncSink` for more details. +pub type StartSend<T, E> = Result<AsyncSink<T>, E>; diff --git a/third_party/rust/futures-0.1.31/src/resultstream.rs b/third_party/rust/futures-0.1.31/src/resultstream.rs new file mode 100644 index 0000000000..23a99819bd --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/resultstream.rs @@ -0,0 +1,46 @@ +// This should really be in the stream module, +// but `pub(crate)` isn't available until Rust 1.18, +// and pre-1.18 there isn't a really good way to have a sub-module +// available to the crate, but not without it. +use core::marker::PhantomData; + +use {Poll, Async}; +use stream::Stream; + + +/// A stream combinator used to convert a `Stream<Item=T,Error=E>` +/// to a `Stream<Item=Result<T,E>>`. +/// +/// A poll on this stream will never return an `Err`. As such the +/// actual error type is parameterized, so it can match whatever error +/// type is needed. +/// +/// This structure is produced by the `Stream::results` method. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Results<S: Stream, E> { + inner: S, + phantom: PhantomData<E> +} + +pub fn new<S, E>(s: S) -> Results<S, E> where S: Stream { + Results { + inner: s, + phantom: PhantomData + } +} + +impl<S: Stream, E> Stream for Results<S, E> { + type Item = Result<S::Item, S::Error>; + type Error = E; + + fn poll(&mut self) -> Poll<Option<Result<S::Item, S::Error>>, E> { + match self.inner.poll() { + Ok(Async::Ready(Some(item))) => Ok(Async::Ready(Some(Ok(item)))), + Err(e) => Ok(Async::Ready(Some(Err(e)))), + Ok(Async::Ready(None)) => Ok(Async::Ready(None)), + Ok(Async::NotReady) => Ok(Async::NotReady) + } + } +} + diff --git a/third_party/rust/futures-0.1.31/src/sink/buffer.rs b/third_party/rust/futures-0.1.31/src/sink/buffer.rs new file mode 100644 index 0000000000..419579d9a0 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/buffer.rs @@ -0,0 +1,108 @@ +use std::collections::VecDeque; + +use {Poll, Async}; +use {StartSend, AsyncSink}; +use sink::Sink; +use stream::Stream; + +/// Sink for the `Sink::buffer` combinator, which buffers up to some fixed +/// number of values when the underlying sink is unable to accept them. +#[derive(Debug)] +#[must_use = "sinks do nothing unless polled"] +pub struct Buffer<S: Sink> { + sink: S, + buf: VecDeque<S::SinkItem>, + + // Track capacity separately from the `VecDeque`, which may be rounded up + cap: usize, +} + +pub fn new<S: Sink>(sink: S, amt: usize) -> Buffer<S> { + Buffer { + sink: sink, + buf: VecDeque::with_capacity(amt), + cap: amt, + } +} + +impl<S: Sink> Buffer<S> { + /// Get a shared reference to the inner sink. + pub fn get_ref(&self) -> &S { + &self.sink + } + + /// Get a mutable reference to the inner sink. + pub fn get_mut(&mut self) -> &mut S { + &mut self.sink + } + + /// Consumes this combinator, returning the underlying sink. 
+ /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.sink + } + + fn try_empty_buffer(&mut self) -> Poll<(), S::SinkError> { + while let Some(item) = self.buf.pop_front() { + if let AsyncSink::NotReady(item) = self.sink.start_send(item)? { + self.buf.push_front(item); + + return Ok(Async::NotReady); + } + } + + Ok(Async::Ready(())) + } +} + +// Forwarding impl of Stream from the underlying sink +impl<S> Stream for Buffer<S> where S: Sink + Stream { + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + self.sink.poll() + } +} + +impl<S: Sink> Sink for Buffer<S> { + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> { + if self.cap == 0 { + return self.sink.start_send(item); + } + + self.try_empty_buffer()?; + if self.buf.len() == self.cap { + return Ok(AsyncSink::NotReady(item)); + } + self.buf.push_back(item); + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + if self.cap == 0 { + return self.sink.poll_complete(); + } + + try_ready!(self.try_empty_buffer()); + debug_assert!(self.buf.is_empty()); + self.sink.poll_complete() + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + if self.cap == 0 { + return self.sink.close(); + } + + if self.buf.len() > 0 { + try_ready!(self.try_empty_buffer()); + } + assert_eq!(self.buf.len(), 0); + self.sink.close() + } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/fanout.rs b/third_party/rust/futures-0.1.31/src/sink/fanout.rs new file mode 100644 index 0000000000..8d2456e7e8 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/fanout.rs @@ -0,0 +1,135 @@ +use core::fmt::{Debug, Formatter, Result as FmtResult}; +use core::mem::replace; + +use {Async, AsyncSink, Poll, 
Sink, StartSend}; + +/// Sink that clones incoming items and forwards them to two sinks at the same time. +/// +/// Backpressure from any downstream sink propagates up, which means that this sink +/// can only process items as fast as its _slowest_ downstream sink. +pub struct Fanout<A: Sink, B: Sink> { + left: Downstream<A>, + right: Downstream<B> +} + +impl<A: Sink, B: Sink> Fanout<A, B> { + /// Consumes this combinator, returning the underlying sinks. + /// + /// Note that this may discard intermediate state of this combinator, + /// so care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> (A, B) { + (self.left.sink, self.right.sink) + } +} + +impl<A: Sink + Debug, B: Sink + Debug> Debug for Fanout<A, B> + where A::SinkItem: Debug, + B::SinkItem: Debug +{ + fn fmt(&self, f: &mut Formatter) -> FmtResult { + f.debug_struct("Fanout") + .field("left", &self.left) + .field("right", &self.right) + .finish() + } +} + +pub fn new<A: Sink, B: Sink>(left: A, right: B) -> Fanout<A, B> { + Fanout { + left: Downstream::new(left), + right: Downstream::new(right) + } +} + +impl<A, B> Sink for Fanout<A, B> + where A: Sink, + A::SinkItem: Clone, + B: Sink<SinkItem=A::SinkItem, SinkError=A::SinkError> +{ + type SinkItem = A::SinkItem; + type SinkError = A::SinkError; + + fn start_send( + &mut self, + item: Self::SinkItem + ) -> StartSend<Self::SinkItem, Self::SinkError> { + // Attempt to complete processing any outstanding requests. + self.left.keep_flushing()?; + self.right.keep_flushing()?; + // Only if both downstream sinks are ready, start sending the next item. 
+ if self.left.is_ready() && self.right.is_ready() { + self.left.state = self.left.sink.start_send(item.clone())?; + self.right.state = self.right.sink.start_send(item)?; + Ok(AsyncSink::Ready) + } else { + Ok(AsyncSink::NotReady(item)) + } + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + let left_async = self.left.poll_complete()?; + let right_async = self.right.poll_complete()?; + // Only if both downstream sinks are ready, signal readiness. + if left_async.is_ready() && right_async.is_ready() { + Ok(Async::Ready(())) + } else { + Ok(Async::NotReady) + } + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + let left_async = self.left.close()?; + let right_async = self.right.close()?; + // Only if both downstream sinks are ready, signal readiness. + if left_async.is_ready() && right_async.is_ready() { + Ok(Async::Ready(())) + } else { + Ok(Async::NotReady) + } + } +} + +#[derive(Debug)] +struct Downstream<S: Sink> { + sink: S, + state: AsyncSink<S::SinkItem> +} + +impl<S: Sink> Downstream<S> { + fn new(sink: S) -> Self { + Downstream { sink: sink, state: AsyncSink::Ready } + } + + fn is_ready(&self) -> bool { + self.state.is_ready() + } + + fn keep_flushing(&mut self) -> Result<(), S::SinkError> { + if let AsyncSink::NotReady(item) = replace(&mut self.state, AsyncSink::Ready) { + self.state = self.sink.start_send(item)?; + } + Ok(()) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.keep_flushing()?; + let async = self.sink.poll_complete()?; + // Only if all values have been sent _and_ the underlying + // sink is completely flushed, signal readiness. + if self.state.is_ready() && async.is_ready() { + Ok(Async::Ready(())) + } else { + Ok(Async::NotReady) + } + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.keep_flushing()?; + // If all items have been flushed, initiate close. 
+ if self.state.is_ready() { + self.sink.close() + } else { + Ok(Async::NotReady) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/flush.rs b/third_party/rust/futures-0.1.31/src/sink/flush.rs new file mode 100644 index 0000000000..f66811e03d --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/flush.rs @@ -0,0 +1,46 @@ +use {Poll, Async, Future}; +use sink::Sink; + +/// Future for the `Sink::flush` combinator, which polls the sink until all data +/// has been flushed. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Flush<S> { + sink: Option<S>, +} + +pub fn new<S: Sink>(sink: S) -> Flush<S> { + Flush { sink: Some(sink) } +} + +impl<S: Sink> Flush<S> { + /// Get a shared reference to the inner sink. + pub fn get_ref(&self) -> &S { + self.sink.as_ref().expect("Attempted `Flush::get_ref` after the flush completed") + } + + /// Get a mutable reference to the inner sink. + pub fn get_mut(&mut self) -> &mut S { + self.sink.as_mut().expect("Attempted `Flush::get_mut` after the flush completed") + } + + /// Consume the `Flush` and return the inner sink. + pub fn into_inner(self) -> S { + self.sink.expect("Attempted `Flush::into_inner` after the flush completed") + } +} + +impl<S: Sink> Future for Flush<S> { + type Item = S; + type Error = S::SinkError; + + fn poll(&mut self) -> Poll<S, S::SinkError> { + let mut sink = self.sink.take().expect("Attempted to poll Flush after it completed"); + if sink.poll_complete()?.is_ready() { + Ok(Async::Ready(sink)) + } else { + self.sink = Some(sink); + Ok(Async::NotReady) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/from_err.rs b/third_party/rust/futures-0.1.31/src/sink/from_err.rs new file mode 100644 index 0000000000..4880c30ef4 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/from_err.rs @@ -0,0 +1,71 @@ +use core::marker::PhantomData; + +use {Sink, Poll, StartSend}; + +/// A sink combinator to change the error type of a sink. 
+/// +/// This is created by the `Sink::from_err` method. +#[derive(Clone, Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct SinkFromErr<S, E> { + sink: S, + f: PhantomData<E> +} + +pub fn new<S, E>(sink: S) -> SinkFromErr<S, E> + where S: Sink +{ + SinkFromErr { + sink: sink, + f: PhantomData + } +} + +impl<S, E> SinkFromErr<S, E> { + /// Get a shared reference to the inner sink. + pub fn get_ref(&self) -> &S { + &self.sink + } + + /// Get a mutable reference to the inner sink. + pub fn get_mut(&mut self) -> &mut S { + &mut self.sink + } + + /// Consumes this combinator, returning the underlying sink. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.sink + } +} + +impl<S, E> Sink for SinkFromErr<S, E> + where S: Sink, + E: From<S::SinkError> +{ + type SinkItem = S::SinkItem; + type SinkError = E; + + fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> { + self.sink.start_send(item).map_err(|e| e.into()) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + self.sink.poll_complete().map_err(|e| e.into()) + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + self.sink.close().map_err(|e| e.into()) + } +} + +impl<S: ::stream::Stream, E> ::stream::Stream for SinkFromErr<S, E> { + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + self.sink.poll() + } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/map_err.rs b/third_party/rust/futures-0.1.31/src/sink/map_err.rs new file mode 100644 index 0000000000..25c168c071 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/map_err.rs @@ -0,0 +1,64 @@ +use sink::Sink; + +use {Poll, StartSend, Stream}; + +/// Sink for the `Sink::sink_map_err` combinator. 
+#[derive(Clone,Debug)] +#[must_use = "sinks do nothing unless polled"] +pub struct SinkMapErr<S, F> { + sink: S, + f: Option<F>, +} + +pub fn new<S, F>(s: S, f: F) -> SinkMapErr<S, F> { + SinkMapErr { sink: s, f: Some(f) } +} + +impl<S, E> SinkMapErr<S, E> { + /// Get a shared reference to the inner sink. + pub fn get_ref(&self) -> &S { + &self.sink + } + + /// Get a mutable reference to the inner sink. + pub fn get_mut(&mut self) -> &mut S { + &mut self.sink + } + + /// Consumes this combinator, returning the underlying sink. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.sink + } +} + +impl<S, F, E> Sink for SinkMapErr<S, F> + where S: Sink, + F: FnOnce(S::SinkError) -> E, +{ + type SinkItem = S::SinkItem; + type SinkError = E; + + fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> { + self.sink.start_send(item).map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e)) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + self.sink.poll_complete().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e)) + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + self.sink.close().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e)) + } +} + +impl<S: Stream, F> Stream for SinkMapErr<S, F> { + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + self.sink.poll() + } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/mod.rs b/third_party/rust/futures-0.1.31/src/sink/mod.rs new file mode 100644 index 0000000000..e5ea97f92a --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/mod.rs @@ -0,0 +1,489 @@ +//! Asynchronous sinks +//! +//! This module contains the `Sink` trait, along with a number of adapter types +//! for it. 
An overview is available in the documentation for the trait itself. +//! +//! You can find more information/tutorials about streams [online at +//! https://tokio.rs][online] +//! +//! [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/ + +use {IntoFuture, Poll, StartSend}; +use stream::Stream; + +mod with; +mod with_flat_map; +// mod with_map; +// mod with_filter; +// mod with_filter_map; +mod flush; +mod from_err; +mod send; +mod send_all; +mod map_err; +mod fanout; + +if_std! { + mod buffer; + mod wait; + + pub use self::buffer::Buffer; + pub use self::wait::Wait; + + // TODO: consider expanding this via e.g. FromIterator + impl<T> Sink for ::std::vec::Vec<T> { + type SinkItem = T; + type SinkError = (); // Change this to ! once it stabilizes + + fn start_send(&mut self, item: Self::SinkItem) + -> StartSend<Self::SinkItem, Self::SinkError> + { + self.push(item); + Ok(::AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + Ok(::Async::Ready(())) + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + Ok(::Async::Ready(())) + } + } + + /// A type alias for `Box<Sink + Send>` + pub type BoxSink<T, E> = ::std::boxed::Box<Sink<SinkItem = T, SinkError = E> + + ::core::marker::Send>; + + impl<S: ?Sized + Sink> Sink for ::std::boxed::Box<S> { + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: Self::SinkItem) + -> StartSend<Self::SinkItem, Self::SinkError> { + (**self).start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + (**self).poll_complete() + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + (**self).close() + } + } +} + +pub use self::with::With; +pub use self::with_flat_map::WithFlatMap; +pub use self::flush::Flush; +pub use self::send::Send; +pub use self::send_all::SendAll; +pub use self::map_err::SinkMapErr; +pub use self::from_err::SinkFromErr; +pub use self::fanout::Fanout; + +/// A `Sink` is a value into which other 
values can be sent, asynchronously. +/// +/// Basic examples of sinks include the sending side of: +/// +/// - Channels +/// - Sockets +/// - Pipes +/// +/// In addition to such "primitive" sinks, it's typical to layer additional +/// functionality, such as buffering, on top of an existing sink. +/// +/// Sending to a sink is "asynchronous" in the sense that the value may not be +/// sent in its entirety immediately. Instead, values are sent in a two-phase +/// way: first by initiating a send, and then by polling for completion. This +/// two-phase setup is analogous to buffered writing in synchronous code, where +/// writes often succeed immediately, but internally are buffered and are +/// *actually* written only upon flushing. +/// +/// In addition, the `Sink` may be *full*, in which case it is not even possible +/// to start the sending process. +/// +/// As with `Future` and `Stream`, the `Sink` trait is built from a few core +/// required methods, and a host of default methods for working in a +/// higher-level way. The `Sink::send_all` combinator is of particular +/// importance: you can use it to send an entire stream to a sink, which is +/// the simplest way to ultimately consume a sink. +/// +/// You can find more information/tutorials about streams [online at +/// https://tokio.rs][online] +/// +/// [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/ +pub trait Sink { + /// The type of value that the sink accepts. + type SinkItem; + + /// The type of value produced by the sink when an error occurs. + type SinkError; + + /// Begin the process of sending a value to the sink. + /// + /// As the name suggests, this method only *begins* the process of sending + /// the item. If the sink employs buffering, the item isn't fully processed + /// until the buffer is fully flushed. Since sinks are designed to work with + /// asynchronous I/O, the process of actually writing out the data to an + /// underlying object takes place asynchronously. 
**You *must* use + /// `poll_complete` in order to drive completion of a send**. In particular, + /// `start_send` does not begin the flushing process + /// + /// # Return value + /// + /// This method returns `AsyncSink::Ready` if the sink was able to start + /// sending `item`. In that case, you *must* ensure that you call + /// `poll_complete` to process the sent item to completion. Note, however, + /// that several calls to `start_send` can be made prior to calling + /// `poll_complete`, which will work on completing all pending items. + /// + /// The method returns `AsyncSink::NotReady` if the sink was unable to begin + /// sending, usually due to being full. The sink must have attempted to + /// complete processing any outstanding requests (equivalent to + /// `poll_complete`) before yielding this result. The current task will be + /// automatically scheduled for notification when the sink may be ready to + /// receive new values. + /// + /// # Errors + /// + /// If the sink encounters an error other than being temporarily full, it + /// uses the `Err` variant to signal that error. In most cases, such errors + /// mean that the sink will permanently be unable to receive items. + /// + /// # Panics + /// + /// This method may panic in a few situations, depending on the specific + /// sink: + /// + /// - It is called outside of the context of a task. + /// - A previous call to `start_send` or `poll_complete` yielded an error. + fn start_send(&mut self, item: Self::SinkItem) + -> StartSend<Self::SinkItem, Self::SinkError>; + + /// Flush all output from this sink, if necessary. + /// + /// Some sinks may buffer intermediate data as an optimization to improve + /// throughput. In other words, if a sink has a corresponding receiver then + /// a successful `start_send` above may not guarantee that the value is + /// actually ready to be received by the receiver. 
This function is intended + /// to be used to ensure that values do indeed make their way to the + /// receiver. + /// + /// This function will attempt to process any pending requests on behalf of + /// the sink and drive it to completion. + /// + /// # Return value + /// + /// Returns `Ok(Async::Ready(()))` when no buffered items remain. If this + /// value is returned then it is guaranteed that all previous values sent + /// via `start_send` will be guaranteed to be available to a listening + /// receiver. + /// + /// Returns `Ok(Async::NotReady)` if there is more work left to do, in which + /// case the current task is scheduled to wake up when more progress may be + /// possible. + /// + /// # Errors + /// + /// Returns `Err` if the sink encounters an error while processing one of + /// its pending requests. Due to the buffered nature of requests, it is not + /// generally possible to correlate the error with a particular request. As + /// with `start_send`, these errors are generally "fatal" for continued use + /// of the sink. + /// + /// # Panics + /// + /// This method may panic in a few situations, depending on the specific sink: + /// + /// - It is called outside of the context of a task. + /// - A previous call to `start_send` or `poll_complete` yielded an error. + /// + /// # Compatibility nodes + /// + /// The name of this method may be slightly misleading as the original + /// intention was to have this method be more general than just flushing + /// requests. Over time though it was decided to trim back the ambitions of + /// this method to what it's always done, just flushing. + /// + /// In the 0.2 release series of futures this method will be renamed to + /// `poll_flush`. For 0.1, however, the breaking change is not happening + /// yet. + fn poll_complete(&mut self) -> Poll<(), Self::SinkError>; + + /// A method to indicate that no more values will ever be pushed into this + /// sink. 
+ /// + /// This method is used to indicate that a sink will no longer even be given + /// another value by the caller. That is, the `start_send` method above will + /// be called no longer (nor `poll_complete`). This method is intended to + /// model "graceful shutdown" in various protocols where the intent to shut + /// down is followed by a little more blocking work. + /// + /// Callers of this function should work it it in a similar fashion to + /// `poll_complete`. Once called it may return `NotReady` which indicates + /// that more external work needs to happen to make progress. The current + /// task will be scheduled to receive a notification in such an event, + /// however. + /// + /// Note that this function will imply `poll_complete` above. That is, if a + /// sink has buffered data, then it'll be flushed out during a `close` + /// operation. It is not necessary to have `poll_complete` return `Ready` + /// before a `close` is called. Once a `close` is called, though, + /// `poll_complete` cannot be called. + /// + /// # Return value + /// + /// This function, like `poll_complete`, returns a `Poll`. The value is + /// `Ready` once the close operation has completed. At that point it should + /// be safe to drop the sink and deallocate associated resources. + /// + /// If the value returned is `NotReady` then the sink is not yet closed and + /// work needs to be done to close it. The work has been scheduled and the + /// current task will receive a notification when it's next ready to call + /// this method again. + /// + /// Finally, this function may also return an error. + /// + /// # Errors + /// + /// This function will return an `Err` if any operation along the way during + /// the close operation fails. An error typically is fatal for a sink and is + /// unable to be recovered from, but in specific situations this may not + /// always be true. 
+ /// + /// Note that it's also typically an error to call `start_send` or + /// `poll_complete` after the `close` function is called. This method will + /// *initiate* a close, and continuing to send values after that (or attempt + /// to flush) may result in strange behavior, panics, errors, etc. Once this + /// method is called, it must be the only method called on this `Sink`. + /// + /// # Panics + /// + /// This method may panic or cause panics if: + /// + /// * It is called outside the context of a future's task + /// * It is called and then `start_send` or `poll_complete` is called + /// + /// # Compatibility notes + /// + /// Note that this function is currently by default a provided function, + /// defaulted to calling `poll_complete` above. This function was added + /// in the 0.1 series of the crate as a backwards-compatible addition. It + /// is intended that in the 0.2 series the method will no longer be a + /// default method. + /// + /// It is highly recommended to consider this method a required method and + /// to implement it whenever you implement `Sink` locally. It is especially + /// crucial to be sure to close inner sinks, if applicable. + #[cfg(feature = "with-deprecated")] + fn close(&mut self) -> Poll<(), Self::SinkError> { + self.poll_complete() + } + + /// dox (you should see the above, not this) + #[cfg(not(feature = "with-deprecated"))] + fn close(&mut self) -> Poll<(), Self::SinkError>; + + /// Creates a new object which will produce a synchronous sink. + /// + /// The sink returned does **not** implement the `Sink` trait, and instead + /// only has two methods: `send` and `flush`. These two methods correspond + /// to `start_send` and `poll_complete` above except are executed in a + /// blocking fashion. + #[cfg(feature = "use_std")] + fn wait(self) -> Wait<Self> + where Self: Sized + { + wait::new(self) + } + + /// Composes a function *in front of* the sink. 
+ /// + /// This adapter produces a new sink that passes each value through the + /// given function `f` before sending it to `self`. + /// + /// To process each value, `f` produces a *future*, which is then polled to + /// completion before passing its result down to the underlying sink. If the + /// future produces an error, that error is returned by the new sink. + /// + /// Note that this function consumes the given sink, returning a wrapped + /// version, much like `Iterator::map`. + fn with<U, F, Fut>(self, f: F) -> With<Self, U, F, Fut> + where F: FnMut(U) -> Fut, + Fut: IntoFuture<Item = Self::SinkItem>, + Fut::Error: From<Self::SinkError>, + Self: Sized + { + with::new(self, f) + } + + /// Composes a function *in front of* the sink. + /// + /// This adapter produces a new sink that passes each value through the + /// given function `f` before sending it to `self`. + /// + /// To process each value, `f` produces a *stream*, of which each value + /// is passed to the underlying sink. A new value will not be accepted until + /// the stream has been drained + /// + /// Note that this function consumes the given sink, returning a wrapped + /// version, much like `Iterator::flat_map`. 
+ /// + /// # Examples + /// --- + /// Using this function with an iterator through use of the `stream::iter_ok()` + /// function + /// + /// ``` + /// use futures::prelude::*; + /// use futures::stream; + /// use futures::sync::mpsc; + /// + /// let (tx, rx) = mpsc::channel::<i32>(5); + /// + /// let tx = tx.with_flat_map(|x| { + /// stream::iter_ok(vec![42; x].into_iter().map(|y| y)) + /// }); + /// tx.send(5).wait().unwrap(); + /// assert_eq!(rx.collect().wait(), Ok(vec![42, 42, 42, 42, 42])) + /// ``` + fn with_flat_map<U, F, St>(self, f: F) -> WithFlatMap<Self, U, F, St> + where F: FnMut(U) -> St, + St: Stream<Item = Self::SinkItem, Error=Self::SinkError>, + Self: Sized + { + with_flat_map::new(self, f) + } + + /* + fn with_map<U, F>(self, f: F) -> WithMap<Self, U, F> + where F: FnMut(U) -> Self::SinkItem, + Self: Sized; + + fn with_filter<F>(self, f: F) -> WithFilter<Self, F> + where F: FnMut(Self::SinkItem) -> bool, + Self: Sized; + + fn with_filter_map<U, F>(self, f: F) -> WithFilterMap<Self, U, F> + where F: FnMut(U) -> Option<Self::SinkItem>, + Self: Sized; + */ + + /// Transforms the error returned by the sink. + fn sink_map_err<F, E>(self, f: F) -> SinkMapErr<Self, F> + where F: FnOnce(Self::SinkError) -> E, + Self: Sized, + { + map_err::new(self, f) + } + + /// Map this sink's error to any error implementing `From` for this sink's + /// `Error`, returning a new sink. + /// + /// If wanting to map errors of a `Sink + Stream`, use `.sink_from_err().from_err()`. + fn sink_from_err<E: From<Self::SinkError>>(self) -> from_err::SinkFromErr<Self, E> + where Self: Sized, + { + from_err::new(self) + } + + + /// Adds a fixed-size buffer to the current sink. + /// + /// The resulting sink will buffer up to `amt` items when the underlying + /// sink is unwilling to accept additional items. Calling `poll_complete` on + /// the buffered sink will attempt to both empty the buffer and complete + /// processing on the underlying sink. 
+ /// + /// Note that this function consumes the given sink, returning a wrapped + /// version, much like `Iterator::map`. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + #[cfg(feature = "use_std")] + fn buffer(self, amt: usize) -> Buffer<Self> + where Self: Sized + { + buffer::new(self, amt) + } + + /// Fanout items to multiple sinks. + /// + /// This adapter clones each incoming item and forwards it to both this as well as + /// the other sink at the same time. + fn fanout<S>(self, other: S) -> Fanout<Self, S> + where Self: Sized, + Self::SinkItem: Clone, + S: Sink<SinkItem=Self::SinkItem, SinkError=Self::SinkError> + { + fanout::new(self, other) + } + + /// A future that completes when the sink has finished processing all + /// pending requests. + /// + /// The sink itself is returned after flushing is complete; this adapter is + /// intended to be used when you want to stop sending to the sink until + /// all current requests are processed. + fn flush(self) -> Flush<Self> + where Self: Sized + { + flush::new(self) + } + + /// A future that completes after the given item has been fully processed + /// into the sink, including flushing. + /// + /// Note that, **because of the flushing requirement, it is usually better + /// to batch together items to send via `send_all`, rather than flushing + /// between each item.** + /// + /// On completion, the sink is returned. + fn send(self, item: Self::SinkItem) -> Send<Self> + where Self: Sized + { + send::new(self, item) + } + + /// A future that completes after the given stream has been fully processed + /// into the sink, including flushing. + /// + /// This future will drive the stream to keep producing items until it is + /// exhausted, sending each item to the sink. It will complete once both the + /// stream is exhausted, the sink has received all items, the sink has been + /// flushed, and the sink has been closed. 
+ /// + /// Doing `sink.send_all(stream)` is roughly equivalent to + /// `stream.forward(sink)`. The returned future will exhaust all items from + /// `stream` and send them to `self`, closing `self` when all items have been + /// received. + /// + /// On completion, the pair `(sink, source)` is returned. + fn send_all<S>(self, stream: S) -> SendAll<Self, S> + where S: Stream<Item = Self::SinkItem>, + Self::SinkError: From<S::Error>, + Self: Sized + { + send_all::new(self, stream) + } +} + +impl<'a, S: ?Sized + Sink> Sink for &'a mut S { + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: Self::SinkItem) + -> StartSend<Self::SinkItem, Self::SinkError> { + (**self).start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + (**self).poll_complete() + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + (**self).close() + } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/send.rs b/third_party/rust/futures-0.1.31/src/sink/send.rs new file mode 100644 index 0000000000..71173fa836 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/send.rs @@ -0,0 +1,59 @@ +use {Poll, Async, Future, AsyncSink}; +use sink::Sink; + +/// Future for the `Sink::send` combinator, which sends a value to a sink and +/// then waits until the sink has fully flushed. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Send<S: Sink> { + sink: Option<S>, + item: Option<S::SinkItem>, +} + +pub fn new<S: Sink>(sink: S, item: S::SinkItem) -> Send<S> { + Send { + sink: Some(sink), + item: Some(item), + } +} + +impl<S: Sink> Send<S> { + /// Get a shared reference to the inner sink. + pub fn get_ref(&self) -> &S { + self.sink.as_ref().take().expect("Attempted Send::get_ref after completion") + } + + /// Get a mutable reference to the inner sink. 
+ pub fn get_mut(&mut self) -> &mut S { + self.sink.as_mut().take().expect("Attempted Send::get_mut after completion") + } + + fn sink_mut(&mut self) -> &mut S { + self.sink.as_mut().take().expect("Attempted to poll Send after completion") + } + + fn take_sink(&mut self) -> S { + self.sink.take().expect("Attempted to poll Send after completion") + } +} + +impl<S: Sink> Future for Send<S> { + type Item = S; + type Error = S::SinkError; + + fn poll(&mut self) -> Poll<S, S::SinkError> { + if let Some(item) = self.item.take() { + if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? { + self.item = Some(item); + return Ok(Async::NotReady); + } + } + + // we're done sending the item, but want to block on flushing the + // sink + try_ready!(self.sink_mut().poll_complete()); + + // now everything's emptied, so return the sink for further use + Ok(Async::Ready(self.take_sink())) + } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/send_all.rs b/third_party/rust/futures-0.1.31/src/sink/send_all.rs new file mode 100644 index 0000000000..a230903d1c --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/send_all.rs @@ -0,0 +1,88 @@ +use {Poll, Async, Future, AsyncSink}; +use stream::{Stream, Fuse}; +use sink::Sink; + +/// Future for the `Sink::send_all` combinator, which sends a stream of values +/// to a sink and then waits until the sink has fully flushed those values. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct SendAll<T, U: Stream> { + sink: Option<T>, + stream: Option<Fuse<U>>, + buffered: Option<U::Item>, +} + +pub fn new<T, U>(sink: T, stream: U) -> SendAll<T, U> + where T: Sink, + U: Stream<Item = T::SinkItem>, + T::SinkError: From<U::Error>, +{ + SendAll { + sink: Some(sink), + stream: Some(stream.fuse()), + buffered: None, + } +} + +impl<T, U> SendAll<T, U> + where T: Sink, + U: Stream<Item = T::SinkItem>, + T::SinkError: From<U::Error>, +{ + fn sink_mut(&mut self) -> &mut T { + self.sink.as_mut().take().expect("Attempted to poll SendAll after completion") + } + + fn stream_mut(&mut self) -> &mut Fuse<U> { + self.stream.as_mut().take() + .expect("Attempted to poll SendAll after completion") + } + + fn take_result(&mut self) -> (T, U) { + let sink = self.sink.take() + .expect("Attempted to poll Forward after completion"); + let fuse = self.stream.take() + .expect("Attempted to poll Forward after completion"); + (sink, fuse.into_inner()) + } + + fn try_start_send(&mut self, item: U::Item) -> Poll<(), T::SinkError> { + debug_assert!(self.buffered.is_none()); + if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? { + self.buffered = Some(item); + return Ok(Async::NotReady) + } + Ok(Async::Ready(())) + } +} + +impl<T, U> Future for SendAll<T, U> + where T: Sink, + U: Stream<Item = T::SinkItem>, + T::SinkError: From<U::Error>, +{ + type Item = (T, U); + type Error = T::SinkError; + + fn poll(&mut self) -> Poll<(T, U), T::SinkError> { + // If we've got an item buffered already, we need to write it to the + // sink before we can do anything else + if let Some(item) = self.buffered.take() { + try_ready!(self.try_start_send(item)) + } + + loop { + match self.stream_mut().poll()? 
{ + Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)), + Async::Ready(None) => { + try_ready!(self.sink_mut().close()); + return Ok(Async::Ready(self.take_result())) + } + Async::NotReady => { + try_ready!(self.sink_mut().poll_complete()); + return Ok(Async::NotReady) + } + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/wait.rs b/third_party/rust/futures-0.1.31/src/sink/wait.rs new file mode 100644 index 0000000000..940a58862f --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/wait.rs @@ -0,0 +1,59 @@ +use sink::Sink; +use executor; + +/// A sink combinator which converts an asynchronous sink to a **blocking +/// sink**. +/// +/// Created by the `Sink::wait` method, this function transforms any sink into a +/// blocking version. This is implemented by blocking the current thread when a +/// sink is otherwise unable to make progress. +#[must_use = "sinks do nothing unless used"] +#[derive(Debug)] +pub struct Wait<S> { + sink: executor::Spawn<S>, +} + +pub fn new<S: Sink>(s: S) -> Wait<S> { + Wait { + sink: executor::spawn(s), + } +} + +impl<S: Sink> Wait<S> { + /// Sends a value to this sink, blocking the current thread until it's able + /// to do so. + /// + /// This function will take the `value` provided and call the underlying + /// sink's `start_send` function until it's ready to accept the value. If + /// the function returns `NotReady` then the current thread is blocked + /// until it is otherwise ready to accept the value. + /// + /// # Return value + /// + /// If `Ok(())` is returned then the `value` provided was successfully sent + /// along the sink, and if `Err(e)` is returned then an error occurred + /// which prevented the value from being sent. + pub fn send(&mut self, value: S::SinkItem) -> Result<(), S::SinkError> { + self.sink.wait_send(value) + } + + /// Flushes any buffered data in this sink, blocking the current thread + /// until it's entirely flushed. 
+ /// + /// This function will call the underlying sink's `poll_complete` method + /// until it returns that it's ready to proceed. If the method returns + /// `NotReady` the current thread will be blocked until it's otherwise + /// ready to proceed. + pub fn flush(&mut self) -> Result<(), S::SinkError> { + self.sink.wait_flush() + } + + /// Close this sink, blocking the current thread until it's entirely closed. + /// + /// This function will call the underlying sink's `close` method + /// until it returns that it's closed. If the method returns + /// `NotReady` the current thread will be blocked until it's otherwise closed. + pub fn close(&mut self) -> Result<(), S::SinkError> { + self.sink.wait_close() + } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/with.rs b/third_party/rust/futures-0.1.31/src/sink/with.rs new file mode 100644 index 0000000000..3326b6e49c --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/with.rs @@ -0,0 +1,153 @@ +use core::mem; +use core::marker::PhantomData; + +use {IntoFuture, Future, Poll, Async, StartSend, AsyncSink}; +use sink::Sink; +use stream::Stream; + +/// Sink for the `Sink::with` combinator, chaining a computation to run *prior* +/// to pushing a value into the underlying sink. 
+#[derive(Clone, Debug)] +#[must_use = "sinks do nothing unless polled"] +pub struct With<S, U, F, Fut> + where S: Sink, + F: FnMut(U) -> Fut, + Fut: IntoFuture, +{ + sink: S, + f: F, + state: State<Fut::Future, S::SinkItem>, + _phantom: PhantomData<fn(U)>, +} + +#[derive(Clone, Debug)] +enum State<Fut, T> { + Empty, + Process(Fut), + Buffered(T), +} + +impl<Fut, T> State<Fut, T> { + fn is_empty(&self) -> bool { + if let State::Empty = *self { + true + } else { + false + } + } +} + +pub fn new<S, U, F, Fut>(sink: S, f: F) -> With<S, U, F, Fut> + where S: Sink, + F: FnMut(U) -> Fut, + Fut: IntoFuture<Item = S::SinkItem>, + Fut::Error: From<S::SinkError>, +{ + With { + state: State::Empty, + sink: sink, + f: f, + _phantom: PhantomData, + } +} + +// Forwarding impl of Stream from the underlying sink +impl<S, U, F, Fut> Stream for With<S, U, F, Fut> + where S: Stream + Sink, + F: FnMut(U) -> Fut, + Fut: IntoFuture +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + self.sink.poll() + } +} + +impl<S, U, F, Fut> With<S, U, F, Fut> + where S: Sink, + F: FnMut(U) -> Fut, + Fut: IntoFuture<Item = S::SinkItem>, + Fut::Error: From<S::SinkError>, +{ + /// Get a shared reference to the inner sink. + pub fn get_ref(&self) -> &S { + &self.sink + } + + /// Get a mutable reference to the inner sink. + pub fn get_mut(&mut self) -> &mut S { + &mut self.sink + } + + /// Consumes this combinator, returning the underlying sink. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.sink + } + + fn poll(&mut self) -> Poll<(), Fut::Error> { + loop { + match mem::replace(&mut self.state, State::Empty) { + State::Empty => break, + State::Process(mut fut) => { + match fut.poll()? 
{ + Async::Ready(item) => { + self.state = State::Buffered(item); + } + Async::NotReady => { + self.state = State::Process(fut); + break + } + } + } + State::Buffered(item) => { + if let AsyncSink::NotReady(item) = self.sink.start_send(item)? { + self.state = State::Buffered(item); + break + } + } + } + } + + if self.state.is_empty() { + Ok(Async::Ready(())) + } else { + Ok(Async::NotReady) + } + } +} + +impl<S, U, F, Fut> Sink for With<S, U, F, Fut> + where S: Sink, + F: FnMut(U) -> Fut, + Fut: IntoFuture<Item = S::SinkItem>, + Fut::Error: From<S::SinkError>, +{ + type SinkItem = U; + type SinkError = Fut::Error; + + fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Fut::Error> { + if self.poll()?.is_not_ready() { + return Ok(AsyncSink::NotReady(item)) + } + self.state = State::Process((self.f)(item).into_future()); + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), Fut::Error> { + // poll ourselves first, to push data downward + let me_ready = self.poll()?; + // always propagate `poll_complete` downward to attempt to make progress + try_ready!(self.sink.poll_complete()); + Ok(me_ready) + } + + fn close(&mut self) -> Poll<(), Fut::Error> { + try_ready!(self.poll()); + Ok(self.sink.close()?) 
+ } +} diff --git a/third_party/rust/futures-0.1.31/src/sink/with_flat_map.rs b/third_party/rust/futures-0.1.31/src/sink/with_flat_map.rs new file mode 100644 index 0000000000..80c4f6605a --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sink/with_flat_map.rs @@ -0,0 +1,126 @@ +use core::marker::PhantomData; + +use {Poll, Async, StartSend, AsyncSink}; +use sink::Sink; +use stream::Stream; + +/// Sink for the `Sink::with_flat_map` combinator, chaining a computation that returns an iterator +/// to run prior to pushing a value into the underlying sink +#[derive(Debug)] +#[must_use = "sinks do nothing unless polled"] +pub struct WithFlatMap<S, U, F, St> +where + S: Sink, + F: FnMut(U) -> St, + St: Stream<Item = S::SinkItem, Error=S::SinkError>, +{ + sink: S, + f: F, + stream: Option<St>, + buffer: Option<S::SinkItem>, + _phantom: PhantomData<fn(U)>, +} + +pub fn new<S, U, F, St>(sink: S, f: F) -> WithFlatMap<S, U, F, St> +where + S: Sink, + F: FnMut(U) -> St, + St: Stream<Item = S::SinkItem, Error=S::SinkError>, +{ + WithFlatMap { + sink: sink, + f: f, + stream: None, + buffer: None, + _phantom: PhantomData, + } +} + +impl<S, U, F, St> WithFlatMap<S, U, F, St> +where + S: Sink, + F: FnMut(U) -> St, + St: Stream<Item = S::SinkItem, Error=S::SinkError>, +{ + /// Get a shared reference to the inner sink. + pub fn get_ref(&self) -> &S { + &self.sink + } + + /// Get a mutable reference to the inner sink. + pub fn get_mut(&mut self) -> &mut S { + &mut self.sink + } + + /// Consumes this combinator, returning the underlying sink. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.sink + } + + fn try_empty_stream(&mut self) -> Poll<(), S::SinkError> { + if let Some(x) = self.buffer.take() { + if let AsyncSink::NotReady(x) = self.sink.start_send(x)? 
{ + self.buffer = Some(x); + return Ok(Async::NotReady); + } + } + if let Some(mut stream) = self.stream.take() { + while let Some(x) = try_ready!(stream.poll()) { + if let AsyncSink::NotReady(x) = self.sink.start_send(x)? { + self.stream = Some(stream); + self.buffer = Some(x); + return Ok(Async::NotReady); + } + } + } + Ok(Async::Ready(())) + } +} + +impl<S, U, F, St> Stream for WithFlatMap<S, U, F, St> +where + S: Stream + Sink, + F: FnMut(U) -> St, + St: Stream<Item = S::SinkItem, Error=S::SinkError>, +{ + type Item = S::Item; + type Error = S::Error; + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + self.sink.poll() + } +} + +impl<S, U, F, St> Sink for WithFlatMap<S, U, F, St> +where + S: Sink, + F: FnMut(U) -> St, + St: Stream<Item = S::SinkItem, Error=S::SinkError>, +{ + type SinkItem = U; + type SinkError = S::SinkError; + fn start_send(&mut self, i: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> { + if self.try_empty_stream()?.is_not_ready() { + return Ok(AsyncSink::NotReady(i)); + } + assert!(self.stream.is_none()); + self.stream = Some((self.f)(i)); + self.try_empty_stream()?; + Ok(AsyncSink::Ready) + } + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + if self.try_empty_stream()?.is_not_ready() { + return Ok(Async::NotReady); + } + self.sink.poll_complete() + } + fn close(&mut self) -> Poll<(), Self::SinkError> { + if self.try_empty_stream()?.is_not_ready() { + return Ok(Async::NotReady); + } + assert!(self.stream.is_none()); + self.sink.close() + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/and_then.rs b/third_party/rust/futures-0.1.31/src/stream/and_then.rs new file mode 100644 index 0000000000..1fac8b952d --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/and_then.rs @@ -0,0 +1,106 @@ +use {IntoFuture, Future, Poll, Async}; +use stream::Stream; + +/// A stream combinator which chains a computation onto values produced by a +/// stream. 
+/// +/// This structure is produced by the `Stream::and_then` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct AndThen<S, F, U> + where U: IntoFuture, +{ + stream: S, + future: Option<U::Future>, + f: F, +} + +pub fn new<S, F, U>(s: S, f: F) -> AndThen<S, F, U> + where S: Stream, + F: FnMut(S::Item) -> U, + U: IntoFuture<Error=S::Error>, +{ + AndThen { + stream: s, + future: None, + f: f, + } +} + +impl<S, F, U> AndThen<S, F, U> + where U: IntoFuture, +{ + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, F, U: IntoFuture> ::sink::Sink for AndThen<S, F, U> + where S: ::sink::Sink +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, F, U> Stream for AndThen<S, F, U> + where S: Stream, + F: FnMut(S::Item) -> U, + U: IntoFuture<Error=S::Error>, +{ + type Item = U::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<U::Item>, S::Error> { + if self.future.is_none() { + let item = match try_ready!(self.stream.poll()) { + None => return Ok(Async::Ready(None)), + Some(e) => e, + }; + self.future = Some((self.f)(item).into_future()); + } + assert!(self.future.is_some()); + match self.future.as_mut().unwrap().poll() { + Ok(Async::Ready(e)) => { + self.future = None; + Ok(Async::Ready(Some(e))) + } + Err(e) => { + self.future = None; + Err(e) + } + Ok(Async::NotReady) => Ok(Async::NotReady) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/buffer_unordered.rs b/third_party/rust/futures-0.1.31/src/stream/buffer_unordered.rs new file mode 100644 index 0000000000..3011108cf3 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/buffer_unordered.rs @@ -0,0 +1,130 @@ +use std::fmt; + +use {Async, IntoFuture, Poll}; +use stream::{Stream, Fuse, FuturesUnordered}; + +/// An adaptor for a stream of futures to execute the futures concurrently, if +/// possible, delivering results as they become available. +/// +/// This adaptor will buffer up a list of pending futures, and then return their +/// results in the order that they complete. This is created by the +/// `Stream::buffer_unordered` method. 
+#[must_use = "streams do nothing unless polled"] +pub struct BufferUnordered<S> + where S: Stream, + S::Item: IntoFuture, +{ + stream: Fuse<S>, + queue: FuturesUnordered<<S::Item as IntoFuture>::Future>, + max: usize, +} + +impl<S> fmt::Debug for BufferUnordered<S> + where S: Stream + fmt::Debug, + S::Item: IntoFuture, + <<S as Stream>::Item as IntoFuture>::Future: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("BufferUnordered") + .field("stream", &self.stream) + .field("queue", &self.queue) + .field("max", &self.max) + .finish() + } +} + +pub fn new<S>(s: S, amt: usize) -> BufferUnordered<S> + where S: Stream, + S::Item: IntoFuture<Error=<S as Stream>::Error>, +{ + BufferUnordered { + stream: super::fuse::new(s), + queue: FuturesUnordered::new(), + max: amt, + } +} + +impl<S> BufferUnordered<S> + where S: Stream, + S::Item: IntoFuture<Error=<S as Stream>::Error>, +{ + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + self.stream.get_ref() + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + self.stream.get_mut() + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream.into_inner() + } +} + +impl<S> Stream for BufferUnordered<S> + where S: Stream, + S::Item: IntoFuture<Error=<S as Stream>::Error>, +{ + type Item = <S::Item as IntoFuture>::Item; + type Error = <S as Stream>::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + // First up, try to spawn off as many futures as possible by filling up + // our slab of futures. + while self.queue.len() < self.max { + let future = match self.stream.poll()? { + Async::Ready(Some(s)) => s.into_future(), + Async::Ready(None) | + Async::NotReady => break, + }; + + self.queue.push(future); + } + + // Try polling a new future + if let Some(val) = try_ready!(self.queue.poll()) { + return Ok(Async::Ready(Some(val))); + } + + // If we've gotten this far, then there are no events for us to process + // and nothing was ready, so figure out if we're not done yet or if + // we've reached the end. + if self.stream.is_done() { + Ok(Async::Ready(None)) + } else { + Ok(Async::NotReady) + } + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S> ::sink::Sink for BufferUnordered<S> + where S: ::sink::Sink + Stream, + S::Item: IntoFuture, +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/buffered.rs b/third_party/rust/futures-0.1.31/src/stream/buffered.rs new file mode 100644 index 0000000000..5616b73d7a --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/buffered.rs @@ -0,0 +1,132 @@ +use std::fmt; + +use {Async, IntoFuture, Poll}; +use stream::{Stream, Fuse, FuturesOrdered}; + +/// An adaptor for a stream of futures to execute the futures 
concurrently, if +/// possible. +/// +/// This adaptor will buffer up a list of pending futures, and then return their +/// results in the order that they were pulled out of the original stream. This +/// is created by the `Stream::buffered` method. +#[must_use = "streams do nothing unless polled"] +pub struct Buffered<S> + where S: Stream, + S::Item: IntoFuture, +{ + stream: Fuse<S>, + queue: FuturesOrdered<<S::Item as IntoFuture>::Future>, + max: usize, +} + +impl<S> fmt::Debug for Buffered<S> + where S: Stream + fmt::Debug, + S::Item: IntoFuture, + <<S as Stream>::Item as IntoFuture>::Future: fmt::Debug, + <<S as Stream>::Item as IntoFuture>::Item: fmt::Debug, + <<S as Stream>::Item as IntoFuture>::Error: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Buffered") + .field("stream", &self.stream) + .field("queue", &self.queue) + .field("max", &self.max) + .finish() + } +} + +pub fn new<S>(s: S, amt: usize) -> Buffered<S> + where S: Stream, + S::Item: IntoFuture<Error=<S as Stream>::Error>, +{ + Buffered { + stream: super::fuse::new(s), + queue: FuturesOrdered::new(), + max: amt, + } +} + +impl<S> Buffered<S> + where S: Stream, + S::Item: IntoFuture<Error=<S as Stream>::Error>, +{ + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + self.stream.get_ref() + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + self.stream.get_mut() + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream.into_inner() + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S> ::sink::Sink for Buffered<S> + where S: ::sink::Sink + Stream, + S::Item: IntoFuture, +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S> Stream for Buffered<S> + where S: Stream, + S::Item: IntoFuture<Error=<S as Stream>::Error>, +{ + type Item = <S::Item as IntoFuture>::Item; + type Error = <S as Stream>::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + // First up, try to spawn off as many futures as possible by filling up + // our slab of futures. + while self.queue.len() < self.max { + let future = match self.stream.poll()? { + Async::Ready(Some(s)) => s.into_future(), + Async::Ready(None) | + Async::NotReady => break, + }; + + self.queue.push(future); + } + + // Try polling a new future + if let Some(val) = try_ready!(self.queue.poll()) { + return Ok(Async::Ready(Some(val))); + } + + // If we've gotten this far, then there are no events for us to process + // and nothing was ready, so figure out if we're not done yet or if + // we've reached the end. 
+ if self.stream.is_done() { + Ok(Async::Ready(None)) + } else { + Ok(Async::NotReady) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/catch_unwind.rs b/third_party/rust/futures-0.1.31/src/stream/catch_unwind.rs new file mode 100644 index 0000000000..d3244946e5 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/catch_unwind.rs @@ -0,0 +1,71 @@ +use std::prelude::v1::*; +use std::any::Any; +use std::panic::{catch_unwind, UnwindSafe, AssertUnwindSafe}; +use std::mem; + +use super::super::{Poll, Async}; +use super::Stream; + +/// Stream for the `catch_unwind` combinator. +/// +/// This is created by the `Stream::catch_unwind` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct CatchUnwind<S> where S: Stream { + state: CatchUnwindState<S>, +} + +pub fn new<S>(stream: S) -> CatchUnwind<S> + where S: Stream + UnwindSafe, +{ + CatchUnwind { + state: CatchUnwindState::Stream(stream), + } +} + +#[derive(Debug)] +enum CatchUnwindState<S> { + Stream(S), + Eof, + Done, +} + +impl<S> Stream for CatchUnwind<S> + where S: Stream + UnwindSafe, +{ + type Item = Result<S::Item, S::Error>; + type Error = Box<Any + Send>; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + let mut stream = match mem::replace(&mut self.state, CatchUnwindState::Eof) { + CatchUnwindState::Done => panic!("cannot poll after eof"), + CatchUnwindState::Eof => { + self.state = CatchUnwindState::Done; + return Ok(Async::Ready(None)); + } + CatchUnwindState::Stream(stream) => stream, + }; + let res = catch_unwind(|| (stream.poll(), stream)); + match res { + Err(e) => Err(e), // and state is already Eof + Ok((poll, stream)) => { + self.state = CatchUnwindState::Stream(stream); + match poll { + Err(e) => Ok(Async::Ready(Some(Err(e)))), + Ok(Async::NotReady) => Ok(Async::NotReady), + Ok(Async::Ready(Some(r))) => Ok(Async::Ready(Some(Ok(r)))), + Ok(Async::Ready(None)) => Ok(Async::Ready(None)), + } + } + } + } +} + +impl<S: Stream> 
Stream for AssertUnwindSafe<S> { + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + self.0.poll() + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/chain.rs b/third_party/rust/futures-0.1.31/src/stream/chain.rs new file mode 100644 index 0000000000..0ff0e5ce6f --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/chain.rs @@ -0,0 +1,57 @@ +use core::mem; + +use stream::Stream; +use {Async, Poll}; + + +/// State of chain stream. +#[derive(Debug)] +enum State<S1, S2> { + /// Emitting elements of first stream + First(S1, S2), + /// Emitting elements of second stream + Second(S2), + /// Temporary value to replace first with second + Temp, +} + +/// An adapter for chaining the output of two streams. +/// +/// The resulting stream produces items from first stream and then +/// from second stream. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Chain<S1, S2> { + state: State<S1, S2> +} + +pub fn new<S1, S2>(s1: S1, s2: S2) -> Chain<S1, S2> + where S1: Stream, S2: Stream<Item=S1::Item, Error=S1::Error>, +{ + Chain { state: State::First(s1, s2) } +} + +impl<S1, S2> Stream for Chain<S1, S2> + where S1: Stream, S2: Stream<Item=S1::Item, Error=S1::Error>, +{ + type Item = S1::Item; + type Error = S1::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + loop { + match self.state { + State::First(ref mut s1, ref _s2) => match s1.poll() { + Ok(Async::Ready(None)) => (), // roll + x => return x, + }, + State::Second(ref mut s2) => return s2.poll(), + State::Temp => unreachable!(), + } + + self.state = match mem::replace(&mut self.state, State::Temp) { + State::First(_s1, s2) => State::Second(s2), + _ => unreachable!(), + }; + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/channel.rs b/third_party/rust/futures-0.1.31/src/stream/channel.rs new file mode 100644 index 0000000000..89a419d150 --- /dev/null +++ 
b/third_party/rust/futures-0.1.31/src/stream/channel.rs @@ -0,0 +1,114 @@ +#![cfg(feature = "with-deprecated")] +#![deprecated(since = "0.1.4", note = "use sync::mpsc::channel instead")] +#![allow(deprecated)] + +use std::any::Any; +use std::error::Error; +use std::fmt; + +use {Poll, Async, Stream, Future, Sink}; +use sink::Send; +use sync::mpsc; + +/// Creates an in-memory channel implementation of the `Stream` trait. +/// +/// This method creates a concrete implementation of the `Stream` trait which +/// can be used to send values across threads in a streaming fashion. This +/// channel is unique in that it implements back pressure to ensure that the +/// sender never outpaces the receiver. The `Sender::send` method will only +/// allow sending one message and the next message can only be sent once the +/// first was consumed. +/// +/// The `Receiver` returned implements the `Stream` trait and has access to any +/// number of the associated combinators for transforming the result. +pub fn channel<T, E>() -> (Sender<T, E>, Receiver<T, E>) { + let (tx, rx) = mpsc::channel(0); + (Sender { inner: tx }, Receiver { inner: rx }) +} + +/// The transmission end of a channel which is used to send values. +/// +/// This is created by the `channel` method in the `stream` module. +#[derive(Debug)] +pub struct Sender<T, E> { + inner: mpsc::Sender<Result<T, E>>, +} + +/// The receiving end of a channel which implements the `Stream` trait. +/// +/// This is a concrete implementation of a stream which can be used to represent +/// a stream of values being computed elsewhere. This is created by the +/// `channel` method in the `stream` module. +#[must_use = "streams do nothing unless polled"] +#[derive(Debug)] +pub struct Receiver<T, E> { + inner: mpsc::Receiver<Result<T, E>>, +} + +/// Error type for sending, used when the receiving end of the channel is dropped +pub struct SendError<T, E>(Result<T, E>); + +/// Future returned by `Sender::send`. 
+#[derive(Debug)] +pub struct FutureSender<T, E> { + inner: Send<mpsc::Sender<Result<T, E>>>, +} + +impl<T, E> fmt::Debug for SendError<T, E> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_tuple("SendError") + .field(&"...") + .finish() + } +} + +impl<T, E> fmt::Display for SendError<T, E> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "send failed because receiver is gone") + } +} + +impl<T, E> Error for SendError<T, E> + where T: Any, E: Any +{ + fn description(&self) -> &str { + "send failed because receiver is gone" + } +} + + +impl<T, E> Stream for Receiver<T, E> { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<Option<T>, E> { + match self.inner.poll().expect("cannot fail") { + Async::Ready(Some(Ok(e))) => Ok(Async::Ready(Some(e))), + Async::Ready(Some(Err(e))) => Err(e), + Async::Ready(None) => Ok(Async::Ready(None)), + Async::NotReady => Ok(Async::NotReady), + } + } +} + +impl<T, E> Sender<T, E> { + /// Sends a new value along this channel to the receiver. + /// + /// This method consumes the sender and returns a future which will resolve + /// to the sender again when the value sent has been consumed. 
+ pub fn send(self, t: Result<T, E>) -> FutureSender<T, E> { + FutureSender { inner: self.inner.send(t) } + } +} + +impl<T, E> Future for FutureSender<T, E> { + type Item = Sender<T, E>; + type Error = SendError<T, E>; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + match self.inner.poll() { + Ok(a) => Ok(a.map(|a| Sender { inner: a })), + Err(e) => Err(SendError(e.into_inner())), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/chunks.rs b/third_party/rust/futures-0.1.31/src/stream/chunks.rs new file mode 100644 index 0000000000..dbfaeb89ec --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/chunks.rs @@ -0,0 +1,136 @@ +use std::mem; +use std::prelude::v1::*; + +use {Async, Poll}; +use stream::{Stream, Fuse}; + +/// An adaptor that chunks up elements in a vector. +/// +/// This adaptor will buffer up a list of items in the stream and pass on the +/// vector used for buffering when a specified capacity has been reached. This +/// is created by the `Stream::chunks` method. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Chunks<S> + where S: Stream +{ + items: Vec<S::Item>, + err: Option<S::Error>, + stream: Fuse<S>, + cap: usize, // https://github.com/rust-lang-nursery/futures-rs/issues/1475 +} + +pub fn new<S>(s: S, capacity: usize) -> Chunks<S> + where S: Stream +{ + assert!(capacity > 0); + + Chunks { + items: Vec::with_capacity(capacity), + err: None, + stream: super::fuse::new(s), + cap: capacity, + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S> ::sink::Sink for Chunks<S> + where S: ::sink::Sink + Stream +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + + +impl<S> Chunks<S> where S: Stream { + fn take(&mut self) -> Vec<S::Item> { + let cap = self.cap; + mem::replace(&mut self.items, Vec::with_capacity(cap)) + } + + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + self.stream.get_ref() + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + self.stream.get_mut() + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream.into_inner() + } +} + +impl<S> Stream for Chunks<S> + where S: Stream +{ + type Item = Vec<<S as Stream>::Item>; + type Error = <S as Stream>::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + if let Some(err) = self.err.take() { + return Err(err) + } + + loop { + match self.stream.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + + // Push the item into the buffer and check whether it is full. + // If so, replace our buffer with a new and empty one and return + // the full one. + Ok(Async::Ready(Some(item))) => { + self.items.push(item); + if self.items.len() >= self.cap { + return Ok(Some(self.take()).into()) + } + } + + // Since the underlying stream ran out of values, return what we + // have buffered, if we have anything. + Ok(Async::Ready(None)) => { + return if self.items.len() > 0 { + let full_buf = mem::replace(&mut self.items, Vec::new()); + Ok(Some(full_buf).into()) + } else { + Ok(Async::Ready(None)) + } + } + + // If we've got buffered items be sure to return them first, + // we'll defer our error for later. + Err(e) => { + if self.items.len() == 0 { + return Err(e) + } else { + self.err = Some(e); + return Ok(Some(self.take()).into()) + } + } + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/collect.rs b/third_party/rust/futures-0.1.31/src/stream/collect.rs new file mode 100644 index 0000000000..8bd9d0e1dc --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/collect.rs @@ -0,0 +1,52 @@ +use std::prelude::v1::*; + +use std::mem; + +use {Future, Poll, Async}; +use stream::Stream; + +/// A future which collects all of the values of a stream into a vector. +/// +/// This future is created by the `Stream::collect` method. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Collect<S> where S: Stream { + stream: S, + items: Vec<S::Item>, +} + +pub fn new<S>(s: S) -> Collect<S> + where S: Stream, +{ + Collect { + stream: s, + items: Vec::new(), + } +} + +impl<S: Stream> Collect<S> { + fn finish(&mut self) -> Vec<S::Item> { + mem::replace(&mut self.items, Vec::new()) + } +} + +impl<S> Future for Collect<S> + where S: Stream, +{ + type Item = Vec<S::Item>; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Vec<S::Item>, S::Error> { + loop { + match self.stream.poll() { + Ok(Async::Ready(Some(e))) => self.items.push(e), + Ok(Async::Ready(None)) => return Ok(Async::Ready(self.finish())), + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => { + self.finish(); + return Err(e) + } + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/concat.rs b/third_party/rust/futures-0.1.31/src/stream/concat.rs new file mode 100644 index 0000000000..a0da71bdd5 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/concat.rs @@ -0,0 +1,172 @@ +use core::mem; +use core::fmt::{Debug, Formatter, Result as FmtResult}; +use core::default::Default; + +use {Poll, Async}; +use future::Future; +use stream::Stream; + +/// A stream combinator to concatenate the results of a stream into the first +/// yielded item. +/// +/// This structure is produced by the `Stream::concat2` method. 
+#[must_use = "streams do nothing unless polled"] +pub struct Concat2<S> + where S: Stream, +{ + inner: ConcatSafe<S> +} + +impl<S: Debug> Debug for Concat2<S> where S: Stream, S::Item: Debug { + fn fmt(&self, fmt: &mut Formatter) -> FmtResult { + fmt.debug_struct("Concat2") + .field("inner", &self.inner) + .finish() + } +} + +pub fn new2<S>(s: S) -> Concat2<S> + where S: Stream, + S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default, +{ + Concat2 { + inner: new_safe(s) + } +} + +impl<S> Future for Concat2<S> + where S: Stream, + S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default, + +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + self.inner.poll().map(|a| { + match a { + Async::NotReady => Async::NotReady, + Async::Ready(None) => Async::Ready(Default::default()), + Async::Ready(Some(e)) => Async::Ready(e) + } + }) + } +} + + +/// A stream combinator to concatenate the results of a stream into the first +/// yielded item. +/// +/// This structure is produced by the `Stream::concat` method. 
+#[deprecated(since="0.1.18", note="please use `Stream::Concat2` instead")] +#[must_use = "streams do nothing unless polled"] +pub struct Concat<S> + where S: Stream, +{ + inner: ConcatSafe<S> +} + +#[allow(deprecated)] +impl<S: Debug> Debug for Concat<S> where S: Stream, S::Item: Debug { + fn fmt(&self, fmt: &mut Formatter) -> FmtResult { + fmt.debug_struct("Concat") + .field("inner", &self.inner) + .finish() + } +} + +#[allow(deprecated)] +pub fn new<S>(s: S) -> Concat<S> + where S: Stream, + S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator, +{ + Concat { + inner: new_safe(s) + } +} + +#[allow(deprecated)] +impl<S> Future for Concat<S> + where S: Stream, + S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator, + +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + self.inner.poll().map(|a| { + match a { + Async::NotReady => Async::NotReady, + Async::Ready(None) => panic!("attempted concatenation of empty stream"), + Async::Ready(Some(e)) => Async::Ready(e) + } + }) + } +} + + +#[derive(Debug)] +struct ConcatSafe<S> + where S: Stream, +{ + stream: S, + extend: Inner<S::Item>, +} + +fn new_safe<S>(s: S) -> ConcatSafe<S> + where S: Stream, + S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator, +{ + ConcatSafe { + stream: s, + extend: Inner::First, + } +} + +impl<S> Future for ConcatSafe<S> + where S: Stream, + S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator, + +{ + type Item = Option<S::Item>; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + loop { + match self.stream.poll() { + Ok(Async::Ready(Some(i))) => { + match self.extend { + Inner::First => { + self.extend = Inner::Extending(i); + }, + Inner::Extending(ref mut e) => { + e.extend(i); + }, + Inner::Done => unreachable!(), + } + }, + Ok(Async::Ready(None)) => { + match mem::replace(&mut self.extend, Inner::Done) { + 
Inner::First => return Ok(Async::Ready(None)), + Inner::Extending(e) => return Ok(Async::Ready(Some(e))), + Inner::Done => panic!("cannot poll Concat again") + } + }, + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => { + self.extend = Inner::Done; + return Err(e) + } + } + } + } +} + + +#[derive(Debug)] +enum Inner<E> { + First, + Extending(E), + Done, +} diff --git a/third_party/rust/futures-0.1.31/src/stream/empty.rs b/third_party/rust/futures-0.1.31/src/stream/empty.rs new file mode 100644 index 0000000000..c53fb80238 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/empty.rs @@ -0,0 +1,29 @@ +use core::marker; + +use stream::Stream; +use {Poll, Async}; + +/// A stream which contains no elements. +/// +/// This stream can be created with the `stream::empty` function. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Empty<T, E> { + _data: marker::PhantomData<(T, E)>, +} + +/// Creates a stream which contains no elements. +/// +/// The returned stream will always return `Ready(None)` when polled. +pub fn empty<T, E>() -> Empty<T, E> { + Empty { _data: marker::PhantomData } +} + +impl<T, E> Stream for Empty<T, E> { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + Ok(Async::Ready(None)) + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/filter.rs b/third_party/rust/futures-0.1.31/src/stream/filter.rs new file mode 100644 index 0000000000..99c4abd657 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/filter.rs @@ -0,0 +1,89 @@ +use {Async, Poll}; +use stream::Stream; + +/// A stream combinator used to filter the results of a stream and only yield +/// some values. +/// +/// This structure is produced by the `Stream::filter` method. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Filter<S, F> { + stream: S, + f: F, +} + +pub fn new<S, F>(s: S, f: F) -> Filter<S, F> + where S: Stream, + F: FnMut(&S::Item) -> bool, +{ + Filter { + stream: s, + f: f, + } +} + +impl<S, F> Filter<S, F> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, F> ::sink::Sink for Filter<S, F> + where S: ::sink::Sink +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, F> Stream for Filter<S, F> + where S: Stream, + F: FnMut(&S::Item) -> bool, +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + loop { + match try_ready!(self.stream.poll()) { + Some(e) => { + if (self.f)(&e) { + return Ok(Async::Ready(Some(e))) + } + } + None => return Ok(Async::Ready(None)), + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/filter_map.rs 
b/third_party/rust/futures-0.1.31/src/stream/filter_map.rs new file mode 100644 index 0000000000..f91d26a45c --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/filter_map.rs @@ -0,0 +1,89 @@ +use {Async, Poll}; +use stream::Stream; + +/// A combinator used to filter the results of a stream and simultaneously map +/// them to a different type. +/// +/// This structure is returned by the `Stream::filter_map` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct FilterMap<S, F> { + stream: S, + f: F, +} + +pub fn new<S, F, B>(s: S, f: F) -> FilterMap<S, F> + where S: Stream, + F: FnMut(S::Item) -> Option<B>, +{ + FilterMap { + stream: s, + f: f, + } +} + +impl<S, F> FilterMap<S, F> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, F> ::sink::Sink for FilterMap<S, F> + where S: ::sink::Sink +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, F, B> Stream for FilterMap<S, F> + where S: Stream, + F: FnMut(S::Item) -> Option<B>, +{ + type Item = B; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<B>, S::Error> { + loop { + match try_ready!(self.stream.poll()) { + Some(e) => { + if let Some(e) = (self.f)(e) { + return Ok(Async::Ready(Some(e))) + } + } + None => return Ok(Async::Ready(None)), + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/flatten.rs b/third_party/rust/futures-0.1.31/src/stream/flatten.rs new file mode 100644 index 0000000000..4baf9045a0 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/flatten.rs @@ -0,0 +1,96 @@ +use {Poll, Async}; +use stream::Stream; + +/// A combinator used to flatten a stream-of-streams into one long stream of +/// elements. +/// +/// This combinator is created by the `Stream::flatten` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Flatten<S> + where S: Stream, +{ + stream: S, + next: Option<S::Item>, +} + +pub fn new<S>(s: S) -> Flatten<S> + where S: Stream, + S::Item: Stream, + <S::Item as Stream>::Error: From<S::Error>, +{ + Flatten { + stream: s, + next: None, + } +} + +impl<S: Stream> Flatten<S> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. 
+ pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S> ::sink::Sink for Flatten<S> + where S: ::sink::Sink + Stream +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S> Stream for Flatten<S> + where S: Stream, + S::Item: Stream, + <S::Item as Stream>::Error: From<S::Error>, +{ + type Item = <S::Item as Stream>::Item; + type Error = <S::Item as Stream>::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + loop { + if self.next.is_none() { + match try_ready!(self.stream.poll()) { + Some(e) => self.next = Some(e), + None => return Ok(Async::Ready(None)), + } + } + assert!(self.next.is_some()); + match self.next.as_mut().unwrap().poll() { + Ok(Async::Ready(None)) => self.next = None, + other => return other, + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/fold.rs b/third_party/rust/futures-0.1.31/src/stream/fold.rs new file mode 100644 index 0000000000..7fa24b449d --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/fold.rs @@ -0,0 +1,81 @@ +use core::mem; + 
+use {Future, Poll, IntoFuture, Async}; +use stream::Stream; + +/// A future used to collect all the results of a stream into one generic type. +/// +/// This future is returned by the `Stream::fold` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Fold<S, F, Fut, T> where Fut: IntoFuture { + stream: S, + f: F, + state: State<T, Fut::Future>, +} + +#[derive(Debug)] +enum State<T, F> where F: Future { + /// Placeholder state when doing work + Empty, + + /// Ready to process the next stream item; current accumulator is the `T` + Ready(T), + + /// Working on a future the process the previous stream item + Processing(F), +} + +pub fn new<S, F, Fut, T>(s: S, f: F, t: T) -> Fold<S, F, Fut, T> + where S: Stream, + F: FnMut(T, S::Item) -> Fut, + Fut: IntoFuture<Item = T>, + S::Error: From<Fut::Error>, +{ + Fold { + stream: s, + f: f, + state: State::Ready(t), + } +} + +impl<S, F, Fut, T> Future for Fold<S, F, Fut, T> + where S: Stream, + F: FnMut(T, S::Item) -> Fut, + Fut: IntoFuture<Item = T>, + S::Error: From<Fut::Error>, +{ + type Item = T; + type Error = S::Error; + + fn poll(&mut self) -> Poll<T, S::Error> { + loop { + match mem::replace(&mut self.state, State::Empty) { + State::Empty => panic!("cannot poll Fold twice"), + State::Ready(state) => { + match self.stream.poll()? { + Async::Ready(Some(e)) => { + let future = (self.f)(state, e); + let future = future.into_future(); + self.state = State::Processing(future); + } + Async::Ready(None) => return Ok(Async::Ready(state)), + Async::NotReady => { + self.state = State::Ready(state); + return Ok(Async::NotReady) + } + } + } + State::Processing(mut fut) => { + match fut.poll()? 
{ + Async::Ready(state) => self.state = State::Ready(state), + Async::NotReady => { + self.state = State::Processing(fut); + return Ok(Async::NotReady) + } + } + } + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/for_each.rs b/third_party/rust/futures-0.1.31/src/stream/for_each.rs new file mode 100644 index 0000000000..c7e1cde5bb --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/for_each.rs @@ -0,0 +1,51 @@ +use {Async, Future, IntoFuture, Poll}; +use stream::Stream; + +/// A stream combinator which executes a unit closure over each item on a +/// stream. +/// +/// This structure is returned by the `Stream::for_each` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct ForEach<S, F, U> where U: IntoFuture { + stream: S, + f: F, + fut: Option<U::Future>, +} + +pub fn new<S, F, U>(s: S, f: F) -> ForEach<S, F, U> + where S: Stream, + F: FnMut(S::Item) -> U, + U: IntoFuture<Item = (), Error = S::Error>, +{ + ForEach { + stream: s, + f: f, + fut: None, + } +} + +impl<S, F, U> Future for ForEach<S, F, U> + where S: Stream, + F: FnMut(S::Item) -> U, + U: IntoFuture<Item= (), Error = S::Error>, +{ + type Item = (); + type Error = S::Error; + + fn poll(&mut self) -> Poll<(), S::Error> { + loop { + if let Some(mut fut) = self.fut.take() { + if fut.poll()?.is_not_ready() { + self.fut = Some(fut); + return Ok(Async::NotReady); + } + } + + match try_ready!(self.stream.poll()) { + Some(e) => self.fut = Some((self.f)(e).into_future()), + None => return Ok(Async::Ready(())), + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/forward.rs b/third_party/rust/futures-0.1.31/src/stream/forward.rs new file mode 100644 index 0000000000..6722af8c20 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/forward.rs @@ -0,0 +1,110 @@ +use {Poll, Async, Future, AsyncSink}; +use stream::{Stream, Fuse}; +use sink::Sink; + +/// Future for the `Stream::forward` combinator, which sends a stream of 
values +/// to a sink and then waits until the sink has fully flushed those values. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Forward<T: Stream, U> { + sink: Option<U>, + stream: Option<Fuse<T>>, + buffered: Option<T::Item>, +} + + +pub fn new<T, U>(stream: T, sink: U) -> Forward<T, U> + where U: Sink<SinkItem=T::Item>, + T: Stream, + T::Error: From<U::SinkError>, +{ + Forward { + sink: Some(sink), + stream: Some(stream.fuse()), + buffered: None, + } +} + +impl<T, U> Forward<T, U> + where U: Sink<SinkItem=T::Item>, + T: Stream, + T::Error: From<U::SinkError>, +{ + /// Get a shared reference to the inner sink. + /// If this combinator has already been polled to completion, None will be returned. + pub fn sink_ref(&self) -> Option<&U> { + self.sink.as_ref() + } + + /// Get a mutable reference to the inner sink. + /// If this combinator has already been polled to completion, None will be returned. + pub fn sink_mut(&mut self) -> Option<&mut U> { + self.sink.as_mut() + } + + /// Get a shared reference to the inner stream. + /// If this combinator has already been polled to completion, None will be returned. + pub fn stream_ref(&self) -> Option<&T> { + self.stream.as_ref().map(|x| x.get_ref()) + } + + /// Get a mutable reference to the inner stream. + /// If this combinator has already been polled to completion, None will be returned. + pub fn stream_mut(&mut self) -> Option<&mut T> { + self.stream.as_mut().map(|x| x.get_mut()) + } + + fn take_result(&mut self) -> (T, U) { + let sink = self.sink.take() + .expect("Attempted to poll Forward after completion"); + let fuse = self.stream.take() + .expect("Attempted to poll Forward after completion"); + (fuse.into_inner(), sink) + } + + fn try_start_send(&mut self, item: T::Item) -> Poll<(), U::SinkError> { + debug_assert!(self.buffered.is_none()); + if let AsyncSink::NotReady(item) = self.sink_mut() + .expect("Attempted to poll Forward after completion") + .start_send(item)? 
+ { + self.buffered = Some(item); + return Ok(Async::NotReady) + } + Ok(Async::Ready(())) + } +} + +impl<T, U> Future for Forward<T, U> + where U: Sink<SinkItem=T::Item>, + T: Stream, + T::Error: From<U::SinkError>, +{ + type Item = (T, U); + type Error = T::Error; + + fn poll(&mut self) -> Poll<(T, U), T::Error> { + // If we've got an item buffered already, we need to write it to the + // sink before we can do anything else + if let Some(item) = self.buffered.take() { + try_ready!(self.try_start_send(item)) + } + + loop { + match self.stream.as_mut() + .expect("Attempted to poll Forward after completion") + .poll()? + { + Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)), + Async::Ready(None) => { + try_ready!(self.sink_mut().expect("Attempted to poll Forward after completion").close()); + return Ok(Async::Ready(self.take_result())) + } + Async::NotReady => { + try_ready!(self.sink_mut().expect("Attempted to poll Forward after completion").poll_complete()); + return Ok(Async::NotReady) + } + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/from_err.rs b/third_party/rust/futures-0.1.31/src/stream/from_err.rs new file mode 100644 index 0000000000..4028542dfc --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/from_err.rs @@ -0,0 +1,80 @@ +use core::marker::PhantomData; +use poll::Poll; +use Async; +use stream::Stream; + +/// A stream combinator to change the error type of a stream. +/// +/// This is created by the `Stream::from_err` method. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct FromErr<S, E> { + stream: S, + f: PhantomData<E> +} + +pub fn new<S, E>(stream: S) -> FromErr<S, E> + where S: Stream +{ + FromErr { + stream: stream, + f: PhantomData + } +} + +impl<S, E> FromErr<S, E> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. 
+ pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.stream + } +} + + +impl<S: Stream, E: From<S::Error>> Stream for FromErr<S, E> { + type Item = S::Item; + type Error = E; + + fn poll(&mut self) -> Poll<Option<S::Item>, E> { + let e = match self.stream.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + other => other, + }; + e.map_err(From::from) + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S: Stream + ::sink::Sink, E> ::sink::Sink for FromErr<S, E> { + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: Self::SinkItem) -> ::StartSend<Self::SinkItem, Self::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + self.stream.close() + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/fuse.rs b/third_party/rust/futures-0.1.31/src/stream/fuse.rs new file mode 100644 index 0000000000..e39c31f348 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/fuse.rs @@ -0,0 +1,89 @@ +use {Poll, Async}; +use stream::Stream; + +/// A stream which "fuse"s a stream once it's terminated. +/// +/// Normally streams can behave unpredictably when used after they have already +/// finished, but `Fuse` continues to return `None` from `poll` forever when +/// finished. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Fuse<S> { + stream: S, + done: bool, +} + +// Forwarding impl of Sink from the underlying stream +impl<S> ::sink::Sink for Fuse<S> + where S: ::sink::Sink +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +pub fn new<S: Stream>(s: S) -> Fuse<S> { + Fuse { stream: s, done: false } +} + +impl<S: Stream> Stream for Fuse<S> { + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + if self.done { + Ok(Async::Ready(None)) + } else { + let r = self.stream.poll(); + if let Ok(Async::Ready(None)) = r { + self.done = true; + } + r + } + } +} + +impl<S> Fuse<S> { + /// Returns whether the underlying stream has finished or not. + /// + /// If this method returns `true`, then all future calls to poll are + /// guaranteed to return `None`. If this returns `false`, then the + /// underlying stream is still in use. + pub fn is_done(&self) -> bool { + self.done + } + + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/future.rs b/third_party/rust/futures-0.1.31/src/stream/future.rs new file mode 100644 index 0000000000..5b052ee4d3 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/future.rs @@ -0,0 +1,76 @@ +use {Future, Poll, Async}; +use stream::Stream; + +/// A combinator used to temporarily convert a stream into a future. +/// +/// This future is returned by the `Stream::into_future` method. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct StreamFuture<S> { + stream: Option<S>, +} + +pub fn new<S: Stream>(s: S) -> StreamFuture<S> { + StreamFuture { stream: Some(s) } +} + +impl<S> StreamFuture<S> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + /// + /// This method returns an `Option` to account for the fact that `StreamFuture`'s + /// implementation of `Future::poll` consumes the underlying stream during polling + /// in order to return it to the caller of `Future::poll` if the stream yielded + /// an element. + pub fn get_ref(&self) -> Option<&S> { + self.stream.as_ref() + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + /// + /// This method returns an `Option` to account for the fact that `StreamFuture`'s + /// implementation of `Future::poll` consumes the underlying stream during polling + /// in order to return it to the caller of `Future::poll` if the stream yielded + /// an element. + pub fn get_mut(&mut self) -> Option<&mut S> { + self.stream.as_mut() + } + + /// Consumes this combinator, returning the underlying stream. 
+ /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + /// + /// This method returns an `Option` to account for the fact that `StreamFuture`'s + /// implementation of `Future::poll` consumes the underlying stream during polling + /// in order to return it to the caller of `Future::poll` if the stream yielded + /// an element. + pub fn into_inner(self) -> Option<S> { + self.stream + } +} + +impl<S: Stream> Future for StreamFuture<S> { + type Item = (Option<S::Item>, S); + type Error = (S::Error, S); + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + let item = { + let s = self.stream.as_mut().expect("polling StreamFuture twice"); + match s.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::Ready(e)) => Ok(e), + Err(e) => Err(e), + } + }; + let stream = self.stream.take().unwrap(); + match item { + Ok(e) => Ok(Async::Ready((e, stream))), + Err(e) => Err((e, stream)), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/futures_ordered.rs b/third_party/rust/futures-0.1.31/src/stream/futures_ordered.rs new file mode 100644 index 0000000000..561bbb5189 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/futures_ordered.rs @@ -0,0 +1,219 @@ +use std::cmp::{Eq, PartialEq, PartialOrd, Ord, Ordering}; +use std::collections::BinaryHeap; +use std::fmt::{self, Debug}; +use std::iter::FromIterator; + +use {Async, Future, IntoFuture, Poll, Stream}; +use stream::FuturesUnordered; + +#[derive(Debug)] +struct OrderWrapper<T> { + item: T, + index: usize, +} + +impl<T> PartialEq for OrderWrapper<T> { + fn eq(&self, other: &Self) -> bool { + self.index == other.index + } +} + +impl<T> Eq for OrderWrapper<T> {} + +impl<T> PartialOrd for OrderWrapper<T> { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl<T> Ord for OrderWrapper<T> { + fn cmp(&self, other: &Self) -> Ordering { + // 
BinaryHeap is a max heap, so compare backwards here. + other.index.cmp(&self.index) + } +} + +impl<T> Future for OrderWrapper<T> + where T: Future +{ + type Item = OrderWrapper<T::Item>; + type Error = T::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + let result = try_ready!(self.item.poll()); + Ok(Async::Ready(OrderWrapper { + item: result, + index: self.index + })) + } +} + +/// An unbounded queue of futures. +/// +/// This "combinator" is similar to `FuturesUnordered`, but it imposes an order +/// on top of the set of futures. While futures in the set will race to +/// completion in parallel, results will only be returned in the order their +/// originating futures were added to the queue. +/// +/// Futures are pushed into this queue and their realized values are yielded in +/// order. This structure is optimized to manage a large number of futures. +/// Futures managed by `FuturesOrdered` will only be polled when they generate +/// notifications. This reduces the required amount of work needed to coordinate +/// large numbers of futures. +/// +/// When a `FuturesOrdered` is first created, it does not contain any futures. +/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` to be +/// returned. Futures are submitted to the queue using `push`; however, the +/// future will **not** be polled at this point. `FuturesOrdered` will only +/// poll managed futures when `FuturesOrdered::poll` is called. As such, it +/// is important to call `poll` after pushing new futures. +/// +/// If `FuturesOrdered::poll` returns `Ok(Async::Ready(None))` this means that +/// the queue is currently not managing any futures. A future may be submitted +/// to the queue at a later time. At that point, a call to +/// `FuturesOrdered::poll` will either return the future's resolved value +/// **or** `Ok(Async::NotReady)` if the future has not yet completed. 
When +/// multiple futures are submitted to the queue, `FuturesOrdered::poll` will +/// return `Ok(Async::NotReady)` until the first future completes, even if +/// some of the later futures have already completed. +/// +/// Note that you can create a ready-made `FuturesOrdered` via the +/// `futures_ordered` function in the `stream` module, or you can start with an +/// empty queue with the `FuturesOrdered::new` constructor. +#[must_use = "streams do nothing unless polled"] +pub struct FuturesOrdered<T> + where T: Future +{ + in_progress: FuturesUnordered<OrderWrapper<T>>, + queued_results: BinaryHeap<OrderWrapper<T::Item>>, + next_incoming_index: usize, + next_outgoing_index: usize, +} + +/// Converts a list of futures into a `Stream` of results from the futures. +/// +/// This function will take an list of futures (e.g. a vector, an iterator, +/// etc), and return a stream. The stream will yield items as they become +/// available on the futures internally, in the order that their originating +/// futures were submitted to the queue. If the futures complete out of order, +/// items will be stored internally within `FuturesOrdered` until all preceding +/// items have been yielded. +/// +/// Note that the returned queue can also be used to dynamically push more +/// futures into the queue as they become available. +pub fn futures_ordered<I>(futures: I) -> FuturesOrdered<<I::Item as IntoFuture>::Future> + where I: IntoIterator, + I::Item: IntoFuture +{ + let mut queue = FuturesOrdered::new(); + + for future in futures { + queue.push(future.into_future()); + } + + return queue +} + +impl<T> Default for FuturesOrdered<T> where T: Future { + fn default() -> Self { + FuturesOrdered::new() + } +} + +impl<T> FuturesOrdered<T> + where T: Future +{ + /// Constructs a new, empty `FuturesOrdered` + /// + /// The returned `FuturesOrdered` does not contain any futures and, in this + /// state, `FuturesOrdered::poll` will return `Ok(Async::Ready(None))`. 
+ pub fn new() -> FuturesOrdered<T> { + FuturesOrdered { + in_progress: FuturesUnordered::new(), + queued_results: BinaryHeap::new(), + next_incoming_index: 0, + next_outgoing_index: 0, + } + } + + /// Returns the number of futures contained in the queue. + /// + /// This represents the total number of in-flight futures, both + /// those currently processing and those that have completed but + /// which are waiting for earlier futures to complete. + pub fn len(&self) -> usize { + self.in_progress.len() + self.queued_results.len() + } + + /// Returns `true` if the queue contains no futures + pub fn is_empty(&self) -> bool { + self.in_progress.is_empty() && self.queued_results.is_empty() + } + + /// Push a future into the queue. + /// + /// This function submits the given future to the internal set for managing. + /// This function will not call `poll` on the submitted future. The caller + /// must ensure that `FuturesOrdered::poll` is called in order to receive + /// task notifications. + pub fn push(&mut self, future: T) { + let wrapped = OrderWrapper { + item: future, + index: self.next_incoming_index, + }; + self.next_incoming_index += 1; + self.in_progress.push(wrapped); + } +} + +impl<T> Stream for FuturesOrdered<T> + where T: Future +{ + type Item = T::Item; + type Error = T::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + // Get any completed futures from the unordered set. + loop { + match self.in_progress.poll()? 
{ + Async::Ready(Some(result)) => self.queued_results.push(result), + Async::Ready(None) | Async::NotReady => break, + } + } + + if let Some(next_result) = self.queued_results.peek() { + // PeekMut::pop is not stable yet QQ + if next_result.index != self.next_outgoing_index { + return Ok(Async::NotReady); + } + } else if !self.in_progress.is_empty() { + return Ok(Async::NotReady); + } else { + return Ok(Async::Ready(None)); + } + + let next_result = self.queued_results.pop().unwrap(); + self.next_outgoing_index += 1; + Ok(Async::Ready(Some(next_result.item))) + } +} + +impl<T: Debug> Debug for FuturesOrdered<T> + where T: Future +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "FuturesOrdered {{ ... }}") + } +} + +impl<F: Future> FromIterator<F> for FuturesOrdered<F> { + fn from_iter<T>(iter: T) -> Self + where T: IntoIterator<Item = F> + { + let mut new = FuturesOrdered::new(); + for future in iter.into_iter() { + new.push(future); + } + new + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/futures_unordered.rs b/third_party/rust/futures-0.1.31/src/stream/futures_unordered.rs new file mode 100644 index 0000000000..3f25c86f39 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/futures_unordered.rs @@ -0,0 +1,707 @@ +//! An unbounded set of futures. + +use std::cell::UnsafeCell; +use std::fmt::{self, Debug}; +use std::iter::FromIterator; +use std::marker::PhantomData; +use std::mem; +use std::ptr; +use std::sync::atomic::Ordering::{Relaxed, SeqCst, Acquire, Release, AcqRel}; +use std::sync::atomic::{AtomicPtr, AtomicBool}; +use std::sync::{Arc, Weak}; +use std::usize; + +use {task, Stream, Future, Poll, Async}; +use executor::{Notify, UnsafeNotify, NotifyHandle}; +use task_impl::{self, AtomicTask}; + +/// An unbounded set of futures. +/// +/// This "combinator" also serves a special function in this library, providing +/// the ability to maintain a set of futures that and manage driving them all +/// to completion. 
+/// +/// Futures are pushed into this set and their realized values are yielded as +/// they are ready. This structure is optimized to manage a large number of +/// futures. Futures managed by `FuturesUnordered` will only be polled when they +/// generate notifications. This reduces the required amount of work needed to +/// coordinate large numbers of futures. +/// +/// When a `FuturesUnordered` is first created, it does not contain any futures. +/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` to be +/// returned. Futures are submitted to the set using `push`; however, the +/// future will **not** be polled at this point. `FuturesUnordered` will only +/// poll managed futures when `FuturesUnordered::poll` is called. As such, it +/// is important to call `poll` after pushing new futures. +/// +/// If `FuturesUnordered::poll` returns `Ok(Async::Ready(None))` this means that +/// the set is currently not managing any futures. A future may be submitted +/// to the set at a later time. At that point, a call to +/// `FuturesUnordered::poll` will either return the future's resolved value +/// **or** `Ok(Async::NotReady)` if the future has not yet completed. +/// +/// Note that you can create a ready-made `FuturesUnordered` via the +/// `futures_unordered` function in the `stream` module, or you can start with an +/// empty set with the `FuturesUnordered::new` constructor. +#[must_use = "streams do nothing unless polled"] +pub struct FuturesUnordered<F> { + inner: Arc<Inner<F>>, + len: usize, + head_all: *const Node<F>, +} + +unsafe impl<T: Send> Send for FuturesUnordered<T> {} +unsafe impl<T: Sync> Sync for FuturesUnordered<T> {} + +// FuturesUnordered is implemented using two linked lists. One which links all +// futures managed by a `FuturesUnordered` and one that tracks futures that have +// been scheduled for polling. The first linked list is not thread safe and is +// only accessed by the thread that owns the `FuturesUnordered` value. 
The +// second linked list is an implementation of the intrusive MPSC queue algorithm +// described by 1024cores.net. +// +// When a future is submitted to the set a node is allocated and inserted in +// both linked lists. The next call to `poll` will (eventually) see this node +// and call `poll` on the future. +// +// Before a managed future is polled, the current task's `Notify` is replaced +// with one that is aware of the specific future being run. This ensures that +// task notifications generated by that specific future are visible to +// `FuturesUnordered`. When a notification is received, the node is scheduled +// for polling by being inserted into the concurrent linked list. +// +// Each node uses an `AtomicUsize` to track it's state. The node state is the +// reference count (the number of outstanding handles to the node) as well as a +// flag tracking if the node is currently inserted in the atomic queue. When the +// future is notified, it will only insert itself into the linked list if it +// isn't currently inserted. + +#[allow(missing_debug_implementations)] +struct Inner<T> { + // The task using `FuturesUnordered`. + parent: AtomicTask, + + // Head/tail of the readiness queue + head_readiness: AtomicPtr<Node<T>>, + tail_readiness: UnsafeCell<*const Node<T>>, + stub: Arc<Node<T>>, +} + +struct Node<T> { + // The future + future: UnsafeCell<Option<T>>, + + // Next pointer for linked list tracking all active nodes + next_all: UnsafeCell<*const Node<T>>, + + // Previous node in linked list tracking all active nodes + prev_all: UnsafeCell<*const Node<T>>, + + // Next pointer in readiness queue + next_readiness: AtomicPtr<Node<T>>, + + // Queue that we'll be enqueued to when notified + queue: Weak<Inner<T>>, + + // Whether or not this node is currently in the mpsc queue. 
+ queued: AtomicBool, +} + +enum Dequeue<T> { + Data(*const Node<T>), + Empty, + Inconsistent, +} + +impl<T> Default for FuturesUnordered<T> where T: Future { + fn default() -> Self { + FuturesUnordered::new() + } +} + +impl<T> FuturesUnordered<T> + where T: Future, +{ + /// Constructs a new, empty `FuturesUnordered` + /// + /// The returned `FuturesUnordered` does not contain any futures and, in this + /// state, `FuturesUnordered::poll` will return `Ok(Async::Ready(None))`. + pub fn new() -> FuturesUnordered<T> { + let stub = Arc::new(Node { + future: UnsafeCell::new(None), + next_all: UnsafeCell::new(ptr::null()), + prev_all: UnsafeCell::new(ptr::null()), + next_readiness: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + queue: Weak::new(), + }); + let stub_ptr = &*stub as *const Node<T>; + let inner = Arc::new(Inner { + parent: AtomicTask::new(), + head_readiness: AtomicPtr::new(stub_ptr as *mut _), + tail_readiness: UnsafeCell::new(stub_ptr), + stub: stub, + }); + + FuturesUnordered { + len: 0, + head_all: ptr::null_mut(), + inner: inner, + } + } +} + +impl<T> FuturesUnordered<T> { + /// Returns the number of futures contained in the set. + /// + /// This represents the total number of in-flight futures. + pub fn len(&self) -> usize { + self.len + } + + /// Returns `true` if the set contains no futures + pub fn is_empty(&self) -> bool { + self.len == 0 + } + + /// Push a future into the set. + /// + /// This function submits the given future to the set for managing. This + /// function will not call `poll` on the submitted future. The caller must + /// ensure that `FuturesUnordered::poll` is called in order to receive task + /// notifications. 
+ pub fn push(&mut self, future: T) { + let node = Arc::new(Node { + future: UnsafeCell::new(Some(future)), + next_all: UnsafeCell::new(ptr::null_mut()), + prev_all: UnsafeCell::new(ptr::null_mut()), + next_readiness: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + queue: Arc::downgrade(&self.inner), + }); + + // Right now our node has a strong reference count of 1. We transfer + // ownership of this reference count to our internal linked list + // and we'll reclaim ownership through the `unlink` function below. + let ptr = self.link(node); + + // We'll need to get the future "into the system" to start tracking it, + // e.g. getting its unpark notifications going to us tracking which + // futures are ready. To do that we unconditionally enqueue it for + // polling here. + self.inner.enqueue(ptr); + } + + /// Returns an iterator that allows modifying each future in the set. + pub fn iter_mut(&mut self) -> IterMut<T> { + IterMut { + node: self.head_all, + len: self.len, + _marker: PhantomData + } + } + + fn release_node(&mut self, node: Arc<Node<T>>) { + // The future is done, try to reset the queued flag. This will prevent + // `notify` from doing any work in the future + let prev = node.queued.swap(true, SeqCst); + + // Drop the future, even if it hasn't finished yet. This is safe + // because we're dropping the future on the thread that owns + // `FuturesUnordered`, which correctly tracks T's lifetimes and such. + unsafe { + drop((*node.future.get()).take()); + } + + // If the queued flag was previously set then it means that this node + // is still in our internal mpsc queue. We then transfer ownership + // of our reference count to the mpsc queue, and it'll come along and + // free it later, noticing that the future is `None`. + // + // If, however, the queued flag was *not* set then we're safe to + // release our reference count on the internal node. 
The queued flag + // was set above so all future `enqueue` operations will not actually + // enqueue the node, so our node will never see the mpsc queue again. + // The node itself will be deallocated once all reference counts have + // been dropped by the various owning tasks elsewhere. + if prev { + mem::forget(node); + } + } + + /// Insert a new node into the internal linked list. + fn link(&mut self, node: Arc<Node<T>>) -> *const Node<T> { + let ptr = arc2ptr(node); + unsafe { + *(*ptr).next_all.get() = self.head_all; + if !self.head_all.is_null() { + *(*self.head_all).prev_all.get() = ptr; + } + } + + self.head_all = ptr; + self.len += 1; + return ptr + } + + /// Remove the node from the linked list tracking all nodes currently + /// managed by `FuturesUnordered`. + unsafe fn unlink(&mut self, node: *const Node<T>) -> Arc<Node<T>> { + let node = ptr2arc(node); + let next = *node.next_all.get(); + let prev = *node.prev_all.get(); + *node.next_all.get() = ptr::null_mut(); + *node.prev_all.get() = ptr::null_mut(); + + if !next.is_null() { + *(*next).prev_all.get() = prev; + } + + if !prev.is_null() { + *(*prev).next_all.get() = next; + } else { + self.head_all = next; + } + self.len -= 1; + return node + } +} + +impl<T> Stream for FuturesUnordered<T> + where T: Future +{ + type Item = T::Item; + type Error = T::Error; + + fn poll(&mut self) -> Poll<Option<T::Item>, T::Error> { + // Variable to determine how many times it is allowed to poll underlying + // futures without yielding. + // + // A single call to `poll_next` may potentially do a lot of work before + // yielding. This happens in particular if the underlying futures are awoken + // frequently but continue to return `Pending`. This is problematic if other + // tasks are waiting on the executor, since they do not get to run. This value + // caps the number of calls to `poll` on underlying futures a single call to + // `poll_next` is allowed to make. + // + // The value is the length of FuturesUnordered. 
This ensures that each + // future is polled only once at most per iteration. + // + // See also https://github.com/rust-lang/futures-rs/issues/2047. + let yield_every = self.len(); + + // Keep track of how many child futures we have polled, + // in case we want to forcibly yield. + let mut polled = 0; + + // Ensure `parent` is correctly set. + self.inner.parent.register(); + + loop { + let node = match unsafe { self.inner.dequeue() } { + Dequeue::Empty => { + if self.is_empty() { + return Ok(Async::Ready(None)); + } else { + return Ok(Async::NotReady) + } + } + Dequeue::Inconsistent => { + // At this point, it may be worth yielding the thread & + // spinning a few times... but for now, just yield using the + // task system. + task::current().notify(); + return Ok(Async::NotReady); + } + Dequeue::Data(node) => node, + }; + + debug_assert!(node != self.inner.stub()); + + unsafe { + let mut future = match (*(*node).future.get()).take() { + Some(future) => future, + + // If the future has already gone away then we're just + // cleaning out this node. See the comment in + // `release_node` for more information, but we're basically + // just taking ownership of our reference count here. + None => { + let node = ptr2arc(node); + assert!((*node.next_all.get()).is_null()); + assert!((*node.prev_all.get()).is_null()); + continue + } + }; + + // Unset queued flag... this must be done before + // polling. This ensures that the future gets + // rescheduled if it is notified **during** a call + // to `poll`. + let prev = (*node).queued.swap(false, SeqCst); + assert!(prev); + + // We're going to need to be very careful if the `poll` + // function below panics. We need to (a) not leak memory and + // (b) ensure that we still don't have any use-after-frees. To + // manage this we do a few things: + // + // * This "bomb" here will call `release_node` if dropped + // abnormally. That way we'll be sure the memory management + // of the `node` is managed correctly. 
+ // * The future was extracted above (taken ownership). That way + // if it panics we're guaranteed that the future is + // dropped on this thread and doesn't accidentally get + // dropped on a different thread (bad). + // * We unlink the node from our internal queue to preemptively + // assume it'll panic, in which case we'll want to discard it + // regardless. + struct Bomb<'a, T: 'a> { + queue: &'a mut FuturesUnordered<T>, + node: Option<Arc<Node<T>>>, + } + impl<'a, T> Drop for Bomb<'a, T> { + fn drop(&mut self) { + if let Some(node) = self.node.take() { + self.queue.release_node(node); + } + } + } + let mut bomb = Bomb { + node: Some(self.unlink(node)), + queue: self, + }; + + // Poll the underlying future with the appropriate `notify` + // implementation. This is where a large bit of the unsafety + // starts to stem from internally. The `notify` instance itself + // is basically just our `Arc<Node<T>>` and tracks the mpsc + // queue of ready futures. + // + // Critically though `Node<T>` won't actually access `T`, the + // future, while it's floating around inside of `Task` + // instances. These structs will basically just use `T` to size + // the internal allocation, appropriately accessing fields and + // deallocating the node if need be. + let res = { + let notify = NodeToHandle(bomb.node.as_ref().unwrap()); + task_impl::with_notify(¬ify, 0, || { + future.poll() + }) + }; + polled += 1; + + let ret = match res { + Ok(Async::NotReady) => { + let node = bomb.node.take().unwrap(); + *node.future.get() = Some(future); + bomb.queue.link(node); + + if polled == yield_every { + // We have polled a large number of futures in a row without yielding. + // To ensure we do not starve other tasks waiting on the executor, + // we yield here, but immediately wake ourselves up to continue. 
+ task_impl::current().notify(); + return Ok(Async::NotReady); + } + continue + } + Ok(Async::Ready(e)) => Ok(Async::Ready(Some(e))), + Err(e) => Err(e), + }; + return ret + } + } + } +} + +impl<T: Debug> Debug for FuturesUnordered<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "FuturesUnordered {{ ... }}") + } +} + +impl<T> Drop for FuturesUnordered<T> { + fn drop(&mut self) { + // When a `FuturesUnordered` is dropped we want to drop all futures associated + // with it. At the same time though there may be tons of `Task` handles + // flying around which contain `Node<T>` references inside them. We'll + // let those naturally get deallocated when the `Task` itself goes out + // of scope or gets notified. + unsafe { + while !self.head_all.is_null() { + let head = self.head_all; + let node = self.unlink(head); + self.release_node(node); + } + } + + // Note that at this point we could still have a bunch of nodes in the + // mpsc queue. None of those nodes, however, have futures associated + // with them so they're safe to destroy on any thread. At this point + // the `FuturesUnordered` struct, the owner of the one strong reference + // to `Inner<T>` will drop the strong reference. At that point + // whichever thread releases the strong refcount last (be it this + // thread or some other thread as part of an `upgrade`) will clear out + // the mpsc queue and free all remaining nodes. + // + // While that freeing operation isn't guaranteed to happen here, it's + // guaranteed to happen "promptly" as no more "blocking work" will + // happen while there's a strong refcount held. + } +} + +impl<F: Future> FromIterator<F> for FuturesUnordered<F> { + fn from_iter<T>(iter: T) -> Self + where T: IntoIterator<Item = F> + { + let mut new = FuturesUnordered::new(); + for future in iter.into_iter() { + new.push(future); + } + new + } +} + +#[derive(Debug)] +/// Mutable iterator over all futures in the unordered set. 
+pub struct IterMut<'a, F: 'a> { + node: *const Node<F>, + len: usize, + _marker: PhantomData<&'a mut FuturesUnordered<F>> +} + +impl<'a, F> Iterator for IterMut<'a, F> { + type Item = &'a mut F; + + fn next(&mut self) -> Option<&'a mut F> { + if self.node.is_null() { + return None; + } + unsafe { + let future = (*(*self.node).future.get()).as_mut().unwrap(); + let next = *(*self.node).next_all.get(); + self.node = next; + self.len -= 1; + return Some(future); + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (self.len, Some(self.len)) + } +} + +impl<'a, F> ExactSizeIterator for IterMut<'a, F> {} + +impl<T> Inner<T> { + /// The enqueue function from the 1024cores intrusive MPSC queue algorithm. + fn enqueue(&self, node: *const Node<T>) { + unsafe { + debug_assert!((*node).queued.load(Relaxed)); + + // This action does not require any coordination + (*node).next_readiness.store(ptr::null_mut(), Relaxed); + + // Note that these atomic orderings come from 1024cores + let node = node as *mut _; + let prev = self.head_readiness.swap(node, AcqRel); + (*prev).next_readiness.store(node, Release); + } + } + + /// The dequeue function from the 1024cores intrusive MPSC queue algorithm + /// + /// Note that this unsafe as it required mutual exclusion (only one thread + /// can call this) to be guaranteed elsewhere. 
+ unsafe fn dequeue(&self) -> Dequeue<T> { + let mut tail = *self.tail_readiness.get(); + let mut next = (*tail).next_readiness.load(Acquire); + + if tail == self.stub() { + if next.is_null() { + return Dequeue::Empty; + } + + *self.tail_readiness.get() = next; + tail = next; + next = (*next).next_readiness.load(Acquire); + } + + if !next.is_null() { + *self.tail_readiness.get() = next; + debug_assert!(tail != self.stub()); + return Dequeue::Data(tail); + } + + if self.head_readiness.load(Acquire) as *const _ != tail { + return Dequeue::Inconsistent; + } + + self.enqueue(self.stub()); + + next = (*tail).next_readiness.load(Acquire); + + if !next.is_null() { + *self.tail_readiness.get() = next; + return Dequeue::Data(tail); + } + + Dequeue::Inconsistent + } + + fn stub(&self) -> *const Node<T> { + &*self.stub + } +} + +impl<T> Drop for Inner<T> { + fn drop(&mut self) { + // Once we're in the destructor for `Inner<T>` we need to clear out the + // mpsc queue of nodes if there's anything left in there. + // + // Note that each node has a strong reference count associated with it + // which is owned by the mpsc queue. All nodes should have had their + // futures dropped already by the `FuturesUnordered` destructor above, + // so we're just pulling out nodes and dropping their refcounts. 
+ unsafe { + loop { + match self.dequeue() { + Dequeue::Empty => break, + Dequeue::Inconsistent => abort("inconsistent in drop"), + Dequeue::Data(ptr) => drop(ptr2arc(ptr)), + } + } + } + } +} + +#[allow(missing_debug_implementations)] +struct NodeToHandle<'a, T: 'a>(&'a Arc<Node<T>>); + +impl<'a, T> Clone for NodeToHandle<'a, T> { + fn clone(&self) -> Self { + NodeToHandle(self.0) + } +} + +impl<'a, T> From<NodeToHandle<'a, T>> for NotifyHandle { + fn from(handle: NodeToHandle<'a, T>) -> NotifyHandle { + unsafe { + let ptr = handle.0.clone(); + let ptr = mem::transmute::<Arc<Node<T>>, *mut ArcNode<T>>(ptr); + NotifyHandle::new(hide_lt(ptr)) + } + } +} + +struct ArcNode<T>(PhantomData<T>); + +// We should never touch `T` on any thread other than the one owning +// `FuturesUnordered`, so this should be a safe operation. +unsafe impl<T> Send for ArcNode<T> {} +unsafe impl<T> Sync for ArcNode<T> {} + +impl<T> Notify for ArcNode<T> { + fn notify(&self, _id: usize) { + unsafe { + let me: *const ArcNode<T> = self; + let me: *const *const ArcNode<T> = &me; + let me = me as *const Arc<Node<T>>; + Node::notify(&*me) + } + } +} + +unsafe impl<T> UnsafeNotify for ArcNode<T> { + unsafe fn clone_raw(&self) -> NotifyHandle { + let me: *const ArcNode<T> = self; + let me: *const *const ArcNode<T> = &me; + let me = &*(me as *const Arc<Node<T>>); + NodeToHandle(me).into() + } + + unsafe fn drop_raw(&self) { + let mut me: *const ArcNode<T> = self; + let me = &mut me as *mut *const ArcNode<T> as *mut Arc<Node<T>>; + ptr::drop_in_place(me); + } +} + +unsafe fn hide_lt<T>(p: *mut ArcNode<T>) -> *mut UnsafeNotify { + mem::transmute(p as *mut UnsafeNotify) +} + +impl<T> Node<T> { + fn notify(me: &Arc<Node<T>>) { + let inner = match me.queue.upgrade() { + Some(inner) => inner, + None => return, + }; + + // It's our job to notify the node that it's ready to get polled, + // meaning that we need to enqueue it into the readiness queue. 
To + // do this we flag that we're ready to be queued, and if successful + // we then do the literal queueing operation, ensuring that we're + // only queued once. + // + // Once the node is inserted we be sure to notify the parent task, + // as it'll want to come along and pick up our node now. + // + // Note that we don't change the reference count of the node here, + // we're just enqueueing the raw pointer. The `FuturesUnordered` + // implementation guarantees that if we set the `queued` flag true that + // there's a reference count held by the main `FuturesUnordered` queue + // still. + let prev = me.queued.swap(true, SeqCst); + if !prev { + inner.enqueue(&**me); + inner.parent.notify(); + } + } +} + +impl<T> Drop for Node<T> { + fn drop(&mut self) { + // Currently a `Node<T>` is sent across all threads for any lifetime, + // regardless of `T`. This means that for memory safety we can't + // actually touch `T` at any time except when we have a reference to the + // `FuturesUnordered` itself. + // + // Consequently it *should* be the case that we always drop futures from + // the `FuturesUnordered` instance, but this is a bomb in place to catch + // any bugs in that logic. + unsafe { + if (*self.future.get()).is_some() { + abort("future still here when dropping"); + } + } + } +} + +fn arc2ptr<T>(ptr: Arc<T>) -> *const T { + let addr = &*ptr as *const T; + mem::forget(ptr); + return addr +} + +unsafe fn ptr2arc<T>(ptr: *const T) -> Arc<T> { + let anchor = mem::transmute::<usize, Arc<T>>(0x10); + let addr = &*anchor as *const T; + mem::forget(anchor); + let offset = addr as isize - 0x10; + mem::transmute::<isize, Arc<T>>(ptr as isize - offset) +} + +fn abort(s: &str) -> ! 
{ + struct DoublePanic; + + impl Drop for DoublePanic { + fn drop(&mut self) { + panic!("panicking twice to abort the program"); + } + } + + let _bomb = DoublePanic; + panic!("{}", s); +} diff --git a/third_party/rust/futures-0.1.31/src/stream/inspect.rs b/third_party/rust/futures-0.1.31/src/stream/inspect.rs new file mode 100644 index 0000000000..fc8f7f4ea2 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/inspect.rs @@ -0,0 +1,84 @@ +use {Stream, Poll, Async}; + +/// Do something with the items of a stream, passing it on. +/// +/// This is created by the `Stream::inspect` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Inspect<S, F> where S: Stream { + stream: S, + inspect: F, +} + +pub fn new<S, F>(stream: S, f: F) -> Inspect<S, F> + where S: Stream, + F: FnMut(&S::Item) -> (), +{ + Inspect { + stream: stream, + inspect: f, + } +} + +impl<S: Stream, F> Inspect<S, F> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, F> ::sink::Sink for Inspect<S, F> + where S: ::sink::Sink + Stream +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, F> Stream for Inspect<S, F> + where S: Stream, + F: FnMut(&S::Item), +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + match try_ready!(self.stream.poll()) { + Some(e) => { + (self.inspect)(&e); + Ok(Async::Ready(Some(e))) + } + None => Ok(Async::Ready(None)), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/inspect_err.rs b/third_party/rust/futures-0.1.31/src/stream/inspect_err.rs new file mode 100644 index 0000000000..5c56a217ff --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/inspect_err.rs @@ -0,0 +1,81 @@ +use {Stream, Poll}; + +/// Do something with the error of a stream, passing it on. +/// +/// This is created by the `Stream::inspect_err` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct InspectErr<S, F> where S: Stream { + stream: S, + inspect: F, +} + +pub fn new<S, F>(stream: S, f: F) -> InspectErr<S, F> + where S: Stream, + F: FnMut(&S::Error) -> (), +{ + InspectErr { + stream: stream, + inspect: f, + } +} + +impl<S: Stream, F> InspectErr<S, F> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. 
+ /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, F> ::sink::Sink for InspectErr<S, F> + where S: ::sink::Sink + Stream +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, F> Stream for InspectErr<S, F> + where S: Stream, + F: FnMut(&S::Error), +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + self.stream.poll().map_err(|e| { + (self.inspect)(&e); + e + }) + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/iter.rs b/third_party/rust/futures-0.1.31/src/stream/iter.rs new file mode 100644 index 0000000000..e0b9379353 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/iter.rs @@ -0,0 +1,46 @@ +#![deprecated(note = "implementation moved to `iter_ok` and `iter_result`")] +#![allow(deprecated)] + +use Poll; +use stream::{iter_result, IterResult, Stream}; + +/// A stream which is just a shim over an underlying instance of `Iterator`. +/// +/// This stream will never block and is always ready. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Iter<I>(IterResult<I>); + +/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready +/// to yield the next value. +/// +/// Iterators in Rust don't express the ability to block, so this adapter simply +/// always calls `iter.next()` and returns that. +/// +/// ```rust +/// use futures::*; +/// +/// let mut stream = stream::iter(vec![Ok(17), Err(false), Ok(19)]); +/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll()); +/// assert_eq!(Err(false), stream.poll()); +/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll()); +/// assert_eq!(Ok(Async::Ready(None)), stream.poll()); +/// ``` +#[inline] +pub fn iter<J, T, E>(i: J) -> Iter<J::IntoIter> + where J: IntoIterator<Item=Result<T, E>>, +{ + Iter(iter_result(i)) +} + +impl<I, T, E> Stream for Iter<I> + where I: Iterator<Item=Result<T, E>>, +{ + type Item = T; + type Error = E; + + #[inline] + fn poll(&mut self) -> Poll<Option<T>, E> { + self.0.poll() + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/iter_ok.rs b/third_party/rust/futures-0.1.31/src/stream/iter_ok.rs new file mode 100644 index 0000000000..9c8d871399 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/iter_ok.rs @@ -0,0 +1,48 @@ +use core::marker; + +use {Async, Poll}; +use stream::Stream; + +/// A stream which is just a shim over an underlying instance of `Iterator`. +/// +/// This stream will never block and is always ready. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct IterOk<I, E> { + iter: I, + _marker: marker::PhantomData<fn() -> E>, +} + +/// Converts an `Iterator` into a `Stream` which is always ready +/// to yield the next value. +/// +/// Iterators in Rust don't express the ability to block, so this adapter +/// simply always calls `iter.next()` and returns that. 
+/// +/// ```rust +/// use futures::*; +/// +/// let mut stream = stream::iter_ok::<_, ()>(vec![17, 19]); +/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll()); +/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll()); +/// assert_eq!(Ok(Async::Ready(None)), stream.poll()); +/// ``` +pub fn iter_ok<I, E>(i: I) -> IterOk<I::IntoIter, E> + where I: IntoIterator, +{ + IterOk { + iter: i.into_iter(), + _marker: marker::PhantomData, + } +} + +impl<I, E> Stream for IterOk<I, E> + where I: Iterator, +{ + type Item = I::Item; + type Error = E; + + fn poll(&mut self) -> Poll<Option<I::Item>, E> { + Ok(Async::Ready(self.iter.next())) + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/iter_result.rs b/third_party/rust/futures-0.1.31/src/stream/iter_result.rs new file mode 100644 index 0000000000..4eef5da08e --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/iter_result.rs @@ -0,0 +1,51 @@ +use {Async, Poll}; +use stream::Stream; + +/// A stream which is just a shim over an underlying instance of `Iterator`. +/// +/// This stream will never block and is always ready. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct IterResult<I> { + iter: I, +} + +/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready +/// to yield the next value. +/// +/// Iterators in Rust don't express the ability to block, so this adapter simply +/// always calls `iter.next()` and returns that. 
+/// +/// ```rust +/// use futures::*; +/// +/// let mut stream = stream::iter_result(vec![Ok(17), Err(false), Ok(19)]); +/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll()); +/// assert_eq!(Err(false), stream.poll()); +/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll()); +/// assert_eq!(Ok(Async::Ready(None)), stream.poll()); +/// ``` +pub fn iter_result<J, T, E>(i: J) -> IterResult<J::IntoIter> +where + J: IntoIterator<Item = Result<T, E>>, +{ + IterResult { + iter: i.into_iter(), + } +} + +impl<I, T, E> Stream for IterResult<I> +where + I: Iterator<Item = Result<T, E>>, +{ + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<Option<T>, E> { + match self.iter.next() { + Some(Ok(e)) => Ok(Async::Ready(Some(e))), + Some(Err(e)) => Err(e), + None => Ok(Async::Ready(None)), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/map.rs b/third_party/rust/futures-0.1.31/src/stream/map.rs new file mode 100644 index 0000000000..702e980b3f --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/map.rs @@ -0,0 +1,81 @@ +use {Async, Poll}; +use stream::Stream; + +/// A stream combinator which will change the type of a stream from one +/// type to another. +/// +/// This is produced by the `Stream::map` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Map<S, F> { + stream: S, + f: F, +} + +pub fn new<S, F, U>(s: S, f: F) -> Map<S, F> + where S: Stream, + F: FnMut(S::Item) -> U, +{ + Map { + stream: s, + f: f, + } +} + +impl<S, F> Map<S, F> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. 
+ pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, F> ::sink::Sink for Map<S, F> + where S: ::sink::Sink +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, F, U> Stream for Map<S, F> + where S: Stream, + F: FnMut(S::Item) -> U, +{ + type Item = U; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<U>, S::Error> { + let option = try_ready!(self.stream.poll()); + Ok(Async::Ready(option.map(&mut self.f))) + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/map_err.rs b/third_party/rust/futures-0.1.31/src/stream/map_err.rs new file mode 100644 index 0000000000..8d1c0fc083 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/map_err.rs @@ -0,0 +1,80 @@ +use Poll; +use stream::Stream; + +/// A stream combinator which will change the error type of a stream from one +/// type to another. +/// +/// This is produced by the `Stream::map_err` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct MapErr<S, F> { + stream: S, + f: F, +} + +pub fn new<S, F, U>(s: S, f: F) -> MapErr<S, F> + where S: Stream, + F: FnMut(S::Error) -> U, +{ + MapErr { + stream: s, + f: f, + } +} + +impl<S, F> MapErr<S, F> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. 
+ pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, F> ::sink::Sink for MapErr<S, F> + where S: ::sink::Sink +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, F, U> Stream for MapErr<S, F> + where S: Stream, + F: FnMut(S::Error) -> U, +{ + type Item = S::Item; + type Error = U; + + fn poll(&mut self) -> Poll<Option<S::Item>, U> { + self.stream.poll().map_err(&mut self.f) + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/merge.rs b/third_party/rust/futures-0.1.31/src/stream/merge.rs new file mode 100644 index 0000000000..af7505e69a --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/merge.rs @@ -0,0 +1,82 @@ +#![deprecated(note = "functionality provided by `select` now")] +#![allow(deprecated)] + +use {Poll, Async}; +use stream::{Stream, Fuse}; + +/// An adapter for merging the output of two streams. +/// +/// The merged stream produces items from one or both of the underlying +/// streams as they become available. 
Errors, however, are not merged: you +/// get at most one error at a time. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Merge<S1, S2: Stream> { + stream1: Fuse<S1>, + stream2: Fuse<S2>, + queued_error: Option<S2::Error>, +} + +pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Merge<S1, S2> + where S1: Stream, S2: Stream<Error = S1::Error> +{ + Merge { + stream1: stream1.fuse(), + stream2: stream2.fuse(), + queued_error: None, + } +} + +/// An item returned from a merge stream, which represents an item from one or +/// both of the underlying streams. +#[derive(Debug)] +pub enum MergedItem<I1, I2> { + /// An item from the first stream + First(I1), + /// An item from the second stream + Second(I2), + /// Items from both streams + Both(I1, I2), +} + +impl<S1, S2> Stream for Merge<S1, S2> + where S1: Stream, S2: Stream<Error = S1::Error> +{ + type Item = MergedItem<S1::Item, S2::Item>; + type Error = S1::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + if let Some(e) = self.queued_error.take() { + return Err(e) + } + + match self.stream1.poll()? 
{ + Async::NotReady => { + match try_ready!(self.stream2.poll()) { + Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))), + None => Ok(Async::NotReady), + } + } + Async::Ready(None) => { + match try_ready!(self.stream2.poll()) { + Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))), + None => Ok(Async::Ready(None)), + } + } + Async::Ready(Some(item1)) => { + match self.stream2.poll() { + Err(e) => { + self.queued_error = Some(e); + Ok(Async::Ready(Some(MergedItem::First(item1)))) + } + Ok(Async::NotReady) | Ok(Async::Ready(None)) => { + Ok(Async::Ready(Some(MergedItem::First(item1)))) + } + Ok(Async::Ready(Some(item2))) => { + Ok(Async::Ready(Some(MergedItem::Both(item1, item2)))) + } + } + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/mod.rs b/third_party/rust/futures-0.1.31/src/stream/mod.rs new file mode 100644 index 0000000000..2d90362470 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/mod.rs @@ -0,0 +1,1146 @@ +//! Asynchronous streams +//! +//! This module contains the `Stream` trait and a number of adaptors for this +//! trait. This trait is very similar to the `Iterator` trait in the standard +//! library except that it expresses the concept of blocking as well. A stream +//! here is a sequential sequence of values which may take some amount of time +//! in between to produce. +//! +//! A stream may request that it is blocked between values while the next value +//! is calculated, and provides a way to get notified once the next value is +//! ready as well. +//! +//! You can find more information/tutorials about streams [online at +//! https://tokio.rs][online] +//! +//! 
[online]: https://tokio.rs/docs/getting-started/streams-and-sinks/ + +use {IntoFuture, Poll}; + +mod iter; +#[allow(deprecated)] +pub use self::iter::{iter, Iter}; +#[cfg(feature = "with-deprecated")] +#[allow(deprecated)] +pub use self::Iter as IterStream; +mod iter_ok; +pub use self::iter_ok::{iter_ok, IterOk}; +mod iter_result; +pub use self::iter_result::{iter_result, IterResult}; + +mod repeat; +pub use self::repeat::{repeat, Repeat}; + +mod and_then; +mod chain; +mod concat; +mod empty; +mod filter; +mod filter_map; +mod flatten; +mod fold; +mod for_each; +mod from_err; +mod fuse; +mod future; +mod inspect; +mod inspect_err; +mod map; +mod map_err; +mod merge; +mod once; +mod or_else; +mod peek; +mod poll_fn; +mod select; +mod skip; +mod skip_while; +mod take; +mod take_while; +mod then; +mod unfold; +mod zip; +mod forward; +pub use self::and_then::AndThen; +pub use self::chain::Chain; +#[allow(deprecated)] +pub use self::concat::Concat; +pub use self::concat::Concat2; +pub use self::empty::{Empty, empty}; +pub use self::filter::Filter; +pub use self::filter_map::FilterMap; +pub use self::flatten::Flatten; +pub use self::fold::Fold; +pub use self::for_each::ForEach; +pub use self::from_err::FromErr; +pub use self::fuse::Fuse; +pub use self::future::StreamFuture; +pub use self::inspect::Inspect; +pub use self::inspect_err::InspectErr; +pub use self::map::Map; +pub use self::map_err::MapErr; +#[allow(deprecated)] +pub use self::merge::{Merge, MergedItem}; +pub use self::once::{Once, once}; +pub use self::or_else::OrElse; +pub use self::peek::Peekable; +pub use self::poll_fn::{poll_fn, PollFn}; +pub use self::select::Select; +pub use self::skip::Skip; +pub use self::skip_while::SkipWhile; +pub use self::take::Take; +pub use self::take_while::TakeWhile; +pub use self::then::Then; +pub use self::unfold::{Unfold, unfold}; +pub use self::zip::Zip; +pub use self::forward::Forward; +use sink::{Sink}; + +if_std! 
{ + use std; + + mod buffered; + mod buffer_unordered; + mod catch_unwind; + mod chunks; + mod collect; + mod wait; + mod channel; + mod split; + pub mod futures_unordered; + mod futures_ordered; + pub use self::buffered::Buffered; + pub use self::buffer_unordered::BufferUnordered; + pub use self::catch_unwind::CatchUnwind; + pub use self::chunks::Chunks; + pub use self::collect::Collect; + pub use self::wait::Wait; + pub use self::split::{SplitStream, SplitSink, ReuniteError}; + pub use self::futures_unordered::FuturesUnordered; + pub use self::futures_ordered::{futures_ordered, FuturesOrdered}; + + #[doc(hidden)] + #[cfg(feature = "with-deprecated")] + #[allow(deprecated)] + pub use self::channel::{channel, Sender, Receiver, FutureSender, SendError}; + + /// A type alias for `Box<Stream + Send>` + #[doc(hidden)] + #[deprecated(note = "removed without replacement, recommended to use a \ + local extension trait or function if needed, more \ + details in https://github.com/rust-lang-nursery/futures-rs/issues/228")] + pub type BoxStream<T, E> = ::std::boxed::Box<Stream<Item = T, Error = E> + Send>; + + impl<S: ?Sized + Stream> Stream for ::std::boxed::Box<S> { + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + (**self).poll() + } + } +} + +/// A stream of values, not all of which may have been produced yet. +/// +/// `Stream` is a trait to represent any source of sequential events or items +/// which acts like an iterator but long periods of time may pass between +/// items. Like `Future` the methods of `Stream` never block and it is thus +/// suitable for programming in an asynchronous fashion. This trait is very +/// similar to the `Iterator` trait in the standard library where `Some` is +/// used to signal elements of the stream and `None` is used to indicate that +/// the stream is finished. 
+/// +/// Like futures a stream has basic combinators to transform the stream, perform +/// more work on each item, etc. +/// +/// You can find more information/tutorials about streams [online at +/// https://tokio.rs][online] +/// +/// [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/ +/// +/// # Streams as Futures +/// +/// Any instance of `Stream` can also be viewed as a `Future` where the resolved +/// value is the next item in the stream along with the rest of the stream. The +/// `into_future` adaptor can be used here to convert any stream into a future +/// for use with other future methods like `join` and `select`. +/// +/// # Errors +/// +/// Streams, like futures, can also model errors in their computation. All +/// streams have an associated `Error` type like with futures. Currently as of +/// the 0.1 release of this library an error on a stream **does not terminate +/// the stream**. That is, after one error is received, another error may be +/// received from the same stream (it's valid to keep polling). +/// +/// This property of streams, however, is [being considered] for change in 0.2 +/// where an error on a stream is similar to `None`, it terminates the stream +/// entirely. If one of these use cases suits you perfectly and not the other, +/// please feel welcome to comment on [the issue][being considered]! +/// +/// [being considered]: https://github.com/rust-lang-nursery/futures-rs/issues/206 +#[must_use = "streams do nothing unless polled"] +pub trait Stream { + /// The type of item this stream will yield on success. + type Item; + + /// The type of error this stream may generate. + type Error; + + /// Attempt to pull out the next value of this stream, returning `None` if + /// the stream is finished. + /// + /// This method, like `Future::poll`, is the sole method of pulling out a + /// value from a stream. 
This method must also be run within the context of + /// a task typically and implementors of this trait must ensure that + /// implementations of this method do not block, as it may cause consumers + /// to behave badly. + /// + /// # Return value + /// + /// If `NotReady` is returned then this stream's next value is not ready + /// yet and implementations will ensure that the current task will be + /// notified when the next value may be ready. If `Some` is returned then + /// the returned value represents the next value on the stream. `Err` + /// indicates an error happened, while `Ok` indicates whether there was a + /// new item on the stream or whether the stream has terminated. + /// + /// # Panics + /// + /// Once a stream is finished, that is `Ready(None)` has been returned, + /// further calls to `poll` may result in a panic or other "bad behavior". + /// If this is difficult to guard against then the `fuse` adapter can be + /// used to ensure that `poll` always has well-defined semantics. + // TODO: more here + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error>; + + // TODO: should there also be a method like `poll` but doesn't return an + // item? basically just says "please make more progress internally" + // seems crucial for buffering to actually make any sense. + + /// Creates an iterator which blocks the current thread until each item of + /// this stream is resolved. + /// + /// This method will consume ownership of this stream, returning an + /// implementation of a standard iterator. This iterator will *block the + /// current thread* on each call to `next` if the item in the stream isn't + /// ready yet. + /// + /// > **Note:** This method is not appropriate to call on event loops or + /// > similar I/O situations because it will prevent the event + /// > loop from making progress (this blocks the thread). 
This + /// > method should only be called when it's guaranteed that the + /// > blocking work associated with this stream will be completed + /// > by another thread. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + /// + /// # Panics + /// + /// The returned iterator does not attempt to catch panics. If the `poll` + /// function panics, panics will be propagated to the caller of `next`. + #[cfg(feature = "use_std")] + fn wait(self) -> Wait<Self> + where Self: Sized + { + wait::new(self) + } + + /// Convenience function for turning this stream into a trait object. + /// + /// This simply avoids the need to write `Box::new` and can often help with + /// type inference as well by always returning a trait object. Note that + /// this method requires the `Send` bound and returns a `BoxStream`, which + /// also encodes this. If you'd like to create a `Box<Stream>` without the + /// `Send` bound, then the `Box::new` function can be used instead. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + /// + /// # Examples + /// + /// ``` + /// use futures::stream::*; + /// use futures::sync::mpsc; + /// + /// let (_tx, rx) = mpsc::channel(1); + /// let a: BoxStream<i32, ()> = rx.boxed(); + /// ``` + #[cfg(feature = "use_std")] + #[doc(hidden)] + #[deprecated(note = "removed without replacement, recommended to use a \ + local extension trait or function if needed, more \ + details in https://github.com/rust-lang-nursery/futures-rs/issues/228")] + #[allow(deprecated)] + fn boxed(self) -> BoxStream<Self::Item, Self::Error> + where Self: Sized + Send + 'static, + { + ::std::boxed::Box::new(self) + } + + /// Converts this stream into a `Future`. + /// + /// A stream can be viewed as a future which will resolve to a pair containing + /// the next element of the stream plus the remaining stream. 
If the stream + /// terminates, then the next element is `None` and the remaining stream is + /// still passed back, to allow reclamation of its resources. + /// + /// The returned future can be used to compose streams and futures together by + /// placing everything into the "world of futures". + fn into_future(self) -> StreamFuture<Self> + where Self: Sized + { + future::new(self) + } + + /// Converts a stream of type `T` to a stream of type `U`. + /// + /// The provided closure is executed over all elements of this stream as + /// they are made available, and the callback will be executed inline with + /// calls to `poll`. + /// + /// Note that this function consumes the receiving stream and returns a + /// wrapped version of it, similar to the existing `map` methods in the + /// standard library. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::sync::mpsc; + /// + /// let (_tx, rx) = mpsc::channel::<i32>(1); + /// let rx = rx.map(|x| x + 3); + /// ``` + fn map<U, F>(self, f: F) -> Map<Self, F> + where F: FnMut(Self::Item) -> U, + Self: Sized + { + map::new(self, f) + } + + /// Converts a stream of error type `T` to a stream of error type `U`. + /// + /// The provided closure is executed over all errors of this stream as + /// they are made available, and the callback will be executed inline with + /// calls to `poll`. + /// + /// Note that this function consumes the receiving stream and returns a + /// wrapped version of it, similar to the existing `map_err` methods in the + /// standard library. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::sync::mpsc; + /// + /// let (_tx, rx) = mpsc::channel::<i32>(1); + /// let rx = rx.map_err(|()| 3); + /// ``` + fn map_err<U, F>(self, f: F) -> MapErr<Self, F> + where F: FnMut(Self::Error) -> U, + Self: Sized + { + map_err::new(self, f) + } + + /// Filters the values produced by this stream according to the provided + /// predicate. 
+ /// + /// As values of this stream are made available, the provided predicate will + /// be run against them. If the predicate returns `true` then the stream + /// will yield the value, but if the predicate returns `false` then the + /// value will be discarded and the next value will be produced. + /// + /// All errors are passed through without filtering in this combinator. + /// + /// Note that this function consumes the receiving stream and returns a + /// wrapped version of it, similar to the existing `filter` methods in the + /// standard library. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::sync::mpsc; + /// + /// let (_tx, rx) = mpsc::channel::<i32>(1); + /// let evens = rx.filter(|x| x % 2 == 0); + /// ``` + fn filter<F>(self, f: F) -> Filter<Self, F> + where F: FnMut(&Self::Item) -> bool, + Self: Sized + { + filter::new(self, f) + } + + /// Filters the values produced by this stream while simultaneously mapping + /// them to a different type. + /// + /// As values of this stream are made available, the provided function will + /// be run on them. If the predicate returns `Some(e)` then the stream will + /// yield the value `e`, but if the predicate returns `None` then the next + /// value will be produced. + /// + /// All errors are passed through without filtering in this combinator. + /// + /// Note that this function consumes the receiving stream and returns a + /// wrapped version of it, similar to the existing `filter_map` methods in the + /// standard library. 
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (_tx, rx) = mpsc::channel::<i32>(1);
+ /// let evens_plus_one = rx.filter_map(|x| {
+ /// if x % 2 == 0 {
+ /// Some(x + 1)
+ /// } else {
+ /// None
+ /// }
+ /// });
+ /// ```
+ fn filter_map<F, B>(self, f: F) -> FilterMap<Self, F>
+ where F: FnMut(Self::Item) -> Option<B>,
+ Self: Sized
+ {
+ filter_map::new(self, f)
+ }
+
+ /// Chain on a computation for when a value is ready, passing the resulting
+ /// item to the provided closure `f`.
+ ///
+ /// This function can be used to ensure a computation runs regardless of
+ /// the next value on the stream. The closure provided will be yielded a
+ /// `Result` once a value is ready, and the returned future will then be run
+ /// to completion to produce the next value on this stream.
+ ///
+ /// The returned value of the closure must implement the `IntoFuture` trait
+ /// and can represent some more work to be done before the composed stream
+ /// is finished. Note that the `Result` type implements the `IntoFuture`
+ /// trait so it is possible to simply alter the `Result` yielded to the
+ /// closure and return it.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (_tx, rx) = mpsc::channel::<i32>(1);
+ ///
+ /// let rx = rx.then(|result| {
+ ///     match result {
+ ///         Ok(e) => Ok(e + 3),
+ ///         Err(()) => Err(4),
+ ///     }
+ /// });
+ /// ```
+ fn then<F, U>(self, f: F) -> Then<Self, F, U>
+ where F: FnMut(Result<Self::Item, Self::Error>) -> U,
+ U: IntoFuture,
+ Self: Sized
+ {
+ then::new(self, f)
+ }
+
+ /// Chain on a computation for when a value is ready, passing the successful
+ /// results to the provided closure `f`. 
+ /// + /// This function can be used to run a unit of work when the next successful + /// value on a stream is ready. The closure provided will be yielded a value + /// when ready, and the returned future will then be run to completion to + /// produce the next value on this stream. + /// + /// Any errors produced by this stream will not be passed to the closure, + /// and will be passed through. + /// + /// The returned value of the closure must implement the `IntoFuture` trait + /// and can represent some more work to be done before the composed stream + /// is finished. Note that the `Result` type implements the `IntoFuture` + /// trait so it is possible to simply alter the `Result` yielded to the + /// closure and return it. + /// + /// Note that this function consumes the receiving stream and returns a + /// wrapped version of it. + /// + /// To process the entire stream and return a single future representing + /// success or error, use `for_each` instead. + /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::sync::mpsc; + /// + /// let (_tx, rx) = mpsc::channel::<i32>(1); + /// + /// let rx = rx.and_then(|result| { + /// if result % 2 == 0 { + /// Ok(result) + /// } else { + /// Err(()) + /// } + /// }); + /// ``` + fn and_then<F, U>(self, f: F) -> AndThen<Self, F, U> + where F: FnMut(Self::Item) -> U, + U: IntoFuture<Error = Self::Error>, + Self: Sized + { + and_then::new(self, f) + } + + /// Chain on a computation for when an error happens, passing the + /// erroneous result to the provided closure `f`. + /// + /// This function can be used to run a unit of work and attempt to recover from + /// an error if one happens. The closure provided will be yielded an error + /// when one appears, and the returned future will then be run to completion + /// to produce the next value on this stream. + /// + /// Any successful values produced by this stream will not be passed to the + /// closure, and will be passed through. 
+ /// + /// The returned value of the closure must implement the `IntoFuture` trait + /// and can represent some more work to be done before the composed stream + /// is finished. Note that the `Result` type implements the `IntoFuture` + /// trait so it is possible to simply alter the `Result` yielded to the + /// closure and return it. + /// + /// Note that this function consumes the receiving stream and returns a + /// wrapped version of it. + fn or_else<F, U>(self, f: F) -> OrElse<Self, F, U> + where F: FnMut(Self::Error) -> U, + U: IntoFuture<Item = Self::Item>, + Self: Sized + { + or_else::new(self, f) + } + + /// Collect all of the values of this stream into a vector, returning a + /// future representing the result of that computation. + /// + /// This combinator will collect all successful results of this stream and + /// collect them into a `Vec<Self::Item>`. If an error happens then all + /// collected elements will be dropped and the error will be returned. + /// + /// The returned future will be resolved whenever an error happens or when + /// the stream returns `Ok(None)`. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// use futures::prelude::*; + /// use futures::sync::mpsc; + /// + /// let (mut tx, rx) = mpsc::channel(1); + /// + /// thread::spawn(|| { + /// for i in (0..5).rev() { + /// tx = tx.send(i + 1).wait().unwrap(); + /// } + /// }); + /// + /// let mut result = rx.collect(); + /// assert_eq!(result.wait(), Ok(vec![5, 4, 3, 2, 1])); + /// ``` + #[cfg(feature = "use_std")] + fn collect(self) -> Collect<Self> + where Self: Sized + { + collect::new(self) + } + + /// Concatenate all results of a stream into a single extendable + /// destination, returning a future representing the end result. 
+ /// + /// This combinator will extend the first item with the contents + /// of all the successful results of the stream. If the stream is + /// empty, the default value will be returned. If an error occurs, + /// all the results will be dropped and the error will be returned. + /// + /// The name `concat2` is an intermediate measure until the release of + /// futures 0.2, at which point it will be renamed back to `concat`. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// use futures::prelude::*; + /// use futures::sync::mpsc; + /// + /// let (mut tx, rx) = mpsc::channel(1); + /// + /// thread::spawn(move || { + /// for i in (0..3).rev() { + /// let n = i * 3; + /// tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap(); + /// } + /// }); + /// let result = rx.concat2(); + /// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3])); + /// ``` + fn concat2(self) -> Concat2<Self> + where Self: Sized, + Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default, + { + concat::new2(self) + } + + /// Concatenate all results of a stream into a single extendable + /// destination, returning a future representing the end result. + /// + /// This combinator will extend the first item with the contents + /// of all the successful results of the stream. If an error occurs, + /// all the results will be dropped and the error will be returned. 
+ /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// use futures::prelude::*; + /// use futures::sync::mpsc; + /// + /// let (mut tx, rx) = mpsc::channel(1); + /// + /// thread::spawn(move || { + /// for i in (0..3).rev() { + /// let n = i * 3; + /// tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap(); + /// } + /// }); + /// let result = rx.concat(); + /// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3])); + /// ``` + /// + /// # Panics + /// + /// It's important to note that this function will panic if the stream + /// is empty, which is the reason for its deprecation. + #[deprecated(since="0.1.14", note="please use `Stream::concat2` instead")] + #[allow(deprecated)] + fn concat(self) -> Concat<Self> + where Self: Sized, + Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator, + { + concat::new(self) + } + + /// Execute an accumulating computation over a stream, collecting all the + /// values into one final result. + /// + /// This combinator will collect all successful results of this stream + /// according to the closure provided. The initial state is also provided to + /// this method and then is returned again by each execution of the closure. + /// Once the entire stream has been exhausted the returned future will + /// resolve to this value. + /// + /// If an error happens then collected state will be dropped and the error + /// will be returned. 
+ /// + /// # Examples + /// + /// ``` + /// use futures::prelude::*; + /// use futures::stream; + /// use futures::future; + /// + /// let number_stream = stream::iter_ok::<_, ()>(0..6); + /// let sum = number_stream.fold(0, |acc, x| future::ok(acc + x)); + /// assert_eq!(sum.wait(), Ok(15)); + /// ``` + fn fold<F, T, Fut>(self, init: T, f: F) -> Fold<Self, F, Fut, T> + where F: FnMut(T, Self::Item) -> Fut, + Fut: IntoFuture<Item = T>, + Self::Error: From<Fut::Error>, + Self: Sized + { + fold::new(self, f, init) + } + + /// Flattens a stream of streams into just one continuous stream. + /// + /// If this stream's elements are themselves streams then this combinator + /// will flatten out the entire stream to one long chain of elements. Any + /// errors are passed through without looking at them, but otherwise each + /// individual stream will get exhausted before moving on to the next. + /// + /// ``` + /// use std::thread; + /// + /// use futures::prelude::*; + /// use futures::sync::mpsc; + /// + /// let (tx1, rx1) = mpsc::channel::<i32>(1); + /// let (tx2, rx2) = mpsc::channel::<i32>(1); + /// let (tx3, rx3) = mpsc::channel(1); + /// + /// thread::spawn(|| { + /// tx1.send(1).wait().unwrap() + /// .send(2).wait().unwrap(); + /// }); + /// thread::spawn(|| { + /// tx2.send(3).wait().unwrap() + /// .send(4).wait().unwrap(); + /// }); + /// thread::spawn(|| { + /// tx3.send(rx1).wait().unwrap() + /// .send(rx2).wait().unwrap(); + /// }); + /// + /// let mut result = rx3.flatten().collect(); + /// assert_eq!(result.wait(), Ok(vec![1, 2, 3, 4])); + /// ``` + fn flatten(self) -> Flatten<Self> + where Self::Item: Stream, + <Self::Item as Stream>::Error: From<Self::Error>, + Self: Sized + { + flatten::new(self) + } + + /// Skip elements on this stream while the predicate provided resolves to + /// `true`. + /// + /// This function, like `Iterator::skip_while`, will skip elements on the + /// stream until the `predicate` resolves to `false`. 
Once one element + /// returns false all future elements will be returned from the underlying + /// stream. + fn skip_while<P, R>(self, pred: P) -> SkipWhile<Self, P, R> + where P: FnMut(&Self::Item) -> R, + R: IntoFuture<Item=bool, Error=Self::Error>, + Self: Sized + { + skip_while::new(self, pred) + } + + /// Take elements from this stream while the predicate provided resolves to + /// `true`. + /// + /// This function, like `Iterator::take_while`, will take elements from the + /// stream until the `predicate` resolves to `false`. Once one element + /// returns false it will always return that the stream is done. + fn take_while<P, R>(self, pred: P) -> TakeWhile<Self, P, R> + where P: FnMut(&Self::Item) -> R, + R: IntoFuture<Item=bool, Error=Self::Error>, + Self: Sized + { + take_while::new(self, pred) + } + + /// Runs this stream to completion, executing the provided closure for each + /// element on the stream. + /// + /// The closure provided will be called for each item this stream resolves + /// to successfully, producing a future. That future will then be executed + /// to completion before moving on to the next item. + /// + /// The returned value is a `Future` where the `Item` type is `()` and + /// errors are otherwise threaded through. Any error on the stream or in the + /// closure will cause iteration to be halted immediately and the future + /// will resolve to that error. + /// + /// To process each item in the stream and produce another stream instead + /// of a single future, use `and_then` instead. + fn for_each<F, U>(self, f: F) -> ForEach<Self, F, U> + where F: FnMut(Self::Item) -> U, + U: IntoFuture<Item=(), Error = Self::Error>, + Self: Sized + { + for_each::new(self, f) + } + + /// Map this stream's error to any error implementing `From` for + /// this stream's `Error`, returning a new stream. + /// + /// This function does for streams what `try!` does for `Result`, + /// by letting the compiler infer the type of the resulting error. 
+ /// Just as `map_err` above, this is useful for example to ensure + /// that streams have the same error type when used with + /// combinators. + /// + /// Note that this function consumes the receiving stream and returns a + /// wrapped version of it. + fn from_err<E: From<Self::Error>>(self) -> FromErr<Self, E> + where Self: Sized, + { + from_err::new(self) + } + + /// Creates a new stream of at most `amt` items of the underlying stream. + /// + /// Once `amt` items have been yielded from this stream then it will always + /// return that the stream is done. + /// + /// # Errors + /// + /// Any errors yielded from underlying stream, before the desired amount of + /// items is reached, are passed through and do not affect the total number + /// of items taken. + fn take(self, amt: u64) -> Take<Self> + where Self: Sized + { + take::new(self, amt) + } + + /// Creates a new stream which skips `amt` items of the underlying stream. + /// + /// Once `amt` items have been skipped from this stream then it will always + /// return the remaining items on this stream. + /// + /// # Errors + /// + /// All errors yielded from underlying stream are passed through and do not + /// affect the total number of items skipped. + fn skip(self, amt: u64) -> Skip<Self> + where Self: Sized + { + skip::new(self, amt) + } + + /// Fuse a stream such that `poll` will never again be called once it has + /// finished. + /// + /// Currently once a stream has returned `None` from `poll` any further + /// calls could exhibit bad behavior such as block forever, panic, never + /// return, etc. If it is known that `poll` may be called after stream has + /// already finished, then this method can be used to ensure that it has + /// defined semantics. + /// + /// Once a stream has been `fuse`d and it finishes, then it will forever + /// return `None` from `poll`. This, unlike for the traits `poll` method, + /// is guaranteed. 
+ /// + /// Also note that as soon as this stream returns `None` it will be dropped + /// to reclaim resources associated with it. + fn fuse(self) -> Fuse<Self> + where Self: Sized + { + fuse::new(self) + } + + /// Borrows a stream, rather than consuming it. + /// + /// This is useful to allow applying stream adaptors while still retaining + /// ownership of the original stream. + /// + /// ``` + /// use futures::prelude::*; + /// use futures::stream; + /// use futures::future; + /// + /// let mut stream = stream::iter_ok::<_, ()>(1..5); + /// + /// let sum = stream.by_ref().take(2).fold(0, |a, b| future::ok(a + b)).wait(); + /// assert_eq!(sum, Ok(3)); + /// + /// // You can use the stream again + /// let sum = stream.take(2).fold(0, |a, b| future::ok(a + b)).wait(); + /// assert_eq!(sum, Ok(7)); + /// ``` + fn by_ref(&mut self) -> &mut Self + where Self: Sized + { + self + } + + /// Catches unwinding panics while polling the stream. + /// + /// Caught panic (if any) will be the last element of the resulting stream. + /// + /// In general, panics within a stream can propagate all the way out to the + /// task level. This combinator makes it possible to halt unwinding within + /// the stream itself. It's most commonly used within task executors. This + /// method should not be used for error handling. + /// + /// Note that this method requires the `UnwindSafe` bound from the standard + /// library. This isn't always applied automatically, and the standard + /// library provides an `AssertUnwindSafe` wrapper type to apply it + /// after-the fact. To assist using this method, the `Stream` trait is also + /// implemented for `AssertUnwindSafe<S>` where `S` implements `Stream`. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. 
+ /// + /// # Examples + /// + /// ```rust + /// use futures::prelude::*; + /// use futures::stream; + /// + /// let stream = stream::iter_ok::<_, bool>(vec![Some(10), None, Some(11)]); + /// // panic on second element + /// let stream_panicking = stream.map(|o| o.unwrap()); + /// let mut iter = stream_panicking.catch_unwind().wait(); + /// + /// assert_eq!(Ok(10), iter.next().unwrap().ok().unwrap()); + /// assert!(iter.next().unwrap().is_err()); + /// assert!(iter.next().is_none()); + /// ``` + #[cfg(feature = "use_std")] + fn catch_unwind(self) -> CatchUnwind<Self> + where Self: Sized + std::panic::UnwindSafe + { + catch_unwind::new(self) + } + + /// An adaptor for creating a buffered list of pending futures. + /// + /// If this stream's item can be converted into a future, then this adaptor + /// will buffer up to at most `amt` futures and then return results in the + /// same order as the underlying stream. No more than `amt` futures will be + /// buffered at any point in time, and less than `amt` may also be buffered + /// depending on the state of each future. + /// + /// The returned stream will be a stream of each future's result, with + /// errors passed through whenever they occur. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + #[cfg(feature = "use_std")] + fn buffered(self, amt: usize) -> Buffered<Self> + where Self::Item: IntoFuture<Error = <Self as Stream>::Error>, + Self: Sized + { + buffered::new(self, amt) + } + + /// An adaptor for creating a buffered list of pending futures (unordered). + /// + /// If this stream's item can be converted into a future, then this adaptor + /// will buffer up to `amt` futures and then return results in the order + /// in which they complete. No more than `amt` futures will be buffered at + /// any point in time, and less than `amt` may also be buffered depending on + /// the state of each future. 
+ /// + /// The returned stream will be a stream of each future's result, with + /// errors passed through whenever they occur. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + #[cfg(feature = "use_std")] + fn buffer_unordered(self, amt: usize) -> BufferUnordered<Self> + where Self::Item: IntoFuture<Error = <Self as Stream>::Error>, + Self: Sized + { + buffer_unordered::new(self, amt) + } + + /// An adapter for merging the output of two streams. + /// + /// The merged stream produces items from one or both of the underlying + /// streams as they become available. Errors, however, are not merged: you + /// get at most one error at a time. + #[deprecated(note = "functionality provided by `select` now")] + #[allow(deprecated)] + fn merge<S>(self, other: S) -> Merge<Self, S> + where S: Stream<Error = Self::Error>, + Self: Sized, + { + merge::new(self, other) + } + + /// An adapter for zipping two streams together. + /// + /// The zipped stream waits for both streams to produce an item, and then + /// returns that pair. If an error happens, then that error will be returned + /// immediately. If either stream ends then the zipped stream will also end. + fn zip<S>(self, other: S) -> Zip<Self, S> + where S: Stream<Error = Self::Error>, + Self: Sized, + { + zip::new(self, other) + } + + /// Adapter for chaining two stream. + /// + /// The resulting stream emits elements from the first stream, and when + /// first stream reaches the end, emits the elements from the second stream. 
+ /// + /// ```rust + /// use futures::prelude::*; + /// use futures::stream; + /// + /// let stream1 = stream::iter_result(vec![Ok(10), Err(false)]); + /// let stream2 = stream::iter_result(vec![Err(true), Ok(20)]); + /// let mut chain = stream1.chain(stream2).wait(); + /// + /// assert_eq!(Some(Ok(10)), chain.next()); + /// assert_eq!(Some(Err(false)), chain.next()); + /// assert_eq!(Some(Err(true)), chain.next()); + /// assert_eq!(Some(Ok(20)), chain.next()); + /// assert_eq!(None, chain.next()); + /// ``` + fn chain<S>(self, other: S) -> Chain<Self, S> + where S: Stream<Item = Self::Item, Error = Self::Error>, + Self: Sized + { + chain::new(self, other) + } + + /// Creates a new stream which exposes a `peek` method. + /// + /// Calling `peek` returns a reference to the next item in the stream. + fn peekable(self) -> Peekable<Self> + where Self: Sized + { + peek::new(self) + } + + /// An adaptor for chunking up items of the stream inside a vector. + /// + /// This combinator will attempt to pull items from this stream and buffer + /// them into a local vector. At most `capacity` items will get buffered + /// before they're yielded from the returned stream. + /// + /// Note that the vectors returned from this iterator may not always have + /// `capacity` elements. If the underlying stream ended and only a partial + /// vector was created, it'll be returned. Additionally if an error happens + /// from the underlying stream then the currently buffered items will be + /// yielded. + /// + /// Errors are passed through the stream unbuffered. + /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + /// + /// # Panics + /// + /// This method will panic of `capacity` is zero. 
+ #[cfg(feature = "use_std")] + fn chunks(self, capacity: usize) -> Chunks<Self> + where Self: Sized + { + chunks::new(self, capacity) + } + + /// Creates a stream that selects the next element from either this stream + /// or the provided one, whichever is ready first. + /// + /// This combinator will attempt to pull items from both streams. Each + /// stream will be polled in a round-robin fashion, and whenever a stream is + /// ready to yield an item that item is yielded. + /// + /// The `select` function is similar to `merge` except that it requires both + /// streams to have the same item and error types. + /// + /// Error are passed through from either stream. + fn select<S>(self, other: S) -> Select<Self, S> + where S: Stream<Item = Self::Item, Error = Self::Error>, + Self: Sized, + { + select::new(self, other) + } + + /// A future that completes after the given stream has been fully processed + /// into the sink, including flushing. + /// + /// This future will drive the stream to keep producing items until it is + /// exhausted, sending each item to the sink. It will complete once both the + /// stream is exhausted, and the sink has fully processed received item, + /// flushed successfully, and closed successfully. + /// + /// Doing `stream.forward(sink)` is roughly equivalent to + /// `sink.send_all(stream)`. The returned future will exhaust all items from + /// `self`, sending them all to `sink`. Furthermore the `sink` will be + /// closed and flushed. + /// + /// On completion, the pair `(stream, sink)` is returned. + fn forward<S>(self, sink: S) -> Forward<Self, S> + where S: Sink<SinkItem = Self::Item>, + Self::Error: From<S::SinkError>, + Self: Sized + { + forward::new(self, sink) + } + + /// Splits this `Stream + Sink` object into separate `Stream` and `Sink` + /// objects. + /// + /// This can be useful when you want to split ownership between tasks, or + /// allow direct interaction between the two objects (e.g. via + /// `Sink::send_all`). 
+ /// + /// This method is only available when the `use_std` feature of this + /// library is activated, and it is activated by default. + #[cfg(feature = "use_std")] + fn split(self) -> (SplitSink<Self>, SplitStream<Self>) + where Self: super::sink::Sink + Sized + { + split::split(self) + } + + /// Do something with each item of this stream, afterwards passing it on. + /// + /// This is similar to the `Iterator::inspect` method in the standard + /// library where it allows easily inspecting each value as it passes + /// through the stream, for example to debug what's going on. + fn inspect<F>(self, f: F) -> Inspect<Self, F> + where F: FnMut(&Self::Item), + Self: Sized, + { + inspect::new(self, f) + } + + /// Do something with the error of this stream, afterwards passing it on. + /// + /// This is similar to the `Stream::inspect` method where it allows + /// easily inspecting the error as it passes through the stream, for + /// example to debug what's going on. + fn inspect_err<F>(self, f: F) -> InspectErr<Self, F> + where F: FnMut(&Self::Error), + Self: Sized, + { + inspect_err::new(self, f) + } +} + +impl<'a, S: ?Sized + Stream> Stream for &'a mut S { + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + (**self).poll() + } +} + +/// Converts a list of futures into a `Stream` of results from the futures. +/// +/// This function will take an list of futures (e.g. a vector, an iterator, +/// etc), and return a stream. The stream will yield items as they become +/// available on the futures internally, in the order that they become +/// available. This function is similar to `buffer_unordered` in that it may +/// return items in a different order than in the list specified. +/// +/// Note that the returned set can also be used to dynamically push more +/// futures into the set as they become available. 
+#[cfg(feature = "use_std")] +pub fn futures_unordered<I>(futures: I) -> FuturesUnordered<<I::Item as IntoFuture>::Future> + where I: IntoIterator, + I::Item: IntoFuture +{ + let mut set = FuturesUnordered::new(); + + for future in futures { + set.push(future.into_future()); + } + + return set +} diff --git a/third_party/rust/futures-0.1.31/src/stream/once.rs b/third_party/rust/futures-0.1.31/src/stream/once.rs new file mode 100644 index 0000000000..24fb327bd6 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/once.rs @@ -0,0 +1,35 @@ +use {Poll, Async}; +use stream::Stream; + +/// A stream which emits single element and then EOF. +/// +/// This stream will never block and is always ready. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Once<T, E>(Option<Result<T, E>>); + +/// Creates a stream of single element +/// +/// ```rust +/// use futures::*; +/// +/// let mut stream = stream::once::<(), _>(Err(17)); +/// assert_eq!(Err(17), stream.poll()); +/// assert_eq!(Ok(Async::Ready(None)), stream.poll()); +/// ``` +pub fn once<T, E>(item: Result<T, E>) -> Once<T, E> { + Once(Some(item)) +} + +impl<T, E> Stream for Once<T, E> { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<Option<T>, E> { + match self.0.take() { + Some(Ok(e)) => Ok(Async::Ready(Some(e))), + Some(Err(e)) => Err(e), + None => Ok(Async::Ready(None)), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/or_else.rs b/third_party/rust/futures-0.1.31/src/stream/or_else.rs new file mode 100644 index 0000000000..2d15fa2b70 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/or_else.rs @@ -0,0 +1,80 @@ +use {IntoFuture, Future, Poll, Async}; +use stream::Stream; + +/// A stream combinator which chains a computation onto errors produced by a +/// stream. +/// +/// This structure is produced by the `Stream::or_else` method. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct OrElse<S, F, U> + where U: IntoFuture, +{ + stream: S, + future: Option<U::Future>, + f: F, +} + +pub fn new<S, F, U>(s: S, f: F) -> OrElse<S, F, U> + where S: Stream, + F: FnMut(S::Error) -> U, + U: IntoFuture<Item=S::Item>, +{ + OrElse { + stream: s, + future: None, + f: f, + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, F, U> ::sink::Sink for OrElse<S, F, U> + where S: ::sink::Sink, U: IntoFuture +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, F, U> Stream for OrElse<S, F, U> + where S: Stream, + F: FnMut(S::Error) -> U, + U: IntoFuture<Item=S::Item>, +{ + type Item = S::Item; + type Error = U::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, U::Error> { + if self.future.is_none() { + let item = match self.stream.poll() { + Ok(Async::Ready(e)) => return Ok(Async::Ready(e)), + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => e, + }; + self.future = Some((self.f)(item).into_future()); + } + assert!(self.future.is_some()); + match self.future.as_mut().unwrap().poll() { + Ok(Async::Ready(e)) => { + self.future = None; + Ok(Async::Ready(Some(e))) + } + Err(e) => { + self.future = None; + Err(e) + } + Ok(Async::NotReady) => Ok(Async::NotReady) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/peek.rs b/third_party/rust/futures-0.1.31/src/stream/peek.rs new file mode 100644 index 0000000000..96e657663b --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/peek.rs @@ -0,0 +1,74 @@ +use {Async, Poll}; +use stream::{Stream, Fuse}; + +/// A `Stream` that implements a `peek` method. 
+/// +/// The `peek` method can be used to retrieve a reference +/// to the next `Stream::Item` if available. A subsequent +/// call to `poll` will return the owned item. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Peekable<S: Stream> { + stream: Fuse<S>, + peeked: Option<S::Item>, +} + + +pub fn new<S: Stream>(stream: S) -> Peekable<S> { + Peekable { + stream: stream.fuse(), + peeked: None + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S> ::sink::Sink for Peekable<S> + where S: ::sink::Sink + Stream +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S: Stream> Stream for Peekable<S> { + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + if let Some(item) = self.peeked.take() { + return Ok(Async::Ready(Some(item))) + } + self.stream.poll() + } +} + + +impl<S: Stream> Peekable<S> { + /// Peek retrieves a reference to the next item in the stream. + /// + /// This method polls the underlying stream and return either a reference + /// to the next item if the stream is ready or passes through any errors. 
+ pub fn peek(&mut self) -> Poll<Option<&S::Item>, S::Error> { + if self.peeked.is_some() { + return Ok(Async::Ready(self.peeked.as_ref())) + } + match try_ready!(self.poll()) { + None => Ok(Async::Ready(None)), + Some(item) => { + self.peeked = Some(item); + Ok(Async::Ready(self.peeked.as_ref())) + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/poll_fn.rs b/third_party/rust/futures-0.1.31/src/stream/poll_fn.rs new file mode 100644 index 0000000000..fbc7df0844 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/poll_fn.rs @@ -0,0 +1,49 @@ +//! Definition of the `PollFn` combinator + +use {Stream, Poll}; + +/// A stream which adapts a function returning `Poll`. +/// +/// Created by the `poll_fn` function. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct PollFn<F> { + inner: F, +} + +/// Creates a new stream wrapping around a function returning `Poll`. +/// +/// Polling the returned stream delegates to the wrapped function. +/// +/// # Examples +/// +/// ``` +/// use futures::stream::poll_fn; +/// use futures::{Async, Poll}; +/// +/// let mut counter = 1usize; +/// +/// let read_stream = poll_fn(move || -> Poll<Option<String>, std::io::Error> { +/// if counter == 0 { return Ok(Async::Ready(None)); } +/// counter -= 1; +/// Ok(Async::Ready(Some("Hello, World!".to_owned()))) +/// }); +/// ``` +pub fn poll_fn<T, E, F>(f: F) -> PollFn<F> +where + F: FnMut() -> Poll<Option<T>, E>, +{ + PollFn { inner: f } +} + +impl<T, E, F> Stream for PollFn<F> +where + F: FnMut() -> Poll<Option<T>, E>, +{ + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<Option<T>, E> { + (self.inner)() + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/repeat.rs b/third_party/rust/futures-0.1.31/src/stream/repeat.rs new file mode 100644 index 0000000000..e3cb5ff49c --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/repeat.rs @@ -0,0 +1,53 @@ +use core::marker; + + +use stream::Stream; + +use {Async, 
Poll}; + + +/// Stream that produces the same element repeatedly. +/// +/// This structure is created by the `stream::repeat` function. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Repeat<T, E> + where T: Clone +{ + item: T, + error: marker::PhantomData<E>, +} + +/// Create a stream which produces the same item repeatedly. +/// +/// Stream never produces an error or EOF. Note that you likely want to avoid +/// usage of `collect` or such on the returned stream as it will exhaust +/// available memory as it tries to just fill up all RAM. +/// +/// ```rust +/// use futures::*; +/// +/// let mut stream = stream::repeat::<_, bool>(10); +/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll()); +/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll()); +/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll()); +/// ``` +pub fn repeat<T, E>(item: T) -> Repeat<T, E> + where T: Clone +{ + Repeat { + item: item, + error: marker::PhantomData, + } +} + +impl<T, E> Stream for Repeat<T, E> + where T: Clone +{ + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + Ok(Async::Ready(Some(self.item.clone()))) + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/select.rs b/third_party/rust/futures-0.1.31/src/stream/select.rs new file mode 100644 index 0000000000..ae6b66cf14 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/select.rs @@ -0,0 +1,64 @@ +use {Poll, Async}; +use stream::{Stream, Fuse}; + +/// An adapter for merging the output of two streams. +/// +/// The merged stream produces items from either of the underlying streams as +/// they become available, and the streams are polled in a round-robin fashion. +/// Errors, however, are not merged: you get at most one error at a time. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Select<S1, S2> { + stream1: Fuse<S1>, + stream2: Fuse<S2>, + flag: bool, +} + +pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Select<S1, S2> + where S1: Stream, + S2: Stream<Item = S1::Item, Error = S1::Error> +{ + Select { + stream1: stream1.fuse(), + stream2: stream2.fuse(), + flag: false, + } +} + +impl<S1, S2> Stream for Select<S1, S2> + where S1: Stream, + S2: Stream<Item = S1::Item, Error = S1::Error> +{ + type Item = S1::Item; + type Error = S1::Error; + + fn poll(&mut self) -> Poll<Option<S1::Item>, S1::Error> { + let (a, b) = if self.flag { + (&mut self.stream2 as &mut Stream<Item=_, Error=_>, + &mut self.stream1 as &mut Stream<Item=_, Error=_>) + } else { + (&mut self.stream1 as &mut Stream<Item=_, Error=_>, + &mut self.stream2 as &mut Stream<Item=_, Error=_>) + }; + self.flag = !self.flag; + + let a_done = match a.poll()? { + Async::Ready(Some(item)) => return Ok(Some(item).into()), + Async::Ready(None) => true, + Async::NotReady => false, + }; + + match b.poll()? { + Async::Ready(Some(item)) => { + // If the other stream isn't finished yet, give them a chance to + // go first next time as we pulled something off `b`. + if !a_done { + self.flag = !self.flag; + } + Ok(Some(item).into()) + } + Async::Ready(None) if a_done => Ok(None.into()), + Async::Ready(None) | Async::NotReady => Ok(Async::NotReady), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/skip.rs b/third_party/rust/futures-0.1.31/src/stream/skip.rs new file mode 100644 index 0000000000..a1d7b49797 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/skip.rs @@ -0,0 +1,84 @@ +use {Poll, Async}; +use stream::Stream; + +/// A stream combinator which skips a number of elements before continuing. +/// +/// This structure is produced by the `Stream::skip` method. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Skip<S> { + stream: S, + remaining: u64, +} + +pub fn new<S>(s: S, amt: u64) -> Skip<S> + where S: Stream, +{ + Skip { + stream: s, + remaining: amt, + } +} + +impl<S> Skip<S> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S> ::sink::Sink for Skip<S> + where S: ::sink::Sink +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S> Stream for Skip<S> + where S: Stream, +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + while self.remaining > 0 { + match try_ready!(self.stream.poll()) { + Some(_) => self.remaining -= 1, + None => return Ok(Async::Ready(None)), + } + } + + self.stream.poll() + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/skip_while.rs b/third_party/rust/futures-0.1.31/src/stream/skip_while.rs new file mode 100644 index 0000000000..b571996c24 --- 
/dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/skip_while.rs @@ -0,0 +1,113 @@ +use {Async, Poll, IntoFuture, Future}; +use stream::Stream; + +/// A stream combinator which skips elements of a stream while a predicate +/// holds. +/// +/// This structure is produced by the `Stream::skip_while` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct SkipWhile<S, P, R> where S: Stream, R: IntoFuture { + stream: S, + pred: P, + pending: Option<(R::Future, S::Item)>, + done_skipping: bool, +} + +pub fn new<S, P, R>(s: S, p: P) -> SkipWhile<S, P, R> + where S: Stream, + P: FnMut(&S::Item) -> R, + R: IntoFuture<Item=bool, Error=S::Error>, +{ + SkipWhile { + stream: s, + pred: p, + pending: None, + done_skipping: false, + } +} + +impl<S, P, R> SkipWhile<S, P, R> where S: Stream, R: IntoFuture { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, P, R> ::sink::Sink for SkipWhile<S, P, R> + where S: ::sink::Sink + Stream, R: IntoFuture +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, P, R> Stream for SkipWhile<S, P, R> + where S: Stream, + P: FnMut(&S::Item) -> R, + R: IntoFuture<Item=bool, Error=S::Error>, +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + if self.done_skipping { + return self.stream.poll(); + } + + loop { + if self.pending.is_none() { + let item = match try_ready!(self.stream.poll()) { + Some(e) => e, + None => return Ok(Async::Ready(None)), + }; + self.pending = Some(((self.pred)(&item).into_future(), item)); + } + + assert!(self.pending.is_some()); + match self.pending.as_mut().unwrap().0.poll() { + Ok(Async::Ready(true)) => self.pending = None, + Ok(Async::Ready(false)) => { + let (_, item) = self.pending.take().unwrap(); + self.done_skipping = true; + return Ok(Async::Ready(Some(item))) + } + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => { + self.pending = None; + return Err(e) + } + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/split.rs b/third_party/rust/futures-0.1.31/src/stream/split.rs new file mode 100644 index 0000000000..ddaa52997d --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/split.rs @@ -0,0 +1,105 @@ +use std::any::Any; +use std::error::Error; +use std::fmt; + +use {StartSend, Sink, Stream, Poll, Async, AsyncSink}; +use sync::BiLock; + +/// A `Stream` part of the split pair +#[derive(Debug)] +pub struct 
SplitStream<S>(BiLock<S>); + +impl<S> SplitStream<S> { + /// Attempts to put the two "halves" of a split `Stream + Sink` back + /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are + /// a matching pair originating from the same call to `Stream::split`. + pub fn reunite(self, other: SplitSink<S>) -> Result<S, ReuniteError<S>> { + other.reunite(self) + } +} + +impl<S: Stream> Stream for SplitStream<S> { + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + match self.0.poll_lock() { + Async::Ready(mut inner) => inner.poll(), + Async::NotReady => Ok(Async::NotReady), + } + } +} + +/// A `Sink` part of the split pair +#[derive(Debug)] +pub struct SplitSink<S>(BiLock<S>); + +impl<S> SplitSink<S> { + /// Attempts to put the two "halves" of a split `Stream + Sink` back + /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are + /// a matching pair originating from the same call to `Stream::split`. 
+ pub fn reunite(self, other: SplitStream<S>) -> Result<S, ReuniteError<S>> { + self.0.reunite(other.0).map_err(|err| { + ReuniteError(SplitSink(err.0), SplitStream(err.1)) + }) + } +} + +impl<S: Sink> Sink for SplitSink<S> { + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) + -> StartSend<S::SinkItem, S::SinkError> + { + match self.0.poll_lock() { + Async::Ready(mut inner) => inner.start_send(item), + Async::NotReady => Ok(AsyncSink::NotReady(item)), + } + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + match self.0.poll_lock() { + Async::Ready(mut inner) => inner.poll_complete(), + Async::NotReady => Ok(Async::NotReady), + } + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + match self.0.poll_lock() { + Async::Ready(mut inner) => inner.close(), + Async::NotReady => Ok(Async::NotReady), + } + } +} + +pub fn split<S: Stream + Sink>(s: S) -> (SplitSink<S>, SplitStream<S>) { + let (a, b) = BiLock::new(s); + let read = SplitStream(a); + let write = SplitSink(b); + (write, read) +} + +/// Error indicating a `SplitSink<S>` and `SplitStream<S>` were not two halves +/// of a `Stream + Split`, and thus could not be `reunite`d. 
+pub struct ReuniteError<T>(pub SplitSink<T>, pub SplitStream<T>); + +impl<T> fmt::Debug for ReuniteError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_tuple("ReuniteError") + .field(&"...") + .finish() + } +} + +impl<T> fmt::Display for ReuniteError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "tried to reunite a SplitStream and SplitSink that don't form a pair") + } +} + +impl<T: Any> Error for ReuniteError<T> { + fn description(&self) -> &str { + "tried to reunite a SplitStream and SplitSink that don't form a pair" + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/take.rs b/third_party/rust/futures-0.1.31/src/stream/take.rs new file mode 100644 index 0000000000..0ca68496eb --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/take.rs @@ -0,0 +1,86 @@ +use {Async, Poll}; +use stream::Stream; + +/// A stream combinator which returns a maximum number of elements. +/// +/// This structure is produced by the `Stream::take` method. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Take<S> { + stream: S, + remaining: u64, +} + +pub fn new<S>(s: S, amt: u64) -> Take<S> + where S: Stream, +{ + Take { + stream: s, + remaining: amt, + } +} + +impl<S> Take<S> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S> ::sink::Sink for Take<S> + where S: ::sink::Sink + Stream +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S> Stream for Take<S> + where S: Stream, +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + if self.remaining == 0 { + Ok(Async::Ready(None)) + } else { + let next = try_ready!(self.stream.poll()); + match next { + Some(_) => self.remaining -= 1, + None => self.remaining = 0, + } + Ok(Async::Ready(next)) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/take_while.rs b/third_party/rust/futures-0.1.31/src/stream/take_while.rs new file mode 100644 index 0000000000..732ae855de --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/take_while.rs @@ -0,0 +1,113 @@ +use {Async, Poll, IntoFuture, Future}; +use stream::Stream; + +/// A stream combinator which takes elements from a stream while a predicate +/// holds. +/// +/// This structure is produced by the `Stream::take_while` method. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct TakeWhile<S, P, R> where S: Stream, R: IntoFuture { + stream: S, + pred: P, + pending: Option<(R::Future, S::Item)>, + done_taking: bool, +} + +pub fn new<S, P, R>(s: S, p: P) -> TakeWhile<S, P, R> + where S: Stream, + P: FnMut(&S::Item) -> R, + R: IntoFuture<Item=bool, Error=S::Error>, +{ + TakeWhile { + stream: s, + pred: p, + pending: None, + done_taking: false, + } +} + +impl<S, P, R> TakeWhile<S, P, R> where S: Stream, R: IntoFuture { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + &self.stream + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + &mut self.stream + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. 
+ pub fn into_inner(self) -> S { + self.stream + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, P, R> ::sink::Sink for TakeWhile<S, P, R> + where S: ::sink::Sink + Stream, R: IntoFuture +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, P, R> Stream for TakeWhile<S, P, R> + where S: Stream, + P: FnMut(&S::Item) -> R, + R: IntoFuture<Item=bool, Error=S::Error>, +{ + type Item = S::Item; + type Error = S::Error; + + fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> { + if self.done_taking { + return Ok(Async::Ready(None)); + } + + if self.pending.is_none() { + let item = match try_ready!(self.stream.poll()) { + Some(e) => e, + None => return Ok(Async::Ready(None)), + }; + self.pending = Some(((self.pred)(&item).into_future(), item)); + } + + assert!(self.pending.is_some()); + match self.pending.as_mut().unwrap().0.poll() { + Ok(Async::Ready(true)) => { + let (_, item) = self.pending.take().unwrap(); + Ok(Async::Ready(Some(item))) + }, + Ok(Async::Ready(false)) => { + self.done_taking = true; + Ok(Async::Ready(None)) + } + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(e) => { + self.pending = None; + Err(e) + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/then.rs b/third_party/rust/futures-0.1.31/src/stream/then.rs new file mode 100644 index 0000000000..cab338e922 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/then.rs @@ -0,0 +1,81 @@ +use {Async, IntoFuture, Future, Poll}; +use stream::Stream; + +/// A stream combinator which chains a computation onto each item produced by a +/// stream. +/// +/// This structure is produced by the `Stream::then` method. 
+#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Then<S, F, U> + where U: IntoFuture, +{ + stream: S, + future: Option<U::Future>, + f: F, +} + +pub fn new<S, F, U>(s: S, f: F) -> Then<S, F, U> + where S: Stream, + F: FnMut(Result<S::Item, S::Error>) -> U, + U: IntoFuture, +{ + Then { + stream: s, + future: None, + f: f, + } +} + +// Forwarding impl of Sink from the underlying stream +impl<S, F, U> ::sink::Sink for Then<S, F, U> + where S: ::sink::Sink, U: IntoFuture, +{ + type SinkItem = S::SinkItem; + type SinkError = S::SinkError; + + fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> { + self.stream.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), S::SinkError> { + self.stream.poll_complete() + } + + fn close(&mut self) -> Poll<(), S::SinkError> { + self.stream.close() + } +} + +impl<S, F, U> Stream for Then<S, F, U> + where S: Stream, + F: FnMut(Result<S::Item, S::Error>) -> U, + U: IntoFuture, +{ + type Item = U::Item; + type Error = U::Error; + + fn poll(&mut self) -> Poll<Option<U::Item>, U::Error> { + if self.future.is_none() { + let item = match self.stream.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::Ready(None)) => return Ok(Async::Ready(None)), + Ok(Async::Ready(Some(e))) => Ok(e), + Err(e) => Err(e), + }; + self.future = Some((self.f)(item).into_future()); + } + assert!(self.future.is_some()); + match self.future.as_mut().unwrap().poll() { + Ok(Async::Ready(e)) => { + self.future = None; + Ok(Async::Ready(Some(e))) + } + Err(e) => { + self.future = None; + Err(e) + } + Ok(Async::NotReady) => Ok(Async::NotReady) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/unfold.rs b/third_party/rust/futures-0.1.31/src/stream/unfold.rs new file mode 100644 index 0000000000..ac427b8c3b --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/unfold.rs @@ -0,0 +1,114 @@ +use core::mem; + +use {Future, IntoFuture, Async, Poll}; +use 
stream::Stream; + +/// Creates a `Stream` from a seed and a closure returning a `Future`. +/// +/// This function is the dual for the `Stream::fold()` adapter: while +/// `Stream::fold()` reduces a `Stream` to one single value, `unfold()` creates a +/// `Stream` from a seed value. +/// +/// `unfold()` will call the provided closure with the provided seed, then wait +/// for the returned `Future` to complete with `(a, b)`. It will then yield the +/// value `a`, and use `b` as the next internal state. +/// +/// If the closure returns `None` instead of `Some(Future)`, then the `unfold()` +/// will stop producing items and return `Ok(Async::Ready(None))` in future +/// calls to `poll()`. +/// +/// In case of error generated by the returned `Future`, the error will be +/// returned by the `Stream`. The `Stream` will then yield +/// `Ok(Async::Ready(None))` in future calls to `poll()`. +/// +/// This function can typically be used when wanting to go from the "world of +/// futures" to the "world of streams": the provided closure can build a +/// `Future` using other library functions working on futures, and `unfold()` +/// will turn it into a `Stream` by repeating the operation. 
+/// +/// # Example +/// +/// ```rust +/// use futures::stream::{self, Stream}; +/// use futures::future::{self, Future}; +/// +/// let mut stream = stream::unfold(0, |state| { +/// if state <= 2 { +/// let next_state = state + 1; +/// let yielded = state * 2; +/// let fut = future::ok::<_, u32>((yielded, next_state)); +/// Some(fut) +/// } else { +/// None +/// } +/// }); +/// +/// let result = stream.collect().wait(); +/// assert_eq!(result, Ok(vec![0, 2, 4])); +/// ``` +pub fn unfold<T, F, Fut, It>(init: T, f: F) -> Unfold<T, F, Fut> + where F: FnMut(T) -> Option<Fut>, + Fut: IntoFuture<Item = (It, T)>, +{ + Unfold { + f: f, + state: State::Ready(init), + } +} + +/// A stream which creates futures, polls them and return their result +/// +/// This stream is returned by the `futures::stream::unfold` method +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Unfold<T, F, Fut> where Fut: IntoFuture { + f: F, + state: State<T, Fut::Future>, +} + +impl <T, F, Fut, It> Stream for Unfold<T, F, Fut> + where F: FnMut(T) -> Option<Fut>, + Fut: IntoFuture<Item = (It, T)>, +{ + type Item = It; + type Error = Fut::Error; + + fn poll(&mut self) -> Poll<Option<It>, Fut::Error> { + loop { + match mem::replace(&mut self.state, State::Empty) { + // State::Empty may happen if the future returned an error + State::Empty => { return Ok(Async::Ready(None)); } + State::Ready(state) => { + match (self.f)(state) { + Some(fut) => { self.state = State::Processing(fut.into_future()); } + None => { return Ok(Async::Ready(None)); } + } + } + State::Processing(mut fut) => { + match fut.poll()? 
{ + Async:: Ready((item, next_state)) => { + self.state = State::Ready(next_state); + return Ok(Async::Ready(Some(item))); + } + Async::NotReady => { + self.state = State::Processing(fut); + return Ok(Async::NotReady); + } + } + } + } + } + } +} + +#[derive(Debug)] +enum State<T, F> where F: Future { + /// Placeholder state when doing work, or when the returned Future generated an error + Empty, + + /// Ready to generate new future; current internal state is the `T` + Ready(T), + + /// Working on a future generated previously + Processing(F), +} diff --git a/third_party/rust/futures-0.1.31/src/stream/wait.rs b/third_party/rust/futures-0.1.31/src/stream/wait.rs new file mode 100644 index 0000000000..80acb6c2a6 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/wait.rs @@ -0,0 +1,53 @@ +use stream::Stream; +use executor; + +/// A stream combinator which converts an asynchronous stream to a **blocking +/// iterator**. +/// +/// Created by the `Stream::wait` method, this function transforms any stream +/// into a standard iterator. This is implemented by blocking the current thread +/// while items on the underlying stream aren't ready yet. +#[must_use = "iterators do nothing unless advanced"] +#[derive(Debug)] +pub struct Wait<S> { + stream: executor::Spawn<S>, +} + +impl<S> Wait<S> { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + pub fn get_ref(&self) -> &S { + self.stream.get_ref() + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + pub fn get_mut(&mut self) -> &mut S { + self.stream.get_mut() + } + + /// Consumes this combinator, returning the underlying stream. 
+ /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + pub fn into_inner(self) -> S { + self.stream.into_inner() + } +} + +pub fn new<S: Stream>(s: S) -> Wait<S> { + Wait { + stream: executor::spawn(s), + } +} + +impl<S: Stream> Iterator for Wait<S> { + type Item = Result<S::Item, S::Error>; + + fn next(&mut self) -> Option<Self::Item> { + self.stream.wait_stream() + } +} diff --git a/third_party/rust/futures-0.1.31/src/stream/zip.rs b/third_party/rust/futures-0.1.31/src/stream/zip.rs new file mode 100644 index 0000000000..17e3c69ffe --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/stream/zip.rs @@ -0,0 +1,59 @@ +use {Async, Poll}; +use stream::{Stream, Fuse}; + +/// An adapter for merging the output of two streams. +/// +/// The merged stream produces items from one or both of the underlying +/// streams as they become available. Errors, however, are not merged: you +#[derive(Debug)] +/// get at most one error at a time. +#[must_use = "streams do nothing unless polled"] +pub struct Zip<S1: Stream, S2: Stream> { + stream1: Fuse<S1>, + stream2: Fuse<S2>, + queued1: Option<S1::Item>, + queued2: Option<S2::Item>, +} + +pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Zip<S1, S2> + where S1: Stream, S2: Stream<Error = S1::Error> +{ + Zip { + stream1: stream1.fuse(), + stream2: stream2.fuse(), + queued1: None, + queued2: None, + } +} + +impl<S1, S2> Stream for Zip<S1, S2> + where S1: Stream, S2: Stream<Error = S1::Error> +{ + type Item = (S1::Item, S2::Item); + type Error = S1::Error; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + if self.queued1.is_none() { + match self.stream1.poll()? { + Async::Ready(Some(item1)) => self.queued1 = Some(item1), + Async::Ready(None) | Async::NotReady => {} + } + } + if self.queued2.is_none() { + match self.stream2.poll()? 
{ + Async::Ready(Some(item2)) => self.queued2 = Some(item2), + Async::Ready(None) | Async::NotReady => {} + } + } + + if self.queued1.is_some() && self.queued2.is_some() { + let pair = (self.queued1.take().unwrap(), + self.queued2.take().unwrap()); + Ok(Async::Ready(Some(pair))) + } else if self.stream1.is_done() || self.stream2.is_done() { + Ok(Async::Ready(None)) + } else { + Ok(Async::NotReady) + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/sync/bilock.rs b/third_party/rust/futures-0.1.31/src/sync/bilock.rs new file mode 100644 index 0000000000..af9e1eeb2c --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sync/bilock.rs @@ -0,0 +1,298 @@ +use std::any::Any; +use std::boxed::Box; +use std::cell::UnsafeCell; +use std::error::Error; +use std::fmt; +use std::mem; +use std::ops::{Deref, DerefMut}; +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; + +use {Async, Future, Poll}; +use task::{self, Task}; + +/// A type of futures-powered synchronization primitive which is a mutex between +/// two possible owners. +/// +/// This primitive is not as generic as a full-blown mutex but is sufficient for +/// many use cases where there are only two possible owners of a resource. The +/// implementation of `BiLock` can be more optimized for just the two possible +/// owners. +/// +/// Note that it's possible to use this lock through a poll-style interface with +/// the `poll_lock` method but you can also use it as a future with the `lock` +/// method that consumes a `BiLock` and returns a future that will resolve when +/// it's locked. +/// +/// A `BiLock` is typically used for "split" operations where data which serves +/// two purposes wants to be split into two to be worked with separately. For +/// example a TCP stream could be both a reader and a writer or a framing layer +/// could be both a stream and a sink for messages. 
A `BiLock` enables splitting +/// these two and then using each independently in a futures-powered fashion. +#[derive(Debug)] +pub struct BiLock<T> { + inner: Arc<Inner<T>>, +} + +#[derive(Debug)] +struct Inner<T> { + state: AtomicUsize, + inner: Option<UnsafeCell<T>>, +} + +unsafe impl<T: Send> Send for Inner<T> {} +unsafe impl<T: Send> Sync for Inner<T> {} + +impl<T> BiLock<T> { + /// Creates a new `BiLock` protecting the provided data. + /// + /// Two handles to the lock are returned, and these are the only two handles + /// that will ever be available to the lock. These can then be sent to separate + /// tasks to be managed there. + pub fn new(t: T) -> (BiLock<T>, BiLock<T>) { + let inner = Arc::new(Inner { + state: AtomicUsize::new(0), + inner: Some(UnsafeCell::new(t)), + }); + + (BiLock { inner: inner.clone() }, BiLock { inner: inner }) + } + + /// Attempt to acquire this lock, returning `NotReady` if it can't be + /// acquired. + /// + /// This function will acquire the lock in a nonblocking fashion, returning + /// immediately if the lock is already held. If the lock is successfully + /// acquired then `Async::Ready` is returned with a value that represents + /// the locked value (and can be used to access the protected data). The + /// lock is unlocked when the returned `BiLockGuard` is dropped. + /// + /// If the lock is already held then this function will return + /// `Async::NotReady`. In this case the current task will also be scheduled + /// to receive a notification when the lock would otherwise become + /// available. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. + pub fn poll_lock(&self) -> Async<BiLockGuard<T>> { + loop { + match self.inner.state.swap(1, SeqCst) { + // Woohoo, we grabbed the lock! 
+ 0 => return Async::Ready(BiLockGuard { inner: self }), + + // Oops, someone else has locked the lock + 1 => {} + + // A task was previously blocked on this lock, likely our task, + // so we need to update that task. + n => unsafe { + drop(Box::from_raw(n as *mut Task)); + } + } + + let me = Box::new(task::current()); + let me = Box::into_raw(me) as usize; + + match self.inner.state.compare_exchange(1, me, SeqCst, SeqCst) { + // The lock is still locked, but we've now parked ourselves, so + // just report that we're scheduled to receive a notification. + Ok(_) => return Async::NotReady, + + // Oops, looks like the lock was unlocked after our swap above + // and before the compare_exchange. Deallocate what we just + // allocated and go through the loop again. + Err(0) => unsafe { + drop(Box::from_raw(me as *mut Task)); + }, + + // The top of this loop set the previous state to 1, so if we + // failed the CAS above then it's because the previous value was + // *not* zero or one. This indicates that a task was blocked, + // but we're trying to acquire the lock and there's only one + // other reference of the lock, so it should be impossible for + // that task to ever block itself. + Err(n) => panic!("invalid state: {}", n), + } + } + } + + /// Perform a "blocking lock" of this lock, consuming this lock handle and + /// returning a future to the acquired lock. + /// + /// This function consumes the `BiLock<T>` and returns a sentinel future, + /// `BiLockAcquire<T>`. The returned future will resolve to + /// `BiLockAcquired<T>` which represents a locked lock similarly to + /// `BiLockGuard<T>`. + /// + /// Note that the returned future will never resolve to an error. + pub fn lock(self) -> BiLockAcquire<T> { + BiLockAcquire { + inner: Some(self), + } + } + + /// Attempts to put the two "halves" of a `BiLock<T>` back together and + /// recover the original value. Succeeds only if the two `BiLock<T>`s + /// originated from the same call to `BiLock::new`. 
+ pub fn reunite(self, other: Self) -> Result<T, ReuniteError<T>> { + if &*self.inner as *const _ == &*other.inner as *const _ { + drop(other); + let inner = Arc::try_unwrap(self.inner) + .ok() + .expect("futures: try_unwrap failed in BiLock<T>::reunite"); + Ok(unsafe { inner.into_inner() }) + } else { + Err(ReuniteError(self, other)) + } + } + + fn unlock(&self) { + match self.inner.state.swap(0, SeqCst) { + // we've locked the lock, shouldn't be possible for us to see an + // unlocked lock. + 0 => panic!("invalid unlocked state"), + + // Ok, no one else tried to get the lock, we're done. + 1 => {} + + // Another task has parked themselves on this lock, let's wake them + // up as its now their turn. + n => unsafe { + Box::from_raw(n as *mut Task).notify(); + } + } + } +} + +impl<T> Inner<T> { + unsafe fn into_inner(mut self) -> T { + mem::replace(&mut self.inner, None).unwrap().into_inner() + } +} + +impl<T> Drop for Inner<T> { + fn drop(&mut self) { + assert_eq!(self.state.load(SeqCst), 0); + } +} + +/// Error indicating two `BiLock<T>`s were not two halves of a whole, and +/// thus could not be `reunite`d. +pub struct ReuniteError<T>(pub BiLock<T>, pub BiLock<T>); + +impl<T> fmt::Debug for ReuniteError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_tuple("ReuniteError") + .field(&"...") + .finish() + } +} + +impl<T> fmt::Display for ReuniteError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "tried to reunite two BiLocks that don't form a pair") + } +} + +impl<T: Any> Error for ReuniteError<T> { + fn description(&self) -> &str { + "tried to reunite two BiLocks that don't form a pair" + } +} + +/// Returned RAII guard from the `poll_lock` method. +/// +/// This structure acts as a sentinel to the data in the `BiLock<T>` itself, +/// implementing `Deref` and `DerefMut` to `T`. When dropped, the lock will be +/// unlocked. 
+#[derive(Debug)] +pub struct BiLockGuard<'a, T: 'a> { + inner: &'a BiLock<T>, +} + +impl<'a, T> Deref for BiLockGuard<'a, T> { + type Target = T; + fn deref(&self) -> &T { + unsafe { &*self.inner.inner.inner.as_ref().unwrap().get() } + } +} + +impl<'a, T> DerefMut for BiLockGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.inner.inner.inner.as_ref().unwrap().get() } + } +} + +impl<'a, T> Drop for BiLockGuard<'a, T> { + fn drop(&mut self) { + self.inner.unlock(); + } +} + +/// Future returned by `BiLock::lock` which will resolve when the lock is +/// acquired. +#[derive(Debug)] +pub struct BiLockAcquire<T> { + inner: Option<BiLock<T>>, +} + +impl<T> Future for BiLockAcquire<T> { + type Item = BiLockAcquired<T>; + type Error = (); + + fn poll(&mut self) -> Poll<BiLockAcquired<T>, ()> { + match self.inner.as_ref().expect("cannot poll after Ready").poll_lock() { + Async::Ready(r) => { + mem::forget(r); + } + Async::NotReady => return Ok(Async::NotReady), + } + Ok(Async::Ready(BiLockAcquired { inner: self.inner.take() })) + } +} + +/// Resolved value of the `BiLockAcquire<T>` future. +/// +/// This value, like `BiLockGuard<T>`, is a sentinel to the value `T` through +/// implementations of `Deref` and `DerefMut`. When dropped will unlock the +/// lock, and the original unlocked `BiLock<T>` can be recovered through the +/// `unlock` method. +#[derive(Debug)] +pub struct BiLockAcquired<T> { + inner: Option<BiLock<T>>, +} + +impl<T> BiLockAcquired<T> { + /// Recovers the original `BiLock<T>`, unlocking this lock. 
+ pub fn unlock(mut self) -> BiLock<T> { + let bi_lock = self.inner.take().unwrap(); + + bi_lock.unlock(); + + bi_lock + } +} + +impl<T> Deref for BiLockAcquired<T> { + type Target = T; + fn deref(&self) -> &T { + unsafe { &*self.inner.as_ref().unwrap().inner.inner.as_ref().unwrap().get() } + } +} + +impl<T> DerefMut for BiLockAcquired<T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.inner.as_mut().unwrap().inner.inner.as_ref().unwrap().get() } + } +} + +impl<T> Drop for BiLockAcquired<T> { + fn drop(&mut self) { + if let Some(ref bi_lock) = self.inner { + bi_lock.unlock(); + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/sync/mod.rs b/third_party/rust/futures-0.1.31/src/sync/mod.rs new file mode 100644 index 0000000000..0a46e9afbe --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sync/mod.rs @@ -0,0 +1,17 @@ +//! Future-aware synchronization +//! +//! This module, which is modeled after `std::sync`, contains user-space +//! synchronization tools that work with futures, streams and sinks. In +//! particular, these synchronizers do *not* block physical OS threads, but +//! instead work at the task level. +//! +//! More information and examples of how to use these synchronization primitives +//! can be found [online at tokio.rs]. +//! +//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper-futures/synchronization/ + +pub mod oneshot; +pub mod mpsc; +mod bilock; + +pub use self::bilock::{BiLock, BiLockGuard, BiLockAcquire, BiLockAcquired}; diff --git a/third_party/rust/futures-0.1.31/src/sync/mpsc/mod.rs b/third_party/rust/futures-0.1.31/src/sync/mpsc/mod.rs new file mode 100644 index 0000000000..31d2320ab6 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sync/mpsc/mod.rs @@ -0,0 +1,1187 @@ +//! A multi-producer, single-consumer, futures-aware, FIFO queue with back pressure. +//! +//! A channel can be used as a communication primitive between tasks running on +//! `futures-rs` executors. 
Channel creation provides `Receiver` and `Sender` +//! handles. `Receiver` implements `Stream` and allows a task to read values +//! out of the channel. If there is no message to read from the channel, the +//! current task will be notified when a new value is sent. `Sender` implements +//! the `Sink` trait and allows a task to send messages into the channel. If +//! the channel is at capacity, then send will be rejected and the task will be +//! notified when additional capacity is available. +//! +//! # Disconnection +//! +//! When all `Sender` handles have been dropped, it is no longer possible to +//! send values into the channel. This is considered the termination event of +//! the stream. As such, `Sender::poll` will return `Ok(Ready(None))`. +//! +//! If the receiver handle is dropped, then messages can no longer be read out +//! of the channel. In this case, a `send` will result in an error. +//! +//! # Clean Shutdown +//! +//! If the `Receiver` is simply dropped, then it is possible for there to be +//! messages still in the channel that will not be processed. As such, it is +//! usually desirable to perform a "clean" shutdown. To do this, the receiver +//! will first call `close`, which will prevent any further messages to be sent +//! into the channel. Then, the receiver consumes the channel to completion, at +//! which point the receiver can be dropped. + +// At the core, the channel uses an atomic FIFO queue for message passing. This +// queue is used as the primary coordination primitive. In order to enforce +// capacity limits and handle back pressure, a secondary FIFO queue is used to +// send parked task handles. +// +// The general idea is that the channel is created with a `buffer` size of `n`. +// The channel capacity is `n + num-senders`. Each sender gets one "guaranteed" +// slot to hold a message. This allows `Sender` to know for a fact that a send +// will succeed *before* starting to do the actual work of sending the value. 
+// Since most of this work is lock-free, once the work starts, it is impossible +// to safely revert. +// +// If the sender is unable to process a send operation, then the current +// task is parked and the handle is sent on the parked task queue. +// +// Note that the implementation guarantees that the channel capacity will never +// exceed the configured limit, however there is no *strict* guarantee that the +// receiver will wake up a parked task *immediately* when a slot becomes +// available. However, it will almost always unpark a task when a slot becomes +// available and it is *guaranteed* that a sender will be unparked when the +// message that caused the sender to become parked is read out of the channel. +// +// The steps for sending a message are roughly: +// +// 1) Increment the channel message count +// 2) If the channel is at capacity, push the task handle onto the wait queue +// 3) Push the message onto the message queue. +// +// The steps for receiving a message are roughly: +// +// 1) Pop a message from the message queue +// 2) Pop a task handle from the wait queue +// 3) Decrement the channel message count. +// +// It's important for the order of operations on lock-free structures to happen +// in reverse order between the sender and receiver. This makes the message +// queue the primary coordination structure and establishes the necessary +// happens-before semantics required for the acquire / release semantics used +// by the queue structure. 
+ +use std::fmt; +use std::error::Error; +use std::any::Any; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; +use std::sync::{Arc, Mutex}; +use std::thread; +use std::usize; + +use sync::mpsc::queue::{Queue, PopResult}; +use sync::oneshot; +use task::{self, Task}; +use future::Executor; +use sink::SendAll; +use resultstream::{self, Results}; +use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream}; + +mod queue; + +/// The transmission end of a channel which is used to send values. +/// +/// This is created by the `channel` method. +#[derive(Debug)] +pub struct Sender<T> { + // Channel state shared between the sender and receiver. + inner: Arc<Inner<T>>, + + // Handle to the task that is blocked on this sender. This handle is sent + // to the receiver half in order to be notified when the sender becomes + // unblocked. + sender_task: Arc<Mutex<SenderTask>>, + + // True if the sender might be blocked. This is an optimization to avoid + // having to lock the mutex most of the time. + maybe_parked: bool, +} + +/// The transmission end of a channel which is used to send values. +/// +/// This is created by the `unbounded` method. +#[derive(Debug)] +pub struct UnboundedSender<T>(Sender<T>); + +trait AssertKinds: Send + Sync + Clone {} +impl AssertKinds for UnboundedSender<u32> {} + + +/// The receiving end of a channel which implements the `Stream` trait. +/// +/// This is a concrete implementation of a stream which can be used to represent +/// a stream of values being computed elsewhere. This is created by the +/// `channel` method. +#[derive(Debug)] +pub struct Receiver<T> { + inner: Arc<Inner<T>>, +} + +/// The receiving end of a channel which implements the `Stream` trait. +/// +/// This is a concrete implementation of a stream which can be used to represent +/// a stream of values being computed elsewhere. This is created by the +/// `unbounded` method. 
+#[derive(Debug)] +pub struct UnboundedReceiver<T>(Receiver<T>); + +/// Error type for sending, used when the receiving end of a channel is +/// dropped +#[derive(Clone, PartialEq, Eq)] +pub struct SendError<T>(T); + +/// Error type returned from `try_send` +#[derive(Clone, PartialEq, Eq)] +pub struct TrySendError<T> { + kind: TrySendErrorKind<T>, +} + +#[derive(Clone, PartialEq, Eq)] +enum TrySendErrorKind<T> { + Full(T), + Disconnected(T), +} + +impl<T> fmt::Debug for SendError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_tuple("SendError") + .field(&"...") + .finish() + } +} + +impl<T> fmt::Display for SendError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "send failed because receiver is gone") + } +} + +impl<T: Any> Error for SendError<T> +{ + fn description(&self) -> &str { + "send failed because receiver is gone" + } +} + +impl<T> SendError<T> { + /// Returns the message that was attempted to be sent but failed. + pub fn into_inner(self) -> T { + self.0 + } +} + +impl<T> fmt::Debug for TrySendError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_tuple("TrySendError") + .field(&"...") + .finish() + } +} + +impl<T> fmt::Display for TrySendError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + if self.is_full() { + write!(fmt, "send failed because channel is full") + } else { + write!(fmt, "send failed because receiver is gone") + } + } +} + +impl<T: Any> Error for TrySendError<T> { + fn description(&self) -> &str { + if self.is_full() { + "send failed because channel is full" + } else { + "send failed because receiver is gone" + } + } +} + +impl<T> TrySendError<T> { + /// Returns true if this error is a result of the channel being full + pub fn is_full(&self) -> bool { + use self::TrySendErrorKind::*; + + match self.kind { + Full(_) => true, + _ => false, + } + } + + /// Returns true if this error is a result of the receiver being dropped + pub fn 
is_disconnected(&self) -> bool { + use self::TrySendErrorKind::*; + + match self.kind { + Disconnected(_) => true, + _ => false, + } + } + + /// Returns the message that was attempted to be sent but failed. + pub fn into_inner(self) -> T { + use self::TrySendErrorKind::*; + + match self.kind { + Full(v) | Disconnected(v) => v, + } + } +} + +#[derive(Debug)] +struct Inner<T> { + // Max buffer size of the channel. If `None` then the channel is unbounded. + buffer: Option<usize>, + + // Internal channel state. Consists of the number of messages stored in the + // channel as well as a flag signalling that the channel is closed. + state: AtomicUsize, + + // Atomic, FIFO queue used to send messages to the receiver + message_queue: Queue<Option<T>>, + + // Atomic, FIFO queue used to send parked task handles to the receiver. + parked_queue: Queue<Arc<Mutex<SenderTask>>>, + + // Number of senders in existence + num_senders: AtomicUsize, + + // Handle to the receiver's task. + recv_task: Mutex<ReceiverTask>, +} + +// Struct representation of `Inner::state`. +#[derive(Debug, Clone, Copy)] +struct State { + // `true` when the channel is open + is_open: bool, + + // Number of messages in the channel + num_messages: usize, +} + +#[derive(Debug)] +struct ReceiverTask { + unparked: bool, + task: Option<Task>, +} + +// Returned from Receiver::try_park() +enum TryPark { + Parked, + Closed, + NotEmpty, +} + +// The `is_open` flag is stored in the left-most bit of `Inner::state` +const OPEN_MASK: usize = usize::MAX - (usize::MAX >> 1); + +// When a new channel is created, it is created in the open state with no +// pending messages. +const INIT_STATE: usize = OPEN_MASK; + +// The maximum number of messages that a channel can track is `usize::MAX >> 1` +const MAX_CAPACITY: usize = !(OPEN_MASK); + +// The maximum requested buffer size must be less than the maximum capacity of +// a channel. This is because each sender gets a guaranteed slot. 
+const MAX_BUFFER: usize = MAX_CAPACITY >> 1; + +// Sent to the consumer to wake up blocked producers +#[derive(Debug)] +struct SenderTask { + task: Option<Task>, + is_parked: bool, +} + +impl SenderTask { + fn new() -> Self { + SenderTask { + task: None, + is_parked: false, + } + } + + fn notify(&mut self) { + self.is_parked = false; + + if let Some(task) = self.task.take() { + task.notify(); + } + } +} + +/// Creates an in-memory channel implementation of the `Stream` trait with +/// bounded capacity. +/// +/// This method creates a concrete implementation of the `Stream` trait which +/// can be used to send values across threads in a streaming fashion. This +/// channel is unique in that it implements back pressure to ensure that the +/// sender never outpaces the receiver. The channel capacity is equal to +/// `buffer + num-senders`. In other words, each sender gets a guaranteed slot +/// in the channel capacity, and on top of that there are `buffer` "first come, +/// first serve" slots available to all senders. +/// +/// The `Receiver` returned implements the `Stream` trait and has access to any +/// number of the associated combinators for transforming the result. +pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) { + // Check that the requested buffer size does not exceed the maximum buffer + // size permitted by the system. + assert!(buffer < MAX_BUFFER, "requested buffer size too large"); + channel2(Some(buffer)) +} + +/// Creates an in-memory channel implementation of the `Stream` trait with +/// unbounded capacity. +/// +/// This method creates a concrete implementation of the `Stream` trait which +/// can be used to send values across threads in a streaming fashion. A `send` +/// on this channel will always succeed as long as the receive half has not +/// been closed. If the receiver falls behind, messages will be buffered +/// internally. +/// +/// **Note** that the amount of available system memory is an implicit bound to +/// the channel. 
Using an `unbounded` channel has the ability of causing the +/// process to run out of memory. In this case, the process will be aborted. +pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) { + let (tx, rx) = channel2(None); + (UnboundedSender(tx), UnboundedReceiver(rx)) +} + +fn channel2<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) { + let inner = Arc::new(Inner { + buffer: buffer, + state: AtomicUsize::new(INIT_STATE), + message_queue: Queue::new(), + parked_queue: Queue::new(), + num_senders: AtomicUsize::new(1), + recv_task: Mutex::new(ReceiverTask { + unparked: false, + task: None, + }), + }); + + let tx = Sender { + inner: inner.clone(), + sender_task: Arc::new(Mutex::new(SenderTask::new())), + maybe_parked: false, + }; + + let rx = Receiver { + inner: inner, + }; + + (tx, rx) +} + +/* + * + * ===== impl Sender ===== + * + */ + +impl<T> Sender<T> { + /// Attempts to send a message on this `Sender<T>` without blocking. + /// + /// This function, unlike `start_send`, is safe to call whether it's being + /// called on a task or not. Note that this function, however, will *not* + /// attempt to block the current task if the message cannot be sent. + /// + /// It is not recommended to call this function from inside of a future, + /// only from an external thread where you've otherwise arranged to be + /// notified when the channel is no longer full. 
+ pub fn try_send(&mut self, msg: T) -> Result<(), TrySendError<T>> { + // If the sender is currently blocked, reject the message + if !self.poll_unparked(false).is_ready() { + return Err(TrySendError { + kind: TrySendErrorKind::Full(msg), + }); + } + + // The channel has capacity to accept the message, so send it + self.do_send(Some(msg), false) + .map_err(|SendError(v)| { + TrySendError { + kind: TrySendErrorKind::Disconnected(v), + } + }) + } + + // Do the send without failing + // None means close + fn do_send(&mut self, msg: Option<T>, do_park: bool) -> Result<(), SendError<T>> { + // First, increment the number of messages contained by the channel. + // This operation will also atomically determine if the sender task + // should be parked. + // + // None is returned in the case that the channel has been closed by the + // receiver. This happens when `Receiver::close` is called or the + // receiver is dropped. + let park_self = match self.inc_num_messages(msg.is_none()) { + Some(park_self) => park_self, + None => { + // The receiver has closed the channel. Only abort if actually + // sending a message. It is important that the stream + // termination (None) is always sent. This technically means + // that it is possible for the queue to contain the following + // number of messages: + // + // num-senders + buffer + 1 + // + if let Some(msg) = msg { + return Err(SendError(msg)); + } else { + return Ok(()); + } + } + }; + + // If the channel has reached capacity, then the sender task needs to + // be parked. This will send the task handle on the parked task queue. + // + // However, when `do_send` is called while dropping the `Sender`, + // `task::current()` can't be called safely. In this case, in order to + // maintain internal consistency, a blank message is pushed onto the + // parked task queue. + if park_self { + self.park(do_park); + } + + self.queue_push_and_signal(msg); + + Ok(()) + } + + // Do the send without parking current task. 
+ // + // To be called from unbounded sender. + fn do_send_nb(&self, msg: T) -> Result<(), SendError<T>> { + match self.inc_num_messages(false) { + Some(park_self) => assert!(!park_self), + None => return Err(SendError(msg)), + }; + + self.queue_push_and_signal(Some(msg)); + + Ok(()) + } + + // Push message to the queue and signal to the receiver + fn queue_push_and_signal(&self, msg: Option<T>) { + // Push the message onto the message queue + self.inner.message_queue.push(msg); + + // Signal to the receiver that a message has been enqueued. If the + // receiver is parked, this will unpark the task. + self.signal(); + } + + // Increment the number of queued messages. Returns if the sender should + // block. + fn inc_num_messages(&self, close: bool) -> Option<bool> { + let mut curr = self.inner.state.load(SeqCst); + + loop { + let mut state = decode_state(curr); + + // The receiver end closed the channel. + if !state.is_open { + return None; + } + + // This probably is never hit? Odds are the process will run out of + // memory first. It may be worth to return something else in this + // case? + assert!(state.num_messages < MAX_CAPACITY, "buffer space exhausted; \ + sending this messages would overflow the state"); + + state.num_messages += 1; + + // The channel is closed by all sender handles being dropped. + if close { + state.is_open = false; + } + + let next = encode_state(&state); + match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) { + Ok(_) => { + // Block if the current number of pending messages has exceeded + // the configured buffer size + let park_self = match self.inner.buffer { + Some(buffer) => state.num_messages > buffer, + None => false, + }; + + return Some(park_self) + } + Err(actual) => curr = actual, + } + } + } + + // Signal to the receiver task that a message has been enqueued + fn signal(&self) { + // TODO + // This logic can probably be improved by guarding the lock with an + // atomic. 
+ // + // Do this step first so that the lock is dropped when + // `unpark` is called + let task = { + let mut recv_task = self.inner.recv_task.lock().unwrap(); + + // If the receiver has already been unparked, then there is nothing + // more to do + if recv_task.unparked { + return; + } + + // Setting this flag enables the receiving end to detect that + // an unpark event happened in order to avoid unnecessarily + // parking. + recv_task.unparked = true; + recv_task.task.take() + }; + + if let Some(task) = task { + task.notify(); + } + } + + fn park(&mut self, can_park: bool) { + // TODO: clean up internal state if the task::current will fail + + let task = if can_park { + Some(task::current()) + } else { + None + }; + + { + let mut sender = self.sender_task.lock().unwrap(); + sender.task = task; + sender.is_parked = true; + } + + // Send handle over queue + let t = self.sender_task.clone(); + self.inner.parked_queue.push(t); + + // Check to make sure we weren't closed after we sent our task on the + // queue + let state = decode_state(self.inner.state.load(SeqCst)); + self.maybe_parked = state.is_open; + } + + /// Polls the channel to determine if there is guaranteed to be capacity to send at least one + /// item without waiting. + /// + /// Returns `Ok(Async::Ready(_))` if there is sufficient capacity, or returns + /// `Ok(Async::NotReady)` if the channel is not guaranteed to have capacity. Returns + /// `Err(SendError(_))` if the receiver has been dropped. + /// + /// # Panics + /// + /// This method will panic if called from outside the context of a task or future. + pub fn poll_ready(&mut self) -> Poll<(), SendError<()>> { + let state = decode_state(self.inner.state.load(SeqCst)); + if !state.is_open { + return Err(SendError(())); + } + + Ok(self.poll_unparked(true)) + } + + /// Returns whether this channel is closed without needing a context. 
+ pub fn is_closed(&self) -> bool { + !decode_state(self.inner.state.load(SeqCst)).is_open + } + + fn poll_unparked(&mut self, do_park: bool) -> Async<()> { + // First check the `maybe_parked` variable. This avoids acquiring the + // lock in most cases + if self.maybe_parked { + // Get a lock on the task handle + let mut task = self.sender_task.lock().unwrap(); + + if !task.is_parked { + self.maybe_parked = false; + return Async::Ready(()) + } + + // At this point, an unpark request is pending, so there will be an + // unpark sometime in the future. We just need to make sure that + // the correct task will be notified. + // + // Update the task in case the `Sender` has been moved to another + // task + task.task = if do_park { + Some(task::current()) + } else { + None + }; + + Async::NotReady + } else { + Async::Ready(()) + } + } +} + +impl<T> Sink for Sender<T> { + type SinkItem = T; + type SinkError = SendError<T>; + + fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { + // If the sender is currently blocked, reject the message before doing + // any work. + if !self.poll_unparked(true).is_ready() { + return Ok(AsyncSink::NotReady(msg)); + } + + // The channel has capacity to accept the message, so send it. + self.do_send(Some(msg), true)?; + + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), SendError<T>> { + self.poll_ready() + // At this point, the value cannot be returned and `SendError` + // cannot be created with a `T` without breaking backwards + // comptibility. This means we cannot return an error. + // + // That said, there is also no guarantee that a `poll_complete` + // returning `Ok` implies the receiver sees the message. + .or_else(|_| Ok(().into())) + } + + fn close(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } +} + +impl<T> UnboundedSender<T> { + /// Returns whether this channel is closed without needing a context. 
+ pub fn is_closed(&self) -> bool { + self.0.is_closed() + } + + /// Sends the provided message along this channel. + /// + /// This is an unbounded sender, so this function differs from `Sink::send` + /// by ensuring the return type reflects that the channel is always ready to + /// receive messages. + #[deprecated(note = "renamed to `unbounded_send`")] + #[doc(hidden)] + pub fn send(&self, msg: T) -> Result<(), SendError<T>> { + self.unbounded_send(msg) + } + + /// Sends the provided message along this channel. + /// + /// This is an unbounded sender, so this function differs from `Sink::send` + /// by ensuring the return type reflects that the channel is always ready to + /// receive messages. + pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> { + self.0.do_send_nb(msg) + } +} + +impl<T> Sink for UnboundedSender<T> { + type SinkItem = T; + type SinkError = SendError<T>; + + fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { + self.0.start_send(msg) + } + + fn poll_complete(&mut self) -> Poll<(), SendError<T>> { + self.0.poll_complete() + } + + fn close(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } +} + +impl<'a, T> Sink for &'a UnboundedSender<T> { + type SinkItem = T; + type SinkError = SendError<T>; + + fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { + self.0.do_send_nb(msg)?; + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } + + fn close(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } +} + +impl<T> Clone for UnboundedSender<T> { + fn clone(&self) -> UnboundedSender<T> { + UnboundedSender(self.0.clone()) + } +} + + +impl<T> Clone for Sender<T> { + fn clone(&self) -> Sender<T> { + // Since this atomic op isn't actually guarding any memory and we don't + // care about any orderings besides the ordering on the single atomic + // variable, a relaxed ordering is acceptable. 
+ let mut curr = self.inner.num_senders.load(SeqCst); + + loop { + // If the maximum number of senders has been reached, then fail + if curr == self.inner.max_senders() { + panic!("cannot clone `Sender` -- too many outstanding senders"); + } + + debug_assert!(curr < self.inner.max_senders()); + + let next = curr + 1; + let actual = self.inner.num_senders.compare_and_swap(curr, next, SeqCst); + + // The ABA problem doesn't matter here. We only care that the + // number of senders never exceeds the maximum. + if actual == curr { + return Sender { + inner: self.inner.clone(), + sender_task: Arc::new(Mutex::new(SenderTask::new())), + maybe_parked: false, + }; + } + + curr = actual; + } + } +} + +impl<T> Drop for Sender<T> { + fn drop(&mut self) { + // Ordering between variables don't matter here + let prev = self.inner.num_senders.fetch_sub(1, SeqCst); + + if prev == 1 { + let _ = self.do_send(None, false); + } + } +} + +/* + * + * ===== impl Receiver ===== + * + */ + +impl<T> Receiver<T> { + /// Closes the receiving half + /// + /// This prevents any further messages from being sent on the channel while + /// still enabling the receiver to drain messages that are buffered. + pub fn close(&mut self) { + let mut curr = self.inner.state.load(SeqCst); + + loop { + let mut state = decode_state(curr); + + if !state.is_open { + break + } + + state.is_open = false; + + let next = encode_state(&state); + match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) { + Ok(_) => break, + Err(actual) => curr = actual, + } + } + + // Wake up any threads waiting as they'll see that we've closed the + // channel and will continue on their merry way. 
+ loop { + match unsafe { self.inner.parked_queue.pop() } { + PopResult::Data(task) => { + task.lock().unwrap().notify(); + } + PopResult::Empty => break, + PopResult::Inconsistent => thread::yield_now(), + } + } + } + + fn next_message(&mut self) -> Async<Option<T>> { + // Pop off a message + loop { + match unsafe { self.inner.message_queue.pop() } { + PopResult::Data(msg) => { + // If there are any parked task handles in the parked queue, + // pop one and unpark it. + self.unpark_one(); + // Decrement number of messages + self.dec_num_messages(); + + return Async::Ready(msg); + } + PopResult::Empty => { + // The queue is empty, return NotReady + return Async::NotReady; + } + PopResult::Inconsistent => { + // Inconsistent means that there will be a message to pop + // in a short time. This branch can only be reached if + // values are being produced from another thread, so there + // are a few ways that we can deal with this: + // + // 1) Spin + // 2) thread::yield_now() + // 3) task::current().unwrap() & return NotReady + // + // For now, thread::yield_now() is used, but it would + // probably be better to spin a few times then yield. + thread::yield_now(); + } + } + } + } + + // Unpark a single task handle if there is one pending in the parked queue + fn unpark_one(&mut self) { + loop { + match unsafe { self.inner.parked_queue.pop() } { + PopResult::Data(task) => { + task.lock().unwrap().notify(); + return; + } + PopResult::Empty => { + // Queue empty, no task to wake up. + return; + } + PopResult::Inconsistent => { + // Same as above + thread::yield_now(); + } + } + } + } + + // Try to park the receiver task + fn try_park(&self) -> TryPark { + let curr = self.inner.state.load(SeqCst); + let state = decode_state(curr); + + // If the channel is closed, then there is no need to park. 
+ if state.is_closed() { + return TryPark::Closed; + } + + // First, track the task in the `recv_task` slot + let mut recv_task = self.inner.recv_task.lock().unwrap(); + + if recv_task.unparked { + // Consume the `unpark` signal without actually parking + recv_task.unparked = false; + return TryPark::NotEmpty; + } + + recv_task.task = Some(task::current()); + TryPark::Parked + } + + fn dec_num_messages(&self) { + let mut curr = self.inner.state.load(SeqCst); + + loop { + let mut state = decode_state(curr); + + state.num_messages -= 1; + + let next = encode_state(&state); + match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) { + Ok(_) => break, + Err(actual) => curr = actual, + } + } + } +} + +impl<T> Stream for Receiver<T> { + type Item = T; + type Error = (); + + fn poll(&mut self) -> Poll<Option<T>, ()> { + loop { + // Try to read a message off of the message queue. + match self.next_message() { + Async::Ready(msg) => return Ok(Async::Ready(msg)), + Async::NotReady => { + // There are no messages to read, in this case, attempt to + // park. The act of parking will verify that the channel is + // still empty after the park operation has completed. + match self.try_park() { + TryPark::Parked => { + // The task was parked, and the channel is still + // empty, return NotReady. + return Ok(Async::NotReady); + } + TryPark::Closed => { + // The channel is closed, there will be no further + // messages. + return Ok(Async::Ready(None)); + } + TryPark::NotEmpty => { + // A message has been sent while attempting to + // park. Loop again, the next iteration is + // guaranteed to get the message. 
+ continue; + } + } + } + } + } + } +} + +impl<T> Drop for Receiver<T> { + fn drop(&mut self) { + // Drain the channel of all pending messages + self.close(); + + loop { + match self.next_message() { + Async::Ready(_) => {} + Async::NotReady => { + let curr = self.inner.state.load(SeqCst); + let state = decode_state(curr); + + // If the channel is closed, then there is no need to park. + if state.is_closed() { + return; + } + + // TODO: Spinning isn't ideal, it might be worth + // investigating using a condvar or some other strategy + // here. That said, if this case is hit, then another thread + // is about to push the value into the queue and this isn't + // the only spinlock in the impl right now. + thread::yield_now(); + } + } + } + } +} + +impl<T> UnboundedReceiver<T> { + /// Closes the receiving half + /// + /// This prevents any further messages from being sent on the channel while + /// still enabling the receiver to drain messages that are buffered. + pub fn close(&mut self) { + self.0.close(); + } +} + +impl<T> Stream for UnboundedReceiver<T> { + type Item = T; + type Error = (); + + fn poll(&mut self) -> Poll<Option<T>, ()> { + self.0.poll() + } +} + +/// Handle returned from the `spawn` function. +/// +/// This handle is a stream that proxies a stream on a separate `Executor`. +/// Created through the `mpsc::spawn` function, this handle will produce +/// the same values as the proxied stream, as they are produced in the executor, +/// and uses a limited buffer to exert back-pressure on the remote stream. +/// +/// If this handle is dropped, then the stream will no longer be polled and is +/// scheduled to be dropped. +pub struct SpawnHandle<Item, Error> { + rx: Receiver<Result<Item, Error>>, + _cancel_tx: oneshot::Sender<()>, +} + +/// Type of future which `Executor` instances must be able to execute for `spawn`. 
+pub struct Execute<S: Stream> { + inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>, + cancel_rx: oneshot::Receiver<()>, +} + +/// Spawns a `stream` onto the instance of `Executor` provided, `executor`, +/// returning a handle representing the remote stream. +/// +/// The `stream` will be canceled if the `SpawnHandle` is dropped. +/// +/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. +/// When `stream` has additional items available, then the `SpawnHandle` +/// will have those same items available. +/// +/// At most `buffer + 1` elements will be buffered at a time. If the buffer +/// is full, then `stream` will stop progressing until more space is available. +/// This allows the `SpawnHandle` to exert backpressure on the `stream`. +/// +/// # Panics +/// +/// This function will panic if `executor` is unable spawn a `Future` containing +/// the entirety of the `stream`. +pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error> + where S: Stream, + E: Executor<Execute<S>> +{ + let (cancel_tx, cancel_rx) = oneshot::channel(); + let (tx, rx) = channel(buffer); + executor.execute(Execute { + inner: tx.send_all(resultstream::new(stream)), + cancel_rx: cancel_rx, + }).expect("failed to spawn stream"); + SpawnHandle { + rx: rx, + _cancel_tx: cancel_tx, + } +} + +/// Spawns a `stream` onto the instance of `Executor` provided, `executor`, +/// returning a handle representing the remote stream, with unbounded buffering. +/// +/// The `stream` will be canceled if the `SpawnHandle` is dropped. +/// +/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. +/// When `stream` has additional items available, then the `SpawnHandle` +/// will have those same items available. +/// +/// An unbounded buffer is used, which means that values will be buffered as +/// fast as `stream` can produce them, without any backpressure. 
Therefore, if +/// `stream` is an infinite stream, it can use an unbounded amount of memory, and +/// potentially hog CPU resources. +/// +/// # Panics +/// +/// This function will panic if `executor` is unable spawn a `Future` containing +/// the entirety of the `stream`. +pub fn spawn_unbounded<S, E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error> + where S: Stream, + E: Executor<Execute<S>> +{ + let (cancel_tx, cancel_rx) = oneshot::channel(); + let (tx, rx) = channel2(None); + executor.execute(Execute { + inner: tx.send_all(resultstream::new(stream)), + cancel_rx: cancel_rx, + }).expect("failed to spawn stream"); + SpawnHandle { + rx: rx, + _cancel_tx: cancel_tx, + } +} + +impl<I, E> Stream for SpawnHandle<I, E> { + type Item = I; + type Error = E; + + fn poll(&mut self) -> Poll<Option<I>, E> { + match self.rx.poll() { + Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))), + Ok(Async::Ready(Some(Err(e)))) => Err(e), + Ok(Async::Ready(None)) => Ok(Async::Ready(None)), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(_) => unreachable!("mpsc::Receiver should never return Err"), + } + } +} + +impl<I, E> fmt::Debug for SpawnHandle<I, E> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SpawnHandle") + .finish() + } +} + +impl<S: Stream> Future for Execute<S> { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + match self.cancel_rx.poll() { + Ok(Async::NotReady) => (), + _ => return Ok(Async::Ready(())), + } + match self.inner.poll() { + Ok(Async::NotReady) => Ok(Async::NotReady), + _ => Ok(Async::Ready(())) + } + } +} + +impl<S: Stream> fmt::Debug for Execute<S> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Execute") + .finish() + } +} + +/* + * + * ===== impl Inner ===== + * + */ + +impl<T> Inner<T> { + // The return value is such that the total number of messages that can be + // enqueued into the channel will never exceed MAX_CAPACITY + fn 
max_senders(&self) -> usize { + match self.buffer { + Some(buffer) => MAX_CAPACITY - buffer, + None => MAX_BUFFER, + } + } +} + +unsafe impl<T: Send> Send for Inner<T> {} +unsafe impl<T: Send> Sync for Inner<T> {} + +impl State { + fn is_closed(&self) -> bool { + !self.is_open && self.num_messages == 0 + } +} + +/* + * + * ===== Helpers ===== + * + */ + +fn decode_state(num: usize) -> State { + State { + is_open: num & OPEN_MASK == OPEN_MASK, + num_messages: num & MAX_CAPACITY, + } +} + +fn encode_state(state: &State) -> usize { + let mut num = state.num_messages; + + if state.is_open { + num |= OPEN_MASK; + } + + num +} diff --git a/third_party/rust/futures-0.1.31/src/sync/mpsc/queue.rs b/third_party/rust/futures-0.1.31/src/sync/mpsc/queue.rs new file mode 100644 index 0000000000..9ff6bcf873 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sync/mpsc/queue.rs @@ -0,0 +1,151 @@ +/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation are + * those of the authors and should not be interpreted as representing official + * policies, either expressed or implied, of Dmitry Vyukov. + */ + +//! A mostly lock-free multi-producer, single consumer queue. +//! +//! This module contains an implementation of a concurrent MPSC queue. This +//! queue can be used to share data between threads, and is also used as the +//! building block of channels in rust. +//! +//! Note that the current implementation of this queue has a caveat of the `pop` +//! method, and see the method for more information about it. Due to this +//! caveat, this queue may not be appropriate for all use-cases. + +// http://www.1024cores.net/home/lock-free-algorithms +// /queues/non-intrusive-mpsc-node-based-queue + +// NOTE: this implementation is lifted from the standard library and only +// slightly modified + +pub use self::PopResult::*; +use std::prelude::v1::*; + +use std::cell::UnsafeCell; +use std::ptr; +use std::sync::atomic::{AtomicPtr, Ordering}; + +/// A result of the `pop` function. +pub enum PopResult<T> { + /// Some data has been popped + Data(T), + /// The queue is empty + Empty, + /// The queue is in an inconsistent state. Popping data should succeed, but + /// some pushers have yet to make enough progress in order allow a pop to + /// succeed. 
It is recommended that a pop() occur "in the near future" in + /// order to see if the sender has made progress or not + Inconsistent, +} + +#[derive(Debug)] +struct Node<T> { + next: AtomicPtr<Node<T>>, + value: Option<T>, +} + +/// The multi-producer single-consumer structure. This is not cloneable, but it +/// may be safely shared so long as it is guaranteed that there is only one +/// popper at a time (many pushers are allowed). +#[derive(Debug)] +pub struct Queue<T> { + head: AtomicPtr<Node<T>>, + tail: UnsafeCell<*mut Node<T>>, +} + +unsafe impl<T: Send> Send for Queue<T> { } +unsafe impl<T: Send> Sync for Queue<T> { } + +impl<T> Node<T> { + unsafe fn new(v: Option<T>) -> *mut Node<T> { + Box::into_raw(Box::new(Node { + next: AtomicPtr::new(ptr::null_mut()), + value: v, + })) + } +} + +impl<T> Queue<T> { + /// Creates a new queue that is safe to share among multiple producers and + /// one consumer. + pub fn new() -> Queue<T> { + let stub = unsafe { Node::new(None) }; + Queue { + head: AtomicPtr::new(stub), + tail: UnsafeCell::new(stub), + } + } + + /// Pushes a new value onto this queue. + pub fn push(&self, t: T) { + unsafe { + let n = Node::new(Some(t)); + let prev = self.head.swap(n, Ordering::AcqRel); + (*prev).next.store(n, Ordering::Release); + } + } + + /// Pops some data from this queue. + /// + /// Note that the current implementation means that this function cannot + /// return `Option<T>`. It is possible for this queue to be in an + /// inconsistent state where many pushes have succeeded and completely + /// finished, but pops cannot return `Some(t)`. This inconsistent state + /// happens when a pusher is preempted at an inopportune moment. + /// + /// This inconsistent state means that this queue does indeed have data, but + /// it does not currently have access to it at this time. + /// + /// This function is unsafe because only one thread can call it at a time. 
+ pub unsafe fn pop(&self) -> PopResult<T> { + let tail = *self.tail.get(); + let next = (*tail).next.load(Ordering::Acquire); + + if !next.is_null() { + *self.tail.get() = next; + assert!((*tail).value.is_none()); + assert!((*next).value.is_some()); + let ret = (*next).value.take().unwrap(); + drop(Box::from_raw(tail)); + return Data(ret); + } + + if self.head.load(Ordering::Acquire) == tail {Empty} else {Inconsistent} + } +} + +impl<T> Drop for Queue<T> { + fn drop(&mut self) { + unsafe { + let mut cur = *self.tail.get(); + while !cur.is_null() { + let next = (*cur).next.load(Ordering::Relaxed); + drop(Box::from_raw(cur)); + cur = next; + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/sync/oneshot.rs b/third_party/rust/futures-0.1.31/src/sync/oneshot.rs new file mode 100644 index 0000000000..3a9d8efdca --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/sync/oneshot.rs @@ -0,0 +1,611 @@ +//! A one-shot, futures-aware channel + +use std::sync::Arc; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering::SeqCst; +use std::error::Error; +use std::fmt; + +use {Future, Poll, Async}; +use future::{lazy, Lazy, Executor, IntoFuture}; +use lock::Lock; +use task::{self, Task}; + +/// A future representing the completion of a computation happening elsewhere in +/// memory. +/// +/// This is created by the `oneshot::channel` function. +#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] +pub struct Receiver<T> { + inner: Arc<Inner<T>>, +} + +/// Represents the completion half of a oneshot through which the result of a +/// computation is signaled. +/// +/// This is created by the `oneshot::channel` function. +#[derive(Debug)] +pub struct Sender<T> { + inner: Arc<Inner<T>>, +} + +/// Internal state of the `Receiver`/`Sender` pair above. This is all used as +/// the internal synchronization between the two for send/recv operations. +#[derive(Debug)] +struct Inner<T> { + /// Indicates whether this oneshot is complete yet. 
This is filled in both + /// by `Sender::drop` and by `Receiver::drop`, and both sides interpret it + /// appropriately. + /// + /// For `Receiver`, if this is `true`, then it's guaranteed that `data` is + /// unlocked and ready to be inspected. + /// + /// For `Sender` if this is `true` then the oneshot has gone away and it + /// can return ready from `poll_cancel`. + complete: AtomicBool, + + /// The actual data being transferred as part of this `Receiver`. This is + /// filled in by `Sender::complete` and read by `Receiver::poll`. + /// + /// Note that this is protected by `Lock`, but it is in theory safe to + /// replace with an `UnsafeCell` as it's actually protected by `complete` + /// above. I wouldn't recommend doing this, however, unless someone is + /// supremely confident in the various atomic orderings here and there. + data: Lock<Option<T>>, + + /// Field to store the task which is blocked in `Receiver::poll`. + /// + /// This is filled in when a oneshot is polled but not ready yet. Note that + /// the `Lock` here, unlike in `data` above, is important to resolve races. + /// Both the `Receiver` and the `Sender` halves understand that if they + /// can't acquire the lock then some important interference is happening. + rx_task: Lock<Option<Task>>, + + /// Like `rx_task` above, except for the task blocked in + /// `Sender::poll_cancel`. Additionally, `Lock` cannot be `UnsafeCell`. + tx_task: Lock<Option<Task>>, +} + +/// Creates a new futures-aware, one-shot channel. +/// +/// This function is similar to Rust's channels found in the standard library. +/// Two halves are returned, the first of which is a `Sender` handle, used to +/// signal the end of a computation and provide its value. The second half is a +/// `Receiver` which implements the `Future` trait, resolving to the value that +/// was given to the `Sender` handle. +/// +/// Each half can be separately owned and sent across threads/tasks. 
+/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use futures::sync::oneshot; +/// use futures::*; +/// +/// let (p, c) = oneshot::channel::<i32>(); +/// +/// thread::spawn(|| { +/// c.map(|i| { +/// println!("got: {}", i); +/// }).wait(); +/// }); +/// +/// p.send(3).unwrap(); +/// ``` +pub fn channel<T>() -> (Sender<T>, Receiver<T>) { + let inner = Arc::new(Inner::new()); + let receiver = Receiver { + inner: inner.clone(), + }; + let sender = Sender { + inner: inner, + }; + (sender, receiver) +} + +impl<T> Inner<T> { + fn new() -> Inner<T> { + Inner { + complete: AtomicBool::new(false), + data: Lock::new(None), + rx_task: Lock::new(None), + tx_task: Lock::new(None), + } + } + + fn send(&self, t: T) -> Result<(), T> { + if self.complete.load(SeqCst) { + return Err(t) + } + + // Note that this lock acquisition may fail if the receiver + // is closed and sets the `complete` flag to true, whereupon + // the receiver may call `poll()`. + if let Some(mut slot) = self.data.try_lock() { + assert!(slot.is_none()); + *slot = Some(t); + drop(slot); + + // If the receiver called `close()` between the check at the + // start of the function, and the lock being released, then + // the receiver may not be around to receive it, so try to + // pull it back out. + if self.complete.load(SeqCst) { + // If lock acquisition fails, then receiver is actually + // receiving it, so we're good. + if let Some(mut slot) = self.data.try_lock() { + if let Some(t) = slot.take() { + return Err(t); + } + } + } + Ok(()) + } else { + // Must have been closed + Err(t) + } + } + + fn poll_cancel(&self) -> Poll<(), ()> { + // Fast path up first, just read the flag and see if our other half is + // gone. This flag is set both in our destructor and the oneshot + // destructor, but our destructor hasn't run yet so if it's set then the + // oneshot is gone. 
+ if self.complete.load(SeqCst) { + return Ok(Async::Ready(())) + } + + // If our other half is not gone then we need to park our current task + // and move it into the `notify_cancel` slot to get notified when it's + // actually gone. + // + // If `try_lock` fails, then the `Receiver` is in the process of using + // it, so we can deduce that it's now in the process of going away and + // hence we're canceled. If it succeeds then we just store our handle. + // + // Crucially we then check `oneshot_gone` *again* before we return. + // While we were storing our handle inside `notify_cancel` the `Receiver` + // may have been dropped. The first thing it does is set the flag, and + // if it fails to acquire the lock it assumes that we'll see the flag + // later on. So... we then try to see the flag later on! + let handle = task::current(); + match self.tx_task.try_lock() { + Some(mut p) => *p = Some(handle), + None => return Ok(Async::Ready(())), + } + if self.complete.load(SeqCst) { + Ok(Async::Ready(())) + } else { + Ok(Async::NotReady) + } + } + + fn is_canceled(&self) -> bool { + self.complete.load(SeqCst) + } + + fn drop_tx(&self) { + // Flag that we're a completed `Sender` and try to wake up a receiver. + // Whether or not we actually stored any data will get picked up and + // translated to either an item or cancellation. + // + // Note that if we fail to acquire the `rx_task` lock then that means + // we're in one of two situations: + // + // 1. The receiver is trying to block in `poll` + // 2. The receiver is being dropped + // + // In the first case it'll check the `complete` flag after it's done + // blocking to see if it succeeded. In the latter case we don't need to + // wake up anyone anyway. So in both cases it's ok to ignore the `None` + // case of `try_lock` and bail out. + // + // The first case crucially depends on `Lock` using `SeqCst` ordering + // under the hood. 
If it instead used `Release` / `Acquire` ordering, + // then it would not necessarily synchronize with `inner.complete` + // and deadlock might be possible, as was observed in + // https://github.com/rust-lang-nursery/futures-rs/pull/219. + self.complete.store(true, SeqCst); + if let Some(mut slot) = self.rx_task.try_lock() { + if let Some(task) = slot.take() { + drop(slot); + task.notify(); + } + } + } + + fn close_rx(&self) { + // Flag our completion and then attempt to wake up the sender if it's + // blocked. See comments in `drop` below for more info + self.complete.store(true, SeqCst); + if let Some(mut handle) = self.tx_task.try_lock() { + if let Some(task) = handle.take() { + drop(handle); + task.notify() + } + } + } + + fn try_recv(&self) -> Result<Option<T>, Canceled> { + // If we're complete, either `::close_rx` or `::drop_tx` was called. + // We can assume a successful send if data is present. + if self.complete.load(SeqCst) { + if let Some(mut slot) = self.data.try_lock() { + if let Some(data) = slot.take() { + return Ok(Some(data.into())); + } + } + // Should there be a different error value or a panic in the case + // where `self.data.try_lock() == None`? + Err(Canceled) + } else { + Ok(None) + } + } + + fn recv(&self) -> Poll<T, Canceled> { + let mut done = false; + + // Check to see if some data has arrived. If it hasn't then we need to + // block our task. + // + // Note that the acquisition of the `rx_task` lock might fail below, but + // the only situation where this can happen is during `Sender::drop` + // when we are indeed completed already. If that's happening then we + // know we're completed so keep going. + if self.complete.load(SeqCst) { + done = true; + } else { + let task = task::current(); + match self.rx_task.try_lock() { + Some(mut slot) => *slot = Some(task), + None => done = true, + } + } + + // If we're `done` via one of the paths above, then look at the data and + // figure out what the answer is. 
If, however, we stored `rx_task` + // successfully above we need to check again if we're completed in case + // a message was sent while `rx_task` was locked and couldn't notify us + // otherwise. + // + // If we're not done, and we're not complete, though, then we've + // successfully blocked our task and we return `NotReady`. + if done || self.complete.load(SeqCst) { + // If taking the lock fails, the sender will realise that the we're + // `done` when it checks the `complete` flag on the way out, and will + // treat the send as a failure. + if let Some(mut slot) = self.data.try_lock() { + if let Some(data) = slot.take() { + return Ok(data.into()); + } + } + Err(Canceled) + } else { + Ok(Async::NotReady) + } + } + + fn drop_rx(&self) { + // Indicate to the `Sender` that we're done, so any future calls to + // `poll_cancel` are weeded out. + self.complete.store(true, SeqCst); + + // If we've blocked a task then there's no need for it to stick around, + // so we need to drop it. If this lock acquisition fails, though, then + // it's just because our `Sender` is trying to take the task, so we + // let them take care of that. + if let Some(mut slot) = self.rx_task.try_lock() { + let task = slot.take(); + drop(slot); + drop(task); + } + + // Finally, if our `Sender` wants to get notified of us going away, it + // would have stored something in `tx_task`. Here we try to peel that + // out and unpark it. + // + // Note that the `try_lock` here may fail, but only if the `Sender` is + // in the process of filling in the task. If that happens then we + // already flagged `complete` and they'll pick that up above. 
+ if let Some(mut handle) = self.tx_task.try_lock() { + if let Some(task) = handle.take() { + drop(handle); + task.notify() + } + } + } +} + +impl<T> Sender<T> { + #[deprecated(note = "renamed to `send`", since = "0.1.11")] + #[doc(hidden)] + #[cfg(feature = "with-deprecated")] + pub fn complete(self, t: T) { + drop(self.send(t)); + } + + /// Completes this oneshot with a successful result. + /// + /// This function will consume `self` and indicate to the other end, the + /// `Receiver`, that the value provided is the result of the computation this + /// represents. + /// + /// If the value is successfully enqueued for the remote end to receive, + /// then `Ok(())` is returned. If the receiving end was deallocated before + /// this function was called, however, then `Err` is returned with the value + /// provided. + pub fn send(self, t: T) -> Result<(), T> { + self.inner.send(t) + } + + /// Polls this `Sender` half to detect whether the `Receiver` this has + /// paired with has gone away. + /// + /// This function can be used to learn about when the `Receiver` (consumer) + /// half has gone away and nothing will be able to receive a message sent + /// from `send`. + /// + /// If `Ready` is returned then it means that the `Receiver` has disappeared + /// and the result this `Sender` would otherwise produce should no longer + /// be produced. + /// + /// If `NotReady` is returned then the `Receiver` is still alive and may be + /// able to receive a message if sent. The current task, however, is + /// scheduled to receive a notification if the corresponding `Receiver` goes + /// away. + /// + /// # Panics + /// + /// Like `Future::poll`, this function will panic if it's not called from + /// within the context of a task. In other words, this should only ever be + /// called from inside another future. + /// + /// If `Ok(Ready)` is returned then the associated `Receiver` has been + /// dropped, which means any work required for sending should be canceled. 
+ /// + /// If you're calling this function from a context that does not have a + /// task, then you can use the `is_canceled` API instead. + pub fn poll_cancel(&mut self) -> Poll<(), ()> { + self.inner.poll_cancel() + } + + /// Tests to see whether this `Sender`'s corresponding `Receiver` + /// has gone away. + /// + /// This function can be used to learn about when the `Receiver` (consumer) + /// half has gone away and nothing will be able to receive a message sent + /// from `send`. + /// + /// Note that this function is intended to *not* be used in the context of a + /// future. If you're implementing a future you probably want to call the + /// `poll_cancel` function which will block the current task if the + /// cancellation hasn't happened yet. This can be useful when working on a + /// non-futures related thread, though, which would otherwise panic if + /// `poll_cancel` were called. + pub fn is_canceled(&self) -> bool { + self.inner.is_canceled() + } +} + +impl<T> Drop for Sender<T> { + fn drop(&mut self) { + self.inner.drop_tx() + } +} + +/// Error returned from a `Receiver<T>` whenever the corresponding `Sender<T>` +/// is dropped. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Canceled; + +impl fmt::Display for Canceled { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "oneshot canceled") + } +} + +impl Error for Canceled { + fn description(&self) -> &str { + "oneshot canceled" + } +} + +impl<T> Receiver<T> { + /// Gracefully close this receiver, preventing sending any future messages. + /// + /// Any `send` operation which happens after this method returns is + /// guaranteed to fail. Once this method is called the normal `poll` method + /// can be used to determine whether a message was actually sent or not. If + /// `Canceled` is returned from `poll` then no message was sent. + pub fn close(&mut self) { + self.inner.close_rx() + } + + /// Attempts to receive a message outside of the context of a task. 
+ /// + /// Useful when a [`Context`](Context) is not available such as within a + /// `Drop` impl. + /// + /// Does not schedule a task wakeup or have any other side effects. + /// + /// A return value of `None` must be considered immediately stale (out of + /// date) unless [`::close`](Receiver::close) has been called first. + /// + /// Returns an error if the sender was dropped. + pub fn try_recv(&mut self) -> Result<Option<T>, Canceled> { + self.inner.try_recv() + } +} + +impl<T> Future for Receiver<T> { + type Item = T; + type Error = Canceled; + + fn poll(&mut self) -> Poll<T, Canceled> { + self.inner.recv() + } +} + +impl<T> Drop for Receiver<T> { + fn drop(&mut self) { + self.inner.drop_rx() + } +} + +/// Handle returned from the `spawn` function. +/// +/// This handle is a future representing the completion of a different future on +/// a separate executor. Created through the `oneshot::spawn` function this +/// handle will resolve when the future provided to `spawn` resolves on the +/// `Executor` instance provided to that function. +/// +/// If this handle is dropped then the future will automatically no longer be +/// polled and is scheduled to be dropped. This can be canceled with the +/// `forget` function, however. +pub struct SpawnHandle<T, E> { + rx: Arc<ExecuteInner<Result<T, E>>>, +} + +struct ExecuteInner<T> { + inner: Inner<T>, + keep_running: AtomicBool, +} + +/// Type of future which `Execute` instances below must be able to spawn. +pub struct Execute<F: Future> { + future: F, + tx: Arc<ExecuteInner<Result<F::Item, F::Error>>>, +} + +/// Spawns a `future` onto the instance of `Executor` provided, `executor`, +/// returning a handle representing the completion of the future. +/// +/// The `SpawnHandle` returned is a future that is a proxy for `future` itself. +/// When `future` completes on `executor` then the `SpawnHandle` will itself be +/// resolved. 
Internally `SpawnHandle` contains a `oneshot` channel and is +/// thus safe to send across threads. +/// +/// The `future` will be canceled if the `SpawnHandle` is dropped. If this is +/// not desired then the `SpawnHandle::forget` function can be used to continue +/// running the future to completion. +/// +/// # Panics +/// +/// This function will panic if the instance of `Spawn` provided is unable to +/// spawn the `future` provided. +/// +/// If the provided instance of `Spawn` does not actually run `future` to +/// completion, then the returned handle may panic when polled. Typically this +/// is not a problem, though, as most instances of `Spawn` will run futures to +/// completion. +/// +/// Note that the returned future will likely panic if the `futures` provided +/// panics. If a future running on an executor panics that typically means that +/// the executor drops the future, which falls into the above case of not +/// running the future to completion essentially. +pub fn spawn<F, E>(future: F, executor: &E) -> SpawnHandle<F::Item, F::Error> + where F: Future, + E: Executor<Execute<F>>, +{ + let data = Arc::new(ExecuteInner { + inner: Inner::new(), + keep_running: AtomicBool::new(false), + }); + executor.execute(Execute { + future: future, + tx: data.clone(), + }).expect("failed to spawn future"); + SpawnHandle { rx: data } +} + +/// Spawns a function `f` onto the `Spawn` instance provided `s`. +/// +/// For more information see the `spawn` function in this module. This function +/// is just a thin wrapper around `spawn` which will execute the closure on the +/// executor provided and then complete the future that the closure returns. +pub fn spawn_fn<F, R, E>(f: F, executor: &E) -> SpawnHandle<R::Item, R::Error> + where F: FnOnce() -> R, + R: IntoFuture, + E: Executor<Execute<Lazy<F, R>>>, +{ + spawn(lazy(f), executor) +} + +impl<T, E> SpawnHandle<T, E> { + /// Drop this future without canceling the underlying future. 
+ /// + /// When `SpawnHandle` is dropped, the spawned future will be canceled as + /// well if the future hasn't already resolved. This function can be used + /// when to drop this future but keep executing the underlying future. + pub fn forget(self) { + self.rx.keep_running.store(true, SeqCst); + } +} + +impl<T, E> Future for SpawnHandle<T, E> { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<T, E> { + match self.rx.inner.recv() { + Ok(Async::Ready(Ok(t))) => Ok(t.into()), + Ok(Async::Ready(Err(e))) => Err(e), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(_) => panic!("future was canceled before completion"), + } + } +} + +impl<T: fmt::Debug, E: fmt::Debug> fmt::Debug for SpawnHandle<T, E> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SpawnHandle") + .finish() + } +} + +impl<T, E> Drop for SpawnHandle<T, E> { + fn drop(&mut self) { + self.rx.inner.drop_rx(); + } +} + +impl<F: Future> Future for Execute<F> { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + // If we're canceled then we may want to bail out early. + // + // If the `forget` function was called, though, then we keep going. 
+ if self.tx.inner.poll_cancel().unwrap().is_ready() { + if !self.tx.keep_running.load(SeqCst) { + return Ok(().into()) + } + } + + let result = match self.future.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::Ready(t)) => Ok(t), + Err(e) => Err(e), + }; + drop(self.tx.inner.send(result)); + Ok(().into()) + } +} + +impl<F: Future + fmt::Debug> fmt::Debug for Execute<F> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Execute") + .field("future", &self.future) + .finish() + } +} + +impl<F: Future> Drop for Execute<F> { + fn drop(&mut self) { + self.tx.inner.drop_tx(); + } +} diff --git a/third_party/rust/futures-0.1.31/src/task.rs b/third_party/rust/futures-0.1.31/src/task.rs new file mode 100644 index 0000000000..f83f2c4719 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/task.rs @@ -0,0 +1,46 @@ +//! Tasks used to drive a future computation +//! +//! It's intended over time a particular operation (such as servicing an HTTP +//! request) will involve many futures. This entire operation, however, can be +//! thought of as one unit, as the entire result is essentially just moving +//! through one large state machine. +//! +//! A "task" is the unit of abstraction for what is driving this state machine +//! and tree of futures forward. A task is used to poll futures and schedule +//! futures with, and has utilities for sharing data between tasks and handles +//! for notifying when a future is ready. Each task also has its own set of +//! task-local data generated by `task_local!`. +//! +//! Note that libraries typically should not manage tasks themselves, but rather +//! leave that to event loops and other "executors" (see the `executor` module), +//! or by using the `wait` method to create and execute a task directly on the +//! current thread. +//! +//! More information about the task model can be found [online at tokio.rs]. +//! +//! 
[online at tokio.rs]: https://tokio.rs/docs/going-deeper-futures/futures-model/ +//! +//! ## Functions +//! +//! There is an important bare function in this module: `current`. The +//! `current` function returns a handle to the currently running task, panicking +//! if one isn't present. This handle is then used to later notify the task that +//! it's ready to make progress through the `Task::notify` method. + +#[doc(hidden)] +#[deprecated(since = "0.1.4", note = "import through the executor module instead")] +#[cfg(all(feature = "with-deprecated", feature = "use_std"))] +#[allow(deprecated)] +pub use task_impl::{Spawn, spawn, Unpark, Executor, Run, park}; + +pub use task_impl::{Task, AtomicTask, current, init, is_in_task}; + +#[allow(deprecated)] +#[cfg(feature = "use_std")] +pub use task_impl::{LocalKey, with_unpark_event, UnparkEvent, EventSet}; + +#[doc(hidden)] +#[deprecated(since = "0.1.4", note = "import through the executor module instead")] +#[cfg(all(feature = "with-deprecated", feature = "use_std"))] +#[allow(deprecated)] +pub use task_impl::TaskRc; diff --git a/third_party/rust/futures-0.1.31/src/task_impl/atomic_task.rs b/third_party/rust/futures-0.1.31/src/task_impl/atomic_task.rs new file mode 100644 index 0000000000..d73954e617 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/task_impl/atomic_task.rs @@ -0,0 +1,283 @@ +use super::Task; + +use core::fmt; +use core::cell::UnsafeCell; +use core::sync::atomic::AtomicUsize; +use core::sync::atomic::Ordering::{Acquire, Release, AcqRel}; + +/// A synchronization primitive for task notification. +/// +/// `AtomicTask` will coordinate concurrent notifications with the consumer +/// potentially "updating" the underlying task to notify. This is useful in +/// scenarios where a computation completes in another thread and wants to +/// notify the consumer, but the consumer is in the process of being migrated to +/// a new logical task. 
+/// +/// Consumers should call `register` before checking the result of a computation +/// and producers should call `notify` after producing the computation (this +/// differs from the usual `thread::park` pattern). It is also permitted for +/// `notify` to be called **before** `register`. This results in a no-op. +/// +/// A single `AtomicTask` may be reused for any number of calls to `register` or +/// `notify`. +/// +/// `AtomicTask` does not provide any memory ordering guarantees, as such the +/// user should use caution and use other synchronization primitives to guard +/// the result of the underlying computation. +pub struct AtomicTask { + state: AtomicUsize, + task: UnsafeCell<Option<Task>>, +} + +// `AtomicTask` is a multi-consumer, single-producer transfer cell. The cell +// stores a `Task` value produced by calls to `register` and many threads can +// race to take the task (to notify it) by calling `notify. +// +// If a new `Task` instance is produced by calling `register` before an existing +// one is consumed, then the existing one is overwritten. +// +// While `AtomicTask` is single-producer, the implementation ensures memory +// safety. In the event of concurrent calls to `register`, there will be a +// single winner whose task will get stored in the cell. The losers will not +// have their tasks notified. As such, callers should ensure to add +// synchronization to calls to `register`. +// +// The implementation uses a single `AtomicUsize` value to coordinate access to +// the `Task` cell. There are two bits that are operated on independently. These +// are represented by `REGISTERING` and `NOTIFYING`. +// +// The `REGISTERING` bit is set when a producer enters the critical section. The +// `NOTIFYING` bit is set when a consumer enters the critical section. Neither +// bit being set is represented by `WAITING`. 
+// +// A thread obtains an exclusive lock on the task cell by transitioning the +// state from `WAITING` to `REGISTERING` or `NOTIFYING`, depending on the +// operation the thread wishes to perform. When this transition is made, it is +// guaranteed that no other thread will access the task cell. +// +// # Registering +// +// On a call to `register`, an attempt to transition the state from WAITING to +// REGISTERING is made. On success, the caller obtains a lock on the task cell. +// +// If the lock is obtained, then the thread sets the task cell to the task +// provided as an argument. Then it attempts to transition the state back from +// `REGISTERING` -> `WAITING`. +// +// If this transition is successful, then the registering process is complete +// and the next call to `notify` will observe the task. +// +// If the transition fails, then there was a concurrent call to `notify` that +// was unable to access the task cell (due to the registering thread holding the +// lock). To handle this, the registering thread removes the task it just set +// from the cell and calls `notify` on it. This call to notify represents the +// attempt to notify by the other thread (that set the `NOTIFYING` bit). The +// state is then transitioned from `REGISTERING | NOTIFYING` back to `WAITING`. +// This transition must succeed because, at this point, the state cannot be +// transitioned by another thread. +// +// # Notifying +// +// On a call to `notify`, an attempt to transition the state from `WAITING` to +// `NOTIFYING` is made. On success, the caller obtains a lock on the task cell. +// +// If the lock is obtained, then the thread takes ownership of the current value +// in teh task cell, and calls `notify` on it. The state is then transitioned +// back to `WAITING`. This transition must succeed as, at this point, the state +// cannot be transitioned by another thread. +// +// If the thread is unable to obtain the lock, the `NOTIFYING` bit is still. 
+// This is because it has either been set by the current thread but the previous +// value included the `REGISTERING` bit **or** a concurrent thread is in the +// `NOTIFYING` critical section. Either way, no action must be taken. +// +// If the current thread is the only concurrent call to `notify` and another +// thread is in the `register` critical section, when the other thread **exits** +// the `register` critical section, it will observe the `NOTIFYING` bit and +// handle the notify itself. +// +// If another thread is in the `notify` critical section, then it will handle +// notifying the task. +// +// # A potential race (is safely handled). +// +// Imagine the following situation: +// +// * Thread A obtains the `notify` lock and notifies a task. +// +// * Before thread A releases the `notify` lock, the notified task is scheduled. +// +// * Thread B attempts to notify the task. In theory this should result in the +// task being notified, but it cannot because thread A still holds the notify +// lock. +// +// This case is handled by requiring users of `AtomicTask` to call `register` +// **before** attempting to observe the application state change that resulted +// in the task being notified. The notifiers also change the application state +// before calling notify. +// +// Because of this, the task will do one of two things. +// +// 1) Observe the application state change that Thread B is notifying on. In +// this case, it is OK for Thread B's notification to be lost. +// +// 2) Call register before attempting to observe the application state. Since +// Thread A still holds the `notify` lock, the call to `register` will result +// in the task notifying itself and get scheduled again. + +/// Idle state +const WAITING: usize = 0; + +/// A new task value is being registered with the `AtomicTask` cell. +const REGISTERING: usize = 0b01; + +/// The task currently registered with the `AtomicTask` cell is being notified. 
+const NOTIFYING: usize = 0b10; + +impl AtomicTask { + /// Create an `AtomicTask` initialized with the given `Task` + pub fn new() -> AtomicTask { + // Make sure that task is Sync + trait AssertSync: Sync {} + impl AssertSync for Task {} + + AtomicTask { + state: AtomicUsize::new(WAITING), + task: UnsafeCell::new(None), + } + } + + /// Registers the current task to be notified on calls to `notify`. + /// + /// This is the same as calling `register_task` with `task::current()`. + pub fn register(&self) { + self.register_task(super::current()); + } + + /// Registers the provided task to be notified on calls to `notify`. + /// + /// The new task will take place of any previous tasks that were registered + /// by previous calls to `register`. Any calls to `notify` that happen after + /// a call to `register` (as defined by the memory ordering rules), will + /// notify the `register` caller's task. + /// + /// It is safe to call `register` with multiple other threads concurrently + /// calling `notify`. This will result in the `register` caller's current + /// task being notified once. + /// + /// This function is safe to call concurrently, but this is generally a bad + /// idea. Concurrent calls to `register` will attempt to register different + /// tasks to be notified. One of the callers will win and have its task set, + /// but there is no guarantee as to which caller will succeed. + pub fn register_task(&self, task: Task) { + match self.state.compare_and_swap(WAITING, REGISTERING, Acquire) { + WAITING => { + unsafe { + // Lock acquired, update the waker cell + *self.task.get() = Some(task.clone()); + + // Release the lock. If the state transitioned to include + // the `NOTIFYING` bit, this means that a notify has been + // called concurrently, so we have to remove the task and + // notify it. + // + // Start by assuming that the state is `REGISTERING` as this + // is what we just set it to. 
+ let res = self.state.compare_exchange( + REGISTERING, WAITING, AcqRel, Acquire); + + match res { + Ok(_) => {} + Err(actual) => { + // This branch can only be reached if a + // concurrent thread called `notify`. In this + // case, `actual` **must** be `REGISTERING | + // NOTIFYING`. + debug_assert_eq!(actual, REGISTERING | NOTIFYING); + + // Take the task to notify once the atomic operation has + // completed. + let notify = (*self.task.get()).take().unwrap(); + + // Just swap, because no one could change state + // while state == `REGISTERING | NOTIFYING` + self.state.swap(WAITING, AcqRel); + + // The atomic swap was complete, now + // notify the task and return. + notify.notify(); + } + } + } + } + NOTIFYING => { + // Currently in the process of notifying the task, i.e., + // `notify` is currently being called on the old task handle. + // So, we call notify on the new task handle + task.notify(); + } + state => { + // In this case, a concurrent thread is holding the + // "registering" lock. This probably indicates a bug in the + // caller's code as racing to call `register` doesn't make much + // sense. + // + // We just want to maintain memory safety. It is ok to drop the + // call to `register`. + debug_assert!( + state == REGISTERING || + state == REGISTERING | NOTIFYING); + } + } + } + + /// Notifies the task that last called `register`. + /// + /// If `register` has not been called yet, then this does nothing. + pub fn notify(&self) { + // AcqRel ordering is used in order to acquire the value of the `task` + // cell as well as to establish a `release` ordering with whatever + // memory the `AtomicTask` is associated with. + match self.state.fetch_or(NOTIFYING, AcqRel) { + WAITING => { + // The notifying lock has been acquired. 
+ let task = unsafe { (*self.task.get()).take() }; + + // Release the lock + self.state.fetch_and(!NOTIFYING, Release); + + if let Some(task) = task { + task.notify(); + } + } + state => { + // There is a concurrent thread currently updating the + // associated task. + // + // Nothing more to do as the `NOTIFYING` bit has been set. It + // doesn't matter if there are concurrent registering threads or + // not. + // + debug_assert!( + state == REGISTERING || + state == REGISTERING | NOTIFYING || + state == NOTIFYING); + } + } + } +} + +impl Default for AtomicTask { + fn default() -> Self { + AtomicTask::new() + } +} + +impl fmt::Debug for AtomicTask { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "AtomicTask") + } +} + +unsafe impl Send for AtomicTask {} +unsafe impl Sync for AtomicTask {} diff --git a/third_party/rust/futures-0.1.31/src/task_impl/core.rs b/third_party/rust/futures-0.1.31/src/task_impl/core.rs new file mode 100644 index 0000000000..d454116012 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/task_impl/core.rs @@ -0,0 +1,186 @@ +#![cfg_attr(feature = "use_std", allow(dead_code))] + +use core::marker; +use core::mem; +use core::sync::atomic::AtomicUsize; +#[allow(deprecated)] +use core::sync::atomic::ATOMIC_USIZE_INIT; +use core::sync::atomic::Ordering::{SeqCst, Relaxed}; + +use super::{BorrowedTask, NotifyHandle}; + +pub struct LocalKey; +pub struct LocalMap; +pub fn local_map() -> LocalMap { LocalMap } + +#[derive(Copy, Clone)] +pub struct BorrowedEvents<'a>(marker::PhantomData<&'a ()>); + +#[derive(Copy, Clone)] +pub struct BorrowedUnpark<'a> { + f: &'a Fn() -> NotifyHandle, + id: usize, +} + +pub struct TaskUnpark { + handle: NotifyHandle, + id: usize, +} + +#[derive(Clone)] +pub struct UnparkEvents; + +impl<'a> BorrowedEvents<'a> { + pub fn new() -> BorrowedEvents<'a> { + BorrowedEvents(marker::PhantomData) + } + + pub fn to_owned(&self) -> UnparkEvents { + UnparkEvents + } +} + +impl<'a> BorrowedUnpark<'a> { + 
#[inline] + pub fn new(f: &'a Fn() -> NotifyHandle, id: usize) -> BorrowedUnpark<'a> { + BorrowedUnpark { f: f, id: id } + } + + #[inline] + pub fn to_owned(&self) -> TaskUnpark { + let handle = (self.f)(); + let id = handle.clone_id(self.id); + TaskUnpark { handle: handle, id: id } + } +} + +impl UnparkEvents { + pub fn notify(&self) {} + + pub fn will_notify(&self, _other: &BorrowedEvents) -> bool { + true + } +} + +impl TaskUnpark { + pub fn notify(&self) { + self.handle.notify(self.id); + } + + pub fn will_notify(&self, other: &BorrowedUnpark) -> bool { + self.id == other.id && self.handle.inner == (other.f)().inner + } +} + +impl Clone for TaskUnpark { + fn clone(&self) -> TaskUnpark { + let handle = self.handle.clone(); + let id = handle.clone_id(self.id); + TaskUnpark { handle: handle, id: id } + } +} + +impl Drop for TaskUnpark { + fn drop(&mut self) { + self.handle.drop_id(self.id); + } +} + +#[allow(deprecated)] +static GET: AtomicUsize = ATOMIC_USIZE_INIT; +#[allow(deprecated)] +static SET: AtomicUsize = ATOMIC_USIZE_INIT; + +/// Initialize the `futures` task system. +/// +/// This function is an unsafe low-level implementation detail typically only +/// used by crates using `futures` in `no_std` context. Users of this crate +/// who also use the standard library never need to invoke this function. +/// +/// The task system in the `futures` crate relies on some notion of "local +/// storage" for the running thread and/or context. The `task::current` function +/// can get invoked in any context, for example, and needs to be able to return +/// a `Task`. Typically with the standard library this is supported with +/// thread-local-storage, but this is not available in `no_std` contexts! +/// +/// This function is provided to allow `no_std` contexts to continue to be able +/// to use the standard task system in this crate. The functions provided here +/// will be used as-if they were thread-local-storage getters/setters. 
The `get` +/// function provided is used to retrieve the current thread-local value of the +/// task system's pointer, returning null if not initialized. The `set` function +/// updates the value of the pointer. +/// +/// # Return value +/// +/// This function will return whether initialization succeeded or not. This +/// function can be called concurrently and only the first invocation will +/// succeed. If `false` is returned then the `get` and `set` pointers provided +/// were *not* registered for use with the task system, but if `true` was +/// provided then they will be called when the task system is used. +/// +/// Note that while safe to call concurrently it's recommended to still perform +/// external synchronization when calling this function. This task system is +/// not guaranteed to be ready to go until a call to this function returns +/// `true`. In other words, if you call this function and see `false`, the +/// task system may not be ready to go as another thread may still be calling +/// `init`. +/// +/// # Unsafety +/// +/// This function is unsafe due to the requirements on the behavior of the +/// `get` and `set` functions. The pointers returned from these functions must +/// reflect the semantics specified above and must also be thread-local, +/// depending on the definition of a "thread" in the calling context. +pub unsafe fn init(get: fn() -> *mut u8, set: fn(*mut u8)) -> bool { + if GET.compare_exchange(0, get as usize, SeqCst, SeqCst).is_ok() { + SET.store(set as usize, SeqCst); + true + } else { + false + } +} + +/// Return whether the caller is running in a task (and so can use task_local!). 
+pub fn is_in_task() -> bool { + if let Some(ptr) = get_ptr() { + !ptr.is_null() + } else { + false + } +} + +#[inline] +pub fn get_ptr() -> Option<*mut u8> { + match GET.load(Relaxed) { + 0 => None, + n => Some(unsafe { mem::transmute::<usize, fn() -> *mut u8>(n)() }), + } +} + +#[cfg(feature = "use_std")] +#[inline] +pub fn is_get_ptr(f: usize) -> bool { + GET.load(Relaxed) == f +} + +pub fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R + where F: FnOnce() -> R +{ + let set = match SET.load(Relaxed) { + 0 => panic!("not initialized"), + n => unsafe { mem::transmute::<usize, fn(*mut u8)>(n) }, + }; + + struct Reset(fn(*mut u8), *mut u8); + + impl Drop for Reset { + #[inline] + fn drop(&mut self) { + (self.0)(self.1); + } + } + + let _reset = Reset(set, get_ptr().unwrap()); + set(task as *const _ as *mut u8); + f() +} diff --git a/third_party/rust/futures-0.1.31/src/task_impl/mod.rs b/third_party/rust/futures-0.1.31/src/task_impl/mod.rs new file mode 100644 index 0000000000..6f1cf36c0c --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/task_impl/mod.rs @@ -0,0 +1,733 @@ +use core::fmt; +use core::marker::PhantomData; + +use {Poll, Future, Stream, Sink, StartSend}; + +mod atomic_task; +pub use self::atomic_task::AtomicTask; + +mod core; + +#[cfg(feature = "use_std")] +mod std; +#[cfg(feature = "use_std")] +pub use self::std::*; +#[cfg(not(feature = "use_std"))] +pub use self::core::*; + +pub struct BorrowedTask<'a> { + id: usize, + unpark: BorrowedUnpark<'a>, + events: BorrowedEvents<'a>, + // Task-local storage + map: &'a LocalMap, +} + +fn fresh_task_id() -> usize { + use core::sync::atomic::{AtomicUsize, Ordering}; + #[allow(deprecated)] + use core::sync::atomic::ATOMIC_USIZE_INIT; + + // TODO: this assert is a real bummer, need to figure out how to reuse + // old IDs that are no longer in use. + // + // Note, though, that it is intended that these ids go away entirely + // eventually, see the comment on `is_current` below. 
+ #[allow(deprecated)] + static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT; + let id = NEXT_ID.fetch_add(1, Ordering::Relaxed); + assert!(id < usize::max_value() / 2, + "too many previous tasks have been allocated"); + id +} + +fn with<F: FnOnce(&BorrowedTask) -> R, R>(f: F) -> R { + unsafe { + let task = get_ptr().expect("no Task is currently running"); + assert!(!task.is_null(), "no Task is currently running"); + f(&*(task as *const BorrowedTask)) + } +} + +/// A handle to a "task", which represents a single lightweight "thread" of +/// execution driving a future to completion. +/// +/// In general, futures are composed into large units of work, which are then +/// spawned as tasks onto an *executor*. The executor is responsible for polling +/// the future as notifications arrive, until the future terminates. +/// +/// This is obtained by the `task::current` function. +/// +/// # FAQ +/// +/// ### Why does `Task` not implement `Eq` and `Hash`? +/// +/// A valid use case for `Task` to implement these two traits has not been +/// encountered. +/// +/// Usually, this question is asked by someone who wants to store a `Task` +/// instance in a `HashSet`. This seems like an obvious way to implement a +/// future aware, multi-handle structure; e.g. a multi-producer channel. +/// +/// In this case, the idea is that whenever a `start_send` is called on one of +/// the channel's send handles, if the channel is at capacity, the current task +/// is stored in a set. Then, when capacity is available, a task is removed from +/// the set and notified. +/// +/// The problem with this strategy is that multiple `Sender` handles can be used +/// on the same task. In this case, when the second handle is used and the task +/// is stored in a set, there already is an entry. Then, when the first +/// handle is dropped, this entry is cleared, resulting in a dead lock. +/// +/// See [here](https://github.com/rust-lang-nursery/futures-rs/issues/670) for +/// more discussion. 
+/// +#[derive(Clone)] +pub struct Task { + id: usize, + unpark: TaskUnpark, + events: UnparkEvents, +} + +trait AssertSend: Send {} +impl AssertSend for Task {} + +/// Returns a handle to the current task to call `notify` at a later date. +/// +/// The returned handle implements the `Send` and `'static` bounds and may also +/// be cheaply cloned. This is useful for squirreling away the handle into a +/// location which is then later signaled that a future can make progress. +/// +/// Implementations of the `Future` trait typically use this function if they +/// would otherwise perform a blocking operation. When something isn't ready +/// yet, this `current` function is called to acquire a handle to the current +/// task, and then the future arranges it such that when the blocking operation +/// otherwise finishes (perhaps in the background) it will `notify` the +/// returned handle. +/// +/// It's sometimes necessary to pass extra information to the task when +/// unparking it, so that the task knows something about *why* it was woken. +/// See the `FutureQueue` documentation for details on how to do this. +/// +/// # Panics +/// +/// This function will panic if a task is not currently being executed. That +/// is, this method can be dangerous to call outside of an implementation of +/// `poll`. +pub fn current() -> Task { + with(|borrowed| { + let unpark = borrowed.unpark.to_owned(); + let events = borrowed.events.to_owned(); + + Task { + id: borrowed.id, + unpark: unpark, + events: events, + } + }) +} + +#[doc(hidden)] +#[deprecated(note = "renamed to `current`")] +pub fn park() -> Task { + current() +} + +impl Task { + /// Indicate that the task should attempt to poll its future in a timely + /// fashion. + /// + /// It's typically guaranteed that, after calling `notify`, `poll` will + /// be called at least once subsequently (unless the future has terminated). 
+ /// If the task is currently polling its future when `notify` is called, it + /// must poll the future *again* afterwards, ensuring that all relevant + /// events are eventually observed by the future. + pub fn notify(&self) { + self.events.notify(); + self.unpark.notify(); + } + + #[doc(hidden)] + #[deprecated(note = "renamed to `notify`")] + pub fn unpark(&self) { + self.notify() + } + + /// Returns `true` when called from within the context of the task. + /// + /// In other words, the task is currently running on the thread calling the + /// function. Note that this is currently, and has historically, been + /// implemented by tracking an `id` on every instance of `Spawn` created. + /// When a `Spawn` is being polled it stores in thread-local-storage the id + /// of the instance, and then `task::current` will return a `Task` that also + /// stores this id. + /// + /// The intention of this function was to answer questions like "if I + /// `notify` this task, is it equivalent to `task::current().notify()`?" + /// The answer "yes" may be able to avoid some extra work to block the + /// current task, such as sending a task along a channel or updating a + /// stored `Task` somewhere. An answer of "no" typically results in doing + /// the work anyway. + /// + /// Unfortunately this function has been somewhat buggy in the past and is + /// not intended to be supported in the future. By simply matching `id` the + /// intended question above isn't accurately taking into account, for + /// example, unpark events (now deprecated, but still a feature). Thus many + /// old users of this API weren't fully accounting for the question it was + /// intended they were asking. + /// + /// This API continues to be implemented but will in the future, e.g. in the + /// 0.1.x series of this crate, eventually return `false` unconditionally. + /// It is intended that this function will be removed in the next breaking + /// change of this crate. 
If you'd like to continue to be able to answer the + /// example question above, it's recommended you use the + /// `will_notify_current` method. + /// + /// If you've got questions about this though please let us know! We'd like + /// to learn about other use cases here that we did not consider. + /// + /// # Panics + /// + /// This function will panic if no current future is being polled. + #[deprecated(note = "intended to be removed, see docs for details")] + pub fn is_current(&self) -> bool { + with(|current| current.id == self.id) + } + + /// This function is intended as a performance optimization for structures + /// which store a `Task` internally. + /// + /// The purpose of this function is to answer the question "if I `notify` + /// this task is it equivalent to `task::current().notify()`". An answer + /// "yes" may mean that you don't actually need to call `task::current()` + /// and store it, but rather you can simply leave a stored task in place. An + /// answer of "no" typically means that you need to call `task::current()` + /// and store it somewhere. + /// + /// As this is purely a performance optimization a valid implementation for + /// this function is to always return `false`. A best effort is done to + /// return `true` where possible, but false negatives may happen. Note that + /// this function will not return a false positive, however. + /// + /// # Panics + /// + /// This function will panic if no current future is being polled. + #[allow(deprecated)] + pub fn will_notify_current(&self) -> bool { + with(|current| { + self.unpark.will_notify(&current.unpark) && + self.events.will_notify(&current.events) + }) + } +} + +impl fmt::Debug for Task { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Task") + .finish() + } +} + +/// Representation of a spawned future/stream. +/// +/// This object is returned by the `spawn` function in this module. 
This +/// represents a "fused task and future", storing all necessary pieces of a task +/// and owning the top-level future that's being driven as well. +/// +/// A `Spawn` can be poll'd for completion or execution of the current thread +/// can be blocked indefinitely until a notification arrives. This can be used +/// with either futures or streams, with different methods being available on +/// `Spawn` depending which is used. +pub struct Spawn<T: ?Sized> { + id: usize, + data: LocalMap, + obj: T, +} + +/// Spawns a future or stream, returning it and the new task responsible for +/// running it to completion. +/// +/// This function is the termination endpoint for running futures. This method +/// will conceptually allocate a new task to run the given object, which is +/// normally either a `Future` or `Stream`. +/// +/// This function is similar to the `thread::spawn` function but does not +/// attempt to run code in the background. The future will not make progress +/// until the methods on `Spawn` are called in turn. +pub fn spawn<T>(obj: T) -> Spawn<T> { + Spawn { + id: fresh_task_id(), + obj: obj, + data: local_map(), + } +} + +impl<T: ?Sized> Spawn<T> { + /// Get a shared reference to the object the Spawn is wrapping. + pub fn get_ref(&self) -> &T { + &self.obj + } + + /// Get a mutable reference to the object the Spawn is wrapping. + pub fn get_mut(&mut self) -> &mut T { + &mut self.obj + } + + /// Consume the Spawn, returning its inner object + pub fn into_inner(self) -> T where T: Sized { + self.obj + } + + /// Calls the provided closure, scheduling notifications to be sent to the + /// `notify` argument. + pub fn poll_fn_notify<N, F, R>(&mut self, + notify: &N, + id: usize, + f: F) -> R + where F: FnOnce(&mut T) -> R, + N: Clone + Into<NotifyHandle>, + { + let mk = || notify.clone().into(); + self.enter(BorrowedUnpark::new(&mk, id), f) + } + + /// Polls the internal future, scheduling notifications to be sent to the + /// `notify` argument. 
+ /// + /// This method will poll the internal future, testing if it's completed + /// yet. The `notify` argument is used as a sink for notifications sent to + /// this future. That is, while the future is being polled, any call to + /// `task::current()` will return a handle that contains the `notify` + /// specified. + /// + /// If this function returns `NotReady`, then the `notify` should have been + /// scheduled to receive a notification when poll can be called again. + /// Otherwise if `Ready` or `Err` is returned, the `Spawn` task can be + /// safely destroyed. + /// + /// Note that `notify` itself is passed as a shared reference, and is itself + /// not required to be a `NotifyHandle`. The `Clone` and `Into` trait bounds + /// will be used to convert this `notify` to a `NotifyHandle` if necessary. + /// This construction can avoid an unnecessary atomic reference count bump + /// in some situations. + /// + /// ## Unsafety and `id` + /// + /// This function and all other `*_notify` functions on this type will treat + /// the `id` specified very carefully, explicitly calling functions like the + /// `notify` argument's `clone_id` and `drop_id` functions. It should be + /// safe to encode a pointer itself into the `id` specified, such as an + /// `Arc<N>` or a `Box<N>`. The `clone_id` and `drop_id` functions are then + /// intended to be sufficient for the memory management related to that + /// pointer. + pub fn poll_future_notify<N>(&mut self, + notify: &N, + id: usize) -> Poll<T::Item, T::Error> + where N: Clone + Into<NotifyHandle>, + T: Future, + { + self.poll_fn_notify(notify, id, |f| f.poll()) + } + + /// Like `poll_future_notify`, except polls the underlying stream. + pub fn poll_stream_notify<N>(&mut self, + notify: &N, + id: usize) + -> Poll<Option<T::Item>, T::Error> + where N: Clone + Into<NotifyHandle>, + T: Stream, + { + self.poll_fn_notify(notify, id, |s| s.poll()) + } + + /// Invokes the underlying `start_send` method with this task in place. 
+ /// + /// If the underlying operation returns `NotReady` then the `notify` value + /// passed in will receive a notification when the operation is ready to be + /// attempted again. + pub fn start_send_notify<N>(&mut self, + value: T::SinkItem, + notify: &N, + id: usize) + -> StartSend<T::SinkItem, T::SinkError> + where N: Clone + Into<NotifyHandle>, + T: Sink, + { + self.poll_fn_notify(notify, id, |s| s.start_send(value)) + } + + /// Invokes the underlying `poll_complete` method with this task in place. + /// + /// If the underlying operation returns `NotReady` then the `notify` value + /// passed in will receive a notification when the operation is ready to be + /// attempted again. + pub fn poll_flush_notify<N>(&mut self, + notify: &N, + id: usize) + -> Poll<(), T::SinkError> + where N: Clone + Into<NotifyHandle>, + T: Sink, + { + self.poll_fn_notify(notify, id, |s| s.poll_complete()) + } + + /// Invokes the underlying `close` method with this task in place. + /// + /// If the underlying operation returns `NotReady` then the `notify` value + /// passed in will receive a notification when the operation is ready to be + /// attempted again. + pub fn close_notify<N>(&mut self, + notify: &N, + id: usize) + -> Poll<(), T::SinkError> + where N: Clone + Into<NotifyHandle>, + T: Sink, + { + self.poll_fn_notify(notify, id, |s| s.close()) + } + + fn enter<F, R>(&mut self, unpark: BorrowedUnpark, f: F) -> R + where F: FnOnce(&mut T) -> R + { + let borrowed = BorrowedTask { + id: self.id, + unpark: unpark, + events: BorrowedEvents::new(), + map: &self.data, + }; + let obj = &mut self.obj; + set(&borrowed, || f(obj)) + } +} + +impl<T: fmt::Debug + ?Sized> fmt::Debug for Spawn<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Spawn") + .field("obj", &&self.obj) + .finish() + } +} + +/// A trait which represents a sink of notifications that a future is ready to +/// make progress. 
+/// +/// This trait is provided as an argument to the `Spawn::*_notify` family of +/// functions. It's transitively used as part of the `Task::notify` method to +/// internally deliver notifications of readiness of a future to move forward. +/// +/// An instance of `Notify` has one primary method, `notify`, which is given a +/// contextual argument as to what's being notified. This contextual argument is +/// *also* provided to the `Spawn::*_notify` family of functions and can be used +/// to reuse an instance of `Notify` across many futures. +/// +/// Instances of `Notify` must be safe to share across threads, and the methods +/// be invoked concurrently. They must also live for the `'static` lifetime, +/// not containing any stack references. +pub trait Notify: Send + Sync { + /// Indicates that an associated future and/or task are ready to make + /// progress. + /// + /// Typically this means that the receiver of the notification should + /// arrange for the future to get poll'd in a prompt fashion. + /// + /// This method takes an `id` as an argument which was transitively passed + /// in from the original call to `Spawn::*_notify`. This id can be used to + /// disambiguate which precise future became ready for polling. + /// + /// # Panics + /// + /// Since `unpark` may be invoked from arbitrary contexts, it should + /// endeavor not to panic and to do as little work as possible. However, it + /// is not guaranteed not to panic, and callers should be wary. If a panic + /// occurs, that panic may or may not be propagated to the end-user of the + /// future that you'd otherwise wake up. + fn notify(&self, id: usize); + + /// This function is called whenever a new copy of `id` is needed. + /// + /// This is called in one of two situations: + /// + /// * A `Task` is being created through `task::current` while a future is + /// being polled. 
In that case the instance of `Notify` passed in to one + /// of the `poll_*` functions is called with the `id` passed into the same + /// `poll_*` function. + /// * A `Task` is itself being cloned. Each `Task` contains its own id and a + /// handle to the `Notify` behind it, and the task's `Notify` is used to + /// clone the internal `id` to assign to the new task. + /// + /// The `id` returned here will be stored in the `Task`-to-be and used later + /// to pass to `notify` when the `Task::notify` function is called on that + /// `Task`. + /// + /// Note that typically this is just the identity function, passing through + /// the identifier. For more unsafe situations, however, if `id` is itself a + /// pointer of some kind this can be used as a hook to "clone" the pointer, + /// depending on what that means for the specified pointer. + fn clone_id(&self, id: usize) -> usize { + id + } + + /// All instances of `Task` store an `id` that they're going to internally + /// notify with, and this function is called when the `Task` is dropped. + /// + /// This function provides a hook for schemes which encode pointers in this + /// `id` argument to deallocate resources associated with the pointer. It's + /// guaranteed that after this function is called the `Task` containing this + /// `id` will no longer use the `id`. + fn drop_id(&self, id: usize) { + drop(id); + } +} + +/// Sets the `NotifyHandle` of the current task for the duration of the provided +/// closure. +/// +/// This function takes a type that can be converted into a notify handle, +/// `notify` and `id`, and a closure `f`. The closure `f` will be executed such +/// that calls to `task::current()` will store a reference to the notify handle +/// provided, not the one previously in the environment. +/// +/// Note that calls to `task::current()` in the closure provided *will not* be +/// equivalent to `task::current()` before this method is called. 
The two tasks +/// returned will notify different handles, and the task handles pulled out +/// during the duration of this closure will not notify the previous task. It's +/// recommended that you call `task::current()` in some capacity before calling +/// this function to ensure that calls to `task::current()` inside of this +/// closure can transitively wake up the outer task. +/// +/// # Panics +/// +/// This function will panic if it is called outside the context of a future's +/// task. This is only valid to call once you've already entered a future via +/// `Spawn::poll_*` functions. +pub fn with_notify<F, T, R>(notify: &T, id: usize, f: F) -> R + where F: FnOnce() -> R, + T: Clone + Into<NotifyHandle>, +{ + with(|task| { + let mk = || notify.clone().into(); + let new_task = BorrowedTask { + id: task.id, + unpark: BorrowedUnpark::new(&mk, id), + events: task.events, + map: task.map, + }; + + set(&new_task, f) + }) +} + +/// An unsafe trait for implementing custom forms of memory management behind a +/// `Task`. +/// +/// The `futures` critically relies on "notification handles" to extract for +/// futures to contain and then later inform that they're ready to make +/// progress. These handles, however, must be cheap to create and cheap +/// to clone to ensure that this operation is efficient throughout the +/// execution of a program. +/// +/// Typically this sort of memory management is done in the standard library +/// with the `Arc` type. An `Arc` is relatively cheap to allocate an is +/// quite cheap to clone and pass around. Plus, it's 100% safe! +/// +/// When working outside the standard library, however, you don't always have +/// and `Arc` type available to you. This trait, `UnsafeNotify`, is intended +/// to be the "unsafe version" of the `Notify` trait. This trait encodes the +/// memory management operations of a `Task`'s notification handle, allowing +/// custom implementations for the memory management of a notification handle. 
+/// +/// Put another way, the core notification type in this library, +/// `NotifyHandle`, simply internally contains an instance of +/// `*mut UnsafeNotify`. This "unsafe trait object" is then used exclusively +/// to operate with, dynamically dispatching calls to clone, drop, and notify. +/// Critically though as a raw pointer it doesn't require a particular form +/// of memory management, allowing external implementations. +/// +/// A default implementation of the `UnsafeNotify` trait is provided for the +/// `Arc` type in the standard library. If the `use_std` feature of this crate +/// is not available however, you'll be required to implement your own +/// instance of this trait to pass it into `NotifyHandle::new`. +/// +/// # Unsafety +/// +/// This trait is manually encoding the memory management of the underlying +/// handle, and as a result is quite unsafe to implement! Implementors of +/// this trait must guarantee: +/// +/// * Calls to `clone_raw` produce uniquely owned handles. It should be safe +/// to drop the current handle and have the returned handle still be valid. +/// * Calls to `drop_raw` work with `self` as a raw pointer, deallocating +/// resources associated with it. This is a pretty unsafe operation as it's +/// invalidating the `self` pointer, so extreme care needs to be taken. +/// +/// In general it's recommended to review the trait documentation as well as +/// the implementation for `Arc` in this crate. When in doubt ping the +/// `futures` authors to clarify an unsafety question here. +pub unsafe trait UnsafeNotify: Notify { + /// Creates a new `NotifyHandle` from this instance of `UnsafeNotify`. + /// + /// This function will create a new uniquely owned handle that under the + /// hood references the same notification instance. In other words calls + /// to `notify` on the returned handle should be equivalent to calls to + /// `notify` on this handle. 
+ /// + /// # Unsafety + /// + /// This trait is unsafe to implement, as are all these methods. This + /// method is also unsafe to call as it's asserting the `UnsafeNotify` + /// value is in a consistent state. In general it's recommended to + /// review the trait documentation as well as the implementation for `Arc` + /// in this crate. When in doubt ping the `futures` authors to clarify + /// an unsafety question here. + unsafe fn clone_raw(&self) -> NotifyHandle; + + /// Drops this instance of `UnsafeNotify`, deallocating resources + /// associated with it. + /// + /// This method is intended to have a signature such as: + /// + /// ```ignore + /// fn drop_raw(self: *mut Self); + /// ``` + /// + /// Unfortunately in Rust today that signature is not object safe. + /// Nevertheless it's recommended to implement this function *as if* that + /// were its signature. As such it is not safe to call on an invalid + /// pointer, nor is the validity of the pointer guaranteed after this + /// function returns. + /// + /// # Unsafety + /// + /// This trait is unsafe to implement, as are all these methods. This + /// method is also unsafe to call as it's asserting the `UnsafeNotify` + /// value is in a consistent state. In general it's recommended to + /// review the trait documentation as well as the implementation for `Arc` + /// in this crate. When in doubt ping the `futures` authors to clarify + /// an unsafety question here. + unsafe fn drop_raw(&self); +} + +/// A `NotifyHandle` is the core value through which notifications are routed +/// in the `futures` crate. +/// +/// All instances of `Task` will contain a `NotifyHandle` handle internally. +/// This handle itself contains a trait object pointing to an instance of the +/// `Notify` trait, allowing notifications to get routed through it. +/// +/// The `NotifyHandle` type internally does not codify any particular memory +/// management strategy. 
Internally it contains an instance of `*mut +/// UnsafeNotify`, and more details about that trait can be found on its own +/// documentation. Consequently, though, the one constructor of this type, +/// `NotifyHandle::new`, is `unsafe` to call. It is not recommended to call +/// this constructor directly. +/// +/// If you're working with the standard library then it's recommended to +/// work with the `Arc` type. If you have a struct, `T`, which implements the +/// `Notify` trait, then you can construct this with +/// `NotifyHandle::from(t: Arc<T>)`. The coercion to `UnsafeNotify` will +/// happen automatically and safely for you. +/// +/// When working externally from the standard library it's recommended to +/// provide a similar safe constructor for your custom type as opposed to +/// recommending an invocation of `NotifyHandle::new` directly. +pub struct NotifyHandle { + inner: *mut UnsafeNotify, +} + +unsafe impl Send for NotifyHandle {} +unsafe impl Sync for NotifyHandle {} + +impl NotifyHandle { + /// Constructs a new `NotifyHandle` directly. + /// + /// Note that most code will not need to call this. Implementers of the + /// `UnsafeNotify` trait will typically provide a wrapper that calls this + /// but you otherwise shouldn't call it directly. + /// + /// If you're working with the standard library then it's recommended to + /// use the `NotifyHandle::from` function instead which works with the safe + /// `Arc` type and the safe `Notify` trait. + #[inline] + pub unsafe fn new(inner: *mut UnsafeNotify) -> NotifyHandle { + NotifyHandle { inner: inner } + } + + /// Invokes the underlying instance of `Notify` with the provided `id`. 
+ pub fn notify(&self, id: usize) { + unsafe { (*self.inner).notify(id) } + } + + fn clone_id(&self, id: usize) -> usize { + unsafe { (*self.inner).clone_id(id) } + } + + fn drop_id(&self, id: usize) { + unsafe { (*self.inner).drop_id(id) } + } +} + +impl Clone for NotifyHandle { + #[inline] + fn clone(&self) -> Self { + unsafe { + (*self.inner).clone_raw() + } + } +} + +impl fmt::Debug for NotifyHandle { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("NotifyHandle") + .finish() + } +} + +impl Drop for NotifyHandle { + fn drop(&mut self) { + unsafe { + (*self.inner).drop_raw() + } + } +} + +/// Marker for a `T` that is behind &'static. +struct StaticRef<T>(PhantomData<T>); + +impl<T: Notify> Notify for StaticRef<T> { + fn notify(&self, id: usize) { + let me = unsafe { &*(self as *const _ as *const T) }; + me.notify(id); + } + + fn clone_id(&self, id: usize) -> usize { + let me = unsafe { &*(self as *const _ as *const T) }; + me.clone_id(id) + } + + fn drop_id(&self, id: usize) { + let me = unsafe { &*(self as *const _ as *const T) }; + me.drop_id(id); + } +} + +unsafe impl<T: Notify + 'static> UnsafeNotify for StaticRef<T> { + unsafe fn clone_raw(&self) -> NotifyHandle { + NotifyHandle::new(self as *const _ as *mut StaticRef<T>) + } + + unsafe fn drop_raw(&self) {} +} + +impl<T: Notify> From<&'static T> for NotifyHandle { + fn from(src : &'static T) -> NotifyHandle { + unsafe { NotifyHandle::new(src as *const _ as *mut StaticRef<T>) } + } +} + +#[cfg(feature = "nightly")] +mod nightly { + use super::NotifyHandle; + use core::marker::Unpin; + + impl Unpin for NotifyHandle {} +} diff --git a/third_party/rust/futures-0.1.31/src/task_impl/std/data.rs b/third_party/rust/futures-0.1.31/src/task_impl/std/data.rs new file mode 100644 index 0000000000..770912b219 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/task_impl/std/data.rs @@ -0,0 +1,131 @@ +use std::prelude::v1::*; + +use std::any::TypeId; +use std::cell::RefCell; +use 
std::hash::{BuildHasherDefault, Hasher}; +use std::collections::HashMap; + +use task_impl::with; + +/// A macro to create a `static` of type `LocalKey` +/// +/// This macro is intentionally similar to the `thread_local!`, and creates a +/// `static` which has a `with` method to access the data on a task. +/// +/// The data associated with each task local is per-task, so different tasks +/// will contain different values. +#[macro_export] +macro_rules! task_local { + (static $NAME:ident: $t:ty = $e:expr) => ( + static $NAME: $crate::task::LocalKey<$t> = { + fn __init() -> $t { $e } + fn __key() -> ::std::any::TypeId { + struct __A; + ::std::any::TypeId::of::<__A>() + } + $crate::task::LocalKey { + __init: __init, + __key: __key, + } + }; + ) +} + +pub type LocalMap = RefCell<HashMap<TypeId, + Box<Opaque>, + BuildHasherDefault<IdHasher>>>; + +pub fn local_map() -> LocalMap { + RefCell::new(HashMap::default()) +} + +pub trait Opaque: Send {} +impl<T: Send> Opaque for T {} + +/// A key for task-local data stored in a future's task. +/// +/// This type is generated by the `task_local!` macro and performs very +/// similarly to the `thread_local!` macro and `std::thread::LocalKey` types. +/// Data associated with a `LocalKey<T>` is stored inside of a future's task, +/// and the data is destroyed when the future is completed and the task is +/// destroyed. +/// +/// Task-local data can migrate between threads and hence requires a `Send` +/// bound. Additionally, task-local data also requires the `'static` bound to +/// ensure it lives long enough. When a key is accessed for the first time the +/// task's data is initialized with the provided initialization expression to +/// the macro. +#[derive(Debug)] +pub struct LocalKey<T> { + // "private" fields which have to be public to get around macro hygiene, not + // included in the stability story for this type. Can change at any time. 
+ #[doc(hidden)] + pub __key: fn() -> TypeId, + #[doc(hidden)] + pub __init: fn() -> T, +} + +pub struct IdHasher { + id: u64, +} + +impl Default for IdHasher { + fn default() -> IdHasher { + IdHasher { id: 0 } + } +} + +impl Hasher for IdHasher { + fn write(&mut self, _bytes: &[u8]) { + // TODO: need to do something sensible + panic!("can only hash u64"); + } + + fn write_u64(&mut self, u: u64) { + self.id = u; + } + + fn finish(&self) -> u64 { + self.id + } +} + +impl<T: Send + 'static> LocalKey<T> { + /// Access this task-local key, running the provided closure with a + /// reference to the value. + /// + /// This function will access this task-local key to retrieve the data + /// associated with the current task and this key. If this is the first time + /// this key has been accessed on this task, then the key will be + /// initialized with the initialization expression provided at the time the + /// `task_local!` macro was called. + /// + /// The provided closure will be provided a shared reference to the + /// underlying data associated with this task-local-key. The data itself is + /// stored inside of the current task. + /// + /// # Panics + /// + /// This function can possibly panic for a number of reasons: + /// + /// * If there is not a current task. 
+ /// * If the initialization expression is run and it panics + /// * If the closure provided panics + pub fn with<F, R>(&'static self, f: F) -> R + where F: FnOnce(&T) -> R + { + let key = (self.__key)(); + with(|task| { + let raw_pointer = { + let mut data = task.map.borrow_mut(); + let entry = data.entry(key).or_insert_with(|| { + Box::new((self.__init)()) + }); + &**entry as *const Opaque as *const T + }; + unsafe { + f(&*raw_pointer) + } + }) + } +} diff --git a/third_party/rust/futures-0.1.31/src/task_impl/std/mod.rs b/third_party/rust/futures-0.1.31/src/task_impl/std/mod.rs new file mode 100644 index 0000000000..e82a23e5d0 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/task_impl/std/mod.rs @@ -0,0 +1,719 @@ +use std::prelude::v1::*; + +use std::cell::Cell; +use std::fmt; +use std::marker::PhantomData; +use std::mem; +use std::ptr; +use std::sync::{Arc, Mutex, Condvar, Once}; +#[allow(deprecated)] +use std::sync::ONCE_INIT; +use std::sync::atomic::{AtomicUsize, Ordering}; + +use {Future, Stream, Sink, Poll, Async, StartSend, AsyncSink}; +use super::core; +use super::{BorrowedTask, NotifyHandle, Spawn, spawn, Notify, UnsafeNotify}; + +mod unpark_mutex; +pub use self::unpark_mutex::UnparkMutex; + +mod data; +pub use self::data::*; + +mod task_rc; +#[allow(deprecated)] +#[cfg(feature = "with-deprecated")] +pub use self::task_rc::TaskRc; + +pub use task_impl::core::init; + +thread_local!(static CURRENT_TASK: Cell<*mut u8> = Cell::new(ptr::null_mut())); + +/// Return whether the caller is running in a task (and so can use task_local!). +pub fn is_in_task() -> bool { + CURRENT_TASK.with(|task| !task.get().is_null()) +} + +#[allow(deprecated)] +static INIT: Once = ONCE_INIT; + +pub fn get_ptr() -> Option<*mut u8> { + // Since this condition will always return true when TLS task storage is + // used (the default), the branch predictor will be able to optimize the + // branching and a dynamic dispatch will be avoided, which makes the + // compiler happier. 
+ if core::is_get_ptr(0x1) { + Some(CURRENT_TASK.with(|c| c.get())) + } else { + core::get_ptr() + } +} + +fn tls_slot() -> *const Cell<*mut u8> { + CURRENT_TASK.with(|c| c as *const _) +} + +pub fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R + where F: FnOnce() -> R +{ + // Lazily initialize the get / set ptrs + // + // Note that we won't actually use these functions ever, we'll instead be + // testing the pointer's value elsewhere and calling our own functions. + INIT.call_once(|| unsafe { + let get = mem::transmute::<usize, _>(0x1); + let set = mem::transmute::<usize, _>(0x2); + init(get, set); + }); + + // Same as above. + if core::is_get_ptr(0x1) { + struct Reset(*const Cell<*mut u8>, *mut u8); + + impl Drop for Reset { + #[inline] + fn drop(&mut self) { + unsafe { + (*self.0).set(self.1); + } + } + } + + unsafe { + let slot = tls_slot(); + let _reset = Reset(slot, (*slot).get()); + (*slot).set(task as *const _ as *mut u8); + f() + } + } else { + core::set(task, f) + } +} + +#[derive(Copy, Clone)] +#[allow(deprecated)] +pub enum BorrowedUnpark<'a> { + Old(&'a Arc<Unpark>), + New(core::BorrowedUnpark<'a>), +} + +#[derive(Copy, Clone)] +#[allow(deprecated)] +pub enum BorrowedEvents<'a> { + None, + One(&'a UnparkEvent, &'a BorrowedEvents<'a>), +} + +#[derive(Clone)] +pub enum TaskUnpark { + #[allow(deprecated)] + Old(Arc<Unpark>), + New(core::TaskUnpark), +} + +#[derive(Clone)] +#[allow(deprecated)] +pub enum UnparkEvents { + None, + One(UnparkEvent), + Many(Box<[UnparkEvent]>), +} + +impl<'a> BorrowedUnpark<'a> { + #[inline] + pub fn new(f: &'a Fn() -> NotifyHandle, id: usize) -> BorrowedUnpark<'a> { + BorrowedUnpark::New(core::BorrowedUnpark::new(f, id)) + } + + #[inline] + pub fn to_owned(&self) -> TaskUnpark { + match *self { + BorrowedUnpark::Old(old) => TaskUnpark::Old(old.clone()), + BorrowedUnpark::New(new) => TaskUnpark::New(new.to_owned()), + } + } +} + +impl<'a> BorrowedEvents<'a> { + #[inline] + pub fn new() -> BorrowedEvents<'a> { + 
BorrowedEvents::None + } + + #[inline] + pub fn to_owned(&self) -> UnparkEvents { + let mut one_event = None; + let mut list = Vec::new(); + let mut cur = self; + while let BorrowedEvents::One(event, next) = *cur { + let event = event.clone(); + match one_event.take() { + None if list.len() == 0 => one_event = Some(event), + None => list.push(event), + Some(event2) => { + list.push(event2); + list.push(event); + } + } + cur = next; + } + + match one_event { + None if list.len() == 0 => UnparkEvents::None, + None => UnparkEvents::Many(list.into_boxed_slice()), + Some(e) => UnparkEvents::One(e), + } + } +} + +impl UnparkEvents { + pub fn notify(&self) { + match *self { + UnparkEvents::None => {} + UnparkEvents::One(ref e) => e.unpark(), + UnparkEvents::Many(ref list) => { + for event in list.iter() { + event.unpark(); + } + } + } + } + + pub fn will_notify(&self, events: &BorrowedEvents) -> bool { + // Pessimistically assume that any unpark events mean that we're not + // equivalent to the current task. 
+ match *self { + UnparkEvents::None => {} + _ => return false, + } + + match *events { + BorrowedEvents::None => return true, + _ => {}, + } + + return false + } +} + +#[allow(deprecated)] +impl TaskUnpark { + pub fn notify(&self) { + match *self { + TaskUnpark::Old(ref old) => old.unpark(), + TaskUnpark::New(ref new) => new.notify(), + } + } + + pub fn will_notify(&self, unpark: &BorrowedUnpark) -> bool { + match (unpark, self) { + (&BorrowedUnpark::Old(old1), &TaskUnpark::Old(ref old2)) => { + &**old1 as *const Unpark == &**old2 as *const Unpark + } + (&BorrowedUnpark::New(ref new1), &TaskUnpark::New(ref new2)) => { + new2.will_notify(new1) + } + _ => false, + } + } +} + +impl<F: Future> Spawn<F> { + #[doc(hidden)] + #[deprecated(note = "recommended to use `poll_future_notify` instead")] + #[allow(deprecated)] + pub fn poll_future(&mut self, unpark: Arc<Unpark>) -> Poll<F::Item, F::Error> { + self.enter(BorrowedUnpark::Old(&unpark), |f| f.poll()) + } + + /// Waits for the internal future to complete, blocking this thread's + /// execution until it does. + /// + /// This function will call `poll_future` in a loop, waiting for the future + /// to complete. When a future cannot make progress it will use + /// `thread::park` to block the current thread. + pub fn wait_future(&mut self) -> Result<F::Item, F::Error> { + ThreadNotify::with_current(|notify| { + + loop { + match self.poll_future_notify(notify, 0)? 
{ + Async::NotReady => notify.park(), + Async::Ready(e) => return Ok(e), + } + } + }) + } + + + #[doc(hidden)] + #[deprecated] + #[allow(deprecated)] + pub fn execute(self, exec: Arc<Executor>) + where F: Future<Item=(), Error=()> + Send + 'static, + { + exec.clone().execute(Run { + // Ideally this method would be defined directly on + // `Spawn<BoxFuture<(), ()>>` so we wouldn't have to box here and + // it'd be more explicit, but unfortunately that currently has a + // link error on nightly: rust-lang/rust#36155 + spawn: spawn(Box::new(self.into_inner())), + inner: Arc::new(RunInner { + exec: exec, + mutex: UnparkMutex::new() + }), + }) + } +} + +impl<S: Stream> Spawn<S> { + #[deprecated(note = "recommended to use `poll_stream_notify` instead")] + #[allow(deprecated)] + #[doc(hidden)] + pub fn poll_stream(&mut self, unpark: Arc<Unpark>) + -> Poll<Option<S::Item>, S::Error> { + self.enter(BorrowedUnpark::Old(&unpark), |s| s.poll()) + } + + /// Like `wait_future`, except only waits for the next element to arrive on + /// the underlying stream. 
+ pub fn wait_stream(&mut self) -> Option<Result<S::Item, S::Error>> { + ThreadNotify::with_current(|notify| { + + loop { + match self.poll_stream_notify(notify, 0) { + Ok(Async::NotReady) => notify.park(), + Ok(Async::Ready(Some(e))) => return Some(Ok(e)), + Ok(Async::Ready(None)) => return None, + Err(e) => return Some(Err(e)), + } + } + }) + } +} + +impl<S: Sink> Spawn<S> { + #[doc(hidden)] + #[deprecated(note = "recommended to use `start_send_notify` instead")] + #[allow(deprecated)] + pub fn start_send(&mut self, value: S::SinkItem, unpark: &Arc<Unpark>) + -> StartSend<S::SinkItem, S::SinkError> { + self.enter(BorrowedUnpark::Old(unpark), |s| s.start_send(value)) + } + + #[deprecated(note = "recommended to use `poll_flush_notify` instead")] + #[allow(deprecated)] + #[doc(hidden)] + pub fn poll_flush(&mut self, unpark: &Arc<Unpark>) + -> Poll<(), S::SinkError> { + self.enter(BorrowedUnpark::Old(unpark), |s| s.poll_complete()) + } + + /// Blocks the current thread until it's able to send `value` on this sink. + /// + /// This function will send the `value` on the sink that this task wraps. If + /// the sink is not ready to send the value yet then the current thread will + /// be blocked until it's able to send the value. + pub fn wait_send(&mut self, mut value: S::SinkItem) + -> Result<(), S::SinkError> { + ThreadNotify::with_current(|notify| { + + loop { + value = match self.start_send_notify(value, notify, 0)? { + AsyncSink::NotReady(v) => v, + AsyncSink::Ready => return Ok(()), + }; + notify.park(); + } + }) + } + + /// Blocks the current thread until it's able to flush this sink. + /// + /// This function will call the underlying sink's `poll_complete` method + /// until it returns that it's ready, proxying out errors upwards to the + /// caller if one occurs. + /// + /// The thread will be blocked until `poll_complete` returns that it's + /// ready. 
+ pub fn wait_flush(&mut self) -> Result<(), S::SinkError> { + ThreadNotify::with_current(|notify| { + + loop { + if self.poll_flush_notify(notify, 0)?.is_ready() { + return Ok(()) + } + notify.park(); + } + }) + } + + /// Blocks the current thread until it's able to close this sink. + /// + /// This function will close the sink that this task wraps. If the sink + /// is not ready to be close yet, then the current thread will be blocked + /// until it's closed. + pub fn wait_close(&mut self) -> Result<(), S::SinkError> { + ThreadNotify::with_current(|notify| { + + loop { + if self.close_notify(notify, 0)?.is_ready() { + return Ok(()) + } + notify.park(); + } + }) + } +} + +/// A trait which represents a sink of notifications that a future is ready to +/// make progress. +/// +/// This trait is provided as an argument to the `Spawn::poll_future` and +/// `Spawn::poll_stream` functions. It's transitively used as part of the +/// `Task::unpark` method to internally deliver notifications of readiness of a +/// future to move forward. +#[deprecated(note = "recommended to use `Notify` instead")] +pub trait Unpark: Send + Sync { + /// Indicates that an associated future and/or task are ready to make + /// progress. + /// + /// Typically this means that the receiver of the notification should + /// arrange for the future to get poll'd in a prompt fashion. + fn unpark(&self); +} + +/// A trait representing requests to poll futures. +/// +/// This trait is an argument to the `Spawn::execute` which is used to run a +/// future to completion. An executor will receive requests to run a future and +/// an executor is responsible for ensuring that happens in a timely fashion. +/// +/// Note that this trait is likely to be deprecated and/or renamed to avoid +/// clashing with the `future::Executor` trait. If you've got a use case for +/// this or would like to comment on the name please let us know! 
+#[deprecated] +#[allow(deprecated)] +pub trait Executor: Send + Sync + 'static { + /// Requests that `Run` is executed soon on the given executor. + fn execute(&self, r: Run); +} + +/// Units of work submitted to an `Executor`, currently only created +/// internally. +#[deprecated] +pub struct Run { + spawn: Spawn<Box<Future<Item = (), Error = ()> + Send>>, + inner: Arc<RunInner>, +} + +#[allow(deprecated)] +struct RunInner { + mutex: UnparkMutex<Run>, + exec: Arc<Executor>, +} + +#[allow(deprecated)] +impl Run { + /// Actually run the task (invoking `poll` on its future) on the current + /// thread. + pub fn run(self) { + let Run { mut spawn, inner } = self; + + // SAFETY: the ownership of this `Run` object is evidence that + // we are in the `POLLING`/`REPOLL` state for the mutex. + unsafe { + inner.mutex.start_poll(); + + loop { + match spawn.poll_future_notify(&inner, 0) { + Ok(Async::NotReady) => {} + Ok(Async::Ready(())) | + Err(()) => return inner.mutex.complete(), + } + let run = Run { spawn: spawn, inner: inner.clone() }; + match inner.mutex.wait(run) { + Ok(()) => return, // we've waited + Err(r) => spawn = r.spawn, // someone's notified us + } + } + } + } +} + +#[allow(deprecated)] +impl fmt::Debug for Run { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Run") + .field("contents", &"...") + .finish() + } +} + +#[allow(deprecated)] +impl Notify for RunInner { + fn notify(&self, _id: usize) { + match self.mutex.notify() { + Ok(run) => self.exec.execute(run), + Err(()) => {} + } + } +} + +// ===== ThreadNotify ===== + +struct ThreadNotify { + state: AtomicUsize, + mutex: Mutex<()>, + condvar: Condvar, +} + +const IDLE: usize = 0; +const NOTIFY: usize = 1; +const SLEEP: usize = 2; + +thread_local! 
{ + static CURRENT_THREAD_NOTIFY: Arc<ThreadNotify> = Arc::new(ThreadNotify { + state: AtomicUsize::new(IDLE), + mutex: Mutex::new(()), + condvar: Condvar::new(), + }); +} + +impl ThreadNotify { + fn with_current<F, R>(f: F) -> R + where F: FnOnce(&Arc<ThreadNotify>) -> R, + { + CURRENT_THREAD_NOTIFY.with(|notify| f(notify)) + } + + fn park(&self) { + // If currently notified, then we skip sleeping. This is checked outside + // of the lock to avoid acquiring a mutex if not necessary. + match self.state.compare_and_swap(NOTIFY, IDLE, Ordering::SeqCst) { + NOTIFY => return, + IDLE => {}, + _ => unreachable!(), + } + + // The state is currently idle, so obtain the lock and then try to + // transition to a sleeping state. + let mut m = self.mutex.lock().unwrap(); + + // Transition to sleeping + match self.state.compare_and_swap(IDLE, SLEEP, Ordering::SeqCst) { + NOTIFY => { + // Notified before we could sleep, consume the notification and + // exit + self.state.store(IDLE, Ordering::SeqCst); + return; + } + IDLE => {}, + _ => unreachable!(), + } + + // Loop until we've been notified + loop { + m = self.condvar.wait(m).unwrap(); + + // Transition back to idle, loop otherwise + if NOTIFY == self.state.compare_and_swap(NOTIFY, IDLE, Ordering::SeqCst) { + return; + } + } + } +} + +impl Notify for ThreadNotify { + fn notify(&self, _unpark_id: usize) { + // First, try transitioning from IDLE -> NOTIFY, this does not require a + // lock. 
+ match self.state.compare_and_swap(IDLE, NOTIFY, Ordering::SeqCst) { + IDLE | NOTIFY => return, + SLEEP => {} + _ => unreachable!(), + } + + // The other half is sleeping, this requires a lock + let _m = self.mutex.lock().unwrap(); + + // Transition from SLEEP -> NOTIFY + match self.state.compare_and_swap(SLEEP, NOTIFY, Ordering::SeqCst) { + SLEEP => {} + _ => return, + } + + // Wakeup the sleeper + self.condvar.notify_one(); + } +} + +// ===== UnparkEvent ===== + +/// For the duration of the given callback, add an "unpark event" to be +/// triggered when the task handle is used to unpark the task. +/// +/// Unpark events are used to pass information about what event caused a task to +/// be unparked. In some cases, tasks are waiting on a large number of possible +/// events, and need precise information about the wakeup to avoid extraneous +/// polling. +/// +/// Every `Task` handle comes with a set of unpark events which will fire when +/// `unpark` is called. When fired, these events insert an identifier into a +/// concurrent set, which the task can read from to determine what events +/// occurred. +/// +/// This function immediately invokes the closure, `f`, but arranges things so +/// that `task::park` will produce a `Task` handle that includes the given +/// unpark event. +/// +/// # Panics +/// +/// This function will panic if a task is not currently being executed. That +/// is, this method can be dangerous to call outside of an implementation of +/// `poll`. +#[deprecated(note = "recommended to use `FuturesUnordered` instead")] +#[allow(deprecated)] +pub fn with_unpark_event<F, R>(event: UnparkEvent, f: F) -> R + where F: FnOnce() -> R +{ + super::with(|task| { + let new_task = BorrowedTask { + id: task.id, + unpark: task.unpark, + events: BorrowedEvents::One(&event, &task.events), + map: task.map, + }; + + super::set(&new_task, f) + }) +} + +/// A set insertion to trigger upon `unpark`. 
+/// +/// Unpark events are used to communicate information about *why* an unpark +/// occurred, in particular populating sets with event identifiers so that the +/// unparked task can avoid extraneous polling. See `with_unpark_event` for +/// more. +#[derive(Clone)] +#[deprecated(note = "recommended to use `FuturesUnordered` instead")] +#[allow(deprecated)] +pub struct UnparkEvent { + set: Arc<EventSet>, + item: usize, +} + +#[allow(deprecated)] +impl UnparkEvent { + /// Construct an unpark event that will insert `id` into `set` when + /// triggered. + #[deprecated(note = "recommended to use `FuturesUnordered` instead")] + pub fn new(set: Arc<EventSet>, id: usize) -> UnparkEvent { + UnparkEvent { + set: set, + item: id, + } + } + + fn unpark(&self) { + self.set.insert(self.item); + } +} + +#[allow(deprecated)] +impl fmt::Debug for UnparkEvent { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("UnparkEvent") + .field("set", &"...") + .field("item", &self.item) + .finish() + } +} + +/// A concurrent set which allows for the insertion of `usize` values. +/// +/// `EventSet`s are used to communicate precise information about the event(s) +/// that triggered a task notification. See `task::with_unpark_event` for details. +#[deprecated(since="0.1.18", note = "recommended to use `FuturesUnordered` instead")] +pub trait EventSet: Send + Sync + 'static { + /// Insert the given ID into the set + fn insert(&self, id: usize); +} + +// Safe implementation of `UnsafeNotify` for `Arc` in the standard library. +// +// Note that this is a very unsafe implementation! The crucial pieces is that +// these two values are considered equivalent: +// +// * Arc<T> +// * *const ArcWrapped<T> +// +// We don't actually know the layout of `ArcWrapped<T>` as it's an +// implementation detail in the standard library. We can work, though, by +// casting it through and back an `Arc<T>`. 
+// +// This also means that you won't actually fine `UnsafeNotify for Arc<T>` +// because it's the wrong level of indirection. These methods are sort of +// receiving Arc<T>, but not an owned version. It's... complicated. We may be +// one of the first users of unsafe trait objects! + +struct ArcWrapped<T>(PhantomData<T>); + +impl<T: Notify + 'static> Notify for ArcWrapped<T> { + fn notify(&self, id: usize) { + unsafe { + let me: *const ArcWrapped<T> = self; + T::notify(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>), + id) + } + } + + fn clone_id(&self, id: usize) -> usize { + unsafe { + let me: *const ArcWrapped<T> = self; + T::clone_id(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>), + id) + } + } + + fn drop_id(&self, id: usize) { + unsafe { + let me: *const ArcWrapped<T> = self; + T::drop_id(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>), + id) + } + } +} + +unsafe impl<T: Notify + 'static> UnsafeNotify for ArcWrapped<T> { + unsafe fn clone_raw(&self) -> NotifyHandle { + let me: *const ArcWrapped<T> = self; + let arc = (*(&me as *const *const ArcWrapped<T> as *const Arc<T>)).clone(); + NotifyHandle::from(arc) + } + + unsafe fn drop_raw(&self) { + let mut me: *const ArcWrapped<T> = self; + let me = &mut me as *mut *const ArcWrapped<T> as *mut Arc<T>; + ptr::drop_in_place(me); + } +} + +impl<T> From<Arc<T>> for NotifyHandle + where T: Notify + 'static, +{ + fn from(rc: Arc<T>) -> NotifyHandle { + unsafe { + let ptr = mem::transmute::<Arc<T>, *mut ArcWrapped<T>>(rc); + NotifyHandle::new(ptr) + } + } +} + +#[cfg(feature = "nightly")] +mod nightly { + use super::{TaskUnpark, UnparkEvents}; + use core::marker::Unpin; + + impl Unpin for TaskUnpark {} + impl Unpin for UnparkEvents {} +} diff --git a/third_party/rust/futures-0.1.31/src/task_impl/std/task_rc.rs b/third_party/rust/futures-0.1.31/src/task_impl/std/task_rc.rs new file mode 100644 index 0000000000..51bb44878d --- /dev/null +++ 
b/third_party/rust/futures-0.1.31/src/task_impl/std/task_rc.rs @@ -0,0 +1,129 @@ +#![cfg(feature = "with-deprecated")] +#![allow(deprecated)] +#![deprecated(since = "0.1.4", + note = "replaced with `BiLock` in many cases, otherwise slated \ + for removal due to confusion")] + +use std::prelude::v1::*; +use std::sync::Arc; +use std::cell::UnsafeCell; +use task_impl; + +// One critical piece of this module's contents are the `TaskRc<A>` handles. +// The purpose of this is to conceptually be able to store data in a task, +// allowing it to be accessed within multiple futures at once. For example if +// you have some concurrent futures working, they may all want mutable access to +// some data. We already know that when the futures are being poll'd that we're +// entirely synchronized (aka `&mut Task`), so you shouldn't require an +// `Arc<Mutex<T>>` to share as the synchronization isn't necessary! +// +// So the idea here is that you insert data into a task via `Task::insert`, and +// a handle to that data is then returned to you. That handle can later get +// presented to the task itself to actually retrieve the underlying data. The +// invariant is that the data can only ever be accessed with the task present, +// and the lifetime of the actual data returned is connected to the lifetime of +// the task itself. +// +// Conceptually I at least like to think of this as "dynamically adding more +// struct fields to a `Task`". Each call to insert creates a new "name" for the +// struct field, a `TaskRc<A>`, and then you can access the fields of a struct +// with the struct itself (`Task`) as well as the name of the field +// (`TaskRc<A>`). If that analogy doesn't make sense then oh well, it at least +// helped me! +// +// So anyway, we do some interesting trickery here to actually get it to work. +// Each `TaskRc<A>` handle stores `Arc<UnsafeCell<A>>`. So it turns out, we're +// not even adding data to the `Task`! 
Each `TaskRc<A>` contains a reference +// to this `Arc`, and `TaskRc` handles can be cloned which just bumps the +// reference count on the `Arc` itself. +// +// As before, though, you can present the `Arc` to a `Task` and if they +// originated from the same place you're allowed safe access to the internals. +// We allow but shared and mutable access without the `Sync` bound on the data, +// crucially noting that a `Task` itself is not `Sync`. +// +// So hopefully I've convinced you of this point that the `get` and `get_mut` +// methods below are indeed safe. The data is always valid as it's stored in an +// `Arc`, and access is only allowed with the proof of the associated `Task`. +// One thing you might be asking yourself though is what exactly is this "proof +// of a task"? Right now it's a `usize` corresponding to the `Task`'s +// `TaskHandle` arc allocation. +// +// Wait a minute, isn't that the ABA problem! That is, we create a task A, add +// some data to it, destroy task A, do some work, create a task B, and then ask +// to get the data from task B. In this case though the point of the +// `task_inner` "proof" field is simply that there's some non-`Sync` token +// proving that you can get access to the data. So while weird, this case should +// still be safe, as the data's not stored in the task itself. + +/// A reference to a piece of data that's accessible only within a specific +/// `Task`. +/// +/// This data is `Send` even when `A` is not `Sync`, because the data stored +/// within is accessed in a single-threaded way. The thread accessing it may +/// change over time, if the task migrates, so `A` must be `Send`. 
+#[derive(Debug)] +pub struct TaskRc<A> { + task: task_impl::Task, + ptr: Arc<UnsafeCell<A>>, +} + +// for safety here, see docs at the top of this module +unsafe impl<A: Send> Send for TaskRc<A> {} +unsafe impl<A: Sync> Sync for TaskRc<A> {} + +impl<A> TaskRc<A> { + /// Inserts a new piece of task-local data into this task, returning a + /// reference to it. + /// + /// Ownership of the data will be transferred to the task, and the data will + /// be destroyed when the task itself is destroyed. The returned value can + /// be passed to the `with` method to get a reference back to the original + /// data. + /// + /// Note that the returned handle is cloneable and copyable and can be sent + /// to other futures which will be associated with the same task. All + /// futures will then have access to this data when passed the reference + /// back. + /// + /// # Panics + /// + /// This function will panic if a task is not currently running. + pub fn new(a: A) -> TaskRc<A> { + TaskRc { + task: task_impl::park(), + ptr: Arc::new(UnsafeCell::new(a)), + } + } + + /// Operate with a reference to the underlying data. + /// + /// This method should be passed a handle previously returned by + /// `Task::insert`. That handle, when passed back into this method, will + /// retrieve a reference to the original data. + /// + /// # Panics + /// + /// This method will panic if a task is not currently running or if `self` + /// does not belong to the task that is currently running. That is, if + /// another task generated the `data` handle passed in, this method will + /// panic. 
+ pub fn with<F, R>(&self, f: F) -> R + where F: FnOnce(&A) -> R + { + if !self.task.is_current() { + panic!("TaskRc being accessed on task it does not belong to"); + } + + f(unsafe { &*self.ptr.get() }) + } +} + +impl<A> Clone for TaskRc<A> { + fn clone(&self) -> TaskRc<A> { + TaskRc { + task: self.task.clone(), + ptr: self.ptr.clone(), + } + } +} diff --git a/third_party/rust/futures-0.1.31/src/task_impl/std/unpark_mutex.rs b/third_party/rust/futures-0.1.31/src/task_impl/std/unpark_mutex.rs new file mode 100644 index 0000000000..246def2753 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/task_impl/std/unpark_mutex.rs @@ -0,0 +1,144 @@ +use std::cell::UnsafeCell; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; + +/// A "lock" around data `D`, which employs a *helping* strategy. +/// +/// Used to ensure that concurrent `unpark` invocations lead to (1) `poll` being +/// invoked on only a single thread at a time (2) `poll` being invoked at least +/// once after each `unpark` (unless the future has completed). +pub struct UnparkMutex<D> { + // The state of task execution (state machine described below) + status: AtomicUsize, + + // The actual task data, accessible only in the POLLING state + inner: UnsafeCell<Option<D>>, +} + +// `UnparkMutex<D>` functions in many ways like a `Mutex<D>`, except that on +// acquisition failure, the current lock holder performs the desired work -- +// re-polling. +// +// As such, these impls mirror those for `Mutex<D>`. In particular, a reference +// to `UnparkMutex` can be used to gain `&mut` access to the inner data, which +// must therefore be `Send`. 
+unsafe impl<D: Send> Send for UnparkMutex<D> {} +unsafe impl<D: Send> Sync for UnparkMutex<D> {} + +// There are four possible task states, listed below with their possible +// transitions: + +// The task is blocked, waiting on an event +const WAITING: usize = 0; // --> POLLING + +// The task is actively being polled by a thread; arrival of additional events +// of interest should move it to the REPOLL state +const POLLING: usize = 1; // --> WAITING, REPOLL, or COMPLETE + +// The task is actively being polled, but will need to be re-polled upon +// completion to ensure that all events were observed. +const REPOLL: usize = 2; // --> POLLING + +// The task has finished executing (either successfully or with an error/panic) +const COMPLETE: usize = 3; // No transitions out + +impl<D> UnparkMutex<D> { + pub fn new() -> UnparkMutex<D> { + UnparkMutex { + status: AtomicUsize::new(WAITING), + inner: UnsafeCell::new(None), + } + } + + /// Attempt to "notify" the mutex that a poll should occur. + /// + /// An `Ok` result indicates that the `POLLING` state has been entered, and + /// the caller can proceed to poll the future. An `Err` result indicates + /// that polling is not necessary (because the task is finished or the + /// polling has been delegated). + pub fn notify(&self) -> Result<D, ()> { + let mut status = self.status.load(SeqCst); + loop { + match status { + // The task is idle, so try to run it immediately. + WAITING => { + match self.status.compare_exchange(WAITING, POLLING, + SeqCst, SeqCst) { + Ok(_) => { + let data = unsafe { + // SAFETY: we've ensured mutual exclusion via + // the status protocol; we are the only thread + // that has transitioned to the POLLING state, + // and we won't transition back to QUEUED until + // the lock is "released" by this thread. See + // the protocol diagram above. 
+ (*self.inner.get()).take().unwrap() + }; + return Ok(data); + } + Err(cur) => status = cur, + } + } + + // The task is being polled, so we need to record that it should + // be *repolled* when complete. + POLLING => { + match self.status.compare_exchange(POLLING, REPOLL, + SeqCst, SeqCst) { + Ok(_) => return Err(()), + Err(cur) => status = cur, + } + } + + // The task is already scheduled for polling, or is complete, so + // we've got nothing to do. + _ => return Err(()), + } + } + } + + /// Alert the mutex that polling is about to begin, clearing any accumulated + /// re-poll requests. + /// + /// # Safety + /// + /// Callable only from the `POLLING`/`REPOLL` states, i.e. between + /// successful calls to `notify` and `wait`/`complete`. + pub unsafe fn start_poll(&self) { + self.status.store(POLLING, SeqCst); + } + + /// Alert the mutex that polling completed with NotReady. + /// + /// # Safety + /// + /// Callable only from the `POLLING`/`REPOLL` states, i.e. between + /// successful calls to `notify` and `wait`/`complete`. + pub unsafe fn wait(&self, data: D) -> Result<(), D> { + *self.inner.get() = Some(data); + + match self.status.compare_exchange(POLLING, WAITING, SeqCst, SeqCst) { + // no unparks came in while we were running + Ok(_) => Ok(()), + + // guaranteed to be in REPOLL state; just clobber the + // state and run again. + Err(status) => { + assert_eq!(status, REPOLL); + self.status.store(POLLING, SeqCst); + Err((*self.inner.get()).take().unwrap()) + } + } + } + + /// Alert the mutex that the task has completed execution and should not be + /// notified again. + /// + /// # Safety + /// + /// Callable only from the `POLLING`/`REPOLL` states, i.e. between + /// successful calls to `notify` and `wait`/`complete`. 
+ pub unsafe fn complete(&self) { + self.status.store(COMPLETE, SeqCst); + } +} diff --git a/third_party/rust/futures-0.1.31/src/unsync/mod.rs b/third_party/rust/futures-0.1.31/src/unsync/mod.rs new file mode 100644 index 0000000000..aaa5a707ba --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/unsync/mod.rs @@ -0,0 +1,7 @@ +//! Future-aware single-threaded synchronization +//! +//! This module contains similar abstractions to `sync`, for communications +//! between tasks on the same thread only. + +pub mod mpsc; +pub mod oneshot; diff --git a/third_party/rust/futures-0.1.31/src/unsync/mpsc.rs b/third_party/rust/futures-0.1.31/src/unsync/mpsc.rs new file mode 100644 index 0000000000..ba0d52dc98 --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/unsync/mpsc.rs @@ -0,0 +1,474 @@ +//! A multi-producer, single-consumer, futures-aware, FIFO queue with back +//! pressure, for use communicating between tasks on the same thread. +//! +//! These queues are the same as those in `futures::sync`, except they're not +//! intended to be sent across threads. + +use std::any::Any; +use std::cell::RefCell; +use std::collections::VecDeque; +use std::error::Error; +use std::fmt; +use std::mem; +use std::rc::{Rc, Weak}; + +use task::{self, Task}; +use future::Executor; +use sink::SendAll; +use resultstream::{self, Results}; +use unsync::oneshot; +use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream}; + +/// Creates a bounded in-memory channel with buffered storage. +/// +/// This method creates concrete implementations of the `Stream` and `Sink` +/// traits which can be used to communicate a stream of values between tasks +/// with backpressure. The channel capacity is exactly `buffer`. On average, +/// sending a message through this channel performs no dynamic allocation. 
+pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) { + channel_(Some(buffer)) +} + +fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) { + let shared = Rc::new(RefCell::new(Shared { + buffer: VecDeque::new(), + capacity: buffer, + blocked_senders: VecDeque::new(), + blocked_recv: None, + })); + let sender = Sender { shared: Rc::downgrade(&shared) }; + let receiver = Receiver { state: State::Open(shared) }; + (sender, receiver) +} + +#[derive(Debug)] +struct Shared<T> { + buffer: VecDeque<T>, + capacity: Option<usize>, + blocked_senders: VecDeque<Task>, + blocked_recv: Option<Task>, +} + +/// The transmission end of a channel. +/// +/// This is created by the `channel` function. +#[derive(Debug)] +pub struct Sender<T> { + shared: Weak<RefCell<Shared<T>>>, +} + +impl<T> Sender<T> { + fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> { + let shared = match self.shared.upgrade() { + Some(shared) => shared, + None => return Err(SendError(msg)), // receiver was dropped + }; + let mut shared = shared.borrow_mut(); + + match shared.capacity { + Some(capacity) if shared.buffer.len() == capacity => { + shared.blocked_senders.push_back(task::current()); + Ok(AsyncSink::NotReady(msg)) + } + _ => { + shared.buffer.push_back(msg); + if let Some(task) = shared.blocked_recv.take() { + task.notify(); + } + Ok(AsyncSink::Ready) + } + } + } +} + +impl<T> Clone for Sender<T> { + fn clone(&self) -> Self { + Sender { shared: self.shared.clone() } + } +} + +impl<T> Sink for Sender<T> { + type SinkItem = T; + type SinkError = SendError<T>; + + fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { + self.do_send(msg) + } + + fn poll_complete(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } + + fn close(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } +} + +impl<T> Drop for Sender<T> { + fn drop(&mut self) { + let shared = match self.shared.upgrade() { + Some(shared) => shared, + None => return, + }; + // The 
number of existing `Weak` indicates if we are possibly the last + // `Sender`. If we are the last, we possibly must notify a blocked + // `Receiver`. `self.shared` is always one of the `Weak` to this shared + // data. Therefore the smallest possible Rc::weak_count(&shared) is 1. + if Rc::weak_count(&shared) == 1 { + if let Some(task) = shared.borrow_mut().blocked_recv.take() { + // Wake up receiver as its stream has ended + task.notify(); + } + } + } +} + +/// The receiving end of a channel which implements the `Stream` trait. +/// +/// This is created by the `channel` function. +#[derive(Debug)] +pub struct Receiver<T> { + state: State<T>, +} + +/// Possible states of a receiver. We're either Open (can receive more messages) +/// or we're closed with a list of messages we have left to receive. +#[derive(Debug)] +enum State<T> { + Open(Rc<RefCell<Shared<T>>>), + Closed(VecDeque<T>), +} + +impl<T> Receiver<T> { + /// Closes the receiving half + /// + /// This prevents any further messages from being sent on the channel while + /// still enabling the receiver to drain messages that are buffered. + pub fn close(&mut self) { + let (blockers, items) = match self.state { + State::Open(ref state) => { + let mut state = state.borrow_mut(); + let items = mem::replace(&mut state.buffer, VecDeque::new()); + let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new()); + (blockers, items) + } + State::Closed(_) => return, + }; + self.state = State::Closed(items); + for task in blockers { + task.notify(); + } + } +} + +impl<T> Stream for Receiver<T> { + type Item = T; + type Error = (); + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + let me = match self.state { + State::Open(ref mut me) => me, + State::Closed(ref mut items) => { + return Ok(Async::Ready(items.pop_front())) + } + }; + + if let Some(shared) = Rc::get_mut(me) { + // All senders have been dropped, so drain the buffer and end the + // stream. 
+ return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front())); + } + + let mut shared = me.borrow_mut(); + if let Some(msg) = shared.buffer.pop_front() { + if let Some(task) = shared.blocked_senders.pop_front() { + drop(shared); + task.notify(); + } + Ok(Async::Ready(Some(msg))) + } else { + shared.blocked_recv = Some(task::current()); + Ok(Async::NotReady) + } + } +} + +impl<T> Drop for Receiver<T> { + fn drop(&mut self) { + self.close(); + } +} + +/// The transmission end of an unbounded channel. +/// +/// This is created by the `unbounded` function. +#[derive(Debug)] +pub struct UnboundedSender<T>(Sender<T>); + +impl<T> Clone for UnboundedSender<T> { + fn clone(&self) -> Self { + UnboundedSender(self.0.clone()) + } +} + +impl<T> Sink for UnboundedSender<T> { + type SinkItem = T; + type SinkError = SendError<T>; + + fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { + self.0.start_send(msg) + } + fn poll_complete(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } + fn close(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } +} + +impl<'a, T> Sink for &'a UnboundedSender<T> { + type SinkItem = T; + type SinkError = SendError<T>; + + fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { + self.0.do_send(msg) + } + + fn poll_complete(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } + + fn close(&mut self) -> Poll<(), SendError<T>> { + Ok(Async::Ready(())) + } +} + +impl<T> UnboundedSender<T> { + /// Sends the provided message along this channel. + /// + /// This is an unbounded sender, so this function differs from `Sink::send` + /// by ensuring the return type reflects that the channel is always ready to + /// receive messages. + #[deprecated(note = "renamed to `unbounded_send`")] + #[doc(hidden)] + pub fn send(&self, msg: T) -> Result<(), SendError<T>> { + self.unbounded_send(msg) + } + + /// Sends the provided message along this channel. 
+ /// + /// This is an unbounded sender, so this function differs from `Sink::send` + /// by ensuring the return type reflects that the channel is always ready to + /// receive messages. + pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> { + let shared = match self.0.shared.upgrade() { + Some(shared) => shared, + None => return Err(SendError(msg)), + }; + let mut shared = shared.borrow_mut(); + shared.buffer.push_back(msg); + if let Some(task) = shared.blocked_recv.take() { + drop(shared); + task.notify(); + } + Ok(()) + } +} + +/// The receiving end of an unbounded channel. +/// +/// This is created by the `unbounded` function. +#[derive(Debug)] +pub struct UnboundedReceiver<T>(Receiver<T>); + +impl<T> UnboundedReceiver<T> { + /// Closes the receiving half + /// + /// This prevents any further messages from being sent on the channel while + /// still enabling the receiver to drain messages that are buffered. + pub fn close(&mut self) { + self.0.close(); + } +} + +impl<T> Stream for UnboundedReceiver<T> { + type Item = T; + type Error = (); + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + self.0.poll() + } +} + +/// Creates an unbounded in-memory channel with buffered storage. +/// +/// Identical semantics to `channel`, except with no limit to buffer size. 
+pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) { + let (send, recv) = channel_(None); + (UnboundedSender(send), UnboundedReceiver(recv)) +} + +/// Error type for sending, used when the receiving end of a channel is +/// dropped +pub struct SendError<T>(T); + +impl<T> fmt::Debug for SendError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_tuple("SendError") + .field(&"...") + .finish() + } +} + +impl<T> fmt::Display for SendError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "send failed because receiver is gone") + } +} + +impl<T: Any> Error for SendError<T> { + fn description(&self) -> &str { + "send failed because receiver is gone" + } +} + +impl<T> SendError<T> { + /// Returns the message that was attempted to be sent but failed. + pub fn into_inner(self) -> T { + self.0 + } +} + +/// Handle returned from the `spawn` function. +/// +/// This handle is a stream that proxies a stream on a separate `Executor`. +/// Created through the `mpsc::spawn` function, this handle will produce +/// the same values as the proxied stream, as they are produced in the executor, +/// and uses a limited buffer to exert back-pressure on the remote stream. +/// +/// If this handle is dropped, then the stream will no longer be polled and is +/// scheduled to be dropped. +pub struct SpawnHandle<Item, Error> { + inner: Receiver<Result<Item, Error>>, + _cancel_tx: oneshot::Sender<()>, +} + +/// Type of future which `Executor` instances must be able to execute for `spawn`. +pub struct Execute<S: Stream> { + inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>, + cancel_rx: oneshot::Receiver<()>, +} + +/// Spawns a `stream` onto the instance of `Executor` provided, `executor`, +/// returning a handle representing the remote stream. +/// +/// The `stream` will be canceled if the `SpawnHandle` is dropped. 
+/// +/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. +/// When `stream` has additional items available, then the `SpawnHandle` +/// will have those same items available. +/// +/// At most `buffer + 1` elements will be buffered at a time. If the buffer +/// is full, then `stream` will stop progressing until more space is available. +/// This allows the `SpawnHandle` to exert backpressure on the `stream`. +/// +/// # Panics +/// +/// This function will panic if `executor` is unable spawn a `Future` containing +/// the entirety of the `stream`. +pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error> + where S: Stream, + E: Executor<Execute<S>> +{ + let (cancel_tx, cancel_rx) = oneshot::channel(); + let (tx, rx) = channel(buffer); + executor.execute(Execute { + inner: tx.send_all(resultstream::new(stream)), + cancel_rx: cancel_rx, + }).expect("failed to spawn stream"); + SpawnHandle { + inner: rx, + _cancel_tx: cancel_tx, + } +} + +/// Spawns a `stream` onto the instance of `Executor` provided, `executor`, +/// returning a handle representing the remote stream, with unbounded buffering. +/// +/// The `stream` will be canceled if the `SpawnHandle` is dropped. +/// +/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. +/// When `stream` has additional items available, then the `SpawnHandle` +/// will have those same items available. +/// +/// An unbounded buffer is used, which means that values will be buffered as +/// fast as `stream` can produce them, without any backpressure. Therefore, if +/// `stream` is an infinite stream, it can use an unbounded amount of memory, and +/// potentially hog CPU resources. In particular, if `stream` is infinite +/// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it +/// will result in an infinite loop. 
+/// +/// # Panics +/// +/// This function will panic if `executor` is unable spawn a `Future` containing +/// the entirety of the `stream`. +pub fn spawn_unbounded<S,E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error> + where S: Stream, + E: Executor<Execute<S>> +{ + let (cancel_tx, cancel_rx) = oneshot::channel(); + let (tx, rx) = channel_(None); + executor.execute(Execute { + inner: tx.send_all(resultstream::new(stream)), + cancel_rx: cancel_rx, + }).expect("failed to spawn stream"); + SpawnHandle { + inner: rx, + _cancel_tx: cancel_tx, + } +} + +impl<I, E> Stream for SpawnHandle<I, E> { + type Item = I; + type Error = E; + + fn poll(&mut self) -> Poll<Option<I>, E> { + match self.inner.poll() { + Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))), + Ok(Async::Ready(Some(Err(e)))) => Err(e), + Ok(Async::Ready(None)) => Ok(Async::Ready(None)), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(_) => unreachable!("mpsc::Receiver should never return Err"), + } + } +} + +impl<I, E> fmt::Debug for SpawnHandle<I, E> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SpawnHandle") + .finish() + } +} + +impl<S: Stream> Future for Execute<S> { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + match self.cancel_rx.poll() { + Ok(Async::NotReady) => (), + _ => return Ok(Async::Ready(())), + } + match self.inner.poll() { + Ok(Async::NotReady) => Ok(Async::NotReady), + _ => Ok(Async::Ready(())) + } + } +} + +impl<S: Stream> fmt::Debug for Execute<S> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Execute") + .finish() + } +} diff --git a/third_party/rust/futures-0.1.31/src/unsync/oneshot.rs b/third_party/rust/futures-0.1.31/src/unsync/oneshot.rs new file mode 100644 index 0000000000..7ae2890f9e --- /dev/null +++ b/third_party/rust/futures-0.1.31/src/unsync/oneshot.rs @@ -0,0 +1,351 @@ +//! A one-shot, futures-aware channel +//! +//! 
This channel is similar to that in `sync::oneshot` but cannot be sent across +//! threads. + +use std::cell::{Cell, RefCell}; +use std::fmt; +use std::rc::{Rc, Weak}; + +use {Future, Poll, Async}; +use future::{Executor, IntoFuture, Lazy, lazy}; +use task::{self, Task}; + +/// Creates a new futures-aware, one-shot channel. +/// +/// This function is the same as `sync::oneshot::channel` except that the +/// returned values cannot be sent across threads. +pub fn channel<T>() -> (Sender<T>, Receiver<T>) { + let inner = Rc::new(RefCell::new(Inner { + value: None, + tx_task: None, + rx_task: None, + })); + let tx = Sender { + inner: Rc::downgrade(&inner), + }; + let rx = Receiver { + state: State::Open(inner), + }; + (tx, rx) +} + +/// Represents the completion half of a oneshot through which the result of a +/// computation is signaled. +/// +/// This is created by the `unsync::oneshot::channel` function and is equivalent +/// in functionality to `sync::oneshot::Sender` except that it cannot be sent +/// across threads. +#[derive(Debug)] +pub struct Sender<T> { + inner: Weak<RefCell<Inner<T>>>, +} + +/// A future representing the completion of a computation happening elsewhere in +/// memory. +/// +/// This is created by the `unsync::oneshot::channel` function and is equivalent +/// in functionality to `sync::oneshot::Receiver` except that it cannot be sent +/// across threads. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Receiver<T> { + state: State<T>, +} + +#[derive(Debug)] +enum State<T> { + Open(Rc<RefCell<Inner<T>>>), + Closed(Option<T>), +} + +pub use sync::oneshot::Canceled; + +#[derive(Debug)] +struct Inner<T> { + value: Option<T>, + tx_task: Option<Task>, + rx_task: Option<Task>, +} + +impl<T> Sender<T> { + /// Completes this oneshot with a successful result. 
+ /// + /// This function will consume `self` and indicate to the other end, the + /// `Receiver`, that the error provided is the result of the computation this + /// represents. + /// + /// If the value is successfully enqueued for the remote end to receive, + /// then `Ok(())` is returned. If the receiving end was deallocated before + /// this function was called, however, then `Err` is returned with the value + /// provided. + pub fn send(self, val: T) -> Result<(), T> { + if let Some(inner) = self.inner.upgrade() { + inner.borrow_mut().value = Some(val); + Ok(()) + } else { + Err(val) + } + } + + /// Polls this `Sender` half to detect whether the `Receiver` this has + /// paired with has gone away. + /// + /// This function can be used to learn about when the `Receiver` (consumer) + /// half has gone away and nothing will be able to receive a message sent + /// from `complete`. + /// + /// Like `Future::poll`, this function will panic if it's not called from + /// within the context of a task. In other words, this should only ever be + /// called from inside another future. + /// + /// If `Ready` is returned then it means that the `Receiver` has disappeared + /// and the result this `Sender` would otherwise produce should no longer + /// be produced. + /// + /// If `NotReady` is returned then the `Receiver` is still alive and may be + /// able to receive a message if sent. The current task, however, is + /// scheduled to receive a notification if the corresponding `Receiver` goes + /// away. + pub fn poll_cancel(&mut self) -> Poll<(), ()> { + match self.inner.upgrade() { + Some(inner) => { + inner.borrow_mut().tx_task = Some(task::current()); + Ok(Async::NotReady) + } + None => Ok(().into()), + } + } + + /// Tests to see whether this `Sender`'s corresponding `Receiver` + /// has gone away. 
+ /// + /// This function can be used to learn about when the `Receiver` (consumer) + /// half has gone away and nothing will be able to receive a message sent + /// from `send`. + /// + /// Note that this function is intended to *not* be used in the context of a + /// future. If you're implementing a future you probably want to call the + /// `poll_cancel` function which will block the current task if the + /// cancellation hasn't happened yet. This can be useful when working on a + /// non-futures related thread, though, which would otherwise panic if + /// `poll_cancel` were called. + pub fn is_canceled(&self) -> bool { + !self.inner.upgrade().is_some() + } +} + +impl<T> Drop for Sender<T> { + fn drop(&mut self) { + let inner = match self.inner.upgrade() { + Some(inner) => inner, + None => return, + }; + let rx_task = { + let mut borrow = inner.borrow_mut(); + borrow.tx_task.take(); + borrow.rx_task.take() + }; + if let Some(task) = rx_task { + task.notify(); + } + } +} + +impl<T> Receiver<T> { + /// Gracefully close this receiver, preventing sending any future messages. + /// + /// Any `send` operation which happens after this method returns is + /// guaranteed to fail. Once this method is called the normal `poll` method + /// can be used to determine whether a message was actually sent or not. If + /// `Canceled` is returned from `poll` then no message was sent. 
+ pub fn close(&mut self) { + let (item, task) = match self.state { + State::Open(ref inner) => { + let mut inner = inner.borrow_mut(); + drop(inner.rx_task.take()); + (inner.value.take(), inner.tx_task.take()) + } + State::Closed(_) => return, + }; + self.state = State::Closed(item); + if let Some(task) = task { + task.notify(); + } + } +} + +impl<T> Future for Receiver<T> { + type Item = T; + type Error = Canceled; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + let inner = match self.state { + State::Open(ref mut inner) => inner, + State::Closed(ref mut item) => { + match item.take() { + Some(item) => return Ok(item.into()), + None => return Err(Canceled), + } + } + }; + + // If we've got a value, then skip the logic below as we're done. + if let Some(val) = inner.borrow_mut().value.take() { + return Ok(Async::Ready(val)) + } + + // If we can get mutable access, then the sender has gone away. We + // didn't see a value above, so we're canceled. Otherwise we park + // our task and wait for a value to come in. + if Rc::get_mut(inner).is_some() { + Err(Canceled) + } else { + inner.borrow_mut().rx_task = Some(task::current()); + Ok(Async::NotReady) + } + } +} + +impl<T> Drop for Receiver<T> { + fn drop(&mut self) { + self.close(); + } +} + +/// Handle returned from the `spawn` function. +/// +/// This handle is a future representing the completion of a different future on +/// a separate executor. Created through the `oneshot::spawn` function this +/// handle will resolve when the future provided to `spawn` resolves on the +/// `Executor` instance provided to that function. +/// +/// If this handle is dropped then the future will automatically no longer be +/// polled and is scheduled to be dropped. This can be canceled with the +/// `forget` function, however. +pub struct SpawnHandle<T, E> { + rx: Receiver<Result<T, E>>, + keep_running: Rc<Cell<bool>>, +} + +/// Type of future which `Spawn` instances below must be able to spawn. 
+pub struct Execute<F: Future> { + future: F, + tx: Option<Sender<Result<F::Item, F::Error>>>, + keep_running: Rc<Cell<bool>>, +} + +/// Spawns a `future` onto the instance of `Executor` provided, `executor`, +/// returning a handle representing the completion of the future. +/// +/// The `SpawnHandle` returned is a future that is a proxy for `future` itself. +/// When `future` completes on `executor` then the `SpawnHandle` will itself be +/// resolved. Internally `SpawnHandle` contains a `oneshot` channel and is +/// thus not safe to send across threads. +/// +/// The `future` will be canceled if the `SpawnHandle` is dropped. If this is +/// not desired then the `SpawnHandle::forget` function can be used to continue +/// running the future to completion. +/// +/// # Panics +/// +/// This function will panic if the instance of `Spawn` provided is unable to +/// spawn the `future` provided. +/// +/// If the provided instance of `Spawn` does not actually run `future` to +/// completion, then the returned handle may panic when polled. Typically this +/// is not a problem, though, as most instances of `Spawn` will run futures to +/// completion. +pub fn spawn<F, E>(future: F, executor: &E) -> SpawnHandle<F::Item, F::Error> + where F: Future, + E: Executor<Execute<F>>, +{ + let flag = Rc::new(Cell::new(false)); + let (tx, rx) = channel(); + executor.execute(Execute { + future: future, + tx: Some(tx), + keep_running: flag.clone(), + }).expect("failed to spawn future"); + SpawnHandle { + rx: rx, + keep_running: flag, + } +} + +/// Spawns a function `f` onto the `Spawn` instance provided `s`. +/// +/// For more information see the `spawn` function in this module. This function +/// is just a thin wrapper around `spawn` which will execute the closure on the +/// executor provided and then complete the future that the closure returns. 
+pub fn spawn_fn<F, R, E>(f: F, executor: &E) -> SpawnHandle<R::Item, R::Error> + where F: FnOnce() -> R, + R: IntoFuture, + E: Executor<Execute<Lazy<F, R>>>, +{ + spawn(lazy(f), executor) +} + +impl<T, E> SpawnHandle<T, E> { + /// Drop this future without canceling the underlying future. + /// + /// When `SpawnHandle` is dropped, the spawned future will be canceled as + /// well if the future hasn't already resolved. This function can be used + /// when to drop this future but keep executing the underlying future. + pub fn forget(self) { + self.keep_running.set(true); + } +} + +impl<T, E> Future for SpawnHandle<T, E> { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<T, E> { + match self.rx.poll() { + Ok(Async::Ready(Ok(t))) => Ok(t.into()), + Ok(Async::Ready(Err(e))) => Err(e), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(_) => panic!("future was canceled before completion"), + } + } +} + +impl<T: fmt::Debug, E: fmt::Debug> fmt::Debug for SpawnHandle<T, E> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SpawnHandle") + .finish() + } +} + +impl<F: Future> Future for Execute<F> { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + // If we're canceled then we may want to bail out early. + // + // If the `forget` function was called, though, then we keep going. 
+ if self.tx.as_mut().unwrap().poll_cancel().unwrap().is_ready() { + if !self.keep_running.get() { + return Ok(().into()) + } + } + + let result = match self.future.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::Ready(t)) => Ok(t), + Err(e) => Err(e), + }; + drop(self.tx.take().unwrap().send(result)); + Ok(().into()) + } +} + +impl<F: Future + fmt::Debug> fmt::Debug for Execute<F> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Execute") + .field("future", &self.future) + .finish() + } +} diff --git a/third_party/rust/futures-0.1.31/tests/all.rs b/third_party/rust/futures-0.1.31/tests/all.rs new file mode 100644 index 0000000000..40e402f553 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/all.rs @@ -0,0 +1,377 @@ +#![allow(bare_trait_objects, unknown_lints)] + +extern crate futures; + +use std::sync::mpsc::{channel, TryRecvError}; + +use futures::future::*; +use futures::future; +use futures::executor; +use futures::sync::oneshot::{self, Canceled}; + +mod support; +use support::*; + +fn unselect<T, U, E>(r: Result<(T, U), (E, U)>) -> Result<T, E> { + match r { + Ok((t, _)) => Ok(t), + Err((e, _)) => Err(e), + } +} + +#[test] +fn result_smoke() { + fn is_future_v<A, B, C>(_: C) + where A: Send + 'static, + B: Send + 'static, + C: Future<Item=A, Error=B> + {} + + is_future_v::<i32, u32, _>(f_ok(1).map(|a| a + 1)); + is_future_v::<i32, u32, _>(f_ok(1).map_err(|a| a + 1)); + is_future_v::<i32, u32, _>(f_ok(1).and_then(Ok)); + is_future_v::<i32, u32, _>(f_ok(1).or_else(Err)); + is_future_v::<(i32, i32), u32, _>(f_ok(1).join(Err(3))); + is_future_v::<i32, u32, _>(f_ok(1).map(f_ok).flatten()); + + assert_done(|| f_ok(1), r_ok(1)); + assert_done(|| f_err(1), r_err(1)); + assert_done(|| result(Ok(1)), r_ok(1)); + assert_done(|| result(Err(1)), r_err(1)); + assert_done(|| ok(1), r_ok(1)); + assert_done(|| err(1), r_err(1)); + assert_done(|| f_ok(1).map(|a| a + 2), r_ok(3)); + assert_done(|| f_err(1).map(|a| a + 
2), r_err(1)); + assert_done(|| f_ok(1).map_err(|a| a + 2), r_ok(1)); + assert_done(|| f_err(1).map_err(|a| a + 2), r_err(3)); + assert_done(|| f_ok(1).and_then(|a| Ok(a + 2)), r_ok(3)); + assert_done(|| f_err(1).and_then(|a| Ok(a + 2)), r_err(1)); + assert_done(|| f_ok(1).and_then(|a| Err(a as u32 + 3)), r_err(4)); + assert_done(|| f_err(1).and_then(|a| Err(a as u32 + 4)), r_err(1)); + assert_done(|| f_ok(1).or_else(|a| Ok(a as i32 + 2)), r_ok(1)); + assert_done(|| f_err(1).or_else(|a| Ok(a as i32 + 2)), r_ok(3)); + assert_done(|| f_ok(1).or_else(|a| Err(a + 3)), r_ok(1)); + assert_done(|| f_err(1).or_else(|a| Err(a + 4)), r_err(5)); + assert_done(|| f_ok(1).select(f_err(2)).then(unselect), r_ok(1)); + assert_done(|| f_ok(1).select(Ok(2)).then(unselect), r_ok(1)); + assert_done(|| f_err(1).select(f_ok(1)).then(unselect), r_err(1)); + assert_done(|| f_ok(1).select(empty()).then(unselect), Ok(1)); + assert_done(|| empty().select(f_ok(1)).then(unselect), Ok(1)); + assert_done(|| f_ok(1).join(f_err(1)), Err(1)); + assert_done(|| f_ok(1).join(Ok(2)), Ok((1, 2))); + assert_done(|| f_err(1).join(f_ok(1)), Err(1)); + assert_done(|| f_ok(1).then(|_| Ok(2)), r_ok(2)); + assert_done(|| f_ok(1).then(|_| Err(2)), r_err(2)); + assert_done(|| f_err(1).then(|_| Ok(2)), r_ok(2)); + assert_done(|| f_err(1).then(|_| Err(2)), r_err(2)); +} + +#[test] +fn test_empty() { + fn empty() -> Empty<i32, u32> { future::empty() } + + assert_empty(|| empty()); + assert_empty(|| empty().select(empty())); + assert_empty(|| empty().join(empty())); + assert_empty(|| empty().join(f_ok(1))); + assert_empty(|| f_ok(1).join(empty())); + assert_empty(|| empty().or_else(move |_| empty())); + assert_empty(|| empty().and_then(move |_| empty())); + assert_empty(|| f_err(1).or_else(move |_| empty())); + assert_empty(|| f_ok(1).and_then(move |_| empty())); + assert_empty(|| empty().map(|a| a + 1)); + assert_empty(|| empty().map_err(|a| a + 1)); + assert_empty(|| empty().then(|a| a)); +} + +#[test] +fn 
test_ok() { + assert_done(|| ok(1), r_ok(1)); + assert_done(|| err(1), r_err(1)); +} + +#[test] +fn flatten() { + fn ok<T: Send + 'static>(a: T) -> FutureResult<T, u32> { + future::ok(a) + } + fn err<E: Send + 'static>(b: E) -> FutureResult<i32, E> { + future::err(b) + } + + assert_done(|| ok(ok(1)).flatten(), r_ok(1)); + assert_done(|| ok(err(1)).flatten(), r_err(1)); + assert_done(|| err(1u32).map(ok).flatten(), r_err(1)); + assert_done(|| future::ok::<_, u8>(future::ok::<_, u32>(1)) + .flatten(), r_ok(1)); + assert_empty(|| ok(empty::<i32, u32>()).flatten()); + assert_empty(|| empty::<i32, u32>().map(ok).flatten()); +} + +#[test] +fn smoke_oneshot() { + assert_done(|| { + let (c, p) = oneshot::channel(); + c.send(1).unwrap(); + p + }, Ok(1)); + assert_done(|| { + let (c, p) = oneshot::channel::<i32>(); + drop(c); + p + }, Err(Canceled)); + let mut completes = Vec::new(); + assert_empty(|| { + let (a, b) = oneshot::channel::<i32>(); + completes.push(a); + b + }); + + let (c, p) = oneshot::channel::<i32>(); + drop(c); + let res = executor::spawn(p).poll_future_notify(¬ify_panic(), 0); + assert!(res.is_err()); + let (c, p) = oneshot::channel::<i32>(); + drop(c); + let (tx, rx) = channel(); + p.then(move |_| { + tx.send(()) + }).forget(); + rx.recv().unwrap(); +} + +#[test] +fn select_cancels() { + let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>()); + let ((btx, brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |b| { btx.send(b).unwrap(); b }); + let d = d.map(move |d| { dtx.send(d).unwrap(); d }); + + let f = b.select(d).then(unselect); + // assert!(f.poll(&mut Task::new()).is_not_ready()); + assert!(brx.try_recv().is_err()); + assert!(drx.try_recv().is_err()); + a.send(1).unwrap(); + let res = executor::spawn(f).poll_future_notify(¬ify_panic(), 0); + assert!(res.ok().unwrap().is_ready()); + assert_eq!(brx.recv().unwrap(), 1); + drop(c); + assert!(drx.recv().is_err()); + + let ((a, b), (c, d)) = (oneshot::channel::<i32>(), 
oneshot::channel::<i32>()); + let ((btx, _brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |b| { btx.send(b).unwrap(); b }); + let d = d.map(move |d| { dtx.send(d).unwrap(); d }); + + let mut f = executor::spawn(b.select(d).then(unselect)); + assert!(f.poll_future_notify(¬ify_noop(), 0).ok().unwrap().is_not_ready()); + assert!(f.poll_future_notify(¬ify_noop(), 0).ok().unwrap().is_not_ready()); + a.send(1).unwrap(); + assert!(f.poll_future_notify(¬ify_panic(), 0).ok().unwrap().is_ready()); + drop((c, f)); + assert!(drx.recv().is_err()); +} + +#[test] +fn join_cancels() { + let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>()); + let ((btx, _brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |b| { btx.send(b).unwrap(); b }); + let d = d.map(move |d| { dtx.send(d).unwrap(); d }); + + let f = b.join(d); + drop(a); + let res = executor::spawn(f).poll_future_notify(¬ify_panic(), 0); + assert!(res.is_err()); + drop(c); + assert!(drx.recv().is_err()); + + let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>()); + let ((btx, _brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |b| { btx.send(b).unwrap(); b }); + let d = d.map(move |d| { dtx.send(d).unwrap(); d }); + + let (tx, rx) = channel(); + let f = b.join(d); + f.then(move |_| { + tx.send(()).unwrap(); + let res: Result<(), ()> = Ok(()); + res + }).forget(); + assert!(rx.try_recv().is_err()); + drop(a); + rx.recv().unwrap(); + drop(c); + assert!(drx.recv().is_err()); +} + +#[test] +fn join_incomplete() { + let (a, b) = oneshot::channel::<i32>(); + let (tx, rx) = channel(); + let mut f = executor::spawn(ok(1).join(b).map(move |r| tx.send(r).unwrap())); + assert!(f.poll_future_notify(¬ify_noop(), 0).ok().unwrap().is_not_ready()); + assert!(rx.try_recv().is_err()); + a.send(2).unwrap(); + assert!(f.poll_future_notify(¬ify_noop(), 0).ok().unwrap().is_ready()); + assert_eq!(rx.recv().unwrap(), (1, 2)); + + let (a, b) = 
oneshot::channel::<i32>(); + let (tx, rx) = channel(); + let mut f = executor::spawn(b.join(Ok(2)).map(move |r| tx.send(r).unwrap())); + assert!(f.poll_future_notify(¬ify_noop(), 0).ok().unwrap().is_not_ready()); + assert!(rx.try_recv().is_err()); + a.send(1).unwrap(); + assert!(f.poll_future_notify(¬ify_noop(), 0).ok().unwrap().is_ready()); + assert_eq!(rx.recv().unwrap(), (1, 2)); + + let (a, b) = oneshot::channel::<i32>(); + let (tx, rx) = channel(); + let mut f = executor::spawn(ok(1).join(b).map_err(move |_r| tx.send(2).unwrap())); + assert!(f.poll_future_notify(¬ify_noop(), 0).ok().unwrap().is_not_ready()); + assert!(rx.try_recv().is_err()); + drop(a); + assert!(f.poll_future_notify(¬ify_noop(), 0).is_err()); + assert_eq!(rx.recv().unwrap(), 2); + + let (a, b) = oneshot::channel::<i32>(); + let (tx, rx) = channel(); + let mut f = executor::spawn(b.join(Ok(2)).map_err(move |_r| tx.send(1).unwrap())); + assert!(f.poll_future_notify(¬ify_noop(), 0).ok().unwrap().is_not_ready()); + assert!(rx.try_recv().is_err()); + drop(a); + assert!(f.poll_future_notify(¬ify_noop(), 0).is_err()); + assert_eq!(rx.recv().unwrap(), 1); +} + +#[test] +fn collect_collects() { + assert_done(|| join_all(vec![f_ok(1), f_ok(2)]), Ok(vec![1, 2])); + assert_done(|| join_all(vec![f_ok(1)]), Ok(vec![1])); + assert_done(|| join_all(Vec::<Result<i32, u32>>::new()), Ok(vec![])); + + // TODO: needs more tests +} + +#[test] +fn select2() { + fn d<T, U, E>(r: Result<(T, U), (E, U)>) -> Result<T, E> { + match r { + Ok((t, _u)) => Ok(t), + Err((e, _u)) => Err(e), + } + } + + assert_done(|| f_ok(2).select(empty()).then(d), Ok(2)); + assert_done(|| empty().select(f_ok(2)).then(d), Ok(2)); + assert_done(|| f_err(2).select(empty()).then(d), Err(2)); + assert_done(|| empty().select(f_err(2)).then(d), Err(2)); + + assert_done(|| { + f_ok(1).select(f_ok(2)) + .map_err(|_| 0) + .and_then(|(a, b)| b.map(move |b| a + b)) + }, Ok(3)); + + // Finish one half of a select and then fail the second, ensuring that 
we + // get the notification of the second one. + { + let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>()); + let f = b.select(d); + let (tx, rx) = channel(); + f.map(move |r| tx.send(r).unwrap()).forget(); + a.send(1).unwrap(); + let (val, next) = rx.recv().unwrap(); + assert_eq!(val, 1); + let (tx, rx) = channel(); + next.map_err(move |_r| tx.send(2).unwrap()).forget(); + assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty); + drop(c); + assert_eq!(rx.recv().unwrap(), 2); + } + + // Fail the second half and ensure that we see the first one finish + { + let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>()); + let f = b.select(d); + let (tx, rx) = channel(); + f.map_err(move |r| tx.send((1, r.1)).unwrap()).forget(); + drop(c); + let (val, next) = rx.recv().unwrap(); + assert_eq!(val, 1); + let (tx, rx) = channel(); + next.map(move |r| tx.send(r).unwrap()).forget(); + assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty); + a.send(2).unwrap(); + assert_eq!(rx.recv().unwrap(), 2); + } + + // Cancelling the first half should cancel the second + { + let ((_a, b), (_c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>()); + let ((btx, brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |v| { btx.send(v).unwrap(); v }); + let d = d.map(move |v| { dtx.send(v).unwrap(); v }); + let f = b.select(d); + drop(f); + assert!(drx.recv().is_err()); + assert!(brx.recv().is_err()); + } + + // Cancel after a schedule + { + let ((_a, b), (_c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>()); + let ((btx, brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |v| { btx.send(v).unwrap(); v }); + let d = d.map(move |v| { dtx.send(v).unwrap(); v }); + let f = b.select(d); + drop(executor::spawn(f).poll_future_notify(&support::notify_noop(), 0)); + assert!(drx.recv().is_err()); + assert!(brx.recv().is_err()); + } + + // Cancel propagates + { + let ((a, b), (_c, d)) = 
(oneshot::channel::<i32>(), oneshot::channel::<i32>()); + let ((btx, brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |v| { btx.send(v).unwrap(); v }); + let d = d.map(move |v| { dtx.send(v).unwrap(); v }); + let (tx, rx) = channel(); + b.select(d).map(move |_| tx.send(()).unwrap()).forget(); + drop(a); + assert!(drx.recv().is_err()); + assert!(brx.recv().is_err()); + assert!(rx.recv().is_err()); + } + + // Cancel on early drop + { + let (tx, rx) = channel(); + let f = f_ok(1).select(empty().map(move |()| { + tx.send(()).unwrap(); + 1 + })); + drop(f); + assert!(rx.recv().is_err()); + } +} + +#[test] +fn option() { + assert_eq!(Ok(Some(())), Some(ok::<(), ()>(())).wait()); + assert_eq!(Ok(None), <Option<FutureResult<(), ()>> as Future>::wait(None)); +} + +#[test] +fn spawn_does_unsize() { + #[derive(Clone, Copy)] + struct EmptyNotify; + impl executor::Notify for EmptyNotify { + fn notify(&self, _: usize) { panic!("Cannot notify"); } + } + static EMPTY: &'static EmptyNotify = &EmptyNotify; + + let spawn: executor::Spawn<FutureResult<(), ()>> = executor::spawn(future::ok(())); + let mut spawn_box: Box<executor::Spawn<Future<Item = (), Error = ()>>> = Box::new(spawn); + spawn_box.poll_future_notify(&EMPTY, 0).unwrap(); +} diff --git a/third_party/rust/futures-0.1.31/tests/bilock.rs b/third_party/rust/futures-0.1.31/tests/bilock.rs new file mode 100644 index 0000000000..1658bdae27 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/bilock.rs @@ -0,0 +1,111 @@ +#![allow(bare_trait_objects, unknown_lints)] + +extern crate futures; + +use std::thread; + +use futures::prelude::*; +use futures::executor; +use futures::stream; +use futures::future; +use futures::sync::BiLock; + +mod support; +use support::*; + +#[test] +fn smoke() { + let future = future::lazy(|| { + let (a, b) = BiLock::new(1); + + { + let mut lock = match a.poll_lock() { + Async::Ready(l) => l, + Async::NotReady => panic!("poll not ready"), + }; + assert_eq!(*lock, 1); + *lock = 2; + 
+ assert!(b.poll_lock().is_not_ready()); + assert!(a.poll_lock().is_not_ready()); + } + + assert!(b.poll_lock().is_ready()); + assert!(a.poll_lock().is_ready()); + + { + let lock = match b.poll_lock() { + Async::Ready(l) => l, + Async::NotReady => panic!("poll not ready"), + }; + assert_eq!(*lock, 2); + } + + assert_eq!(a.reunite(b).expect("bilock/smoke: reunite error"), 2); + + Ok::<(), ()>(()) + }); + + assert!(executor::spawn(future) + .poll_future_notify(¬ify_noop(), 0) + .expect("failure in poll") + .is_ready()); +} + +#[test] +fn concurrent() { + const N: usize = 10000; + let (a, b) = BiLock::new(0); + + let a = Increment { + a: Some(a), + remaining: N, + }; + let b = stream::iter_ok::<_, ()>((0..N)).fold(b, |b, _n| { + b.lock().map(|mut b| { + *b += 1; + b.unlock() + }) + }); + + let t1 = thread::spawn(move || a.wait()); + let b = b.wait().expect("b error"); + let a = t1.join().unwrap().expect("a error"); + + match a.poll_lock() { + Async::Ready(l) => assert_eq!(*l, 2 * N), + Async::NotReady => panic!("poll not ready"), + } + match b.poll_lock() { + Async::Ready(l) => assert_eq!(*l, 2 * N), + Async::NotReady => panic!("poll not ready"), + } + + assert_eq!(a.reunite(b).expect("bilock/concurrent: reunite error"), 2 * N); + + struct Increment { + remaining: usize, + a: Option<BiLock<usize>>, + } + + impl Future for Increment { + type Item = BiLock<usize>; + type Error = (); + + fn poll(&mut self) -> Poll<BiLock<usize>, ()> { + loop { + if self.remaining == 0 { + return Ok(self.a.take().unwrap().into()) + } + + let a = self.a.as_ref().unwrap(); + let mut a = match a.poll_lock() { + Async::Ready(l) => l, + Async::NotReady => return Ok(Async::NotReady), + }; + self.remaining -= 1; + *a += 1; + } + } + } +} diff --git a/third_party/rust/futures-0.1.31/tests/buffer_unordered.rs b/third_party/rust/futures-0.1.31/tests/buffer_unordered.rs new file mode 100644 index 0000000000..005bbd9835 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/buffer_unordered.rs @@ 
-0,0 +1,74 @@ +extern crate futures; + +use std::sync::mpsc as std_mpsc; +use std::thread; + +use futures::prelude::*; +use futures::sync::oneshot; +use futures::sync::mpsc; + +#[test] +fn works() { + const N: usize = 4; + + let (mut tx, rx) = mpsc::channel(1); + + let (tx2, rx2) = std_mpsc::channel(); + let (tx3, rx3) = std_mpsc::channel(); + let t1 = thread::spawn(move || { + for _ in 0..N+1 { + let (mytx, myrx) = oneshot::channel(); + tx = tx.send(myrx).wait().unwrap(); + tx3.send(mytx).unwrap(); + } + rx2.recv().unwrap(); + for _ in 0..N { + let (mytx, myrx) = oneshot::channel(); + tx = tx.send(myrx).wait().unwrap(); + tx3.send(mytx).unwrap(); + } + }); + + let (tx4, rx4) = std_mpsc::channel(); + let t2 = thread::spawn(move || { + for item in rx.map_err(|_| panic!()).buffer_unordered(N).wait() { + tx4.send(item.unwrap()).unwrap(); + } + }); + + let o1 = rx3.recv().unwrap(); + let o2 = rx3.recv().unwrap(); + let o3 = rx3.recv().unwrap(); + let o4 = rx3.recv().unwrap(); + assert!(rx4.try_recv().is_err()); + + o1.send(1).unwrap(); + assert_eq!(rx4.recv(), Ok(1)); + o3.send(3).unwrap(); + assert_eq!(rx4.recv(), Ok(3)); + tx2.send(()).unwrap(); + o2.send(2).unwrap(); + assert_eq!(rx4.recv(), Ok(2)); + o4.send(4).unwrap(); + assert_eq!(rx4.recv(), Ok(4)); + + let o5 = rx3.recv().unwrap(); + let o6 = rx3.recv().unwrap(); + let o7 = rx3.recv().unwrap(); + let o8 = rx3.recv().unwrap(); + let o9 = rx3.recv().unwrap(); + + o5.send(5).unwrap(); + assert_eq!(rx4.recv(), Ok(5)); + o8.send(8).unwrap(); + assert_eq!(rx4.recv(), Ok(8)); + o9.send(9).unwrap(); + assert_eq!(rx4.recv(), Ok(9)); + o7.send(7).unwrap(); + assert_eq!(rx4.recv(), Ok(7)); + o6.send(6).unwrap(); + assert_eq!(rx4.recv(), Ok(6)); + + t1.join().unwrap(); + t2.join().unwrap(); +} diff --git a/third_party/rust/futures-0.1.31/tests/channel.rs b/third_party/rust/futures-0.1.31/tests/channel.rs new file mode 100644 index 0000000000..7940de4509 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/channel.rs 
@@ -0,0 +1,75 @@ +#![allow(bare_trait_objects, unknown_lints)] + +extern crate futures; + +use std::sync::atomic::*; + +use futures::prelude::*; +use futures::future::result; +use futures::sync::mpsc; + +mod support; +use support::*; + +#[test] +fn sequence() { + let (tx, mut rx) = mpsc::channel(1); + + sassert_empty(&mut rx); + sassert_empty(&mut rx); + + let amt = 20; + send(amt, tx).forget(); + let mut rx = rx.wait(); + for i in (1..amt + 1).rev() { + assert_eq!(rx.next(), Some(Ok(i))); + } + assert_eq!(rx.next(), None); + + fn send(n: u32, sender: mpsc::Sender<u32>) + -> Box<Future<Item=(), Error=()> + Send> { + if n == 0 { + return Box::new(result(Ok(()))) + } + Box::new(sender.send(n).map_err(|_| ()).and_then(move |sender| { + send(n - 1, sender) + })) + } +} + +#[test] +fn drop_sender() { + let (tx, mut rx) = mpsc::channel::<u32>(1); + drop(tx); + sassert_done(&mut rx); +} + +#[test] +fn drop_rx() { + let (tx, rx) = mpsc::channel::<u32>(1); + let tx = tx.send(1).wait().ok().unwrap(); + drop(rx); + assert!(tx.send(1).wait().is_err()); +} + +#[test] +fn drop_order() { + #[allow(deprecated)] + static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; + let (tx, rx) = mpsc::channel(1); + + struct A; + + impl Drop for A { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + let tx = tx.send(A).wait().unwrap(); + assert_eq!(DROPS.load(Ordering::SeqCst), 0); + drop(rx); + assert_eq!(DROPS.load(Ordering::SeqCst), 1); + assert!(tx.send(A).wait().is_err()); + assert_eq!(DROPS.load(Ordering::SeqCst), 2); +} diff --git a/third_party/rust/futures-0.1.31/tests/eager_drop.rs b/third_party/rust/futures-0.1.31/tests/eager_drop.rs new file mode 100644 index 0000000000..79f94d5ddc --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/eager_drop.rs @@ -0,0 +1,82 @@ +extern crate futures; + +use std::sync::mpsc::channel; + +use futures::prelude::*; +use futures::sync::oneshot; +use futures::future::{err, ok}; + +mod support; +use support::*; + +#[test] +fn map() { 
+ // Whatever runs after a `map` should have dropped the closure by that + // point. + let (tx, rx) = channel::<()>(); + let (tx2, rx2) = channel(); + err::<i32, i32>(1).map(move |a| { drop(tx); a }).map_err(move |_| { + assert!(rx.recv().is_err()); + tx2.send(()).unwrap() + }).forget(); + rx2.recv().unwrap(); +} + +#[test] +fn map_err() { + // Whatever runs after a `map_err` should have dropped the closure by that + // point. + let (tx, rx) = channel::<()>(); + let (tx2, rx2) = channel(); + ok::<i32, i32>(1).map_err(move |a| { drop(tx); a }).map(move |_| { + assert!(rx.recv().is_err()); + tx2.send(()).unwrap() + }).forget(); + rx2.recv().unwrap(); +} + +struct FutureData<F, T> { + _data: T, + future: F, +} + +impl<F: Future, T: Send + 'static> Future for FutureData<F, T> { + type Item = F::Item; + type Error = F::Error; + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + self.future.poll() + } +} + +#[test] +fn and_then_drops_eagerly() { + let (c, p) = oneshot::channel::<()>(); + let (tx, rx) = channel::<()>(); + let (tx2, rx2) = channel(); + FutureData { _data: tx, future: p }.and_then(move |_| { + assert!(rx.recv().is_err()); + tx2.send(()).unwrap(); + ok(1) + }).forget(); + assert!(rx2.try_recv().is_err()); + c.send(()).unwrap(); + rx2.recv().unwrap(); +} + +// #[test] +// fn or_else_drops_eagerly() { +// let (p1, c1) = oneshot::<(), ()>(); +// let (p2, c2) = oneshot::<(), ()>(); +// let (tx, rx) = channel::<()>(); +// let (tx2, rx2) = channel(); +// p1.map(move |a| { drop(tx); a }).or_else(move |_| { +// assert!(rx.recv().is_err()); +// p2 +// }).map(move |_| tx2.send(()).unwrap()).forget(); +// assert!(rx2.try_recv().is_err()); +// c1.fail(()); +// assert!(rx2.try_recv().is_err()); +// c2.finish(()); +// rx2.recv().unwrap(); +// } diff --git a/third_party/rust/futures-0.1.31/tests/eventual.rs b/third_party/rust/futures-0.1.31/tests/eventual.rs new file mode 100644 index 0000000000..fc484aaad2 --- /dev/null +++ 
b/third_party/rust/futures-0.1.31/tests/eventual.rs @@ -0,0 +1,320 @@ +extern crate futures; + +mod support; +use support::*; + +use std::sync::mpsc; +use std::thread; + +use futures::prelude::*; +use futures::future::{ok, err}; +use futures::sync::oneshot; + +#[test] +fn and_then1() { + let (tx, rx) = mpsc::channel(); + + let tx2 = tx.clone(); + let p1 = ok::<_, i32>("a").then(move |t| { tx2.send("first").unwrap(); t }); + let tx2 = tx.clone(); + let p2 = ok("b").then(move |t| { tx2.send("second").unwrap(); t }); + let f = p1.and_then(|_| p2); + + assert!(rx.try_recv().is_err()); + f.map(move |s| tx.send(s).unwrap()).forget(); + assert_eq!(rx.recv(), Ok("first")); + assert_eq!(rx.recv(), Ok("second")); + assert_eq!(rx.recv(), Ok("b")); + assert!(rx.recv().is_err()); +} + +#[test] +fn and_then2() { + let (tx, rx) = mpsc::channel(); + + let tx2 = tx.clone(); + let p1 = err::<i32, _>(2).then(move |t| { tx2.send("first").unwrap(); t }); + let tx2 = tx.clone(); + let p2 = ok("b").then(move |t| { tx2.send("second").unwrap(); t }); + let f = p1.and_then(|_| p2); + + assert!(rx.try_recv().is_err()); + f.map_err(|_| drop(tx)).forget(); + assert_eq!(rx.recv(), Ok("first")); + assert!(rx.recv().is_err()); +} + +#[test] +fn oneshot1() { + let (c, p) = oneshot::channel::<i32>(); + let t = thread::spawn(|| c.send(1).unwrap()); + + let (tx, rx) = mpsc::channel(); + p.map(move |e| tx.send(e).unwrap()).forget(); + assert_eq!(rx.recv(), Ok(1)); + t.join().unwrap(); +} + +#[test] +fn oneshot2() { + let (c, p) = oneshot::channel::<i32>(); + let t = thread::spawn(|| c.send(1).unwrap()); + t.join().unwrap(); + + let (tx, rx) = mpsc::channel(); + p.map(move |e| tx.send(e).unwrap()).forget(); + assert_eq!(rx.recv(), Ok(1)); +} + +#[test] +fn oneshot3() { + let (c, p) = oneshot::channel::<i32>(); + let (tx, rx) = mpsc::channel(); + p.map(move |e| tx.send(e).unwrap()).forget(); + + let t = thread::spawn(|| c.send(1).unwrap()); + t.join().unwrap(); + + assert_eq!(rx.recv(), Ok(1)); +} + 
+#[test] +fn oneshot4() { + let (c, p) = oneshot::channel::<i32>(); + drop(c); + + let (tx, rx) = mpsc::channel(); + p.map(move |e| tx.send(e).unwrap()).forget(); + assert!(rx.recv().is_err()); +} + +#[test] +fn oneshot5() { + let (c, p) = oneshot::channel::<i32>(); + let t = thread::spawn(|| drop(c)); + let (tx, rx) = mpsc::channel(); + p.map(move |t| tx.send(t).unwrap()).forget(); + t.join().unwrap(); + assert!(rx.recv().is_err()); +} + +#[test] +fn oneshot6() { + let (c, p) = oneshot::channel::<i32>(); + drop(p); + c.send(2).unwrap_err(); +} + +#[test] +fn cancel1() { + let (c, p) = oneshot::channel::<i32>(); + drop(c); + p.map(|_| panic!()).forget(); +} + +#[test] +fn map_err1() { + ok::<i32, i32>(1).map_err(|_| panic!()).forget(); +} + +#[test] +fn map_err2() { + let (tx, rx) = mpsc::channel(); + err::<i32, i32>(1).map_err(move |v| tx.send(v).unwrap()).forget(); + assert_eq!(rx.recv(), Ok(1)); + assert!(rx.recv().is_err()); +} + +#[test] +fn map_err3() { + let (c, p) = oneshot::channel::<i32>(); + p.map_err(|_| {}).forget(); + drop(c); +} + +#[test] +fn or_else1() { + let (c1, p1) = oneshot::channel::<i32>(); + let (c2, p2) = oneshot::channel::<i32>(); + + let (tx, rx) = mpsc::channel(); + let tx2 = tx.clone(); + let p1 = p1.map_err(move |i| { tx2.send(2).unwrap(); i }); + let tx2 = tx.clone(); + let p2 = p2.map(move |i| { tx2.send(i).unwrap(); i }); + + assert!(rx.try_recv().is_err()); + drop(c1); + c2.send(3).unwrap(); + p1.or_else(|_| p2).map(move |v| tx.send(v).unwrap()).forget(); + + assert_eq!(rx.recv(), Ok(2)); + assert_eq!(rx.recv(), Ok(3)); + assert_eq!(rx.recv(), Ok(3)); + assert!(rx.recv().is_err()); +} + +#[test] +fn or_else2() { + let (c1, p1) = oneshot::channel::<i32>(); + + let (tx, rx) = mpsc::channel(); + + p1.or_else(move |_| { + tx.send(()).unwrap(); + ok::<i32, i32>(1) + }).forget(); + + c1.send(2).unwrap(); + assert!(rx.recv().is_err()); +} + +#[test] +fn join1() { + let (tx, rx) = mpsc::channel(); + ok::<i32, i32>(1).join(ok(2)) + 
.map(move |v| tx.send(v).unwrap()) + .forget(); + assert_eq!(rx.recv(), Ok((1, 2))); + assert!(rx.recv().is_err()); +} + +#[test] +fn join2() { + let (c1, p1) = oneshot::channel::<i32>(); + let (c2, p2) = oneshot::channel::<i32>(); + let (tx, rx) = mpsc::channel(); + p1.join(p2).map(move |v| tx.send(v).unwrap()).forget(); + assert!(rx.try_recv().is_err()); + c1.send(1).unwrap(); + assert!(rx.try_recv().is_err()); + c2.send(2).unwrap(); + assert_eq!(rx.recv(), Ok((1, 2))); + assert!(rx.recv().is_err()); +} + +#[test] +fn join3() { + let (c1, p1) = oneshot::channel::<i32>(); + let (c2, p2) = oneshot::channel::<i32>(); + let (tx, rx) = mpsc::channel(); + p1.join(p2).map_err(move |_v| tx.send(1).unwrap()).forget(); + assert!(rx.try_recv().is_err()); + drop(c1); + assert_eq!(rx.recv(), Ok(1)); + assert!(rx.recv().is_err()); + drop(c2); +} + +#[test] +fn join4() { + let (c1, p1) = oneshot::channel::<i32>(); + let (c2, p2) = oneshot::channel::<i32>(); + let (tx, rx) = mpsc::channel(); + p1.join(p2).map_err(move |v| tx.send(v).unwrap()).forget(); + assert!(rx.try_recv().is_err()); + drop(c1); + assert!(rx.recv().is_ok()); + drop(c2); + assert!(rx.recv().is_err()); +} + +#[test] +fn join5() { + let (c1, p1) = oneshot::channel::<i32>(); + let (c2, p2) = oneshot::channel::<i32>(); + let (c3, p3) = oneshot::channel::<i32>(); + let (tx, rx) = mpsc::channel(); + p1.join(p2).join(p3).map(move |v| tx.send(v).unwrap()).forget(); + assert!(rx.try_recv().is_err()); + c1.send(1).unwrap(); + assert!(rx.try_recv().is_err()); + c2.send(2).unwrap(); + assert!(rx.try_recv().is_err()); + c3.send(3).unwrap(); + assert_eq!(rx.recv(), Ok(((1, 2), 3))); + assert!(rx.recv().is_err()); +} + +#[test] +fn select1() { + let (c1, p1) = oneshot::channel::<i32>(); + let (c2, p2) = oneshot::channel::<i32>(); + let (tx, rx) = mpsc::channel(); + p1.select(p2).map(move |v| tx.send(v).unwrap()).forget(); + assert!(rx.try_recv().is_err()); + c1.send(1).unwrap(); + let (v, p2) = rx.recv().unwrap(); + 
assert_eq!(v, 1); + assert!(rx.recv().is_err()); + + let (tx, rx) = mpsc::channel(); + p2.map(move |v| tx.send(v).unwrap()).forget(); + c2.send(2).unwrap(); + assert_eq!(rx.recv(), Ok(2)); + assert!(rx.recv().is_err()); +} + +#[test] +fn select2() { + let (c1, p1) = oneshot::channel::<i32>(); + let (c2, p2) = oneshot::channel::<i32>(); + let (tx, rx) = mpsc::channel(); + p1.select(p2).map_err(move |v| tx.send((1, v.1)).unwrap()).forget(); + assert!(rx.try_recv().is_err()); + drop(c1); + let (v, p2) = rx.recv().unwrap(); + assert_eq!(v, 1); + assert!(rx.recv().is_err()); + + let (tx, rx) = mpsc::channel(); + p2.map(move |v| tx.send(v).unwrap()).forget(); + c2.send(2).unwrap(); + assert_eq!(rx.recv(), Ok(2)); + assert!(rx.recv().is_err()); +} + +#[test] +fn select3() { + let (c1, p1) = oneshot::channel::<i32>(); + let (c2, p2) = oneshot::channel::<i32>(); + let (tx, rx) = mpsc::channel(); + p1.select(p2).map_err(move |v| tx.send((1, v.1)).unwrap()).forget(); + assert!(rx.try_recv().is_err()); + drop(c1); + let (v, p2) = rx.recv().unwrap(); + assert_eq!(v, 1); + assert!(rx.recv().is_err()); + + let (tx, rx) = mpsc::channel(); + p2.map_err(move |_v| tx.send(2).unwrap()).forget(); + drop(c2); + assert_eq!(rx.recv(), Ok(2)); + assert!(rx.recv().is_err()); +} + +#[test] +fn select4() { + let (tx, rx) = mpsc::channel::<oneshot::Sender<i32>>(); + + let t = thread::spawn(move || { + for c in rx { + c.send(1).unwrap(); + } + }); + + let (tx2, rx2) = mpsc::channel(); + for _ in 0..10000 { + let (c1, p1) = oneshot::channel::<i32>(); + let (c2, p2) = oneshot::channel::<i32>(); + + let tx3 = tx2.clone(); + p1.select(p2).map(move |_| tx3.send(()).unwrap()).forget(); + tx.send(c1).unwrap(); + rx2.recv().unwrap(); + drop(c2); + } + drop(tx); + + t.join().unwrap(); +} diff --git a/third_party/rust/futures-0.1.31/tests/fuse.rs b/third_party/rust/futures-0.1.31/tests/fuse.rs new file mode 100644 index 0000000000..177d914e19 --- /dev/null +++ 
b/third_party/rust/futures-0.1.31/tests/fuse.rs @@ -0,0 +1,39 @@ +extern crate futures; + +use futures::prelude::*; +use futures::future::ok; +use futures::executor; + +mod support; +use support::*; + +#[test] +fn fuse() { + let mut future = executor::spawn(ok::<i32, u32>(2).fuse()); + assert!(future.poll_future_notify(¬ify_panic(), 0).unwrap().is_ready()); + assert!(future.poll_future_notify(¬ify_panic(), 0).unwrap().is_not_ready()); +} + +#[test] +fn fuse_is_done() { + use futures::future::{Fuse, FutureResult}; + + struct Wrapped(Fuse<FutureResult<i32, u32>>); + + impl Future for Wrapped { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + assert!(!self.0.is_done()); + assert_eq!(self.0.poll().unwrap(), Async::Ready(2)); + assert!(self.0.is_done()); + assert_eq!(self.0.poll().unwrap(), Async::NotReady); + assert!(self.0.is_done()); + + Ok(Async::Ready(())) + } + } + + assert!(Wrapped(ok::<i32, u32>(2).fuse()).wait().is_ok()); +}
\ No newline at end of file diff --git a/third_party/rust/futures-0.1.31/tests/future_flatten_stream.rs b/third_party/rust/futures-0.1.31/tests/future_flatten_stream.rs new file mode 100644 index 0000000000..442d381fd7 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/future_flatten_stream.rs @@ -0,0 +1,43 @@ +extern crate core; +extern crate futures; + +use core::marker; + +use futures::prelude::*; +use futures::future::{ok, err}; +use futures::stream; + +#[test] +fn successful_future() { + let stream_items = vec![17, 19]; + let future_of_a_stream = ok::<_, bool>(stream::iter_ok(stream_items)); + + let stream = future_of_a_stream.flatten_stream(); + + let mut iter = stream.wait(); + assert_eq!(Ok(17), iter.next().unwrap()); + assert_eq!(Ok(19), iter.next().unwrap()); + assert_eq!(None, iter.next()); +} + +struct PanickingStream<T, E> { + _marker: marker::PhantomData<(T, E)> +} + +impl<T, E> Stream for PanickingStream<T, E> { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + panic!() + } +} + +#[test] +fn failed_future() { + let future_of_a_stream = err::<PanickingStream<bool, u32>, _>(10); + let stream = future_of_a_stream.flatten_stream(); + let mut iter = stream.wait(); + assert_eq!(Err(10), iter.next().unwrap()); + assert_eq!(None, iter.next()); +} diff --git a/third_party/rust/futures-0.1.31/tests/futures_ordered.rs b/third_party/rust/futures-0.1.31/tests/futures_ordered.rs new file mode 100644 index 0000000000..6054192e3b --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/futures_ordered.rs @@ -0,0 +1,88 @@ +#![allow(bare_trait_objects, unknown_lints)] + +extern crate futures; + +use std::any::Any; + +use futures::sync::oneshot; +use futures::stream::futures_ordered; +use futures::prelude::*; + +mod support; + +#[test] +fn works_1() { + let (a_tx, a_rx) = oneshot::channel::<u32>(); + let (b_tx, b_rx) = oneshot::channel::<u32>(); + let (c_tx, c_rx) = oneshot::channel::<u32>(); + + let stream = 
futures_ordered(vec![a_rx, b_rx, c_rx]); + + let mut spawn = futures::executor::spawn(stream); + b_tx.send(99).unwrap(); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); + + a_tx.send(33).unwrap(); + c_tx.send(33).unwrap(); + assert_eq!(Some(Ok(33)), spawn.wait_stream()); + assert_eq!(Some(Ok(99)), spawn.wait_stream()); + assert_eq!(Some(Ok(33)), spawn.wait_stream()); + assert_eq!(None, spawn.wait_stream()); +} + +#[test] +fn works_2() { + let (a_tx, a_rx) = oneshot::channel::<u32>(); + let (b_tx, b_rx) = oneshot::channel::<u32>(); + let (c_tx, c_rx) = oneshot::channel::<u32>(); + + let stream = futures_ordered(vec![ + Box::new(a_rx) as Box<Future<Item = _, Error = _>>, + Box::new(b_rx.join(c_rx).map(|(a, b)| a + b)), + ]); + + let mut spawn = futures::executor::spawn(stream); + a_tx.send(33).unwrap(); + b_tx.send(33).unwrap(); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_ready()); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); + c_tx.send(33).unwrap(); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_ready()); +} + +#[test] +fn from_iterator() { + use futures::future::ok; + use futures::stream::FuturesOrdered; + + let stream = vec![ + ok::<u32, ()>(1), + ok::<u32, ()>(2), + ok::<u32, ()>(3) + ].into_iter().collect::<FuturesOrdered<_>>(); + assert_eq!(stream.len(), 3); + assert_eq!(stream.collect().wait(), Ok(vec![1,2,3])); +} + +#[test] +fn queue_never_unblocked() { + let (_a_tx, a_rx) = oneshot::channel::<Box<Any+Send>>(); + let (b_tx, b_rx) = oneshot::channel::<Box<Any+Send>>(); + let (c_tx, c_rx) = oneshot::channel::<Box<Any+Send>>(); + + let stream = futures_ordered(vec![ + Box::new(a_rx) as Box<Future<Item = _, Error = _>>, + Box::new(b_rx.select(c_rx).then(|res| Ok(Box::new(res) as Box<Any+Send>))), + ]); + + let mut spawn = futures::executor::spawn(stream); + for _ in 0..10 { + 
assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); + } + + b_tx.send(Box::new(())).unwrap(); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); + c_tx.send(Box::new(())).unwrap(); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); +} diff --git a/third_party/rust/futures-0.1.31/tests/futures_unordered.rs b/third_party/rust/futures-0.1.31/tests/futures_unordered.rs new file mode 100644 index 0000000000..325a6f3e48 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/futures_unordered.rs @@ -0,0 +1,167 @@ +#![allow(bare_trait_objects, unknown_lints)] + +extern crate futures; + +use std::any::Any; + +use futures::sync::oneshot; +use std::iter::FromIterator; +use futures::stream::{futures_unordered, FuturesUnordered}; +use futures::prelude::*; + +mod support; + +#[test] +fn works_1() { + let (a_tx, a_rx) = oneshot::channel::<u32>(); + let (b_tx, b_rx) = oneshot::channel::<u32>(); + let (c_tx, c_rx) = oneshot::channel::<u32>(); + + let stream = futures_unordered(vec![a_rx, b_rx, c_rx]); + + let mut spawn = futures::executor::spawn(stream); + b_tx.send(99).unwrap(); + assert_eq!(Some(Ok(99)), spawn.wait_stream()); + + a_tx.send(33).unwrap(); + c_tx.send(33).unwrap(); + assert_eq!(Some(Ok(33)), spawn.wait_stream()); + assert_eq!(Some(Ok(33)), spawn.wait_stream()); + assert_eq!(None, spawn.wait_stream()); +} + +#[test] +fn works_2() { + let (a_tx, a_rx) = oneshot::channel::<u32>(); + let (b_tx, b_rx) = oneshot::channel::<u32>(); + let (c_tx, c_rx) = oneshot::channel::<u32>(); + + let stream = futures_unordered(vec![ + Box::new(a_rx) as Box<Future<Item = _, Error = _>>, + Box::new(b_rx.join(c_rx).map(|(a, b)| a + b)), + ]); + + let mut spawn = futures::executor::spawn(stream); + a_tx.send(33).unwrap(); + b_tx.send(33).unwrap(); + 
assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_ready()); + c_tx.send(33).unwrap(); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_ready()); +} + +#[test] +fn from_iterator() { + use futures::future::ok; + use futures::stream::FuturesUnordered; + + let stream = vec![ + ok::<u32, ()>(1), + ok::<u32, ()>(2), + ok::<u32, ()>(3) + ].into_iter().collect::<FuturesUnordered<_>>(); + assert_eq!(stream.len(), 3); + assert_eq!(stream.collect().wait(), Ok(vec![1,2,3])); +} + +#[test] +fn finished_future_ok() { + let (_a_tx, a_rx) = oneshot::channel::<Box<Any+Send>>(); + let (b_tx, b_rx) = oneshot::channel::<Box<Any+Send>>(); + let (c_tx, c_rx) = oneshot::channel::<Box<Any+Send>>(); + + let stream = futures_unordered(vec![ + Box::new(a_rx) as Box<Future<Item = _, Error = _>>, + Box::new(b_rx.select(c_rx).then(|res| Ok(Box::new(res) as Box<Any+Send>))), + ]); + + let mut spawn = futures::executor::spawn(stream); + for _ in 0..10 { + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); + } + + b_tx.send(Box::new(())).unwrap(); + let next = spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap(); + assert!(next.is_ready()); + c_tx.send(Box::new(())).unwrap(); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); + assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); +} + +#[test] +fn iter_mut_cancel() { + let (a_tx, a_rx) = oneshot::channel::<u32>(); + let (b_tx, b_rx) = oneshot::channel::<u32>(); + let (c_tx, c_rx) = oneshot::channel::<u32>(); + + let mut stream = futures_unordered(vec![a_rx, b_rx, c_rx]); + + for rx in stream.iter_mut() { + rx.close(); + } + + assert!(a_tx.is_canceled()); + assert!(b_tx.is_canceled()); + assert!(c_tx.is_canceled()); + + let mut spawn = futures::executor::spawn(stream); + assert_eq!(Some(Err(futures::sync::oneshot::Canceled)), spawn.wait_stream()); + 
assert_eq!(Some(Err(futures::sync::oneshot::Canceled)), spawn.wait_stream()); + assert_eq!(Some(Err(futures::sync::oneshot::Canceled)), spawn.wait_stream()); + assert_eq!(None, spawn.wait_stream()); +} + +#[test] +fn iter_mut_len() { + let mut stream = futures_unordered(vec![ + futures::future::empty::<(),()>(), + futures::future::empty::<(),()>(), + futures::future::empty::<(),()>() + ]); + + let mut iter_mut = stream.iter_mut(); + assert_eq!(iter_mut.len(), 3); + assert!(iter_mut.next().is_some()); + assert_eq!(iter_mut.len(), 2); + assert!(iter_mut.next().is_some()); + assert_eq!(iter_mut.len(), 1); + assert!(iter_mut.next().is_some()); + assert_eq!(iter_mut.len(), 0); + assert!(iter_mut.next().is_none()); +} + +#[test] +fn polled_only_once_at_most_per_iteration() { + #[derive(Debug, Clone, Copy, Default)] + struct F { + polled: bool, + } + + impl Future for F { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> { + if self.polled { + panic!("polled twice") + } else { + self.polled = true; + Ok(Async::NotReady) + } + } + } + + + let tasks = FuturesUnordered::from_iter(vec![F::default(); 10]); + let mut tasks = futures::executor::spawn(tasks); + assert!(tasks.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); + assert_eq!(10, tasks.get_mut().iter_mut().filter(|f| f.polled).count()); + + let tasks = FuturesUnordered::from_iter(vec![F::default(); 33]); + let mut tasks = futures::executor::spawn(tasks); + assert!(tasks.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready()); + assert_eq!(33, tasks.get_mut().iter_mut().filter(|f| f.polled).count()); + + let tasks = FuturesUnordered::<F>::new(); + let mut tasks = futures::executor::spawn(tasks); + assert!(tasks.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_ready()); +} diff --git a/third_party/rust/futures-0.1.31/tests/inspect.rs b/third_party/rust/futures-0.1.31/tests/inspect.rs new file mode 100644 index 
0000000000..c16372ed91 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/inspect.rs @@ -0,0 +1,23 @@ +extern crate futures; + +use futures::prelude::*; +use futures::future::{ok, err}; + +#[test] +fn smoke() { + let mut counter = 0; + + { + let work = ok::<u32, u32>(40).inspect(|val| { counter += *val; }); + assert_eq!(work.wait(), Ok(40)); + } + + assert_eq!(counter, 40); + + { + let work = err::<u32, u32>(4).inspect(|val| { counter += *val; }); + assert_eq!(work.wait(), Err(4)); + } + + assert_eq!(counter, 40); +} diff --git a/third_party/rust/futures-0.1.31/tests/mpsc-close.rs b/third_party/rust/futures-0.1.31/tests/mpsc-close.rs new file mode 100644 index 0000000000..061616ae06 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/mpsc-close.rs @@ -0,0 +1,152 @@ +extern crate futures; + +use std::sync::{Arc, Weak}; +use std::thread; +use std::time::{Duration, Instant}; + +use futures::prelude::*; +use futures::sync::mpsc::*; +use futures::task; + +#[test] +fn smoke() { + let (mut sender, receiver) = channel(1); + + let t = thread::spawn(move ||{ + while let Ok(s) = sender.send(42).wait() { + sender = s; + } + }); + + receiver.take(3).for_each(|_| Ok(())).wait().unwrap(); + + t.join().unwrap() +} + +// Stress test that `try_send()`s occurring concurrently with receiver +// close/drops don't appear as successful sends. +#[test] +fn stress_try_send_as_receiver_closes() { + const AMT: usize = 10000; + // To provide variable timing characteristics (in the hopes of + // reproducing the collision that leads to a race), we busy-re-poll + // the test MPSC receiver a variable number of times before actually + // stopping. We vary this countdown between 1 and the following + // value. + const MAX_COUNTDOWN: usize = 20; + // When we detect that a successfully sent item is still in the + // queue after a disconnect, we spin for up to 100ms to confirm that + // it is a persistent condition and not a concurrency illusion. 
+ const SPIN_TIMEOUT_S: u64 = 10; + const SPIN_SLEEP_MS: u64 = 10; + struct TestRx { + rx: Receiver<Arc<()>>, + // The number of times to query `rx` before dropping it. + poll_count: usize + } + struct TestTask { + command_rx: Receiver<TestRx>, + test_rx: Option<Receiver<Arc<()>>>, + countdown: usize, + } + impl TestTask { + /// Create a new TestTask + fn new() -> (TestTask, Sender<TestRx>) { + let (command_tx, command_rx) = channel::<TestRx>(0); + ( + TestTask { + command_rx: command_rx, + test_rx: None, + countdown: 0, // 0 means no countdown is in progress. + }, + command_tx, + ) + } + } + impl Future for TestTask { + type Item = (); + type Error = (); + fn poll(&mut self) -> Poll<(), ()> { + // Poll the test channel, if one is present. + if let Some(ref mut rx) = self.test_rx { + if let Ok(Async::Ready(v)) = rx.poll() { + let _ = v.expect("test finished unexpectedly!"); + } + self.countdown -= 1; + // Busy-poll until the countdown is finished. + task::current().notify(); + } + // Accept any newly submitted MPSC channels for testing. + match self.command_rx.poll()? { + Async::Ready(Some(TestRx { rx, poll_count })) => { + self.test_rx = Some(rx); + self.countdown = poll_count; + task::current().notify(); + }, + Async::Ready(None) => return Ok(Async::Ready(())), + _ => {}, + } + if self.countdown == 0 { + // Countdown complete -- drop the Receiver. + self.test_rx = None; + } + Ok(Async::NotReady) + } + } + let (f, mut cmd_tx) = TestTask::new(); + let bg = thread::spawn(move || f.wait()); + for i in 0..AMT { + let (mut test_tx, rx) = channel(0); + let poll_count = i % MAX_COUNTDOWN; + cmd_tx.try_send(TestRx { rx: rx, poll_count: poll_count }).unwrap(); + let mut prev_weak: Option<Weak<()>> = None; + let mut attempted_sends = 0; + let mut successful_sends = 0; + loop { + // Create a test item. 
+ let item = Arc::new(()); + let weak = Arc::downgrade(&item); + match test_tx.try_send(item) { + Ok(_) => { + prev_weak = Some(weak); + successful_sends += 1; + } + Err(ref e) if e.is_full() => {} + Err(ref e) if e.is_disconnected() => { + // Test for evidence of the race condition. + if let Some(prev_weak) = prev_weak { + if prev_weak.upgrade().is_some() { + // The previously sent item is still allocated. + // However, there appears to be some aspect of the + // concurrency that can legitimately cause the Arc + // to be momentarily valid. Spin for up to 100ms + // waiting for the previously sent item to be + // dropped. + let t0 = Instant::now(); + let mut spins = 0; + loop { + if prev_weak.upgrade().is_none() { + break; + } + assert!(t0.elapsed() < Duration::from_secs(SPIN_TIMEOUT_S), + "item not dropped on iteration {} after \ + {} sends ({} successful). spin=({})", + i, attempted_sends, successful_sends, spins + ); + spins += 1; + thread::sleep(Duration::from_millis(SPIN_SLEEP_MS)); + } + } + } + break; + } + Err(ref e) => panic!("unexpected error: {}", e), + } + attempted_sends += 1; + } + } + drop(cmd_tx); + bg.join() + .expect("background thread join") + .expect("background thread result"); +} diff --git a/third_party/rust/futures-0.1.31/tests/mpsc.rs b/third_party/rust/futures-0.1.31/tests/mpsc.rs new file mode 100644 index 0000000000..9cb83e5952 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/mpsc.rs @@ -0,0 +1,567 @@ +#![cfg(feature = "use_std")] +#![allow(bare_trait_objects, unknown_lints)] + +#[macro_use] +extern crate futures; + +use futures::prelude::*; +use futures::future::{lazy, ok}; +use futures::stream::unfold; +use futures::sync::mpsc; +use futures::sync::oneshot; + +use std::thread; +use std::sync::{Arc, Mutex}; +use std::sync::atomic::{AtomicUsize, Ordering}; + +mod support; +use support::*; + + +trait AssertSend: Send {} +impl AssertSend for mpsc::Sender<i32> {} +impl AssertSend for mpsc::Receiver<i32> {} + +#[test] +fn send_recv() 
{ + let (tx, rx) = mpsc::channel::<i32>(16); + let mut rx = rx.wait(); + + tx.send(1).wait().unwrap(); + + assert_eq!(rx.next().unwrap(), Ok(1)); +} + +#[test] +fn send_recv_no_buffer() { + let (mut tx, mut rx) = mpsc::channel::<i32>(0); + + // Run on a task context + lazy(move || { + assert!(tx.poll_complete().unwrap().is_ready()); + assert!(tx.poll_ready().unwrap().is_ready()); + + // Send first message + let res = tx.start_send(1).unwrap(); + assert!(is_ready(&res)); + assert!(tx.poll_ready().unwrap().is_not_ready()); + + // Send second message + let res = tx.start_send(2).unwrap(); + assert!(!is_ready(&res)); + + // Take the value + assert_eq!(rx.poll().unwrap(), Async::Ready(Some(1))); + assert!(tx.poll_ready().unwrap().is_ready()); + + let res = tx.start_send(2).unwrap(); + assert!(is_ready(&res)); + assert!(tx.poll_ready().unwrap().is_not_ready()); + + // Take the value + assert_eq!(rx.poll().unwrap(), Async::Ready(Some(2))); + assert!(tx.poll_ready().unwrap().is_ready()); + + Ok::<(), ()>(()) + }).wait().unwrap(); +} + +#[test] +fn send_shared_recv() { + let (tx1, rx) = mpsc::channel::<i32>(16); + let tx2 = tx1.clone(); + let mut rx = rx.wait(); + + tx1.send(1).wait().unwrap(); + assert_eq!(rx.next().unwrap(), Ok(1)); + + tx2.send(2).wait().unwrap(); + assert_eq!(rx.next().unwrap(), Ok(2)); +} + +#[test] +fn send_recv_threads() { + let (tx, rx) = mpsc::channel::<i32>(16); + let mut rx = rx.wait(); + + thread::spawn(move|| { + tx.send(1).wait().unwrap(); + }); + + assert_eq!(rx.next().unwrap(), Ok(1)); +} + +#[test] +fn send_recv_threads_no_capacity() { + let (tx, rx) = mpsc::channel::<i32>(0); + let mut rx = rx.wait(); + + let (readytx, readyrx) = mpsc::channel::<()>(2); + let mut readyrx = readyrx.wait(); + let t = thread::spawn(move|| { + let readytx = readytx.sink_map_err(|_| panic!()); + let (a, b) = tx.send(1).join(readytx.send(())).wait().unwrap(); + a.send(2).join(b.send(())).wait().unwrap(); + }); + + drop(readyrx.next().unwrap()); + 
assert_eq!(rx.next().unwrap(), Ok(1)); + drop(readyrx.next().unwrap()); + assert_eq!(rx.next().unwrap(), Ok(2)); + + t.join().unwrap(); +} + +#[test] +fn recv_close_gets_none() { + let (mut tx, mut rx) = mpsc::channel::<i32>(10); + + // Run on a task context + lazy(move || { + rx.close(); + + assert_eq!(rx.poll(), Ok(Async::Ready(None))); + assert!(tx.poll_ready().is_err()); + + drop(tx); + + Ok::<(), ()>(()) + }).wait().unwrap(); +} + + +#[test] +fn tx_close_gets_none() { + let (_, mut rx) = mpsc::channel::<i32>(10); + + // Run on a task context + lazy(move || { + assert_eq!(rx.poll(), Ok(Async::Ready(None))); + assert_eq!(rx.poll(), Ok(Async::Ready(None))); + + Ok::<(), ()>(()) + }).wait().unwrap(); +} + +#[test] +fn spawn_sends_items() { + let core = local_executor::Core::new(); + let stream = unfold(0, |i| Some(ok::<_,u8>((i, i + 1)))); + let rx = mpsc::spawn(stream, &core, 1); + assert_eq!(core.run(rx.take(4).collect()).unwrap(), + [0, 1, 2, 3]); +} + +#[test] +fn spawn_kill_dead_stream() { + use std::thread; + use std::time::Duration; + use futures::future::Either; + use futures::sync::oneshot; + + // a stream which never returns anything (maybe a remote end isn't + // responding), but dropping it leads to observable side effects + // (like closing connections, releasing limited resources, ...) 
+ #[derive(Debug)] + struct Dead { + // when dropped you should get Err(oneshot::Canceled) on the + // receiving end + done: oneshot::Sender<()>, + } + impl Stream for Dead { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + Ok(Async::NotReady) + } + } + + // need to implement a timeout for the test, as it would hang + // forever right now + let (timeout_tx, timeout_rx) = oneshot::channel(); + thread::spawn(move || { + thread::sleep(Duration::from_millis(1000)); + let _ = timeout_tx.send(()); + }); + + let core = local_executor::Core::new(); + let (done_tx, done_rx) = oneshot::channel(); + let stream = Dead{done: done_tx}; + let rx = mpsc::spawn(stream, &core, 1); + let res = core.run( + Ok::<_, ()>(()) + .into_future() + .then(move |_| { + // now drop the spawned stream: maybe some timeout exceeded, + // or some connection on this end was closed by the remote + // end. + drop(rx); + // and wait for the spawned stream to release its resources + done_rx + }) + .select2(timeout_rx) + ); + match res { + Err(Either::A((oneshot::Canceled, _))) => (), + _ => { + panic!("dead stream wasn't canceled"); + }, + } +} + +#[test] +fn stress_shared_unbounded() { + const AMT: u32 = 10000; + const NTHREADS: u32 = 8; + let (tx, rx) = mpsc::unbounded::<i32>(); + let mut rx = rx.wait(); + + let t = thread::spawn(move|| { + for _ in 0..AMT * NTHREADS { + assert_eq!(rx.next().unwrap(), Ok(1)); + } + + if rx.next().is_some() { + panic!(); + } + }); + + for _ in 0..NTHREADS { + let tx = tx.clone(); + + thread::spawn(move|| { + for _ in 0..AMT { + tx.unbounded_send(1).unwrap(); + } + }); + } + + drop(tx); + + t.join().ok().unwrap(); +} + +#[test] +fn stress_shared_bounded_hard() { + const AMT: u32 = 10000; + const NTHREADS: u32 = 8; + let (tx, rx) = mpsc::channel::<i32>(0); + let mut rx = rx.wait(); + + let t = thread::spawn(move|| { + for _ in 0..AMT * NTHREADS { + assert_eq!(rx.next().unwrap(), Ok(1)); + } + + if rx.next().is_some() 
{ + panic!(); + } + }); + + for _ in 0..NTHREADS { + let mut tx = tx.clone(); + + thread::spawn(move|| { + for _ in 0..AMT { + tx = tx.send(1).wait().unwrap(); + } + }); + } + + drop(tx); + + t.join().ok().unwrap(); +} + +#[test] +fn stress_receiver_multi_task_bounded_hard() { + const AMT: usize = 10_000; + const NTHREADS: u32 = 2; + + let (mut tx, rx) = mpsc::channel::<usize>(0); + let rx = Arc::new(Mutex::new(Some(rx))); + let n = Arc::new(AtomicUsize::new(0)); + + let mut th = vec![]; + + for _ in 0..NTHREADS { + let rx = rx.clone(); + let n = n.clone(); + + let t = thread::spawn(move || { + let mut i = 0; + + loop { + i += 1; + let mut lock = rx.lock().ok().unwrap(); + + match lock.take() { + Some(mut rx) => { + if i % 5 == 0 { + let (item, rest) = rx.into_future().wait().ok().unwrap(); + + if item.is_none() { + break; + } + + n.fetch_add(1, Ordering::Relaxed); + *lock = Some(rest); + } else { + // Just poll + let n = n.clone(); + let r = lazy(move || { + let r = match rx.poll().unwrap() { + Async::Ready(Some(_)) => { + n.fetch_add(1, Ordering::Relaxed); + *lock = Some(rx); + false + } + Async::Ready(None) => { + true + } + Async::NotReady => { + *lock = Some(rx); + false + } + }; + + Ok::<bool, ()>(r) + }).wait().unwrap(); + + if r { + break; + } + } + } + None => break, + } + } + }); + + th.push(t); + } + + for i in 0..AMT { + tx = tx.send(i).wait().unwrap(); + } + + drop(tx); + + for t in th { + t.join().unwrap(); + } + + assert_eq!(AMT, n.load(Ordering::Relaxed)); +} + +/// Stress test that receiver properly receives all the messages +/// after sender dropped. 
+#[test] +fn stress_drop_sender() { + fn list() -> Box<Stream<Item=i32, Error=u32>> { + let (tx, rx) = mpsc::channel(1); + tx.send(Ok(1)) + .and_then(|tx| tx.send(Ok(2))) + .and_then(|tx| tx.send(Ok(3))) + .forget(); + Box::new(rx.then(|r| r.unwrap())) + } + + for _ in 0..10000 { + assert_eq!(list().wait().collect::<Result<Vec<_>, _>>(), + Ok(vec![1, 2, 3])); + } +} + +/// Stress test that after receiver dropped, +/// no messages are lost. +fn stress_close_receiver_iter() { + let (tx, rx) = mpsc::unbounded(); + let (unwritten_tx, unwritten_rx) = std::sync::mpsc::channel(); + let th = thread::spawn(move || { + for i in 1.. { + if let Err(_) = tx.unbounded_send(i) { + unwritten_tx.send(i).expect("unwritten_tx"); + return; + } + } + }); + + let mut rx = rx.wait(); + + // Read one message to make sure thread effectively started + assert_eq!(Some(Ok(1)), rx.next()); + + rx.get_mut().close(); + + for i in 2.. { + match rx.next() { + Some(Ok(r)) => assert!(i == r), + Some(Err(_)) => unreachable!(), + None => { + let unwritten = unwritten_rx.recv().expect("unwritten_rx"); + assert_eq!(unwritten, i); + th.join().unwrap(); + return; + } + } + } +} + +#[test] +fn stress_close_receiver() { + for _ in 0..10000 { + stress_close_receiver_iter(); + } +} + +/// Tests that after `poll_ready` indicates capacity a channel can always send without waiting. +#[test] +fn stress_poll_ready() { + // A task which checks channel capacity using poll_ready, and pushes items onto the channel when + // ready. + struct SenderTask { + sender: mpsc::Sender<u32>, + count: u32, + } + impl Future for SenderTask { + type Item = (); + type Error = (); + fn poll(&mut self) -> Poll<(), ()> { + // In a loop, check if the channel is ready. If so, push an item onto the channel + // (asserting that it doesn't attempt to block). 
+ while self.count > 0 { + try_ready!(self.sender.poll_ready().map_err(|_| ())); + assert!(self.sender.start_send(self.count).unwrap().is_ready()); + self.count -= 1; + } + Ok(Async::Ready(())) + } + } + + const AMT: u32 = 1000; + const NTHREADS: u32 = 8; + + /// Run a stress test using the specified channel capacity. + fn stress(capacity: usize) { + let (tx, rx) = mpsc::channel(capacity); + let mut threads = Vec::new(); + for _ in 0..NTHREADS { + let sender = tx.clone(); + threads.push(thread::spawn(move || { + SenderTask { + sender: sender, + count: AMT, + }.wait() + })); + } + drop(tx); + + let mut rx = rx.wait(); + for _ in 0..AMT * NTHREADS { + assert!(rx.next().is_some()); + } + + assert!(rx.next().is_none()); + + for thread in threads { + thread.join().unwrap().unwrap(); + } + } + + stress(0); + stress(1); + stress(8); + stress(16); +} + +fn is_ready<T>(res: &AsyncSink<T>) -> bool { + match *res { + AsyncSink::Ready => true, + _ => false, + } +} + +#[test] +fn try_send_1() { + const N: usize = 3000; + let (mut tx, rx) = mpsc::channel(0); + + let t = thread::spawn(move || { + for i in 0..N { + loop { + if tx.try_send(i).is_ok() { + break + } + } + } + }); + for (i, j) in rx.wait().enumerate() { + assert_eq!(i, j.unwrap()); + } + t.join().unwrap(); +} + +#[test] +fn try_send_2() { + let (mut tx, rx) = mpsc::channel(0); + + tx.try_send("hello").unwrap(); + + let (readytx, readyrx) = oneshot::channel::<()>(); + + let th = thread::spawn(|| { + lazy(|| { + assert!(tx.start_send("fail").unwrap().is_not_ready()); + Ok::<_, ()>(()) + }).wait().unwrap(); + + drop(readytx); + tx.send("goodbye").wait().unwrap(); + }); + + let mut rx = rx.wait(); + + drop(readyrx.wait()); + assert_eq!(rx.next(), Some(Ok("hello"))); + assert_eq!(rx.next(), Some(Ok("goodbye"))); + assert!(rx.next().is_none()); + + th.join().unwrap(); +} + +#[test] +fn try_send_fail() { + let (mut tx, rx) = mpsc::channel(0); + let mut rx = rx.wait(); + + tx.try_send("hello").unwrap(); + + // This should 
fail + assert!(tx.try_send("fail").is_err()); + + assert_eq!(rx.next(), Some(Ok("hello"))); + + tx.try_send("goodbye").unwrap(); + drop(tx); + + assert_eq!(rx.next(), Some(Ok("goodbye"))); + assert!(rx.next().is_none()); +} + +#[test] +fn bounded_is_really_bounded() { + use futures::Async::*; + let (mut tx, mut rx) = mpsc::channel(0); + lazy(|| { + assert!(tx.start_send(1).unwrap().is_ready()); + // Not ready until we receive + assert!(!tx.poll_complete().unwrap().is_ready()); + // Receive the value + assert_eq!(rx.poll().unwrap(), Ready(Some(1))); + // Now the sender is ready + assert!(tx.poll_complete().unwrap().is_ready()); + Ok::<_, ()>(()) + }).wait().unwrap(); +} diff --git a/third_party/rust/futures-0.1.31/tests/oneshot.rs b/third_party/rust/futures-0.1.31/tests/oneshot.rs new file mode 100644 index 0000000000..45c1996876 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/oneshot.rs @@ -0,0 +1,253 @@ +extern crate futures; + +use std::sync::mpsc; +use std::thread; + +use futures::prelude::*; +use futures::future::{lazy, ok}; +use futures::sync::oneshot::*; + +mod support; +use support::*; + +#[test] +fn smoke_poll() { + let (mut tx, rx) = channel::<u32>(); + let mut task = futures::executor::spawn(lazy(|| { + assert!(tx.poll_cancel().unwrap().is_not_ready()); + assert!(tx.poll_cancel().unwrap().is_not_ready()); + drop(rx); + assert!(tx.poll_cancel().unwrap().is_ready()); + assert!(tx.poll_cancel().unwrap().is_ready()); + ok::<(), ()>(()) + })); + assert!(task.poll_future_notify(¬ify_noop(), 0).unwrap().is_ready()); +} + +#[test] +fn cancel_notifies() { + let (tx, rx) = channel::<u32>(); + let (tx2, rx2) = mpsc::channel(); + + WaitForCancel { tx: tx }.then(move |v| tx2.send(v)).forget(); + drop(rx); + rx2.recv().unwrap().unwrap(); +} + +struct WaitForCancel { + tx: Sender<u32>, +} + +impl Future for WaitForCancel { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + self.tx.poll_cancel() + } +} + +#[test] +fn cancel_lots() { 
+ let (tx, rx) = mpsc::channel::<(Sender<_>, mpsc::Sender<_>)>(); + let t = thread::spawn(move || { + for (tx, tx2) in rx { + WaitForCancel { tx: tx }.then(move |v| tx2.send(v)).forget(); + } + + }); + + for _ in 0..20000 { + let (otx, orx) = channel::<u32>(); + let (tx2, rx2) = mpsc::channel(); + tx.send((otx, tx2)).unwrap(); + drop(orx); + rx2.recv().unwrap().unwrap(); + } + drop(tx); + + t.join().unwrap(); +} + +#[test] +fn close() { + let (mut tx, mut rx) = channel::<u32>(); + rx.close(); + assert!(rx.poll().is_err()); + assert!(tx.poll_cancel().unwrap().is_ready()); +} + +#[test] +fn close_wakes() { + let (tx, mut rx) = channel::<u32>(); + let (tx2, rx2) = mpsc::channel(); + let t = thread::spawn(move || { + rx.close(); + rx2.recv().unwrap(); + }); + WaitForCancel { tx: tx }.wait().unwrap(); + tx2.send(()).unwrap(); + t.join().unwrap(); +} + +#[test] +fn is_canceled() { + let (tx, rx) = channel::<u32>(); + assert!(!tx.is_canceled()); + drop(rx); + assert!(tx.is_canceled()); +} + +#[test] +fn cancel_sends() { + let (tx, rx) = mpsc::channel::<Sender<_>>(); + let t = thread::spawn(move || { + for otx in rx { + let _ = otx.send(42); + } + }); + + for _ in 0..20000 { + let (otx, mut orx) = channel::<u32>(); + tx.send(otx).unwrap(); + + orx.close(); + // Not necessary to wrap in a task because the implementation of oneshot + // never calls `task::current()` if the channel has been closed already. 
+ let _ = orx.poll(); + } + + drop(tx); + t.join().unwrap(); +} + +#[test] +fn spawn_sends_items() { + let core = local_executor::Core::new(); + let future = ok::<_, ()>(1); + let rx = spawn(future, &core); + assert_eq!(core.run(rx).unwrap(), 1); +} + +#[test] +fn spawn_kill_dead_stream() { + use std::thread; + use std::time::Duration; + use futures::future::Either; + use futures::sync::oneshot; + + // a future which never returns anything (forever accepting incoming + // connections), but dropping it leads to observable side effects + // (like closing listening sockets, releasing limited resources, + // ...) + #[derive(Debug)] + struct Dead { + // when dropped you should get Err(oneshot::Canceled) on the + // receiving end + done: oneshot::Sender<()>, + } + impl Future for Dead { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + Ok(Async::NotReady) + } + } + + // need to implement a timeout for the test, as it would hang + // forever right now + let (timeout_tx, timeout_rx) = oneshot::channel(); + thread::spawn(move || { + thread::sleep(Duration::from_millis(1000)); + let _ = timeout_tx.send(()); + }); + + let core = local_executor::Core::new(); + let (done_tx, done_rx) = oneshot::channel(); + let future = Dead{done: done_tx}; + let rx = spawn(future, &core); + let res = core.run( + Ok::<_, ()>(()) + .into_future() + .then(move |_| { + // now drop the spawned future: maybe some timeout exceeded, + // or some connection on this end was closed by the remote + // end. 
+ drop(rx); + // and wait for the spawned future to release its resources + done_rx + }) + .select2(timeout_rx) + ); + match res { + Err(Either::A((oneshot::Canceled, _))) => (), + Ok(Either::B(((), _))) => { + panic!("dead future wasn't canceled (timeout)"); + }, + _ => { + panic!("dead future wasn't canceled (unexpected result)"); + }, + } +} + +#[test] +fn spawn_dont_kill_forgot_dead_stream() { + use std::thread; + use std::time::Duration; + use futures::future::Either; + use futures::sync::oneshot; + + // a future which never returns anything (forever accepting incoming + // connections), but dropping it leads to observable side effects + // (like closing listening sockets, releasing limited resources, + // ...) + #[derive(Debug)] + struct Dead { + // when dropped you should get Err(oneshot::Canceled) on the + // receiving end + done: oneshot::Sender<()>, + } + impl Future for Dead { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + Ok(Async::NotReady) + } + } + + // need to implement a timeout for the test, as it would hang + // forever right now + let (timeout_tx, timeout_rx) = oneshot::channel(); + thread::spawn(move || { + thread::sleep(Duration::from_millis(1000)); + let _ = timeout_tx.send(()); + }); + + let core = local_executor::Core::new(); + let (done_tx, done_rx) = oneshot::channel(); + let future = Dead{done: done_tx}; + let rx = spawn(future, &core); + let res = core.run( + Ok::<_, ()>(()) + .into_future() + .then(move |_| { + // forget the spawned future: should keep running, i.e. hit + // the timeout below. 
+ rx.forget(); + // and wait for the spawned future to release its resources + done_rx + }) + .select2(timeout_rx) + ); + match res { + Err(Either::A((oneshot::Canceled, _))) => { + panic!("forgotten dead future was canceled"); + }, + Ok(Either::B(((), _))) => (), // reached timeout + _ => { + panic!("forgotten dead future was canceled (unexpected result)"); + }, + } +} diff --git a/third_party/rust/futures-0.1.31/tests/ready_queue.rs b/third_party/rust/futures-0.1.31/tests/ready_queue.rs new file mode 100644 index 0000000000..b0dc2375ba --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/ready_queue.rs @@ -0,0 +1,164 @@ +extern crate futures; + +use std::panic::{self, AssertUnwindSafe}; + +use futures::prelude::*; +use futures::Async::*; +use futures::future; +use futures::stream::FuturesUnordered; +use futures::sync::oneshot; + +trait AssertSendSync: Send + Sync {} +impl AssertSendSync for FuturesUnordered<()> {} + +#[test] +fn basic_usage() { + future::lazy(move || { + let mut queue = FuturesUnordered::new(); + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + let (tx3, rx3) = oneshot::channel(); + + queue.push(rx1); + queue.push(rx2); + queue.push(rx3); + + assert!(!queue.poll().unwrap().is_ready()); + + tx2.send("hello").unwrap(); + + assert_eq!(Ready(Some("hello")), queue.poll().unwrap()); + assert!(!queue.poll().unwrap().is_ready()); + + tx1.send("world").unwrap(); + tx3.send("world2").unwrap(); + + assert_eq!(Ready(Some("world")), queue.poll().unwrap()); + assert_eq!(Ready(Some("world2")), queue.poll().unwrap()); + assert_eq!(Ready(None), queue.poll().unwrap()); + + Ok::<_, ()>(()) + }).wait().unwrap(); +} + +#[test] +fn resolving_errors() { + future::lazy(move || { + let mut queue = FuturesUnordered::new(); + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + let (tx3, rx3) = oneshot::channel(); + + queue.push(rx1); + queue.push(rx2); + queue.push(rx3); + + 
assert!(!queue.poll().unwrap().is_ready()); + + drop(tx2); + + assert!(queue.poll().is_err()); + assert!(!queue.poll().unwrap().is_ready()); + + drop(tx1); + tx3.send("world2").unwrap(); + + assert!(queue.poll().is_err()); + assert_eq!(Ready(Some("world2")), queue.poll().unwrap()); + assert_eq!(Ready(None), queue.poll().unwrap()); + + Ok::<_, ()>(()) + }).wait().unwrap(); +} + +#[test] +fn dropping_ready_queue() { + future::lazy(move || { + let mut queue = FuturesUnordered::new(); + let (mut tx1, rx1) = oneshot::channel::<()>(); + let (mut tx2, rx2) = oneshot::channel::<()>(); + let (mut tx3, rx3) = oneshot::channel::<()>(); + + queue.push(rx1); + queue.push(rx2); + queue.push(rx3); + + assert!(!tx1.poll_cancel().unwrap().is_ready()); + assert!(!tx2.poll_cancel().unwrap().is_ready()); + assert!(!tx3.poll_cancel().unwrap().is_ready()); + + drop(queue); + + assert!(tx1.poll_cancel().unwrap().is_ready()); + assert!(tx2.poll_cancel().unwrap().is_ready()); + assert!(tx3.poll_cancel().unwrap().is_ready()); + + Ok::<_, ()>(()) + }).wait().unwrap(); +} + +#[test] +fn stress() { + const ITER: usize = 300; + + use std::sync::{Arc, Barrier}; + use std::thread; + + for i in 0..ITER { + let n = (i % 10) + 1; + + let mut queue = FuturesUnordered::new(); + + for _ in 0..5 { + let barrier = Arc::new(Barrier::new(n + 1)); + + for num in 0..n { + let barrier = barrier.clone(); + let (tx, rx) = oneshot::channel(); + + queue.push(rx); + + thread::spawn(move || { + barrier.wait(); + tx.send(num).unwrap(); + }); + } + + barrier.wait(); + + let mut sync = queue.wait(); + + let mut rx: Vec<_> = (&mut sync) + .take(n) + .map(|res| res.unwrap()) + .collect(); + + assert_eq!(rx.len(), n); + + rx.sort(); + + for num in 0..n { + assert_eq!(rx[num], num); + } + + queue = sync.into_inner(); + } + } +} + +#[test] +fn panicking_future_dropped() { + future::lazy(move || { + let mut queue = FuturesUnordered::new(); + queue.push(future::poll_fn(|| -> Poll<i32, i32> { + panic!() + })); + + let r = 
panic::catch_unwind(AssertUnwindSafe(|| queue.poll())); + assert!(r.is_err()); + assert!(queue.is_empty()); + assert_eq!(Ready(None), queue.poll().unwrap()); + + Ok::<_, ()>(()) + }).wait().unwrap(); +} diff --git a/third_party/rust/futures-0.1.31/tests/recurse.rs b/third_party/rust/futures-0.1.31/tests/recurse.rs new file mode 100644 index 0000000000..a521ed13b7 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/recurse.rs @@ -0,0 +1,25 @@ +#![allow(bare_trait_objects, unknown_lints)] + +extern crate futures; + +use std::sync::mpsc::channel; + +use futures::future::ok; +use futures::prelude::*; + +#[test] +fn lots() { + fn doit(n: usize) -> Box<Future<Item=(), Error=()> + Send> { + if n == 0 { + Box::new(ok(())) + } else { + Box::new(ok(n - 1).and_then(doit)) + } + } + + let (tx, rx) = channel(); + ::std::thread::spawn(|| { + doit(1_000).map(move |_| tx.send(()).unwrap()).wait() + }); + rx.recv().unwrap(); +} diff --git a/third_party/rust/futures-0.1.31/tests/select_all.rs b/third_party/rust/futures-0.1.31/tests/select_all.rs new file mode 100644 index 0000000000..7780aa306d --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/select_all.rs @@ -0,0 +1,27 @@ +extern crate futures; + +use futures::prelude::*; +use futures::future::{ok, select_all, err}; + +#[test] +fn smoke() { + let v = vec![ + ok(1), + err(2), + ok(3), + ]; + + let (i, idx, v) = select_all(v).wait().ok().unwrap(); + assert_eq!(i, 1); + assert_eq!(idx, 0); + + let (i, idx, v) = select_all(v).wait().err().unwrap(); + assert_eq!(i, 2); + assert_eq!(idx, 0); + + let (i, idx, v) = select_all(v).wait().ok().unwrap(); + assert_eq!(i, 3); + assert_eq!(idx, 0); + + assert!(v.is_empty()); +} diff --git a/third_party/rust/futures-0.1.31/tests/select_ok.rs b/third_party/rust/futures-0.1.31/tests/select_ok.rs new file mode 100644 index 0000000000..85f39e2d39 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/select_ok.rs @@ -0,0 +1,40 @@ +extern crate futures; + +use futures::future::*; + 
+#[test] +fn ignore_err() { + let v = vec![ + err(1), + err(2), + ok(3), + ok(4), + ]; + + let (i, v) = select_ok(v).wait().ok().unwrap(); + assert_eq!(i, 3); + + assert_eq!(v.len(), 1); + + let (i, v) = select_ok(v).wait().ok().unwrap(); + assert_eq!(i, 4); + + assert!(v.is_empty()); +} + +#[test] +fn last_err() { + let v = vec![ + ok(1), + err(2), + err(3), + ]; + + let (i, v) = select_ok(v).wait().ok().unwrap(); + assert_eq!(i, 1); + + assert_eq!(v.len(), 2); + + let i = select_ok(v).wait().err().unwrap(); + assert_eq!(i, 3); +} diff --git a/third_party/rust/futures-0.1.31/tests/shared.rs b/third_party/rust/futures-0.1.31/tests/shared.rs new file mode 100644 index 0000000000..97989fe2cb --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/shared.rs @@ -0,0 +1,236 @@ +#![allow(bare_trait_objects, unknown_lints)] + +extern crate futures; + +mod support; + +use std::cell::RefCell; +use std::rc::Rc; +use std::thread; + +use futures::sync::oneshot; +use futures::prelude::*; +use futures::future; + +fn send_shared_oneshot_and_wait_on_multiple_threads(threads_number: u32) { + let (tx, rx) = oneshot::channel::<u32>(); + let f = rx.shared(); + let threads = (0..threads_number).map(|_| { + let cloned_future = f.clone(); + thread::spawn(move || { + assert_eq!(*cloned_future.wait().unwrap(), 6); + }) + }).collect::<Vec<_>>(); + tx.send(6).unwrap(); + assert_eq!(*f.wait().unwrap(), 6); + for f in threads { + f.join().unwrap(); + } +} + +#[test] +fn one_thread() { + send_shared_oneshot_and_wait_on_multiple_threads(1); +} + +#[test] +fn two_threads() { + send_shared_oneshot_and_wait_on_multiple_threads(2); +} + +#[test] +fn many_threads() { + send_shared_oneshot_and_wait_on_multiple_threads(1000); +} + +#[test] +fn drop_on_one_task_ok() { + let (tx, rx) = oneshot::channel::<u32>(); + let f1 = rx.shared(); + let f2 = f1.clone(); + + let (tx2, rx2) = oneshot::channel::<u32>(); + + let t1 = thread::spawn(|| { + let f = f1.map_err(|_| ()).map(|x| *x).select(rx2.map_err(|_| 
())); + drop(f.wait()); + }); + + let (tx3, rx3) = oneshot::channel::<u32>(); + + let t2 = thread::spawn(|| { + let _ = f2.map(|x| tx3.send(*x).unwrap()).map_err(|_| ()).wait(); + }); + + tx2.send(11).unwrap(); // cancel `f1` + t1.join().unwrap(); + + tx.send(42).unwrap(); // Should cause `f2` and then `rx3` to get resolved. + let result = rx3.wait().unwrap(); + assert_eq!(result, 42); + t2.join().unwrap(); +} + +#[test] +fn drop_in_poll() { + let slot = Rc::new(RefCell::new(None)); + let slot2 = slot.clone(); + let future = future::poll_fn(move || { + drop(slot2.borrow_mut().take().unwrap()); + Ok::<_, u32>(1.into()) + }).shared(); + let future2 = Box::new(future.clone()) as Box<Future<Item=_, Error=_>>; + *slot.borrow_mut() = Some(future2); + assert_eq!(*future.wait().unwrap(), 1); +} + +#[test] +fn peek() { + let core = ::support::local_executor::Core::new(); + + let (tx0, rx0) = oneshot::channel::<u32>(); + let f1 = rx0.shared(); + let f2 = f1.clone(); + + // Repeated calls on the original or clone do not change the outcome. + for _ in 0..2 { + assert!(f1.peek().is_none()); + assert!(f2.peek().is_none()); + } + + // Completing the underlying future has no effect, because the value has not been `poll`ed in. + tx0.send(42).unwrap(); + for _ in 0..2 { + assert!(f1.peek().is_none()); + assert!(f2.peek().is_none()); + } + + // Once the Shared has been polled, the value is peekable on the clone. 
+ core.spawn(f1.map(|_|()).map_err(|_|())); + core.run(future::ok::<(),()>(())).unwrap(); + for _ in 0..2 { + assert_eq!(42, *f2.peek().unwrap().unwrap()); + } +} + +#[test] +fn polled_then_ignored() { + let core = ::support::local_executor::Core::new(); + + let (tx0, rx0) = oneshot::channel::<u32>(); + let f1 = rx0.shared(); + let f2 = f1.clone(); + + let (tx1, rx1) = oneshot::channel::<u32>(); + let (tx2, rx2) = oneshot::channel::<u32>(); + let (tx3, rx3) = oneshot::channel::<u32>(); + + core.spawn(f1.map(|n| tx3.send(*n).unwrap()).map_err(|_|())); + + core.run(future::ok::<(),()>(())).unwrap(); // Allow f1 to be polled. + + core.spawn(f2.map_err(|_| ()).map(|x| *x).select(rx2.map_err(|_| ())).map_err(|_| ()) + .and_then(|(_, f2)| rx3.map_err(|_| ()).map(move |n| {drop(f2); tx1.send(n).unwrap()}))); + + core.run(future::ok::<(),()>(())).unwrap(); // Allow f2 to be polled. + + tx2.send(11).unwrap(); // Resolve rx2, causing f2 to no longer get polled. + + core.run(future::ok::<(),()>(())).unwrap(); // Let the send() propagate. + + tx0.send(42).unwrap(); // Should cause f1, then rx3, and then rx1 to resolve. + + assert_eq!(core.run(rx1).unwrap(), 42); +} + +#[test] +fn recursive_poll() { + use futures::sync::mpsc; + use futures::Stream; + + let core = ::support::local_executor::Core::new(); + let (tx0, rx0) = mpsc::unbounded::<Box<Future<Item=(),Error=()>>>(); + let run_stream = rx0.for_each(|f| f); + + let (tx1, rx1) = oneshot::channel::<()>(); + + let f1 = run_stream.shared(); + let f2 = f1.clone(); + let f3 = f1.clone(); + tx0.unbounded_send(Box::new( + f1.map(|_|()).map_err(|_|()) + .select(rx1.map_err(|_|())) + .map(|_| ()).map_err(|_|()))).unwrap(); + + core.spawn(f2.map(|_|()).map_err(|_|())); + + // Call poll() on the spawned future. We want to be sure that this does not trigger a + // deadlock or panic due to a recursive lock() on a mutex. + core.run(future::ok::<(),()>(())).unwrap(); + + tx1.send(()).unwrap(); // Break the cycle. 
+ drop(tx0); + core.run(f3).unwrap(); +} + +#[test] +fn recursive_poll_with_unpark() { + use futures::sync::mpsc; + use futures::{Stream, task}; + + let core = ::support::local_executor::Core::new(); + let (tx0, rx0) = mpsc::unbounded::<Box<Future<Item=(),Error=()>>>(); + let run_stream = rx0.for_each(|f| f); + + let (tx1, rx1) = oneshot::channel::<()>(); + + let f1 = run_stream.shared(); + let f2 = f1.clone(); + let f3 = f1.clone(); + tx0.unbounded_send(Box::new(future::lazy(move || { + task::current().notify(); + f1.map(|_|()).map_err(|_|()) + .select(rx1.map_err(|_|())) + .map(|_| ()).map_err(|_|()) + }))).unwrap(); + + core.spawn(f2.map(|_|()).map_err(|_|())); + + // Call poll() on the spawned future. We want to be sure that this does not trigger a + // deadlock or panic due to a recursive lock() on a mutex. + core.run(future::ok::<(),()>(())).unwrap(); + + tx1.send(()).unwrap(); // Break the cycle. + drop(tx0); + core.run(f3).unwrap(); +} + +#[test] +fn shared_future_that_wakes_itself_until_pending_is_returned() { + use futures::Async; + use std::cell::Cell; + + let core = ::support::local_executor::Core::new(); + + let proceed = Cell::new(false); + let fut = futures::future::poll_fn(|| { + Ok::<_, ()>(if proceed.get() { + Async::Ready(()) + } else { + futures::task::current().notify(); + Async::NotReady + }) + }) + .shared() + .map(|_| ()) + .map_err(|_| ()); + + // The join future can only complete if the second future gets a chance to run after the first + // has returned pending + let second = futures::future::lazy(|| { + proceed.set(true); + Ok::<_, ()>(()) + }); + + core.run(fut.join(second)).unwrap(); +} diff --git a/third_party/rust/futures-0.1.31/tests/sink.rs b/third_party/rust/futures-0.1.31/tests/sink.rs new file mode 100644 index 0000000000..460dbdf20c --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/sink.rs @@ -0,0 +1,446 @@ +#![allow(bare_trait_objects, unknown_lints)] + +extern crate futures; + +use std::mem; +use std::sync::Arc; +use 
std::rc::Rc; +use std::cell::{Cell, RefCell}; +use std::sync::atomic::{Ordering, AtomicBool}; + +use futures::prelude::*; +use futures::future::ok; +use futures::stream; +use futures::sync::{oneshot, mpsc}; +use futures::task::{self, Task}; +use futures::executor::{self, Notify}; +use futures::sink::SinkFromErr; + +mod support; +use support::*; + +#[test] +fn vec_sink() { + let mut v = Vec::new(); + assert_eq!(v.start_send(0), Ok(AsyncSink::Ready)); + assert_eq!(v.start_send(1), Ok(AsyncSink::Ready)); + assert_eq!(v, vec![0, 1]); + assert_done(move || v.flush(), Ok(vec![0, 1])); +} + +#[test] +fn send() { + let v = Vec::new(); + + let v = v.send(0).wait().unwrap(); + assert_eq!(v, vec![0]); + + let v = v.send(1).wait().unwrap(); + assert_eq!(v, vec![0, 1]); + + assert_done(move || v.send(2), + Ok(vec![0, 1, 2])); +} + +#[test] +fn send_all() { + let v = Vec::new(); + + let (v, _) = v.send_all(stream::iter_ok(vec![0, 1])).wait().unwrap(); + assert_eq!(v, vec![0, 1]); + + let (v, _) = v.send_all(stream::iter_ok(vec![2, 3])).wait().unwrap(); + assert_eq!(v, vec![0, 1, 2, 3]); + + assert_done( + move || v.send_all(stream::iter_ok(vec![4, 5])).map(|(v, _)| v), + Ok(vec![0, 1, 2, 3, 4, 5])); +} + +// An Unpark struct that records unpark events for inspection +struct Flag(pub AtomicBool); + +impl Flag { + fn new() -> Arc<Flag> { + Arc::new(Flag(AtomicBool::new(false))) + } + + fn get(&self) -> bool { + self.0.load(Ordering::SeqCst) + } + + fn set(&self, v: bool) { + self.0.store(v, Ordering::SeqCst) + } +} + +impl Notify for Flag { + fn notify(&self, _id: usize) { + self.set(true) + } +} + +// Sends a value on an i32 channel sink +struct StartSendFut<S: Sink>(Option<S>, Option<S::SinkItem>); + +impl<S: Sink> StartSendFut<S> { + fn new(sink: S, item: S::SinkItem) -> StartSendFut<S> { + StartSendFut(Some(sink), Some(item)) + } +} + +impl<S: Sink> Future for StartSendFut<S> { + type Item = S; + type Error = S::SinkError; + + fn poll(&mut self) -> Poll<S, S::SinkError> { + 
match self.0.as_mut().unwrap().start_send(self.1.take().unwrap())? { + AsyncSink::Ready => Ok(Async::Ready(self.0.take().unwrap())), + AsyncSink::NotReady(item) => { + self.1 = Some(item); + Ok(Async::NotReady) + } + } + + } +} + +#[test] +// Test that `start_send` on an `mpsc` channel does indeed block when the +// channel is full +fn mpsc_blocking_start_send() { + let (mut tx, mut rx) = mpsc::channel::<i32>(0); + + futures::future::lazy(|| { + assert_eq!(tx.start_send(0).unwrap(), AsyncSink::Ready); + + let flag = Flag::new(); + let mut task = executor::spawn(StartSendFut::new(tx, 1)); + + assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready()); + assert!(!flag.get()); + sassert_next(&mut rx, 0); + assert!(flag.get()); + flag.set(false); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_ready()); + assert!(!flag.get()); + sassert_next(&mut rx, 1); + + Ok::<(), ()>(()) + }).wait().unwrap(); +} + +#[test] +// test `flush` by using `with` to make the first insertion into a sink block +// until a oneshot is completed +fn with_flush() { + let (tx, rx) = oneshot::channel(); + let mut block = Box::new(rx) as Box<Future<Item = _, Error = _>>; + let mut sink = Vec::new().with(|elem| { + mem::replace(&mut block, Box::new(ok(()))) + .map(move |_| elem + 1).map_err(|_| -> () { panic!() }) + }); + + assert_eq!(sink.start_send(0), Ok(AsyncSink::Ready)); + + let flag = Flag::new(); + let mut task = executor::spawn(sink.flush()); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready()); + tx.send(()).unwrap(); + assert!(flag.get()); + + let sink = match task.poll_future_notify(&flag, 0).unwrap() { + Async::Ready(sink) => sink, + _ => panic!() + }; + + assert_eq!(sink.send(1).wait().unwrap().get_ref(), &[1, 2]); +} + +#[test] +// test simple use of with to change data +fn with_as_map() { + let sink = Vec::new().with(|item| -> Result<i32, ()> { + Ok(item * 2) + }); + let sink = sink.send(0).wait().unwrap(); + let sink = sink.send(1).wait().unwrap(); + 
let sink = sink.send(2).wait().unwrap(); + assert_eq!(sink.get_ref(), &[0, 2, 4]); +} + +#[test] +// test simple use of with_flat_map +fn with_flat_map() { + let sink = Vec::new().with_flat_map(|item| { + stream::iter_ok(vec![item; item]) + }); + let sink = sink.send(0).wait().unwrap(); + let sink = sink.send(1).wait().unwrap(); + let sink = sink.send(2).wait().unwrap(); + let sink = sink.send(3).wait().unwrap(); + assert_eq!(sink.get_ref(), &[1,2,2,3,3,3]); +} + +// Immediately accepts all requests to start pushing, but completion is managed +// by manually flushing +struct ManualFlush<T> { + data: Vec<T>, + waiting_tasks: Vec<Task>, +} + +impl<T> Sink for ManualFlush<T> { + type SinkItem = Option<T>; // Pass None to flush + type SinkError = (); + + fn start_send(&mut self, op: Option<T>) -> StartSend<Option<T>, ()> { + if let Some(item) = op { + self.data.push(item); + } else { + self.force_flush(); + } + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), ()> { + if self.data.is_empty() { + Ok(Async::Ready(())) + } else { + self.waiting_tasks.push(task::current()); + Ok(Async::NotReady) + } + } + + fn close(&mut self) -> Poll<(), ()> { + Ok(().into()) + } +} + +impl<T> ManualFlush<T> { + fn new() -> ManualFlush<T> { + ManualFlush { + data: Vec::new(), + waiting_tasks: Vec::new() + } + } + + fn force_flush(&mut self) -> Vec<T> { + for task in self.waiting_tasks.drain(..) 
{ + task.notify() + } + mem::replace(&mut self.data, Vec::new()) + } +} + +#[test] +// test that the `with` sink doesn't require the underlying sink to flush, +// but doesn't claim to be flushed until the underlying sink is +fn with_flush_propagate() { + let mut sink = ManualFlush::new().with(|x| -> Result<Option<i32>, ()> { Ok(x) }); + assert_eq!(sink.start_send(Some(0)).unwrap(), AsyncSink::Ready); + assert_eq!(sink.start_send(Some(1)).unwrap(), AsyncSink::Ready); + + let flag = Flag::new(); + let mut task = executor::spawn(sink.flush()); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready()); + assert!(!flag.get()); + assert_eq!(task.get_mut().get_mut().get_mut().force_flush(), vec![0, 1]); + assert!(flag.get()); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_ready()); +} + +#[test] +// test that a buffer is a no-nop around a sink that always accepts sends +fn buffer_noop() { + let sink = Vec::new().buffer(0); + let sink = sink.send(0).wait().unwrap(); + let sink = sink.send(1).wait().unwrap(); + assert_eq!(sink.get_ref(), &[0, 1]); + + let sink = Vec::new().buffer(1); + let sink = sink.send(0).wait().unwrap(); + let sink = sink.send(1).wait().unwrap(); + assert_eq!(sink.get_ref(), &[0, 1]); +} + +struct ManualAllow<T> { + data: Vec<T>, + allow: Rc<Allow>, +} + +struct Allow { + flag: Cell<bool>, + tasks: RefCell<Vec<Task>>, +} + +impl Allow { + fn new() -> Allow { + Allow { + flag: Cell::new(false), + tasks: RefCell::new(Vec::new()), + } + } + + fn check(&self) -> bool { + if self.flag.get() { + true + } else { + self.tasks.borrow_mut().push(task::current()); + false + } + } + + fn start(&self) { + self.flag.set(true); + let mut tasks = self.tasks.borrow_mut(); + for task in tasks.drain(..) 
{ + task.notify(); + } + } +} + +impl<T> Sink for ManualAllow<T> { + type SinkItem = T; + type SinkError = (); + + fn start_send(&mut self, item: T) -> StartSend<T, ()> { + if self.allow.check() { + self.data.push(item); + Ok(AsyncSink::Ready) + } else { + Ok(AsyncSink::NotReady(item)) + } + } + + fn poll_complete(&mut self) -> Poll<(), ()> { + Ok(Async::Ready(())) + } + + fn close(&mut self) -> Poll<(), ()> { + Ok(().into()) + } +} + +fn manual_allow<T>() -> (ManualAllow<T>, Rc<Allow>) { + let allow = Rc::new(Allow::new()); + let manual_allow = ManualAllow { + data: Vec::new(), + allow: allow.clone(), + }; + (manual_allow, allow) +} + +#[test] +// test basic buffer functionality, including both filling up to capacity, +// and writing out when the underlying sink is ready +fn buffer() { + let (sink, allow) = manual_allow::<i32>(); + let sink = sink.buffer(2); + + let sink = StartSendFut::new(sink, 0).wait().unwrap(); + let sink = StartSendFut::new(sink, 1).wait().unwrap(); + + let flag = Flag::new(); + let mut task = executor::spawn(sink.send(2)); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready()); + assert!(!flag.get()); + allow.start(); + assert!(flag.get()); + match task.poll_future_notify(&flag, 0).unwrap() { + Async::Ready(sink) => { + assert_eq!(sink.get_ref().data, vec![0, 1, 2]); + } + _ => panic!() + } +} + +#[test] +fn fanout_smoke() { + let sink1 = Vec::new(); + let sink2 = Vec::new(); + let sink = sink1.fanout(sink2); + let stream = futures::stream::iter_ok(vec![1,2,3]); + let (sink, _) = sink.send_all(stream).wait().unwrap(); + let (sink1, sink2) = sink.into_inner(); + assert_eq!(sink1, vec![1,2,3]); + assert_eq!(sink2, vec![1,2,3]); +} + +#[test] +fn fanout_backpressure() { + let (left_send, left_recv) = mpsc::channel(0); + let (right_send, right_recv) = mpsc::channel(0); + let sink = left_send.fanout(right_send); + + let sink = StartSendFut::new(sink, 0).wait().unwrap(); + let sink = StartSendFut::new(sink, 1).wait().unwrap(); + + 
let flag = Flag::new(); + let mut task = executor::spawn(sink.send(2)); + assert!(!flag.get()); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready()); + let (item, left_recv) = left_recv.into_future().wait().unwrap(); + assert_eq!(item, Some(0)); + assert!(flag.get()); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready()); + let (item, right_recv) = right_recv.into_future().wait().unwrap(); + assert_eq!(item, Some(0)); + assert!(flag.get()); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready()); + let (item, left_recv) = left_recv.into_future().wait().unwrap(); + assert_eq!(item, Some(1)); + assert!(flag.get()); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready()); + let (item, right_recv) = right_recv.into_future().wait().unwrap(); + assert_eq!(item, Some(1)); + assert!(flag.get()); + let (item, left_recv) = left_recv.into_future().wait().unwrap(); + assert_eq!(item, Some(2)); + assert!(flag.get()); + assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready()); + let (item, right_recv) = right_recv.into_future().wait().unwrap(); + assert_eq!(item, Some(2)); + match task.poll_future_notify(&flag, 0).unwrap() { + Async::Ready(_) => { + }, + _ => panic!() + }; + // make sure receivers live until end of test to prevent send errors + drop(left_recv); + drop(right_recv); +} + +#[test] +fn map_err() { + { + let (tx, _rx) = mpsc::channel(1); + let mut tx = tx.sink_map_err(|_| ()); + assert_eq!(tx.start_send(()), Ok(AsyncSink::Ready)); + assert_eq!(tx.poll_complete(), Ok(Async::Ready(()))); + } + + let tx = mpsc::channel(0).0; + assert_eq!(tx.sink_map_err(|_| ()).start_send(()), Err(())); +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +struct FromErrTest; + +impl<T> From<mpsc::SendError<T>> for FromErrTest { + fn from(_: mpsc::SendError<T>) -> FromErrTest { + FromErrTest + } +} + +#[test] +fn from_err() { + { + let (tx, _rx) = mpsc::channel(1); + let mut tx: SinkFromErr<mpsc::Sender<()>, FromErrTest> = 
tx.sink_from_err(); + assert_eq!(tx.start_send(()), Ok(AsyncSink::Ready)); + assert_eq!(tx.poll_complete(), Ok(Async::Ready(()))); + } + + let tx = mpsc::channel(0).0; + assert_eq!(tx.sink_from_err().start_send(()), Err(FromErrTest)); +} diff --git a/third_party/rust/futures-0.1.31/tests/split.rs b/third_party/rust/futures-0.1.31/tests/split.rs new file mode 100644 index 0000000000..7a0667f135 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/split.rs @@ -0,0 +1,47 @@ +extern crate futures; + +use futures::prelude::*; +use futures::stream::iter_ok; + +struct Join<T, U>(T, U); + +impl<T: Stream, U> Stream for Join<T, U> { + type Item = T::Item; + type Error = T::Error; + + fn poll(&mut self) -> Poll<Option<T::Item>, T::Error> { + self.0.poll() + } +} + +impl<T, U: Sink> Sink for Join<T, U> { + type SinkItem = U::SinkItem; + type SinkError = U::SinkError; + + fn start_send(&mut self, item: U::SinkItem) + -> StartSend<U::SinkItem, U::SinkError> + { + self.1.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), U::SinkError> { + self.1.poll_complete() + } + + fn close(&mut self) -> Poll<(), U::SinkError> { + self.1.close() + } +} + +#[test] +fn test_split() { + let mut dest = Vec::new(); + { + let j = Join(iter_ok(vec![10, 20, 30]), &mut dest); + let (sink, stream) = j.split(); + let j = sink.reunite(stream).expect("test_split: reunite error"); + let (sink, stream) = j.split(); + sink.send_all(stream).wait().unwrap(); + } + assert_eq!(dest, vec![10, 20, 30]); +} diff --git a/third_party/rust/futures-0.1.31/tests/stream.rs b/third_party/rust/futures-0.1.31/tests/stream.rs new file mode 100644 index 0000000000..2400a2abb1 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/stream.rs @@ -0,0 +1,416 @@ +#![allow(bare_trait_objects, unknown_lints)] + +#[macro_use] +extern crate futures; + +use futures::prelude::*; +use futures::executor; +use futures::future::{err, ok}; +use futures::stream::{empty, iter_ok, poll_fn, Peekable}; +use 
futures::sync::oneshot; +use futures::sync::mpsc; + +mod support; +use support::*; + +pub struct Iter<I> { + iter: I, +} + +pub fn iter<J, T, E>(i: J) -> Iter<J::IntoIter> + where J: IntoIterator<Item=Result<T, E>>, +{ + Iter { + iter: i.into_iter(), + } +} + +impl<I, T, E> Stream for Iter<I> + where I: Iterator<Item=Result<T, E>>, +{ + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll<Option<T>, E> { + match self.iter.next() { + Some(Ok(e)) => Ok(Async::Ready(Some(e))), + Some(Err(e)) => Err(e), + None => Ok(Async::Ready(None)), + } + } +} + +fn list() -> Box<Stream<Item=i32, Error=u32> + Send> { + let (tx, rx) = mpsc::channel(1); + tx.send(Ok(1)) + .and_then(|tx| tx.send(Ok(2))) + .and_then(|tx| tx.send(Ok(3))) + .forget(); + Box::new(rx.then(|r| r.unwrap())) +} + +fn err_list() -> Box<Stream<Item=i32, Error=u32> + Send> { + let (tx, rx) = mpsc::channel(1); + tx.send(Ok(1)) + .and_then(|tx| tx.send(Ok(2))) + .and_then(|tx| tx.send(Err(3))) + .forget(); + Box::new(rx.then(|r| r.unwrap())) +} + +#[test] +fn map() { + assert_done(|| list().map(|a| a + 1).collect(), Ok(vec![2, 3, 4])); +} + +#[test] +fn map_err() { + assert_done(|| err_list().map_err(|a| a + 1).collect(), Err(4)); +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +struct FromErrTest(u32); + +impl From<u32> for FromErrTest { + fn from(i: u32) -> FromErrTest { + FromErrTest(i) + } +} + +#[test] +fn from_err() { + assert_done(|| err_list().from_err().collect(), Err(FromErrTest(3))); +} + +#[test] +fn fold() { + assert_done(|| list().fold(0, |a, b| ok::<i32, u32>(a + b)), Ok(6)); + assert_done(|| err_list().fold(0, |a, b| ok::<i32, u32>(a + b)), Err(3)); +} + +#[test] +fn filter() { + assert_done(|| list().filter(|a| *a % 2 == 0).collect(), Ok(vec![2])); +} + +#[test] +fn filter_map() { + assert_done(|| list().filter_map(|x| { + if x % 2 == 0 { + Some(x + 10) + } else { + None + } + }).collect(), Ok(vec![12])); +} + +#[test] +fn and_then() { + assert_done(|| list().and_then(|a| Ok(a + 
1)).collect(), Ok(vec![2, 3, 4])); + assert_done(|| list().and_then(|a| err::<i32, u32>(a as u32)).collect(), + Err(1)); +} + +#[test] +fn then() { + assert_done(|| list().then(|a| a.map(|e| e + 1)).collect(), Ok(vec![2, 3, 4])); + +} + +#[test] +fn or_else() { + assert_done(|| err_list().or_else(|a| { + ok::<i32, u32>(a as i32) + }).collect(), Ok(vec![1, 2, 3])); +} + +#[test] +fn flatten() { + assert_done(|| list().map(|_| list()).flatten().collect(), + Ok(vec![1, 2, 3, 1, 2, 3, 1, 2, 3])); + +} + +#[test] +fn skip() { + assert_done(|| list().skip(2).collect(), Ok(vec![3])); +} + +#[test] +fn skip_passes_errors_through() { + let mut s = iter(vec![Err(1), Err(2), Ok(3), Ok(4), Ok(5)]) + .skip(1) + .wait(); + assert_eq!(s.next(), Some(Err(1))); + assert_eq!(s.next(), Some(Err(2))); + assert_eq!(s.next(), Some(Ok(4))); + assert_eq!(s.next(), Some(Ok(5))); + assert_eq!(s.next(), None); +} + +#[test] +fn skip_while() { + assert_done(|| list().skip_while(|e| Ok(*e % 2 == 1)).collect(), + Ok(vec![2, 3])); +} +#[test] +fn take() { + assert_done(|| list().take(2).collect(), Ok(vec![1, 2])); +} + +#[test] +fn take_while() { + assert_done(|| list().take_while(|e| Ok(*e < 3)).collect(), + Ok(vec![1, 2])); +} + +#[test] +fn take_passes_errors_through() { + let mut s = iter(vec![Err(1), Err(2), Ok(3), Ok(4), Err(4)]) + .take(1) + .wait(); + assert_eq!(s.next(), Some(Err(1))); + assert_eq!(s.next(), Some(Err(2))); + assert_eq!(s.next(), Some(Ok(3))); + assert_eq!(s.next(), None); + + let mut s = iter(vec![Ok(1), Err(2)]).take(1).wait(); + assert_eq!(s.next(), Some(Ok(1))); + assert_eq!(s.next(), None); +} + +#[test] +fn peekable() { + assert_done(|| list().peekable().collect(), Ok(vec![1, 2, 3])); +} + +#[test] +fn fuse() { + let mut stream = list().fuse().wait(); + assert_eq!(stream.next(), Some(Ok(1))); + assert_eq!(stream.next(), Some(Ok(2))); + assert_eq!(stream.next(), Some(Ok(3))); + assert_eq!(stream.next(), None); + assert_eq!(stream.next(), None); + 
assert_eq!(stream.next(), None); +} + +#[test] +fn buffered() { + let (tx, rx) = mpsc::channel(1); + let (a, b) = oneshot::channel::<u32>(); + let (c, d) = oneshot::channel::<u32>(); + + tx.send(Box::new(b.map_err(|_| ())) as Box<Future<Item = _, Error = _> + Send>) + .and_then(|tx| tx.send(Box::new(d.map_err(|_| ())))) + .forget(); + + let mut rx = rx.buffered(2); + sassert_empty(&mut rx); + c.send(3).unwrap(); + sassert_empty(&mut rx); + a.send(5).unwrap(); + let mut rx = rx.wait(); + assert_eq!(rx.next(), Some(Ok(5))); + assert_eq!(rx.next(), Some(Ok(3))); + assert_eq!(rx.next(), None); + + let (tx, rx) = mpsc::channel(1); + let (a, b) = oneshot::channel::<u32>(); + let (c, d) = oneshot::channel::<u32>(); + + tx.send(Box::new(b.map_err(|_| ())) as Box<Future<Item = _, Error = _> + Send>) + .and_then(|tx| tx.send(Box::new(d.map_err(|_| ())))) + .forget(); + + let mut rx = rx.buffered(1); + sassert_empty(&mut rx); + c.send(3).unwrap(); + sassert_empty(&mut rx); + a.send(5).unwrap(); + let mut rx = rx.wait(); + assert_eq!(rx.next(), Some(Ok(5))); + assert_eq!(rx.next(), Some(Ok(3))); + assert_eq!(rx.next(), None); +} + +#[test] +fn unordered() { + let (tx, rx) = mpsc::channel(1); + let (a, b) = oneshot::channel::<u32>(); + let (c, d) = oneshot::channel::<u32>(); + + tx.send(Box::new(b.map_err(|_| ())) as Box<Future<Item = _, Error = _> + Send>) + .and_then(|tx| tx.send(Box::new(d.map_err(|_| ())))) + .forget(); + + let mut rx = rx.buffer_unordered(2); + sassert_empty(&mut rx); + let mut rx = rx.wait(); + c.send(3).unwrap(); + assert_eq!(rx.next(), Some(Ok(3))); + a.send(5).unwrap(); + assert_eq!(rx.next(), Some(Ok(5))); + assert_eq!(rx.next(), None); + + let (tx, rx) = mpsc::channel(1); + let (a, b) = oneshot::channel::<u32>(); + let (c, d) = oneshot::channel::<u32>(); + + tx.send(Box::new(b.map_err(|_| ())) as Box<Future<Item = _, Error = _> + Send>) + .and_then(|tx| tx.send(Box::new(d.map_err(|_| ())))) + .forget(); + + // We don't even get to see `c` until `a` 
completes. + let mut rx = rx.buffer_unordered(1); + sassert_empty(&mut rx); + c.send(3).unwrap(); + sassert_empty(&mut rx); + a.send(5).unwrap(); + let mut rx = rx.wait(); + assert_eq!(rx.next(), Some(Ok(5))); + assert_eq!(rx.next(), Some(Ok(3))); + assert_eq!(rx.next(), None); +} + +#[test] +fn zip() { + assert_done(|| list().zip(list()).collect(), + Ok(vec![(1, 1), (2, 2), (3, 3)])); + assert_done(|| list().zip(list().take(2)).collect(), + Ok(vec![(1, 1), (2, 2)])); + assert_done(|| list().take(2).zip(list()).collect(), + Ok(vec![(1, 1), (2, 2)])); + assert_done(|| err_list().zip(list()).collect(), Err(3)); + assert_done(|| list().zip(list().map(|x| x + 1)).collect(), + Ok(vec![(1, 2), (2, 3), (3, 4)])); +} + +#[test] +fn peek() { + struct Peek { + inner: Peekable<Box<Stream<Item = i32, Error =u32> + Send>> + } + + impl Future for Peek { + type Item = (); + type Error = u32; + + fn poll(&mut self) -> Poll<(), u32> { + { + let res = try_ready!(self.inner.peek()); + assert_eq!(res, Some(&1)); + } + assert_eq!(self.inner.peek().unwrap(), Some(&1).into()); + assert_eq!(self.inner.poll().unwrap(), Some(1).into()); + Ok(().into()) + } + } + + Peek { + inner: list().peekable(), + }.wait().unwrap() +} + +#[test] +fn wait() { + assert_eq!(list().wait().collect::<Result<Vec<_>, _>>(), + Ok(vec![1, 2, 3])); +} + +#[test] +fn chunks() { + assert_done(|| list().chunks(3).collect(), Ok(vec![vec![1, 2, 3]])); + assert_done(|| list().chunks(1).collect(), Ok(vec![vec![1], vec![2], vec![3]])); + assert_done(|| list().chunks(2).collect(), Ok(vec![vec![1, 2], vec![3]])); + let mut list = executor::spawn(err_list().chunks(3)); + let i = list.wait_stream().unwrap().unwrap(); + assert_eq!(i, vec![1, 2]); + let i = list.wait_stream().unwrap().unwrap_err(); + assert_eq!(i, 3); +} + +#[test] +#[should_panic] +fn chunks_panic_on_cap_zero() { + let _ = list().chunks(0); +} + +#[test] +fn select() { + let a = iter_ok::<_, u32>(vec![1, 2, 3]); + let b = iter_ok(vec![4, 5, 6]); + 
assert_done(|| a.select(b).collect(), Ok(vec![1, 4, 2, 5, 3, 6])); + + let a = iter_ok::<_, u32>(vec![1, 2, 3]); + let b = iter_ok(vec![1, 2]); + assert_done(|| a.select(b).collect(), Ok(vec![1, 1, 2, 2, 3])); + + let a = iter_ok(vec![1, 2]); + let b = iter_ok::<_, u32>(vec![1, 2, 3]); + assert_done(|| a.select(b).collect(), Ok(vec![1, 1, 2, 2, 3])); +} + +#[test] +fn forward() { + let v = Vec::new(); + let v = iter_ok::<_, ()>(vec![0, 1]).forward(v).wait().unwrap().1; + assert_eq!(v, vec![0, 1]); + + let v = iter_ok::<_, ()>(vec![2, 3]).forward(v).wait().unwrap().1; + assert_eq!(v, vec![0, 1, 2, 3]); + + assert_done(move || iter_ok(vec![4, 5]).forward(v).map(|(_, s)| s), + Ok::<_, ()>(vec![0, 1, 2, 3, 4, 5])); +} + +#[test] +#[allow(deprecated)] +fn concat() { + let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]); + assert_done(move || a.concat(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9])); + + let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]); + assert_done(move || b.concat(), Err(())); +} + +#[test] +fn concat2() { + let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]); + assert_done(move || a.concat2(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9])); + + let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]); + assert_done(move || b.concat2(), Err(())); + + let c = empty::<Vec<()>, ()>(); + assert_done(move || c.concat2(), Ok(vec![])) +} + +#[test] +fn stream_poll_fn() { + let mut counter = 5usize; + + let read_stream = poll_fn(move || -> Poll<Option<usize>, std::io::Error> { + if counter == 0 { + return Ok(Async::Ready(None)); + } + counter -= 1; + Ok(Async::Ready(Some(counter))) + }); + + assert_eq!(read_stream.wait().count(), 5); +} + +#[test] +fn inspect() { + let mut seen = vec![]; + assert_done(|| list().inspect(|&a| seen.push(a)).collect(), Ok(vec![1, 2, 3])); + assert_eq!(seen, [1, 2, 3]); +} + +#[test] +fn inspect_err() { + let mut seen = vec![]; + assert_done(|| 
err_list().inspect_err(|&a| seen.push(a)).collect(), Err(3)); + assert_eq!(seen, [3]); +} diff --git a/third_party/rust/futures-0.1.31/tests/stream_catch_unwind.rs b/third_party/rust/futures-0.1.31/tests/stream_catch_unwind.rs new file mode 100644 index 0000000000..a06748d09a --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/stream_catch_unwind.rs @@ -0,0 +1,29 @@ +extern crate futures; + +use futures::stream; +use futures::prelude::*; + +#[test] +fn panic_in_the_middle_of_the_stream() { + let stream = stream::iter_ok::<_, bool>(vec![Some(10), None, Some(11)]); + + // panic on second element + let stream_panicking = stream.map(|o| o.unwrap()); + let mut iter = stream_panicking.catch_unwind().wait(); + + assert_eq!(Ok(10), iter.next().unwrap().ok().unwrap()); + assert!(iter.next().unwrap().is_err()); + assert!(iter.next().is_none()); +} + +#[test] +fn no_panic() { + let stream = stream::iter_ok::<_, bool>(vec![10, 11, 12]); + + let mut iter = stream.catch_unwind().wait(); + + assert_eq!(Ok(10), iter.next().unwrap().ok().unwrap()); + assert_eq!(Ok(11), iter.next().unwrap().ok().unwrap()); + assert_eq!(Ok(12), iter.next().unwrap().ok().unwrap()); + assert!(iter.next().is_none()); +} diff --git a/third_party/rust/futures-0.1.31/tests/support/local_executor.rs b/third_party/rust/futures-0.1.31/tests/support/local_executor.rs new file mode 100644 index 0000000000..cf89e8152f --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/support/local_executor.rs @@ -0,0 +1,164 @@ +//! Execution of futures on a single thread +//! +//! This module has no special handling of any blocking operations other than +//! futures-aware inter-thread communications, and is not intended to be used to +//! manage I/O. For futures that do I/O you'll likely want to use `tokio-core`. 
+ +#![allow(bare_trait_objects, unknown_lints)] + +use std::cell::{Cell, RefCell}; +use std::sync::{Arc, Mutex, mpsc}; + +use futures::executor::{self, Spawn, Notify}; +use futures::future::{Executor, ExecuteError}; +use futures::{Future, Async}; + +/// Main loop object +pub struct Core { + tx: mpsc::Sender<usize>, + rx: mpsc::Receiver<usize>, + notify: Arc<MyNotify>, + + // Slab of running futures used to track what's running and what slots are + // empty. Slot indexes are then sent along tx/rx above to indicate which + // future is ready to get polled. + tasks: RefCell<Vec<Slot>>, + next_vacant: Cell<usize>, +} + +enum Slot { + Vacant { next_vacant: usize }, + Running(Option<Spawn<Box<Future<Item = (), Error = ()>>>>), +} + +impl Core { + /// Create a new `Core`. + pub fn new() -> Self { + let (tx, rx) = mpsc::channel(); + Core { + notify: Arc::new(MyNotify { + tx: Mutex::new(tx.clone()), + }), + tx: tx, + rx: rx, + next_vacant: Cell::new(0), + tasks: RefCell::new(Vec::new()), + } + } + + /// Spawn a future to be executed by a future call to `run`. + /// + /// The future `f` provided will not be executed until `run` is called + /// below. While futures passed to `run` are executing, the future provided + /// here will be executed concurrently as well. + pub fn spawn<F>(&self, f: F) + where F: Future<Item=(), Error=()> + 'static + { + let idx = self.next_vacant.get(); + let mut tasks = self.tasks.borrow_mut(); + match tasks.get_mut(idx) { + Some(&mut Slot::Vacant { next_vacant }) => { + self.next_vacant.set(next_vacant); + } + Some(&mut Slot::Running (_)) => { + panic!("vacant points to running future") + } + None => { + assert_eq!(idx, tasks.len()); + tasks.push(Slot::Vacant { next_vacant: 0 }); + self.next_vacant.set(idx + 1); + } + } + tasks[idx] = Slot::Running(Some(executor::spawn(Box::new(f)))); + self.tx.send(idx).unwrap(); + } + + /// Run the loop until the future `f` completes. 
+ /// + /// This method will block the current thread until the future `f` has + /// resolved. While waiting on `f` to finish it will also execute any + /// futures spawned via `spawn` above. + pub fn run<F>(&self, f: F) -> Result<F::Item, F::Error> + where F: Future, + { + let id = usize::max_value(); + self.tx.send(id).unwrap(); + let mut f = executor::spawn(f); + loop { + if self.turn() { + match f.poll_future_notify(&self.notify, id)? { + Async::Ready(e) => return Ok(e), + Async::NotReady => {} + } + } + } + } + + /// "Turns" this event loop one tick. + /// + /// This'll block the current thread until something happens, and once an + /// event happens this will act on that event. + /// + /// # Return value + /// + /// Returns `true` if the future passed to `run` should be polled or `false` + /// otherwise. + fn turn(&self) -> bool { + let task_id = self.rx.recv().unwrap(); + if task_id == usize::max_value() { + return true + } + + // This may be a spurious wakeup so we're not guaranteed to have a + // future associated with `task_id`, so do a fallible lookup. + // + // Note that we don't want to borrow `self.tasks` for too long so we + // try to extract the future here and leave behind a tombstone future + // which'll get replaced or removed later. This is how we support + // spawn-in-run. + let mut future = match self.tasks.borrow_mut().get_mut(task_id) { + Some(&mut Slot::Running(ref mut future)) => future.take().unwrap(), + Some(&mut Slot::Vacant { .. }) => return false, + None => return false, + }; + + // Drive this future forward. If it's done we remove it and if it's not + // done then we put it back in the tasks array. 
+ let done = match future.poll_future_notify(&self.notify, task_id) { + Ok(Async::Ready(())) | Err(()) => true, + Ok(Async::NotReady) => false, + }; + let mut tasks = self.tasks.borrow_mut(); + if done { + tasks[task_id] = Slot::Vacant { next_vacant: self.next_vacant.get() }; + self.next_vacant.set(task_id); + } else { + tasks[task_id] = Slot::Running(Some(future)); + } + + return false + } +} + +impl<F> Executor<F> for Core + where F: Future<Item = (), Error = ()> + 'static, +{ + fn execute(&self, future: F) -> Result<(), ExecuteError<F>> { + self.spawn(future); + Ok(()) + } +} + +struct MyNotify { + // TODO: it's pretty unfortunate to use a `Mutex` here where the `Sender` + // itself is basically `Sync` as-is. Ideally this'd use something like + // an off-the-shelf mpsc queue as well as `thread::park` and + // `Thread::unpark`. + tx: Mutex<mpsc::Sender<usize>>, +} + +impl Notify for MyNotify { + fn notify(&self, id: usize) { + drop(self.tx.lock().unwrap().send(id)); + } +} diff --git a/third_party/rust/futures-0.1.31/tests/support/mod.rs b/third_party/rust/futures-0.1.31/tests/support/mod.rs new file mode 100644 index 0000000000..297749777a --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/support/mod.rs @@ -0,0 +1,134 @@ +#![allow(dead_code)] + +use std::fmt; +use std::sync::Arc; +use std::thread; + +use futures::{Future, IntoFuture, Async, Poll}; +use futures::future::FutureResult; +use futures::stream::Stream; +use futures::executor::{self, NotifyHandle, Notify}; +use futures::task; + +pub mod local_executor; + +pub fn f_ok(a: i32) -> FutureResult<i32, u32> { Ok(a).into_future() } +pub fn f_err(a: u32) -> FutureResult<i32, u32> { Err(a).into_future() } +pub fn r_ok(a: i32) -> Result<i32, u32> { Ok(a) } +pub fn r_err(a: u32) -> Result<i32, u32> { Err(a) } + +pub fn assert_done<T, F>(f: F, result: Result<T::Item, T::Error>) + where T: Future, + T::Item: Eq + fmt::Debug, + T::Error: Eq + fmt::Debug, + F: FnOnce() -> T, +{ + assert_eq!(f().wait(), result); 
+} + +pub fn assert_empty<T: Future, F: FnMut() -> T>(mut f: F) { + assert!(executor::spawn(f()).poll_future_notify(¬ify_panic(), 0).ok().unwrap().is_not_ready()); +} + +pub fn sassert_done<S: Stream>(s: &mut S) { + match executor::spawn(s).poll_stream_notify(¬ify_panic(), 0) { + Ok(Async::Ready(None)) => {} + Ok(Async::Ready(Some(_))) => panic!("stream had more elements"), + Ok(Async::NotReady) => panic!("stream wasn't ready"), + Err(_) => panic!("stream had an error"), + } +} + +pub fn sassert_empty<S: Stream>(s: &mut S) { + match executor::spawn(s).poll_stream_notify(¬ify_noop(), 0) { + Ok(Async::Ready(None)) => panic!("stream is at its end"), + Ok(Async::Ready(Some(_))) => panic!("stream had more elements"), + Ok(Async::NotReady) => {} + Err(_) => panic!("stream had an error"), + } +} + +pub fn sassert_next<S: Stream>(s: &mut S, item: S::Item) + where S::Item: Eq + fmt::Debug +{ + match executor::spawn(s).poll_stream_notify(¬ify_panic(), 0) { + Ok(Async::Ready(None)) => panic!("stream is at its end"), + Ok(Async::Ready(Some(e))) => assert_eq!(e, item), + Ok(Async::NotReady) => panic!("stream wasn't ready"), + Err(_) => panic!("stream had an error"), + } +} + +pub fn sassert_err<S: Stream>(s: &mut S, err: S::Error) + where S::Error: Eq + fmt::Debug +{ + match executor::spawn(s).poll_stream_notify(¬ify_panic(), 0) { + Ok(Async::Ready(None)) => panic!("stream is at its end"), + Ok(Async::Ready(Some(_))) => panic!("stream had more elements"), + Ok(Async::NotReady) => panic!("stream wasn't ready"), + Err(e) => assert_eq!(e, err), + } +} + +pub fn notify_panic() -> NotifyHandle { + struct Foo; + + impl Notify for Foo { + fn notify(&self, _id: usize) { + panic!("should not be notified"); + } + } + + NotifyHandle::from(Arc::new(Foo)) +} + +pub fn notify_noop() -> NotifyHandle { + struct Noop; + + impl Notify for Noop { + fn notify(&self, _id: usize) {} + } + + const NOOP : &'static Noop = &Noop; + + NotifyHandle::from(NOOP) +} + +pub trait ForgetExt { + fn 
forget(self); +} + +impl<F> ForgetExt for F + where F: Future + Sized + Send + 'static, + F::Item: Send, + F::Error: Send +{ + fn forget(self) { + thread::spawn(|| self.wait()); + } +} + +pub struct DelayFuture<F>(F,bool); + +impl<F: Future> Future for DelayFuture<F> { + type Item = F::Item; + type Error = F::Error; + + fn poll(&mut self) -> Poll<F::Item,F::Error> { + if self.1 { + self.0.poll() + } else { + self.1 = true; + task::current().notify(); + Ok(Async::NotReady) + } + } +} + +/// Introduces one `Ok(Async::NotReady)` before polling the given future +pub fn delay_future<F>(f: F) -> DelayFuture<F::Future> + where F: IntoFuture, +{ + DelayFuture(f.into_future(), false) +} + diff --git a/third_party/rust/futures-0.1.31/tests/unfold.rs b/third_party/rust/futures-0.1.31/tests/unfold.rs new file mode 100644 index 0000000000..1669a18aa5 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/unfold.rs @@ -0,0 +1,52 @@ +extern crate futures; + +mod support; + +use futures::stream; + +use support::*; + +#[test] +fn unfold1() { + let mut stream = stream::unfold(0, |state| { + if state <= 2 { + let res: Result<_,()> = Ok((state * 2, state + 1)); + Some(delay_future(res)) + } else { + None + } + }); + // Creates the future with the closure + // Not ready (delayed future) + sassert_empty(&mut stream); + // future is ready, yields the item + sassert_next(&mut stream, 0); + + // Repeat + sassert_empty(&mut stream); + sassert_next(&mut stream, 2); + + sassert_empty(&mut stream); + sassert_next(&mut stream, 4); + + // no more items + sassert_done(&mut stream); +} + +#[test] +fn unfold_err1() { + let mut stream = stream::unfold(0, |state| { + if state <= 2 { + Some(Ok((state * 2, state + 1))) + } else { + Some(Err(-1)) + } + }); + sassert_next(&mut stream, 0); + sassert_next(&mut stream, 2); + sassert_next(&mut stream, 4); + sassert_err(&mut stream, -1); + + // An error was generated by the stream, it will then finish + sassert_done(&mut stream); +} diff --git 
a/third_party/rust/futures-0.1.31/tests/unsync-oneshot.rs b/third_party/rust/futures-0.1.31/tests/unsync-oneshot.rs new file mode 100644 index 0000000000..55b0ca5ac2 --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/unsync-oneshot.rs @@ -0,0 +1,189 @@ +extern crate futures; + +use futures::prelude::*; +use futures::future; +use futures::unsync::oneshot::{channel, Canceled, spawn}; + +mod support; +use support::local_executor; + +#[test] +fn smoke() { + let (tx, rx) = channel(); + tx.send(33).unwrap(); + assert_eq!(rx.wait().unwrap(), 33); +} + +#[test] +fn canceled() { + let (_, rx) = channel::<()>(); + assert_eq!(rx.wait().unwrap_err(), Canceled); +} + +#[test] +fn poll_cancel() { + let (mut tx, _) = channel::<()>(); + assert!(tx.poll_cancel().unwrap().is_ready()); +} + +#[test] +fn tx_complete_rx_unparked() { + let (tx, rx) = channel(); + + let res = rx.join(future::lazy(move || { + tx.send(55).unwrap(); + Ok(11) + })); + assert_eq!(res.wait().unwrap(), (55, 11)); +} + +#[test] +fn tx_dropped_rx_unparked() { + let (tx, rx) = channel::<i32>(); + + let res = rx.join(future::lazy(move || { + let _tx = tx; + Ok(11) + })); + assert_eq!(res.wait().unwrap_err(), Canceled); +} + + +#[test] +fn is_canceled() { + let (tx, rx) = channel::<u32>(); + assert!(!tx.is_canceled()); + drop(rx); + assert!(tx.is_canceled()); +} + +#[test] +fn spawn_sends_items() { + let core = local_executor::Core::new(); + let future = future::ok::<_, ()>(1); + let rx = spawn(future, &core); + assert_eq!(core.run(rx).unwrap(), 1); +} + +#[test] +fn spawn_kill_dead_stream() { + use std::thread; + use std::time::Duration; + use futures::future::Either; + use futures::sync::oneshot; + + // a future which never returns anything (forever accepting incoming + // connections), but dropping it leads to observable side effects + // (like closing listening sockets, releasing limited resources, + // ...) 
+ #[derive(Debug)] + struct Dead { + // when dropped you should get Err(oneshot::Canceled) on the + // receiving end + done: oneshot::Sender<()>, + } + impl Future for Dead { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + Ok(Async::NotReady) + } + } + + // need to implement a timeout for the test, as it would hang + // forever right now + let (timeout_tx, timeout_rx) = oneshot::channel(); + thread::spawn(move || { + thread::sleep(Duration::from_millis(1000)); + let _ = timeout_tx.send(()); + }); + + let core = local_executor::Core::new(); + let (done_tx, done_rx) = oneshot::channel(); + let future = Dead{done: done_tx}; + let rx = spawn(future, &core); + let res = core.run( + Ok::<_, ()>(()) + .into_future() + .then(move |_| { + // now drop the spawned future: maybe some timeout exceeded, + // or some connection on this end was closed by the remote + // end. + drop(rx); + // and wait for the spawned future to release its resources + done_rx + }) + .select2(timeout_rx) + ); + match res { + Err(Either::A((oneshot::Canceled, _))) => (), + Ok(Either::B(((), _))) => { + panic!("dead future wasn't canceled (timeout)"); + }, + _ => { + panic!("dead future wasn't canceled (unexpected result)"); + }, + } +} + +#[test] +fn spawn_dont_kill_forgot_dead_stream() { + use std::thread; + use std::time::Duration; + use futures::future::Either; + use futures::sync::oneshot; + + // a future which never returns anything (forever accepting incoming + // connections), but dropping it leads to observable side effects + // (like closing listening sockets, releasing limited resources, + // ...) 
+ #[derive(Debug)] + struct Dead { + // when dropped you should get Err(oneshot::Canceled) on the + // receiving end + done: oneshot::Sender<()>, + } + impl Future for Dead { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + Ok(Async::NotReady) + } + } + + // need to implement a timeout for the test, as it would hang + // forever right now + let (timeout_tx, timeout_rx) = oneshot::channel(); + thread::spawn(move || { + thread::sleep(Duration::from_millis(1000)); + let _ = timeout_tx.send(()); + }); + + let core = local_executor::Core::new(); + let (done_tx, done_rx) = oneshot::channel(); + let future = Dead{done: done_tx}; + let rx = spawn(future, &core); + let res = core.run( + Ok::<_, ()>(()) + .into_future() + .then(move |_| { + // forget the spawned future: should keep running, i.e. hit + // the timeout below. + rx.forget(); + // and wait for the spawned future to release its resources + done_rx + }) + .select2(timeout_rx) + ); + match res { + Err(Either::A((oneshot::Canceled, _))) => { + panic!("forgotten dead future was canceled"); + }, + Ok(Either::B(((), _))) => (), // reached timeout + _ => { + panic!("forgotten dead future was canceled (unexpected result)"); + }, + } +} diff --git a/third_party/rust/futures-0.1.31/tests/unsync.rs b/third_party/rust/futures-0.1.31/tests/unsync.rs new file mode 100644 index 0000000000..490db0af1c --- /dev/null +++ b/third_party/rust/futures-0.1.31/tests/unsync.rs @@ -0,0 +1,266 @@ +#![cfg(feature = "use_std")] +#![allow(bare_trait_objects, unknown_lints)] + +extern crate futures; + +mod support; + +use futures::prelude::*; +use futures::unsync::oneshot; +use futures::unsync::mpsc::{self, SendError}; +use futures::future::lazy; +use futures::stream::{iter_ok, unfold}; + +use support::local_executor::Core; + +#[test] +fn mpsc_send_recv() { + let (tx, rx) = mpsc::channel::<i32>(1); + let mut rx = rx.wait(); + + tx.send(42).wait().unwrap(); + + assert_eq!(rx.next(), Some(Ok(42))); 
+ assert_eq!(rx.next(), None); +} + +#[test] +fn mpsc_rx_notready() { + let (_tx, mut rx) = mpsc::channel::<i32>(1); + + lazy(|| { + assert_eq!(rx.poll().unwrap(), Async::NotReady); + Ok(()) as Result<(), ()> + }).wait().unwrap(); +} + +#[test] +fn mpsc_rx_end() { + let (_, mut rx) = mpsc::channel::<i32>(1); + + lazy(|| { + assert_eq!(rx.poll().unwrap(), Async::Ready(None)); + Ok(()) as Result<(), ()> + }).wait().unwrap(); +} + +#[test] +fn mpsc_tx_clone_weak_rc() { + let (tx, mut rx) = mpsc::channel::<i32>(1); // rc = 1 + + let tx_clone = tx.clone(); // rc = 2 + lazy(|| { + assert_eq!(rx.poll().unwrap(), Async::NotReady); + Ok(()) as Result<(), ()> + }).wait().unwrap(); + + drop(tx); // rc = 1 + lazy(|| { + assert_eq!(rx.poll().unwrap(), Async::NotReady); + Ok(()) as Result<(), ()> + }).wait().unwrap(); + + drop(tx_clone); // rc = 0 + lazy(|| { + assert_eq!(rx.poll().unwrap(), Async::Ready(None)); + Ok(()) as Result<(), ()> + }).wait().unwrap(); +} + +#[test] +fn mpsc_tx_notready() { + let (tx, _rx) = mpsc::channel::<i32>(1); + let tx = tx.send(1).wait().unwrap(); + lazy(move || { + assert!(tx.send(2).poll().unwrap().is_not_ready()); + Ok(()) as Result<(), ()> + }).wait().unwrap(); +} + +#[test] +fn mpsc_tx_err() { + let (tx, _) = mpsc::channel::<i32>(1); + lazy(move || { + assert!(tx.send(2).poll().is_err()); + Ok(()) as Result<(), ()> + }).wait().unwrap(); +} + +#[test] +fn mpsc_backpressure() { + let (tx, rx) = mpsc::channel::<i32>(1); + lazy(move || { + iter_ok(vec![1, 2, 3]) + .forward(tx) + .map_err(|e: SendError<i32>| panic!("{}", e)) + .join(rx.take(3).collect().map(|xs| { + assert_eq!(xs, [1, 2, 3]); + })) + }).wait().unwrap(); +} + +#[test] +fn mpsc_unbounded() { + let (tx, rx) = mpsc::unbounded::<i32>(); + lazy(move || { + iter_ok(vec![1, 2, 3]) + .forward(tx) + .map_err(|e: SendError<i32>| panic!("{}", e)) + .join(rx.take(3).collect().map(|xs| { + assert_eq!(xs, [1, 2, 3]); + })) + }).wait().unwrap(); +} + +#[test] +fn mpsc_recv_unpark() { + let core = 
Core::new(); + let (tx, rx) = mpsc::channel::<i32>(1); + let tx2 = tx.clone(); + core.spawn(rx.collect().map(|xs| assert_eq!(xs, [1, 2]))); + core.spawn(lazy(move || tx.send(1).map(|_| ()).map_err(|e| panic!("{}", e)))); + core.run(lazy(move || tx2.send(2))).unwrap(); +} + +#[test] +fn mpsc_send_unpark() { + let core = Core::new(); + let (tx, rx) = mpsc::channel::<i32>(1); + let (donetx, donerx) = oneshot::channel(); + core.spawn(iter_ok(vec![1, 2]).forward(tx) + .then(|x: Result<_, SendError<i32>>| { + assert!(x.is_err()); + donetx.send(()).unwrap(); + Ok(()) + })); + core.spawn(lazy(move || { let _ = rx; Ok(()) })); + core.run(donerx).unwrap(); +} + +#[test] +fn spawn_sends_items() { + let core = Core::new(); + let stream = unfold(0, |i| Some(Ok::<_,u8>((i, i + 1)))); + let rx = mpsc::spawn(stream, &core, 1); + assert_eq!(core.run(rx.take(4).collect()).unwrap(), + [0, 1, 2, 3]); +} + +#[test] +fn spawn_kill_dead_stream() { + use std::thread; + use std::time::Duration; + use futures::future::Either; + + // a stream which never returns anything (maybe a remote end isn't + // responding), but dropping it leads to observable side effects + // (like closing connections, releasing limited resources, ...) 
+ #[derive(Debug)] + struct Dead { + // when dropped you should get Err(oneshot::Canceled) on the + // receiving end + done: oneshot::Sender<()>, + } + impl Stream for Dead { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { + Ok(Async::NotReady) + } + } + + // need to implement a timeout for the test, as it would hang + // forever right now + let (timeout_tx, timeout_rx) = futures::sync::oneshot::channel(); + thread::spawn(move || { + thread::sleep(Duration::from_millis(1000)); + let _ = timeout_tx.send(()); + }); + + let core = Core::new(); + let (done_tx, done_rx) = oneshot::channel(); + let stream = Dead{done: done_tx}; + let rx = mpsc::spawn(stream, &core, 1); + let res = core.run( + Ok::<_, ()>(()) + .into_future() + .then(move |_| { + // now drop the spawned stream: maybe some timeout exceeded, + // or some connection on this end was closed by the remote + // end. + drop(rx); + // and wait for the spawned stream to release its resources + done_rx + }) + .select2(timeout_rx) + ); + match res { + Err(Either::A((oneshot::Canceled, _))) => (), + _ => { + panic!("dead stream wasn't canceled"); + }, + } +} + + +/// Test case for PR #768 (issue #766). +/// The issue was: +/// Given that an empty channel is polled by the Receiver, and the only Sender +/// gets dropped without sending anything, then the Receiver would get stuck. + +#[test] +fn dropped_sender_of_unused_channel_notifies_receiver() { + let core = Core::new(); + type FUTURE = Box<futures::Future<Item=u8, Error=()>>; + + // Constructs the channel which we want to test, and two futures which + // act on that channel. + let pair = |reverse| -> Vec<FUTURE> { + // This is the channel which we want to test. 
+ let (tx, rx) = mpsc::channel::<u8>(1); + let mut futures: Vec<FUTURE> = vec![ + Box::new(futures::stream::iter_ok(vec![]) + .forward(tx) + .map_err(|_: mpsc::SendError<u8>| ()) + .map(|_| 42) + ), + Box::new(rx.fold((), |_, _| Ok(())) + .map(|_| 24) + ), + ]; + if reverse { + futures.reverse(); + } + futures + }; + + let make_test_future = |reverse| -> Box<Future<Item=Vec<u8>, Error=()>> { + let f = futures::future::join_all(pair(reverse)); + + // Use a timeout. This is not meant to test the `sync::oneshot` but + // merely uses it to implement this timeout. + let (timeout_tx, timeout_rx) = futures::sync::oneshot::channel::<Vec<u8>>(); + std::thread::spawn(move || { + std::thread::sleep(std::time::Duration::from_millis(1000)); + let x = timeout_tx.send(vec![0]); + assert!(x.is_err(), "Test timed out."); + }); + + Box::new(f.select(timeout_rx.map_err(|_|())) + .map_err(|x| x.0) + .map(|x| x.0) + ) + }; + + // The order of the tested futures is important to test fix of PR #768. + // We want future_2 to poll on the Receiver before the Sender is dropped. + let result = core.run(make_test_future(false)); + assert!(result.is_ok()); + assert_eq!(vec![42, 24], result.unwrap()); + + // Test also the other ordering: + let result = core.run(make_test_future(true)); + assert!(result.is_ok()); + assert_eq!(vec![24, 42], result.unwrap()); +} |