Diffstat:
-rw-r--r--  third_party/rust/mio-0.6.23/.cargo-checksum.json  1
-rw-r--r--  third_party/rust/mio-0.6.23/CHANGELOG.md  227
-rw-r--r--  third_party/rust/mio-0.6.23/Cargo.toml  70
-rw-r--r--  third_party/rust/mio-0.6.23/LICENSE  19
-rw-r--r--  third_party/rust/mio-0.6.23/README.md  90
-rw-r--r--  third_party/rust/mio-0.6.23/src/channel.rs  390
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/event_loop.rs  346
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/handler.rs  37
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/io.rs  28
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/mod.rs  36
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/notify.rs  63
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/unix.rs  420
-rw-r--r--  third_party/rust/mio-0.6.23/src/event_imp.rs  1162
-rw-r--r--  third_party/rust/mio-0.6.23/src/io.rs  35
-rw-r--r--  third_party/rust/mio-0.6.23/src/lazycell.rs  554
-rw-r--r--  third_party/rust/mio-0.6.23/src/lib.rs  308
-rw-r--r--  third_party/rust/mio-0.6.23/src/net/mod.rs  14
-rw-r--r--  third_party/rust/mio-0.6.23/src/net/tcp.rs  737
-rw-r--r--  third_party/rust/mio-0.6.23/src/net/udp.rs  645
-rw-r--r--  third_party/rust/mio-0.6.23/src/poll.rs  2783
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/awakener.rs  73
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/eventedfd.rs  263
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/handles.rs  78
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/mod.rs  177
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/net.rs  444
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/ready.rs  181
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/selector.rs  353
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/mod.rs  56
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/awakener.rs  74
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/dlsym.rs  47
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/epoll.rs  268
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/eventedfd.rs  107
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/io.rs  107
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/kqueue.rs  360
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/mod.rs  105
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/ready.rs  525
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/tcp.rs  286
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/udp.rs  181
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/uds.rs  265
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/uio.rs  44
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/awakener.rs  66
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/buffer_pool.rs  20
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/from_raw_arc.rs  116
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/mod.rs  193
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/selector.rs  538
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/tcp.rs  853
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/udp.rs  414
-rw-r--r--  third_party/rust/mio-0.6.23/src/timer.rs  516
-rw-r--r--  third_party/rust/mio-0.6.23/src/token.rs  153
-rw-r--r--  third_party/rust/mio-0.6.23/src/udp.rs  326
-rw-r--r--  third_party/rust/mio-extras/.cargo-checksum.json  1
-rw-r--r--  third_party/rust/mio-extras/CHANGELOG.md  37
-rw-r--r--  third_party/rust/mio-extras/Cargo.toml  40
-rw-r--r--  third_party/rust/mio-extras/LICENSE-APACHE  201
-rw-r--r--  third_party/rust/mio-extras/LICENSE-MIT  25
-rw-r--r--  third_party/rust/mio-extras/README.md  30
-rw-r--r--  third_party/rust/mio-extras/src/channel.rs  431
-rw-r--r--  third_party/rust/mio-extras/src/lib.rs  33
-rw-r--r--  third_party/rust/mio-extras/src/timer.rs  751
-rw-r--r--  third_party/rust/mio-extras/test/mod.rs  45
-rw-r--r--  third_party/rust/mio-extras/test/test_poll_channel.rs  362
-rw-r--r--  third_party/rust/mio-extras/test/test_timer.rs  308
-rw-r--r--  third_party/rust/mio/.cargo-checksum.json  1
-rw-r--r--  third_party/rust/mio/CHANGELOG.md  526
-rw-r--r--  third_party/rust/mio/Cargo.lock  147
-rw-r--r--  third_party/rust/mio/Cargo.toml  64
-rw-r--r--  third_party/rust/mio/LICENSE  19
-rw-r--r--  third_party/rust/mio/README.md  179
-rw-r--r--  third_party/rust/mio/examples/tcp_server.rs  183
-rw-r--r--  third_party/rust/mio/examples/udp_server.rs  77
-rw-r--r--  third_party/rust/mio/src/event/event.rs  230
-rw-r--r--  third_party/rust/mio/src/event/events.rs  230
-rw-r--r--  third_party/rust/mio/src/event/mod.rs  10
-rw-r--r--  third_party/rust/mio/src/event/source.rs  139
-rw-r--r--  third_party/rust/mio/src/interest.rs  179
-rw-r--r--  third_party/rust/mio/src/io_source.rs  294
-rw-r--r--  third_party/rust/mio/src/lib.rs  264
-rw-r--r--  third_party/rust/mio/src/macros.rs  70
-rw-r--r--  third_party/rust/mio/src/net/mod.rs  37
-rw-r--r--  third_party/rust/mio/src/net/tcp/listener.rs  217
-rw-r--r--  third_party/rust/mio/src/net/tcp/mod.rs  5
-rw-r--r--  third_party/rust/mio/src/net/tcp/stream.rs  334
-rw-r--r--  third_party/rust/mio/src/net/udp.rs  635
-rw-r--r--  third_party/rust/mio/src/net/uds/datagram.rs  165
-rw-r--r--  third_party/rust/mio/src/net/uds/listener.rs  104
-rw-r--r--  third_party/rust/mio/src/net/uds/mod.rs  10
-rw-r--r--  third_party/rust/mio/src/net/uds/stream.rs  174
-rw-r--r--  third_party/rust/mio/src/poll.rs  682
-rw-r--r--  third_party/rust/mio/src/sys/mod.rs  80
-rw-r--r--  third_party/rust/mio/src/sys/shell/mod.rs  70
-rw-r--r--  third_party/rust/mio/src/sys/shell/selector.rs  108
-rw-r--r--  third_party/rust/mio/src/sys/shell/tcp.rs  27
-rw-r--r--  third_party/rust/mio/src/sys/shell/udp.rs  10
-rw-r--r--  third_party/rust/mio/src/sys/shell/uds.rs  75
-rw-r--r--  third_party/rust/mio/src/sys/shell/waker.rs  16
-rw-r--r--  third_party/rust/mio/src/sys/unix/mod.rs  72
-rw-r--r--  third_party/rust/mio/src/sys/unix/net.rs  168
-rw-r--r--  third_party/rust/mio/src/sys/unix/pipe.rs  431
-rw-r--r--  third_party/rust/mio/src/sys/unix/selector/epoll.rs  246
-rw-r--r--  third_party/rust/mio/src/sys/unix/selector/kqueue.rs  688
-rw-r--r--  third_party/rust/mio/src/sys/unix/selector/mod.rs  35
-rw-r--r--  third_party/rust/mio/src/sys/unix/sourcefd.rs  116
-rw-r--r--  third_party/rust/mio/src/sys/unix/tcp.rs  113
-rw-r--r--  third_party/rust/mio/src/sys/unix/udp.rs  39
-rw-r--r--  third_party/rust/mio/src/sys/unix/uds/datagram.rs  56
-rw-r--r--  third_party/rust/mio/src/sys/unix/uds/listener.rs  94
-rw-r--r--  third_party/rust/mio/src/sys/unix/uds/mod.rs  149
-rw-r--r--  third_party/rust/mio/src/sys/unix/uds/socketaddr.rs  130
-rw-r--r--  third_party/rust/mio/src/sys/unix/uds/stream.rs  39
-rw-r--r--  third_party/rust/mio/src/sys/unix/waker.rs  178
-rw-r--r--  third_party/rust/mio/src/sys/windows/afd.rs  237
-rw-r--r--  third_party/rust/mio/src/sys/windows/event.rs  162
-rw-r--r--  third_party/rust/mio/src/sys/windows/io_status_block.rs  40
-rw-r--r--  third_party/rust/mio/src/sys/windows/mod.rs  147
-rw-r--r--  third_party/rust/mio/src/sys/windows/named_pipe.rs  782
-rw-r--r--  third_party/rust/mio/src/sys/windows/net.rs  108
-rw-r--r--  third_party/rust/mio/src/sys/windows/overlapped.rs  37
-rw-r--r--  third_party/rust/mio/src/sys/windows/selector.rs  748
-rw-r--r--  third_party/rust/mio/src/sys/windows/tcp.rs  71
-rw-r--r--  third_party/rust/mio/src/sys/windows/udp.rs  53
-rw-r--r--  third_party/rust/mio/src/sys/windows/waker.rs  29
-rw-r--r--  third_party/rust/mio/src/token.rs  138
-rw-r--r--  third_party/rust/mio/src/waker.rs  96
-rw-r--r--  third_party/rust/miow/.cargo-checksum.json  1
-rw-r--r--  third_party/rust/miow/CHANGELOG.md  5
-rw-r--r--  third_party/rust/miow/Cargo.toml  35
-rw-r--r--  third_party/rust/miow/LICENSE-APACHE  201
-rw-r--r--  third_party/rust/miow/LICENSE-MIT  25
-rw-r--r--  third_party/rust/miow/README.md  31
-rw-r--r--  third_party/rust/miow/appveyor.yml  20
-rw-r--r--  third_party/rust/miow/src/handle.rs  177
-rw-r--r--  third_party/rust/miow/src/iocp.rs  328
-rw-r--r--  third_party/rust/miow/src/lib.rs  52
-rw-r--r--  third_party/rust/miow/src/net.rs  1332
-rw-r--r--  third_party/rust/miow/src/overlapped.rs  92
-rw-r--r--  third_party/rust/miow/src/pipe.rs  788
136 files changed, 31068 insertions, 0 deletions
diff --git a/third_party/rust/mio-0.6.23/.cargo-checksum.json b/third_party/rust/mio-0.6.23/.cargo-checksum.json
new file mode 100644
index 0000000000..87772bc2e9
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"4ba3d031a78895b4251cc75585215ded07c2d4ca84b79dea5c55a68fd973a29d","Cargo.toml":"1cebd8a3a1509948b24b4de3ed6eedd2441f1a36e4831a2b2b3c38539b7ead70","LICENSE":"07919255c7e04793d8ea760d6c2ce32d19f9ff02bdbdde3ce90b1e1880929a9b","README.md":"eedc84973c97348ea27f93ac7d3232098438d4455c7eaedf6fcc7f105ac9f321","src/channel.rs":"b16493a2b74334156e153b1f4143b0e98d43cd4d7bff0275066dfa575dde2402","src/deprecated/event_loop.rs":"ba931d256e6e57d5635c6cfc6e3a4add4551c87f16457d901b334a129f9cf41d","src/deprecated/handler.rs":"13cbc0c193f43a331e125e05d5eddf3712fe86e41a8721186d3672518ef8a9cc","src/deprecated/io.rs":"4948217ffeeba4f508cc89744da5d6af858b4ad7b4be23f927a00df93bdf2984","src/deprecated/mod.rs":"4310471b5a1313dbf53b492769a3031b15353eb269333b7c1a890bc2709def7c","src/deprecated/notify.rs":"8cb108387ebcfb75764e4dd2868d80eb00d793c4b7c867c08cd86ef10b91b023","src/deprecated/unix.rs":"76c832e7db8263395b576930186fe1a3c472589bed41810d445d89f0eed684eb","src/event_imp.rs":"f8cff47dedc52dab9c7cc2d707f2c2d86d7185f942f02ace4c60353cc6094035","src/io.rs":"9207ffc93ea744b09bc6872fa4d378d7c75640f9ac38b1fa67b940c7cb5d0ade","src/lazycell.rs":"871dbd89f6918a354c2ec2d2a8b89e4aa30754e7e3e8dfcf2f5a6071156e39cf","src/lib.rs":"b875273d1852b6ef11a112fb27147587f5ed699e2c3ce99da3175358a8ff6fdd","src/net/mod.rs":"340c63a8efe9ee774b7bf8ed8c0f72fc7563e5c4b35f6a8b243f92d366e145a2","src/net/tcp.rs":"8b06dc8d2dd9fb7cd52db582fd7fe608b6a50cdf7ce18cf0abb9992956e95f6d","src/net/udp.rs":"8b5728924a07917d2845bbfb060cadb842b36a74d5372ac7707eb7f169a67d4d","src/poll.rs":"e76bb316deedbd9306f91ca8ab394d02b5676fa767746bd9770c5c9dff490426","src/sys/fuchsia/awakener.rs":"71a4a0083242457b0a96326c69c0f98b23dfdb97be96deb26ee02fa9d1dfb212","src/sys/fuchsia/eventedfd.rs":"bd8f43d2b61cdd6a5d0df9c0dc1cb43e1708140d01a05611055277ed55a33b63","src/sys/fuchsia/handles.rs":"161a69e8a8d7f71326a9c53bcb7685d0a81d184aba8e20da27c64aa27dfd56b2","src/sys/fuchsia/mod.rs":"9d80f1214abc93f48b6b6c12ce5b6cfaddbda592c8f3410564d0237228cae2d0","src/sys/fuchsia/net.rs":"50340191dd9cbe317bd6e6ae0170c03daa9141f15c96782b96484e3d8b8301b1","src/sys/fuchsia/ready.rs":"7e6bb7235c52ab4a2134d36cf982f6a4fd6e18050e737b40ee84c89a10a9faac","src/sys/fuchsia/selector.rs":"f3be7f8d683d43e4a8313246e4cacb9444549bf66ecb2234f0d0f53172043bf5","src/sys/mod.rs":"64bea046e4a9feb1f2e2fb8326452c1be8b9d56cf8794df0af4fbdf565204255","src/sys/unix/awakener.rs":"20a61d8f39b2f2abf4f166a3ed46fa0d79907ddf92758eaddb880c67321db56c","src/sys/unix/dlsym.rs":"559337d1f6c10e6c1172bd3908a9dcfa5a0828b53b04f7ca3a0d926afa85cd63","src/sys/unix/epoll.rs":"26b34910c87883f1b8170b95aed1bf3d9ecd9442c7afd23ff1b87d54391e2c88","src/sys/unix/eventedfd.rs":"a0bd2096ab5acf42c48110f024bc8ea052ba62c707345c7db46fea7a494388df","src/sys/unix/io.rs":"a518f09020f821e87bcf9c2ecb4bf501d428619ddfd7b35a26629b614919b14c","src/sys/unix/kqueue.rs":"3bf9f9635a8607569e3176998b61d1801e5bb35a94588c827a0a954656eee3ea","src/sys/unix/mod.rs":"15ddcfab101e7dfb926f82fd2d6eebb30c66f43fc2af00e4bb2f687c7059e0d0","src/sys/unix/ready.rs":"8494e27731d6842a90e01ec783d37001802f472f81358177e047d43b4bc68c43","src/sys/unix/tcp.rs":"19d483762fc8c8a1cb182b2f2ead85534f99394cf605a14d5ed46db7f3354667","src/sys/unix/udp.rs":"bc2e8ad142b17797a7d038e720ff57ac9840eb5b2b26696c854180085ccd1873","src/sys/unix/uds.rs":"5223d4d35048019d175679686cc65a08baf027df0b2127428e2f322bbb533309","src/sys/unix/uio.rs":"3942a49548dd3a37e5fd6044a103d92e2635965ace1ab370be10c82245b41f66","src/sys/windows/awakener.rs":"2d3cdaf8b523675e2f64c5fd81e1197d5255384517b9753b718
c5c47acf0cabd","src/sys/windows/buffer_pool.rs":"636f4b7510a507f9985987699ce8ec140b2ed852abb231443ee1486c80759eed","src/sys/windows/from_raw_arc.rs":"659dabdf5037546e3783aacc422992b4248b0ba2ddcce52b149d35bc723550e5","src/sys/windows/mod.rs":"afeec8cd4e3adeaf6ffe68b134ad1b4ba07faa3abae96f6f9a00bbc20ff5f2c5","src/sys/windows/selector.rs":"0137276cff457f84511e007bb9527f5e082ec04e898b8f8e0acd39fe65c00148","src/sys/windows/tcp.rs":"9942db351f91229d01a0b9f52dd6c9680050d3abcee9fbb6b4f2f14896dc2c58","src/sys/windows/udp.rs":"1ef869b660bcf89ea6498552474abf8f540946631e14d5b610ca31014cd9045f","src/timer.rs":"540d521c5b4a79f3b1c01296ef2e14e2e3743192f25180ee6e71e367692ce762","src/token.rs":"4a56f851709470df2eed803c57c68b0a4b12ea86fa1b8d2c999bec7a85d58ec0","src/udp.rs":"442e620f3ea0cf010497d3ad775debd585f28e79a025993d40471c8e6839dc98"},"package":"4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4"} \ No newline at end of file
diff --git a/third_party/rust/mio-0.6.23/CHANGELOG.md b/third_party/rust/mio-0.6.23/CHANGELOG.md
new file mode 100644
index 0000000000..c17ebd0151
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/CHANGELOG.md
@@ -0,0 +1,227 @@
+# 0.6.23 (Dec 01, 2020)
+
+### Changed
+- **MSRV**: Increased the MSRV from 1.18.0 (Jun 8, 2017) to 1.31.0 (Dec 6,
+ 2018)
+ (https://github.com/tokio-rs/mio/commit/4879e0d32ddfd98e762fc87240e594a3ad8fca30).
+
+### Fixed
+- Work around a Linux kernel (< 2.6.37) bug on 32-bit systems that made
+  timeouts longer than ~30 minutes effectively infinite
+ (https://github.com/tokio-rs/mio/commit/e7cba59950e9c9fa6194e29b5b1e72029e3df455).
+- Update miow and net2 dependencies to get rid of an invalid memory layout assumption
+ (https://github.com/tokio-rs/mio/commit/13f02ac0a86d7c0c0001e5ff8960a0b4340d075c).
+
+# 0.6.22 (May 01, 2020)
+
+### Added
+- Add support for illumos target (#1294)
+
+# 0.6.21 (November 27, 2019)
+
+### Fixed
+- Remove the exact (`=`) version requirement on the `cfg-if` dependency.
+
+# 0.6.20 (November 21, 2019)
+
+### Fixed
+- Use default IOCP concurrency value (#1161).
+- Set `FD_CLOEXEC` when creating a pipe (#1095).
+
+# 0.6.19 (May 28, 2019)
+
+### Fixed
+- Do not trigger HUP events on kqueue platforms (#958).
+
+# 0.6.18 (May 24, 2019)
+
+### Fixed
+- Fix compilation on kqueue platforms with 32bit C long (#948).
+
+# 0.6.17 (May 15, 2019)
+
+### Fixed
+- Don't report `RDHUP` as `HUP` (#939)
+- Fix lazycell related compilation issues.
+- Fix EPOLLPRI conflicting with READABLE
+- Abort process on ref count overflows
+
+### Added
+- Define PRI on all targets
+
+# 0.6.16 (September 5, 2018)
+
+* Add EPOLLPRI readiness to UnixReady on supported platforms (#867)
+* Reduce spurious awaken calls (#875)
+
+# 0.6.15 (July 3, 2018)
+
+* Implement `Evented` for containers (#840).
+* Fix android-aarch64 build (#850).
+
+# 0.6.14 (March 8, 2018)
+
+* Add `Poll::poll_interruptible` (#811)
+* Add `Ready::all` and `usize` conversions (#825)
+
+# 0.6.13 (February 5, 2018)
+
+* Fix build on DragonFlyBSD.
+* Add `TcpListener::from_std`, which does not require the socket address.
+* Deprecate `TcpListener::from_listener` in favor of `from_std`.
+
+# 0.6.12 (January 5, 2018)
+
+* Add `TcpStream::peek` function (#773).
+* Raise minimum Rust version to 1.18.0.
+* `Poll`: retry select() when interrupted by a signal (#742).
+* Deprecate `Events` index access (#713).
+* Add `Events::clear` (#782).
+* Add support for `lio_listio` (#780).
+
+# 0.6.11 (October 25, 2017)
+
+* Allow `register` to take an empty interest (#640).
+* Fix a bug with TCP errors on Windows (#725).
+* Add `TcpListener::accept_std` (#733).
+* Update `IoVec` to fix a soundness bug; this includes a behavior change (#747).
+* Minimum Rust version is now 1.14.0.
+* Fix Android x86_64 build.
+* Misc API & doc polish.
+
+# 0.6.10 (July 27, 2017)
+
+* Experimental support for Fuchsia
+* Add `only_v6` option for UDP sockets
+* Fix build on NetBSD
+* Minimum Rust version is now 1.13.0
+* Assignment operators (e.g. `|=`) are now implemented for `Ready`
+
+# 0.6.9 (June 7, 2017)
+
+* More socket options are exposed through the TCP types, brought in through the
+ `net2` crate.
+
+# 0.6.8 (May 26, 2017)
+
+* Support Fuchsia
+* POSIX AIO support
+* Fix memory leak caused by Register::new2
+* Windows: fix handling failed TCP connections
+* Fix build on aarch64-linux-android
+* Fix usage of `O_CLOEXEC` with `SETFL`
+
+# 0.6.7 (April 27, 2017)
+
+* Ignore EPIPE coming out of `kevent`
+* Timer thread should exit when timer is dropped.
+
+# 0.6.6 (March 22, 2017)
+
+* Add `send()`, `recv()` and `connect()` to `UdpSocket`.
+* Fix bug in custom readiness queue
+* Move net types into `net` module
+
+# 0.6.5 (March 14, 2017)
+
+* Misc improvements to kqueue bindings
+* Add official support for iOS, Android, BSD
+* Reimplement custom readiness queue
+* `Poll` is now `Sync`
+* Officially deprecate non-core functionality (timers, channel, etc...)
+* `Registration` now implements `Evented`
+* Fix bug around error conditions with `connect` on Windows.
+* Use iovec crate for scatter / gather operations
+* Only support readable and writable readiness on all platforms
+* Expose additional readiness in a platform specific capacity
+
+# 0.6.4 (January 24, 2017)
+
+* Fix compilation on musl
+* Add `TcpStream::from_stream` which converts a std TCP stream to Mio.
+
+# 0.6.3 (January 22, 2017)
+
+* Implement readv/writev for `TcpStream`, allowing vectored reads/writes to
+ work across platforms
+* Remove `nix` dependency
+* Implement `Display` and `Error` for some channel error types.
+* Optimize TCP on Windows through `SetFileCompletionNotificationModes`
+
+# 0.6.2 (December 18, 2016)
+
+* Allow registration of custom handles on Windows (like `EventedFd` on Unix)
+* Send only one byte for the awakener on Unix instead of four
+* Fix a bug in the timer implementation which caused an infinite loop
+
+# 0.6.1 (October 30, 2016)
+
+* Update dependency of `libc` to 0.2.16
+* Fix channel `dec` logic
+* Fix a timer bug around timeout cancellation
+* Don't allocate buffers for TCP reads on Windows
+* Touched up documentation in a few places
+* Fix an infinite looping timer thread on OSX
+* Fix compile on 32-bit OSX
+* Fix compile on FreeBSD
+
+# 0.6.0 (September 2, 2016)
+
+* Shift primary API towards `Poll`
+* Move `EventLoop` and associated types into the `deprecated` mod. All
+  contents of the `deprecated` mod will be removed by Mio 1.0.
+* Increase minimum supported Rust version to 1.9.0
+* Deprecate unix domain socket implementation in favor of using a
+ version external to Mio. For example: https://github.com/alexcrichton/mio-uds.
+* Remove various types now included in `std`
+* Updated TCP & UDP APIs to match the versions in `std`
+* Enable implementing `Evented` for any type via `Registration`
+* Rename `IoEvent` -> `Event`
+* Access `Event` data via functions vs. public fields.
+* Expose `Events` as a public type that is passed into `Poll`
+* Use `std::time::Duration` for all APIs that require a time duration.
+* Polled events are now retrieved via `Events` type.
+* Implement `std::error::Error` for `TimerError`
+* Relax `Send` bound on notify messages.
+* Remove `Clone` impl for `Timeout` (future proof)
+* Remove `mio::prelude`
+* Remove `mio::util`
+* Remove dependency on bytes
+
+# 0.5.0 (December 3, 2015)
+
+* Windows support (#239)
+* NetBSD support (#306)
+* Android support (#295)
+* Don't re-export bytes types
+* Renamed `EventLoop::register_opt` to `EventLoop::register` (#257)
+* `EventLoopConfig` is now a builder instead of having public struct fields. It
+ is also no longer `Copy`. (#259)
+* `TcpSocket` is no longer exported in the public API (#262)
+* Integrate with net2. (#262)
+* `TcpListener` now returns the remote peer address from `accept` as well (#275)
+* The `UdpSocket::{send_to, recv_from}` methods are no longer generic over `Buf`
+ or `MutBuf` but instead take slices directly. The return types have also been
+ updated to return the number of bytes transferred. (#260)
+* Fix bug with kqueue where an error on registration prevented the
+ changelist from getting flushed (#276)
+* Support sending/receiving FDs over UNIX sockets (#291)
+* Mio's socket types are permanently associated with an EventLoop (#308)
+* Reduce unnecessary poll wakeups (#314)
+
+
+# 0.4.1 (July 21, 2015)
+
+* [BUGFIX] Fix notify channel concurrency bug (#216)
+
+# 0.4.0 (July 16, 2015)
+
+* [BUGFIX] EventLoop::register requests all events, not just readable.
+* [BUGFIX] Attempting to send a message to a shutdown event loop fails correctly.
+* [FEATURE] Expose TCP shutdown
+* [IMPROVEMENT] Coalesce readable & writable into `ready` event (#184)
+* [IMPROVEMENT] Rename TryRead & TryWrite function names to avoid conflict with std.
+* [IMPROVEMENT] Provide TCP and UDP types in Mio (path to windows #155)
+* [IMPROVEMENT] Use clock_ticks crate instead of time (path to windows #155)
+* [IMPROVEMENT] Move unix specific features into mio::unix module
+* [IMPROVEMENT] TcpListener sets SO_REUSEADDR by default
diff --git a/third_party/rust/mio-0.6.23/Cargo.toml b/third_party/rust/mio-0.6.23/Cargo.toml
new file mode 100644
index 0000000000..08c5ac150f
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/Cargo.toml
@@ -0,0 +1,70 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "mio"
+version = "0.6.23"
+authors = ["Carl Lerche <me@carllerche.com>"]
+include = ["Cargo.toml", "LICENSE", "README.md", "CHANGELOG.md", "src/**/*.rs"]
+description = "Lightweight non-blocking IO"
+homepage = "https://github.com/tokio-rs/mio"
+documentation = "https://docs.rs/mio/0.6.23/mio/"
+readme = "README.md"
+keywords = ["io", "async", "non-blocking"]
+categories = ["asynchronous"]
+license = "MIT"
+repository = "https://github.com/tokio-rs/mio"
+
+[[test]]
+name = "test"
+path = "test/mod.rs"
+[dependencies.cfg-if]
+version = "0.1.9"
+
+[dependencies.iovec]
+version = "0.1.1"
+
+[dependencies.log]
+version = "0.4"
+
+[dependencies.net2]
+version = "0.2.36"
+
+[dependencies.slab]
+version = "0.4.0"
+[dev-dependencies.bytes]
+version = "0.3.0"
+
+[dev-dependencies.env_logger]
+version = "0.4.0"
+default-features = false
+
+[dev-dependencies.tempdir]
+version = "0.3.4"
+
+[features]
+default = ["with-deprecated"]
+with-deprecated = []
+[target."cfg(target_os = \"fuchsia\")".dependencies.fuchsia-zircon]
+version = "0.3.2"
+
+[target."cfg(target_os = \"fuchsia\")".dependencies.fuchsia-zircon-sys]
+version = "0.3.2"
+[target."cfg(unix)".dependencies.libc]
+version = "0.2.54"
+
+[target."cfg(windows)".dependencies.miow]
+version = "0.3"
+
+[target."cfg(windows)".dependencies.winapi]
+version = "0.3"
+features = ["ioapiset", "minwinbase", "minwindef", "winbase", "winerror", "winnt"]
diff --git a/third_party/rust/mio-0.6.23/LICENSE b/third_party/rust/mio-0.6.23/LICENSE
new file mode 100644
index 0000000000..3516413824
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014 Carl Lerche and other MIO contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/third_party/rust/mio-0.6.23/README.md b/third_party/rust/mio-0.6.23/README.md
new file mode 100644
index 0000000000..2a472bba46
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/README.md
@@ -0,0 +1,90 @@
+# Mio – Metal IO
+
+Mio is a lightweight I/O library for Rust with a focus on adding as little
+overhead as possible over the OS abstractions.
+
+[![Crates.io][crates-badge]][crates-url]
+[![MIT licensed][mit-badge]][mit-url]
+[![Build Status][azure-badge]][azure-url]
+[![Build Status][cirrus-badge]][cirrus-url]
+
+[crates-badge]: https://img.shields.io/crates/v/mio.svg
+[crates-url]: https://crates.io/crates/mio
+[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg
+[mit-url]: LICENSE
+[azure-badge]: https://dev.azure.com/tokio-rs/Tokio/_apis/build/status/tokio-rs.mio?branchName=master
+[azure-url]: https://dev.azure.com/tokio-rs/Tokio/_build/latest?definitionId=2&branchName=master
+[cirrus-badge]: https://api.cirrus-ci.com/github/carllerche/mio.svg
+[cirrus-url]: https://cirrus-ci.com/github/carllerche/mio
+
+**API documentation**
+
+* [master](https://tokio-rs.github.io/mio/doc/mio/)
+* [v0.6](https://docs.rs/mio/^0.6)
+
+This is a low-level library; if you are looking for something easier to get
+started with, see [Tokio](https://tokio.rs).
+
+## Usage
+
+To use `mio`, first add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+mio = "0.6"
+```
+
+Then, add this to your crate root:
+
+```rust
+extern crate mio;
+```
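+
+As a minimal sketch of the polling API (not a complete server; error handling
+elided, and the listener here is bound to an ephemeral local port), a program
+registers handles with a `Poll` and then blocks for events:
+
+```rust
+extern crate mio;
+
+use mio::*;
+use mio::net::TcpListener;
+
+fn main() {
+    // Bind a listener on an ephemeral port and set up the poller.
+    let addr = "127.0.0.1:0".parse().unwrap();
+    let listener = TcpListener::bind(&addr).unwrap();
+    let poll = Poll::new().unwrap();
+    let mut events = Events::with_capacity(1024);
+
+    // Ask for readable (i.e. "acceptable") events, edge-triggered.
+    poll.register(&listener, Token(0), Ready::readable(), PollOpt::edge()).unwrap();
+
+    loop {
+        // Block until at least one registered handle is ready.
+        poll.poll(&mut events, None).unwrap();
+
+        for event in events.iter() {
+            if event.token() == Token(0) {
+                // Accept until the listener would block.
+                while let Ok((_stream, peer)) = listener.accept() {
+                    println!("accepted connection from {}", peer);
+                }
+            }
+        }
+    }
+}
+```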
+
+## Features
+
+* Non-blocking TCP and UDP.
+* I/O event notification queue backed by epoll, kqueue, and IOCP.
+* Zero allocations at runtime.
+* Platform-specific extensions.
+
+## Non-goals
+
+The following are specifically omitted from Mio and are left to the user
+or higher-level libraries.
+
+* File operations
+* Thread pools / multi-threaded event loop
+* Timers
+
+## Platforms
+
+Currently supported platforms:
+
+* Linux
+* OS X
+* Windows
+* FreeBSD
+* NetBSD
+* Solaris
+* Android
+* iOS
+
+There are potentially others. If you find that Mio works on another
+platform, submit a PR to update the list!
+
+## Community
+
+A group of Mio users hang out in the #mio channel on the Mozilla IRC
+server (irc.mozilla.org). This can be a good place to go for questions.
+
+## Contributing
+
+Interested in getting involved? We would love to help you! For simple
+bug fixes, just submit a PR with the fix and we can discuss it
+directly in the PR. If the fix is more complex, start with an issue.
+
+If you want to propose an API change, create an issue to start a
+discussion with the community. Also, feel free to talk with us in the
+IRC channel.
+
+Finally, be kind. We support the [Rust Code of Conduct](https://www.rust-lang.org/conduct.html).
diff --git a/third_party/rust/mio-0.6.23/src/channel.rs b/third_party/rust/mio-0.6.23/src/channel.rs
new file mode 100644
index 0000000000..7077c51f86
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/channel.rs
@@ -0,0 +1,390 @@
+//! Thread-safe communication channel implementing `Evented`
+
+#![allow(unused_imports, deprecated, missing_debug_implementations)]
+
+use {io, Ready, Poll, PollOpt, Registration, SetReadiness, Token};
+use event::Evented;
+use lazycell::{LazyCell, AtomicLazyCell};
+use std::any::Any;
+use std::fmt;
+use std::error;
+use std::sync::{mpsc, Arc};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// Creates a new asynchronous channel, where the `Receiver` can be registered
+/// with `Poll`.
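+///
+/// # Examples
+///
+/// A sketch of typical use (error handling elided): register the `Receiver`,
+/// send from any thread, and drain it when a readable event fires.
+///
+/// ```no_run
+/// use mio::*;
+/// use mio::channel::channel;
+///
+/// let poll = Poll::new().unwrap();
+/// let (tx, rx) = channel::<u32>();
+///
+/// // The `Receiver` becomes readable once a message is pending.
+/// poll.register(&rx, Token(0), Ready::readable(), PollOpt::edge()).unwrap();
+/// tx.send(42).unwrap();
+///
+/// let mut events = Events::with_capacity(8);
+/// poll.poll(&mut events, None).unwrap();
+/// assert_eq!(rx.try_recv().unwrap(), 42);
+/// ```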
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+ let (tx_ctl, rx_ctl) = ctl_pair();
+ let (tx, rx) = mpsc::channel();
+
+ let tx = Sender {
+ tx,
+ ctl: tx_ctl,
+ };
+
+ let rx = Receiver {
+ rx,
+ ctl: rx_ctl,
+ };
+
+ (tx, rx)
+}
+
+/// Creates a new synchronous, bounded channel where the `Receiver` can be
+/// registered with `Poll`.
+pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>) {
+ let (tx_ctl, rx_ctl) = ctl_pair();
+ let (tx, rx) = mpsc::sync_channel(bound);
+
+ let tx = SyncSender {
+ tx,
+ ctl: tx_ctl,
+ };
+
+ let rx = Receiver {
+ rx,
+ ctl: rx_ctl,
+ };
+
+ (tx, rx)
+}
+
+pub fn ctl_pair() -> (SenderCtl, ReceiverCtl) {
+ let inner = Arc::new(Inner {
+ pending: AtomicUsize::new(0),
+ senders: AtomicUsize::new(1),
+ set_readiness: AtomicLazyCell::new(),
+ });
+
+ let tx = SenderCtl {
+ inner: inner.clone(),
+ };
+
+ let rx = ReceiverCtl {
+ registration: LazyCell::new(),
+ inner,
+ };
+
+ (tx, rx)
+}
+
+/// Tracks messages sent on a channel in order to update readiness.
+pub struct SenderCtl {
+ inner: Arc<Inner>,
+}
+
+/// Tracks messages received on a channel in order to track readiness.
+pub struct ReceiverCtl {
+ registration: LazyCell<Registration>,
+ inner: Arc<Inner>,
+}
+
+pub struct Sender<T> {
+ tx: mpsc::Sender<T>,
+ ctl: SenderCtl,
+}
+
+pub struct SyncSender<T> {
+ tx: mpsc::SyncSender<T>,
+ ctl: SenderCtl,
+}
+
+pub struct Receiver<T> {
+ rx: mpsc::Receiver<T>,
+ ctl: ReceiverCtl,
+}
+
+pub enum SendError<T> {
+ Io(io::Error),
+ Disconnected(T),
+}
+
+pub enum TrySendError<T> {
+ Io(io::Error),
+ Full(T),
+ Disconnected(T),
+}
+
+struct Inner {
+ // The number of outstanding messages for the receiver to read
+ pending: AtomicUsize,
+ // The number of sender handles
+ senders: AtomicUsize,
+ // The set readiness handle
+ set_readiness: AtomicLazyCell<SetReadiness>,
+}
+
+impl<T> Sender<T> {
+ pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+ self.tx.send(t)
+ .map_err(SendError::from)
+ .and_then(|_| {
+ self.ctl.inc()?;
+ Ok(())
+ })
+ }
+}
+
+impl<T> Clone for Sender<T> {
+ fn clone(&self) -> Sender<T> {
+ Sender {
+ tx: self.tx.clone(),
+ ctl: self.ctl.clone(),
+ }
+ }
+}
+
+impl<T> SyncSender<T> {
+ pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+ self.tx.send(t)
+ .map_err(From::from)
+ .and_then(|_| {
+ self.ctl.inc()?;
+ Ok(())
+ })
+ }
+
+ pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
+ self.tx.try_send(t)
+ .map_err(From::from)
+ .and_then(|_| {
+ self.ctl.inc()?;
+ Ok(())
+ })
+ }
+}
+
+impl<T> Clone for SyncSender<T> {
+ fn clone(&self) -> SyncSender<T> {
+ SyncSender {
+ tx: self.tx.clone(),
+ ctl: self.ctl.clone(),
+ }
+ }
+}
+
+impl<T> Receiver<T> {
+ pub fn try_recv(&self) -> Result<T, mpsc::TryRecvError> {
+ self.rx.try_recv().and_then(|res| {
+ let _ = self.ctl.dec();
+ Ok(res)
+ })
+ }
+}
+
+impl<T> Evented for Receiver<T> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.ctl.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.ctl.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.ctl.deregister(poll)
+ }
+}
+
+/*
+ *
+ * ===== SenderCtl / ReceiverCtl =====
+ *
+ */
+
+impl SenderCtl {
+ /// Call to track that a message has been sent
+ pub fn inc(&self) -> io::Result<()> {
+ let cnt = self.inner.pending.fetch_add(1, Ordering::Acquire);
+
+ if 0 == cnt {
+ // Toggle readiness to readable
+ if let Some(set_readiness) = self.inner.set_readiness.borrow() {
+ set_readiness.set_readiness(Ready::readable())?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Clone for SenderCtl {
+ fn clone(&self) -> SenderCtl {
+ self.inner.senders.fetch_add(1, Ordering::Relaxed);
+ SenderCtl { inner: self.inner.clone() }
+ }
+}
+
+impl Drop for SenderCtl {
+ fn drop(&mut self) {
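+        // If this is the last sender, force a readiness bump so a receiver
+        // blocked in `Poll` wakes up and can observe the disconnect.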
+ if self.inner.senders.fetch_sub(1, Ordering::Release) == 1 {
+ let _ = self.inc();
+ }
+ }
+}
+
+impl ReceiverCtl {
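+    /// Call to track that a message has been received. Readiness is cleared
+    /// *before* the pending count is decremented: a sender that races in a
+    /// new message sees a non-zero count and will not set readiness itself,
+    /// so the post-decrement check below re-sets it when messages remain.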
+ pub fn dec(&self) -> io::Result<()> {
+ let first = self.inner.pending.load(Ordering::Acquire);
+
+ if first == 1 {
+ // Unset readiness
+ if let Some(set_readiness) = self.inner.set_readiness.borrow() {
+ set_readiness.set_readiness(Ready::empty())?;
+ }
+ }
+
+ // Decrement
+ let second = self.inner.pending.fetch_sub(1, Ordering::AcqRel);
+
+ if first == 1 && second > 1 {
+ // There are still pending messages. Since readiness was
+ // previously unset, it must be reset here
+ if let Some(set_readiness) = self.inner.set_readiness.borrow() {
+ set_readiness.set_readiness(Ready::readable())?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Evented for ReceiverCtl {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ if self.registration.borrow().is_some() {
+ return Err(io::Error::new(io::ErrorKind::Other, "receiver already registered"));
+ }
+
+ let (registration, set_readiness) = Registration::new(poll, token, interest, opts);
+
+ if self.inner.pending.load(Ordering::Relaxed) > 0 {
+ // TODO: Don't drop readiness
+ let _ = set_readiness.set_readiness(Ready::readable());
+ }
+
+ self.registration.fill(registration).expect("unexpected state encountered");
+ self.inner.set_readiness.fill(set_readiness).expect("unexpected state encountered");
+
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ match self.registration.borrow() {
+ Some(registration) => registration.update(poll, token, interest, opts),
+ None => Err(io::Error::new(io::ErrorKind::Other, "receiver not registered")),
+ }
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ match self.registration.borrow() {
+ Some(registration) => registration.deregister(poll),
+ None => Err(io::Error::new(io::ErrorKind::Other, "receiver not registered")),
+ }
+ }
+}
+
+/*
+ *
+ * ===== Error conversions =====
+ *
+ */
+
+impl<T> From<mpsc::SendError<T>> for SendError<T> {
+ fn from(src: mpsc::SendError<T>) -> SendError<T> {
+ SendError::Disconnected(src.0)
+ }
+}
+
+impl<T> From<io::Error> for SendError<T> {
+ fn from(src: io::Error) -> SendError<T> {
+ SendError::Io(src)
+ }
+}
+
+impl<T> From<mpsc::TrySendError<T>> for TrySendError<T> {
+ fn from(src: mpsc::TrySendError<T>) -> TrySendError<T> {
+ match src {
+ mpsc::TrySendError::Full(v) => TrySendError::Full(v),
+ mpsc::TrySendError::Disconnected(v) => TrySendError::Disconnected(v),
+ }
+ }
+}
+
+impl<T> From<mpsc::SendError<T>> for TrySendError<T> {
+ fn from(src: mpsc::SendError<T>) -> TrySendError<T> {
+ TrySendError::Disconnected(src.0)
+ }
+}
+
+impl<T> From<io::Error> for TrySendError<T> {
+ fn from(src: io::Error) -> TrySendError<T> {
+ TrySendError::Io(src)
+ }
+}
+
+/*
+ *
+ * ===== Implement Error, Debug and Display for Errors =====
+ *
+ */
+
+impl<T: Any> error::Error for SendError<T> {
+ fn description(&self) -> &str {
+ match *self {
+ SendError::Io(ref io_err) => io_err.description(),
+ SendError::Disconnected(..) => "Disconnected",
+ }
+ }
+}
+
+impl<T: Any> error::Error for TrySendError<T> {
+ fn description(&self) -> &str {
+ match *self {
+ TrySendError::Io(ref io_err) => io_err.description(),
+ TrySendError::Full(..) => "Full",
+ TrySendError::Disconnected(..) => "Disconnected",
+ }
+ }
+}
+
+impl<T> fmt::Debug for SendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_send_error(self, f)
+ }
+}
+
+impl<T> fmt::Display for SendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_send_error(self, f)
+ }
+}
+
+impl<T> fmt::Debug for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_try_send_error(self, f)
+ }
+}
+
+impl<T> fmt::Display for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_try_send_error(self, f)
+ }
+}
+
+#[inline]
+fn format_send_error<T>(e: &SendError<T>, f: &mut fmt::Formatter) -> fmt::Result {
+ match *e {
+ SendError::Io(ref io_err) => write!(f, "{}", io_err),
+ SendError::Disconnected(..) => write!(f, "Disconnected"),
+ }
+}
+
+#[inline]
+fn format_try_send_error<T>(e: &TrySendError<T>, f: &mut fmt::Formatter) -> fmt::Result {
+ match *e {
+ TrySendError::Io(ref io_err) => write!(f, "{}", io_err),
+ TrySendError::Full(..) => write!(f, "Full"),
+ TrySendError::Disconnected(..) => write!(f, "Disconnected"),
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/event_loop.rs b/third_party/rust/mio-0.6.23/src/deprecated/event_loop.rs
new file mode 100644
index 0000000000..a4c4580b3a
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/event_loop.rs
@@ -0,0 +1,346 @@
+use {channel, Poll, Events, Token};
+use event::Evented;
+use deprecated::{Handler, NotifyError};
+use event_imp::{Event, Ready, PollOpt};
+use timer::{self, Timer, Timeout};
+use std::{io, fmt, usize};
+use std::default::Default;
+use std::time::Duration;
+
+#[derive(Debug, Default, Clone)]
+pub struct EventLoopBuilder {
+ config: Config,
+}
+
+/// `EventLoop` configuration details
+#[derive(Clone, Debug)]
+struct Config {
+ // == Notifications ==
+ notify_capacity: usize,
+ messages_per_tick: usize,
+
+ // == Timer ==
+ timer_tick: Duration,
+ timer_wheel_size: usize,
+ timer_capacity: usize,
+}
+
+impl Default for Config {
+ fn default() -> Config {
+ // Default EventLoop configuration values
+ Config {
+ notify_capacity: 4_096,
+ messages_per_tick: 256,
+ timer_tick: Duration::from_millis(100),
+ timer_wheel_size: 1_024,
+ timer_capacity: 65_536,
+ }
+ }
+}
+
+impl EventLoopBuilder {
+ /// Construct a new `EventLoopBuilder` with the default configuration
+ /// values.
+ pub fn new() -> EventLoopBuilder {
+ EventLoopBuilder::default()
+ }
+
+ /// Sets the maximum number of messages that can be buffered on the event
+ /// loop's notification channel before a send will fail.
+ ///
+ /// The default value for this is 4096.
+ pub fn notify_capacity(&mut self, capacity: usize) -> &mut Self {
+ self.config.notify_capacity = capacity;
+ self
+ }
+
+ /// Sets the maximum number of messages that can be processed on any tick of
+ /// the event loop.
+ ///
+ /// The default value for this is 256.
+ pub fn messages_per_tick(&mut self, messages: usize) -> &mut Self {
+ self.config.messages_per_tick = messages;
+ self
+ }
+
+ pub fn timer_tick(&mut self, val: Duration) -> &mut Self {
+ self.config.timer_tick = val;
+ self
+ }
+
+ pub fn timer_wheel_size(&mut self, size: usize) -> &mut Self {
+ self.config.timer_wheel_size = size;
+ self
+ }
+
+ pub fn timer_capacity(&mut self, cap: usize) -> &mut Self {
+ self.config.timer_capacity = cap;
+ self
+ }
+
+ /// Constructs a new `EventLoop` using the configured values. The
+ /// `EventLoop` will not be running.
+ pub fn build<H: Handler>(self) -> io::Result<EventLoop<H>> {
+ EventLoop::configured(self.config)
+ }
+}
+
+/// Single threaded IO event loop.
+pub struct EventLoop<H: Handler> {
+ run: bool,
+ poll: Poll,
+ events: Events,
+ timer: Timer<H::Timeout>,
+ notify_tx: channel::SyncSender<H::Message>,
+ notify_rx: channel::Receiver<H::Message>,
+ config: Config,
+}
+
+// Token used to represent notifications
+const NOTIFY: Token = Token(usize::MAX - 1);
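+// Token used to represent timer events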
+const TIMER: Token = Token(usize::MAX - 2);
+
+impl<H: Handler> EventLoop<H> {
+
+ /// Constructs a new `EventLoop` using the default configuration values.
+ /// The `EventLoop` will not be running.
+ pub fn new() -> io::Result<EventLoop<H>> {
+ EventLoop::configured(Config::default())
+ }
+
+ fn configured(config: Config) -> io::Result<EventLoop<H>> {
+ // Create the IO poller
+ let poll = Poll::new()?;
+
+ let timer = timer::Builder::default()
+ .tick_duration(config.timer_tick)
+ .num_slots(config.timer_wheel_size)
+ .capacity(config.timer_capacity)
+ .build();
+
+ // Create cross thread notification queue
+ let (tx, rx) = channel::sync_channel(config.notify_capacity);
+
+ // Register the notification wakeup FD with the IO poller
+ poll.register(&rx, NOTIFY, Ready::readable(), PollOpt::edge() | PollOpt::oneshot())?;
+ poll.register(&timer, TIMER, Ready::readable(), PollOpt::edge())?;
+
+ Ok(EventLoop {
+ run: true,
+ poll,
+ timer,
+ notify_tx: tx,
+ notify_rx: rx,
+ config,
+ events: Events::with_capacity(1024),
+ })
+ }
+
+ /// Returns a sender that allows sending messages to the event loop in a
+ /// thread-safe way, waking up the event loop if needed.
+ ///
+ /// # Implementation Details
+ ///
+ /// Each [EventLoop](#) contains a lock-free queue with a pre-allocated
+ /// buffer size. The size can be changed by modifying
+ /// [EventLoopConfig.notify_capacity](struct.EventLoopConfig.html#method.notify_capacity).
+ /// When a message is sent to the EventLoop, it is first pushed on to the
+ /// queue. Then, if the EventLoop is currently running, an atomic flag is
+ /// set to indicate that the next loop iteration should be started without
+ /// waiting.
+ ///
+ /// If the loop is blocked waiting for IO events, then it is woken up. The
+ /// strategy for waking up the event loop is platform dependent. For
+ /// example, on a modern Linux OS, eventfd is used. On older OSes, a pipe
+ /// is used.
+ ///
+ /// The strategy of setting an atomic flag if the event loop is not already
+ /// sleeping allows avoiding an expensive wakeup operation if at all possible.
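+    ///
+    /// # Examples
+    ///
+    /// A sketch, assuming `MyHandler` is a unit struct implementing
+    /// `Handler` with `Message = String`:
+    ///
+    /// ```ignore
+    /// let mut event_loop = EventLoop::<MyHandler>::new().unwrap();
+    /// let sender = event_loop.channel();
+    ///
+    /// // `send` can be called from any thread; it fails with
+    /// // `NotifyError::Full` if the notify queue is at capacity.
+    /// std::thread::spawn(move || {
+    ///     sender.send("hello".to_string()).unwrap();
+    /// });
+    ///
+    /// event_loop.run(&mut MyHandler).unwrap();
+    /// ```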
+ pub fn channel(&self) -> Sender<H::Message> {
+ Sender::new(self.notify_tx.clone())
+ }
+
+ /// Schedules a timeout after the requested time interval. When the
+ /// duration has been reached,
+ /// [Handler::timeout](trait.Handler.html#method.timeout) will be invoked
+ /// passing in the supplied token.
+ ///
+ /// Returns a handle to the timeout that can be used to cancel the timeout
+ /// using [#clear_timeout](#method.clear_timeout).
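+    ///
+    /// For example (a sketch): `let handle = event_loop.timeout(token,
+    /// Duration::from_secs(1))?;` schedules a one-second timeout that can
+    /// later be cancelled with `event_loop.clear_timeout(&handle)`.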
+ pub fn timeout(&mut self, token: H::Timeout, delay: Duration) -> timer::Result<Timeout> {
+ self.timer.set_timeout(delay, token)
+ }
+
+ /// If the supplied timeout has not been triggered, cancel it such that it
+ /// will not be triggered in the future.
+ pub fn clear_timeout(&mut self, timeout: &Timeout) -> bool {
+ self.timer.cancel_timeout(&timeout).is_some()
+ }
+
+ /// Tells the event loop to exit after it is done handling all events in the
+ /// current iteration.
+ pub fn shutdown(&mut self) {
+ self.run = false;
+ }
+
+ /// Indicates whether the event loop is currently running. If it's not it has either
+ /// stopped or is scheduled to stop on the next tick.
+ pub fn is_running(&self) -> bool {
+ self.run
+ }
+
+ /// Registers an IO handle with the event loop.
+ pub fn register<E: ?Sized>(&mut self, io: &E, token: Token, interest: Ready, opt: PollOpt) -> io::Result<()>
+ where E: Evented
+ {
+ self.poll.register(io, token, interest, opt)
+ }
+
+ /// Re-Registers an IO handle with the event loop.
+ pub fn reregister<E: ?Sized>(&mut self, io: &E, token: Token, interest: Ready, opt: PollOpt) -> io::Result<()>
+ where E: Evented
+ {
+ self.poll.reregister(io, token, interest, opt)
+ }
+
+ /// Keep spinning the event loop indefinitely, and notify the handler whenever
+ /// any of the registered handles are ready.
+ pub fn run(&mut self, handler: &mut H) -> io::Result<()> {
+ self.run = true;
+
+ while self.run {
+ // Execute ticks as long as the event loop is running
+ self.run_once(handler, None)?;
+ }
+
+ Ok(())
+ }
+
+ /// Deregisters an IO handle with the event loop.
+ ///
+ /// Both kqueue and epoll will automatically clear any pending events when closing a
+ /// file descriptor (socket). In that case, this method does not need to be called
+ /// prior to dropping a connection from the slab.
+ ///
+ /// Warning: kqueue effectively builds in deregister when using edge-triggered mode with
+ /// oneshot. Calling `deregister()` on the socket will cause a TcpStream error.
+ pub fn deregister<E: ?Sized>(&mut self, io: &E) -> io::Result<()> where E: Evented {
+ self.poll.deregister(io)
+ }
+
+ /// Spin the event loop once, with a given timeout (forever if `None`),
+ /// and notify the handler if any of the registered handles become ready
+ /// during that time.
+ pub fn run_once(&mut self, handler: &mut H, timeout: Option<Duration>) -> io::Result<()> {
+ trace!("event loop tick");
+
+        // Check the registered IO handles for any new events. The poll
+        // blocks for up to the given timeout (or indefinitely for `None`),
+        // so a shutdown request only takes effect once the poll returns.
+ let events = match self.io_poll(timeout) {
+ Ok(e) => e,
+ Err(err) => {
+ if err.kind() == io::ErrorKind::Interrupted {
+ handler.interrupted(self);
+ 0
+ } else {
+ return Err(err);
+ }
+ }
+ };
+
+ self.io_process(handler, events);
+ handler.tick(self);
+ Ok(())
+ }
+
+ #[inline]
+ fn io_poll(&mut self, timeout: Option<Duration>) -> io::Result<usize> {
+ self.poll.poll(&mut self.events, timeout)
+ }
+
+ // Process IO events that have been previously polled
+ fn io_process(&mut self, handler: &mut H, cnt: usize) {
+ let mut i = 0;
+
+ trace!("io_process(..); cnt={}; len={}", cnt, self.events.len());
+
+ // Iterate over the notifications. Each event provides the token
+ // it was registered with (which usually represents, at least, the
+ // handle that the event is about) as well as information about
+ // what kind of event occurred (readable, writable, signal, etc.)
+ while i < cnt {
+ let evt = self.events.get(i).unwrap();
+
+ trace!("event={:?}; idx={:?}", evt, i);
+
+ match evt.token() {
+ NOTIFY => self.notify(handler),
+ TIMER => self.timer_process(handler),
+ _ => self.io_event(handler, evt)
+ }
+
+ i += 1;
+ }
+ }
+
+ fn io_event(&mut self, handler: &mut H, evt: Event) {
+ handler.ready(self, evt.token(), evt.readiness());
+ }
+
+ fn notify(&mut self, handler: &mut H) {
+ for _ in 0..self.config.messages_per_tick {
+ match self.notify_rx.try_recv() {
+ Ok(msg) => handler.notify(self, msg),
+ _ => break,
+ }
+ }
+
+ // Re-register
+ let _ = self.poll.reregister(&self.notify_rx, NOTIFY, Ready::readable(), PollOpt::edge() | PollOpt::oneshot());
+ }
+
+ fn timer_process(&mut self, handler: &mut H) {
+ while let Some(t) = self.timer.poll() {
+ handler.timeout(self, t);
+ }
+ }
+}
+
+impl<H: Handler> fmt::Debug for EventLoop<H> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("EventLoop")
+ .field("run", &self.run)
+ .field("poll", &self.poll)
+ .field("config", &self.config)
+ .finish()
+ }
+}
+
+/// Sends messages to the EventLoop from other threads.
+pub struct Sender<M> {
+ tx: channel::SyncSender<M>
+}
+
+impl<M> fmt::Debug for Sender<M> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "Sender<?> {{ ... }}")
+ }
+}
+
+impl<M> Clone for Sender<M> {
+ fn clone(&self) -> Sender<M> {
+ Sender { tx: self.tx.clone() }
+ }
+}
+
+impl<M> Sender<M> {
+ fn new(tx: channel::SyncSender<M>) -> Sender<M> {
+ Sender { tx }
+ }
+
+ pub fn send(&self, msg: M) -> Result<(), NotifyError<M>> {
+ self.tx.try_send(msg)?;
+ Ok(())
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/handler.rs b/third_party/rust/mio-0.6.23/src/deprecated/handler.rs
new file mode 100644
index 0000000000..db1bc314a7
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/handler.rs
@@ -0,0 +1,37 @@
+use {Ready, Token};
+use deprecated::{EventLoop};
+
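+/// Callbacks invoked by `EventLoop`. Every method has a no-op default, so an
+/// implementation only overrides the hooks it needs; for example (a sketch):
+///
+/// ```ignore
+/// struct Echo;
+///
+/// impl Handler for Echo {
+///     type Timeout = ();
+///     type Message = String;
+///
+///     fn notify(&mut self, event_loop: &mut EventLoop<Self>, msg: String) {
+///         println!("got: {}", msg);
+///         event_loop.shutdown();
+///     }
+/// }
+/// ```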
+#[allow(unused_variables)]
+pub trait Handler: Sized {
+ type Timeout;
+ type Message;
+
+ /// Invoked when the socket represented by `token` is ready to be operated
+ /// on. `events` indicates the specific operations that are
+ /// ready to be performed.
+ ///
+ /// For example, when a TCP socket is ready to be read from, `events` will
+ /// have `readable` set. When the socket is ready to be written to,
+ /// `events` will have `writable` set.
+ ///
+ /// This function will only be invoked a single time per socket per event
+ /// loop tick.
+ fn ready(&mut self, event_loop: &mut EventLoop<Self>, token: Token, events: Ready) {
+ }
+
+ /// Invoked when a message has been received via the event loop's channel.
+ fn notify(&mut self, event_loop: &mut EventLoop<Self>, msg: Self::Message) {
+ }
+
+ /// Invoked when a timeout has completed.
+ fn timeout(&mut self, event_loop: &mut EventLoop<Self>, timeout: Self::Timeout) {
+ }
+
+ /// Invoked when `EventLoop` has been interrupted by a signal interrupt.
+ fn interrupted(&mut self, event_loop: &mut EventLoop<Self>) {
+ }
+
+ /// Invoked at the end of an event loop tick.
+ fn tick(&mut self, event_loop: &mut EventLoop<Self>) {
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/io.rs b/third_party/rust/mio-0.6.23/src/deprecated/io.rs
new file mode 100644
index 0000000000..16ff27993b
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/io.rs
@@ -0,0 +1,28 @@
+use ::io::MapNonBlock;
+use std::io::{self, Read, Write};
+
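+/// A `Read` variant for non-blocking handles: `Ok(None)` means the read would
+/// have blocked (`io::ErrorKind::WouldBlock`), distinguishing "not ready yet"
+/// from a real error.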
+pub trait TryRead {
+ fn try_read(&mut self, buf: &mut [u8]) -> io::Result<Option<usize>>;
+}
+
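+/// The non-blocking write counterpart of `TryRead`: `Ok(None)` means the
+/// write would have blocked.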
+pub trait TryWrite {
+ fn try_write(&mut self, buf: &[u8]) -> io::Result<Option<usize>>;
+}
+
+impl<T: Read> TryRead for T {
+ fn try_read(&mut self, dst: &mut [u8]) -> io::Result<Option<usize>> {
+ self.read(dst).map_non_block()
+ }
+}
+
+impl<T: Write> TryWrite for T {
+ fn try_write(&mut self, src: &[u8]) -> io::Result<Option<usize>> {
+ self.write(src).map_non_block()
+ }
+}
+
+pub trait TryAccept {
+ type Output;
+
+ fn accept(&self) -> io::Result<Option<Self::Output>>;
+}
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/mod.rs b/third_party/rust/mio-0.6.23/src/deprecated/mod.rs
new file mode 100644
index 0000000000..124a2eee3d
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/mod.rs
@@ -0,0 +1,36 @@
+#![allow(deprecated)]
+
+mod event_loop;
+mod io;
+mod handler;
+mod notify;
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub mod unix;
+
+pub use self::event_loop::{
+ EventLoop,
+ EventLoopBuilder,
+ Sender,
+};
+pub use self::io::{
+ TryAccept,
+ TryRead,
+ TryWrite,
+};
+pub use self::handler::{
+ Handler,
+};
+pub use self::notify::{
+ NotifyError,
+};
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub use self::unix::{
+ pipe,
+ PipeReader,
+ PipeWriter,
+ UnixListener,
+ UnixSocket,
+ UnixStream,
+ Shutdown,
+};
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/notify.rs b/third_party/rust/mio-0.6.23/src/deprecated/notify.rs
new file mode 100644
index 0000000000..c8432d6b0e
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/notify.rs
@@ -0,0 +1,63 @@
+use {channel};
+use std::{fmt, io, error, any};
+
+pub enum NotifyError<T> {
+ Io(io::Error),
+ Full(T),
+ Closed(Option<T>),
+}
+
+impl<M> fmt::Debug for NotifyError<M> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ NotifyError::Io(ref e) => {
+ write!(fmt, "NotifyError::Io({:?})", e)
+ }
+ NotifyError::Full(..) => {
+ write!(fmt, "NotifyError::Full(..)")
+ }
+ NotifyError::Closed(..) => {
+ write!(fmt, "NotifyError::Closed(..)")
+ }
+ }
+ }
+}
+
+impl<M> fmt::Display for NotifyError<M> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ NotifyError::Io(ref e) => {
+ write!(fmt, "IO error: {}", e)
+ }
+ NotifyError::Full(..) => write!(fmt, "Full"),
+ NotifyError::Closed(..) => write!(fmt, "Closed")
+ }
+ }
+}
+
+impl<M: any::Any> error::Error for NotifyError<M> {
+ fn description(&self) -> &str {
+ match *self {
+ NotifyError::Io(ref err) => err.description(),
+ NotifyError::Closed(..) => "The receiving end has hung up",
+ NotifyError::Full(..) => "Queue is full"
+ }
+ }
+
+ fn cause(&self) -> Option<&error::Error> {
+ match *self {
+ NotifyError::Io(ref err) => Some(err),
+ _ => None
+ }
+ }
+}
+
+impl<M> From<channel::TrySendError<M>> for NotifyError<M> {
+ fn from(src: channel::TrySendError<M>) -> NotifyError<M> {
+ match src {
+ channel::TrySendError::Io(e) => NotifyError::Io(e),
+ channel::TrySendError::Full(v) => NotifyError::Full(v),
+ channel::TrySendError::Disconnected(v) => NotifyError::Closed(Some(v)),
+ }
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/unix.rs b/third_party/rust/mio-0.6.23/src/deprecated/unix.rs
new file mode 100644
index 0000000000..97c6a60ba4
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/unix.rs
@@ -0,0 +1,420 @@
+use {io, sys, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use deprecated::TryAccept;
+use io::MapNonBlock;
+use std::io::{Read, Write};
+use std::path::Path;
+pub use std::net::Shutdown;
+use std::process;
+
+pub use sys::Io;
+
+#[derive(Debug)]
+pub struct UnixSocket {
+ sys: sys::UnixSocket,
+}
+
+impl UnixSocket {
+ /// Returns a new, unbound, non-blocking Unix domain socket
+ pub fn stream() -> io::Result<UnixSocket> {
+ sys::UnixSocket::stream()
+ .map(From::from)
+ }
+
+ /// Connect the socket to the specified address
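+    ///
+    /// Returns the stream and a flag indicating whether the connection
+    /// completed immediately. If the flag is `false`, the connect is still in
+    /// progress; poll the socket for writability to learn when it finishes.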
+ pub fn connect<P: AsRef<Path> + ?Sized>(self, addr: &P) -> io::Result<(UnixStream, bool)> {
+ let complete = match self.sys.connect(addr) {
+ Ok(()) => true,
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => false,
+ Err(e) => return Err(e),
+ };
+ Ok((From::from(self.sys), complete))
+ }
+
+ /// Bind the socket to the specified address
+ pub fn bind<P: AsRef<Path> + ?Sized>(&self, addr: &P) -> io::Result<()> {
+ self.sys.bind(addr)
+ }
+
+ /// Listen for incoming requests
+ pub fn listen(self, backlog: usize) -> io::Result<UnixListener> {
+ self.sys.listen(backlog)?;
+ Ok(From::from(self.sys))
+ }
+
+ pub fn try_clone(&self) -> io::Result<UnixSocket> {
+ self.sys.try_clone()
+ .map(From::from)
+ }
+}
+
+impl Evented for UnixSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl From<sys::UnixSocket> for UnixSocket {
+ fn from(sys: sys::UnixSocket) -> UnixSocket {
+ UnixSocket { sys }
+ }
+}
+
+/*
+ *
+ * ===== UnixStream =====
+ *
+ */
+
+#[derive(Debug)]
+pub struct UnixStream {
+ sys: sys::UnixSocket,
+}
+
+impl UnixStream {
+ pub fn connect<P: AsRef<Path> + ?Sized>(path: &P) -> io::Result<UnixStream> {
+ UnixSocket::stream()
+ .and_then(|sock| sock.connect(path))
+ .map(|(sock, _)| sock)
+ }
+
+ pub fn try_clone(&self) -> io::Result<UnixStream> {
+ self.sys.try_clone()
+ .map(From::from)
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<usize> {
+ self.sys.shutdown(how).map(|_| 0)
+ }
+
+ pub fn read_recv_fd(&mut self, buf: &mut [u8]) -> io::Result<(usize, Option<RawFd>)> {
+ self.sys.read_recv_fd(buf)
+ }
+
+ pub fn try_read_recv_fd(&mut self, buf: &mut [u8]) -> io::Result<Option<(usize, Option<RawFd>)>> {
+ self.read_recv_fd(buf).map_non_block()
+ }
+
+ pub fn write_send_fd(&mut self, buf: &[u8], fd: RawFd) -> io::Result<usize> {
+ self.sys.write_send_fd(buf, fd)
+ }
+
+ pub fn try_write_send_fd(&mut self, buf: &[u8], fd: RawFd) -> io::Result<Option<usize>> {
+ self.write_send_fd(buf, fd).map_non_block()
+ }
+}
+
+impl Read for UnixStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.sys.read(buf)
+ }
+}
+
+impl Write for UnixStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.sys.write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.sys.flush()
+ }
+}
+
+impl Evented for UnixStream {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl From<sys::UnixSocket> for UnixStream {
+ fn from(sys: sys::UnixSocket) -> UnixStream {
+ UnixStream { sys }
+ }
+}
+
+/*
+ *
+ * ===== UnixListener =====
+ *
+ */
+
+#[derive(Debug)]
+pub struct UnixListener {
+ sys: sys::UnixSocket,
+}
+
+impl UnixListener {
+ pub fn bind<P: AsRef<Path> + ?Sized>(addr: &P) -> io::Result<UnixListener> {
+ UnixSocket::stream().and_then(|sock| {
+ sock.bind(addr)?;
+ sock.listen(256)
+ })
+ }
+
+ pub fn accept(&self) -> io::Result<UnixStream> {
+ self.sys.accept().map(From::from)
+ }
+
+ pub fn try_clone(&self) -> io::Result<UnixListener> {
+ self.sys.try_clone().map(From::from)
+ }
+}
+
+impl Evented for UnixListener {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl TryAccept for UnixListener {
+ type Output = UnixStream;
+
+ fn accept(&self) -> io::Result<Option<UnixStream>> {
+ UnixListener::accept(self).map_non_block()
+ }
+}
+
+impl From<sys::UnixSocket> for UnixListener {
+ fn from(sys: sys::UnixSocket) -> UnixListener {
+ UnixListener { sys }
+ }
+}
+
+/*
+ *
+ * ===== Pipe =====
+ *
+ */
+
+pub fn pipe() -> io::Result<(PipeReader, PipeWriter)> {
+ let (rd, wr) = sys::pipe()?;
+ Ok((From::from(rd), From::from(wr)))
+}
+
+#[derive(Debug)]
+pub struct PipeReader {
+ io: Io,
+}
+
+impl PipeReader {
+ pub fn from_stdout(stdout: process::ChildStdout) -> io::Result<Self> {
+        sys::set_nonblock(stdout.as_raw_fd())?;
+ Ok(PipeReader::from(unsafe { Io::from_raw_fd(stdout.into_raw_fd()) }))
+ }
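+
+    /// Like `from_stdout`, but wraps the child process's stderr.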
+ pub fn from_stderr(stderr: process::ChildStderr) -> io::Result<Self> {
+        sys::set_nonblock(stderr.as_raw_fd())?;
+ Ok(PipeReader::from(unsafe { Io::from_raw_fd(stderr.into_raw_fd()) }))
+ }
+}
+
+impl Read for PipeReader {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.read(buf)
+ }
+}
+
+impl<'a> Read for &'a PipeReader {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (&self.io).read(buf)
+ }
+}
+
+impl Evented for PipeReader {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.io.deregister(poll)
+ }
+}
+
+impl From<Io> for PipeReader {
+ fn from(io: Io) -> PipeReader {
+ PipeReader { io }
+ }
+}
+
+#[derive(Debug)]
+pub struct PipeWriter {
+ io: Io,
+}
+
+impl PipeWriter {
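+    /// Wraps a child process's stdin in a non-blocking `PipeWriter` so
+    /// that it can be registered with `Poll`.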
+ pub fn from_stdin(stdin: process::ChildStdin) -> io::Result<Self> {
+        sys::set_nonblock(stdin.as_raw_fd())?;
+ Ok(PipeWriter::from(unsafe { Io::from_raw_fd(stdin.into_raw_fd()) }))
+ }
+}
+
+impl Write for PipeWriter {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.io.write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.io.flush()
+ }
+}
+
+impl<'a> Write for &'a PipeWriter {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&self.io).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.io).flush()
+ }
+}
+
+impl Evented for PipeWriter {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.io.deregister(poll)
+ }
+}
+
+impl From<Io> for PipeWriter {
+ fn from(io: Io) -> PipeWriter {
+ PipeWriter { io }
+ }
+}
+
+/*
+ *
+ * ===== Conversions =====
+ *
+ */
+
+use std::os::unix::io::{RawFd, IntoRawFd, AsRawFd, FromRawFd};
+
+impl IntoRawFd for UnixSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixSocket {
+ UnixSocket { sys: FromRawFd::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for UnixStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixStream {
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixStream {
+ UnixStream { sys: FromRawFd::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for UnixListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixListener {
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixListener {
+ UnixListener { sys: FromRawFd::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for PipeReader {
+ fn into_raw_fd(self) -> RawFd {
+ self.io.into_raw_fd()
+ }
+}
+
+impl AsRawFd for PipeReader {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.as_raw_fd()
+ }
+}
+
+impl FromRawFd for PipeReader {
+ unsafe fn from_raw_fd(fd: RawFd) -> PipeReader {
+ PipeReader { io: FromRawFd::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for PipeWriter {
+ fn into_raw_fd(self) -> RawFd {
+ self.io.into_raw_fd()
+ }
+}
+
+impl AsRawFd for PipeWriter {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.as_raw_fd()
+ }
+}
+
+impl FromRawFd for PipeWriter {
+ unsafe fn from_raw_fd(fd: RawFd) -> PipeWriter {
+ PipeWriter { io: FromRawFd::from_raw_fd(fd) }
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/event_imp.rs b/third_party/rust/mio-0.6.23/src/event_imp.rs
new file mode 100644
index 0000000000..7573ebca83
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/event_imp.rs
@@ -0,0 +1,1162 @@
+use {Poll, Token};
+use std::{fmt, io, ops};
+
+/// A value that may be registered with `Poll`
+///
+/// Values that implement `Evented` can be registered with `Poll`. Users of Mio
+/// should not use the `Evented` trait functions directly. Instead, the
+/// equivalent functions on `Poll` should be used.
+///
+/// See [`Poll`] for more details.
+///
+/// # Implementing `Evented`
+///
+/// There are two types of `Evented` values.
+///
+/// * **System** handles, which are backed by sockets or other system handles.
+/// These `Evented` handles will be monitored by the system selector. In this
+/// case, an implementation of `Evented` delegates to a lower level handle.
+///
+/// * **User** handles, which are driven entirely in user space using
+/// [`Registration`] and [`SetReadiness`]. In this case, the implementer takes
+/// responsibility for driving the readiness state changes.
+///
+/// [`Poll`]: ../struct.Poll.html
+/// [`Registration`]: ../struct.Registration.html
+/// [`SetReadiness`]: ../struct.SetReadiness.html
+///
+/// # Examples
+///
+/// Implementing `Evented` on a struct containing a socket:
+///
+/// ```
+/// use mio::{Ready, Poll, PollOpt, Token};
+/// use mio::event::Evented;
+/// use mio::net::TcpStream;
+///
+/// use std::io;
+///
+/// pub struct MyEvented {
+/// socket: TcpStream,
+/// }
+///
+/// impl Evented for MyEvented {
+/// fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// // Delegate the `register` call to `socket`
+/// self.socket.register(poll, token, interest, opts)
+/// }
+///
+/// fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// // Delegate the `reregister` call to `socket`
+/// self.socket.reregister(poll, token, interest, opts)
+/// }
+///
+/// fn deregister(&self, poll: &Poll) -> io::Result<()> {
+/// // Delegate the `deregister` call to `socket`
+/// self.socket.deregister(poll)
+/// }
+/// }
+/// ```
+///
+/// Implementing `Evented` using [`Registration`] and [`SetReadiness`]:
+///
+/// ```
+/// use mio::{Ready, Registration, Poll, PollOpt, Token};
+/// use mio::event::Evented;
+///
+/// use std::io;
+/// use std::time::Instant;
+/// use std::thread;
+///
+/// pub struct Deadline {
+/// when: Instant,
+/// registration: Registration,
+/// }
+///
+/// impl Deadline {
+/// pub fn new(when: Instant) -> Deadline {
+/// let (registration, set_readiness) = Registration::new2();
+///
+/// thread::spawn(move || {
+/// let now = Instant::now();
+///
+/// if now < when {
+/// thread::sleep(when - now);
+/// }
+///
+/// set_readiness.set_readiness(Ready::readable());
+/// });
+///
+/// Deadline {
+/// when: when,
+/// registration: registration,
+/// }
+/// }
+///
+/// pub fn is_elapsed(&self) -> bool {
+/// Instant::now() >= self.when
+/// }
+/// }
+///
+/// impl Evented for Deadline {
+/// fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// self.registration.register(poll, token, interest, opts)
+/// }
+///
+/// fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// self.registration.reregister(poll, token, interest, opts)
+/// }
+///
+/// fn deregister(&self, poll: &Poll) -> io::Result<()> {
+/// self.registration.deregister(poll)
+/// }
+/// }
+/// ```
+pub trait Evented {
+ /// Register `self` with the given `Poll` instance.
+ ///
+ /// This function should not be called directly. Use [`Poll::register`]
+ /// instead. Implementors should handle registration by either delegating
+ /// the call to another `Evented` type or creating a [`Registration`].
+ ///
+ /// [`Poll::register`]: ../struct.Poll.html#method.register
+ /// [`Registration`]: ../struct.Registration.html
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>;
+
+ /// Re-register `self` with the given `Poll` instance.
+ ///
+ /// This function should not be called directly. Use [`Poll::reregister`]
+ /// instead. Implementors should handle re-registration by either delegating
+ /// the call to another `Evented` type or calling
+ /// [`SetReadiness::set_readiness`].
+ ///
+ /// [`Poll::reregister`]: ../struct.Poll.html#method.reregister
+ /// [`SetReadiness::set_readiness`]: ../struct.SetReadiness.html#method.set_readiness
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>;
+
+ /// Deregister `self` from the given `Poll` instance
+ ///
+ /// This function should not be called directly. Use [`Poll::deregister`]
+ /// instead. Implementors should handle deregistration by either delegating
+ /// the call to another `Evented` type or by dropping the [`Registration`]
+ /// associated with `self`.
+ ///
+ /// [`Poll::deregister`]: ../struct.Poll.html#method.deregister
+ /// [`Registration`]: ../struct.Registration.html
+ fn deregister(&self, poll: &Poll) -> io::Result<()>;
+}
+
+impl Evented for Box<Evented> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.as_ref().deregister(poll)
+ }
+}
+
+impl<T: Evented> Evented for Box<T> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.as_ref().deregister(poll)
+ }
+}
+
+impl<T: Evented> Evented for ::std::sync::Arc<T> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.as_ref().deregister(poll)
+ }
+}
+
+/// Options supplied when registering an `Evented` handle with `Poll`
+///
+/// `PollOpt` values can be combined together using the various bitwise
+/// operators.
+///
+/// For high level documentation on polling and poll options, see [`Poll`].
+///
+/// # Examples
+///
+/// ```
+/// use mio::PollOpt;
+///
+/// let opts = PollOpt::edge() | PollOpt::oneshot();
+///
+/// assert!(opts.is_edge());
+/// assert!(opts.is_oneshot());
+/// assert!(!opts.is_level());
+/// ```
+///
+/// [`Poll`]: struct.Poll.html
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct PollOpt(usize);
+
+impl PollOpt {
+ /// Return a `PollOpt` representing no set options.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::empty();
+ ///
+ /// assert!(!opt.is_level());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn empty() -> PollOpt {
+ PollOpt(0)
+ }
+
+ /// Return a `PollOpt` representing edge-triggered notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::edge();
+ ///
+ /// assert!(opt.is_edge());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn edge() -> PollOpt {
+ PollOpt(0b0001)
+ }
+
+ /// Return a `PollOpt` representing level-triggered notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::level();
+ ///
+ /// assert!(opt.is_level());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn level() -> PollOpt {
+ PollOpt(0b0010)
+ }
+
+ /// Return a `PollOpt` representing oneshot notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot();
+ ///
+ /// assert!(opt.is_oneshot());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn oneshot() -> PollOpt {
+ PollOpt(0b0100)
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn urgent() -> PollOpt {
+ PollOpt(0b1000)
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn all() -> PollOpt {
+ PollOpt::edge() | PollOpt::level() | PollOpt::oneshot()
+ }
+
+ /// Returns true if the options include edge-triggered notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::edge();
+ ///
+ /// assert!(opt.is_edge());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_edge(&self) -> bool {
+ self.contains(PollOpt::edge())
+ }
+
+ /// Returns true if the options include level-triggered notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::level();
+ ///
+ /// assert!(opt.is_level());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_level(&self) -> bool {
+ self.contains(PollOpt::level())
+ }
+
+    /// Returns true if the options include oneshot notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot();
+ ///
+ /// assert!(opt.is_oneshot());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_oneshot(&self) -> bool {
+ self.contains(PollOpt::oneshot())
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[allow(deprecated)]
+ #[inline]
+ pub fn is_urgent(&self) -> bool {
+ self.contains(PollOpt::urgent())
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn bits(&self) -> usize {
+ self.0
+ }
+
+ /// Returns true if `self` is a superset of `other`.
+ ///
+ /// `other` may represent more than one option, in which case the function
+ /// only returns true if `self` contains all of the options specified in
+ /// `other`.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot();
+ ///
+ /// assert!(opt.contains(PollOpt::oneshot()));
+ /// assert!(!opt.contains(PollOpt::edge()));
+ /// ```
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot() | PollOpt::edge();
+ ///
+ /// assert!(opt.contains(PollOpt::oneshot()));
+ /// assert!(opt.contains(PollOpt::edge()));
+ /// ```
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot() | PollOpt::edge();
+ ///
+ /// assert!(!PollOpt::oneshot().contains(opt));
+ /// assert!(opt.contains(opt));
+ /// assert!((opt | PollOpt::level()).contains(opt));
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn contains(&self, other: PollOpt) -> bool {
+ (*self & other) == other
+ }
+
+ /// Adds all options represented by `other` into `self`.
+ ///
+ /// This is equivalent to `*self = *self | other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let mut opt = PollOpt::empty();
+ /// opt.insert(PollOpt::oneshot());
+ ///
+ /// assert!(opt.is_oneshot());
+ /// ```
+ #[inline]
+ pub fn insert(&mut self, other: PollOpt) {
+ self.0 |= other.0;
+ }
+
+ /// Removes all options represented by `other` from `self`.
+ ///
+ /// This is equivalent to `*self = *self & !other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let mut opt = PollOpt::oneshot();
+ /// opt.remove(PollOpt::oneshot());
+ ///
+ /// assert!(!opt.is_oneshot());
+ /// ```
+ #[inline]
+ pub fn remove(&mut self, other: PollOpt) {
+ self.0 &= !other.0;
+ }
+}
+
+impl ops::BitOr for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn bitor(self, other: PollOpt) -> PollOpt {
+ PollOpt(self.0 | other.0)
+ }
+}
+
+impl ops::BitXor for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn bitxor(self, other: PollOpt) -> PollOpt {
+ PollOpt(self.0 ^ other.0)
+ }
+}
+
+impl ops::BitAnd for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn bitand(self, other: PollOpt) -> PollOpt {
+ PollOpt(self.0 & other.0)
+ }
+}
+
+impl ops::Sub for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn sub(self, other: PollOpt) -> PollOpt {
+ PollOpt(self.0 & !other.0)
+ }
+}
+
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+impl ops::Not for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn not(self) -> PollOpt {
+ PollOpt(!self.0)
+ }
+}
+
+impl fmt::Debug for PollOpt {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let mut one = false;
+ let flags = [
+ (PollOpt::edge(), "Edge-Triggered"),
+ (PollOpt::level(), "Level-Triggered"),
+ (PollOpt::oneshot(), "OneShot")];
+
+ for &(flag, msg) in &flags {
+ if self.contains(flag) {
+ if one { write!(fmt, " | ")? }
+ write!(fmt, "{}", msg)?;
+
+ one = true
+ }
+ }
+
+ if !one {
+ fmt.write_str("(empty)")?;
+ }
+
+ Ok(())
+ }
+}
+
+#[test]
+fn test_debug_pollopt() {
+ assert_eq!("(empty)", format!("{:?}", PollOpt::empty()));
+ assert_eq!("Edge-Triggered", format!("{:?}", PollOpt::edge()));
+ assert_eq!("Level-Triggered", format!("{:?}", PollOpt::level()));
+ assert_eq!("OneShot", format!("{:?}", PollOpt::oneshot()));
+}
+
+/// A set of readiness event kinds
+///
+/// `Ready` is a set of operation descriptors indicating which kinds of
+/// operations are ready to be performed. For example, `Ready::readable()`
+/// indicates that the associated `Evented` handle is ready to perform a
+/// `read` operation.
+///
+/// This struct only represents portable event kinds. Since only readable and
+/// writable events are guaranteed to be raised on all systems, those are the
+/// only ones available via the `Ready` struct. There are also platform specific
+/// extensions to `Ready`, i.e. `UnixReady`, which provide additional readiness
+/// event kinds only available on unix platforms.
+///
+/// `Ready` values can be combined together using the various bitwise operators.
+///
+/// For high level documentation on polling and readiness, see [`Poll`].
+///
+/// # Examples
+///
+/// ```
+/// use mio::Ready;
+///
+/// let ready = Ready::readable() | Ready::writable();
+///
+/// assert!(ready.is_readable());
+/// assert!(ready.is_writable());
+/// ```
+///
+/// [`Poll`]: struct.Poll.html
+/// [`readable`]: #method.readable
+/// [`writable`]: #method.writable
+/// [readiness]: struct.Poll.html#readiness-operations
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct Ready(usize);
+
+const READABLE: usize = 0b00001;
+const WRITABLE: usize = 0b00010;
+
+// These are deprecated and have been moved into the platform-specific
+// implementations.
+const ERROR: usize = 0b00100;
+const HUP: usize = 0b01000;
+
+impl Ready {
+ /// Returns the empty `Ready` set.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::empty();
+ ///
+ /// assert!(!ready.is_readable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ pub fn empty() -> Ready {
+ Ready(0)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use Ready::empty instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn none() -> Ready {
+ Ready::empty()
+ }
+
+ /// Returns a `Ready` representing readable readiness.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::readable();
+ ///
+ /// assert!(ready.is_readable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn readable() -> Ready {
+ Ready(READABLE)
+ }
+
+ /// Returns a `Ready` representing writable readiness.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::writable();
+ ///
+ /// assert!(ready.is_writable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn writable() -> Ready {
+ Ready(WRITABLE)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use UnixReady instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn error() -> Ready {
+ Ready(ERROR)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use UnixReady instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn hup() -> Ready {
+ Ready(HUP)
+ }
+
+ /// Returns a `Ready` representing readiness for all operations.
+ ///
+ /// This includes platform specific operations as well (`hup`, `aio`,
+ /// `error`, `lio`, `pri`).
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::all();
+ ///
+ /// assert!(ready.is_readable());
+ /// assert!(ready.is_writable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn all() -> Ready {
+ Ready(READABLE | WRITABLE | ::sys::READY_ALL)
+ }
+
+ /// Returns true if `Ready` is the empty set
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::empty();
+ /// assert!(ready.is_empty());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ *self == Ready::empty()
+ }
+
+ #[deprecated(since = "0.6.5", note = "use Ready::is_empty instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn is_none(&self) -> bool {
+ self.is_empty()
+ }
+
+ /// Returns true if the value includes readable readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::readable();
+ ///
+ /// assert!(ready.is_readable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_readable(&self) -> bool {
+ self.contains(Ready::readable())
+ }
+
+ /// Returns true if the value includes writable readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::writable();
+ ///
+ /// assert!(ready.is_writable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_writable(&self) -> bool {
+ self.contains(Ready::writable())
+ }
+
+ #[deprecated(since = "0.6.5", note = "use UnixReady instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn is_error(&self) -> bool {
+ self.contains(Ready(ERROR))
+ }
+
+ #[deprecated(since = "0.6.5", note = "use UnixReady instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn is_hup(&self) -> bool {
+ self.contains(Ready(HUP))
+ }
+
+ /// Adds all readiness represented by `other` into `self`.
+ ///
+ /// This is equivalent to `*self = *self | other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let mut readiness = Ready::empty();
+ /// readiness.insert(Ready::readable());
+ ///
+ /// assert!(readiness.is_readable());
+ /// ```
+ #[inline]
+ pub fn insert<T: Into<Self>>(&mut self, other: T) {
+ let other = other.into();
+ self.0 |= other.0;
+ }
+
+    /// Removes all readiness represented by `other` from `self`.
+ ///
+ /// This is equivalent to `*self = *self & !other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let mut readiness = Ready::readable();
+ /// readiness.remove(Ready::readable());
+ ///
+ /// assert!(!readiness.is_readable());
+ /// ```
+ #[inline]
+ pub fn remove<T: Into<Self>>(&mut self, other: T) {
+ let other = other.into();
+ self.0 &= !other.0;
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn bits(&self) -> usize {
+ self.0
+ }
+
+ /// Returns true if `self` is a superset of `other`.
+ ///
+    /// `other` may represent more than one readiness operation, in which case
+ /// the function only returns true if `self` contains all readiness
+ /// specified in `other`.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let readiness = Ready::readable();
+ ///
+ /// assert!(readiness.contains(Ready::readable()));
+ /// assert!(!readiness.contains(Ready::writable()));
+ /// ```
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let readiness = Ready::readable() | Ready::writable();
+ ///
+ /// assert!(readiness.contains(Ready::readable()));
+ /// assert!(readiness.contains(Ready::writable()));
+ /// ```
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let readiness = Ready::readable() | Ready::writable();
+ ///
+ /// assert!(!Ready::readable().contains(readiness));
+ /// assert!(readiness.contains(readiness));
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn contains<T: Into<Self>>(&self, other: T) -> bool {
+ let other = other.into();
+ (*self & other) == other
+ }
+
+ /// Create a `Ready` instance using the given `usize` representation.
+ ///
+ /// The `usize` representation must have been obtained from a call to
+ /// `Ready::as_usize`.
+ ///
+ /// The `usize` representation must be treated as opaque. There is no
+ /// guaranteed correlation between the returned value and platform defined
+ /// constants. Also, there is no guarantee that the `usize` representation
+ /// will remain constant across patch releases of Mio.
+ ///
+    /// This function is mainly provided to allow the caller to load a
+ /// readiness value from an `AtomicUsize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::readable();
+ /// let ready_usize = ready.as_usize();
+ /// let ready2 = Ready::from_usize(ready_usize);
+ ///
+ /// assert_eq!(ready, ready2);
+ /// ```
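+    ///
+    /// Storing readiness in an `AtomicUsize` (a sketch of the intended
+    /// pattern):
+    ///
+    /// ```
+    /// use mio::Ready;
+    /// use std::sync::atomic::{AtomicUsize, Ordering};
+    ///
+    /// let cell = AtomicUsize::new(Ready::empty().as_usize());
+    /// cell.store(Ready::readable().as_usize(), Ordering::SeqCst);
+    ///
+    /// let ready = Ready::from_usize(cell.load(Ordering::SeqCst));
+    /// assert!(ready.is_readable());
+    /// ```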
+ pub fn from_usize(val: usize) -> Ready {
+ Ready(val)
+ }
+
+ /// Returns a `usize` representation of the `Ready` value.
+ ///
+ /// This `usize` representation must be treated as opaque. There is no
+ /// guaranteed correlation between the returned value and platform defined
+ /// constants. Also, there is no guarantee that the `usize` representation
+ /// will remain constant across patch releases of Mio.
+ ///
+ /// This function is mainly provided to allow the caller to store a
+ /// readiness value in an `AtomicUsize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::readable();
+ /// let ready_usize = ready.as_usize();
+ /// let ready2 = Ready::from_usize(ready_usize);
+ ///
+ /// assert_eq!(ready, ready2);
+ /// ```
+ pub fn as_usize(&self) -> usize {
+ self.0
+ }
+}
+
+impl<T: Into<Ready>> ops::BitOr<T> for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn bitor(self, other: T) -> Ready {
+ Ready(self.0 | other.into().0)
+ }
+}
+
+impl<T: Into<Ready>> ops::BitOrAssign<T> for Ready {
+ #[inline]
+ fn bitor_assign(&mut self, other: T) {
+ self.0 |= other.into().0;
+ }
+}
+
+impl<T: Into<Ready>> ops::BitXor<T> for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn bitxor(self, other: T) -> Ready {
+ Ready(self.0 ^ other.into().0)
+ }
+}
+
+impl<T: Into<Ready>> ops::BitXorAssign<T> for Ready {
+ #[inline]
+ fn bitxor_assign(&mut self, other: T) {
+ self.0 ^= other.into().0;
+ }
+}
+
+impl<T: Into<Ready>> ops::BitAnd<T> for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn bitand(self, other: T) -> Ready {
+ Ready(self.0 & other.into().0)
+ }
+}
+
+impl<T: Into<Ready>> ops::BitAndAssign<T> for Ready {
+ #[inline]
+ fn bitand_assign(&mut self, other: T) {
+ self.0 &= other.into().0
+ }
+}
+
+impl<T: Into<Ready>> ops::Sub<T> for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn sub(self, other: T) -> Ready {
+ Ready(self.0 & !other.into().0)
+ }
+}
+
+impl<T: Into<Ready>> ops::SubAssign<T> for Ready {
+ #[inline]
+ fn sub_assign(&mut self, other: T) {
+ self.0 &= !other.into().0;
+ }
+}
+
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+impl ops::Not for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn not(self) -> Ready {
+ Ready(!self.0)
+ }
+}
+
+impl fmt::Debug for Ready {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let mut one = false;
+ let flags = [
+ (Ready::readable(), "Readable"),
+ (Ready::writable(), "Writable"),
+ (Ready(ERROR), "Error"),
+ (Ready(HUP), "Hup")];
+
+ for &(flag, msg) in &flags {
+ if self.contains(flag) {
+ if one { write!(fmt, " | ")? }
+ write!(fmt, "{}", msg)?;
+
+ one = true
+ }
+ }
+
+ if !one {
+ fmt.write_str("(empty)")?;
+ }
+
+ Ok(())
+ }
+}
+
+#[test]
+fn test_debug_ready() {
+ assert_eq!("(empty)", format!("{:?}", Ready::empty()));
+ assert_eq!("Readable", format!("{:?}", Ready::readable()));
+ assert_eq!("Writable", format!("{:?}", Ready::writable()));
+}
+
+/// A readiness event returned by [`Poll::poll`].
+///
+/// `Event` is a [readiness state] paired with a [`Token`]. It is returned by
+/// [`Poll::poll`].
+///
+/// For more documentation on polling and events, see [`Poll`].
+///
+/// # Examples
+///
+/// ```
+/// use mio::{Ready, Token};
+/// use mio::event::Event;
+///
+/// let event = Event::new(Ready::readable() | Ready::writable(), Token(0));
+///
+/// assert_eq!(event.readiness(), Ready::readable() | Ready::writable());
+/// assert_eq!(event.token(), Token(0));
+/// ```
+///
+/// [`Poll::poll`]: ../struct.Poll.html#method.poll
+/// [`Poll`]: ../struct.Poll.html
+/// [readiness state]: ../struct.Ready.html
+/// [`Token`]: ../struct.Token.html
+#[derive(Copy, Clone, Eq, PartialEq, Debug)]
+pub struct Event {
+ kind: Ready,
+ token: Token
+}
+
+impl Event {
+ /// Creates a new `Event` containing `readiness` and `token`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::{Ready, Token};
+ /// use mio::event::Event;
+ ///
+ /// let event = Event::new(Ready::readable() | Ready::writable(), Token(0));
+ ///
+ /// assert_eq!(event.readiness(), Ready::readable() | Ready::writable());
+ /// assert_eq!(event.token(), Token(0));
+ /// ```
+ pub fn new(readiness: Ready, token: Token) -> Event {
+ Event {
+ kind: readiness,
+ token,
+ }
+ }
+
+ /// Returns the event's readiness.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::{Ready, Token};
+ /// use mio::event::Event;
+ ///
+ /// let event = Event::new(Ready::readable() | Ready::writable(), Token(0));
+ ///
+ /// assert_eq!(event.readiness(), Ready::readable() | Ready::writable());
+ /// ```
+ pub fn readiness(&self) -> Ready {
+ self.kind
+ }
+
+ #[deprecated(since = "0.6.5", note = "use Event::readiness()")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn kind(&self) -> Ready {
+ self.kind
+ }
+
+ /// Returns the event's token.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::{Ready, Token};
+ /// use mio::event::Event;
+ ///
+ /// let event = Event::new(Ready::readable() | Ready::writable(), Token(0));
+ ///
+ /// assert_eq!(event.token(), Token(0));
+ /// ```
+ pub fn token(&self) -> Token {
+ self.token
+ }
+}
+
+/*
+ *
+ * ===== Mio internal helpers =====
+ *
+ */
+
+pub fn ready_as_usize(events: Ready) -> usize {
+ events.0
+}
+
+pub fn opt_as_usize(opt: PollOpt) -> usize {
+ opt.0
+}
+
+pub fn ready_from_usize(events: usize) -> Ready {
+ Ready(events)
+}
+
+pub fn opt_from_usize(opt: usize) -> PollOpt {
+ PollOpt(opt)
+}
+
+// Used internally to mutate an `Event` in place
+// Not used on all platforms
+#[allow(dead_code)]
+pub fn kind_mut(event: &mut Event) -> &mut Ready {
+ &mut event.kind
+}
diff --git a/third_party/rust/mio-0.6.23/src/io.rs b/third_party/rust/mio-0.6.23/src/io.rs
new file mode 100644
index 0000000000..275001387d
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/io.rs
@@ -0,0 +1,35 @@
+// Re-export the io::Result / Error types for convenience
+pub use std::io::{Read, Write, Result, Error, ErrorKind};
+
+// TODO: Delete this
+/// A helper trait to provide the map_non_block function on Results.
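+///
+/// For example, a read that fails with `ErrorKind::WouldBlock` becomes
+/// `Ok(None)`, while `Ok(n)` becomes `Ok(Some(n))`; any other error is
+/// passed through unchanged. A sketch (the trait is crate-internal, so
+/// this is not compiled as a doctest):
+///
+/// ```ignore
+/// let res: Result<usize> = Err(ErrorKind::WouldBlock.into());
+/// assert_eq!(res.map_non_block().unwrap(), None);
+/// ```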
+pub trait MapNonBlock<T> {
+ /// Maps a `Result<T>` to a `Result<Option<T>>` by converting
+ /// operation-would-block errors into `Ok(None)`.
+ fn map_non_block(self) -> Result<Option<T>>;
+}
+
+impl<T> MapNonBlock<T> for Result<T> {
+ fn map_non_block(self) -> Result<Option<T>> {
+ use std::io::ErrorKind::WouldBlock;
+
+ match self {
+ Ok(value) => Ok(Some(value)),
+ Err(err) => {
+ if let WouldBlock = err.kind() {
+ Ok(None)
+ } else {
+ Err(err)
+ }
+ }
+ }
+ }
+}
+
+#[cfg(feature = "with-deprecated")]
+pub mod deprecated {
+ /// Returns a std `WouldBlock` error without allocating
+ pub fn would_block() -> ::std::io::Error {
+ ::std::io::ErrorKind::WouldBlock.into()
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/lazycell.rs b/third_party/rust/mio-0.6.23/src/lazycell.rs
new file mode 100644
index 0000000000..681fb2f529
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/lazycell.rs
@@ -0,0 +1,554 @@
+// Original work Copyright (c) 2014 The Rust Project Developers
+// Modified work Copyright (c) 2016-2018 Nikita Pekin and the lazycell contributors
+// See the README.md file at the top-level directory of this distribution.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(missing_docs)]
+#![allow(unused)]
+
+//! This crate provides a `LazyCell` struct which acts as a lazily filled
+//! `Cell`.
+//!
+//! With a `RefCell`, the inner contents cannot be borrowed for the lifetime
+//! of the entire object, but only for the lifetime of the borrows returned.
+//! A `LazyCell` is a variation on `RefCell` which allows borrows to be tied
+//! to the lifetime of the outer object.
+//!
+//! `AtomicLazyCell` is a variant that uses an atomic variable to manage
+//! coordination in a thread-safe fashion. The limitation of an `AtomicLazyCell`
+//! is that after it is initialized, it can't be modified.
+
+use std::cell::UnsafeCell;
+use std::mem;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// A lazily filled `Cell`, with mutable contents.
+///
+/// A `LazyCell` is completely frozen once filled, **unless** you have `&mut`
+/// access to it, in which case `LazyCell::borrow_mut` may be used to mutate the
+/// contents.
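+///
+/// An illustrative sketch (crate-internal type, so this is not compiled as
+/// a doctest):
+///
+/// ```ignore
+/// let cell = LazyCell::new();
+/// assert!(cell.borrow().is_none());     // not yet filled
+/// cell.fill(42).unwrap();               // the first fill succeeds
+/// assert_eq!(cell.borrow(), Some(&42)); // borrow is tied to the cell
+/// assert!(cell.fill(7).is_err());       // later fills are rejected
+/// ```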
+#[derive(Debug, Default)]
+pub struct LazyCell<T> {
+ inner: UnsafeCell<Option<T>>,
+}
+
+impl<T> LazyCell<T> {
+ /// Creates a new, empty, `LazyCell`.
+ pub fn new() -> LazyCell<T> {
+ LazyCell { inner: UnsafeCell::new(None) }
+ }
+
+ /// Put a value into this cell.
+ ///
+    /// This function will return `Err(value)` if the cell is already full.
+ pub fn fill(&self, value: T) -> Result<(), T> {
+ let slot = unsafe { &mut *self.inner.get() };
+ if slot.is_some() {
+ return Err(value);
+ }
+ *slot = Some(value);
+
+ Ok(())
+ }
+
+ /// Put a value into this cell.
+ ///
+ /// Note that this function is infallible but requires `&mut self`. By
+ /// requiring `&mut self` we're guaranteed that no active borrows to this
+ /// cell can exist so we can always fill in the value. This may not always
+ /// be usable, however, as `&mut self` may not be possible to borrow.
+ ///
+ /// # Return value
+ ///
+ /// This function returns the previous value, if any.
+ pub fn replace(&mut self, value: T) -> Option<T> {
+ mem::replace(unsafe { &mut *self.inner.get() }, Some(value))
+ }
+
+ /// Test whether this cell has been previously filled.
+ pub fn filled(&self) -> bool {
+ self.borrow().is_some()
+ }
+
+ /// Borrows the contents of this lazy cell for the duration of the cell
+ /// itself.
+ ///
+ /// This function will return `Some` if the cell has been previously
+ /// initialized, and `None` if it has not yet been initialized.
+ pub fn borrow(&self) -> Option<&T> {
+ unsafe { &*self.inner.get() }.as_ref()
+ }
+
+ /// Borrows the contents of this lazy cell mutably for the duration of the cell
+ /// itself.
+ ///
+ /// This function will return `Some` if the cell has been previously
+ /// initialized, and `None` if it has not yet been initialized.
+ pub fn borrow_mut(&mut self) -> Option<&mut T> {
+ unsafe { &mut *self.inner.get() }.as_mut()
+ }
+
+ /// Borrows the contents of this lazy cell for the duration of the cell
+ /// itself.
+ ///
+ /// If the cell has not yet been filled, the cell is first filled using the
+ /// function provided.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the cell becomes filled as a side effect of `f`.
+ pub fn borrow_with<F: FnOnce() -> T>(&self, f: F) -> &T {
+ if let Some(value) = self.borrow() {
+ return value;
+ }
+ let value = f();
+ if self.fill(value).is_err() {
+ panic!("borrow_with: cell was filled by closure")
+ }
+ self.borrow().unwrap()
+ }
+
+ /// Borrows the contents of this `LazyCell` mutably for the duration of the
+ /// cell itself.
+ ///
+ /// If the cell has not yet been filled, the cell is first filled using the
+ /// function provided.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the cell becomes filled as a side effect of `f`.
+ pub fn borrow_mut_with<F: FnOnce() -> T>(&mut self, f: F) -> &mut T {
+ if !self.filled() {
+ let value = f();
+ if self.fill(value).is_err() {
+ panic!("borrow_mut_with: cell was filled by closure")
+ }
+ }
+
+ self.borrow_mut().unwrap()
+ }
+
+ /// Same as `borrow_with`, but allows the initializing function to fail.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the cell becomes filled as a side effect of `f`.
+ pub fn try_borrow_with<E, F>(&self, f: F) -> Result<&T, E>
+ where F: FnOnce() -> Result<T, E>
+ {
+ if let Some(value) = self.borrow() {
+ return Ok(value);
+ }
+ let value = f()?;
+ if self.fill(value).is_err() {
+ panic!("try_borrow_with: cell was filled by closure")
+ }
+ Ok(self.borrow().unwrap())
+ }
+
+ /// Same as `borrow_mut_with`, but allows the initializing function to fail.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the cell becomes filled as a side effect of `f`.
+ pub fn try_borrow_mut_with<E, F>(&mut self, f: F) -> Result<&mut T, E>
+ where F: FnOnce() -> Result<T, E>
+ {
+ if self.filled() {
+ return Ok(self.borrow_mut().unwrap());
+ }
+ let value = f()?;
+ if self.fill(value).is_err() {
+ panic!("try_borrow_mut_with: cell was filled by closure")
+ }
+ Ok(self.borrow_mut().unwrap())
+ }
+
+ /// Consumes this `LazyCell`, returning the underlying value.
+ pub fn into_inner(self) -> Option<T> {
+ // Rust 1.25 changed UnsafeCell::into_inner() from unsafe to safe
+ // function. This unsafe can be removed when supporting Rust older than
+ // 1.25 is not needed.
+ #[allow(unused_unsafe)]
+ unsafe { self.inner.into_inner() }
+ }
+}
+
+impl<T: Copy> LazyCell<T> {
+ /// Returns a copy of the contents of the lazy cell.
+ ///
+ /// This function will return `Some` if the cell has been previously initialized,
+ /// and `None` if it has not yet been initialized.
+ pub fn get(&self) -> Option<T> {
+ unsafe { *self.inner.get() }
+ }
+}
+
+// Tracks the AtomicLazyCell inner state
+const NONE: usize = 0;
+const LOCK: usize = 1;
+const SOME: usize = 2;
+
+/// A lazily filled and thread-safe `Cell`, with frozen contents.
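+///
+/// An illustrative sketch (crate-internal type, so this is not compiled as
+/// a doctest): the cell can be filled once and then read from many threads.
+///
+/// ```ignore
+/// let cell = AtomicLazyCell::new();
+/// assert!(cell.fill(1).is_ok());       // the first fill wins
+/// assert!(cell.fill(2).is_err());      // later fills are rejected
+/// assert_eq!(cell.borrow(), Some(&1));
+/// ```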
+#[derive(Debug, Default)]
+pub struct AtomicLazyCell<T> {
+ inner: UnsafeCell<Option<T>>,
+ state: AtomicUsize,
+}
+
+impl<T> AtomicLazyCell<T> {
+ /// Creates a new, empty, `AtomicLazyCell`.
+ pub fn new() -> AtomicLazyCell<T> {
+ Self {
+ inner: UnsafeCell::new(None),
+ state: AtomicUsize::new(NONE),
+ }
+ }
+
+ /// Put a value into this cell.
+ ///
+    /// This function will return `Err(value)` if the cell is already full.
+ pub fn fill(&self, t: T) -> Result<(), T> {
+ if NONE != self.state.compare_and_swap(NONE, LOCK, Ordering::Acquire) {
+ return Err(t);
+ }
+
+ unsafe { *self.inner.get() = Some(t) };
+
+ if LOCK != self.state.compare_and_swap(LOCK, SOME, Ordering::Release) {
+ panic!("unable to release lock");
+ }
+
+ Ok(())
+ }
+
+ /// Put a value into this cell.
+ ///
+ /// Note that this function is infallible but requires `&mut self`. By
+ /// requiring `&mut self` we're guaranteed that no active borrows to this
+ /// cell can exist so we can always fill in the value. This may not always
+ /// be usable, however, as `&mut self` may not be possible to borrow.
+ ///
+ /// # Return value
+ ///
+ /// This function returns the previous value, if any.
+ pub fn replace(&mut self, value: T) -> Option<T> {
+ match mem::replace(self.state.get_mut(), SOME) {
+ NONE | SOME => {}
+ _ => panic!("cell in inconsistent state"),
+ }
+ mem::replace(unsafe { &mut *self.inner.get() }, Some(value))
+ }
+
+ /// Test whether this cell has been previously filled.
+ pub fn filled(&self) -> bool {
+ self.state.load(Ordering::Acquire) == SOME
+ }
+
+ /// Borrows the contents of this lazy cell for the duration of the cell
+ /// itself.
+ ///
+ /// This function will return `Some` if the cell has been previously
+ /// initialized, and `None` if it has not yet been initialized.
+ pub fn borrow(&self) -> Option<&T> {
+ match self.state.load(Ordering::Acquire) {
+ SOME => unsafe { &*self.inner.get() }.as_ref(),
+ _ => None,
+ }
+ }
+
+    /// Consumes this `AtomicLazyCell`, returning the underlying value.
+ pub fn into_inner(self) -> Option<T> {
+ // Rust 1.25 changed UnsafeCell::into_inner() from unsafe to safe
+ // function. This unsafe can be removed when supporting Rust older than
+ // 1.25 is not needed.
+ #[allow(unused_unsafe)]
+ unsafe { self.inner.into_inner() }
+ }
+}
+
+impl<T: Copy> AtomicLazyCell<T> {
+ /// Returns a copy of the contents of the lazy cell.
+ ///
+ /// This function will return `Some` if the cell has been previously initialized,
+ /// and `None` if it has not yet been initialized.
+ pub fn get(&self) -> Option<T> {
+ match self.state.load(Ordering::Acquire) {
+ SOME => unsafe { *self.inner.get() },
+ _ => None,
+ }
+ }
+}
+
+unsafe impl<T: Sync + Send> Sync for AtomicLazyCell<T> {}
+
+unsafe impl<T: Send> Send for AtomicLazyCell<T> {}
+
+#[cfg(test)]
+mod tests {
+ use super::{AtomicLazyCell, LazyCell};
+
+ #[test]
+ fn test_borrow_from_empty() {
+ let lazycell: LazyCell<usize> = LazyCell::new();
+
+ let value = lazycell.borrow();
+ assert_eq!(value, None);
+
+ let value = lazycell.get();
+ assert_eq!(value, None);
+ }
+
+ #[test]
+ fn test_fill_and_borrow() {
+ let lazycell = LazyCell::new();
+
+ assert!(!lazycell.filled());
+ lazycell.fill(1).unwrap();
+ assert!(lazycell.filled());
+
+ let value = lazycell.borrow();
+ assert_eq!(value, Some(&1));
+
+ let value = lazycell.get();
+ assert_eq!(value, Some(1));
+ }
+
+ #[test]
+ fn test_borrow_mut() {
+ let mut lazycell = LazyCell::new();
+ assert!(lazycell.borrow_mut().is_none());
+
+ lazycell.fill(1).unwrap();
+ assert_eq!(lazycell.borrow_mut(), Some(&mut 1));
+
+ *lazycell.borrow_mut().unwrap() = 2;
+ assert_eq!(lazycell.borrow_mut(), Some(&mut 2));
+
+ // official way to reset the cell
+ lazycell = LazyCell::new();
+ assert!(lazycell.borrow_mut().is_none());
+ }
+
+ #[test]
+ fn test_already_filled_error() {
+ let lazycell = LazyCell::new();
+
+ lazycell.fill(1).unwrap();
+ assert_eq!(lazycell.fill(1), Err(1));
+ }
+
+ #[test]
+ fn test_borrow_with() {
+ let lazycell = LazyCell::new();
+
+ let value = lazycell.borrow_with(|| 1);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ fn test_borrow_with_already_filled() {
+ let lazycell = LazyCell::new();
+ lazycell.fill(1).unwrap();
+
+ let value = lazycell.borrow_with(|| 1);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ fn test_borrow_with_not_called_when_filled() {
+ let lazycell = LazyCell::new();
+
+ lazycell.fill(1).unwrap();
+
+ let value = lazycell.borrow_with(|| 2);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_borrow_with_sound_with_reentrancy() {
+ // Kudos to dbaupp for discovering this issue
+ // https://www.reddit.com/r/rust/comments/5vs9rt/lazycell_a_rust_library_providing_a_lazilyfilled/de527xm/
+ let lazycell: LazyCell<Box<i32>> = LazyCell::new();
+
+ let mut reference: Option<&i32> = None;
+
+ lazycell.borrow_with(|| {
+ let _ = lazycell.fill(Box::new(1));
+ reference = lazycell.borrow().map(|r| &**r);
+ Box::new(2)
+ });
+ }
+
+ #[test]
+ fn test_borrow_mut_with() {
+ let mut lazycell = LazyCell::new();
+
+ {
+ let value = lazycell.borrow_mut_with(|| 1);
+ assert_eq!(&mut 1, value);
+ *value = 2;
+ }
+ assert_eq!(&2, lazycell.borrow().unwrap());
+ }
+
+ #[test]
+ fn test_borrow_mut_with_already_filled() {
+ let mut lazycell = LazyCell::new();
+ lazycell.fill(1).unwrap();
+
+ let value = lazycell.borrow_mut_with(|| 1);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ fn test_borrow_mut_with_not_called_when_filled() {
+ let mut lazycell = LazyCell::new();
+
+ lazycell.fill(1).unwrap();
+
+ let value = lazycell.borrow_mut_with(|| 2);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ fn test_try_borrow_with_ok() {
+ let lazycell = LazyCell::new();
+ let result = lazycell.try_borrow_with::<(), _>(|| Ok(1));
+ assert_eq!(result, Ok(&1));
+ }
+
+ #[test]
+ fn test_try_borrow_with_err() {
+ let lazycell = LazyCell::<()>::new();
+ let result = lazycell.try_borrow_with(|| Err(1));
+ assert_eq!(result, Err(1));
+ }
+
+ #[test]
+ fn test_try_borrow_with_already_filled() {
+ let lazycell = LazyCell::new();
+ lazycell.fill(1).unwrap();
+ let result = lazycell.try_borrow_with::<(), _>(|| unreachable!());
+ assert_eq!(result, Ok(&1));
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_try_borrow_with_sound_with_reentrancy() {
+ let lazycell: LazyCell<Box<i32>> = LazyCell::new();
+
+ let mut reference: Option<&i32> = None;
+
+ let _ = lazycell.try_borrow_with::<(), _>(|| {
+ let _ = lazycell.fill(Box::new(1));
+ reference = lazycell.borrow().map(|r| &**r);
+ Ok(Box::new(2))
+ });
+ }
+
+ #[test]
+ fn test_try_borrow_mut_with_ok() {
+ let mut lazycell = LazyCell::new();
+ {
+ let result = lazycell.try_borrow_mut_with::<(), _>(|| Ok(1));
+ assert_eq!(result, Ok(&mut 1));
+ *result.unwrap() = 2;
+ }
+ assert_eq!(&mut 2, lazycell.borrow().unwrap());
+ }
+
+ #[test]
+ fn test_try_borrow_mut_with_err() {
+ let mut lazycell = LazyCell::<()>::new();
+ let result = lazycell.try_borrow_mut_with(|| Err(1));
+ assert_eq!(result, Err(1));
+ }
+
+ #[test]
+ fn test_try_borrow_mut_with_already_filled() {
+ let mut lazycell = LazyCell::new();
+ lazycell.fill(1).unwrap();
+ let result = lazycell.try_borrow_mut_with::<(), _>(|| unreachable!());
+ assert_eq!(result, Ok(&mut 1));
+ }
+
+ #[test]
+ fn test_into_inner() {
+ let lazycell = LazyCell::new();
+
+ lazycell.fill(1).unwrap();
+ let value = lazycell.into_inner();
+ assert_eq!(value, Some(1));
+ }
+
+ #[test]
+ fn test_atomic_borrow_from_empty() {
+ let lazycell: AtomicLazyCell<usize> = AtomicLazyCell::new();
+
+ let value = lazycell.borrow();
+ assert_eq!(value, None);
+
+ let value = lazycell.get();
+ assert_eq!(value, None);
+ }
+
+ #[test]
+ fn test_atomic_fill_and_borrow() {
+ let lazycell = AtomicLazyCell::new();
+
+ assert!(!lazycell.filled());
+ lazycell.fill(1).unwrap();
+ assert!(lazycell.filled());
+
+ let value = lazycell.borrow();
+ assert_eq!(value, Some(&1));
+
+ let value = lazycell.get();
+ assert_eq!(value, Some(1));
+ }
+
+ #[test]
+ fn test_atomic_already_filled_panic() {
+ let lazycell = AtomicLazyCell::new();
+
+ lazycell.fill(1).unwrap();
+ assert_eq!(1, lazycell.fill(1).unwrap_err());
+ }
+
+ #[test]
+ fn test_atomic_into_inner() {
+ let lazycell = AtomicLazyCell::new();
+
+ lazycell.fill(1).unwrap();
+ let value = lazycell.into_inner();
+ assert_eq!(value, Some(1));
+ }
+
+ #[test]
+ fn normal_replace() {
+ let mut cell = LazyCell::new();
+ assert_eq!(cell.fill(1), Ok(()));
+ assert_eq!(cell.replace(2), Some(1));
+ assert_eq!(cell.replace(3), Some(2));
+ assert_eq!(cell.borrow(), Some(&3));
+
+ let mut cell = LazyCell::new();
+ assert_eq!(cell.replace(2), None);
+ }
+
+ #[test]
+ fn atomic_replace() {
+ let mut cell = AtomicLazyCell::new();
+ assert_eq!(cell.fill(1), Ok(()));
+ assert_eq!(cell.replace(2), Some(1));
+ assert_eq!(cell.replace(3), Some(2));
+ assert_eq!(cell.borrow(), Some(&3));
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/lib.rs b/third_party/rust/mio-0.6.23/src/lib.rs
new file mode 100644
index 0000000000..96f704603e
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/lib.rs
@@ -0,0 +1,308 @@
+#![doc(html_root_url = "https://docs.rs/mio/0.6.23")]
+// Mio targets old versions of the Rust compiler. In order to do this, it
+// uses deprecated APIs.
+#![allow(bare_trait_objects, deprecated, unknown_lints)]
+#![deny(missing_docs, missing_debug_implementations)]
+#![cfg_attr(test, deny(warnings))]
+
+// Many of mio's public methods violate this lint, but they can't be fixed
+// without a breaking change.
+#![cfg_attr(feature = "cargo-clippy", allow(clippy::trivially_copy_pass_by_ref))]
+
+//! A fast, low-level IO library for Rust focusing on non-blocking APIs, event
+//! notification, and other useful utilities for building high performance IO
+//! apps.
+//!
+//! # Features
+//!
+//! * Non-blocking TCP, UDP
+//! * I/O event notification queue backed by epoll, kqueue, and IOCP
+//! * Zero allocations at runtime
+//! * Platform specific extensions
+//!
+//! # Non-goals
+//!
+//! The following are specifically omitted from Mio and are left to the user or higher-level libraries.
+//!
+//! * File operations
+//! * Thread pools / multi-threaded event loop
+//! * Timers
+//!
+//! # Platforms
+//!
+//! Currently supported platforms:
+//!
+//! * Linux
+//! * OS X
+//! * Windows
+//! * FreeBSD
+//! * NetBSD
+//! * Android
+//! * iOS
+//!
+//! mio can handle interfacing with each of the event notification systems of the aforementioned platforms. The details of
+//! their implementation are further discussed in [`Poll`].
+//!
+//! # Usage
+//!
+//! Using mio starts by creating a [`Poll`], which reads events from the OS
+//! and puts them into [`Events`]. You can then handle the IO events from the
+//! OS with it.
+//!
+//! For more detail, see [`Poll`].
+//!
+//! [`Poll`]: struct.Poll.html
+//! [`Events`]: struct.Events.html
+//!
+//! # Example
+//!
+//! ```
+//! use mio::*;
+//! use mio::net::{TcpListener, TcpStream};
+//!
+//! // Setup some tokens to allow us to identify which event is
+//! // for which socket.
+//! const SERVER: Token = Token(0);
+//! const CLIENT: Token = Token(1);
+//!
+//! let addr = "127.0.0.1:13265".parse().unwrap();
+//!
+//! // Setup the server socket
+//! let server = TcpListener::bind(&addr).unwrap();
+//!
+//! // Create a poll instance
+//! let poll = Poll::new().unwrap();
+//!
+//! // Start listening for incoming connections
+//! poll.register(&server, SERVER, Ready::readable(),
+//! PollOpt::edge()).unwrap();
+//!
+//! // Setup the client socket
+//! let sock = TcpStream::connect(&addr).unwrap();
+//!
+//! // Register the socket
+//! poll.register(&sock, CLIENT, Ready::readable(),
+//! PollOpt::edge()).unwrap();
+//!
+//! // Create storage for events
+//! let mut events = Events::with_capacity(1024);
+//!
+//! loop {
+//! poll.poll(&mut events, None).unwrap();
+//!
+//! for event in events.iter() {
+//! match event.token() {
+//! SERVER => {
+//! // Accept and drop the socket immediately, this will close
+//! // the socket and notify the client of the EOF.
+//! let _ = server.accept();
+//! }
+//! CLIENT => {
+//! // The server just shuts down the socket, let's just exit
+//! // from our event loop.
+//! return;
+//! }
+//! _ => unreachable!(),
+//! }
+//! }
+//! }
+//!
+//! ```
+
+extern crate net2;
+extern crate iovec;
+extern crate slab;
+
+#[cfg(target_os = "fuchsia")]
+extern crate fuchsia_zircon as zircon;
+#[cfg(target_os = "fuchsia")]
+extern crate fuchsia_zircon_sys as zircon_sys;
+
+#[cfg(unix)]
+extern crate libc;
+
+#[cfg(windows)]
+extern crate miow;
+
+#[cfg(windows)]
+extern crate winapi;
+
+#[macro_use]
+extern crate log;
+
+mod event_imp;
+mod io;
+mod poll;
+mod sys;
+mod token;
+mod lazycell;
+
+pub mod net;
+
+#[deprecated(since = "0.6.5", note = "use mio-extras instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod channel;
+
+#[deprecated(since = "0.6.5", note = "use mio-extras instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod timer;
+
+#[deprecated(since = "0.6.5", note = "update to use `Poll`")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod deprecated;
+
+#[deprecated(since = "0.6.5", note = "use iovec crate directly")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use iovec::IoVec;
+
+#[deprecated(since = "0.6.6", note = "use net module instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod tcp {
+ pub use net::{TcpListener, TcpStream};
+ pub use std::net::Shutdown;
+}
+
+#[deprecated(since = "0.6.6", note = "use net module instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod udp;
+
+pub use poll::{
+ Poll,
+ Registration,
+ SetReadiness,
+};
+pub use event_imp::{
+ PollOpt,
+ Ready,
+};
+pub use token::Token;
+
+pub mod event {
+ //! Readiness event types and utilities.
+
+ pub use super::poll::{Events, Iter};
+ pub use super::event_imp::{Event, Evented};
+}
+
+pub use event::{
+ Events,
+};
+
+#[deprecated(since = "0.6.5", note = "use event:: instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use event::{Event, Evented};
+
+#[deprecated(since = "0.6.5", note = "use event::Iter instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use poll::Iter as EventsIter;
+
+#[deprecated(since = "0.6.5", note = "std::io::Error can avoid the allocation now")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use io::deprecated::would_block;
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub mod unix {
+ //! Unix only extensions
+ pub use sys::{
+ EventedFd,
+ };
+ pub use sys::unix::UnixReady;
+}
+
+#[cfg(target_os = "fuchsia")]
+pub mod fuchsia {
+ //! Fuchsia-only extensions
+ //!
+ //! # Stability
+ //!
+ //! This module depends on the [magenta-sys crate](https://crates.io/crates/magenta-sys)
+ //! and so might introduce breaking changes, even on minor releases,
+ //! so long as that crate remains unstable.
+ pub use sys::{
+ EventedHandle,
+ };
+ pub use sys::fuchsia::{FuchsiaReady, zx_signals_t};
+}
+
+/// Windows-only extensions to the mio crate.
+///
+/// Mio on Windows is currently implemented with IOCP for a high-performance
+/// implementation of asynchronous I/O. Mio then provides TCP and UDP as sample
+/// bindings for the system to connect networking types to asynchronous I/O. On
+/// Unix this scheme is then also extensible to all other file descriptors with
+/// the `EventedFd` type, but on Windows no such analog is available. The
+/// purpose of this module, however, is to similarly provide a mechanism for
+/// foreign I/O types to get hooked up into the IOCP event loop.
+///
+/// This module provides two types for interfacing with a custom IOCP handle:
+///
+/// * `Binding` - this type is intended to govern binding with mio's `Poll`
+/// type. Each I/O object should contain an instance of `Binding` that's
+/// interfaced with for the implementation of the `Evented` trait. The
+/// `register`, `reregister`, and `deregister` methods for the `Evented` trait
+/// all have rough analogs with `Binding`.
+///
+/// Note that this type **does not handle readiness**. That is, this type does
+/// not handle whether sockets are readable/writable/etc. It's intended that
+/// IOCP types will internally manage this state with a `SetReadiness` type
+/// from the `poll` module. The `SetReadiness` is typically lazily created on
+/// the first time that `Evented::register` is called and then stored in the
+/// I/O object.
+///
+/// Also note that for types which represent streams of bytes the mio
+/// interface of *readiness* doesn't map directly to the Windows model of
+/// *completion*. This means that types will have to perform internal
+/// buffering to ensure that a readiness interface can be provided. For a
+/// sample implementation see the TCP/UDP modules in mio itself.
+///
+/// * `Overlapped` - this type is intended to be used as the concrete instances
+/// of the `OVERLAPPED` type that most win32 methods expect. It's crucial, for
+/// safety, that all asynchronous operations are initiated with an instance of
+/// `Overlapped` and not another instantiation of `OVERLAPPED`.
+///
+/// Mio's `Overlapped` type is created with a function pointer that receives
/// an `OVERLAPPED_ENTRY` type when called. This `OVERLAPPED_ENTRY` type is
+/// defined in the `winapi` crate. Whenever a completion is posted to an IOCP
+/// object the `OVERLAPPED` that was signaled will be interpreted as
+/// `Overlapped` in the mio crate and this function pointer will be invoked.
+/// Through this function pointer, and through the `OVERLAPPED` pointer,
+/// implementations can handle management of I/O events.
+///
+/// When put together these two types enable custom Windows handles to be
+/// registered with mio's event loops. The `Binding` type is used to associate
/// handles and the `Overlapped` type is used to execute I/O operations. When
/// an I/O operation completes, a custom function pointer is called; it
/// typically updates the `SetReadiness` stored by the `Evented` methods,
/// which is how the completion is surfaced to the mio event loop.
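+///
+/// As a rough sketch (not real mio code; `MyHandle` and its fields are
+/// hypothetical), a custom IOCP-backed type is typically laid out like this:
+///
+/// ```ignore
+/// struct MyHandle {
+///     binding: windows::Binding,       // associates the handle with `Poll`
+///     readiness: Option<SetReadiness>, // created lazily in `Evented::register`
+///     read: windows::Overlapped,       // passed to overlapped Win32 calls
+/// }
+/// ```
+///
+/// The completion routine handed to `Overlapped` flips the stored
+/// `SetReadiness`, which is how a completion eventually surfaces as a
+/// readiness event from `Poll::poll`.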
+#[cfg(windows)]
+pub mod windows {
+
+ pub use sys::{Overlapped, Binding};
+}
+
+#[cfg(feature = "with-deprecated")]
+mod convert {
+ use std::time::Duration;
+
+ const NANOS_PER_MILLI: u32 = 1_000_000;
+ const MILLIS_PER_SEC: u64 = 1_000;
+
+ /// Convert a `Duration` to milliseconds, rounding up and saturating at
+ /// `u64::MAX`.
+ ///
+ /// The saturating is fine because `u64::MAX` milliseconds are still many
+ /// million years.
+ pub fn millis(duration: Duration) -> u64 {
+ // Round up.
+ let millis = (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
+ duration.as_secs().saturating_mul(MILLIS_PER_SEC).saturating_add(u64::from(millis))
+ }
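+
+    // Illustrative sanity checks (not in upstream mio) for the round-up and
+    // saturation behavior documented above.
+    #[test]
+    fn millis_rounds_up_and_saturates() {
+        assert_eq!(millis(Duration::new(1, 1)), 1_001);
+        assert_eq!(millis(Duration::new(0, 1_999_999)), 2);
+        assert_eq!(millis(Duration::new(::std::u64::MAX, 0)), ::std::u64::MAX);
+    }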
+}
diff --git a/third_party/rust/mio-0.6.23/src/net/mod.rs b/third_party/rust/mio-0.6.23/src/net/mod.rs
new file mode 100644
index 0000000000..53025c6869
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/net/mod.rs
@@ -0,0 +1,14 @@
+//! Networking primitives
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+mod tcp;
+mod udp;
+
+pub use self::tcp::{TcpListener, TcpStream};
+pub use self::udp::UdpSocket;
diff --git a/third_party/rust/mio-0.6.23/src/net/tcp.rs b/third_party/rust/mio-0.6.23/src/net/tcp.rs
new file mode 100644
index 0000000000..cc74ab9451
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/net/tcp.rs
@@ -0,0 +1,737 @@
+//! Primitives for working with TCP
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+use std::fmt;
+use std::io::{Read, Write};
+use std::net::{self, SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr};
+use std::time::Duration;
+
+use net2::TcpBuilder;
+use iovec::IoVec;
+
+use {io, sys, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use poll::SelectorId;
+
+/*
+ *
+ * ===== TcpStream =====
+ *
+ */
+
+/// A non-blocking TCP stream between a local socket and a remote socket.
+///
+/// The socket will be closed when the value is dropped.
+///
+/// # Examples
+///
+/// ```
+/// # use std::net::TcpListener;
+/// # use std::error::Error;
+/// #
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// # let _listener = TcpListener::bind("127.0.0.1:34254")?;
+/// use mio::{Events, Ready, Poll, PollOpt, Token};
+/// use mio::net::TcpStream;
+/// use std::time::Duration;
+///
+/// let stream = TcpStream::connect(&"127.0.0.1:34254".parse()?)?;
+///
+/// let poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register the socket with `Poll`
+/// poll.register(&stream, Token(0), Ready::writable(),
+/// PollOpt::edge())?;
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// // The socket might be ready at this point
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+pub struct TcpStream {
+ sys: sys::TcpStream,
+ selector_id: SelectorId,
+}
+
+use std::net::Shutdown;
+
+// TODO: remove when fuchsia's set_nonblocking is fixed in libstd
+#[cfg(target_os = "fuchsia")]
+fn set_nonblocking(stream: &net::TcpStream) -> io::Result<()> {
+ sys::set_nonblock(
+ ::std::os::unix::io::AsRawFd::as_raw_fd(stream))
+}
+#[cfg(not(target_os = "fuchsia"))]
+fn set_nonblocking(stream: &net::TcpStream) -> io::Result<()> {
+ stream.set_nonblocking(true)
+}
+
+
+impl TcpStream {
+ /// Create a new TCP stream and issue a non-blocking connect to the
+ /// specified address.
+ ///
+    /// This convenience method uses the system's default options when
+    /// creating a socket which is then connected. If fine-grained
+ /// control over the creation of the socket is desired, you can use
+ /// `net2::TcpBuilder` to configure a socket and then pass its socket to
+ /// `TcpStream::connect_stream` to transfer ownership into mio and schedule
+ /// the connect operation.
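+    ///
+    /// For example, configuring the socket with `net2` before connecting
+    /// (a sketch; the option set here is illustrative only):
+    ///
+    /// ```no_run
+    /// # extern crate mio;
+    /// # extern crate net2;
+    /// # use std::error::Error;
+    /// # fn try_main() -> Result<(), Box<Error>> {
+    /// use mio::net::TcpStream;
+    /// use net2::TcpBuilder;
+    ///
+    /// let builder = TcpBuilder::new_v4()?;
+    /// builder.bind("0.0.0.0:0")?; // binding first is required on Windows
+    /// let stream = TcpStream::connect_stream(
+    ///     builder.to_tcp_stream()?,
+    ///     &"127.0.0.1:34254".parse()?,
+    /// )?;
+    /// # drop(stream);
+    /// # Ok(())
+    /// # }
+    /// # fn main() {
+    /// # try_main().unwrap();
+    /// # }
+    /// ```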
+ pub fn connect(addr: &SocketAddr) -> io::Result<TcpStream> {
+ let sock = match *addr {
+ SocketAddr::V4(..) => TcpBuilder::new_v4(),
+ SocketAddr::V6(..) => TcpBuilder::new_v6(),
+ }?;
+ // Required on Windows for a future `connect_overlapped` operation to be
+ // executed successfully.
+ if cfg!(windows) {
+ sock.bind(&inaddr_any(addr))?;
+ }
+ TcpStream::connect_stream(sock.to_tcp_stream()?, addr)
+ }
+
+    /// Creates a new `TcpStream` from a pending `std::net::TcpStream`
+    /// (typically obtained from a `net2::TcpBuilder`), connecting it to the
+    /// address specified.
+ ///
+ /// This constructor allows configuring the socket before it's actually
+ /// connected, and this function will transfer ownership to the returned
+ /// `TcpStream` if successful. An unconnected `TcpStream` can be created
+ /// with the `net2::TcpBuilder` type (and also configured via that route).
+ ///
+ /// The platform specific behavior of this function looks like:
+ ///
+ /// * On Unix, the socket is placed into nonblocking mode and then a
+ /// `connect` call is issued.
+ ///
+ /// * On Windows, the address is stored internally and the connect operation
+ /// is issued when the returned `TcpStream` is registered with an event
+ /// loop. Note that on Windows you must `bind` a socket before it can be
+ /// connected, so if a custom `TcpBuilder` is used it should be bound
+ /// (perhaps to `INADDR_ANY`) before this method is called.
+ pub fn connect_stream(stream: net::TcpStream,
+ addr: &SocketAddr) -> io::Result<TcpStream> {
+ Ok(TcpStream {
+ sys: sys::TcpStream::connect(stream, addr)?,
+ selector_id: SelectorId::new(),
+ })
+ }
+
+ /// Creates a new `TcpStream` from a standard `net::TcpStream`.
+ ///
+ /// This function is intended to be used to wrap a TCP stream from the
+ /// standard library in the mio equivalent. The conversion here will
+    /// automatically set `stream` to nonblocking mode and the returned object
+    /// should be ready to be associated with an event loop.
+ ///
+ /// Note that the TCP stream here will not have `connect` called on it, so
+ /// it should already be connected via some other means (be it manually, the
+ /// net2 crate, or the standard library).
+ pub fn from_stream(stream: net::TcpStream) -> io::Result<TcpStream> {
+ set_nonblocking(&stream)?;
+
+ Ok(TcpStream {
+ sys: sys::TcpStream::from_stream(stream),
+ selector_id: SelectorId::new(),
+ })
+ }
+
+ /// Returns the socket address of the remote peer of this TCP connection.
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.peer_addr()
+ }
+
+ /// Returns the socket address of the local half of this TCP connection.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.local_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `TcpStream` is a reference to the same stream that this
+ /// object references. Both handles will read and write the same stream of
+ /// data, and options set on one stream will be propagated to the other
+ /// stream.
+ pub fn try_clone(&self) -> io::Result<TcpStream> {
+ self.sys.try_clone().map(|s| {
+ TcpStream {
+ sys: s,
+ selector_id: self.selector_id.clone(),
+ }
+ })
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O on the specified
+ /// portions to return immediately with an appropriate value (see the
+ /// documentation of `Shutdown`).
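+    ///
+    /// For example, to signal end-of-stream to the peer while continuing to
+    /// read its remaining response (a sketch; `stream` is assumed to be an
+    /// established `TcpStream`):
+    ///
+    /// ```ignore
+    /// stream.shutdown(Shutdown::Write)?;
+    /// ```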
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.sys.shutdown(how)
+ }
+
+ /// Sets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// If set, this option disables the Nagle algorithm. This means that
+ /// segments are always sent as soon as possible, even if there is only a
+ /// small amount of data. When not set, data is buffered until there is a
+ /// sufficient amount to send out, thereby avoiding the frequent sending of
+ /// small packets.
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.sys.set_nodelay(nodelay)
+ }
+
+ /// Gets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// For more information about this option, see [`set_nodelay`][link].
+ ///
+ /// [link]: #method.set_nodelay
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.sys.nodelay()
+ }
+
+ /// Sets the value of the `SO_RCVBUF` option on this socket.
+ ///
+ /// Changes the size of the operating system's receive buffer associated
+ /// with the socket.
+ pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.sys.set_recv_buffer_size(size)
+ }
+
+ /// Gets the value of the `SO_RCVBUF` option on this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_recv_buffer_size`][link].
+ ///
+ /// [link]: #method.set_recv_buffer_size
+ pub fn recv_buffer_size(&self) -> io::Result<usize> {
+ self.sys.recv_buffer_size()
+ }
+
+ /// Sets the value of the `SO_SNDBUF` option on this socket.
+ ///
+ /// Changes the size of the operating system's send buffer associated with
+ /// the socket.
+ pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.sys.set_send_buffer_size(size)
+ }
+
+ /// Gets the value of the `SO_SNDBUF` option on this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_send_buffer_size`][link].
+ ///
+ /// [link]: #method.set_send_buffer_size
+ pub fn send_buffer_size(&self) -> io::Result<usize> {
+ self.sys.send_buffer_size()
+ }
+
+ /// Sets whether keepalive messages are enabled to be sent on this socket.
+ ///
+ /// On Unix, this option will set the `SO_KEEPALIVE` as well as the
+ /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform).
+ /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option.
+ ///
+ /// If `None` is specified then keepalive messages are disabled, otherwise
+ /// the duration specified will be the time to remain idle before sending a
+ /// TCP keepalive probe.
+ ///
+    /// Some platforms specify this value in seconds, so the sub-second part
+    /// of the provided duration may be ignored.
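+    ///
+    /// For example, enabling keepalive probes after 60 seconds of idleness
+    /// (a sketch; `stream` is assumed to be an established `TcpStream`):
+    ///
+    /// ```ignore
+    /// stream.set_keepalive(Some(Duration::from_secs(60)))?;
+    /// ```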
+ pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+ self.sys.set_keepalive(keepalive)
+ }
+
+ /// Returns whether keepalive messages are enabled on this socket, and if so
+ /// the duration of time between them.
+ ///
+ /// For more information about this option, see [`set_keepalive`][link].
+ ///
+ /// [link]: #method.set_keepalive
+ pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+ self.sys.keepalive()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.sys.ttl()
+ }
+
+ /// Sets the value for the `IPV6_V6ONLY` option on this socket.
+ ///
+ /// If this is set to `true` then the socket is restricted to sending and
+ /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications
+ /// can bind the same port at the same time.
+ ///
+ /// If this is set to `false` then the socket can be used to send and
+ /// receive packets from an IPv4-mapped IPv6 address.
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.sys.set_only_v6(only_v6)
+ }
+
+ /// Gets the value of the `IPV6_V6ONLY` option for this socket.
+ ///
+ /// For more information about this option, see [`set_only_v6`][link].
+ ///
+ /// [link]: #method.set_only_v6
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.sys.only_v6()
+ }
+
+ /// Sets the value for the `SO_LINGER` option on this socket.
+ pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.sys.set_linger(dur)
+ }
+
+ /// Gets the value of the `SO_LINGER` option on this socket.
+ ///
+ /// For more information about this option, see [`set_linger`][link].
+ ///
+ /// [link]: #method.set_linger
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ self.sys.linger()
+ }
+
+ #[deprecated(since = "0.6.9", note = "use set_keepalive")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn set_keepalive_ms(&self, keepalive: Option<u32>) -> io::Result<()> {
+ self.set_keepalive(keepalive.map(|v| {
+ Duration::from_millis(u64::from(v))
+ }))
+ }
+
+ #[deprecated(since = "0.6.9", note = "use keepalive")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn keepalive_ms(&self) -> io::Result<Option<u32>> {
+ self.keepalive().map(|v| {
+ v.map(|v| {
+ ::convert::millis(v) as u32
+ })
+ })
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.sys.take_error()
+ }
+
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying recv system call.
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.sys.peek(buf)
+ }
+
+ /// Read in a list of buffers all at once.
+ ///
+ /// This operation will attempt to read bytes from this socket and place
+ /// them into the list of buffers provided. Note that each buffer is an
+ /// `IoVec` which can be created from a byte slice.
+ ///
+ /// The buffers provided will be filled in sequentially. A buffer will be
+ /// entirely filled up before the next is written to.
+ ///
+ /// The number of bytes read is returned, if successful, or an error is
+ /// returned otherwise. If no bytes are available to be read yet then
+ /// a "would block" error is returned. This operation does not block.
+ ///
+ /// On Unix this corresponds to the `readv` syscall.
+ pub fn read_bufs(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ self.sys.readv(bufs)
+ }
+
+ /// Write a list of buffers all at once.
+ ///
+ /// This operation will attempt to write a list of byte buffers to this
+ /// socket. Note that each buffer is an `IoVec` which can be created from a
+ /// byte slice.
+ ///
+ /// The buffers provided will be written sequentially. A buffer will be
+ /// entirely written before the next is written.
+ ///
+ /// The number of bytes written is returned, if successful, or an error is
+ /// returned otherwise. If the socket is not currently writable then a
+ /// "would block" error is returned. This operation does not block.
+ ///
+ /// On Unix this corresponds to the `writev` syscall.
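+    ///
+    /// For example, writing two slices with one call (a sketch; `stream` is
+    /// assumed to be an established, writable `TcpStream`):
+    ///
+    /// ```ignore
+    /// let a: &IoVec = (&b"hello "[..]).into();
+    /// let b: &IoVec = (&b"world"[..]).into();
+    /// let n = stream.write_bufs(&[a, b])?; // may write fewer than 11 bytes
+    /// ```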
+ pub fn write_bufs(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ self.sys.writev(bufs)
+ }
+}
+
+fn inaddr_any(other: &SocketAddr) -> SocketAddr {
+ match *other {
+ SocketAddr::V4(..) => {
+ let any = Ipv4Addr::new(0, 0, 0, 0);
+ let addr = SocketAddrV4::new(any, 0);
+ SocketAddr::V4(addr)
+ }
+ SocketAddr::V6(..) => {
+ let any = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
+ let addr = SocketAddrV6::new(any, 0, 0, 0);
+ SocketAddr::V6(addr)
+ }
+ }
+}
+
+impl Read for TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (&self.sys).read(buf)
+ }
+}
+
+impl<'a> Read for &'a TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (&self.sys).read(buf)
+ }
+}
+
+impl Write for TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&self.sys).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.sys).flush()
+ }
+}
+
+impl<'a> Write for &'a TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&self.sys).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.sys).flush()
+ }
+}
+
+impl Evented for TcpStream {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.selector_id.associate_selector(poll)?;
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.sys, f)
+ }
+}
+
+/*
+ *
+ * ===== TcpListener =====
+ *
+ */
+
+/// A structure representing a socket server
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Ready, Poll, PollOpt, Token};
+/// use mio::net::TcpListener;
+/// use std::time::Duration;
+///
+/// let listener = TcpListener::bind(&"127.0.0.1:34255".parse()?)?;
+///
+/// let poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register the socket with `Poll`
+/// poll.register(&listener, Token(0), Ready::readable(),
+/// PollOpt::edge())?;
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// // There may be a socket ready to be accepted
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+pub struct TcpListener {
+ sys: sys::TcpListener,
+ selector_id: SelectorId,
+}
+
+impl TcpListener {
+ /// Convenience method to bind a new TCP listener to the specified address
+ /// to receive new connections.
+ ///
+ /// This function will take the following steps:
+ ///
+ /// 1. Create a new TCP socket.
+ /// 2. Set the `SO_REUSEADDR` option on the socket.
+ /// 3. Bind the socket to the specified address.
+ /// 4. Call `listen` on the socket to prepare it to receive new connections.
+ ///
+ /// If fine-grained control over the binding and listening process for a
+    /// socket is desired then the `net2::TcpBuilder` methods can be used in
+    /// combination with the `TcpListener::from_std` method to transfer
+    /// ownership into mio.
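+    ///
+    /// For example, building a listener with a larger backlog via `net2`
+    /// (a sketch; the backlog value is illustrative only):
+    ///
+    /// ```no_run
+    /// # extern crate mio;
+    /// # extern crate net2;
+    /// # use std::error::Error;
+    /// # fn try_main() -> Result<(), Box<Error>> {
+    /// use mio::net::TcpListener;
+    /// use net2::TcpBuilder;
+    ///
+    /// let builder = TcpBuilder::new_v4()?;
+    /// builder.reuse_address(true)?;
+    /// builder.bind("127.0.0.1:0")?;
+    /// let listener = TcpListener::from_std(builder.listen(4096)?)?;
+    /// # drop(listener);
+    /// # Ok(())
+    /// # }
+    /// # fn main() {
+    /// # try_main().unwrap();
+    /// # }
+    /// ```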
+ pub fn bind(addr: &SocketAddr) -> io::Result<TcpListener> {
+ // Create the socket
+ let sock = match *addr {
+ SocketAddr::V4(..) => TcpBuilder::new_v4(),
+ SocketAddr::V6(..) => TcpBuilder::new_v6(),
+ }?;
+
+ // Set SO_REUSEADDR, but only on Unix (mirrors what libstd does)
+ if cfg!(unix) {
+ sock.reuse_address(true)?;
+ }
+
+ // Bind the socket
+ sock.bind(addr)?;
+
+ // listen
+ let listener = sock.listen(1024)?;
+ Ok(TcpListener {
+ sys: sys::TcpListener::new(listener)?,
+ selector_id: SelectorId::new(),
+ })
+ }
+
+ #[deprecated(since = "0.6.13", note = "use from_std instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn from_listener(listener: net::TcpListener, _: &SocketAddr)
+ -> io::Result<TcpListener> {
+ TcpListener::from_std(listener)
+ }
+
+ /// Creates a new `TcpListener` from an instance of a
+ /// `std::net::TcpListener` type.
+ ///
+    /// This function will set the provided `listener` into nonblocking mode
+    /// on Unix; otherwise the listener is simply wrapped up in a mio
+    /// listener, ready to accept new connections and become associated with
+    /// an event loop.
+ pub fn from_std(listener: net::TcpListener) -> io::Result<TcpListener> {
+ sys::TcpListener::new(listener).map(|s| {
+ TcpListener {
+ sys: s,
+ selector_id: SelectorId::new(),
+ }
+ })
+ }
+
+ /// Accepts a new `TcpStream`.
+ ///
+ /// This may return an `Err(e)` where `e.kind()` is
+ /// `io::ErrorKind::WouldBlock`. This means a stream may be ready at a later
+ /// point and one should wait for a notification before calling `accept`
+ /// again.
+ ///
+ /// If an accepted stream is returned, the remote address of the peer is
+ /// returned along with it.
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+        let (s, a) = self.accept_std()?;
+ Ok((TcpStream::from_stream(s)?, a))
+ }
+
+ /// Accepts a new `std::net::TcpStream`.
+ ///
+ /// This method is the same as `accept`, except that it returns a TCP socket
+    /// *in blocking mode* which isn't bound to `mio`. It can then be
+    /// converted to a `mio` type later, if necessary.
+ pub fn accept_std(&self) -> io::Result<(net::TcpStream, SocketAddr)> {
+ self.sys.accept()
+ }
+
+ /// Returns the local socket address of this listener.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.local_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `TcpListener` is a reference to the same socket that this
+ /// object references. Both handles can be used to accept incoming
+ /// connections and options set on one listener will affect the other.
+ pub fn try_clone(&self) -> io::Result<TcpListener> {
+ self.sys.try_clone().map(|s| {
+ TcpListener {
+ sys: s,
+ selector_id: self.selector_id.clone(),
+ }
+ })
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.sys.ttl()
+ }
+
+ /// Sets the value for the `IPV6_V6ONLY` option on this socket.
+ ///
+ /// If this is set to `true` then the socket is restricted to sending and
+ /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications
+ /// can bind the same port at the same time.
+ ///
+ /// If this is set to `false` then the socket can be used to send and
+ /// receive packets from an IPv4-mapped IPv6 address.
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.sys.set_only_v6(only_v6)
+ }
+
+ /// Gets the value of the `IPV6_V6ONLY` option for this socket.
+ ///
+ /// For more information about this option, see [`set_only_v6`][link].
+ ///
+ /// [link]: #method.set_only_v6
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.sys.only_v6()
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.sys.take_error()
+ }
+}
+
+impl Evented for TcpListener {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.selector_id.associate_selector(poll)?;
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.sys, f)
+ }
+}
+
+/*
+ *
+ * ===== UNIX ext =====
+ *
+ */
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl IntoRawFd for TcpStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for TcpStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl FromRawFd for TcpStream {
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpStream {
+ TcpStream {
+ sys: FromRawFd::from_raw_fd(fd),
+ selector_id: SelectorId::new(),
+ }
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl IntoRawFd for TcpListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for TcpListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl FromRawFd for TcpListener {
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpListener {
+ TcpListener {
+ sys: FromRawFd::from_raw_fd(fd),
+ selector_id: SelectorId::new(),
+ }
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/net/udp.rs b/third_party/rust/mio-0.6.23/src/net/udp.rs
new file mode 100644
index 0000000000..0d89511ac7
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/net/udp.rs
@@ -0,0 +1,645 @@
+//! Primitives for working with UDP
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+use {io, sys, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use poll::SelectorId;
+use std::fmt;
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use iovec::IoVec;
+
+/// A User Datagram Protocol socket.
+///
+/// This is an implementation of a bound UDP socket. This supports both IPv4 and
+/// IPv6 addresses, and there is no corresponding notion of a server because UDP
+/// is a datagram protocol.
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// #
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// // An Echo program:
+/// // SENDER -> sends a message.
+/// // ECHOER -> listens and prints the message received.
+///
+/// use mio::net::UdpSocket;
+/// use mio::{Events, Ready, Poll, PollOpt, Token};
+/// use std::time::Duration;
+///
+/// const SENDER: Token = Token(0);
+/// const ECHOER: Token = Token(1);
+///
/// // Binding to port 0 lets the operating system pick a free port for each
/// // socket, so the two binds below cannot collide.
+/// let sender_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+/// let echoer_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+///
/// // If we did not use connect here, SENDER and ECHOER would need to call send_to
/// // and recv_from respectively.
+/// sender_socket.connect(echoer_socket.local_addr().unwrap())?;
+///
+/// // We need a Poll to check if SENDER is ready to be written into, and if ECHOER is ready to be
+/// // read from.
+/// let poll = Poll::new()?;
+///
+/// // We register our sockets here so that we can check if they are ready to be written/read.
+/// poll.register(&sender_socket, SENDER, Ready::writable(), PollOpt::edge())?;
+/// poll.register(&echoer_socket, ECHOER, Ready::readable(), PollOpt::edge())?;
+///
+/// let msg_to_send = [9; 9];
+/// let mut buffer = [0; 9];
+///
+/// let mut events = Events::with_capacity(128);
+/// loop {
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+/// for event in events.iter() {
+/// match event.token() {
+/// // Our SENDER is ready to be written into.
+/// SENDER => {
+/// let bytes_sent = sender_socket.send(&msg_to_send)?;
+/// assert_eq!(bytes_sent, 9);
+/// println!("sent {:?} -> {:?} bytes", msg_to_send, bytes_sent);
+/// },
+/// // Our ECHOER is ready to be read from.
+/// ECHOER => {
+/// let num_recv = echoer_socket.recv(&mut buffer)?;
+/// println!("echo {:?} -> {:?}", buffer, num_recv);
+/// buffer = [0; 9];
+/// # return Ok(());
+/// }
+/// _ => unreachable!()
+/// }
+/// }
+/// }
+/// #
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+pub struct UdpSocket {
+ sys: sys::UdpSocket,
+ selector_id: SelectorId,
+}
+
+impl UdpSocket {
+ /// Creates a UDP socket from the given address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// // We must bind it to an open address.
+ /// let socket = match UdpSocket::bind(&"127.0.0.1:0".parse()?) {
+ /// Ok(new_socket) => new_socket,
+ /// Err(fail) => {
+ /// // We panic! here, but you could try to bind it again on another address.
+ /// panic!("Failed to bind socket. {:?}", fail);
+ /// }
+ /// };
+ ///
+    /// // Our socket was created, but we should not use it before checking its readiness.
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn bind(addr: &SocketAddr) -> io::Result<UdpSocket> {
+ let socket = net::UdpSocket::bind(addr)?;
+ UdpSocket::from_socket(socket)
+ }
+
+ /// Creates a new mio-wrapped socket from an underlying and bound std
+ /// socket.
+ ///
+ /// This function requires that `socket` has previously been bound to an
+ /// address to work correctly, and returns an I/O object which can be used
+ /// with mio to send/receive UDP messages.
+ ///
+ /// This can be used in conjunction with net2's `UdpBuilder` interface to
+ /// configure a socket before it's handed off to mio, such as setting
+ /// options like `reuse_address` or binding to multiple addresses.
+ pub fn from_socket(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ Ok(UdpSocket {
+ sys: sys::UdpSocket::new(socket)?,
+ selector_id: SelectorId::new(),
+ })
+ }
+
+ /// Returns the socket address that this socket was created from.
+ ///
+ /// # Examples
+ ///
+ // This assertion is almost, but not quite, universal. It fails on
+ // shared-IP FreeBSD jails. It's hard for mio to know whether we're jailed,
+ // so simply disable the test on FreeBSD.
+ #[cfg_attr(not(target_os = "freebsd"), doc = " ```")]
+ #[cfg_attr(target_os = "freebsd", doc = " ```no_run")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let addr = "127.0.0.1:0".parse()?;
+    /// let socket = UdpSocket::bind(&addr)?;
+    /// assert_eq!(socket.local_addr()?.ip(), addr.ip());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.local_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `UdpSocket` is a reference to the same socket that this
+ /// object references. Both handles will read and write the same port, and
+ /// options set on one socket will be propagated to the other.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// // We must bind it to an open address.
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ /// let cloned_socket = socket.try_clone()?;
+ ///
+ /// assert_eq!(socket.local_addr()?, cloned_socket.local_addr()?);
+ ///
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.sys.try_clone()
+ .map(|s| {
+ UdpSocket {
+ sys: s,
+ selector_id: self.selector_id.clone(),
+ }
+ })
+ }
+
+ /// Sends data on the socket to the given address. On success, returns the
+ /// number of bytes written.
+ ///
+    /// Unlike the standard library's `send_to`, the target address here must
+    /// be a `&SocketAddr` rather than any `ToSocketAddrs` implementor.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is writable before calling send_to,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let bytes_sent = socket.send_to(&[9; 9], &"127.0.0.1:11100".parse()?)?;
+ /// assert_eq!(bytes_sent, 9);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
+ self.sys.send_to(buf, target)
+ }
+
+ /// Receives data from the socket. On success, returns the number of bytes
+    /// read and the address from which the data came.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is readable before calling recv_from,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let mut buf = [0; 9];
+ /// let (num_recv, from_addr) = socket.recv_from(&mut buf)?;
+ /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.sys.recv_from(buf)
+ }
+
+    /// Sends data on the socket to the address to which it was previously
+    /// connected via `connect()`. On success, returns the number of bytes
+    /// written.
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.sys.send(buf)
+ }
+
+    /// Receives data on the socket from the address to which it was
+    /// previously connected via `connect()`. On success, returns the number
+    /// of bytes read.
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.sys.recv(buf)
+ }
+
+    /// Connects the UDP socket, setting the default destination for `send()`
+    /// and limiting packets read via `recv()` to those coming from the
+    /// address specified in `addr`.
+ pub fn connect(&self, addr: SocketAddr) -> io::Result<()> {
+ self.sys.connect(addr)
+ }
+
+ /// Sets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// When enabled, this socket is allowed to send packets to a broadcast
+ /// address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let broadcast_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+    /// if !broadcast_socket.broadcast()? {
+    ///     broadcast_socket.set_broadcast(true)?;
+    /// }
+    ///
+    /// assert!(broadcast_socket.broadcast()?);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.sys.set_broadcast(on)
+ }
+
+ /// Gets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_broadcast`][link].
+ ///
+ /// [link]: #method.set_broadcast
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let broadcast_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ /// assert_eq!(broadcast_socket.broadcast()?, false);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.sys.broadcast()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// If enabled, multicast packets will be looped back to the local socket.
+    /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.sys.set_multicast_loop_v4(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v4
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.sys.multicast_loop_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// Indicates the time-to-live value of outgoing multicast packets for
+ /// this socket. The default value is 1 which means that multicast packets
+ /// don't leave the local network unless explicitly requested.
+ ///
+    /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_multicast_ttl_v4(ttl)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_ttl_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_ttl_v4
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.sys.multicast_ttl_v4()
+ }
+
+ /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// Controls whether this socket sees the multicast packets it sends itself.
+    /// Note that this may not have any effect on IPv4 sockets.
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.sys.set_multicast_loop_v6(on)
+ }
+
+ /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v6`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v6
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.sys.multicast_loop_v6()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ /// if socket.ttl()? < 255 {
+ /// socket.set_ttl(255)?;
+ /// }
+ ///
+ /// assert_eq!(socket.ttl()?, 255);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ /// socket.set_ttl(255)?;
+ ///
+ /// assert_eq!(socket.ttl()?, 255);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.sys.ttl()
+ }
+
+ /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// address of the local interface with which the system should join the
+ /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+ /// interface is chosen by the system.
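+    ///
+    /// For example, joining the multicast group `224.0.0.123` and letting the
+    /// system choose the interface (a sketch; `socket` is assumed to be a
+    /// bound `UdpSocket`):
+    ///
+    /// ```ignore
+    /// use std::net::Ipv4Addr;
+    ///
+    /// socket.join_multicast_v4(&Ipv4Addr::new(224, 0, 0, 123),
+    ///                          &Ipv4Addr::new(0, 0, 0, 0))?;
+    /// ```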
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.sys.join_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// index of the interface to join/leave (or 0 to indicate any interface).
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.sys.join_multicast_v6(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v4`][link].
+ ///
+ /// [link]: #method.join_multicast_v4
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.sys.leave_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v6`][link].
+ ///
+ /// [link]: #method.join_multicast_v6
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.sys.leave_multicast_v6(multiaddr, interface)
+ }
+
+ /// Sets the value for the `IPV6_V6ONLY` option on this socket.
+ ///
+ /// If this is set to `true` then the socket is restricted to sending and
+ /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications
+ /// can bind the same port at the same time.
+ ///
+ /// If this is set to `false` then the socket can be used to send and
+ /// receive packets from an IPv4-mapped IPv6 address.
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.sys.set_only_v6(only_v6)
+ }
+
+ /// Gets the value of the `IPV6_V6ONLY` option for this socket.
+ ///
+ /// For more information about this option, see [`set_only_v6`][link].
+ ///
+ /// [link]: #method.set_only_v6
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.sys.only_v6()
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.sys.take_error()
+ }
+
+    /// Receives a single datagram message on the socket previously connected
+    /// via `connect()`.
+ ///
+ /// This operation will attempt to read bytes from this socket and place
+ /// them into the list of buffers provided. Note that each buffer is an
+ /// `IoVec` which can be created from a byte slice.
+ ///
+ /// The buffers provided will be filled sequentially. A buffer will be
+ /// entirely filled up before the next is written to.
+ ///
+ /// The number of bytes read is returned, if successful, or an error is
+ /// returned otherwise. If no bytes are available to be read yet then
+ /// a [`WouldBlock`][link] error is returned. This operation does not block.
+ ///
+ /// On Unix this corresponds to the `readv` syscall.
+ ///
+ /// [link]: https://doc.rust-lang.org/nightly/std/io/enum.ErrorKind.html#variant.WouldBlock
+ #[cfg(all(unix, not(target_os = "fuchsia")))]
+ pub fn recv_bufs(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ self.sys.readv(bufs)
+ }
+
+    /// Sends data on the socket to the address previously connected via
+    /// `connect()`.
+ ///
+ /// This operation will attempt to send a list of byte buffers to this
+ /// socket in a single datagram. Note that each buffer is an `IoVec`
+ /// which can be created from a byte slice.
+ ///
+ /// The buffers provided will be written sequentially. A buffer will be
+ /// entirely written before the next is written.
+ ///
+ /// The number of bytes written is returned, if successful, or an error is
+ /// returned otherwise. If the socket is not currently writable then a
+ /// [`WouldBlock`][link] error is returned. This operation does not block.
+ ///
+ /// On Unix this corresponds to the `writev` syscall.
+ ///
+ /// [link]: https://doc.rust-lang.org/nightly/std/io/enum.ErrorKind.html#variant.WouldBlock
+ #[cfg(all(unix, not(target_os = "fuchsia")))]
+ pub fn send_bufs(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ self.sys.writev(bufs)
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.selector_id.associate_selector(poll)?;
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.sys, f)
+ }
+}
+
+/*
+ *
+ * ===== UNIX ext =====
+ *
+ */
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl IntoRawFd for UdpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for UdpSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl FromRawFd for UdpSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
+ UdpSocket {
+ sys: FromRawFd::from_raw_fd(fd),
+ selector_id: SelectorId::new(),
+ }
+ }
+}
+
diff --git a/third_party/rust/mio-0.6.23/src/poll.rs b/third_party/rust/mio-0.6.23/src/poll.rs
new file mode 100644
index 0000000000..7985d456cd
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/poll.rs
@@ -0,0 +1,2783 @@
+use {sys, Token};
+use event_imp::{self as event, Ready, Event, Evented, PollOpt};
+use std::{fmt, io, ptr, usize};
+use std::cell::UnsafeCell;
+use std::{mem, ops, isize};
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::AsRawFd;
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::RawFd;
+use std::process;
+use std::sync::{Arc, Mutex, Condvar};
+use std::sync::atomic::{AtomicUsize, AtomicPtr, AtomicBool};
+use std::sync::atomic::Ordering::{self, Acquire, Release, AcqRel, Relaxed, SeqCst};
+use std::time::{Duration, Instant};
+
+// Poll is backed by two readiness queues. The first is a system readiness queue
+// represented by `sys::Selector`. The system readiness queue handles events
+// provided by the system, such as TCP and UDP. The second readiness queue is
+// implemented in user space by `ReadinessQueue`. It provides a way to implement
+// purely user space `Evented` types.
+//
+// `ReadinessQueue` is backed by a MPSC queue that supports reuse of linked
+// list nodes. This significantly reduces the number of required allocations.
+// Each `Registration` / `SetReadiness` pair allocates a single readiness node
+// that is used for the lifetime of the registration.
+//
+// The readiness node also includes a single atomic variable, `state` that
+// tracks most of the state associated with the registration. This includes the
+// current readiness, interest, poll options, and internal state. When the node
+// state is mutated, it is queued in the MPSC channel. A call to
+// `ReadinessQueue::poll` will dequeue and process nodes. The node state can
+// still be mutated while it is queued in the channel for processing.
+// Intermediate state values do not matter as long as the final state is
+// included in the call to `poll`. This is the eventually consistent nature of
+// the readiness queue.
+//
+// The readiness node is ref counted using the `ref_count` field. On creation,
+// the ref_count is initialized to 3: one `Registration` handle, one
+// `SetReadiness` handle, and one for the readiness queue. Since the readiness queue
+// doesn't *always* hold a handle to the node, we don't use the Arc type for
+// managing ref counts (this is to avoid constantly incrementing and
+// decrementing the ref count when pushing & popping from the queue). When the
+// `Registration` handle is dropped, the `dropped` flag is set on the node, then
+// the node is pushed into the registration queue. When Poll::poll pops the
// node, it sees the drop flag is set, and decrements its ref count.
+//
+// The MPSC queue is a modified version of the intrusive MPSC node based queue
+// described by 1024cores [1].
+//
+// The first modification is that two markers are used instead of a single
+// `stub`. The second marker is a `sleep_marker` which is used to signal to
+// producers that the consumer is going to sleep. This sleep_marker is only used
+// when the queue is empty, implying that the only node in the queue is
+// `end_marker`.
+//
+// The second modification is an `until` argument passed to the dequeue
+// function. When `poll` encounters a level-triggered node, the node will be
+// immediately pushed back into the queue. In order to avoid an infinite loop,
// before `poll` pushes the node back, its pointer is saved off and then passed
+// again as the `until` argument. If the next node to pop is `until`, then
+// `Dequeue::Empty` is returned.
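+//
+// Conceptually, the dequeue side behaves like this simplified sketch (not the
+// actual implementation; names are illustrative):
+//
+//     fn dequeue(&self, until: *mut ReadinessNode) -> Dequeue {
+//         let node = self.next_node_to_pop();
+//         if node == until {
+//             // Stop before re-processing a node that `poll` just re-queued.
+//             return Dequeue::Empty;
+//         }
+//         // ... otherwise pop `node` and hand it to `poll` ...
+//     }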
+//
+// [1] http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue
+
+
+/// Polls for readiness events on all registered values.
+///
+/// `Poll` allows a program to monitor a large number of `Evented` types,
+/// waiting until one or more become "ready" for some class of operations; e.g.
+/// reading and writing. An `Evented` type is considered ready if it is possible
+/// to immediately perform a corresponding operation; e.g. [`read`] or
+/// [`write`].
+///
+/// To use `Poll`, an `Evented` type must first be registered with the `Poll`
+/// instance using the [`register`] method, supplying readiness interest. The
+/// readiness interest tells `Poll` which specific operations on the handle to
+/// monitor for readiness. A `Token` is also passed to the [`register`]
+/// function. When `Poll` returns a readiness event, it will include this token.
+/// This associates the event with the `Evented` handle that generated the
+/// event.
+///
+/// [`read`]: tcp/struct.TcpStream.html#method.read
+/// [`write`]: tcp/struct.TcpStream.html#method.write
+/// [`register`]: #method.register
+///
+/// # Examples
+///
+/// A basic example -- establishing a `TcpStream` connection.
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Poll, Ready, PollOpt, Token};
+/// use mio::net::TcpStream;
+///
+/// use std::net::{TcpListener, SocketAddr};
+///
+/// // Bind a server socket to connect to.
+/// let addr: SocketAddr = "127.0.0.1:0".parse()?;
+/// let server = TcpListener::bind(&addr)?;
+///
+/// // Construct a new `Poll` handle as well as the `Events` we'll store into
+/// let poll = Poll::new()?;
+/// let mut events = Events::with_capacity(1024);
+///
+/// // Connect the stream
+/// let stream = TcpStream::connect(&server.local_addr()?)?;
+///
+/// // Register the stream with `Poll`
+/// poll.register(&stream, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+///
/// // Wait for the socket to become ready. This has to happen in a loop to
+/// // handle spurious wakeups.
+/// loop {
+/// poll.poll(&mut events, None)?;
+///
+/// for event in &events {
+/// if event.token() == Token(0) && event.readiness().is_writable() {
+/// // The socket connected (probably, it could still be a spurious
+/// // wakeup)
+/// return Ok(());
+/// }
+/// }
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// # Edge-triggered and level-triggered
+///
+/// An [`Evented`] registration may request edge-triggered events or
+/// level-triggered events. This is done by setting `register`'s
+/// [`PollOpt`] argument to either [`edge`] or [`level`].
+///
/// The difference between the two can be described as follows. Suppose that
+/// this scenario happens:
+///
+/// 1. A [`TcpStream`] is registered with `Poll`.
+/// 2. The socket receives 2kb of data.
+/// 3. A call to [`Poll::poll`] returns the token associated with the socket
+/// indicating readable readiness.
+/// 4. 1kb is read from the socket.
+/// 5. Another call to [`Poll::poll`] is made.
+///
+/// If when the socket was registered with `Poll`, edge triggered events were
+/// requested, then the call to [`Poll::poll`] done in step **5** will
+/// (probably) hang despite there being another 1kb still present in the socket
+/// read buffer. The reason for this is that edge-triggered mode delivers events
/// only when changes occur on the monitored [`Evented`]. So, in step **5** the
+/// caller might end up waiting for some data that is already present inside the
+/// socket buffer.
+///
+/// With edge-triggered events, operations **must** be performed on the
+/// `Evented` type until [`WouldBlock`] is returned. In other words, after
+/// receiving an event indicating readiness for a certain operation, one should
+/// assume that [`Poll::poll`] may never return another event for the same token
+/// and readiness until the operation returns [`WouldBlock`].
+///
/// By contrast, when level-triggered notifications were requested, each call to
+/// [`Poll::poll`] will return an event for the socket as long as data remains
+/// in the socket buffer. Generally, level-triggered events should be avoided if
+/// high performance is a concern.
+///
+/// Since even with edge-triggered events, multiple events can be generated upon
+/// receipt of multiple chunks of data, the caller has the option to set the
+/// [`oneshot`] flag. This tells `Poll` to disable the associated [`Evented`]
+/// after the event is returned from [`Poll::poll`]. The subsequent calls to
+/// [`Poll::poll`] will no longer include events for [`Evented`] handles that
+/// are disabled even if the readiness state changes. The handle can be
+/// re-enabled by calling [`reregister`]. When handles are disabled, internal
+/// resources used to monitor the handle are maintained until the handle is
+/// dropped or deregistered. This makes re-registering the handle a fast
+/// operation.
+///
+/// For example, in the following scenario:
+///
+/// 1. A [`TcpStream`] is registered with `Poll`.
+/// 2. The socket receives 2kb of data.
+/// 3. A call to [`Poll::poll`] returns the token associated with the socket
+/// indicating readable readiness.
+/// 4. 2kb is read from the socket.
+/// 5. Another call to read is issued and [`WouldBlock`] is returned
+/// 6. The socket receives another 2kb of data.
+/// 7. Another call to [`Poll::poll`] is made.
+///
+/// Assuming the socket was registered with `Poll` with the [`edge`] and
+/// [`oneshot`] options, then the call to [`Poll::poll`] in step 7 would block. This
/// is because [`oneshot`] tells `Poll` to disable events for the socket after
+/// returning an event.
+///
+/// In order to receive the event for the data received in step 6, the socket
+/// would need to be reregistered using [`reregister`].
+///
+/// [`PollOpt`]: struct.PollOpt.html
+/// [`edge`]: struct.PollOpt.html#method.edge
+/// [`level`]: struct.PollOpt.html#method.level
+/// [`Poll::poll`]: struct.Poll.html#method.poll
+/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock
+/// [`Evented`]: event/trait.Evented.html
+/// [`TcpStream`]: tcp/struct.TcpStream.html
+/// [`reregister`]: #method.reregister
+/// [`oneshot`]: struct.PollOpt.html#method.oneshot
+///
+/// # Portability
+///
+/// Using `Poll` provides a portable interface across supported platforms as
+/// long as the caller takes the following into consideration:
+///
+/// ### Spurious events
+///
+/// [`Poll::poll`] may return readiness events even if the associated
+/// [`Evented`] handle is not actually ready. Given the same code, this may
+/// happen more on some platforms than others. It is important to never assume
+/// that, just because a readiness notification was received, that the
+/// associated operation will succeed as well.
+///
/// If an operation fails with [`WouldBlock`], then the caller should not treat
+/// this as an error, but instead should wait until another readiness event is
+/// received.
+///
+/// ### Draining readiness
+///
+/// When using edge-triggered mode, once a readiness event is received, the
+/// corresponding operation must be performed repeatedly until it returns
+/// [`WouldBlock`]. Unless this is done, there is no guarantee that another
+/// readiness event will be delivered, even if further data is received for the
+/// [`Evented`] handle.
+///
+/// For example, in the first scenario described above, after step 5, even if
+/// the socket receives more data there is no guarantee that another readiness
+/// event will be delivered.
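+///
+/// A minimal sketch of draining a readable socket until [`WouldBlock`] is
+/// returned (assuming a connected, non-blocking [`TcpStream`] named `sock`):
+///
+/// ```no_run
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::net::TcpStream;
+/// use std::io::{self, Read};
+///
+/// let mut sock = TcpStream::connect(&"216.58.193.100:80".parse()?)?;
+/// let mut buf = [0; 4096];
+///
+/// loop {
+///     match sock.read(&mut buf) {
+///         // The peer closed the socket.
+///         Ok(0) => break,
+///         // Process the bytes that were read, then keep reading.
+///         Ok(_n) => {}
+///         // `WouldBlock` is not an error; readiness is now drained and
+///         // the socket can be polled again.
+///         Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break,
+///         Err(e) => return Err(e.into()),
+///     }
+/// }
+/// # Ok(())
+/// # }
+/// # fn main() {
+/// #     try_main().unwrap();
+/// # }
+/// ```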
+///
+/// ### Readiness operations
+///
+/// The only readiness operations that are guaranteed to be present on all
+/// supported platforms are [`readable`] and [`writable`]. All other readiness
+/// operations may have false negatives and as such should be considered
+/// **hints**. This means that if a socket is registered with [`readable`],
+/// [`error`], and [`hup`] interest, and either an error or hup is received, a
+/// readiness event will be generated for the socket, but it **may** only
+/// include `readable` readiness. Also note that, given the potential for
+/// spurious events, receiving a readiness event with `hup` or `error` doesn't
+/// actually mean that a `read` on the socket will return a result matching the
+/// readiness event.
+///
+/// In other words, portable programs that explicitly check for [`hup`] or
+/// [`error`] readiness should be doing so as an **optimization** and always be
+/// able to handle an error or HUP situation when performing the actual read
+/// operation.
+///
+/// [`readable`]: struct.Ready.html#method.readable
+/// [`writable`]: struct.Ready.html#method.writable
+/// [`error`]: unix/struct.UnixReady.html#method.error
+/// [`hup`]: unix/struct.UnixReady.html#method.hup
+///
+/// ### Registering handles
+///
+/// Unless otherwise noted, it should be assumed that types implementing
+/// [`Evented`] will never become ready unless they are registered with `Poll`.
+///
+/// For example:
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Poll, Ready, PollOpt, Token};
+/// use mio::net::TcpStream;
+/// use std::time::Duration;
+/// use std::thread;
+///
+/// let sock = TcpStream::connect(&"216.58.193.100:80".parse()?)?;
+///
+/// thread::sleep(Duration::from_secs(1));
+///
+/// let poll = Poll::new()?;
+///
+/// // The connect is not guaranteed to have started until it is registered at
+/// // this point
+/// poll.register(&sock, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// # Implementation notes
+///
+/// `Poll` is backed by the selector provided by the operating system.
+///
+/// | OS | Selector |
+/// |------------|-----------|
+/// | Linux | [epoll] |
+/// | OS X, iOS | [kqueue] |
+/// | Windows | [IOCP] |
+/// | FreeBSD | [kqueue] |
+/// | Android | [epoll] |
+///
+/// On all supported platforms, socket operations are handled by using the
+/// system selector. Platform specific extensions (e.g. [`EventedFd`]) allow
+/// accessing other features provided by individual system selectors. For
+/// example, Linux's [`signalfd`] feature can be used by registering the FD with
+/// `Poll` via [`EventedFd`].
+///
+/// On all platforms except Windows, a call to [`Poll::poll`] is mostly just a
+/// direct call to the system selector. However, [IOCP] uses a completion model
+/// instead of a readiness model. In this case, `Poll` must adapt the
+/// completion model to Mio's readiness API. While non-trivial, the bridge
+/// layer is still quite efficient. The most expensive part is that calls to
+/// `read` and `write` require data to be copied into an intermediate buffer
+/// before it is passed to the kernel.
+///
+/// Notifications generated by [`SetReadiness`] are handled by an internal
+/// readiness queue. A single call to [`Poll::poll`] will collect events from
+/// both the system selector and the internal readiness queue.
+///
+/// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+/// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+/// [IOCP]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198(v=vs.85).aspx
+/// [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
+/// [`EventedFd`]: unix/struct.EventedFd.html
+/// [`SetReadiness`]: struct.SetReadiness.html
+/// [`Poll::poll`]: struct.Poll.html#method.poll
+pub struct Poll {
+ // Platform specific IO selector
+ selector: sys::Selector,
+
+ // Custom readiness queue
+ readiness_queue: ReadinessQueue,
+
+ // Use an atomic to first check if a full lock will be required. This is a
+ // fast-path check for single threaded cases avoiding the extra syscall
+ lock_state: AtomicUsize,
+
+ // Sequences concurrent calls to `Poll::poll`
+ lock: Mutex<()>,
+
+ // Wakeup the next waiter
+ condvar: Condvar,
+}
+
+/// Handle to a user space `Poll` registration.
+///
+/// `Registration` allows implementing [`Evented`] for types that cannot work
+/// with the [system selector]. A `Registration` is always paired with a
+/// `SetReadiness`, which allows updating the registration's readiness state.
+/// When [`set_readiness`] is called and the `Registration` is associated with a
+/// [`Poll`] instance, a readiness event will be created and eventually returned
+/// by [`poll`].
+///
+/// A `Registration` / `SetReadiness` pair is created by calling
+/// [`Registration::new2`]. At this point, the registration is not being
+/// monitored by a [`Poll`] instance, so calls to `set_readiness` will not
+/// result in any readiness notifications.
+///
+/// `Registration` implements [`Evented`], so it can be used with [`Poll`] using
+/// the same [`register`], [`reregister`], and [`deregister`] functions used
+/// with TCP, UDP, etc... types. Once registered with [`Poll`], readiness state
+/// changes result in readiness events being dispatched to the [`Poll`] instance
+/// with which `Registration` is registered.
+///
+/// **Note**, before using `Registration` be sure to read the
+/// [`set_readiness`] documentation and the [portability] notes. The
+/// guarantees offered by `Registration` may be weaker than expected.
+///
+/// For high level documentation, see [`Poll`].
+///
+/// # Examples
+///
+/// ```
+/// use mio::{Ready, Registration, Poll, PollOpt, Token};
+/// use mio::event::Evented;
+///
+/// use std::io;
+/// use std::time::Instant;
+/// use std::thread;
+///
+/// pub struct Deadline {
+/// when: Instant,
+/// registration: Registration,
+/// }
+///
+/// impl Deadline {
+/// pub fn new(when: Instant) -> Deadline {
+/// let (registration, set_readiness) = Registration::new2();
+///
+/// thread::spawn(move || {
+/// let now = Instant::now();
+///
+/// if now < when {
+/// thread::sleep(when - now);
+/// }
+///
+/// set_readiness.set_readiness(Ready::readable());
+/// });
+///
+/// Deadline {
+/// when: when,
+/// registration: registration,
+/// }
+/// }
+///
+/// pub fn is_elapsed(&self) -> bool {
+/// Instant::now() >= self.when
+/// }
+/// }
+///
+/// impl Evented for Deadline {
+/// fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// self.registration.register(poll, token, interest, opts)
+/// }
+///
+/// fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// self.registration.reregister(poll, token, interest, opts)
+/// }
+///
+/// fn deregister(&self, poll: &Poll) -> io::Result<()> {
+/// poll.deregister(&self.registration)
+/// }
+/// }
+/// ```
+///
+/// [system selector]: struct.Poll.html#implementation-notes
+/// [`Poll`]: struct.Poll.html
+/// [`Registration::new2`]: struct.Registration.html#method.new2
+/// [`Evented`]: event/trait.Evented.html
+/// [`set_readiness`]: struct.SetReadiness.html#method.set_readiness
+/// [`register`]: struct.Poll.html#method.register
+/// [`reregister`]: struct.Poll.html#method.reregister
+/// [`deregister`]: struct.Poll.html#method.deregister
+/// [portability]: struct.Poll.html#portability
+pub struct Registration {
+ inner: RegistrationInner,
+}
+
+unsafe impl Send for Registration {}
+unsafe impl Sync for Registration {}
+
+/// Updates the readiness state of the associated `Registration`.
+///
+/// See [`Registration`] for more documentation on using `SetReadiness` and
+/// [`Poll`] for high level polling documentation.
+///
+/// [`Poll`]: struct.Poll.html
+/// [`Registration`]: struct.Registration.html
+#[derive(Clone)]
+pub struct SetReadiness {
+ inner: RegistrationInner,
+}
+
+unsafe impl Send for SetReadiness {}
+unsafe impl Sync for SetReadiness {}
+
+/// Used to associate an IO type with a Selector
+#[derive(Debug)]
+pub struct SelectorId {
+ id: AtomicUsize,
+}
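+
+// Each `Evented` socket type embeds a `SelectorId` and sets it on the first
+// `register` call, so a later attempt to register the same handle with a
+// different `Poll` instance can be detected and rejected with an error.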
+
+struct RegistrationInner {
+ // Unsafe pointer to the registration's node. The node is ref counted. This
+ // cannot "simply" be tracked by an Arc because `Poll::poll` has an implicit
+ // handle though it isn't stored anywhere. In other words, `Poll::poll`
+ // needs to decrement the ref count before the node is freed.
+ node: *mut ReadinessNode,
+}
+
+#[derive(Clone)]
+struct ReadinessQueue {
+ inner: Arc<ReadinessQueueInner>,
+}
+
+unsafe impl Send for ReadinessQueue {}
+unsafe impl Sync for ReadinessQueue {}
+
+struct ReadinessQueueInner {
+ // Used to wake up `Poll` when readiness is set in another thread.
+ awakener: sys::Awakener,
+
+ // Head of the MPSC queue used to signal readiness to `Poll::poll`.
+ head_readiness: AtomicPtr<ReadinessNode>,
+
+ // Tail of the readiness queue.
+ //
+ // Only accessed by Poll::poll. Coordination will be handled by the poll fn
+ tail_readiness: UnsafeCell<*mut ReadinessNode>,
+
+ // Fake readiness node used to punctuate the end of the readiness queue.
+ // Before attempting to read from the queue, this node is inserted in order
+ // to partition the queue between nodes that are "owned" by the dequeue end
+ // and nodes that will be pushed on by producers.
+ end_marker: Box<ReadinessNode>,
+
+ // Similar to `end_marker`, but this node signals to producers that `Poll`
+ // has gone to sleep and must be woken up.
+ sleep_marker: Box<ReadinessNode>,
+
+ // Similar to `end_marker`, but the node signals that the queue is closed.
+ // This happens when `ReadyQueue` is dropped and signals to producers that
+ // the nodes should no longer be pushed into the queue.
+ closed_marker: Box<ReadinessNode>,
+}
+
+/// Node shared by a `Registration` / `SetReadiness` pair as well as the node
+/// queued into the MPSC channel.
+struct ReadinessNode {
+ // Node state, see struct docs for `ReadinessState`
+ //
+ // This variable is the primary point of coordination between all the
+ // various threads concurrently accessing the node.
+ state: AtomicState,
+
+ // The registration token cannot fit into the `state` variable, so it is
+ // broken out here. In order to atomically update both the state and token
+ // we have to jump through a few hoops.
+ //
+ // First, `state` includes `token_read_pos` and `token_write_pos`. These can
+ // either be 0, 1, or 2 which represent a token slot. `token_write_pos` is
+ // the token slot that contains the most up to date registration token.
+ // `token_read_pos` is the token slot that `poll` is currently reading from.
+ //
+ // When a call to `update` includes a different token than the one currently
+ // associated with the registration (token_write_pos), first an unused token
+ // slot is found. The unused slot is the one not represented by
+ // `token_read_pos` OR `token_write_pos`. The new token is written to this
+ // slot, then `state` is updated with the new `token_write_pos` value. This
+ // requires that there is only a *single* concurrent call to `update`.
+ //
+ // When `poll` reads a node state, it checks that `token_read_pos` matches
+ // `token_write_pos`. If they do not match, then it atomically updates
+ // `state` such that `token_read_pos` is set to `token_write_pos`. It will
+ // then read the token at the newly updated `token_read_pos`.
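+    //
+    // For example, if `token_read_pos == 0` and `token_write_pos == 1`, the
+    // unused slot is 2: `update` writes the new token to `token_2` and then
+    // sets `token_write_pos` to 2. `poll` will notice the mismatch and move
+    // `token_read_pos` to 2 before reading the token.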
+ token_0: UnsafeCell<Token>,
+ token_1: UnsafeCell<Token>,
+ token_2: UnsafeCell<Token>,
+
+ // Used when the node is queued in the readiness linked list. Accessing
+ // this field requires winning the "queue" lock
+ next_readiness: AtomicPtr<ReadinessNode>,
+
+ // Ensures that there is only one concurrent call to `update`.
+ //
+ // Each call to `update` will attempt to swap `update_lock` from `false` to
+ // `true`. If the CAS succeeds, the thread has obtained the update lock. If
+ // the CAS fails, then the `update` call returns immediately and the update
+ // is discarded.
+ update_lock: AtomicBool,
+
+ // Pointer to Arc<ReadinessQueueInner>
+ readiness_queue: AtomicPtr<()>,
+
+ // Tracks the number of `ReadyRef` pointers
+ ref_count: AtomicUsize,
+}
+
+/// Stores the ReadinessNode state in an AtomicUsize. This wrapper around the
+/// atomic variable handles encoding / decoding `ReadinessState` values.
+struct AtomicState {
+ inner: AtomicUsize,
+}
+
+const MASK_2: usize = 4 - 1;
+const MASK_4: usize = 16 - 1;
+const QUEUED_MASK: usize = 1 << QUEUED_SHIFT;
+const DROPPED_MASK: usize = 1 << DROPPED_SHIFT;
+
+const READINESS_SHIFT: usize = 0;
+const INTEREST_SHIFT: usize = 4;
+const POLL_OPT_SHIFT: usize = 8;
+const TOKEN_RD_SHIFT: usize = 12;
+const TOKEN_WR_SHIFT: usize = 14;
+const QUEUED_SHIFT: usize = 16;
+const DROPPED_SHIFT: usize = 17;
+
+/// Tracks all state for a single `ReadinessNode`. The state is packed into a
+/// `usize` variable from low to high bit as follows:
+///
+/// 4 bits: Registration current readiness
+/// 4 bits: Registration interest
+/// 4 bits: Poll options
+/// 2 bits: Token position currently being read from by `poll`
+/// 2 bits: Token position last written to by `update`
+/// 1 bit: Queued flag, set when node is being pushed into MPSC queue.
+/// 1 bit: Dropped flag, set when all `Registration` handles have been dropped.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+struct ReadinessState(usize);
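+
+// As an illustration of the layout described above (the actual accessors are
+// methods on `ReadinessState`), unpacking a raw state value `s: usize` looks
+// like:
+//
+//     readiness   = (s >> READINESS_SHIFT) & MASK_4
+//     interest    = (s >> INTEREST_SHIFT) & MASK_4
+//     poll_opt    = (s >> POLL_OPT_SHIFT) & MASK_4
+//     token_read  = (s >> TOKEN_RD_SHIFT) & MASK_2
+//     token_write = (s >> TOKEN_WR_SHIFT) & MASK_2
+//     queued      = s & QUEUED_MASK != 0
+//     dropped     = s & DROPPED_MASK != 0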
+
+/// Returned by `dequeue_node`. Represents the different states as described by
+/// the queue documentation on 1024cores.net.
+enum Dequeue {
+ Data(*mut ReadinessNode),
+ Empty,
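+    // A producer was observed mid-push: the head has been swapped but its
+    // `next` link is not yet written, so dequeuing must be retried shortly.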
+ Inconsistent,
+}
+
+const AWAKEN: Token = Token(usize::MAX);
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
+/*
+ *
+ * ===== Poll =====
+ *
+ */
+
+impl Poll {
+ /// Return a new `Poll` handle.
+ ///
+ /// This function will make a syscall to the operating system to create the
+ /// system selector. If this syscall fails, `Poll::new` will return with the
+ /// error.
+ ///
+ /// See [struct] level docs for more details.
+ ///
+ /// [struct]: struct.Poll.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Poll, Events};
+ /// use std::time::Duration;
+ ///
+ /// let poll = match Poll::new() {
+ /// Ok(poll) => poll,
+ /// Err(e) => panic!("failed to create Poll instance; err={:?}", e),
+ /// };
+ ///
+ /// // Create a structure to receive polled events
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Wait for events, but none will be received because no `Evented`
+ /// // handles have been registered with this `Poll` instance.
+ /// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?;
+ /// assert_eq!(n, 0);
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn new() -> io::Result<Poll> {
+ is_send::<Poll>();
+ is_sync::<Poll>();
+
+ let poll = Poll {
+ selector: sys::Selector::new()?,
+ readiness_queue: ReadinessQueue::new()?,
+ lock_state: AtomicUsize::new(0),
+ lock: Mutex::new(()),
+ condvar: Condvar::new(),
+ };
+
+ // Register the notification wakeup FD with the IO poller
+ poll.readiness_queue.inner.awakener.register(&poll, AWAKEN, Ready::readable(), PollOpt::edge())?;
+
+ Ok(poll)
+ }
+
+ /// Register an `Evented` handle with the `Poll` instance.
+ ///
+ /// Once registered, the `Poll` instance will monitor the `Evented` handle
+ /// for readiness state changes. When it notices a state change, it will
+ /// return a readiness event for the handle the next time [`poll`] is
+ /// called.
+ ///
+ /// See the [`struct`] docs for a high level overview.
+ ///
+ /// # Arguments
+ ///
+ /// `handle: &E: Evented`: This is the handle that the `Poll` instance
+ /// should monitor for readiness state changes.
+ ///
+ /// `token: Token`: The caller picks a token to associate with the socket.
+ /// When [`poll`] returns an event for the handle, this token is included.
+ /// This allows the caller to map the event to its handle. The token
+ /// associated with the `Evented` handle can be changed at any time by
+ /// calling [`reregister`].
+ ///
+ /// `token` cannot be `Token(usize::MAX)` as it is reserved for internal
+ /// usage.
+ ///
+ /// See documentation on [`Token`] for an example showing how to pick
+ /// [`Token`] values.
+ ///
+ /// `interest: Ready`: Specifies which operations `Poll` should monitor for
+ /// readiness. `Poll` will only return readiness events for operations
+ /// specified by this argument.
+ ///
+ /// If a socket is registered with readable interest and the socket becomes
+ /// writable, no event will be returned from [`poll`].
+ ///
+ /// The readiness interest for an `Evented` handle can be changed at any
+ /// time by calling [`reregister`].
+ ///
+ /// `opts: PollOpt`: Specifies the registration options. The most common
+ /// options being [`level`] for level-triggered events, [`edge`] for
+ /// edge-triggered events, and [`oneshot`].
+ ///
+ /// The registration options for an `Evented` handle can be changed at any
+ /// time by calling [`reregister`].
+ ///
+ /// # Notes
+ ///
+ /// Unless otherwise specified, the caller should assume that once an
+ /// `Evented` handle is registered with a `Poll` instance, it is bound to
+ /// that `Poll` instance for the lifetime of the `Evented` handle. This
+ /// remains true even if the `Evented` handle is deregistered from the poll
+ /// instance using [`deregister`].
+ ///
+ /// This function is **thread safe**. It can be called concurrently from
+ /// multiple threads.
+ ///
+ /// [`struct`]: #
+ /// [`reregister`]: #method.reregister
+ /// [`deregister`]: #method.deregister
+ /// [`poll`]: #method.poll
+ /// [`level`]: struct.PollOpt.html#method.level
+ /// [`edge`]: struct.PollOpt.html#method.edge
+ /// [`oneshot`]: struct.PollOpt.html#method.oneshot
+ /// [`Token`]: struct.Token.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll, Ready, PollOpt, Token};
+ /// use mio::net::TcpStream;
+ /// use std::time::{Duration, Instant};
+ ///
+ /// let poll = Poll::new()?;
+ /// let socket = TcpStream::connect(&"216.58.193.100:80".parse()?)?;
+ ///
+ /// // Register the socket with `poll`
+ /// poll.register(&socket, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let start = Instant::now();
+ /// let timeout = Duration::from_millis(500);
+ ///
+ /// loop {
+ /// let elapsed = start.elapsed();
+ ///
+ /// if elapsed >= timeout {
+ /// // Connection timed out
+ /// return Ok(());
+ /// }
+ ///
+ /// let remaining = timeout - elapsed;
+ /// poll.poll(&mut events, Some(remaining))?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) {
+ /// // Something (probably) happened on the socket.
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn register<E: ?Sized>(&self, handle: &E, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>
+ where E: Evented
+ {
+ validate_args(token)?;
+
+ /*
+ * Undefined behavior:
+ * - Reusing a token with a different `Evented` without deregistering
+ * (or closing) the original `Evented`.
+ */
+ trace!("registering with poller");
+
+ // Register interests for this socket
+ handle.register(self, token, interest, opts)?;
+
+ Ok(())
+ }
+
+ /// Re-register an `Evented` handle with the `Poll` instance.
+ ///
+ /// Re-registering an `Evented` handle allows changing the details of the
+ /// registration. Specifically, it allows updating the associated `token`,
+ /// `interest`, and `opts` specified in previous `register` and `reregister`
+ /// calls.
+ ///
+ /// The `reregister` arguments fully override the previous values. In other
+ /// words, if a socket is registered with [`readable`] interest and the call
+ /// to `reregister` specifies [`writable`], then read interest is no longer
+ /// requested for the handle.
+ ///
+ /// The `Evented` handle must have previously been registered with this
+ /// instance of `Poll` otherwise the call to `reregister` will return with
+ /// an error.
+ ///
+ /// `token` cannot be `Token(usize::MAX)` as it is reserved for internal
+ /// usage.
+ ///
+ /// See the [`register`] documentation for details about the function
+ /// arguments and see the [`struct`] docs for a high level overview of
+ /// polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Poll, Ready, PollOpt, Token};
+ /// use mio::net::TcpStream;
+ ///
+ /// let poll = Poll::new()?;
+ /// let socket = TcpStream::connect(&"216.58.193.100:80".parse()?)?;
+ ///
+ /// // Register the socket with `poll`, requesting readable
+ /// poll.register(&socket, Token(0), Ready::readable(), PollOpt::edge())?;
+ ///
+ /// // Reregister the socket specifying a different token and write interest
+ /// // instead. `PollOpt::edge()` must be specified even though that value
+ /// // is not being changed.
+ /// poll.reregister(&socket, Token(2), Ready::writable(), PollOpt::edge())?;
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ ///
+ /// [`struct`]: #
+ /// [`register`]: #method.register
+ /// [`readable`]: struct.Ready.html#method.readable
+ /// [`writable`]: struct.Ready.html#method.writable
+ pub fn reregister<E: ?Sized>(&self, handle: &E, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>
+ where E: Evented
+ {
+ validate_args(token)?;
+
+ trace!("registering with poller");
+
+ // Register interests for this socket
+ handle.reregister(self, token, interest, opts)?;
+
+ Ok(())
+ }
+
+ /// Deregister an `Evented` handle with the `Poll` instance.
+ ///
+ /// When an `Evented` handle is deregistered, the `Poll` instance will
+ /// no longer monitor it for readiness state changes. Unlike disabling
+ /// handles with oneshot, deregistering clears up any internal resources
+ /// needed to track the handle.
+ ///
+ /// A handle can be passed back to `register` after it has been
+ /// deregistered; however, it must be passed back to the **same** `Poll`
+ /// instance.
+ ///
+ /// `Evented` handles are automatically deregistered when they are dropped.
+ /// It is common to never need to explicitly call `deregister`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll, Ready, PollOpt, Token};
+ /// use mio::net::TcpStream;
+ /// use std::time::Duration;
+ ///
+ /// let poll = Poll::new()?;
+ /// let socket = TcpStream::connect(&"216.58.193.100:80".parse()?)?;
+ ///
+ /// // Register the socket with `poll`
+ /// poll.register(&socket, Token(0), Ready::readable(), PollOpt::edge())?;
+ ///
+ /// poll.deregister(&socket)?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Set a timeout because this poll should never receive any events.
+ /// let n = poll.poll(&mut events, Some(Duration::from_secs(1)))?;
+ /// assert_eq!(0, n);
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn deregister<E: ?Sized>(&self, handle: &E) -> io::Result<()>
+ where E: Evented
+ {
+ trace!("deregistering handle with poller");
+
+ // Deregister interests for this socket
+ handle.deregister(self)?;
+
+ Ok(())
+ }
+
+ /// Wait for readiness events
+ ///
+ /// Blocks the current thread and waits for readiness events for any of the
+ /// `Evented` handles that have been registered with this `Poll` instance.
+ /// The function will block until either at least one readiness event has
+ /// been received or `timeout` has elapsed. A `timeout` of `None` means that
+ /// `poll` will block until a readiness event has been received.
+ ///
+ /// The supplied `events` will be cleared and newly received readiness events
+ /// will be pushed onto the end. At most `events.capacity()` events will be
+ /// returned. If there are further pending readiness events, they will be
+ /// returned on the next call to `poll`.
+ ///
+ /// A single call to `poll` may result in multiple readiness events being
+ /// returned for a single `Evented` handle. For example, if a TCP socket
+ /// becomes both readable and writable, it may be possible for a single
+ /// readiness event to be returned with both [`readable`] and [`writable`]
+ /// readiness **OR** two separate events may be returned, one with
+ /// [`readable`] set and one with [`writable`] set.
+ ///
+ /// Note that the `timeout` will be rounded up to the system clock
+ /// granularity (usually 1ms), and kernel scheduling delays mean that
+ /// the blocking interval may be overrun by a small amount.
+ ///
+ /// `poll` returns the number of readiness events that have been pushed into
+ /// `events` or `Err` when an error has been encountered with the system
+ /// selector. The value returned is deprecated and will be removed in 0.7.0.
+    /// Accessing the events by index is also deprecated. New events can be
+    /// inserted while earlier ones are being processed, making sequential
+    /// index access problematic. Use the iterator API instead. See [`iter`].
+ ///
+ /// See the [struct] level documentation for a higher level discussion of
+ /// polling.
+ ///
+ /// [`readable`]: struct.Ready.html#method.readable
+ /// [`writable`]: struct.Ready.html#method.writable
+ /// [struct]: #
+ /// [`iter`]: struct.Events.html#method.iter
+ ///
+ /// # Examples
+ ///
+ /// A basic example -- establishing a `TcpStream` connection.
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll, Ready, PollOpt, Token};
+ /// use mio::net::TcpStream;
+ ///
+ /// use std::net::{TcpListener, SocketAddr};
+ /// use std::thread;
+ ///
+ /// // Bind a server socket to connect to.
+ /// let addr: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let server = TcpListener::bind(&addr)?;
+ /// let addr = server.local_addr()?.clone();
+ ///
+ /// // Spawn a thread to accept the socket
+ /// thread::spawn(move || {
+ /// let _ = server.accept();
+ /// });
+ ///
+ /// // Construct a new `Poll` handle as well as the `Events` we'll store into
+ /// let poll = Poll::new()?;
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Connect the stream
+ /// let stream = TcpStream::connect(&addr)?;
+ ///
+ /// // Register the stream with `Poll`
+ /// poll.register(&stream, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+ ///
+    /// // Wait for the socket to become ready. This has to happen in a loop to
+ /// // handle spurious wakeups.
+ /// loop {
+ /// poll.poll(&mut events, None)?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) && event.readiness().is_writable() {
+ /// // The socket connected (probably, it could still be a spurious
+ /// // wakeup)
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ ///
+ /// [struct]: #
+ pub fn poll(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<usize> {
+ self.poll1(events, timeout, false)
+ }
+
+ /// Like `poll`, but may be interrupted by a signal
+ ///
+    /// If `poll` is interrupted while blocking, it will transparently retry the syscall. If you
+ /// want to handle signals yourself, however, use `poll_interruptible`.
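+    ///
+    /// A sketch of handling the interruption explicitly:
+    ///
+    /// ```no_run
+    /// # use std::error::Error;
+    /// # fn try_main() -> Result<(), Box<Error>> {
+    /// use mio::{Events, Poll};
+    /// use std::io;
+    ///
+    /// let poll = Poll::new()?;
+    /// let mut events = Events::with_capacity(1024);
+    ///
+    /// match poll.poll_interruptible(&mut events, None) {
+    ///     Ok(_) => { /* dispatch the received events */ }
+    ///     Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {
+    ///         // A signal arrived while blocking; handle it, then poll again.
+    ///     }
+    ///     Err(e) => return Err(e.into()),
+    /// }
+    /// # Ok(())
+    /// # }
+    /// #
+    /// # fn main() {
+    /// #     try_main().unwrap();
+    /// # }
+    /// ```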
+ pub fn poll_interruptible(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<usize> {
+ self.poll1(events, timeout, true)
+ }
+
+ fn poll1(&self, events: &mut Events, mut timeout: Option<Duration>, interruptible: bool) -> io::Result<usize> {
+ let zero = Some(Duration::from_millis(0));
+
+ // At a high level, the synchronization strategy is to acquire access to
+ // the critical section by transitioning the atomic from unlocked ->
+ // locked. If the attempt fails, the thread will wait on the condition
+ // variable.
+ //
+ // # Some more detail
+ //
+ // The `lock_state` atomic usize combines:
+ //
+ // - locked flag, stored in the least significant bit
+ // - number of waiting threads, stored in the rest of the bits.
+ //
+ // When a thread transitions the locked flag from 0 -> 1, it has
+ // obtained access to the critical section.
+ //
+ // When entering `poll`, a compare-and-swap from 0 -> 1 is attempted.
+ // This is a fast path for the case when there are no concurrent calls
+ // to poll, which is very common.
+ //
+ // On failure, the mutex is locked, and the thread attempts to increment
+ // the number of waiting threads component of `lock_state`. If this is
+ // successfully done while the locked flag is set, then the thread can
+ // wait on the condition variable.
+ //
+ // When a thread exits the critical section, it unsets the locked flag.
+ // If there are any waiters, which is atomically determined while
+ // unsetting the locked flag, then the condvar is notified.
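+        //
+        // For example, a `lock_state` value of 0b101 (5) means that the lock
+        // is currently held and two threads are waiting (waiter count 0b10).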
+
+ let mut curr = self.lock_state.compare_and_swap(0, 1, SeqCst);
+
+ if 0 != curr {
+ // Enter slower path
+ let mut lock = self.lock.lock().unwrap();
+ let mut inc = false;
+
+ loop {
+ if curr & 1 == 0 {
+ // The lock is currently free, attempt to grab it
+ let mut next = curr | 1;
+
+ if inc {
+ // The waiter count has previously been incremented, so
+ // decrement it here
+ next -= 2;
+ }
+
+ let actual = self.lock_state.compare_and_swap(curr, next, SeqCst);
+
+ if actual != curr {
+ curr = actual;
+ continue;
+ }
+
+ // Lock acquired, break from the loop
+ break;
+ }
+
+ if timeout == zero {
+ if inc {
+ self.lock_state.fetch_sub(2, SeqCst);
+ }
+
+ return Ok(0);
+ }
+
+ // The lock is currently held, so wait for it to become
+ // free. If the waiter count hasn't been incremented yet, do
+ // so now
+ if !inc {
+ let next = curr.checked_add(2).expect("overflow");
+ let actual = self.lock_state.compare_and_swap(curr, next, SeqCst);
+
+ if actual != curr {
+ curr = actual;
+ continue;
+ }
+
+ // Track that the waiter count has been incremented for
+ // this thread and fall through to the condvar waiting
+ inc = true;
+ }
+
+ lock = match timeout {
+ Some(to) => {
+ let now = Instant::now();
+
+ // Wait to be notified
+ let (l, _) = self.condvar.wait_timeout(lock, to).unwrap();
+
+ // See how much time was elapsed in the wait
+ let elapsed = now.elapsed();
+
+ // Update `timeout` to reflect how much time is left to
+ // wait.
+ if elapsed >= to {
+ timeout = zero;
+ } else {
+ // Update the timeout
+ timeout = Some(to - elapsed);
+ }
+
+ l
+ }
+ None => {
+ self.condvar.wait(lock).unwrap()
+ }
+ };
+
+ // Reload the state
+ curr = self.lock_state.load(SeqCst);
+
+ // Try to lock again...
+ }
+ }
+
+ let ret = self.poll2(events, timeout, interruptible);
+
+ // Release the lock
+ if 1 != self.lock_state.fetch_and(!1, Release) {
+ // Acquire the mutex
+ let _lock = self.lock.lock().unwrap();
+
+ // There is at least one waiting thread, so notify one
+ self.condvar.notify_one();
+ }
+
+ ret
+ }
+
+ #[inline]
+ #[cfg_attr(feature = "cargo-clippy", allow(clippy::if_same_then_else))]
+ fn poll2(&self, events: &mut Events, mut timeout: Option<Duration>, interruptible: bool) -> io::Result<usize> {
+ // Compute the timeout value passed to the system selector. If the
+ // readiness queue has pending nodes, we still want to poll the system
+ // selector for new events, but we don't want to block the thread to
+ // wait for new events.
+ if timeout == Some(Duration::from_millis(0)) {
+ // If blocking is not requested, then there is no need to prepare
+ // the queue for sleep
+ //
+ // The sleep_marker should be removed by readiness_queue.poll().
+ } else if self.readiness_queue.prepare_for_sleep() {
+ // The readiness queue is empty. The call to `prepare_for_sleep`
+ // inserts `sleep_marker` into the queue. This signals to any
+            // threads setting readiness that `Poll::poll` is going to
+ // sleep, so the awakener should be used.
+ } else {
+ // The readiness queue is not empty, so do not block the thread.
+ timeout = Some(Duration::from_millis(0));
+ }
+
+ loop {
+ let now = Instant::now();
+ // First get selector events
+ let res = self.selector.select(&mut events.inner, AWAKEN, timeout);
+ match res {
+ Ok(true) => {
+ // Some awakeners require reading from a FD.
+ self.readiness_queue.inner.awakener.cleanup();
+ break;
+ }
+ Ok(false) => break,
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted && !interruptible => {
+ // Interrupted by a signal; update timeout if necessary and retry
+ if let Some(to) = timeout {
+ let elapsed = now.elapsed();
+ if elapsed >= to {
+ break;
+ } else {
+ timeout = Some(to - elapsed);
+ }
+ }
+ }
+ Err(e) => return Err(e),
+ }
+ }
+
+ // Poll custom event queue
+ self.readiness_queue.poll(&mut events.inner);
+
+ // Return number of polled events
+ Ok(events.inner.len())
+ }
+}
+
+fn validate_args(token: Token) -> io::Result<()> {
+ if token == AWAKEN {
+ return Err(io::Error::new(io::ErrorKind::Other, "invalid token"));
+ }
+
+ Ok(())
+}
+
+impl fmt::Debug for Poll {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Poll")
+ .finish()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for Poll {
+ fn as_raw_fd(&self) -> RawFd {
+ self.selector.as_raw_fd()
+ }
+}
+
+/// A collection of readiness events.
+///
+/// `Events` is passed as an argument to [`Poll::poll`] and will be used to
+/// receive any new readiness events received since the last poll. Usually, a
+/// single `Events` instance is created at the same time as a [`Poll`] and
+/// reused on each call to [`Poll::poll`].
+///
+/// See [`Poll`] for more documentation on polling.
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let poll = Poll::new()?;
+///
+/// assert_eq!(0, events.len());
+///
+/// // Register `Evented` handles with `poll`
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in &events {
+/// println!("event={:?}", event);
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// [`Poll::poll`]: struct.Poll.html#method.poll
+/// [`Poll`]: struct.Poll.html
+pub struct Events {
+ inner: sys::Events,
+}
+
+/// [`Events`] iterator.
+///
+/// This struct is created by the [`iter`] method on [`Events`].
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let poll = Poll::new()?;
+///
+/// // Register handles with `poll`
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in events.iter() {
+/// println!("event={:?}", event);
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// [`Events`]: struct.Events.html
+/// [`iter`]: struct.Events.html#method.iter
+#[derive(Debug, Clone)]
+pub struct Iter<'a> {
+ inner: &'a Events,
+ pos: usize,
+}
+
+/// Owned [`Events`] iterator.
+///
+/// This struct is created by the `into_iter` method on [`Events`].
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let poll = Poll::new()?;
+///
+/// // Register handles with `poll`
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in events {
+/// println!("event={:?}", event);
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+/// [`Events`]: struct.Events.html
+#[derive(Debug)]
+pub struct IntoIter {
+ inner: Events,
+ pos: usize,
+}
+
+impl Events {
+ /// Return a new `Events` capable of holding up to `capacity` events.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ ///
+ /// assert_eq!(1024, events.capacity());
+ /// ```
+ pub fn with_capacity(capacity: usize) -> Events {
+ Events {
+ inner: sys::Events::with_capacity(capacity),
+ }
+ }
+
+ #[deprecated(since="0.6.10", note="Index access removed in favor of iterator only API.")]
+ #[doc(hidden)]
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.inner.get(idx)
+ }
+
+ #[doc(hidden)]
+ #[deprecated(since="0.6.10", note="Index access removed in favor of iterator only API.")]
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ /// Returns the number of `Event` values that `self` can hold.
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ ///
+ /// assert_eq!(1024, events.capacity());
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.inner.capacity()
+ }
+
+ /// Returns `true` if `self` contains no `Event` values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ ///
+ /// assert!(events.is_empty());
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+
+ /// Returns an iterator over the `Event` values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll};
+ /// use std::time::Duration;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let poll = Poll::new()?;
+ ///
+ /// // Register handles with `poll`
+ ///
+ /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ ///
+ /// for event in events.iter() {
+ /// println!("event={:?}", event);
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn iter(&self) -> Iter {
+ Iter {
+ inner: self,
+ pos: 0
+ }
+ }
+
+    /// Clears all `Event` values from the container explicitly.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll};
+ /// use std::time::Duration;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let poll = Poll::new()?;
+ ///
+ /// // Register handles with `poll`
+ /// for _ in 0..2 {
+ /// events.clear();
+ /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ ///
+ /// for event in events.iter() {
+ /// println!("event={:?}", event);
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn clear(&mut self) {
+ self.inner.clear();
+ }
+}
+
+impl<'a> IntoIterator for &'a Events {
+ type Item = Event;
+ type IntoIter = Iter<'a>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a> Iterator for Iter<'a> {
+ type Item = Event;
+
+ fn next(&mut self) -> Option<Event> {
+ let ret = self.inner.inner.get(self.pos);
+ self.pos += 1;
+ ret
+ }
+}
+
+impl IntoIterator for Events {
+ type Item = Event;
+ type IntoIter = IntoIter;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter {
+ inner: self,
+ pos: 0,
+ }
+ }
+}
+
+impl Iterator for IntoIter {
+ type Item = Event;
+
+ fn next(&mut self) -> Option<Event> {
+ let ret = self.inner.inner.get(self.pos);
+ self.pos += 1;
+ ret
+ }
+}
+
+impl fmt::Debug for Events {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Events")
+ .field("capacity", &self.capacity())
+ .finish()
+ }
+}
+
+// ===== Accessors for internal usage =====
+
+pub fn selector(poll: &Poll) -> &sys::Selector {
+ &poll.selector
+}
+
+/*
+ *
+ * ===== Registration =====
+ *
+ */
+
+// TODO: get rid of this, windows depends on it for now
+#[allow(dead_code)]
+pub fn new_registration(poll: &Poll, token: Token, ready: Ready, opt: PollOpt)
+ -> (Registration, SetReadiness)
+{
+ Registration::new_priv(poll, token, ready, opt)
+}
+
+impl Registration {
+ /// Create and return a new `Registration` and the associated
+ /// `SetReadiness`.
+ ///
+ /// See [struct] documentation for more detail and [`Poll`]
+ /// for high level documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Ready, Registration, Poll, PollOpt, Token};
+ /// use std::thread;
+ ///
+ /// let (registration, set_readiness) = Registration::new2();
+ ///
+ /// thread::spawn(move || {
+ /// use std::time::Duration;
+ /// thread::sleep(Duration::from_millis(500));
+ ///
+ /// set_readiness.set_readiness(Ready::readable());
+ /// });
+ ///
+ /// let poll = Poll::new()?;
+ /// poll.register(&registration, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+ ///
+ /// let mut events = Events::with_capacity(256);
+ ///
+ /// loop {
+    /// poll.poll(&mut events, None)?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) && event.readiness().is_readable() {
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ /// [struct]: #
+ /// [`Poll`]: struct.Poll.html
+ pub fn new2() -> (Registration, SetReadiness) {
+ // Allocate the registration node. The new node will have `ref_count`
+ // set to 2: one SetReadiness, one Registration.
+ let node = Box::into_raw(Box::new(ReadinessNode::new(
+ ptr::null_mut(), Token(0), Ready::empty(), PollOpt::empty(), 2)));
+
+ let registration = Registration {
+ inner: RegistrationInner {
+ node,
+ },
+ };
+
+ let set_readiness = SetReadiness {
+ inner: RegistrationInner {
+ node,
+ },
+ };
+
+ (registration, set_readiness)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use `new2` instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn new(poll: &Poll, token: Token, interest: Ready, opt: PollOpt)
+ -> (Registration, SetReadiness)
+ {
+ Registration::new_priv(poll, token, interest, opt)
+ }
+
+ // TODO: Get rid of this (windows depends on it for now)
+ fn new_priv(poll: &Poll, token: Token, interest: Ready, opt: PollOpt)
+ -> (Registration, SetReadiness)
+ {
+ is_send::<Registration>();
+ is_sync::<Registration>();
+ is_send::<SetReadiness>();
+ is_sync::<SetReadiness>();
+
+ // Clone handle to the readiness queue, this bumps the ref count
+ let queue = poll.readiness_queue.inner.clone();
+
+ // Convert to a *mut () pointer
+ let queue: *mut () = unsafe { mem::transmute(queue) };
+
+ // Allocate the registration node. The new node will have `ref_count`
+ // set to 3: one SetReadiness, one Registration, and one Poll handle.
+ let node = Box::into_raw(Box::new(ReadinessNode::new(
+ queue, token, interest, opt, 3)));
+
+ let registration = Registration {
+ inner: RegistrationInner {
+ node,
+ },
+ };
+
+ let set_readiness = SetReadiness {
+ inner: RegistrationInner {
+ node,
+ },
+ };
+
+ (registration, set_readiness)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use `Evented` impl")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn update(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.inner.update(poll, token, interest, opts)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use `Poll::deregister` instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner.update(poll, Token(0), Ready::empty(), PollOpt::empty())
+ }
+}
+
+impl Evented for Registration {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.inner.update(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.inner.update(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner.update(poll, Token(0), Ready::empty(), PollOpt::empty())
+ }
+}
+
+impl Drop for Registration {
+ fn drop(&mut self) {
+ // `flag_as_dropped` toggles the `dropped` flag and notifies
+ // `Poll::poll` to release its handle (which is just decrementing
+ // the ref count).
+ if self.inner.state.flag_as_dropped() {
+ // Can't do anything if the queuing fails
+ let _ = self.inner.enqueue_with_wakeup();
+ }
+ }
+}
+
+impl fmt::Debug for Registration {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Registration")
+ .finish()
+ }
+}
+
+impl SetReadiness {
+ /// Returns the registration's current readiness.
+ ///
+ /// # Note
+ ///
+ /// There is no guarantee that `readiness` establishes any sort of memory
+ /// ordering. Any concurrent data access must be synchronized using another
+ /// strategy.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Registration, Ready};
+ ///
+ /// let (registration, set_readiness) = Registration::new2();
+ ///
+ /// assert!(set_readiness.readiness().is_empty());
+ ///
+ /// set_readiness.set_readiness(Ready::readable())?;
+ /// assert!(set_readiness.readiness().is_readable());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn readiness(&self) -> Ready {
+ self.inner.readiness()
+ }
+
+ /// Set the registration's readiness
+ ///
+ /// If the associated `Registration` is registered with a [`Poll`] instance
+ /// and has requested readiness events that include `ready`, then a future
+ /// call to [`Poll::poll`] will receive a readiness event representing the
+ /// readiness state change.
+ ///
+ /// # Note
+ ///
+    /// There is no guarantee that `set_readiness` establishes any sort of
+    /// memory
+ /// ordering. Any concurrent data access must be synchronized using another
+ /// strategy.
+ ///
+ /// There is also no guarantee as to when the readiness event will be
+ /// delivered to poll. A best attempt will be made to make the delivery in a
+ /// "timely" fashion. For example, the following is **not** guaranteed to
+ /// work:
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Registration, Ready, Poll, PollOpt, Token};
+ ///
+ /// let poll = Poll::new()?;
+ /// let (registration, set_readiness) = Registration::new2();
+ ///
+ /// poll.register(&registration,
+ /// Token(0),
+ /// Ready::readable(),
+ /// PollOpt::edge())?;
+ ///
+ /// // Set the readiness, then immediately poll to try to get the readiness
+ /// // event
+ /// set_readiness.set_readiness(Ready::readable())?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// poll.poll(&mut events, None)?;
+ ///
+ /// // There is NO guarantee that the following will work. It is possible
+ /// // that the readiness event will be delivered at a later time.
+ /// let event = events.get(0).unwrap();
+ /// assert_eq!(event.token(), Token(0));
+ /// assert!(event.readiness().is_readable());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ ///
+ /// # Examples
+ ///
+ /// A simple example, for a more elaborate example, see the [`Evented`]
+ /// documentation.
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Registration, Ready};
+ ///
+ /// let (registration, set_readiness) = Registration::new2();
+ ///
+ /// assert!(set_readiness.readiness().is_empty());
+ ///
+ /// set_readiness.set_readiness(Ready::readable())?;
+ /// assert!(set_readiness.readiness().is_readable());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ ///
+ /// [`Registration`]: struct.Registration.html
+ /// [`Evented`]: event/trait.Evented.html#examples
+ /// [`Poll`]: struct.Poll.html
+ /// [`Poll::poll`]: struct.Poll.html#method.poll
+ pub fn set_readiness(&self, ready: Ready) -> io::Result<()> {
+ self.inner.set_readiness(ready)
+ }
+}
+
+impl fmt::Debug for SetReadiness {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("SetReadiness")
+ .finish()
+ }
+}
+
+impl RegistrationInner {
+ /// Get the registration's readiness.
+ fn readiness(&self) -> Ready {
+ self.state.load(Relaxed).readiness()
+ }
+
+ /// Set the registration's readiness.
+ ///
+ /// This function can be called concurrently by an arbitrary number of
+ /// SetReadiness handles.
+ fn set_readiness(&self, ready: Ready) -> io::Result<()> {
+ // Load the current atomic state.
+ let mut state = self.state.load(Acquire);
+ let mut next;
+
+ loop {
+ next = state;
+
+ if state.is_dropped() {
+ // Node is dropped, no more notifications
+ return Ok(());
+ }
+
+ // Update the readiness
+ next.set_readiness(ready);
+
+            // If the readiness is not empty, try to obtain permission to
+ // push the node into the readiness queue.
+ if !next.effective_readiness().is_empty() {
+ next.set_queued();
+ }
+
+ let actual = self.state.compare_and_swap(state, next, AcqRel);
+
+ if state == actual {
+ break;
+ }
+
+ state = actual;
+ }
+
+ if !state.is_queued() && next.is_queued() {
+ // We toggled the queued flag, making us responsible for queuing the
+ // node in the MPSC readiness queue.
+ self.enqueue_with_wakeup()?;
+ }
+
+ Ok(())
+ }
+
+ /// Update the registration details associated with the node
+ fn update(&self, poll: &Poll, token: Token, interest: Ready, opt: PollOpt) -> io::Result<()> {
+ // First, ensure poll instances match
+ //
+ // Load the queue pointer, `Relaxed` is sufficient here as only the
+        // pointer is being operated on. The actual memory is guaranteed to be
+        // visible because of the `poll: &Poll` ref passed as an argument to
+        // the function.
+ let mut queue = self.readiness_queue.load(Relaxed);
+ let other: &*mut () = unsafe {
+ &*(&poll.readiness_queue.inner as *const _ as *const *mut ())
+ };
+ let other = *other;
+
+ debug_assert!(mem::size_of::<Arc<ReadinessQueueInner>>() == mem::size_of::<*mut ()>());
+
+ if queue.is_null() {
+ // Attempt to set the queue pointer. `Release` ordering synchronizes
+ // with `Acquire` in `ensure_with_wakeup`.
+ let actual = self.readiness_queue.compare_and_swap(
+ queue, other, Release);
+
+ if actual.is_null() {
+ // The CAS succeeded, this means that the node's ref count
+ // should be incremented to reflect that the `poll` function
+ // effectively owns the node as well.
+ //
+ // `Relaxed` ordering used for the same reason as in
+ // RegistrationInner::clone
+ self.ref_count.fetch_add(1, Relaxed);
+
+ // Note that the `queue` reference stored in our
+ // `readiness_queue` field is intended to be a strong reference,
+ // so now that we've successfully claimed the reference we bump
+ // the refcount here.
+ //
+ // Down below in `release_node` when we deallocate this
+ // `RegistrationInner` is where we'll transmute this back to an
+ // arc and decrement the reference count.
+ mem::forget(poll.readiness_queue.clone());
+ } else {
+ // The CAS failed, another thread set the queue pointer, so ensure
+ // that the pointer and `other` match
+ if actual != other {
+ return Err(io::Error::new(io::ErrorKind::Other, "registration handle associated with another `Poll` instance"));
+ }
+ }
+
+ queue = other;
+ } else if queue != other {
+ return Err(io::Error::new(io::ErrorKind::Other, "registration handle associated with another `Poll` instance"));
+ }
+
+ unsafe {
+ let actual = &poll.readiness_queue.inner as *const _ as *const usize;
+ debug_assert_eq!(queue as usize, *actual);
+ }
+
+ // The `update_lock` atomic is used as a flag ensuring only a single
+ // thread concurrently enters the `update` critical section. Any
+ // concurrent calls to update are discarded. If coordinated updates are
+ // required, the Mio user is responsible for handling that.
+ //
+ // Acquire / Release ordering is used on `update_lock` to ensure that
+ // data access to the `token_*` variables are scoped to the critical
+ // section.
+
+ // Acquire the update lock.
+ if self.update_lock.compare_and_swap(false, true, Acquire) {
+ // The lock is already held. Discard the update
+ return Ok(());
+ }
+
+ // Relaxed ordering is acceptable here as the only memory that needs to
+ // be visible as part of the update are the `token_*` variables, and
+ // ordering has already been handled by the `update_lock` access.
+ let mut state = self.state.load(Relaxed);
+ let mut next;
+
+ // Read the current token, again this memory has been ordered by the
+ // acquire on `update_lock`.
+ let curr_token_pos = state.token_write_pos();
+ let curr_token = unsafe { self::token(self, curr_token_pos) };
+
+ let mut next_token_pos = curr_token_pos;
+
+ // If the `update` call is changing the token, then compute the next
+ // available token slot and write the token there.
+ //
+ // Note that this computation is happening *outside* of the
+ // compare-and-swap loop. The update lock ensures that only a single
+ // thread could be mutating the write_token_position, so the
+ // `next_token_pos` will never need to be recomputed even if
+ // `token_read_pos` concurrently changes. This is because
+ // `token_read_pos` can ONLY concurrently change to the current value of
+ // `token_write_pos`, so `next_token_pos` will always remain valid.
+ if token != curr_token {
+ next_token_pos = state.next_token_pos();
+
+ // Update the token
+ match next_token_pos {
+ 0 => unsafe { *self.token_0.get() = token },
+ 1 => unsafe { *self.token_1.get() = token },
+ 2 => unsafe { *self.token_2.get() = token },
+ _ => unreachable!(),
+ }
+ }
+
+ // Now enter the compare-and-swap loop
+ loop {
+ next = state;
+
+ // The node is only dropped once all `Registration` handles are
+ // dropped. Only `Registration` can call `update`.
+ debug_assert!(!state.is_dropped());
+
+ // Update the write token position, this will also release the token
+ // to Poll::poll.
+ next.set_token_write_pos(next_token_pos);
+
+ // Update readiness and poll opts
+ next.set_interest(interest);
+ next.set_poll_opt(opt);
+
+ // If there is effective readiness, the node will need to be queued
+ // for processing. This exact behavior is still TBD, so we are
+ // conservative for now and always fire.
+ //
+ // See https://github.com/carllerche/mio/issues/535.
+ if !next.effective_readiness().is_empty() {
+ next.set_queued();
+ }
+
+ // compare-and-swap the state values. Only `Release` is needed here.
+ // The `Release` ensures that `Poll::poll` will see the token
+ // update and the update function doesn't care about any other
+ // memory visibility.
+ let actual = self.state.compare_and_swap(state, next, Release);
+
+ if actual == state {
+ break;
+ }
+
+ // CAS failed, but `curr_token_pos` should not have changed given
+ // that we still hold the update lock.
+ debug_assert_eq!(curr_token_pos, actual.token_write_pos());
+
+ state = actual;
+ }
+
+ // Release the lock
+ self.update_lock.store(false, Release);
+
+ if !state.is_queued() && next.is_queued() {
+            // We are responsible for enqueueing the node.
+ enqueue_with_wakeup(queue, self)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl ops::Deref for RegistrationInner {
+ type Target = ReadinessNode;
+
+ fn deref(&self) -> &ReadinessNode {
+ unsafe { &*self.node }
+ }
+}
+
+impl Clone for RegistrationInner {
+ fn clone(&self) -> RegistrationInner {
+ // Using a relaxed ordering is alright here, as knowledge of the
+ // original reference prevents other threads from erroneously deleting
+ // the object.
+ //
+        // As explained in the [Boost documentation][1], increasing the
+        // reference counter can always be done with memory_order_relaxed: new
+        // references to an object can only be formed from an existing
+        // reference, and passing an existing reference from one thread to
+        // another must already provide any required synchronization.
+        //
+        // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+ let old_size = self.ref_count.fetch_add(1, Relaxed);
+
+ // However we need to guard against massive refcounts in case someone
+ // is `mem::forget`ing Arcs. If we don't do this the count can overflow
+        // and users will use-after-free. We racily saturate to `isize::MAX` on
+ // the assumption that there aren't ~2 billion threads incrementing
+ // the reference count at once. This branch will never be taken in
+ // any realistic program.
+ //
+ // We abort because such a program is incredibly degenerate, and we
+ // don't care to support it.
+ if old_size & !MAX_REFCOUNT != 0 {
+ process::abort();
+ }
+
+ RegistrationInner {
+ node: self.node,
+ }
+ }
+}
+
+impl Drop for RegistrationInner {
+ fn drop(&mut self) {
+ // This only handles releases from `Registration` and `SetReadiness`
+ // handles; `Poll` must release its own reference itself.
+ release_node(self.node);
+ }
+}
+
+/*
+ *
+ * ===== ReadinessQueue =====
+ *
+ */
+
+impl ReadinessQueue {
+ /// Create a new `ReadinessQueue`.
+ fn new() -> io::Result<ReadinessQueue> {
+ is_send::<Self>();
+ is_sync::<Self>();
+
+ let end_marker = Box::new(ReadinessNode::marker());
+ let sleep_marker = Box::new(ReadinessNode::marker());
+ let closed_marker = Box::new(ReadinessNode::marker());
+
+ let ptr = &*end_marker as *const _ as *mut _;
+
+ Ok(ReadinessQueue {
+ inner: Arc::new(ReadinessQueueInner {
+ awakener: sys::Awakener::new()?,
+ head_readiness: AtomicPtr::new(ptr),
+ tail_readiness: UnsafeCell::new(ptr),
+ end_marker,
+ sleep_marker,
+ closed_marker,
+ })
+ })
+ }
+
+ /// Poll the queue for new events
+ fn poll(&self, dst: &mut sys::Events) {
+ // `until` is set with the first node that gets re-enqueued due to being
+ // set to have level-triggered notifications. This prevents an infinite
+ // loop where `Poll::poll` will keep dequeuing nodes it enqueues.
+ let mut until = ptr::null_mut();
+
+ if dst.len() == dst.capacity() {
+ // If `dst` is already full, the readiness queue won't be drained.
+ // This might result in `sleep_marker` staying in the queue and
+ // unnecessary pipe writes occurring.
+ self.inner.clear_sleep_marker();
+ }
+
+ 'outer:
+ while dst.len() < dst.capacity() {
+ // Dequeue a node. If the queue is in an inconsistent state, then
+ // stop polling. `Poll::poll` will be called again shortly and enter
+ // a syscall, which should be enough to enable the other thread to
+ // finish the queuing process.
+ let ptr = match unsafe { self.inner.dequeue_node(until) } {
+ Dequeue::Empty | Dequeue::Inconsistent => break,
+ Dequeue::Data(ptr) => ptr,
+ };
+
+ let node = unsafe { &*ptr };
+
+ // Read the node state with Acquire ordering. This allows reading
+ // the token variables.
+ let mut state = node.state.load(Acquire);
+ let mut next;
+ let mut readiness;
+ let mut opt;
+
+ loop {
+ // Build up any changes to the readiness node's state and
+ // attempt the CAS at the end
+ next = state;
+
+ // Given that the node was just read from the queue, the
+ // `queued` flag should still be set.
+ debug_assert!(state.is_queued());
+
+ // The dropped flag means we need to release the node and
+ // perform no further processing on it.
+ if state.is_dropped() {
+ // Release the node and continue
+ release_node(ptr);
+ continue 'outer;
+ }
+
+ // Process the node
+ readiness = state.effective_readiness();
+ opt = state.poll_opt();
+
+ if opt.is_edge() {
+ // Mark the node as dequeued
+ next.set_dequeued();
+
+ if opt.is_oneshot() && !readiness.is_empty() {
+ next.disarm();
+ }
+ } else if readiness.is_empty() {
+ next.set_dequeued();
+ }
+
+ // Ensure `token_read_pos` is set to `token_write_pos` so that
+ // we read the most up-to-date token value.
+ next.update_token_read_pos();
+
+ if state == next {
+ break;
+ }
+
+ let actual = node.state.compare_and_swap(state, next, AcqRel);
+
+ if actual == state {
+ break;
+ }
+
+ state = actual;
+ }
+
+ // If the queued flag is still set, then the node must be requeued.
+ // This typically happens when using level-triggered notifications.
+ if next.is_queued() {
+ if until.is_null() {
+ // We never want to see the node again
+ until = ptr;
+ }
+
+ // Requeue the node
+ self.inner.enqueue_node(node);
+ }
+
+ if !readiness.is_empty() {
+ // Get the token
+ let token = unsafe { token(node, next.token_read_pos()) };
+
+ // Push the event
+ dst.push_event(Event::new(readiness, token));
+ }
+ }
+ }
+
+ /// Prepare the queue for the `Poll::poll` thread to block in the system
+ /// selector. This involves changing `head_readiness` to `sleep_marker`.
+ /// Returns true if successful and `poll` can block.
+ fn prepare_for_sleep(&self) -> bool {
+ let end_marker = self.inner.end_marker();
+ let sleep_marker = self.inner.sleep_marker();
+
+ let tail = unsafe { *self.inner.tail_readiness.get() };
+
+ // If the tail is currently set to the sleep_marker, then check if the
+ // head is as well. If it is, then the queue is currently ready to
+ // sleep. If it is not, then the queue is not empty and there should be
+ // no sleeping.
+ if tail == sleep_marker {
+ return self.inner.head_readiness.load(Acquire) == sleep_marker;
+ }
+
+ // If the tail is not currently set to `end_marker`, then the queue is
+ // not empty.
+ if tail != end_marker {
+ return false;
+ }
+
+ // The sleep marker is *not* currently in the readiness queue.
+ //
+ // The sleep marker is only inserted in this function. It is also only
+ // inserted in the tail position. This is guaranteed by first checking
+ // that the end marker is in the tail position, pushing the sleep marker
+ // after the end marker, then removing the end marker.
+ //
+ // Before inserting a node into the queue, the next pointer has to be
+ // set to null. Again, this is only safe to do when the node is not
+ // currently in the queue, but we have already ensured this.
+ self.inner.sleep_marker.next_readiness.store(ptr::null_mut(), Relaxed);
+
+ let actual = self.inner.head_readiness.compare_and_swap(
+ end_marker, sleep_marker, AcqRel);
+
+ debug_assert!(actual != sleep_marker);
+
+ if actual != end_marker {
+ // The readiness queue is not empty
+ return false;
+ }
+
+ // The current tail should be pointing to `end_marker`
+ debug_assert!(unsafe { *self.inner.tail_readiness.get() == end_marker });
+ // The `end_marker` next pointer should be null
+ debug_assert!(self.inner.end_marker.next_readiness.load(Relaxed).is_null());
+
+ // Update tail pointer.
+ unsafe { *self.inner.tail_readiness.get() = sleep_marker; }
+ true
+ }
+}
+
+impl Drop for ReadinessQueue {
+ fn drop(&mut self) {
+ // Close the queue by enqueuing the closed node
+ self.inner.enqueue_node(&*self.inner.closed_marker);
+
+ loop {
+ // Free any nodes that happen to be left in the readiness queue
+ let ptr = match unsafe { self.inner.dequeue_node(ptr::null_mut()) } {
+ Dequeue::Empty => break,
+ Dequeue::Inconsistent => {
+ // This really shouldn't be possible as all other handles to
+ // `ReadinessQueueInner` are dropped, but handle this by
+ // spinning I guess?
+ continue;
+ }
+ Dequeue::Data(ptr) => ptr,
+ };
+
+ let node = unsafe { &*ptr };
+
+ let state = node.state.load(Acquire);
+
+ debug_assert!(state.is_queued());
+
+ release_node(ptr);
+ }
+ }
+}
+
+impl ReadinessQueueInner {
+ fn wakeup(&self) -> io::Result<()> {
+ self.awakener.wakeup()
+ }
+
+ /// Enqueue the given node and, if the queue was previously in the
+ /// sleeping state, wake up the `Poll::poll` thread via the awakener.
+ fn enqueue_node_with_wakeup(&self, node: &ReadinessNode) -> io::Result<()> {
+ if self.enqueue_node(node) {
+ self.wakeup()?;
+ }
+
+ Ok(())
+ }
+
+ /// Push the node onto the readiness queue. Returns `true` if the queue
+ /// was in the sleeping state, meaning `Poll` needs to be woken up.
+ fn enqueue_node(&self, node: &ReadinessNode) -> bool {
+ // This is the 1024cores.net intrusive MPSC queue [1] "push" function.
+ let node_ptr = node as *const _ as *mut _;
+
+ // Relaxed used as the ordering is "released" when swapping
+ // `head_readiness`
+ node.next_readiness.store(ptr::null_mut(), Relaxed);
+
+ unsafe {
+ let mut prev = self.head_readiness.load(Acquire);
+
+ loop {
+ if prev == self.closed_marker() {
+ debug_assert!(node_ptr != self.closed_marker());
+ // debug_assert!(node_ptr != self.end_marker());
+ debug_assert!(node_ptr != self.sleep_marker());
+
+ if node_ptr != self.end_marker() {
+ // The readiness queue is shut down, but the enqueue flag was
+ // set. This means that we are responsible for decrementing
+ // the ready queue's ref count
+ debug_assert!(node.ref_count.load(Relaxed) >= 2);
+ release_node(node_ptr);
+ }
+
+ return false;
+ }
+
+ let act = self.head_readiness.compare_and_swap(prev, node_ptr, AcqRel);
+
+ if prev == act {
+ break;
+ }
+
+ prev = act;
+ }
+
+ debug_assert!((*prev).next_readiness.load(Relaxed).is_null());
+
+ (*prev).next_readiness.store(node_ptr, Release);
+
+ prev == self.sleep_marker()
+ }
+ }
+
+ fn clear_sleep_marker(&self) {
+ let end_marker = self.end_marker();
+ let sleep_marker = self.sleep_marker();
+
+ unsafe {
+ let tail = *self.tail_readiness.get();
+
+ if tail != self.sleep_marker() {
+ return;
+ }
+
+ // The end marker is *not* currently in the readiness queue
+ // (since the sleep marker is).
+ self.end_marker.next_readiness.store(ptr::null_mut(), Relaxed);
+
+ let actual = self.head_readiness.compare_and_swap(
+ sleep_marker, end_marker, AcqRel);
+
+ debug_assert!(actual != end_marker);
+
+ if actual != sleep_marker {
+ // The readiness queue is not empty, so we cannot remove the
+ // sleep marker.
+ return;
+ }
+
+ // Update the tail pointer.
+ *self.tail_readiness.get() = end_marker;
+ }
+ }
+
+ /// Must only be called in `poll` or `drop`
+ unsafe fn dequeue_node(&self, until: *mut ReadinessNode) -> Dequeue {
+ // This is the 1024cores.net intrusive MPSC queue [1] "pop" function
+ // with the modifications mentioned at the top of the file.
+ let mut tail = *self.tail_readiness.get();
+ let mut next = (*tail).next_readiness.load(Acquire);
+
+ if tail == self.end_marker() || tail == self.sleep_marker() || tail == self.closed_marker() {
+ if next.is_null() {
+ // Make sure the sleep marker is removed (as we are no longer
+ // sleeping).
+ self.clear_sleep_marker();
+
+ return Dequeue::Empty;
+ }
+
+ *self.tail_readiness.get() = next;
+ tail = next;
+ next = (*next).next_readiness.load(Acquire);
+ }
+
+ // Only need to check `until` at this point. `until` is either null,
+ // which will never match tail OR it is a node that was pushed by
+ // the current thread. This means that either:
+ //
+ // 1) The queue is inconsistent, which is handled explicitly
+ // 2) We encounter `until` at this point in dequeue
+ // 3) We will pop a different node
+ if tail == until {
+ return Dequeue::Empty;
+ }
+
+ if !next.is_null() {
+ *self.tail_readiness.get() = next;
+ return Dequeue::Data(tail);
+ }
+
+ if self.head_readiness.load(Acquire) != tail {
+ return Dequeue::Inconsistent;
+ }
+
+ // Push the stub node
+ self.enqueue_node(&*self.end_marker);
+
+ next = (*tail).next_readiness.load(Acquire);
+
+ if !next.is_null() {
+ *self.tail_readiness.get() = next;
+ return Dequeue::Data(tail);
+ }
+
+ Dequeue::Inconsistent
+ }
+
+ fn end_marker(&self) -> *mut ReadinessNode {
+ &*self.end_marker as *const ReadinessNode as *mut ReadinessNode
+ }
+
+ fn sleep_marker(&self) -> *mut ReadinessNode {
+ &*self.sleep_marker as *const ReadinessNode as *mut ReadinessNode
+ }
+
+ fn closed_marker(&self) -> *mut ReadinessNode {
+ &*self.closed_marker as *const ReadinessNode as *mut ReadinessNode
+ }
+}
+
+impl ReadinessNode {
+ /// Return a new `ReadinessNode`, initialized with the provided `ref_count`.
+ fn new(queue: *mut (),
+ token: Token,
+ interest: Ready,
+ opt: PollOpt,
+ ref_count: usize) -> ReadinessNode
+ {
+ ReadinessNode {
+ state: AtomicState::new(interest, opt),
+ // Only the first token is set, the others are initialized to 0
+ token_0: UnsafeCell::new(token),
+ token_1: UnsafeCell::new(Token(0)),
+ token_2: UnsafeCell::new(Token(0)),
+ next_readiness: AtomicPtr::new(ptr::null_mut()),
+ update_lock: AtomicBool::new(false),
+ readiness_queue: AtomicPtr::new(queue),
+ ref_count: AtomicUsize::new(ref_count),
+ }
+ }
+
+ fn marker() -> ReadinessNode {
+ ReadinessNode {
+ state: AtomicState::new(Ready::empty(), PollOpt::empty()),
+ token_0: UnsafeCell::new(Token(0)),
+ token_1: UnsafeCell::new(Token(0)),
+ token_2: UnsafeCell::new(Token(0)),
+ next_readiness: AtomicPtr::new(ptr::null_mut()),
+ update_lock: AtomicBool::new(false),
+ readiness_queue: AtomicPtr::new(ptr::null_mut()),
+ ref_count: AtomicUsize::new(0),
+ }
+ }
+
+ fn enqueue_with_wakeup(&self) -> io::Result<()> {
+ let queue = self.readiness_queue.load(Acquire);
+
+ if queue.is_null() {
+ // Not associated with a queue, nothing to do
+ return Ok(());
+ }
+
+ enqueue_with_wakeup(queue, self)
+ }
+}
+
+fn enqueue_with_wakeup(queue: *mut (), node: &ReadinessNode) -> io::Result<()> {
+ debug_assert!(!queue.is_null());
+ // This is ugly... but we don't want to bump the ref count.
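+ // (`Arc<T>` is represented as a single pointer, so a pointer to the raw
+ // `*mut ()` can be reinterpreted as a pointer to the `Arc` itself,
+ // borrowing it for this call without touching the reference count.)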
+ let queue: &Arc<ReadinessQueueInner> = unsafe {
+ &*(&queue as *const *mut () as *const Arc<ReadinessQueueInner>)
+ };
+ queue.enqueue_node_with_wakeup(node)
+}
+
+unsafe fn token(node: &ReadinessNode, pos: usize) -> Token {
+ match pos {
+ 0 => *node.token_0.get(),
+ 1 => *node.token_1.get(),
+ 2 => *node.token_2.get(),
+ _ => unreachable!(),
+ }
+}
+
+fn release_node(ptr: *mut ReadinessNode) {
+ unsafe {
+ // `AcqRel` synchronizes with other `release_node` functions and ensures
+ // that the drop happens after any reads / writes on other threads.
+ if (*ptr).ref_count.fetch_sub(1, AcqRel) != 1 {
+ return;
+ }
+
+ let node = Box::from_raw(ptr);
+
+ // Decrement the readiness_queue Arc
+ let queue = node.readiness_queue.load(Acquire);
+
+ if queue.is_null() {
+ return;
+ }
+
+ let _: Arc<ReadinessQueueInner> = mem::transmute(queue);
+ }
+}
+
+impl AtomicState {
+ fn new(interest: Ready, opt: PollOpt) -> AtomicState {
+ let state = ReadinessState::new(interest, opt);
+
+ AtomicState {
+ inner: AtomicUsize::new(state.into()),
+ }
+ }
+
+ /// Loads the current `ReadinessState`
+ fn load(&self, order: Ordering) -> ReadinessState {
+ self.inner.load(order).into()
+ }
+
+ /// Stores `new` if the current state is the same as `current`, returning the previous state.
+ fn compare_and_swap(&self, current: ReadinessState, new: ReadinessState, order: Ordering) -> ReadinessState {
+ self.inner.compare_and_swap(current.into(), new.into(), order).into()
+ }
+
+ // Returns `true` if the node should be queued
+ fn flag_as_dropped(&self) -> bool {
+ let prev: ReadinessState = self.inner.fetch_or(DROPPED_MASK | QUEUED_MASK, Release).into();
+ // The flag should not have been previously set
+ debug_assert!(!prev.is_dropped());
+
+ !prev.is_queued()
+ }
+}
+
+impl ReadinessState {
+ // Create a `ReadinessState` initialized with the provided arguments
+ #[inline]
+ fn new(interest: Ready, opt: PollOpt) -> ReadinessState {
+ let interest = event::ready_as_usize(interest);
+ let opt = event::opt_as_usize(opt);
+
+ debug_assert!(interest <= MASK_4);
+ debug_assert!(opt <= MASK_4);
+
+ let mut val = interest << INTEREST_SHIFT;
+ val |= opt << POLL_OPT_SHIFT;
+
+ ReadinessState(val)
+ }
+
+ #[inline]
+ fn get(self, mask: usize, shift: usize) -> usize {
+ (self.0 >> shift) & mask
+ }
+
+ #[inline]
+ fn set(&mut self, val: usize, mask: usize, shift: usize) {
+ self.0 = (self.0 & !(mask << shift)) | (val << shift)
+ }
+
+ /// Get the readiness
+ #[inline]
+ fn readiness(self) -> Ready {
+ let v = self.get(MASK_4, READINESS_SHIFT);
+ event::ready_from_usize(v)
+ }
+
+ #[inline]
+ fn effective_readiness(self) -> Ready {
+ self.readiness() & self.interest()
+ }
+
+ /// Set the readiness
+ #[inline]
+ fn set_readiness(&mut self, v: Ready) {
+ self.set(event::ready_as_usize(v), MASK_4, READINESS_SHIFT);
+ }
+
+ /// Get the interest
+ #[inline]
+ fn interest(self) -> Ready {
+ let v = self.get(MASK_4, INTEREST_SHIFT);
+ event::ready_from_usize(v)
+ }
+
+ /// Set the interest
+ #[inline]
+ fn set_interest(&mut self, v: Ready) {
+ self.set(event::ready_as_usize(v), MASK_4, INTEREST_SHIFT);
+ }
+
+ #[inline]
+ fn disarm(&mut self) {
+ self.set_interest(Ready::empty());
+ }
+
+ /// Get the poll options
+ #[inline]
+ fn poll_opt(self) -> PollOpt {
+ let v = self.get(MASK_4, POLL_OPT_SHIFT);
+ event::opt_from_usize(v)
+ }
+
+ /// Set the poll options
+ #[inline]
+ fn set_poll_opt(&mut self, v: PollOpt) {
+ self.set(event::opt_as_usize(v), MASK_4, POLL_OPT_SHIFT);
+ }
+
+ #[inline]
+ fn is_queued(self) -> bool {
+ self.0 & QUEUED_MASK == QUEUED_MASK
+ }
+
+ /// Set the queued flag
+ #[inline]
+ fn set_queued(&mut self) {
+ // Dropped nodes should never be queued
+ debug_assert!(!self.is_dropped());
+ self.0 |= QUEUED_MASK;
+ }
+
+ #[inline]
+ fn set_dequeued(&mut self) {
+ debug_assert!(self.is_queued());
+ self.0 &= !QUEUED_MASK
+ }
+
+ #[inline]
+ fn is_dropped(self) -> bool {
+ self.0 & DROPPED_MASK == DROPPED_MASK
+ }
+
+ #[inline]
+ fn token_read_pos(self) -> usize {
+ self.get(MASK_2, TOKEN_RD_SHIFT)
+ }
+
+ #[inline]
+ fn token_write_pos(self) -> usize {
+ self.get(MASK_2, TOKEN_WR_SHIFT)
+ }
+
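+ /// Pick the slot (0, 1, or 2) for the next token write: the slot used by
+ /// neither the current reader nor the current writer, falling back to
+ /// simple rotation when the two coincide. The three token fields thus act
+ /// as a small triple buffer: the writer never clobbers a slot that
+ /// `Poll::poll` may still be reading.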
+ #[inline]
+ fn next_token_pos(self) -> usize {
+ let rd = self.token_read_pos();
+ let wr = self.token_write_pos();
+
+ match wr {
+ 0 => {
+ match rd {
+ 1 => 2,
+ 2 => 1,
+ 0 => 1,
+ _ => unreachable!(),
+ }
+ }
+ 1 => {
+ match rd {
+ 0 => 2,
+ 2 => 0,
+ 1 => 2,
+ _ => unreachable!(),
+ }
+ }
+ 2 => {
+ match rd {
+ 0 => 1,
+ 1 => 0,
+ 2 => 0,
+ _ => unreachable!(),
+ }
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ #[inline]
+ fn set_token_write_pos(&mut self, val: usize) {
+ self.set(val, MASK_2, TOKEN_WR_SHIFT);
+ }
+
+ #[inline]
+ fn update_token_read_pos(&mut self) {
+ let val = self.token_write_pos();
+ self.set(val, MASK_2, TOKEN_RD_SHIFT);
+ }
+}
+
+impl From<ReadinessState> for usize {
+ fn from(src: ReadinessState) -> usize {
+ src.0
+ }
+}
+
+impl From<usize> for ReadinessState {
+ fn from(src: usize) -> ReadinessState {
+ ReadinessState(src)
+ }
+}
+
+fn is_send<T: Send>() {}
+fn is_sync<T: Sync>() {}
+
+impl SelectorId {
+ pub fn new() -> SelectorId {
+ SelectorId {
+ id: AtomicUsize::new(0),
+ }
+ }
+
+ pub fn associate_selector(&self, poll: &Poll) -> io::Result<()> {
+ let selector_id = self.id.load(Ordering::SeqCst);
+
+ if selector_id != 0 && selector_id != poll.selector.id() {
+ Err(io::Error::new(io::ErrorKind::Other, "socket already registered"))
+ } else {
+ self.id.store(poll.selector.id(), Ordering::SeqCst);
+ Ok(())
+ }
+ }
+}
+
+impl Clone for SelectorId {
+ fn clone(&self) -> SelectorId {
+ SelectorId {
+ id: AtomicUsize::new(self.id.load(Ordering::SeqCst)),
+ }
+ }
+}
+
+#[test]
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub fn as_raw_fd() {
+ let poll = Poll::new().unwrap();
+ assert!(poll.as_raw_fd() > 0);
+}
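Editor's note: the readiness queue above ultimately backs mio's user-space readiness API. As orientation, a minimal sketch of that API using mio 0.6's documented `Registration`/`SetReadiness` pair (illustrative only, not part of the vendored diff; error handling elided):

    use mio::{Events, Poll, PollOpt, Ready, Registration, Token};

    // Each pair shares one ReadinessNode; `set_readiness` enqueues the node
    // and wakes a sleeping `Poll::poll` via the awakener.
    let (registration, set_readiness) = Registration::new2();
    let poll = Poll::new().unwrap();
    poll.register(&registration, Token(0), Ready::readable(), PollOpt::edge()).unwrap();

    set_readiness.set_readiness(Ready::readable()).unwrap();

    let mut events = Events::with_capacity(16);
    poll.poll(&mut events, None).unwrap();
    assert!(events.iter().any(|e| e.token() == Token(0)));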
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/awakener.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/awakener.rs
new file mode 100644
index 0000000000..19bc762429
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/awakener.rs
@@ -0,0 +1,73 @@
+use {io, poll, Evented, Ready, Poll, PollOpt, Token};
+use zircon;
+use std::sync::{Arc, Mutex, Weak};
+
+pub struct Awakener {
+ /// Token and weak reference to the port on which Awakener was registered.
+ ///
+ /// When `Awakener::wakeup` is called, these are used to send a wakeup message to the port.
+ inner: Mutex<Option<(Token, Weak<zircon::Port>)>>,
+}
+
+impl Awakener {
+ /// Create a new `Awakener`.
+ pub fn new() -> io::Result<Awakener> {
+ Ok(Awakener {
+ inner: Mutex::new(None)
+ })
+ }
+
+ /// Send a wakeup signal to the `Selector` on which the `Awakener` was registered.
+ pub fn wakeup(&self) -> io::Result<()> {
+ let inner_locked = self.inner.lock().unwrap();
+ let &(token, ref weak_port) =
+ inner_locked.as_ref().expect("Called wakeup on unregistered awakener.");
+
+ let port = weak_port.upgrade().expect("Tried to wakeup a closed port.");
+
+ let status = 0; // arbitrary
+ let packet = zircon::Packet::from_user_packet(
+ token.0 as u64, status, zircon::UserPacket::from_u8_array([0; 32]));
+
+ Ok(port.queue(&packet)?)
+ }
+
+ pub fn cleanup(&self) {}
+}
+
+impl Evented for Awakener {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ _events: Ready,
+ _opts: PollOpt) -> io::Result<()>
+ {
+ let mut inner_locked = self.inner.lock().unwrap();
+ if inner_locked.is_some() {
+ panic!("Called register on already-registered Awakener.");
+ }
+ *inner_locked = Some((token, Arc::downgrade(poll::selector(poll).port())));
+
+ Ok(())
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ _events: Ready,
+ _opts: PollOpt) -> io::Result<()>
+ {
+ let mut inner_locked = self.inner.lock().unwrap();
+ *inner_locked = Some((token, Arc::downgrade(poll::selector(poll).port())));
+
+ Ok(())
+ }
+
+ fn deregister(&self, _poll: &Poll) -> io::Result<()>
+ {
+ let mut inner_locked = self.inner.lock().unwrap();
+ *inner_locked = None;
+
+ Ok(())
+ }
+} \ No newline at end of file
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/eventedfd.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/eventedfd.rs
new file mode 100644
index 0000000000..e23d0c4a1e
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/eventedfd.rs
@@ -0,0 +1,263 @@
+use {io, poll, Evented, Ready, Poll, PollOpt, Token};
+use libc;
+use zircon;
+use zircon::AsHandleRef;
+use sys::fuchsia::{DontDrop, poll_opts_to_wait_async, sys};
+use std::mem;
+use std::os::unix::io::RawFd;
+use std::sync::{Arc, Mutex};
+
+/// Properties of an `EventedFd`'s current registration
+#[derive(Debug)]
+pub struct EventedFdRegistration {
+ token: Token,
+ handle: DontDrop<zircon::Handle>,
+ rereg_signals: Option<(zircon::Signals, zircon::WaitAsyncOpts)>,
+}
+
+impl EventedFdRegistration {
+ unsafe fn new(token: Token,
+ raw_handle: sys::zx_handle_t,
+ rereg_signals: Option<(zircon::Signals, zircon::WaitAsyncOpts)>,
+ ) -> Self
+ {
+ EventedFdRegistration {
+ token: token,
+ handle: DontDrop::new(zircon::Handle::from_raw(raw_handle)),
+ rereg_signals: rereg_signals
+ }
+ }
+
+ pub fn rereg_signals(&self) -> Option<(zircon::Signals, zircon::WaitAsyncOpts)> {
+ self.rereg_signals
+ }
+}
+
+/// An event-ed file descriptor. The file descriptor is owned by this structure.
+#[derive(Debug)]
+pub struct EventedFdInner {
+ /// Properties of the current registration.
+ registration: Mutex<Option<EventedFdRegistration>>,
+
+ /// Owned file descriptor.
+ ///
+ /// `fd` is closed on `Drop`, so modifying `fd` is a memory-unsafe operation.
+ fd: RawFd,
+
+ /// Owned `fdio_t` pointer.
+ fdio: *const sys::fdio_t,
+}
+
+impl EventedFdInner {
+ pub fn rereg_for_level(&self, port: &zircon::Port) {
+ let registration_opt = self.registration.lock().unwrap();
+ if let Some(ref registration) = *registration_opt {
+ if let Some((rereg_signals, rereg_opts)) = registration.rereg_signals {
+ let _res =
+ registration
+ .handle.inner_ref()
+ .wait_async_handle(
+ port,
+ registration.token.0 as u64,
+ rereg_signals,
+ rereg_opts);
+ }
+ }
+ }
+
+ pub fn registration(&self) -> &Mutex<Option<EventedFdRegistration>> {
+ &self.registration
+ }
+
+ pub fn fdio(&self) -> &sys::fdio_t {
+ unsafe { &*self.fdio }
+ }
+}
+
+impl Drop for EventedFdInner {
+ fn drop(&mut self) {
+ unsafe {
+ sys::__fdio_release(self.fdio);
+ let _ = libc::close(self.fd);
+ }
+ }
+}
+
+// `EventedFdInner` must be manually declared `Send + Sync` because it contains a `RawFd` and a
+// `*const sys::fdio_t`. These are only used to make thread-safe system calls, so accessing
+// them is entirely thread-safe.
+//
+// Note: one minor exception to this is the calls to `libc::close` and `__fdio_release`, which
+// happen on `Drop`. These accesses are safe because `drop` can only be called at most once from
+// a single thread, and after it is called no other functions can be called on the `EventedFdInner`.
+unsafe impl Sync for EventedFdInner {}
+unsafe impl Send for EventedFdInner {}
+
+#[derive(Clone, Debug)]
+pub struct EventedFd {
+ pub inner: Arc<EventedFdInner>
+}
+
+impl EventedFd {
+ pub unsafe fn new(fd: RawFd) -> Self {
+ let fdio = sys::__fdio_fd_to_io(fd);
+ assert!(fdio != ::std::ptr::null(), "FileDescriptor given to EventedFd must be valid.");
+
+ EventedFd {
+ inner: Arc::new(EventedFdInner {
+ registration: Mutex::new(None),
+ fd: fd,
+ fdio: fdio,
+ })
+ }
+ }
+
+ fn handle_and_signals_for_events(&self, interest: Ready, opts: PollOpt)
+ -> (sys::zx_handle_t, zircon::Signals)
+ {
+ let epoll_events = ioevent_to_epoll(interest, opts);
+
+ unsafe {
+ let mut raw_handle: sys::zx_handle_t = mem::uninitialized();
+ let mut signals: sys::zx_signals_t = mem::uninitialized();
+ sys::__fdio_wait_begin(self.inner.fdio, epoll_events, &mut raw_handle, &mut signals);
+
+ (raw_handle, signals)
+ }
+ }
+
+ fn register_with_lock(
+ &self,
+ registration: &mut Option<EventedFdRegistration>,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ if registration.is_some() {
+ return Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "Called register on an already registered file descriptor."));
+ }
+
+ let (raw_handle, signals) = self.handle_and_signals_for_events(interest, opts);
+
+ let needs_rereg = opts.is_level() && !opts.is_oneshot();
+
+ // If we need to reregister, then each registration should be `oneshot`
+ let opts = opts | if needs_rereg { PollOpt::oneshot() } else { PollOpt::empty() };
+
+ let rereg_signals = if needs_rereg {
+ Some((signals, poll_opts_to_wait_async(opts)))
+ } else {
+ None
+ };
+
+ *registration = Some(
+ unsafe { EventedFdRegistration::new(token, raw_handle, rereg_signals) }
+ );
+
+ // We don't have ownership of the handle, so we can't drop it
+ let handle = DontDrop::new(unsafe { zircon::Handle::from_raw(raw_handle) });
+
+ let registered = poll::selector(poll)
+ .register_fd(handle.inner_ref(), self, token, signals, opts);
+
+ if registered.is_err() {
+ *registration = None;
+ }
+
+ registered
+ }
+
+ fn deregister_with_lock(
+ &self,
+ registration: &mut Option<EventedFdRegistration>,
+ poll: &Poll) -> io::Result<()>
+ {
+ let old_registration = if let Some(old_reg) = registration.take() {
+ old_reg
+ } else {
+ return Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "Called rereregister on an unregistered file descriptor."))
+ };
+
+ poll::selector(poll)
+ .deregister_fd(old_registration.handle.inner_ref(), old_registration.token)
+ }
+}
+
+impl Evented for EventedFd {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.register_with_lock(
+ &mut *self.inner.registration.lock().unwrap(),
+ poll,
+ token,
+ interest,
+ opts)
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ // Take out the registration lock
+ let mut registration_lock = self.inner.registration.lock().unwrap();
+
+ // Deregister
+ self.deregister_with_lock(&mut *registration_lock, poll)?;
+
+ self.register_with_lock(
+ &mut *registration_lock,
+ poll,
+ token,
+ interest,
+ opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ let mut registration_lock = self.inner.registration.lock().unwrap();
+ self.deregister_with_lock(&mut *registration_lock, poll)
+ }
+}
+
+fn ioevent_to_epoll(interest: Ready, opts: PollOpt) -> u32 {
+ use event_imp::ready_from_usize;
+ const HUP: usize = 0b01000;
+
+ let mut kind = 0;
+
+ if interest.is_readable() {
+ kind |= libc::EPOLLIN;
+ }
+
+ if interest.is_writable() {
+ kind |= libc::EPOLLOUT;
+ }
+
+ if interest.contains(ready_from_usize(HUP)) {
+ kind |= libc::EPOLLRDHUP;
+ }
+
+ if opts.is_edge() {
+ kind |= libc::EPOLLET;
+ }
+
+ if opts.is_oneshot() {
+ kind |= libc::EPOLLONESHOT;
+ }
+
+ if opts.is_level() {
+ kind &= !libc::EPOLLET;
+ }
+
+ kind as u32
+}
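Editor's note: as a sanity check on the `ioevent_to_epoll` mapping above, a readable-plus-writable edge-triggered interest should collapse to exactly the matching epoll bits (illustrative sketch; `libc`'s `EPOLL*` constants are `i32`, hence the cast):

    let kind = ioevent_to_epoll(Ready::readable() | Ready::writable(), PollOpt::edge());
    assert_eq!(kind, (libc::EPOLLIN | libc::EPOLLOUT | libc::EPOLLET) as u32);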
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/handles.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/handles.rs
new file mode 100644
index 0000000000..ae6f07f6d9
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/handles.rs
@@ -0,0 +1,78 @@
+use {io, poll, Evented, Ready, Poll, PollOpt, Token};
+use zircon_sys::zx_handle_t;
+use std::sync::Mutex;
+
+/// Wrapper for registering a `HandleBase` type with mio.
+#[derive(Debug)]
+pub struct EventedHandle {
+ /// The handle to be registered.
+ handle: zx_handle_t,
+
+ /// The current `Token` with which the handle is registered with mio.
+ token: Mutex<Option<Token>>,
+}
+
+impl EventedHandle {
+ /// Create a new `EventedHandle` which can be registered with mio
+ /// in order to receive event notifications.
+ ///
+ /// The underlying handle must not be dropped while the
+ /// `EventedHandle` still exists.
+ pub unsafe fn new(handle: zx_handle_t) -> Self {
+ EventedHandle {
+ handle: handle,
+ token: Mutex::new(None),
+ }
+ }
+
+ /// Get the underlying handle being registered.
+ pub fn get_handle(&self) -> zx_handle_t {
+ self.handle
+ }
+}
+
+impl Evented for EventedHandle {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ let mut this_token = self.token.lock().unwrap();
+ {
+ poll::selector(poll).register_handle(self.handle, token, interest, opts)?;
+ *this_token = Some(token);
+ }
+ Ok(())
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ let mut this_token = self.token.lock().unwrap();
+ {
+ poll::selector(poll).deregister_handle(self.handle, token)?;
+ *this_token = None;
+ poll::selector(poll).register_handle(self.handle, token, interest, opts)?;
+ *this_token = Some(token);
+ }
+ Ok(())
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ let mut this_token = self.token.lock().unwrap();
+ let token = if let Some(token) = *this_token { token } else {
+ return Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "Attempted to deregister an unregistered handle."))
+ };
+ {
+ poll::selector(poll).deregister_handle(self.handle, token)?;
+ *this_token = None;
+ }
+ Ok(())
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/mod.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/mod.rs
new file mode 100644
index 0000000000..10728fc8dc
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/mod.rs
@@ -0,0 +1,177 @@
+use {io, Ready, PollOpt};
+use libc;
+use zircon;
+use std::mem;
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+use std::ops::{Deref, DerefMut};
+use std::os::unix::io::RawFd;
+
+mod awakener;
+mod handles;
+mod eventedfd;
+mod net;
+mod ready;
+mod selector;
+
+use self::eventedfd::{EventedFd, EventedFdInner};
+use self::ready::assert_fuchsia_ready_repr;
+
+pub use self::awakener::Awakener;
+pub use self::handles::EventedHandle;
+pub use self::net::{TcpListener, TcpStream, UdpSocket};
+pub use self::selector::{Events, Selector};
+pub use self::ready::{FuchsiaReady, zx_signals_t};
+
+// Set non-blocking (workaround since the std version doesn't work in fuchsia)
+// TODO: fix the std version and replace this
+pub fn set_nonblock(fd: RawFd) -> io::Result<()> {
+ cvt(unsafe { libc::fcntl(fd, libc::F_SETFL, libc::O_NONBLOCK) }).map(|_| ())
+}
+
+/// Workaround until fuchsia's recv_from is fixed
+unsafe fn recv_from(fd: RawFd, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ let flags = 0;
+
+ let n = cvt(
+ libc::recv(fd,
+ buf.as_mut_ptr() as *mut libc::c_void,
+ buf.len(),
+ flags)
+ )?;
+
+ // random address-- we don't use it
+ let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ Ok((n as usize, addr))
+}
+
+mod sys {
+ #![allow(non_camel_case_types)]
+ use std::os::unix::io::RawFd;
+ pub use zircon_sys::{zx_handle_t, zx_signals_t};
+
+ // 17 fn pointers we don't need for mio :)
+ pub type fdio_ops_t = [usize; 17];
+
+ pub type atomic_int_fast32_t = usize; // TODO: https://github.com/rust-lang/libc/issues/631
+
+ #[repr(C)]
+ pub struct fdio_t {
+ pub ops: *const fdio_ops_t,
+ pub magic: u32,
+ pub refcount: atomic_int_fast32_t,
+ pub dupcount: u32,
+ pub flags: u32,
+ }
+
+ #[link(name="fdio")]
+ extern {
+ pub fn __fdio_fd_to_io(fd: RawFd) -> *const fdio_t;
+ pub fn __fdio_release(io: *const fdio_t);
+
+ pub fn __fdio_wait_begin(
+ io: *const fdio_t,
+ events: u32,
+ handle_out: &mut zx_handle_t,
+ signals_out: &mut zx_signals_t,
+ );
+ pub fn __fdio_wait_end(
+ io: *const fdio_t,
+ signals: zx_signals_t,
+ events_out: &mut u32,
+ );
+ }
+}
+
+fn epoll_event_to_ready(epoll: u32) -> Ready {
+ let epoll = epoll as i32; // casts the bits directly
+ let mut kind = Ready::empty();
+
+ if (epoll & libc::EPOLLIN) != 0 || (epoll & libc::EPOLLPRI) != 0 {
+ kind = kind | Ready::readable();
+ }
+
+ if (epoll & libc::EPOLLOUT) != 0 {
+ kind = kind | Ready::writable();
+ }
+
+ kind
+
+ /* TODO: support?
+ // EPOLLHUP - Usually means a socket error happened
+ if (epoll & libc::EPOLLERR) != 0 {
+ kind = kind | UnixReady::error();
+ }
+
+ if (epoll & libc::EPOLLRDHUP) != 0 || (epoll & libc::EPOLLHUP) != 0 {
+ kind = kind | UnixReady::hup();
+ }
+ */
+}
+
+fn poll_opts_to_wait_async(poll_opts: PollOpt) -> zircon::WaitAsyncOpts {
+ if poll_opts.is_oneshot() {
+ zircon::WaitAsyncOpts::Once
+ } else {
+ zircon::WaitAsyncOpts::Repeating
+ }
+}
+
+trait IsMinusOne {
+ fn is_minus_one(&self) -> bool;
+}
+
+impl IsMinusOne for i32 {
+ fn is_minus_one(&self) -> bool { *self == -1 }
+}
+
+impl IsMinusOne for isize {
+ fn is_minus_one(&self) -> bool { *self == -1 }
+}
+
+fn cvt<T: IsMinusOne>(t: T) -> ::io::Result<T> {
+ use std::io;
+
+ if t.is_minus_one() {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(t)
+ }
+}
+
+/// Utility type to prevent the type inside of it from being dropped.
+#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+struct DontDrop<T>(Option<T>);
+
+impl<T> DontDrop<T> {
+ fn new(t: T) -> DontDrop<T> {
+ DontDrop(Some(t))
+ }
+
+ fn inner_ref(&self) -> &T {
+ self.0.as_ref().unwrap()
+ }
+
+ fn inner_mut(&mut self) -> &mut T {
+ self.0.as_mut().unwrap()
+ }
+}
+
+impl<T> Deref for DontDrop<T> {
+ type Target = T;
+ fn deref(&self) -> &Self::Target {
+ self.inner_ref()
+ }
+}
+
+impl<T> DerefMut for DontDrop<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.inner_mut()
+ }
+}
+
+impl<T> Drop for DontDrop<T> {
+ fn drop(&mut self) {
+ let inner = self.0.take();
+ mem::forget(inner);
+ }
+}
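Editor's note: to make the `DontDrop` guard concrete, a small standalone sketch. The wrapped value derefs normally, but its destructor is suppressed through `mem::forget`, which is what lets the net types below hold a `std` socket whose fd is closed exactly once, by `EventedFdInner::drop`:

    let s = DontDrop::new(String::from("never dropped"));
    assert_eq!(&*s, "never dropped");   // Deref passes through to the String
    drop(s);                            // the String is forgotten, not freed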
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/net.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/net.rs
new file mode 100644
index 0000000000..d43ad27bb5
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/net.rs
@@ -0,0 +1,444 @@
+use {io, Evented, Ready, Poll, PollOpt, Token};
+use iovec::IoVec;
+use iovec::unix as iovec;
+use libc;
+use net2::TcpStreamExt;
+#[allow(unused_imports)] // only here for Rust 1.8
+use net2::UdpSocketExt;
+use sys::fuchsia::{recv_from, set_nonblock, EventedFd, DontDrop};
+use std::cmp;
+use std::io::{Read, Write};
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+use std::os::unix::io::AsRawFd;
+use std::time::Duration;
+
+#[derive(Debug)]
+pub struct TcpStream {
+ io: DontDrop<net::TcpStream>,
+ evented_fd: EventedFd,
+}
+
+impl TcpStream {
+ pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
+ try!(set_nonblock(stream.as_raw_fd()));
+
+ let connected = stream.connect(addr);
+ match connected {
+ Ok(..) => {}
+ Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
+ Err(e) => return Err(e),
+ }
+
+ let evented_fd = unsafe { EventedFd::new(stream.as_raw_fd()) };
+
+ return Ok(TcpStream {
+ io: DontDrop::new(stream),
+ evented_fd: evented_fd,
+ })
+ }
+
+ pub fn from_stream(stream: net::TcpStream) -> TcpStream {
+ let evented_fd = unsafe { EventedFd::new(stream.as_raw_fd()) };
+
+ TcpStream {
+ io: DontDrop::new(stream),
+ evented_fd: evented_fd,
+ }
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.io.peer_addr()
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpStream> {
+ self.io.try_clone().map(|s| {
+ let evented_fd = unsafe { EventedFd::new(s.as_raw_fd()) };
+ TcpStream {
+ io: DontDrop::new(s),
+ evented_fd: evented_fd,
+ }
+ })
+ }
+
+ pub fn shutdown(&self, how: net::Shutdown) -> io::Result<()> {
+ self.io.shutdown(how)
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.io.set_nodelay(nodelay)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.io.nodelay()
+ }
+
+ pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.io.set_recv_buffer_size(size)
+ }
+
+ pub fn recv_buffer_size(&self) -> io::Result<usize> {
+ self.io.recv_buffer_size()
+ }
+
+ pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.io.set_send_buffer_size(size)
+ }
+
+ pub fn send_buffer_size(&self) -> io::Result<usize> {
+ self.io.send_buffer_size()
+ }
+
+ pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+ self.io.set_keepalive(keepalive)
+ }
+
+ pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+ self.io.keepalive()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.ttl()
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.io.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.io.only_v6()
+ }
+
+ pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.io.set_linger(dur)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ self.io.linger()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.take_error()
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.peek(buf)
+ }
+
+ pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ unsafe {
+ let slice = iovec::as_os_slice_mut(bufs);
+ let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
+ let rc = libc::readv(self.io.as_raw_fd(),
+ slice.as_ptr(),
+ len as libc::c_int);
+ if rc < 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(rc as usize)
+ }
+ }
+ }
+
+ pub fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ unsafe {
+ let slice = iovec::as_os_slice(bufs);
+ let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
+ let rc = libc::writev(self.io.as_raw_fd(),
+ slice.as_ptr(),
+ len as libc::c_int);
+ if rc < 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(rc as usize)
+ }
+ }
+ }
+}
+
+impl<'a> Read for &'a TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.inner_ref().read(buf)
+ }
+}
+
+impl<'a> Write for &'a TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.io.inner_ref().write(buf)
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ self.io.inner_ref().flush()
+ }
+}
+
+impl Evented for TcpStream {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.evented_fd.deregister(poll)
+ }
+}
+
+#[derive(Debug)]
+pub struct TcpListener {
+ io: DontDrop<net::TcpListener>,
+ evented_fd: EventedFd,
+}
+
+impl TcpListener {
+ pub fn new(inner: net::TcpListener) -> io::Result<TcpListener> {
+ set_nonblock(inner.as_raw_fd())?;
+
+ let evented_fd = unsafe { EventedFd::new(inner.as_raw_fd()) };
+
+ Ok(TcpListener {
+ io: DontDrop::new(inner),
+ evented_fd: evented_fd,
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpListener> {
+ self.io.try_clone().map(|io| {
+ let evented_fd = unsafe { EventedFd::new(io.as_raw_fd()) };
+ TcpListener {
+ io: DontDrop::new(io),
+ evented_fd: evented_fd,
+ }
+ })
+ }
+
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ self.io.accept().and_then(|(s, a)| {
+ set_nonblock(s.as_raw_fd())?;
+ let evented_fd = unsafe { EventedFd::new(s.as_raw_fd()) };
+ return Ok((TcpStream {
+ io: DontDrop::new(s),
+ evented_fd: evented_fd,
+ }, a))
+ })
+ }
+
+ #[allow(deprecated)]
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.io.set_only_v6(only_v6)
+ }
+
+ #[allow(deprecated)]
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.io.only_v6()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.ttl()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.take_error()
+ }
+}
+
+impl Evented for TcpListener {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.evented_fd.deregister(poll)
+ }
+}
+
+#[derive(Debug)]
+pub struct UdpSocket {
+ io: DontDrop<net::UdpSocket>,
+ evented_fd: EventedFd,
+}
+
+impl UdpSocket {
+ pub fn new(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ set_nonblock(socket.as_raw_fd())?;
+
+ let evented_fd = unsafe { EventedFd::new(socket.as_raw_fd()) };
+
+ Ok(UdpSocket {
+ io: DontDrop::new(socket),
+ evented_fd: evented_fd,
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.io.try_clone().and_then(|io| {
+ UdpSocket::new(io)
+ })
+ }
+
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
+ self.io.send_to(buf, target)
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unsafe { recv_from(self.io.as_raw_fd(), buf) }
+ }
+
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.io.send(buf)
+ }
+
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.recv(buf)
+ }
+
+ pub fn connect(&self, addr: SocketAddr)
+ -> io::Result<()> {
+ self.io.connect(addr)
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.io.broadcast()
+ }
+
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.io.set_broadcast(on)
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.io.multicast_loop_v4()
+ }
+
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.io.set_multicast_loop_v4(on)
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.io.multicast_ttl_v4()
+ }
+
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_multicast_ttl_v4(ttl)
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.io.multicast_loop_v6()
+ }
+
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.io.set_multicast_loop_v6(on)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.ttl()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_ttl(ttl)
+ }
+
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.io.join_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.io.join_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.io.leave_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.io.leave_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.io.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.io.only_v6()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.take_error()
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.evented_fd.deregister(poll)
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/ready.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/ready.rs
new file mode 100644
index 0000000000..97854f8c07
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/ready.rs
@@ -0,0 +1,181 @@
+use event_imp::{Ready, ready_as_usize, ready_from_usize};
+pub use zircon_sys::{
+ zx_signals_t,
+ ZX_OBJECT_READABLE,
+ ZX_OBJECT_WRITABLE,
+};
+use std::ops;
+
+// The following impls are valid because Fuchsia and mio both represent
+// "readable" as `1 << 0` and "writable" as `1 << 2`.
+// We define this assertion here and call it from `Selector::new`,
+// since `Selector::new` is guaranteed to be called during a standard mio runtime,
+// unlike the functions in this file.
+#[inline]
+pub fn assert_fuchsia_ready_repr() {
+ debug_assert!(
+ ZX_OBJECT_READABLE.bits() as usize == ready_as_usize(Ready::readable()),
+ "Zircon ZX_OBJECT_READABLE should have the same repr as Ready::readable()"
+ );
+ debug_assert!(
+ ZX_OBJECT_WRITABLE.bits() as usize == ready_as_usize(Ready::writable()),
+ "Zircon ZX_OBJECT_WRITABLE should have the same repr as Ready::writable()"
+ );
+}
+
+/// Fuchsia specific extensions to `Ready`
+///
+/// Provides additional readiness event kinds that are available on Fuchsia.
+///
+/// Conversion traits are implemented between `Ready` and `FuchsiaReady`.
+///
+/// For high level documentation on polling and readiness, see [`Poll`].
+///
+/// [`Poll`]: struct.Poll.html
+#[derive(Debug, Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct FuchsiaReady(Ready);
+
+impl FuchsiaReady {
+ /// Returns the `FuchsiaReady` as raw zircon signals.
+ /// This function is just a more explicit, non-generic version of
+ /// `FuchsiaReady::into`.
+ #[inline]
+ pub fn into_zx_signals(self) -> zx_signals_t {
+ zx_signals_t::from_bits_truncate(ready_as_usize(self.0) as u32)
+ }
+}
+
+impl Into<zx_signals_t> for FuchsiaReady {
+ #[inline]
+ fn into(self) -> zx_signals_t {
+ self.into_zx_signals()
+ }
+}
+
+impl From<zx_signals_t> for FuchsiaReady {
+ #[inline]
+ fn from(src: zx_signals_t) -> Self {
+ FuchsiaReady(src.into())
+ }
+}
+
+impl From<zx_signals_t> for Ready {
+ #[inline]
+ fn from(src: zx_signals_t) -> Self {
+ ready_from_usize(src.bits() as usize)
+ }
+}
+
+impl From<Ready> for FuchsiaReady {
+ #[inline]
+ fn from(src: Ready) -> FuchsiaReady {
+ FuchsiaReady(src)
+ }
+}
+
+impl From<FuchsiaReady> for Ready {
+ #[inline]
+ fn from(src: FuchsiaReady) -> Ready {
+ src.0
+ }
+}
+
+impl ops::Deref for FuchsiaReady {
+ type Target = Ready;
+
+ #[inline]
+ fn deref(&self) -> &Ready {
+ &self.0
+ }
+}
+
+impl ops::DerefMut for FuchsiaReady {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Ready {
+ &mut self.0
+ }
+}
+
+impl ops::BitOr for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitor(self, other: FuchsiaReady) -> FuchsiaReady {
+ (self.0 | other.0).into()
+ }
+}
+
+impl ops::BitXor for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitxor(self, other: FuchsiaReady) -> FuchsiaReady {
+ (self.0 ^ other.0).into()
+ }
+}
+
+impl ops::BitAnd for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitand(self, other: FuchsiaReady) -> FuchsiaReady {
+ (self.0 & other.0).into()
+ }
+}
+
+impl ops::Sub for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn sub(self, other: FuchsiaReady) -> FuchsiaReady {
+ (self.0 & !other.0).into()
+ }
+}
+
+#[deprecated(since = "0.6.10", note = "removed")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+impl ops::Not for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn not(self) -> FuchsiaReady {
+ (!self.0).into()
+ }
+}
+
+impl ops::BitOr<zx_signals_t> for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitor(self, other: zx_signals_t) -> FuchsiaReady {
+ self | FuchsiaReady::from(other)
+ }
+}
+
+impl ops::BitXor<zx_signals_t> for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitxor(self, other: zx_signals_t) -> FuchsiaReady {
+ self ^ FuchsiaReady::from(other)
+ }
+}
+
+impl ops::BitAnd<zx_signals_t> for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitand(self, other: zx_signals_t) -> FuchsiaReady {
+ self & FuchsiaReady::from(other)
+ }
+}
+
+impl ops::Sub<zx_signals_t> for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn sub(self, other: zx_signals_t) -> FuchsiaReady {
+ self - FuchsiaReady::from(other)
+ }
+}
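Editor's note: given that `assert_fuchsia_ready_repr` holds, the conversions above are plain bit-casts in both directions. A brief sketch (assuming the representation assertion passes):

    let ready: FuchsiaReady = Ready::readable().into();
    assert_eq!(ready.into_zx_signals(), ZX_OBJECT_READABLE);
    assert_eq!(Ready::from(ZX_OBJECT_WRITABLE), Ready::writable());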
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/selector.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/selector.rs
new file mode 100644
index 0000000000..27226ac5ff
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/selector.rs
@@ -0,0 +1,353 @@
+use {io, Event, PollOpt, Ready, Token};
+use sys::fuchsia::{
+ assert_fuchsia_ready_repr,
+ epoll_event_to_ready,
+ poll_opts_to_wait_async,
+ EventedFd,
+ EventedFdInner,
+ FuchsiaReady,
+};
+use zircon;
+use zircon::AsHandleRef;
+use zircon_sys::zx_handle_t;
+use std::collections::hash_map;
+use std::fmt;
+use std::mem;
+use std::sync::atomic::{AtomicBool, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::{Arc, Mutex, Weak};
+use std::time::Duration;
+use sys;
+
+/// The kind of registration-- file descriptor or handle.
+///
+/// The last bit of a token is set to indicate the type of the registration.
+#[derive(Copy, Clone, Eq, PartialEq)]
+enum RegType {
+ Fd,
+ Handle,
+}
+
+fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
+ let key = token.0 as u64;
+ let msb = 1u64 << 63;
+ if (key & msb) != 0 {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "Most-significant bit of token must remain unset."));
+ }
+
+ Ok(match reg_type {
+ RegType::Fd => key,
+ RegType::Handle => key | msb,
+ })
+}
+
+fn token_and_type_from_key(key: u64) -> (Token, RegType) {
+ let msb = 1u64 << 63;
+ (
+ Token((key & !msb) as usize),
+ if (key & msb) == 0 {
+ RegType::Fd
+ } else {
+ RegType::Handle
+ }
+ )
+}
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that is previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return with an error. This matches Windows behavior.
+static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+pub struct Selector {
+ id: usize,
+
+ /// Zircon object on which the handles have been registered, and on which events occur
+ port: Arc<zircon::Port>,
+
+ /// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
+ /// used to prevent having to lock `tokens_to_rereg` when it is empty.
+ has_tokens_to_rereg: AtomicBool,
+
+ /// List of `Token`s corresponding to registrations that need to be reregistered before the
+ /// next `port::wait`. This is necessary to provide level-triggered behavior for
+ /// `Async::repeating` registrations.
+ ///
+ /// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
+ /// that it will be reregistered before the next `port::wait` call, making `port::wait` return
+ /// immediately if the signal was high during the reregistration.
+ ///
+ /// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
+ /// `token_to_fd`.
+ tokens_to_rereg: Mutex<Vec<Token>>,
+
+ /// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
+ /// file handle, its associated `fdio` object, and its current registration.
+ token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
+ // compatible with Ready.
+ assert_fuchsia_ready_repr();
+
+ let port = Arc::new(
+ zircon::Port::create(zircon::PortOpts::Default)?
+ );
+
+ // offset by 1 to avoid choosing 0 as the id of a selector
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+
+ let has_tokens_to_rereg = AtomicBool::new(false);
+ let tokens_to_rereg = Mutex::new(Vec::new());
+ let token_to_fd = Mutex::new(hash_map::HashMap::new());
+
+ Ok(Selector {
+ id: id,
+ port: port,
+ has_tokens_to_rereg: has_tokens_to_rereg,
+ tokens_to_rereg: tokens_to_rereg,
+ token_to_fd: token_to_fd,
+ })
+ }
+
+ pub fn id(&self) -> usize {
+ self.id
+ }
+
+ /// Returns a reference to the underlying port `Arc`.
+ pub fn port(&self) -> &Arc<zircon::Port> { &self.port }
+
+ /// Reregisters all registrations pointed to by the `tokens_to_rereg` list
+ /// if `has_tokens_to_rereg`.
+ fn reregister_handles(&self) -> io::Result<()> {
+ // We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg`
+ // written before the store using `Ordering::Release`.
+ if self.has_tokens_to_rereg.load(Ordering::Acquire) {
+ let mut tokens = self.tokens_to_rereg.lock().unwrap();
+ let token_to_fd = self.token_to_fd.lock().unwrap();
+ for token in tokens.drain(0..) {
+ if let Some(eventedfd) = token_to_fd.get(&token)
+ .and_then(|h| h.upgrade()) {
+ eventedfd.rereg_for_level(&self.port);
+ }
+ }
+ self.has_tokens_to_rereg.store(false, Ordering::Release);
+ }
+ Ok(())
+ }
+
+ pub fn select(&self,
+ evts: &mut Events,
+ _awakener: Token,
+ timeout: Option<Duration>) -> io::Result<bool>
+ {
+ evts.clear();
+
+ self.reregister_handles()?;
+
+ let deadline = match timeout {
+ Some(duration) => {
+ let nanos = duration.as_secs().saturating_mul(1_000_000_000)
+ .saturating_add(duration.subsec_nanos() as u64);
+
+ zircon::deadline_after(nanos)
+ }
+ None => zircon::ZX_TIME_INFINITE,
+ };
+
+ let packet = match self.port.wait(deadline) {
+ Ok(packet) => packet,
+ Err(zircon::Status::ErrTimedOut) => return Ok(false),
+ Err(e) => Err(e)?,
+ };
+
+ let observed_signals = match packet.contents() {
+ zircon::PacketContents::SignalOne(signal_packet) => {
+ signal_packet.observed()
+ }
+ zircon::PacketContents::SignalRep(signal_packet) => {
+ signal_packet.observed()
+ }
+ zircon::PacketContents::User(_user_packet) => {
+ // User packets are only ever sent by an Awakener
+ return Ok(true);
+ }
+ };
+
+ let key = packet.key();
+ let (token, reg_type) = token_and_type_from_key(key);
+
+ match reg_type {
+ RegType::Handle => {
+ // We can return immediately-- no lookup or registration necessary.
+ evts.events.push(Event::new(Ready::from(observed_signals), token));
+ Ok(false)
+ },
+ RegType::Fd => {
+ // Convert the signals to epoll events using __fdio_wait_end,
+ // and add to reregistration list if necessary.
+ let events: u32;
+ {
+ let handle = if let Some(handle) =
+ self.token_to_fd.lock().unwrap()
+ .get(&token)
+ .and_then(|h| h.upgrade()) {
+ handle
+ } else {
+ // This handle is apparently in the process of removal.
+ // It has been removed from the list, but port_cancel has not been called.
+ return Ok(false);
+ };
+
+ events = unsafe {
+ let mut events: u32 = mem::uninitialized();
+ sys::fuchsia::sys::__fdio_wait_end(handle.fdio(), observed_signals, &mut events);
+ events
+ };
+
+ // If necessary, queue to be reregistered before next port_await
+ let needs_to_rereg = {
+ let registration_lock = handle.registration().lock().unwrap();
+
+ registration_lock
+ .as_ref()
+ .and_then(|r| r.rereg_signals())
+ .is_some()
+ };
+
+ if needs_to_rereg {
+ let mut tokens_to_rereg_lock = self.tokens_to_rereg.lock().unwrap();
+ tokens_to_rereg_lock.push(token);
+ // We use `Ordering::Release` to make sure that we see all `tokens_to_rereg`
+ // written before the store.
+ self.has_tokens_to_rereg.store(true, Ordering::Release);
+ }
+ }
+
+ evts.events.push(Event::new(epoll_event_to_ready(events), token));
+ Ok(false)
+ },
+ }
+ }
+
+ /// Register event interests for the given IO handle with the OS
+ pub fn register_fd(&self,
+ handle: &zircon::Handle,
+ fd: &EventedFd,
+ token: Token,
+ signals: zircon::Signals,
+ poll_opts: PollOpt) -> io::Result<()>
+ {
+ {
+ let mut token_to_fd = self.token_to_fd.lock().unwrap();
+ match token_to_fd.entry(token) {
+ hash_map::Entry::Occupied(_) =>
+ return Err(io::Error::new(io::ErrorKind::AlreadyExists,
+ "Attempted to register a filedescriptor on an existing token.")),
+ hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
+ };
+ }
+
+ let wait_async_opts = poll_opts_to_wait_async(poll_opts);
+
+ let wait_res = handle.wait_async_handle(&self.port, token.0 as u64, signals, wait_async_opts);
+
+ if wait_res.is_err() {
+ self.token_to_fd.lock().unwrap().remove(&token);
+ }
+
+ Ok(wait_res?)
+ }
+
+ /// Deregister event interests for the given IO handle with the OS
+ pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> {
+ self.token_to_fd.lock().unwrap().remove(&token);
+
+ // We ignore NotFound errors since oneshots are automatically deregistered,
+ // but mio will attempt to deregister them manually.
+ self.port.cancel(&*handle, token.0 as u64)
+ .map_err(io::Error::from)
+ .or_else(|e| if e.kind() == io::ErrorKind::NotFound {
+ Ok(())
+ } else {
+ Err(e)
+ })
+ }
+
+ pub fn register_handle(&self,
+ handle: zx_handle_t,
+ token: Token,
+ interests: Ready,
+ poll_opts: PollOpt) -> io::Result<()>
+ {
+ if poll_opts.is_level() && !poll_opts.is_oneshot() {
+ return Err(io::Error::new(io::ErrorKind::InvalidInput,
+ "Repeated level-triggered events are not supported on Fuchsia handles."));
+ }
+
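+ // Wrap the raw handle only for the duration of this call; `mem::forget`
+ // below prevents the temporary wrapper from closing a handle we do not own.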
+ let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
+
+ let res = temp_handle.wait_async_handle(
+ &self.port,
+ key_from_token_and_type(token, RegType::Handle)?,
+ FuchsiaReady::from(interests).into_zx_signals(),
+ poll_opts_to_wait_async(poll_opts));
+
+ mem::forget(temp_handle);
+
+ Ok(res?)
+ }
+
+
+ pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()>
+ {
+ let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
+ let res = self.port.cancel(&temp_handle, key_from_token_and_type(token, RegType::Handle)?);
+
+ mem::forget(temp_handle);
+
+ Ok(res?)
+ }
+}
+
+pub struct Events {
+ events: Vec<Event>
+}
+
+impl Events {
+ pub fn with_capacity(_u: usize) -> Events {
+ // The Fuchsia selector only handles one event at a time,
+ // so we ignore the requested capacity and use a capacity of one.
+ Events { events: Vec::with_capacity(1) }
+ }
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.events.get(idx).cloned()
+ }
+ pub fn push_event(&mut self, event: Event) {
+ self.events.push(event)
+ }
+ pub fn clear(&mut self) {
+ self.events.drain(0..);
+ }
+}
+
+impl fmt::Debug for Events {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Events")
+ .field("len", &self.len())
+ .finish()
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/mod.rs b/third_party/rust/mio-0.6.23/src/sys/mod.rs
new file mode 100644
index 0000000000..8a1705db6c
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/mod.rs
@@ -0,0 +1,56 @@
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub use self::unix::{
+ Awakener,
+ EventedFd,
+ Events,
+ Io,
+ Selector,
+ TcpStream,
+ TcpListener,
+ UdpSocket,
+ pipe,
+ set_nonblock,
+};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub use self::unix::READY_ALL;
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+#[cfg(feature = "with-deprecated")]
+pub use self::unix::UnixSocket;
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub mod unix;
+
+#[cfg(windows)]
+pub use self::windows::{
+ Awakener,
+ Events,
+ Selector,
+ TcpStream,
+ TcpListener,
+ UdpSocket,
+ Overlapped,
+ Binding,
+};
+
+#[cfg(windows)]
+mod windows;
+
+#[cfg(target_os = "fuchsia")]
+pub use self::fuchsia::{
+ Awakener,
+ Events,
+ EventedHandle,
+ Selector,
+ TcpStream,
+ TcpListener,
+ UdpSocket,
+ set_nonblock,
+};
+
+#[cfg(target_os = "fuchsia")]
+pub mod fuchsia;
+
+#[cfg(not(all(unix, not(target_os = "fuchsia"))))]
+pub const READY_ALL: usize = 0;
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/awakener.rs b/third_party/rust/mio-0.6.23/src/sys/unix/awakener.rs
new file mode 100644
index 0000000000..9cc367a78c
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/awakener.rs
@@ -0,0 +1,74 @@
+pub use self::pipe::Awakener;
+
+/// Default awakener backed by a pipe
+mod pipe {
+ use sys::unix;
+ use {io, Ready, Poll, PollOpt, Token};
+ use event::Evented;
+ use std::io::{Read, Write};
+
+ /*
+ *
+ * ===== Awakener =====
+ *
+ */
+
+ pub struct Awakener {
+ reader: unix::Io,
+ writer: unix::Io,
+ }
+
+ impl Awakener {
+ pub fn new() -> io::Result<Awakener> {
+ let (rd, wr) = unix::pipe()?;
+
+ Ok(Awakener {
+ reader: rd,
+ writer: wr,
+ })
+ }
+
+ pub fn wakeup(&self) -> io::Result<()> {
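+ // Writing a single byte is enough to make the reader readable; if
+ // the pipe is already full, a wakeup is pending anyway, so
+ // `WouldBlock` is treated as success below.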
+ match (&self.writer).write(&[1]) {
+ Ok(_) => Ok(()),
+ Err(e) => {
+ if e.kind() == io::ErrorKind::WouldBlock {
+ Ok(())
+ } else {
+ Err(e)
+ }
+ }
+ }
+ }
+
+ pub fn cleanup(&self) {
+ let mut buf = [0; 128];
+
+ loop {
+ // Consume data until all bytes are purged
+ match (&self.reader).read(&mut buf) {
+ Ok(i) if i > 0 => {},
+ _ => return,
+ }
+ }
+ }
+
+ fn reader(&self) -> &unix::Io {
+ &self.reader
+ }
+ }
+
+ impl Evented for Awakener {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.reader().register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.reader().reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.reader().deregister(poll)
+ }
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/dlsym.rs b/third_party/rust/mio-0.6.23/src/sys/unix/dlsym.rs
new file mode 100644
index 0000000000..e88c595fc9
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/dlsym.rs
@@ -0,0 +1,47 @@
+use std::marker;
+use std::mem;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use libc;
+
+macro_rules! dlsym {
+ (fn $name:ident($($t:ty),*) -> $ret:ty) => (
+ #[allow(bad_style)]
+ static $name: ::sys::unix::dlsym::DlSym<unsafe extern fn($($t),*) -> $ret> =
+ ::sys::unix::dlsym::DlSym {
+ name: concat!(stringify!($name), "\0"),
+ addr: ::std::sync::atomic::ATOMIC_USIZE_INIT,
+ _marker: ::std::marker::PhantomData,
+ };
+ )
+}
+
+pub struct DlSym<F> {
+ pub name: &'static str,
+ pub addr: AtomicUsize,
+ pub _marker: marker::PhantomData<F>,
+}
+
+impl<F> DlSym<F> {
+ pub fn get(&self) -> Option<&F> {
+ assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
+ unsafe {
+ if self.addr.load(Ordering::SeqCst) == 0 {
+ self.addr.store(fetch(self.name), Ordering::SeqCst);
+ }
+ if self.addr.load(Ordering::SeqCst) == 1 {
+ None
+ } else {
+ mem::transmute::<&AtomicUsize, Option<&F>>(&self.addr)
+ }
+ }
+ }
+}
+
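+// `dlsym(3)` returns a null pointer for missing symbols; remap that to 1 so
+// the atomic above can distinguish "not yet resolved" (0) from "absent" (1).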
+unsafe fn fetch(name: &str) -> usize {
+ assert_eq!(name.as_bytes()[name.len() - 1], 0);
+ match libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr() as *const _) as usize {
+ 0 => 1,
+ n => n,
+ }
+}
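+
+// Typical usage of the pieces above (an illustrative sketch; `pipe2` is just
+// an example symbol, resolved the same way `sys::unix::pipe` does it):
+//
+// dlsym!(fn pipe2(*mut libc::c_int, libc::c_int) -> libc::c_int);
+//
+// unsafe {
+// match pipe2.get() {
+// Some(pipe2_fn) => { /* call through the resolved fn pointer */ }
+// None => { /* fall back to pipe(2) + fcntl(FD_CLOEXEC) */ }
+// }
+// }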
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/epoll.rs b/third_party/rust/mio-0.6.23/src/sys/unix/epoll.rs
new file mode 100644
index 0000000000..0da787bc95
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/epoll.rs
@@ -0,0 +1,268 @@
+#![allow(deprecated)]
+use std::os::unix::io::AsRawFd;
+use std::os::unix::io::RawFd;
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use std::time::Duration;
+use std::{cmp, i32};
+
+use libc::{self, c_int};
+use libc::{EPOLLERR, EPOLLHUP, EPOLLONESHOT};
+use libc::{EPOLLET, EPOLLOUT, EPOLLIN, EPOLLPRI};
+
+use {io, Ready, PollOpt, Token};
+use event_imp::Event;
+use sys::unix::{cvt, UnixReady};
+use sys::unix::io::set_cloexec;
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that is previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return with an error. This matches Windows behavior.
+static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+#[derive(Debug)]
+pub struct Selector {
+ id: usize,
+ epfd: RawFd,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ let epfd = unsafe {
+ // Use `epoll_create1` if it's available, since it atomically sets
+ // the CLOEXEC flag; otherwise fall back to `epoll_create` followed
+ // by a separate call to set the flag.
+ dlsym!(fn epoll_create1(c_int) -> c_int);
+
+ match epoll_create1.get() {
+ Some(epoll_create1_fn) => {
+ cvt(epoll_create1_fn(libc::EPOLL_CLOEXEC))?
+ }
+ None => {
+ let fd = cvt(libc::epoll_create(1024))?;
+ drop(set_cloexec(fd));
+ fd
+ }
+ }
+ };
+
+ // offset by 1 to avoid choosing 0 as the id of a selector
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+
+ Ok(Selector {
+ id: id,
+ epfd: epfd,
+ })
+ }
+
+ pub fn id(&self) -> usize {
+ self.id
+ }
+
+ /// Wait for events from the OS
+ pub fn select(&self, evts: &mut Events, awakener: Token, timeout: Option<Duration>) -> io::Result<bool> {
+ // A bug in kernels < 2.6.37 makes timeouts larger than LONG_MAX / CONFIG_HZ
+ // (approx. 30 minutes with CONFIG_HZ=1200) effectively infinite on 32-bit
+ // architectures. The magic number is the same constant used by libuv.
+ #[cfg(target_pointer_width = "32")]
+ const MAX_SAFE_TIMEOUT: u64 = 1789569;
+ #[cfg(not(target_pointer_width = "32"))]
+ const MAX_SAFE_TIMEOUT: u64 = c_int::max_value() as u64;
+
+ let timeout_ms = timeout
+ .map(|to| cmp::min(millis(to), MAX_SAFE_TIMEOUT) as c_int)
+ .unwrap_or(-1);
+
+ // Wait for epoll events for at most timeout_ms milliseconds
+ evts.clear();
+ unsafe {
+ let cnt = cvt(libc::epoll_wait(self.epfd,
+ evts.events.as_mut_ptr(),
+ evts.events.capacity() as i32,
+ timeout_ms))?;
+ let cnt = cnt as usize;
+ evts.events.set_len(cnt);
+
+ for i in 0..cnt {
+ if evts.events[i].u64 as usize == awakener.into() {
+ evts.events.remove(i);
+ return Ok(true);
+ }
+ }
+ }
+
+ Ok(false)
+ }
+
+ /// Register event interests for the given IO handle with the OS
+ pub fn register(&self, fd: RawFd, token: Token, interests: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut info = libc::epoll_event {
+ events: ioevent_to_epoll(interests, opts),
+ u64: usize::from(token) as u64
+ };
+
+ unsafe {
+ cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_ADD, fd, &mut info))?;
+ Ok(())
+ }
+ }
+
+ /// Register event interests for the given IO handle with the OS
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut info = libc::epoll_event {
+ events: ioevent_to_epoll(interests, opts),
+ u64: usize::from(token) as u64
+ };
+
+ unsafe {
+ cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_MOD, fd, &mut info))?;
+ Ok(())
+ }
+ }
+
+ /// Deregister event interests for the given IO handle with the OS
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ // The &info argument should be ignored by the system, but Linux
+ // versions before 2.6.9 required it to be non-null. For
+ // compatibility, we provide a dummy `epoll_event`.
+ let mut info = libc::epoll_event {
+ events: 0,
+ u64: 0,
+ };
+
+ unsafe {
+ cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_DEL, fd, &mut info))?;
+ Ok(())
+ }
+ }
+}
+
+fn ioevent_to_epoll(interest: Ready, opts: PollOpt) -> u32 {
+ let mut kind = 0;
+
+ if interest.is_readable() {
+ kind |= EPOLLIN;
+ }
+
+ if interest.is_writable() {
+ kind |= EPOLLOUT;
+ }
+
+ if UnixReady::from(interest).is_priority() {
+ kind |= EPOLLPRI;
+ }
+
+ if opts.is_edge() {
+ kind |= EPOLLET;
+ }
+
+ if opts.is_oneshot() {
+ kind |= EPOLLONESHOT;
+ }
+
+ if opts.is_level() {
+ kind &= !EPOLLET;
+ }
+
+ kind as u32
+}
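+
+// A small illustrative check of the mapping above (not part of upstream mio's
+// test suite): readable interest with edge triggering maps to EPOLLIN|EPOLLET.
+#[test]
+fn ioevent_to_epoll_readable_edge() {
+ assert_eq!(ioevent_to_epoll(Ready::readable(), PollOpt::edge()),
+ (EPOLLIN | EPOLLET) as u32);
+}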
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.epfd
+ }
+}
+
+impl Drop for Selector {
+ fn drop(&mut self) {
+ unsafe {
+ let _ = libc::close(self.epfd);
+ }
+ }
+}
+
+pub struct Events {
+ events: Vec<libc::epoll_event>,
+}
+
+impl Events {
+ pub fn with_capacity(u: usize) -> Events {
+ Events {
+ events: Vec::with_capacity(u)
+ }
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ #[inline]
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.events.get(idx).map(|event| {
+ let epoll = event.events as c_int;
+ let mut kind = Ready::empty();
+
+ if (epoll & EPOLLIN) != 0 {
+ kind = kind | Ready::readable();
+ }
+
+ if (epoll & EPOLLPRI) != 0 {
+ kind = kind | Ready::readable() | UnixReady::priority();
+ }
+
+ if (epoll & EPOLLOUT) != 0 {
+ kind = kind | Ready::writable();
+ }
+
+ // EPOLLERR - An error condition happened on the fd
+ if (epoll & EPOLLERR) != 0 {
+ kind = kind | UnixReady::error();
+ }
+
+ // EPOLLHUP - The peer hung up: it closed, or shut down the
+ // writing half of, its end of the connection
+ if (epoll & EPOLLHUP) != 0 {
+ kind = kind | UnixReady::hup();
+ }
+
+ let token = self.events[idx].u64;
+
+ Event::new(kind, Token(token as usize))
+ })
+ }
+
+ pub fn push_event(&mut self, event: Event) {
+ self.events.push(libc::epoll_event {
+ events: ioevent_to_epoll(event.readiness(), PollOpt::empty()),
+ u64: usize::from(event.token()) as u64
+ });
+ }
+
+ pub fn clear(&mut self) {
+ unsafe { self.events.set_len(0); }
+ }
+}
+
+const NANOS_PER_MILLI: u32 = 1_000_000;
+const MILLIS_PER_SEC: u64 = 1_000;
+
+/// Convert a `Duration` to milliseconds, rounding up and saturating at
+/// `u64::MAX`.
+///
+/// The saturating is fine because `u64::MAX` milliseconds are still many
+/// million years.
+pub fn millis(duration: Duration) -> u64 {
+ // Round up.
+ let millis = (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
+ duration.as_secs().saturating_mul(MILLIS_PER_SEC).saturating_add(millis as u64)
+}
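+
+// A small illustrative sanity check of the rounding behavior above (not part
+// of upstream mio's test suite):
+#[test]
+fn millis_rounds_up_and_saturates() {
+ assert_eq!(millis(Duration::new(0, 1)), 1); // 1ns rounds up to 1ms
+ assert_eq!(millis(Duration::new(1, 0)), 1_000);
+ assert_eq!(millis(Duration::new(u64::max_value(), 0)), u64::max_value());
+}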
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/eventedfd.rs b/third_party/rust/mio-0.6.23/src/sys/unix/eventedfd.rs
new file mode 100644
index 0000000000..72586f6652
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/eventedfd.rs
@@ -0,0 +1,107 @@
+use {io, poll, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use std::os::unix::io::RawFd;
+
+/*
+ *
+ * ===== EventedFd =====
+ *
+ */
+
+/// Adapter for [`RawFd`] providing an [`Evented`] implementation.
+///
+/// `EventedFd` enables registering any type with an FD with [`Poll`].
+///
+/// While only implementations for TCP and UDP are provided, Mio supports
+/// registering any FD that can be registered with the underlying OS selector.
+/// `EventedFd` provides the necessary bridge.
+///
+/// Note that `EventedFd` takes a `&RawFd`. This is because `EventedFd` **does
+/// not** take ownership of the FD. Specifically, it will not manage any
+/// lifecycle related operations, such as closing the FD on drop. It is expected
+/// that the `EventedFd` is constructed right before a call to
+/// [`Poll::register`]. See the examples for more detail.
+///
+/// # Examples
+///
+/// Basic usage
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Ready, Poll, PollOpt, Token};
+/// use mio::unix::EventedFd;
+///
+/// use std::os::unix::io::AsRawFd;
+/// use std::net::TcpListener;
+///
+/// // Bind a std listener
+/// let listener = TcpListener::bind("127.0.0.1:0")?;
+///
+/// let poll = Poll::new()?;
+///
+/// // Register the listener
+/// poll.register(&EventedFd(&listener.as_raw_fd()),
+/// Token(0), Ready::readable(), PollOpt::edge())?;
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// Implementing [`Evented`] for a custom type backed by a [`RawFd`].
+///
+/// ```
+/// use mio::{Ready, Poll, PollOpt, Token};
+/// use mio::event::Evented;
+/// use mio::unix::EventedFd;
+///
+/// use std::os::unix::io::RawFd;
+/// use std::io;
+///
+/// pub struct MyIo {
+/// fd: RawFd,
+/// }
+///
+/// impl Evented for MyIo {
+/// fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// EventedFd(&self.fd).register(poll, token, interest, opts)
+/// }
+///
+/// fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// EventedFd(&self.fd).reregister(poll, token, interest, opts)
+/// }
+///
+/// fn deregister(&self, poll: &Poll) -> io::Result<()> {
+/// EventedFd(&self.fd).deregister(poll)
+/// }
+/// }
+/// ```
+///
+/// [`RawFd`]: https://doc.rust-lang.org/std/os/unix/io/type.RawFd.html
+/// [`Evented`]: ../event/trait.Evented.html
+/// [`Poll`]: ../struct.Poll.html
+/// [`Poll::register`]: ../struct.Poll.html#method.register
+#[derive(Debug)]
+pub struct EventedFd<'a>(pub &'a RawFd);
+
+impl<'a> Evented for EventedFd<'a> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ poll::selector(poll).register(*self.0, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ poll::selector(poll).reregister(*self.0, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ poll::selector(poll).deregister(*self.0)
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/io.rs b/third_party/rust/mio-0.6.23/src/sys/unix/io.rs
new file mode 100644
index 0000000000..47a3a70d1f
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/io.rs
@@ -0,0 +1,107 @@
+use std::fs::File;
+use std::io::{Read, Write};
+use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd};
+
+use libc;
+
+use {io, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use unix::EventedFd;
+use sys::unix::cvt;
+
+pub fn set_nonblock(fd: libc::c_int) -> io::Result<()> {
+ unsafe {
+ let flags = libc::fcntl(fd, libc::F_GETFL);
+ cvt(libc::fcntl(fd, libc::F_SETFL, flags | libc::O_NONBLOCK)).map(|_|())
+ }
+}
+
+pub fn set_cloexec(fd: libc::c_int) -> io::Result<()> {
+ unsafe {
+ let flags = libc::fcntl(fd, libc::F_GETFD);
+ cvt(libc::fcntl(fd, libc::F_SETFD, flags | libc::FD_CLOEXEC)).map(|_| ())
+ }
+}
+
+/*
+ *
+ * ===== Basic IO type =====
+ *
+ */
+
+/// Manages a FD
+#[derive(Debug)]
+pub struct Io {
+ fd: File,
+}
+
+impl Io {
+ /// Try to clone the FD
+ pub fn try_clone(&self) -> io::Result<Io> {
+ Ok(Io { fd: self.fd.try_clone()? })
+ }
+}
+
+impl FromRawFd for Io {
+ unsafe fn from_raw_fd(fd: RawFd) -> Io {
+ Io { fd: File::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for Io {
+ fn into_raw_fd(self) -> RawFd {
+ self.fd.into_raw_fd()
+ }
+}
+
+impl AsRawFd for Io {
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd.as_raw_fd()
+ }
+}
+
+impl Evented for Io {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).deregister(poll)
+ }
+}
+
+impl Read for Io {
+ fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
+ (&self.fd).read(dst)
+ }
+}
+
+impl<'a> Read for &'a Io {
+ fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
+ (&self.fd).read(dst)
+ }
+}
+
+impl Write for Io {
+ fn write(&mut self, src: &[u8]) -> io::Result<usize> {
+ (&self.fd).write(src)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.fd).flush()
+ }
+}
+
+impl<'a> Write for &'a Io {
+ fn write(&mut self, src: &[u8]) -> io::Result<usize> {
+ (&self.fd).write(src)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.fd).flush()
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/kqueue.rs b/third_party/rust/mio-0.6.23/src/sys/unix/kqueue.rs
new file mode 100644
index 0000000000..59c70e1e18
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/kqueue.rs
@@ -0,0 +1,360 @@
+use std::{cmp, fmt, ptr};
+#[cfg(not(target_os = "netbsd"))]
+use std::os::raw::{c_int, c_short};
+use std::os::unix::io::AsRawFd;
+use std::os::unix::io::RawFd;
+use std::collections::HashMap;
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use std::time::Duration;
+
+use libc::{self, time_t};
+
+use {io, Ready, PollOpt, Token};
+use event_imp::{self as event, Event};
+use sys::unix::{cvt, UnixReady};
+use sys::unix::io::set_cloexec;
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that is previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return with an error. This matches Windows behavior.
+static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+#[cfg(not(target_os = "netbsd"))]
+type Filter = c_short;
+#[cfg(not(target_os = "netbsd"))]
+type UData = *mut ::libc::c_void;
+#[cfg(not(target_os = "netbsd"))]
+type Count = c_int;
+
+#[cfg(target_os = "netbsd")]
+type Filter = u32;
+#[cfg(target_os = "netbsd")]
+type UData = ::libc::intptr_t;
+#[cfg(target_os = "netbsd")]
+type Count = usize;
+
+macro_rules! kevent {
+ ($id: expr, $filter: expr, $flags: expr, $data: expr) => {
+ libc::kevent {
+ ident: $id as ::libc::uintptr_t,
+ filter: $filter as Filter,
+ flags: $flags,
+ fflags: 0,
+ data: 0,
+ udata: $data as UData,
+ }
+ }
+}
+
+pub struct Selector {
+ id: usize,
+ kq: RawFd,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // offset by 1 to avoid choosing 0 as the id of a selector
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+ let kq = unsafe { cvt(libc::kqueue())? };
+ drop(set_cloexec(kq));
+
+ Ok(Selector {
+ id,
+ kq,
+ })
+ }
+
+ pub fn id(&self) -> usize {
+ self.id
+ }
+
+ pub fn select(&self, evts: &mut Events, awakener: Token, timeout: Option<Duration>) -> io::Result<bool> {
+ let timeout = timeout.map(|to| {
+ libc::timespec {
+ tv_sec: cmp::min(to.as_secs(), time_t::max_value() as u64) as time_t,
+ // `Duration::subsec_nanos` is guaranteed to be less than one
+ // billion (the number of nanoseconds in a second), making the
+ // cast to i32 safe. The cast itself is needed for platforms
+ // where C's long is only 32 bits.
+ tv_nsec: libc::c_long::from(to.subsec_nanos() as i32),
+ }
+ });
+ let timeout = timeout.as_ref().map(|s| s as *const _).unwrap_or(ptr::null());
+
+ evts.clear();
+ unsafe {
+ let cnt = cvt(libc::kevent(self.kq,
+ ptr::null(),
+ 0,
+ evts.sys_events.0.as_mut_ptr(),
+ evts.sys_events.0.capacity() as Count,
+ timeout))?;
+ evts.sys_events.0.set_len(cnt as usize);
+ Ok(evts.coalesce(awakener))
+ }
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Ready, opts: PollOpt) -> io::Result<()> {
+ trace!("registering; token={:?}; interests={:?}", token, interests);
+
+ let flags = if opts.contains(PollOpt::edge()) { libc::EV_CLEAR } else { 0 } |
+ if opts.contains(PollOpt::oneshot()) { libc::EV_ONESHOT } else { 0 } |
+ libc::EV_RECEIPT;
+
+ unsafe {
+ let r = if interests.contains(Ready::readable()) { libc::EV_ADD } else { libc::EV_DELETE };
+ let w = if interests.contains(Ready::writable()) { libc::EV_ADD } else { libc::EV_DELETE };
+ let mut changes = [
+ kevent!(fd, libc::EVFILT_READ, flags | r, usize::from(token)),
+ kevent!(fd, libc::EVFILT_WRITE, flags | w, usize::from(token)),
+ ];
+
+ cvt(libc::kevent(self.kq,
+ changes.as_ptr(),
+ changes.len() as Count,
+ changes.as_mut_ptr(),
+ changes.len() as Count,
+ ::std::ptr::null()))?;
+
+ for change in changes.iter() {
+ debug_assert_eq!(change.flags & libc::EV_ERROR, libc::EV_ERROR);
+
+ // Test to see if an error happened
+ if change.data == 0 {
+ continue
+ }
+
+ // Older versions of OSX (10.11 and 10.10 have been witnessed)
+ // can return EPIPE when registering a pipe file descriptor
+ // where the other end has already disappeared. For example code
+ // that creates a pipe, closes a file descriptor, and then
+ // registers the other end will see an EPIPE returned from
+ // `register`.
+ //
+ // It also turns out that kevent will still report events on the
+ // file descriptor, telling us that it's readable/hup at least
+ // after we've done this registration. As a result we just
+ // ignore `EPIPE` here instead of propagating it.
+ //
+ // More info can be found at carllerche/mio#582
+ if change.data as i32 == libc::EPIPE &&
+ change.filter == libc::EVFILT_WRITE as Filter {
+ continue
+ }
+
+ // ignore ENOENT error for EV_DELETE
+ let orig_flags = if change.filter == libc::EVFILT_READ as Filter { r } else { w };
+ if change.data as i32 == libc::ENOENT && orig_flags & libc::EV_DELETE != 0 {
+ continue
+ }
+
+ return Err(::std::io::Error::from_raw_os_error(change.data as i32));
+ }
+ Ok(())
+ }
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Ready, opts: PollOpt) -> io::Result<()> {
+ // Just need to call register here since EV_ADD is a mod if already
+ // registered
+ self.register(fd, token, interests, opts)
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ unsafe {
+ // EV_RECEIPT is a nice way to apply changes and get back per-event results while not
+ // draining the actual changes.
+ let filter = libc::EV_DELETE | libc::EV_RECEIPT;
+#[cfg(not(target_os = "netbsd"))]
+ let mut changes = [
+ kevent!(fd, libc::EVFILT_READ, filter, ptr::null_mut()),
+ kevent!(fd, libc::EVFILT_WRITE, filter, ptr::null_mut()),
+ ];
+
+#[cfg(target_os = "netbsd")]
+ let mut changes = [
+ kevent!(fd, libc::EVFILT_READ, filter, 0),
+ kevent!(fd, libc::EVFILT_WRITE, filter, 0),
+ ];
+
+ cvt(libc::kevent(self.kq,
+ changes.as_ptr(),
+ changes.len() as Count,
+ changes.as_mut_ptr(),
+ changes.len() as Count,
+ ::std::ptr::null())).map(|_| ())?;
+
+ if changes[0].data as i32 == libc::ENOENT && changes[1].data as i32 == libc::ENOENT {
+ return Err(::std::io::Error::from_raw_os_error(changes[0].data as i32));
+ }
+ for change in changes.iter() {
+ debug_assert_eq!(libc::EV_ERROR & change.flags, libc::EV_ERROR);
+ if change.data != 0 && change.data as i32 != libc::ENOENT {
+ return Err(::std::io::Error::from_raw_os_error(changes[0].data as i32));
+ }
+ }
+ Ok(())
+ }
+ }
+}
+
+impl fmt::Debug for Selector {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Selector")
+ .field("id", &self.id)
+ .field("kq", &self.kq)
+ .finish()
+ }
+}
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.kq
+ }
+}
+
+impl Drop for Selector {
+ fn drop(&mut self) {
+ unsafe {
+ let _ = libc::close(self.kq);
+ }
+ }
+}
+
+pub struct Events {
+ sys_events: KeventList,
+ events: Vec<Event>,
+ event_map: HashMap<Token, usize>,
+}
+
+struct KeventList(Vec<libc::kevent>);
+
+unsafe impl Send for KeventList {}
+unsafe impl Sync for KeventList {}
+
+impl Events {
+ pub fn with_capacity(cap: usize) -> Events {
+ Events {
+ sys_events: KeventList(Vec::with_capacity(cap)),
+ events: Vec::with_capacity(cap),
+ event_map: HashMap::with_capacity(cap)
+ }
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.events.get(idx).cloned()
+ }
+
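+ // kqueue reports readable and writable readiness as separate kevents
+ // (one per filter), so fold them back into a single `Event` per token
+ // here, using `event_map` to remember each token's slot in `events`.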
+ fn coalesce(&mut self, awakener: Token) -> bool {
+ let mut ret = false;
+ self.events.clear();
+ self.event_map.clear();
+
+ for e in self.sys_events.0.iter() {
+ let token = Token(e.udata as usize);
+ let len = self.events.len();
+
+ if token == awakener {
+ // TODO: Should this return an error if the event is an error?
+ // It is not critical, as spurious wakeups are permitted.
+ ret = true;
+ continue;
+ }
+
+ let idx = *self.event_map.entry(token)
+ .or_insert(len);
+
+ if idx == len {
+ // New entry, insert the default
+ self.events.push(Event::new(Ready::empty(), token));
+
+ }
+
+ if e.flags & libc::EV_ERROR != 0 {
+ event::kind_mut(&mut self.events[idx]).insert(*UnixReady::error());
+ }
+
+ if e.filter == libc::EVFILT_READ as Filter {
+ event::kind_mut(&mut self.events[idx]).insert(Ready::readable());
+ } else if e.filter == libc::EVFILT_WRITE as Filter {
+ event::kind_mut(&mut self.events[idx]).insert(Ready::writable());
+ }
+#[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ {
+ if e.filter == libc::EVFILT_AIO {
+ event::kind_mut(&mut self.events[idx]).insert(UnixReady::aio());
+ }
+ }
+#[cfg(any(target_os = "freebsd"))]
+ {
+ if e.filter == libc::EVFILT_LIO {
+ event::kind_mut(&mut self.events[idx]).insert(UnixReady::lio());
+ }
+ }
+ }
+
+ ret
+ }
+
+ pub fn push_event(&mut self, event: Event) {
+ self.events.push(event);
+ }
+
+ pub fn clear(&mut self) {
+ self.sys_events.0.truncate(0);
+ self.events.truncate(0);
+ self.event_map.clear();
+ }
+}
+
+impl fmt::Debug for Events {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Events")
+ .field("len", &self.sys_events.0.len())
+ .finish()
+ }
+}
+
+#[test]
+fn does_not_register_rw() {
+ use {Poll, Ready, PollOpt, Token};
+ use unix::EventedFd;
+
+ let kq = unsafe { libc::kqueue() };
+ let kqf = EventedFd(&kq);
+ let poll = Poll::new().unwrap();
+
+ // Registering the kqueue fd will fail if write interest is requested
+ // (on anything but some versions of OS X).
+ poll.register(&kqf, Token(1234), Ready::readable(),
+ PollOpt::edge() | PollOpt::oneshot()).unwrap();
+}
+
+#[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+#[test]
+fn test_coalesce_aio() {
+ let mut events = Events::with_capacity(1);
+ events.sys_events.0.push(kevent!(0x1234, libc::EVFILT_AIO, 0, 42));
+ events.coalesce(Token(0));
+ assert!(events.events[0].readiness() == UnixReady::aio().into());
+ assert!(events.events[0].token() == Token(42));
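+// Platforms without the unix `Ready` extensions expose no extra readiness
+// bits, so `Ready::all` covers only readable | writable there.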
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/mod.rs b/third_party/rust/mio-0.6.23/src/sys/unix/mod.rs
new file mode 100644
index 0000000000..c5726c07ce
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/mod.rs
@@ -0,0 +1,105 @@
+use libc::{self, c_int};
+
+#[macro_use]
+pub mod dlsym;
+
+#[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+))]
+mod epoll;
+
+#[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+))]
+pub use self::epoll::{Events, Selector};
+
+#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos",
+ target_os = "netbsd", target_os = "openbsd"))]
+mod kqueue;
+
+#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos",
+ target_os = "netbsd", target_os = "openbsd"))]
+pub use self::kqueue::{Events, Selector};
+
+mod awakener;
+mod eventedfd;
+mod io;
+mod ready;
+mod tcp;
+mod udp;
+mod uio;
+
+#[cfg(feature = "with-deprecated")]
+mod uds;
+
+pub use self::awakener::Awakener;
+pub use self::eventedfd::EventedFd;
+pub use self::io::{Io, set_nonblock};
+pub use self::ready::{UnixReady, READY_ALL};
+pub use self::tcp::{TcpStream, TcpListener};
+pub use self::udp::UdpSocket;
+
+#[cfg(feature = "with-deprecated")]
+pub use self::uds::UnixSocket;
+
+pub use iovec::IoVec;
+
+use std::os::unix::io::FromRawFd;
+
+pub fn pipe() -> ::io::Result<(Io, Io)> {
+ // Use pipe2 for atomically setting O_CLOEXEC if we can, but otherwise
+ // just fall back to using `pipe`.
+ dlsym!(fn pipe2(*mut c_int, c_int) -> c_int);
+
+ let mut pipes = [0; 2];
+ unsafe {
+ match pipe2.get() {
+ Some(pipe2_fn) => {
+ let flags = libc::O_NONBLOCK | libc::O_CLOEXEC;
+ cvt(pipe2_fn(pipes.as_mut_ptr(), flags))?;
+ Ok((Io::from_raw_fd(pipes[0]), Io::from_raw_fd(pipes[1])))
+ }
+ None => {
+ cvt(libc::pipe(pipes.as_mut_ptr()))?;
+ // Ensure the pipe fds are closed if any of the fcntl calls
+ // below fail.
+ let r = Io::from_raw_fd(pipes[0]);
+ let w = Io::from_raw_fd(pipes[1]);
+ cvt(libc::fcntl(pipes[0], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ cvt(libc::fcntl(pipes[1], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ cvt(libc::fcntl(pipes[0], libc::F_SETFL, libc::O_NONBLOCK))?;
+ cvt(libc::fcntl(pipes[1], libc::F_SETFL, libc::O_NONBLOCK))?;
+ Ok((r, w))
+ }
+ }
+ }
+}
+
+trait IsMinusOne {
+ fn is_minus_one(&self) -> bool;
+}
+
+impl IsMinusOne for i32 {
+ fn is_minus_one(&self) -> bool { *self == -1 }
+}
+impl IsMinusOne for isize {
+ fn is_minus_one(&self) -> bool { *self == -1 }
+}
+
+fn cvt<T: IsMinusOne>(t: T) -> ::io::Result<T> {
+ use std::io;
+
+ if t.is_minus_one() {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(t)
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/ready.rs b/third_party/rust/mio-0.6.23/src/sys/unix/ready.rs
new file mode 100644
index 0000000000..88f56252dd
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/ready.rs
@@ -0,0 +1,525 @@
+use event_imp::{Ready, ready_as_usize, ready_from_usize};
+
+use std::ops;
+use std::fmt;
+
+/// Unix specific extensions to `Ready`
+///
+/// Provides additional readiness event kinds that are available on unix
+/// platforms. Unix platforms are able to provide readiness events for
+/// additional socket events, such as HUP and error.
+///
+/// HUP events occur when the remote end of a socket hangs up. In the TCP case,
+/// this occurs when the remote end of a TCP socket shuts down writes.
+///
+/// Error events occur when the socket enters an error state. In this case, the
+/// socket will also receive a readable or writable event. Reading or writing to
+/// the socket will result in an error.
+///
+/// Conversion traits are implemented between `Ready` and `UnixReady`. See the
+/// examples.
+///
+/// For high level documentation on polling and readiness, see [`Poll`].
+///
+/// # Examples
+///
+/// Most of the time, all that is needed is using bit operations
+///
+/// ```
+/// use mio::Ready;
+/// use mio::unix::UnixReady;
+///
+/// let ready = Ready::readable() | UnixReady::hup();
+///
+/// assert!(ready.is_readable());
+/// assert!(UnixReady::from(ready).is_hup());
+/// ```
+///
+/// Basic conversion between ready types.
+///
+/// ```
+/// use mio::Ready;
+/// use mio::unix::UnixReady;
+///
+/// // Start with a portable ready
+/// let ready = Ready::readable();
+///
+/// // Convert to a unix ready, adding HUP
+/// let mut unix_ready = UnixReady::from(ready) | UnixReady::hup();
+///
+/// unix_ready.insert(UnixReady::error());
+///
+/// // `unix_ready` maintains readable interest
+/// assert!(unix_ready.is_readable());
+/// assert!(unix_ready.is_hup());
+/// assert!(unix_ready.is_error());
+///
+/// // Convert back to `Ready`
+/// let ready = Ready::from(unix_ready);
+///
+/// // Readable is maintained
+/// assert!(ready.is_readable());
+/// ```
+///
+/// Registering readable and error interest on a socket
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Ready, Poll, PollOpt, Token};
+/// use mio::net::TcpStream;
+/// use mio::unix::UnixReady;
+///
+/// let addr = "216.58.193.68:80".parse()?;
+/// let socket = TcpStream::connect(&addr)?;
+///
+/// let poll = Poll::new()?;
+///
+/// poll.register(&socket,
+/// Token(0),
+/// Ready::readable() | UnixReady::error(),
+/// PollOpt::edge())?;
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// [`Poll`]: ../struct.Poll.html
+/// [readiness]: struct.Poll.html#readiness-operations
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct UnixReady(Ready);
+
+const ERROR: usize = 0b00_0100;
+const HUP: usize = 0b00_1000;
+
+#[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+const AIO: usize = 0b01_0000;
+
+#[cfg(not(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos")))]
+const AIO: usize = 0b00_0000;
+
+#[cfg(any(target_os = "freebsd"))]
+const LIO: usize = 0b10_0000;
+
+#[cfg(not(any(target_os = "freebsd")))]
+const LIO: usize = 0b00_0000;
+
+#[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+))]
+const PRI: usize = 0b100_0000;
+
+#[cfg(not(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+)))]
+const PRI: usize = 0;
+
+// Export to support `Ready::all`
+pub const READY_ALL: usize = ERROR | HUP | AIO | LIO | PRI;
+
+#[test]
+fn test_ready_all() {
+ let readable = Ready::readable().as_usize();
+ let writable = Ready::writable().as_usize();
+
+ assert_eq!(
+ READY_ALL | readable | writable,
+ ERROR + HUP + AIO + LIO + PRI + readable + writable
+ );
+
+ // Issue #896.
+ #[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+ ))]
+ assert!(!Ready::from(UnixReady::priority()).is_writable());
+}
+
+impl UnixReady {
+ /// Returns a `Ready` representing AIO completion readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::aio();
+ ///
+ /// assert!(ready.is_aio());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ #[inline]
+ #[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ pub fn aio() -> UnixReady {
+ UnixReady(ready_from_usize(AIO))
+ }
+
+ #[cfg(not(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos")))]
+ #[deprecated(since = "0.6.12", note = "this function is now platform specific")]
+ #[doc(hidden)]
+ pub fn aio() -> UnixReady {
+ UnixReady(Ready::empty())
+ }
+
+ /// Returns a `Ready` representing error readiness.
+ ///
+ /// **Note that only readable and writable readiness is guaranteed to be
+ /// supported on all platforms**. This means that `error` readiness
+ /// should be treated as a hint. For more details, see [readiness] in the
+ /// poll documentation.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::error();
+ ///
+ /// assert!(ready.is_error());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ /// [readiness]: ../struct.Poll.html#readiness-operations
+ #[inline]
+ pub fn error() -> UnixReady {
+ UnixReady(ready_from_usize(ERROR))
+ }
+
+ /// Returns a `Ready` representing HUP readiness.
+ ///
+ /// A HUP (or hang-up) signifies that a stream socket **peer** closed the
+ /// connection, or shut down the writing half of the connection.
+ ///
+ /// **Note that only readable and writable readiness is guaranteed to be
+ /// supported on all platforms**. This means that `hup` readiness
+ /// should be treated as a hint. For more details, see [readiness] in the
+ /// poll documentation. It is also unclear if HUP readiness will remain in 0.7. See
+ /// [here][issue-941].
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::hup();
+ ///
+ /// assert!(ready.is_hup());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ /// [readiness]: ../struct.Poll.html#readiness-operations
+ /// [issue-941]: https://github.com/tokio-rs/mio/issues/941
+ #[inline]
+ pub fn hup() -> UnixReady {
+ UnixReady(ready_from_usize(HUP))
+ }
+
+ /// Returns a `Ready` representing LIO completion readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::lio();
+ ///
+ /// assert!(ready.is_lio());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ #[cfg(any(target_os = "freebsd"))]
+ pub fn lio() -> UnixReady {
+ UnixReady(ready_from_usize(LIO))
+ }
+
+ /// Returns a `Ready` representing priority (`EPOLLPRI`) readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::priority();
+ ///
+ /// assert!(ready.is_priority());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ #[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+ ))]
+ pub fn priority() -> UnixReady {
+ UnixReady(ready_from_usize(PRI))
+ }
+
+ /// Returns true if `Ready` contains AIO readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::aio();
+ ///
+ /// assert!(ready.is_aio());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ #[inline]
+ #[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ pub fn is_aio(&self) -> bool {
+ self.contains(ready_from_usize(AIO))
+ }
+
+ #[deprecated(since = "0.6.12", note = "this function is now platform specific")]
+ #[cfg(feature = "with-deprecated")]
+ #[cfg(not(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos")))]
+ #[doc(hidden)]
+ pub fn is_aio(&self) -> bool {
+ false
+ }
+
+ /// Returns true if the value includes error readiness
+ ///
+ /// **Note that only readable and writable readiness is guaranteed to be
+ /// supported on all platforms**. This means that `error` readiness should
+ /// be treated as a hint. For more details, see [readiness] in the poll
+ /// documentation.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::error();
+ ///
+ /// assert!(ready.is_error());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ /// [readiness]: ../struct.Poll.html#readiness-operations
+ #[inline]
+ pub fn is_error(&self) -> bool {
+ self.contains(ready_from_usize(ERROR))
+ }
+
+ /// Returns true if the value includes HUP readiness
+ ///
+ /// A HUP (or hang-up) signifies that a stream socket **peer** closed the
+ /// connection, or shut down the writing half of the connection.
+ ///
+ /// **Note that only readable and writable readiness is guaranteed to be
+ /// supported on all platforms**. This means that `hup` readiness
+ /// should be treated as a hint. For more details, see [readiness] in the
+ /// poll documentation.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::hup();
+ ///
+ /// assert!(ready.is_hup());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ /// [readiness]: ../struct.Poll.html#readiness-operations
+ #[inline]
+ pub fn is_hup(&self) -> bool {
+ self.contains(ready_from_usize(HUP))
+ }
+
+ /// Returns true if `Ready` contains LIO readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::lio();
+ ///
+ /// assert!(ready.is_lio());
+ /// ```
+ #[inline]
+ #[cfg(any(target_os = "freebsd"))]
+ pub fn is_lio(&self) -> bool {
+ self.contains(ready_from_usize(LIO))
+ }
+
+ /// Returns true if `Ready` contains priority (`EPOLLPRI`) readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::priority();
+ ///
+ /// assert!(ready.is_priority());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ #[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+ ))]
+ pub fn is_priority(&self) -> bool {
+ self.contains(ready_from_usize(PRI))
+ }
+}
+
+impl From<Ready> for UnixReady {
+ fn from(src: Ready) -> UnixReady {
+ UnixReady(src)
+ }
+}
+
+impl From<UnixReady> for Ready {
+ fn from(src: UnixReady) -> Ready {
+ src.0
+ }
+}
+
+impl ops::Deref for UnixReady {
+ type Target = Ready;
+
+ fn deref(&self) -> &Ready {
+ &self.0
+ }
+}
+
+impl ops::DerefMut for UnixReady {
+ fn deref_mut(&mut self) -> &mut Ready {
+ &mut self.0
+ }
+}
+
+impl ops::BitOr for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn bitor(self, other: UnixReady) -> UnixReady {
+ (self.0 | other.0).into()
+ }
+}
+
+impl ops::BitXor for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn bitxor(self, other: UnixReady) -> UnixReady {
+ (self.0 ^ other.0).into()
+ }
+}
+
+impl ops::BitAnd for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn bitand(self, other: UnixReady) -> UnixReady {
+ (self.0 & other.0).into()
+ }
+}
+
+impl ops::Sub for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn sub(self, other: UnixReady) -> UnixReady {
+ ready_from_usize(ready_as_usize(self.0) & !ready_as_usize(other.0)).into()
+ }
+}
+
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+impl ops::Not for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn not(self) -> UnixReady {
+ (!self.0).into()
+ }
+}
+
+impl fmt::Debug for UnixReady {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let mut one = false;
+ let flags = [
+ (UnixReady(Ready::readable()), "Readable"),
+ (UnixReady(Ready::writable()), "Writable"),
+ (UnixReady::error(), "Error"),
+ (UnixReady::hup(), "Hup"),
+ #[allow(deprecated)]
+ (UnixReady::aio(), "Aio"),
+ #[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+ ))]
+ (UnixReady::priority(), "Priority"),
+ ];
+
+ for &(flag, msg) in &flags {
+ if self.contains(flag) {
+ if one { write!(fmt, " | ")? }
+ write!(fmt, "{}", msg)?;
+
+ one = true
+ }
+ }
+
+ if !one {
+ fmt.write_str("(empty)")?;
+ }
+
+ Ok(())
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/tcp.rs b/third_party/rust/mio-0.6.23/src/sys/unix/tcp.rs
new file mode 100644
index 0000000000..7962fcecb3
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/tcp.rs
@@ -0,0 +1,286 @@
+use std::fmt;
+use std::io::{Read, Write};
+use std::net::{self, SocketAddr};
+use std::os::unix::io::{RawFd, FromRawFd, IntoRawFd, AsRawFd};
+use std::time::Duration;
+
+use libc;
+use net2::TcpStreamExt;
+use iovec::IoVec;
+
+use {io, Ready, Poll, PollOpt, Token};
+use event::Evented;
+
+use sys::unix::eventedfd::EventedFd;
+use sys::unix::io::set_nonblock;
+use sys::unix::uio::VecIo;
+
+pub struct TcpStream {
+ inner: net::TcpStream,
+}
+
+pub struct TcpListener {
+ inner: net::TcpListener,
+}
+
+impl TcpStream {
+ pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
+ set_nonblock(stream.as_raw_fd())?;
+
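+ // A connect(2) on a non-blocking socket normally fails immediately
+ // with EINPROGRESS; completion is signaled later by the socket
+ // becoming writable, so that error is not treated as fatal here.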
+ match stream.connect(addr) {
+ Ok(..) => {}
+ Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
+ Err(e) => return Err(e),
+ }
+
+ Ok(TcpStream {
+ inner: stream,
+ })
+ }
+
+ pub fn from_stream(stream: net::TcpStream) -> TcpStream {
+ TcpStream {
+ inner: stream,
+ }
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.peer_addr()
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpStream> {
+ self.inner.try_clone().map(|s| {
+ TcpStream {
+ inner: s,
+ }
+ })
+ }
+
+ pub fn shutdown(&self, how: net::Shutdown) -> io::Result<()> {
+ self.inner.shutdown(how)
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.inner.set_nodelay(nodelay)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.inner.nodelay()
+ }
+
+ pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.inner.set_recv_buffer_size(size)
+ }
+
+ pub fn recv_buffer_size(&self) -> io::Result<usize> {
+ self.inner.recv_buffer_size()
+ }
+
+ pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.inner.set_send_buffer_size(size)
+ }
+
+ pub fn send_buffer_size(&self) -> io::Result<usize> {
+ self.inner.send_buffer_size()
+ }
+
+ pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+ self.inner.set_keepalive(keepalive)
+ }
+
+ pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+ self.inner.keepalive()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.inner.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.inner.only_v6()
+ }
+
+ pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+ TcpStreamExt::set_linger(&self.inner, dur)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ TcpStreamExt::linger(&self.inner)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.peek(buf)
+ }
+
+ pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ self.inner.readv(bufs)
+ }
+
+ pub fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ self.inner.writev(bufs)
+ }
+}
+
+impl<'a> Read for &'a TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (&self.inner).read(buf)
+ }
+}
+
+impl<'a> Write for &'a TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&self.inner).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.inner).flush()
+ }
+}
+
+impl Evented for TcpStream {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).deregister(poll)
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.inner, f)
+ }
+}
+
+impl FromRawFd for TcpStream {
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpStream {
+ TcpStream {
+ inner: net::TcpStream::from_raw_fd(fd),
+ }
+ }
+}
+
+impl IntoRawFd for TcpStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_raw_fd()
+ }
+}
+
+impl AsRawFd for TcpStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl TcpListener {
+ pub fn new(inner: net::TcpListener) -> io::Result<TcpListener> {
+ set_nonblock(inner.as_raw_fd())?;
+ Ok(TcpListener {
+ inner,
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpListener> {
+ self.inner.try_clone().map(|s| {
+ TcpListener {
+ inner: s,
+ }
+ })
+ }
+
+ pub fn accept(&self) -> io::Result<(net::TcpStream, SocketAddr)> {
+ self.inner.accept()
+ }
+
+ #[allow(deprecated)]
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.inner.set_only_v6(only_v6)
+ }
+
+ #[allow(deprecated)]
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.inner.only_v6()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+}
+
+impl Evented for TcpListener {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).deregister(poll)
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.inner, f)
+ }
+}
+
+impl FromRawFd for TcpListener {
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpListener {
+ TcpListener {
+ inner: net::TcpListener::from_raw_fd(fd),
+ }
+ }
+}
+
+impl IntoRawFd for TcpListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_raw_fd()
+ }
+}
+
+impl AsRawFd for TcpListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/udp.rs b/third_party/rust/mio-0.6.23/src/sys/unix/udp.rs
new file mode 100644
index 0000000000..c77a9d6380
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/udp.rs
@@ -0,0 +1,181 @@
+use {io, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use unix::EventedFd;
+use sys::unix::uio::VecIo;
+use std::fmt;
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+use std::os::unix::io::{RawFd, IntoRawFd, AsRawFd, FromRawFd};
+
+#[allow(unused_imports)] // only here for Rust 1.8
+use net2::UdpSocketExt;
+use iovec::IoVec;
+
+pub struct UdpSocket {
+ io: net::UdpSocket,
+}
+
+impl UdpSocket {
+ pub fn new(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ socket.set_nonblocking(true)?;
+ Ok(UdpSocket {
+ io: socket,
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.io.try_clone().map(|io| {
+ UdpSocket {
+ io,
+ }
+ })
+ }
+
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
+ self.io.send_to(buf, target)
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.io.recv_from(buf)
+ }
+
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.io.send(buf)
+ }
+
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.recv(buf)
+ }
+
+ pub fn connect(&self, addr: SocketAddr)
+ -> io::Result<()> {
+ self.io.connect(addr)
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.io.broadcast()
+ }
+
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.io.set_broadcast(on)
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.io.multicast_loop_v4()
+ }
+
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.io.set_multicast_loop_v4(on)
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.io.multicast_ttl_v4()
+ }
+
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_multicast_ttl_v4(ttl)
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.io.multicast_loop_v6()
+ }
+
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.io.set_multicast_loop_v6(on)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.ttl()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_ttl(ttl)
+ }
+
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.io.join_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.io.join_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.io.leave_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.io.leave_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.io.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.io.only_v6()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.take_error()
+ }
+
+ pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ self.io.readv(bufs)
+ }
+
+ pub fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ self.io.writev(bufs)
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).deregister(poll)
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.io, f)
+ }
+}
+
+impl FromRawFd for UdpSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
+ UdpSocket {
+ io: net::UdpSocket::from_raw_fd(fd),
+ }
+ }
+}
+
+impl IntoRawFd for UdpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.io.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UdpSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.as_raw_fd()
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/uds.rs b/third_party/rust/mio-0.6.23/src/sys/unix/uds.rs
new file mode 100644
index 0000000000..f6706784f8
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/uds.rs
@@ -0,0 +1,265 @@
+use std::io::{Read, Write};
+use std::mem;
+use std::net::Shutdown;
+use std::os::unix::prelude::*;
+use std::path::Path;
+use std::ptr;
+
+use libc;
+
+use {io, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use sys::unix::{cvt, Io};
+use sys::unix::io::{set_nonblock, set_cloexec};
+
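+// Length fields such as `msghdr::msg_controllen` differ in type across
+// platforms (`usize` on Linux, `u32` on most BSDs), so this small conversion
+// trait lets the code below hand a `usize` to whichever type libc declares.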
+trait MyInto<T> {
+ fn my_into(self) -> T;
+}
+
+impl MyInto<u32> for usize {
+ fn my_into(self) -> u32 { self as u32 }
+}
+
+impl MyInto<usize> for usize {
+ fn my_into(self) -> usize { self }
+}
+
+unsafe fn sockaddr_un(path: &Path)
+ -> io::Result<(libc::sockaddr_un, libc::socklen_t)> {
+ let mut addr: libc::sockaddr_un = mem::zeroed();
+ addr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+
+ let bytes = path.as_os_str().as_bytes();
+
+ if bytes.len() >= addr.sun_path.len() {
+ return Err(io::Error::new(io::ErrorKind::InvalidInput,
+ "path must be shorter than SUN_LEN"))
+ }
+ for (dst, src) in addr.sun_path.iter_mut().zip(bytes.iter()) {
+ *dst = *src as libc::c_char;
+ }
+ // null byte for pathname addresses is already there because we zeroed the
+ // struct
+
+ let mut len = sun_path_offset() + bytes.len();
+ match bytes.get(0) {
+ Some(&0) | None => {}
+ Some(_) => len += 1,
+ }
+ Ok((addr, len as libc::socklen_t))
+}
+
+fn sun_path_offset() -> usize {
+ // Silence rustc 1.65 warning about mem::uninitialized.
+ #[allow(invalid_value)]
+ unsafe {
+ // Work with an actual instance of the type since using a null pointer is UB
+ let addr: libc::sockaddr_un = mem::uninitialized();
+ let base = &addr as *const _ as usize;
+ let path = &addr.sun_path as *const _ as usize;
+ path - base
+ }
+}
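+
+// A sketch of how `sun_path_offset` could be written without the deprecated
+// `mem::uninitialized` on compilers newer than this vendored crate targets
+// (illustrative only; `ptr::addr_of!` needs Rust 1.51+):
+//
+//     let addr = mem::MaybeUninit::<libc::sockaddr_un>::uninit();
+//     let base = addr.as_ptr() as usize;
+//     let path = unsafe { ptr::addr_of!((*addr.as_ptr()).sun_path) } as usize;
+//     path - base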
+
+#[derive(Debug)]
+pub struct UnixSocket {
+ io: Io,
+}
+
+impl UnixSocket {
+ /// Returns a new, unbound, non-blocking Unix domain socket
+ pub fn stream() -> io::Result<UnixSocket> {
+ #[cfg(target_os = "linux")]
+ use libc::{SOCK_CLOEXEC, SOCK_NONBLOCK};
+ #[cfg(not(target_os = "linux"))]
+ const SOCK_CLOEXEC: libc::c_int = 0;
+ #[cfg(not(target_os = "linux"))]
+ const SOCK_NONBLOCK: libc::c_int = 0;
+
+ unsafe {
+ if cfg!(target_os = "linux") {
+ let flags = libc::SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK;
+ match cvt(libc::socket(libc::AF_UNIX, flags, 0)) {
+ Ok(fd) => return Ok(UnixSocket::from_raw_fd(fd)),
+ Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {}
+ Err(e) => return Err(e),
+ }
+ }
+
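+ // Fallback: create the socket plainly, then set the close-on-exec and
+ // nonblocking flags with separate fcntl calls. This path is taken on
+ // non-Linux targets and on pre-2.6.27 Linux kernels, which reject the
+ // SOCK_CLOEXEC/SOCK_NONBLOCK flags with EINVAL; unlike the flags-based
+ // form it is not atomic with respect to a concurrent fork/exec.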
+ let fd = cvt(libc::socket(libc::AF_UNIX, libc::SOCK_STREAM, 0))?;
+ let fd = UnixSocket::from_raw_fd(fd);
+ set_cloexec(fd.as_raw_fd())?;
+ set_nonblock(fd.as_raw_fd())?;
+ Ok(fd)
+ }
+ }
+
+ /// Connect the socket to the specified address
+ pub fn connect<P: AsRef<Path> + ?Sized>(&self, addr: &P) -> io::Result<()> {
+ unsafe {
+ let (addr, len) = sockaddr_un(addr.as_ref())?;
+ cvt(libc::connect(self.as_raw_fd(),
+ &addr as *const _ as *const _,
+ len))?;
+ Ok(())
+ }
+ }
+
+ /// Listen for incoming requests
+ pub fn listen(&self, backlog: usize) -> io::Result<()> {
+ unsafe {
+ cvt(libc::listen(self.as_raw_fd(), backlog as i32))?;
+ Ok(())
+ }
+ }
+
+ pub fn accept(&self) -> io::Result<UnixSocket> {
+ unsafe {
+ let fd = cvt(libc::accept(self.as_raw_fd(),
+ ptr::null_mut(),
+ ptr::null_mut()))?;
+ let fd = Io::from_raw_fd(fd);
+ set_cloexec(fd.as_raw_fd())?;
+ set_nonblock(fd.as_raw_fd())?;
+ Ok(UnixSocket { io: fd })
+ }
+ }
+
+ /// Bind the socket to the specified address
+ pub fn bind<P: AsRef<Path> + ?Sized>(&self, addr: &P) -> io::Result<()> {
+ unsafe {
+ let (addr, len) = sockaddr_un(addr.as_ref())?;
+ cvt(libc::bind(self.as_raw_fd(),
+ &addr as *const _ as *const _,
+ len))?;
+ Ok(())
+ }
+ }
+
+ pub fn try_clone(&self) -> io::Result<UnixSocket> {
+ Ok(UnixSocket { io: self.io.try_clone()? })
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ let how = match how {
+ Shutdown::Read => libc::SHUT_RD,
+ Shutdown::Write => libc::SHUT_WR,
+ Shutdown::Both => libc::SHUT_RDWR,
+ };
+ unsafe {
+ cvt(libc::shutdown(self.as_raw_fd(), how))?;
+ Ok(())
+ }
+ }
+
+ pub fn read_recv_fd(&mut self, buf: &mut [u8]) -> io::Result<(usize, Option<RawFd>)> {
+ unsafe {
+ let mut iov = libc::iovec {
+ iov_base: buf.as_mut_ptr() as *mut _,
+ iov_len: buf.len(),
+ };
+ struct Cmsg {
+ hdr: libc::cmsghdr,
+ data: [libc::c_int; 1],
+ }
+ let mut cmsg: Cmsg = mem::zeroed();
+ let mut msg: libc::msghdr = mem::zeroed();
+ msg.msg_iov = &mut iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = &mut cmsg as *mut _ as *mut _;
+ msg.msg_controllen = mem::size_of_val(&cmsg).my_into();
+ let bytes = cvt(libc::recvmsg(self.as_raw_fd(), &mut msg, 0))?;
+
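+ // SCM_RIGHTS is 1 on the platforms this module supports (Linux, the
+ // BSDs, macOS); it is defined locally here rather than taken from
+ // `libc`.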
+ const SCM_RIGHTS: libc::c_int = 1;
+
+ let fd = if cmsg.hdr.cmsg_level == libc::SOL_SOCKET &&
+ cmsg.hdr.cmsg_type == SCM_RIGHTS {
+ Some(cmsg.data[0])
+ } else {
+ None
+ };
+ Ok((bytes as usize, fd))
+ }
+ }
+
+ pub fn write_send_fd(&mut self, buf: &[u8], fd: RawFd) -> io::Result<usize> {
+ unsafe {
+ let mut iov = libc::iovec {
+ iov_base: buf.as_ptr() as *mut _,
+ iov_len: buf.len(),
+ };
+ struct Cmsg {
+ #[allow(dead_code)]
+ hdr: libc::cmsghdr,
+ data: [libc::c_int; 1],
+ }
+ let mut cmsg: Cmsg = mem::zeroed();
+ cmsg.hdr.cmsg_len = mem::size_of_val(&cmsg).my_into();
+ cmsg.hdr.cmsg_level = libc::SOL_SOCKET;
+ cmsg.hdr.cmsg_type = 1; // SCM_RIGHTS
+ cmsg.data[0] = fd;
+ let mut msg: libc::msghdr = mem::zeroed();
+ msg.msg_iov = &mut iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = &mut cmsg as *mut _ as *mut _;
+ msg.msg_controllen = mem::size_of_val(&cmsg).my_into();
+ let bytes = cvt(libc::sendmsg(self.as_raw_fd(), &msg, 0))?;
+ Ok(bytes as usize)
+ }
+ }
+}
+
+impl Read for UnixSocket {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.read(buf)
+ }
+}
+
+impl Write for UnixSocket {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.io.write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.io.flush()
+ }
+}
+
+impl Evented for UnixSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.io.deregister(poll)
+ }
+}
+
+impl From<Io> for UnixSocket {
+ fn from(io: Io) -> UnixSocket {
+ UnixSocket { io }
+ }
+}
+
+impl FromRawFd for UnixSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixSocket {
+ UnixSocket { io: Io::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for UnixSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.io.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.as_raw_fd()
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/uio.rs b/third_party/rust/mio-0.6.23/src/sys/unix/uio.rs
new file mode 100644
index 0000000000..e38cd4983b
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/uio.rs
@@ -0,0 +1,44 @@
+use std::cmp;
+use std::io;
+use std::os::unix::io::AsRawFd;
+use libc;
+use iovec::IoVec;
+use iovec::unix as iovec;
+
+pub trait VecIo {
+ fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize>;
+
+ fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize>;
+}
+
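+// Note: both implementations below clamp the iovec count to `c_int::MAX`;
+// the kernel additionally enforces its own IOV_MAX limit (commonly 1024)
+// on the number of buffers accepted per readv/writev call.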
+impl<T: AsRawFd> VecIo for T {
+ fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ unsafe {
+ let slice = iovec::as_os_slice_mut(bufs);
+ let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
+ let rc = libc::readv(self.as_raw_fd(),
+ slice.as_ptr(),
+ len as libc::c_int);
+ if rc < 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(rc as usize)
+ }
+ }
+ }
+
+ fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ unsafe {
+ let slice = iovec::as_os_slice(bufs);
+ let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
+ let rc = libc::writev(self.as_raw_fd(),
+ slice.as_ptr(),
+ len as libc::c_int);
+ if rc < 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(rc as usize)
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/awakener.rs b/third_party/rust/mio-0.6.23/src/sys/windows/awakener.rs
new file mode 100644
index 0000000000..c913bc93f8
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/awakener.rs
@@ -0,0 +1,66 @@
+use std::sync::Mutex;
+
+use miow::iocp::CompletionStatus;
+use {io, poll, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use sys::windows::Selector;
+
+pub struct Awakener {
+ inner: Mutex<Option<AwakenerInner>>,
+}
+
+struct AwakenerInner {
+ token: Token,
+ selector: Selector,
+}
+
+impl Awakener {
+ pub fn new() -> io::Result<Awakener> {
+ Ok(Awakener {
+ inner: Mutex::new(None),
+ })
+ }
+
+ pub fn wakeup(&self) -> io::Result<()> {
+ // Each wakeup notification has NULL as its `OVERLAPPED` pointer to
+ // indicate that it's from this awakener and not part of an I/O
+ // operation. This is specially recognized by the selector.
+ //
+ // If we haven't been registered with an event loop yet, just
+ // silently succeed.
+ if let Some(inner) = self.inner.lock().unwrap().as_ref() {
+ let status = CompletionStatus::new(0,
+ usize::from(inner.token),
+ 0 as *mut _);
+ inner.selector.port().post(status)?;
+ }
+ Ok(())
+ }
+
+ pub fn cleanup(&self) {
+ // noop
+ }
+}
+
+impl Evented for Awakener {
+ fn register(&self, poll: &Poll, token: Token, events: Ready,
+ opts: PollOpt) -> io::Result<()> {
+ assert_eq!(opts, PollOpt::edge());
+ assert_eq!(events, Ready::readable());
+ *self.inner.lock().unwrap() = Some(AwakenerInner {
+ selector: poll::selector(poll).clone_ref(),
+ token: token,
+ });
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, events: Ready,
+ opts: PollOpt) -> io::Result<()> {
+ self.register(poll, token, events, opts)
+ }
+
+ fn deregister(&self, _poll: &Poll) -> io::Result<()> {
+ *self.inner.lock().unwrap() = None;
+ Ok(())
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/buffer_pool.rs b/third_party/rust/mio-0.6.23/src/sys/windows/buffer_pool.rs
new file mode 100644
index 0000000000..86754593fd
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/buffer_pool.rs
@@ -0,0 +1,20 @@
+pub struct BufferPool {
+ pool: Vec<Vec<u8>>,
+}
+
+impl BufferPool {
+ pub fn new(cap: usize) -> BufferPool {
+ BufferPool { pool: Vec::with_capacity(cap) }
+ }
+
+ pub fn get(&mut self, default_cap: usize) -> Vec<u8> {
+ self.pool.pop().unwrap_or_else(|| Vec::with_capacity(default_cap))
+ }
+
+ pub fn put(&mut self, mut buf: Vec<u8>) {
+ if self.pool.len() < self.pool.capacity() {
+ unsafe { buf.set_len(0); }
+ self.pool.push(buf);
+ }
+ }
+}
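+
+// Usage sketch (illustrative only): buffers are recycled up to the pool's
+// original capacity and simply dropped beyond it.
+//
+//     let mut pool = BufferPool::new(2);
+//     let buf = pool.get(64 * 1024); // fresh or recycled; len is always 0
+//     pool.put(buf);                 // truncated and kept for reuse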
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/from_raw_arc.rs b/third_party/rust/mio-0.6.23/src/sys/windows/from_raw_arc.rs
new file mode 100644
index 0000000000..b6d38b2408
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/from_raw_arc.rs
@@ -0,0 +1,116 @@
+//! A "Manual Arc" which allows manually frobbing the reference count
+//!
+//! This module contains a copy of the `Arc` found in the standard library,
+//! stripped down to the bare bones of what we actually need. The reason this is
+//! done is for the ability to concretely know the memory layout of the `Inner`
+//! structure of the arc pointer itself (e.g. `ArcInner` in the standard
+//! library).
+//!
+//! We do some unsafe casting from `*mut OVERLAPPED` to a `FromRawArc<T>` to
+//! ensure that data lives for the length of an I/O operation, but this means
+//! that we have to know the layouts of the structures involved. This
+//! representation primarily guarantees that the data, `T` is at the front of
+//! the inner pointer always.
+//!
+//! Note that we're missing out on various optimizations implemented in the
+//! standard library:
+//!
+//! * The size of `FromRawArc` is actually two words because of the drop flag
+//! * The compiler doesn't understand that the pointer in `FromRawArc` is never
+//! null, so Option<FromRawArc<T>> is not a nullable pointer.
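+//!
+//! Illustrative lifecycle (a sketch of how the selector code uses this
+//! type, not additional API):
+//!
+//!     let arc = FromRawArc::new(io);   // refcount = 1
+//!     mem::forget(arc.clone());        // refcount = 2, "loaned" to the
+//!                                      // kernel for a pending operation
+//!     // ... a completion arrives carrying a pointer to the data ...
+//!     let arc2 = unsafe { FromRawArc::from_raw(ptr) }; // repays the loan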
+
+use std::ops::Deref;
+use std::mem;
+use std::sync::atomic::{self, AtomicUsize, Ordering};
+
+pub struct FromRawArc<T> {
+ _inner: *mut Inner<T>,
+}
+
+unsafe impl<T: Sync + Send> Send for FromRawArc<T> { }
+unsafe impl<T: Sync + Send> Sync for FromRawArc<T> { }
+
+#[repr(C)]
+struct Inner<T> {
+ data: T,
+ cnt: AtomicUsize,
+}
+
+impl<T> FromRawArc<T> {
+ pub fn new(data: T) -> FromRawArc<T> {
+ let x = Box::new(Inner {
+ data: data,
+ cnt: AtomicUsize::new(1),
+ });
+ FromRawArc { _inner: unsafe { mem::transmute(x) } }
+ }
+
+ pub unsafe fn from_raw(ptr: *mut T) -> FromRawArc<T> {
+ // Note that if we could use `mem::transmute` here to get a libstd Arc
+ // with a guaranteed layout then we could just use std::sync::Arc, but
+ // the lack of that guarantee is the crucial reason this type exists.
+ FromRawArc { _inner: ptr as *mut Inner<T> }
+ }
+}
+
+impl<T> Clone for FromRawArc<T> {
+ fn clone(&self) -> FromRawArc<T> {
+ // Atomic ordering of Relaxed lifted from libstd, but the general idea
+ // is that you need synchronization to communicate this increment to
+ // another thread, so this itself doesn't need to be synchronized.
+ unsafe {
+ (*self._inner).cnt.fetch_add(1, Ordering::Relaxed);
+ }
+ FromRawArc { _inner: self._inner }
+ }
+}
+
+impl<T> Deref for FromRawArc<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &(*self._inner).data }
+ }
+}
+
+impl<T> Drop for FromRawArc<T> {
+ fn drop(&mut self) {
+ unsafe {
+ // Atomic orderings lifted from the standard library
+ if (*self._inner).cnt.fetch_sub(1, Ordering::Release) != 1 {
+ return
+ }
+ atomic::fence(Ordering::Acquire);
+ // Reconstitute the `Box<Inner<T>>` allocated in `new` so the whole
+ // allocation (data + refcount) is freed with its original layout;
+ // a `Box<T>` here would deallocate with the wrong size.
+ drop(Box::from_raw(self._inner));
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::FromRawArc;
+
+ #[test]
+ fn smoke() {
+ let a = FromRawArc::new(1);
+ assert_eq!(*a, 1);
+ assert_eq!(*a.clone(), 1);
+ }
+
+ #[test]
+ fn drops() {
+ struct A<'a>(&'a mut bool);
+ impl<'a> Drop for A<'a> {
+ fn drop(&mut self) {
+ *self.0 = true;
+ }
+ }
+ let mut a = false;
+ {
+ let a = FromRawArc::new(A(&mut a));
+ let _ = a.clone();
+ assert!(!*a.0);
+ }
+ assert!(a);
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/mod.rs b/third_party/rust/mio-0.6.23/src/sys/windows/mod.rs
new file mode 100644
index 0000000000..9b9f054495
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/mod.rs
@@ -0,0 +1,193 @@
+//! Implementation of mio for Windows using IOCP
+//!
+//! This module uses I/O Completion Ports (IOCP) on Windows to implement mio's
+//! Unix epoll-like interface. Unfortunately these two I/O models are
+//! fundamentally incompatible:
+//!
+//! * IOCP is a completion-based model where work is submitted to the kernel and
+//! a program is notified later when the work has finished.
+//! * epoll is a readiness-based model where the kernel is queried as to what
+//! work can be done, and afterwards the work is done.
+//!
+//! As a result, this implementation for Windows is much less "low level" than
+//! the Unix implementation of mio. This design decision was intentional,
+//! however.
+//!
+//! ## What is IOCP?
+//!
+//! The [official docs][docs] have a comprehensive explanation of what IOCP is,
+//! but at a high level it requires the following operations to be executed to
+//! perform some I/O:
+//!
+//! 1. A completion port is created
+//! 2. An I/O handle and a token is registered with this completion port
+//! 3. Some I/O is issued on the handle. This generally means that an API was
+//! invoked with a zeroed `OVERLAPPED` structure. The API will immediately
+//! return.
+//! 4. After some time, the application queries the I/O port for completed
+//! events. The port will return a pointer to the `OVERLAPPED` along with
+//! the token presented at registration time.
+//!
+//! Many I/O operations can be fired off before waiting on a port, and the port
+//! will block execution of the calling thread until an I/O event has completed
+//! (or a timeout has elapsed).
+//!
+//! Currently all of these low-level operations are housed in a separate `miow`
+//! crate to provide a 0-cost abstraction over IOCP. This crate uses that to
+//! implement all the fiddly bits, so there are very few actual Windows API
+//! calls or `unsafe` blocks here as a result.
+//!
+//! [docs]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198%28v=vs.85%29.aspx
+//!
+//! ## Safety of IOCP
+//!
+//! Unfortunately for us, IOCP is pretty unsafe in terms of Rust lifetimes and
+//! such. When an I/O operation is submitted to the kernel, it involves handing
+//! the kernel a few pointers like a buffer to read/write, an `OVERLAPPED`
+//! structure pointer, and perhaps some other buffers such as for socket
+//! addresses. These pointers all have to remain valid **for the entire I/O
+//! operation's duration**.
+//!
+//! There's no way to define a safe lifetime for these pointers/buffers over
+//! the span of an I/O operation, so we're forced to add a layer of abstraction
+//! (not 0-cost) to make these APIs safe. Currently this implementation
+//! basically just boxes everything up on the heap to give it a stable address
+//! and then keys off that most of the time.
+//!
+//! ## From completion to readiness
+//!
+//! Translating a completion-based model to a readiness-based model is also no
+//! easy task, and a significant portion of this implementation is managing this
+//! translation. The basic idea behind this implementation is to issue I/O
+//! operations preemptively and then translate their completions to an "I'm
+//! ready" event.
+//!
+//! For example, in the case of reading a `TcpSocket`, as soon as a socket is
+//! connected (or registered after an accept) a read operation is executed.
+//! While the read is in progress calls to `read` will return `WouldBlock`, and
+//! once the read is completed we translate the completion notification into a
+//! `readable` event. Once the internal buffer is drained (e.g. all data from it
+//! has been read) a read operation is re-issued.
+//!
+//! Write operations are a little different from reads, and the current
+//! implementation is to just schedule a write as soon as `write` is first
+//! called. While that write operation is in progress all future calls to
+//! `write` will return `WouldBlock`. Completion of the write then translates to
+//! a `writable` event. Note that this will probably want to add some layer of
+//! internal buffering in the future.
+//!
+//! ## Buffer Management
+//!
+//! As there are many I/O operations in flight at any one point in time,
+//! there are many live buffers that need to be juggled around (e.g. this
+//! implementation's own internal buffers).
+//!
+//! Currently all buffers are created for the I/O operation at hand and are then
+//! discarded when it completes (this is listed as future work below).
+//!
+//! ## Callback Management
+//!
+//! When the main event loop receives a notification that an I/O operation has
+//! completed, some work needs to be done to translate that to a set of events
+//! or perhaps some more I/O needs to be scheduled. For example after a
+//! `TcpStream` is connected it generates a writable event and also schedules a
+//! read.
+//!
+//! To manage all this the `Selector` uses the `OVERLAPPED` pointer from the
+//! completion status. The selector assumes that all `OVERLAPPED` pointers are
+//! actually pointers to the interior of a `selector::Overlapped` which means
+//! that right after the `OVERLAPPED` itself there's a function pointer. This
+//! function pointer is given the completion status as well as another callback
+//! to push events onto the selector.
+//!
+//! The callback for each I/O operation doesn't have any environment, so it
+//! relies on memory layout and unsafe casting to translate an `OVERLAPPED`
+//! pointer (or in this case a `selector::Overlapped` pointer) to a type of
+//! `FromRawArc<T>` (see module docs for why this type exists).
+//!
+//! ## Thread Safety
+//!
+//! Currently all of the I/O primitives make liberal use of `Arc` and `Mutex`
+//! as an implementation detail. The main reason for this is to ensure that the
+//! types are `Send` and `Sync`, but the implementations have not been stressed
+//! in multithreaded situations yet. As a result, there are bound to be
+//! functional surprises in using these concurrently.
+//!
+//! ## Future Work
+//!
+//! First up, let's take a look at unimplemented portions of this module:
+//!
+//! * The `PollOpt::level()` option is currently entirely unimplemented.
+//! * Each `EventLoop` currently owns its completion port, but this prevents an
+//! I/O handle from being added to multiple event loops (something that can be
+//! done on Unix). Additionally, it hinders event loops moving across threads.
+//! This should be solved by likely having a global `Selector` which all
+//! others then communicate with.
+//! * Although Unix sockets don't exist on Windows, there are named pipes and
+//! those should likely be bound here in a similar fashion to `TcpStream`.
+//!
+//! Next up, there are a few performance improvements and optimizations that can
+//! still be implemented
+//!
+//! * Buffer management right now is pretty bad: buffers are all just
+//! allocated right before an I/O operation and discarded right after.
+//! There should at least be some form of pooling and reusing of buffers.
+//! * No calls to `write` are internally buffered before being scheduled, which
+//! means that writing performance is abysmal compared to Unix. There
+//! should probably be some level of buffering of writes.
+
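+//! As a rough sketch of the completion-to-readiness translation described
+//! above (condensed from the selector code in this module, not extra API):
+//!
+//!     // 1. Issue a 0-byte overlapped read; the kernel holds the pointers
+//!     //    until the completion packet is dequeued.
+//!     socket.read_overlapped(&mut [], overlapped.as_mut_ptr())?;
+//!     // 2. `Selector::select` later dequeues a CompletionStatus and casts
+//!     //    its OVERLAPPED pointer back to our `Overlapped` wrapper...
+//!     let cb = (*(status.overlapped() as *mut Overlapped)).callback;
+//!     // 3. ...whose callback turns the completion into readiness, e.g.
+//!     //    `set_readiness(Ready::readable())`.
+//!     cb(status.entry());
+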
+use std::io;
+use std::os::windows::prelude::*;
+
+mod kernel32 {
+ pub use ::winapi::um::ioapiset::CancelIoEx;
+ pub use ::winapi::um::winbase::SetFileCompletionNotificationModes;
+}
+mod winapi {
+ pub use ::winapi::shared::minwindef::{TRUE, UCHAR};
+ pub use ::winapi::um::winnt::HANDLE;
+}
+
+mod awakener;
+#[macro_use]
+mod selector;
+mod tcp;
+mod udp;
+mod from_raw_arc;
+mod buffer_pool;
+
+pub use self::awakener::Awakener;
+pub use self::selector::{Events, Selector, Overlapped, Binding};
+pub use self::tcp::{TcpStream, TcpListener};
+pub use self::udp::UdpSocket;
+
+#[derive(Copy, Clone)]
+enum Family {
+ V4, V6,
+}
+
+unsafe fn cancel(socket: &AsRawSocket,
+ overlapped: &Overlapped) -> io::Result<()> {
+ let handle = socket.as_raw_socket() as winapi::HANDLE;
+ let ret = kernel32::CancelIoEx(handle, overlapped.as_mut_ptr());
+ if ret == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+}
+
+unsafe fn no_notify_on_instant_completion(handle: winapi::HANDLE) -> io::Result<()> {
+ // TODO: move those to winapi
+ const FILE_SKIP_COMPLETION_PORT_ON_SUCCESS: winapi::UCHAR = 1;
+ const FILE_SKIP_SET_EVENT_ON_HANDLE: winapi::UCHAR = 2;
+
+ let flags = FILE_SKIP_COMPLETION_PORT_ON_SUCCESS | FILE_SKIP_SET_EVENT_ON_HANDLE;
+
+ let r = kernel32::SetFileCompletionNotificationModes(handle, flags);
+ if r == winapi::TRUE {
+ Ok(())
+ } else {
+ Err(io::Error::last_os_error())
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/selector.rs b/third_party/rust/mio-0.6.23/src/sys/windows/selector.rs
new file mode 100644
index 0000000000..23b145acdd
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/selector.rs
@@ -0,0 +1,538 @@
+#![allow(deprecated)]
+
+use std::{fmt, io};
+use std::cell::UnsafeCell;
+use std::os::windows::prelude::*;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use std::time::Duration;
+
+use lazycell::AtomicLazyCell;
+
+use winapi::shared::winerror::WAIT_TIMEOUT;
+use winapi::um::minwinbase::{OVERLAPPED, OVERLAPPED_ENTRY};
+use miow;
+use miow::iocp::{CompletionPort, CompletionStatus};
+
+use event_imp::{Event, Evented, Ready};
+use poll::{self, Poll};
+use sys::windows::buffer_pool::BufferPool;
+use {Token, PollOpt};
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that is previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return with an error. This matches Windows behavior.
+static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+/// The guts of the Windows event loop, this is the struct which actually owns
+/// a completion port.
+///
+/// Internally this is just an `Arc`, and this allows handing out references to
+/// the internals to I/O handles registered on this selector. This is
+/// required to schedule I/O operations independently of being inside the event
+/// loop (e.g. when a call to `write` is seen we're not "in the event loop").
+pub struct Selector {
+ inner: Arc<SelectorInner>,
+}
+
+struct SelectorInner {
+ /// Unique identifier of the `Selector`
+ id: usize,
+
+ /// The actual completion port that's used to manage all I/O
+ port: CompletionPort,
+
+ /// A pool of buffers usable by this selector.
+ ///
+ /// Primitives will take buffers from this pool to perform I/O operations,
+ /// and once complete they'll be put back in.
+ buffers: Mutex<BufferPool>,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // offset by 1 to avoid choosing 0 as the id of a selector
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+
+ CompletionPort::new(0).map(|cp| {
+ Selector {
+ inner: Arc::new(SelectorInner {
+ id: id,
+ port: cp,
+ buffers: Mutex::new(BufferPool::new(256)),
+ }),
+ }
+ })
+ }
+
+ pub fn select(&self,
+ events: &mut Events,
+ awakener: Token,
+ timeout: Option<Duration>) -> io::Result<bool> {
+ trace!("select; timeout={:?}", timeout);
+
+ // Clear out the previous list of I/O events and get some more!
+ events.clear();
+
+ trace!("polling IOCP");
+ let n = match self.inner.port.get_many(&mut events.statuses, timeout) {
+ Ok(statuses) => statuses.len(),
+ Err(ref e) if e.raw_os_error() == Some(WAIT_TIMEOUT as i32) => 0,
+ Err(e) => return Err(e),
+ };
+
+ let mut ret = false;
+ for status in events.statuses[..n].iter() {
+ // This should only ever happen from the awakener, and we should
+ // only ever have one awakener right now, so assert as such.
+ if status.overlapped() as usize == 0 {
+ assert_eq!(status.token(), usize::from(awakener));
+ ret = true;
+ continue;
+ }
+
+ let callback = unsafe {
+ (*(status.overlapped() as *mut Overlapped)).callback
+ };
+
+ trace!("select; -> got overlapped");
+ callback(status.entry());
+ }
+
+ trace!("returning");
+ Ok(ret)
+ }
+
+ /// Gets a reference to the underlying `CompletionPort` structure.
+ pub fn port(&self) -> &CompletionPort {
+ &self.inner.port
+ }
+
+ /// Gets a new reference to this selector, although all underlying data
+ /// structures will refer to the same completion port.
+ pub fn clone_ref(&self) -> Selector {
+ Selector { inner: self.inner.clone() }
+ }
+
+ /// Return the `Selector`'s identifier
+ pub fn id(&self) -> usize {
+ self.inner.id
+ }
+}
+
+impl SelectorInner {
+ fn identical(&self, other: &SelectorInner) -> bool {
+ (self as *const SelectorInner) == (other as *const SelectorInner)
+ }
+}
+
+// A registration is stored in each I/O object which keeps track of how it is
+// associated with a `Selector` above.
+//
+// Once associated with a `Selector`, a registration can never be un-associated
+// (due to IOCP requirements). This is actually implemented through the
+// `poll::Registration` and `poll::SetReadiness` APIs to keep track of all the
+// level/edge/filtering business.
+/// A `Binding` is embedded in all I/O objects associated with a `Poll`
+/// object.
+///
+/// Each registration keeps track of which selector the I/O object is
+/// associated with, ensuring that implementations of `Evented` can be
+/// conformant for the various methods on Windows.
+///
+/// If you're working with custom IOCP-enabled objects then you'll want to
+/// ensure that one of these instances is stored in your object and used in the
+/// implementation of `Evented`.
+///
+/// For more information about how to use this see the `windows` module
+/// documentation in this crate.
+pub struct Binding {
+ selector: AtomicLazyCell<Arc<SelectorInner>>,
+}
+
+impl Binding {
+ /// Creates a new blank binding ready to be inserted into an I/O
+ /// object.
+ ///
+ /// Won't actually do anything until associated with a `Poll` loop.
+ pub fn new() -> Binding {
+ Binding { selector: AtomicLazyCell::new() }
+ }
+
+ /// Registers a new handle with the `Poll` specified, also assigning the
+ /// `token` specified.
+ ///
+ /// This function is intended to be used as part of `Evented::register` for
+ /// custom IOCP objects. It will add the specified handle to the internal
+ /// IOCP object with the provided `token`. All future events generated by
+/// the handle provided will be received by the `Poll`'s internal IOCP
+ /// object.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe as the `Poll` instance has assumptions about
+ /// what the `OVERLAPPED` pointer used for each I/O operation looks like.
+ /// Specifically they must all be instances of the `Overlapped` type in
+ /// this crate. More information about this can be found on the
+ /// `windows` module in this crate.
+ pub unsafe fn register_handle(&self,
+ handle: &AsRawHandle,
+ token: Token,
+ poll: &Poll) -> io::Result<()> {
+ let selector = poll::selector(poll);
+
+ // Ignore errors, we'll see them on the next line.
+ drop(self.selector.fill(selector.inner.clone()));
+ self.check_same_selector(poll)?;
+
+ selector.inner.port.add_handle(usize::from(token), handle)
+ }
+
+ /// Same as `register_handle` but for sockets.
+ pub unsafe fn register_socket(&self,
+ handle: &AsRawSocket,
+ token: Token,
+ poll: &Poll) -> io::Result<()> {
+ let selector = poll::selector(poll);
+ drop(self.selector.fill(selector.inner.clone()));
+ self.check_same_selector(poll)?;
+ selector.inner.port.add_socket(usize::from(token), handle)
+ }
+
+ /// Reregisters the handle provided from the `Poll` provided.
+ ///
+ /// This is intended to be used as part of `Evented::reregister` but note
+ /// that this function does not currently reregister the provided handle
+ /// with the `poll` specified. IOCP has a special binding for changing the
+ /// token which has not yet been implemented. Instead this function should
+ /// be used to assert that the call to `reregister` happened on the same
+/// `Poll` that was passed into `register`.
+ ///
+ /// Eventually, though, the provided `handle` will be re-assigned to have
+ /// the token `token` on the given `poll` assuming that it's been
+ /// previously registered with it.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe for similar reasons to `register`. That is,
+ /// there may be pending I/O events and such which aren't handled correctly.
+ pub unsafe fn reregister_handle(&self,
+ _handle: &AsRawHandle,
+ _token: Token,
+ poll: &Poll) -> io::Result<()> {
+ self.check_same_selector(poll)
+ }
+
+ /// Same as `reregister_handle`, but for sockets.
+ pub unsafe fn reregister_socket(&self,
+ _socket: &AsRawSocket,
+ _token: Token,
+ poll: &Poll) -> io::Result<()> {
+ self.check_same_selector(poll)
+ }
+
+ /// Deregisters the handle provided from the `Poll` provided.
+ ///
+ /// This is intended to be used as part of `Evented::deregister` but note
+ /// that this function does not currently deregister the provided handle
+ /// from the `poll` specified. IOCP has a special binding for that which has
+ /// not yet been implemented. Instead this function should be used to assert
+ /// that the call to `deregister` happened on the same `Poll` that was
+/// passed into `register`.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe for similar reasons to `register`. That is,
+ /// there may be pending I/O events and such which aren't handled correctly.
+ pub unsafe fn deregister_handle(&self,
+ _handle: &AsRawHandle,
+ poll: &Poll) -> io::Result<()> {
+ self.check_same_selector(poll)
+ }
+
+ /// Same as `deregister_handle`, but for sockets.
+ pub unsafe fn deregister_socket(&self,
+ _socket: &AsRawSocket,
+ poll: &Poll) -> io::Result<()> {
+ self.check_same_selector(poll)
+ }
+
+ fn check_same_selector(&self, poll: &Poll) -> io::Result<()> {
+ let selector = poll::selector(poll);
+ match self.selector.borrow() {
+ Some(prev) if prev.identical(&selector.inner) => Ok(()),
+ Some(_) |
+ None => Err(other("socket already registered")),
+ }
+ }
+}
+
+impl fmt::Debug for Binding {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Binding")
+ .finish()
+ }
+}
+
+/// Helper struct used for TCP and UDP which bundles a `binding` with a
+/// `SetReadiness` handle.
+pub struct ReadyBinding {
+ binding: Binding,
+ readiness: Option<poll::SetReadiness>,
+}
+
+impl ReadyBinding {
+ /// Creates a new blank binding ready to be inserted into an I/O object.
+ ///
+ /// Won't actually do anything until associated with a `Selector` loop.
+ pub fn new() -> ReadyBinding {
+ ReadyBinding {
+ binding: Binding::new(),
+ readiness: None,
+ }
+ }
+
+ /// Returns whether this binding has been associated with a selector
+ /// yet.
+ pub fn registered(&self) -> bool {
+ self.readiness.is_some()
+ }
+
+ /// Acquires a buffer with at least `size` capacity.
+ ///
+ /// If associated with a selector, this will attempt to pull a buffer from
+ /// that buffer pool. If not associated with a selector, this will allocate
+ /// a fresh buffer.
+ pub fn get_buffer(&self, size: usize) -> Vec<u8> {
+ match self.binding.selector.borrow() {
+ Some(i) => i.buffers.lock().unwrap().get(size),
+ None => Vec::with_capacity(size),
+ }
+ }
+
+ /// Returns a buffer to this binding.
+ ///
+ /// If associated with a selector, this will push the buffer back into the
+ /// selector's pool of buffers. Otherwise this will just drop the buffer.
+ pub fn put_buffer(&self, buf: Vec<u8>) {
+ if let Some(i) = self.binding.selector.borrow() {
+ i.buffers.lock().unwrap().put(buf);
+ }
+ }
+
+ /// Sets the readiness of this I/O object to a particular `set`.
+ ///
+ /// This is later used to fill out and respond to requests to `poll`. Note
+ /// that this is all implemented through the `SetReadiness` structure in the
+ /// `poll` module.
+ pub fn set_readiness(&self, set: Ready) {
+ if let Some(ref i) = self.readiness {
+ trace!("set readiness to {:?}", set);
+ i.set_readiness(set).expect("event loop disappeared?");
+ }
+ }
+
+ /// Queries what the current readiness of this I/O object is.
+ ///
+ /// This is what's being used to generate events returned by `poll`.
+ pub fn readiness(&self) -> Ready {
+ match self.readiness {
+ Some(ref i) => i.readiness(),
+ None => Ready::empty(),
+ }
+ }
+
+ /// Essentially the implementation of the `Evented::register` function.
+ ///
+ /// Returns an error if we're already registered with another event loop,
+ /// and otherwise just reassociates ourselves with the event loop to
+ /// possibly change tokens.
+ pub fn register_socket(&mut self,
+ socket: &AsRawSocket,
+ poll: &Poll,
+ token: Token,
+ events: Ready,
+ opts: PollOpt,
+ registration: &Mutex<Option<poll::Registration>>)
+ -> io::Result<()> {
+ trace!("register {:?} {:?}", token, events);
+ unsafe {
+ self.binding.register_socket(socket, token, poll)?;
+ }
+
+ let (r, s) = poll::new_registration(poll, token, events, opts);
+ self.readiness = Some(s);
+ *registration.lock().unwrap() = Some(r);
+ Ok(())
+ }
+
+ /// Implementation of `Evented::reregister` function.
+ pub fn reregister_socket(&mut self,
+ socket: &AsRawSocket,
+ poll: &Poll,
+ token: Token,
+ events: Ready,
+ opts: PollOpt,
+ registration: &Mutex<Option<poll::Registration>>)
+ -> io::Result<()> {
+ trace!("reregister {:?} {:?}", token, events);
+ unsafe {
+ self.binding.reregister_socket(socket, token, poll)?;
+ }
+
+ registration.lock().unwrap()
+ .as_mut().unwrap()
+ .reregister(poll, token, events, opts)
+ }
+
+ /// Implementation of the `Evented::deregister` function.
+ ///
+ /// Doesn't allow registration with another event loop, just shuts down
+ /// readiness notifications and such.
+ pub fn deregister(&mut self,
+ socket: &AsRawSocket,
+ poll: &Poll,
+ registration: &Mutex<Option<poll::Registration>>)
+ -> io::Result<()> {
+ trace!("deregistering");
+ unsafe {
+ self.binding.deregister_socket(socket, poll)?;
+ }
+
+ registration.lock().unwrap()
+ .as_ref().unwrap()
+ .deregister(poll)
+ }
+}
+
+fn other(s: &str) -> io::Error {
+ io::Error::new(io::ErrorKind::Other, s)
+}
+
+#[derive(Debug)]
+pub struct Events {
+ /// Raw I/O event completions are filled in here by the call to `get_many`
+ /// on the completion port above. These are then processed to run callbacks
+ /// which figure out what to do after the event is done.
+ statuses: Box<[CompletionStatus]>,
+
+ /// Literal events returned by `get` to the upwards `EventLoop`. This file
+ /// doesn't really modify this (except for the awakener), instead almost all
+ /// events are filled in by the `ReadinessQueue` from the `poll` module.
+ events: Vec<Event>,
+}
+
+impl Events {
+ pub fn with_capacity(cap: usize) -> Events {
+ // Note that it's possible for the output `events` to grow beyond the
+ // capacity as it can also include deferred events, but that's certainly
+ // not the end of the world!
+ Events {
+ statuses: vec![CompletionStatus::zero(); cap].into_boxed_slice(),
+ events: Vec::with_capacity(cap),
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.events.get(idx).map(|e| *e)
+ }
+
+ pub fn push_event(&mut self, event: Event) {
+ self.events.push(event);
+ }
+
+ pub fn clear(&mut self) {
+ self.events.truncate(0);
+ }
+}
+
+macro_rules! overlapped2arc {
+ ($e:expr, $t:ty, $($field:ident).+) => (
+ #[allow(deref_nullptr)]
+ {
+ let offset = offset_of!($t, $($field).+);
+ debug_assert!(offset < mem::size_of::<$t>());
+ FromRawArc::from_raw(($e as usize - offset) as *mut $t)
+ }
+ )
+}
+
+macro_rules! offset_of {
+ ($t:ty, $($field:ident).+) => (
+ &(*(0 as *const $t)).$($field).+ as *const _ as usize
+ )
+}
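+
+// Illustrative use of the two macros above (a sketch of what the completion
+// callbacks in `tcp.rs` and `udp.rs` do): given the OVERLAPPED pointer
+// delivered for the `read` field of a `StreamIo`, recover the owning arc:
+//
+//     let imp = StreamImp {
+//         inner: unsafe { overlapped2arc!(status.overlapped(), StreamIo, read) },
+//     };
+//
+// Each recovery "repays" one reference count loaned out via `mem::forget`
+// when the operation was scheduled. A modern rewrite would use the
+// `memoffset` crate's `offset_of!` instead of the null-deref trick above.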
+
+// See sys::windows module docs for why this exists.
+//
+// The gist of it is that `Selector` assumes that all `OVERLAPPED` pointers are
+// actually inside one of these structures so it can use the `Callback` stored
+// right after it.
+//
+// We use repr(C) here to ensure that we can assume the overlapped pointer is
+// at the start of the structure so we can just do a cast.
+/// A wrapper around an internal instance over `miow::Overlapped` which is in
+/// turn a wrapper around the Windows type `OVERLAPPED`.
+///
+/// This type is required to be used for all IOCP operations on handles that are
+/// registered with an event loop. The event loop will receive notifications
+/// over `OVERLAPPED` pointers that have completed, and it will cast that
+/// pointer to a pointer to this structure and invoke the associated callback.
+#[repr(C)]
+pub struct Overlapped {
+ inner: UnsafeCell<miow::Overlapped>,
+ callback: fn(&OVERLAPPED_ENTRY),
+}
+
+impl Overlapped {
+ /// Creates a new `Overlapped` which will invoke the provided `cb` callback
+ /// whenever it's triggered.
+ ///
+ /// The returned `Overlapped` must be used as the `OVERLAPPED` passed to all
+ /// I/O operations that are registered with mio's event loop. When the I/O
+ /// operation associated with an `OVERLAPPED` pointer completes the event
+ /// loop will invoke the function pointer provided by `cb`.
+ pub fn new(cb: fn(&OVERLAPPED_ENTRY)) -> Overlapped {
+ Overlapped {
+ inner: UnsafeCell::new(miow::Overlapped::zero()),
+ callback: cb,
+ }
+ }
+
+ /// Get the underlying `Overlapped` instance as a raw pointer.
+ ///
+ /// This can be useful when only a shared borrow is held and the overlapped
+ /// pointer needs to be passed down to winapi.
+ pub fn as_mut_ptr(&self) -> *mut OVERLAPPED {
+ unsafe {
+ (*self.inner.get()).raw()
+ }
+ }
+}
+
+impl fmt::Debug for Overlapped {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Overlapped")
+ .finish()
+ }
+}
+
+// Overlapped's APIs are marked as unsafe as they must be used with caution
+// to ensure thread safety. The structure itself is safe to send across
+// threads.
+unsafe impl Send for Overlapped {}
+unsafe impl Sync for Overlapped {}
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/tcp.rs b/third_party/rust/mio-0.6.23/src/sys/windows/tcp.rs
new file mode 100644
index 0000000000..236e7866a6
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/tcp.rs
@@ -0,0 +1,853 @@
+use std::fmt;
+use std::io::{self, Read, ErrorKind};
+use std::mem;
+use std::net::{self, SocketAddr, Shutdown};
+use std::os::windows::prelude::*;
+use std::sync::{Mutex, MutexGuard};
+use std::time::Duration;
+
+use miow::iocp::CompletionStatus;
+use miow::net::*;
+use net2::{TcpBuilder, TcpStreamExt as Net2TcpExt};
+use winapi::um::minwinbase::OVERLAPPED_ENTRY;
+use winapi::um::winnt::HANDLE;
+use iovec::IoVec;
+
+use {poll, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use sys::windows::from_raw_arc::FromRawArc;
+use sys::windows::selector::{Overlapped, ReadyBinding};
+use sys::windows::Family;
+
+pub struct TcpStream {
+ /// Separately stored implementation to ensure that the `Drop`
+ /// implementation on this type is only executed when it's actually dropped
+ /// (many clones of this `imp` are made).
+ imp: StreamImp,
+ registration: Mutex<Option<poll::Registration>>,
+}
+
+pub struct TcpListener {
+ imp: ListenerImp,
+ registration: Mutex<Option<poll::Registration>>,
+}
+
+#[derive(Clone)]
+struct StreamImp {
+ /// A stable address and synchronized access for all internals. This serves
+ /// to ensure that all `Overlapped` pointers are valid for a long period of
+ /// time as well as allowing completion callbacks to have access to the
+ /// internals without having ownership.
+ ///
+ /// Note that the reference count also allows us to "loan out" copies to
+ /// completion ports while I/O is running to guarantee that this stays alive
+ /// until the I/O completes. You'll notice a number of calls to
+ /// `mem::forget` below, and these only happen on successful scheduling of
+ /// I/O and are paired with `overlapped2arc!` macro invocations in the
+ /// completion callbacks (to have a decrement match the increment).
+ inner: FromRawArc<StreamIo>,
+}
+
+#[derive(Clone)]
+struct ListenerImp {
+ inner: FromRawArc<ListenerIo>,
+}
+
+struct StreamIo {
+ inner: Mutex<StreamInner>,
+ read: Overlapped, // also used for connect
+ write: Overlapped,
+ socket: net::TcpStream,
+}
+
+struct ListenerIo {
+ inner: Mutex<ListenerInner>,
+ accept: Overlapped,
+ family: Family,
+ socket: net::TcpListener,
+}
+
+struct StreamInner {
+ iocp: ReadyBinding,
+ deferred_connect: Option<SocketAddr>,
+ read: State<(), ()>,
+ write: State<(Vec<u8>, usize), (Vec<u8>, usize)>,
+ /// whether we are instantly notified of success
+ /// (FILE_SKIP_COMPLETION_PORT_ON_SUCCESS,
+ /// without a roundtrip through the event loop)
+ instant_notify: bool,
+}
+
+struct ListenerInner {
+ iocp: ReadyBinding,
+ accept: State<net::TcpStream, (net::TcpStream, SocketAddr)>,
+ accept_buf: AcceptAddrsBuf,
+}
+
+enum State<T, U> {
+ Empty, // no I/O operation in progress
+ Pending(T), // an I/O operation is in progress
+ Ready(U), // I/O has finished with this value
+ Error(io::Error), // there was an I/O error
+}
+
+impl TcpStream {
+ fn new(socket: net::TcpStream,
+ deferred_connect: Option<SocketAddr>) -> TcpStream {
+ TcpStream {
+ registration: Mutex::new(None),
+ imp: StreamImp {
+ inner: FromRawArc::new(StreamIo {
+ read: Overlapped::new(read_done),
+ write: Overlapped::new(write_done),
+ socket: socket,
+ inner: Mutex::new(StreamInner {
+ iocp: ReadyBinding::new(),
+ deferred_connect: deferred_connect,
+ read: State::Empty,
+ write: State::Empty,
+ instant_notify: false,
+ }),
+ }),
+ },
+ }
+ }
+
+ pub fn connect(socket: net::TcpStream, addr: &SocketAddr)
+ -> io::Result<TcpStream> {
+ socket.set_nonblocking(true)?;
+ Ok(TcpStream::new(socket, Some(*addr)))
+ }
+
+ pub fn from_stream(stream: net::TcpStream) -> TcpStream {
+ TcpStream::new(stream, None)
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.imp.inner.socket.peer_addr()
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.imp.inner.socket.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpStream> {
+ self.imp.inner.socket.try_clone().map(|s| TcpStream::new(s, None))
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.imp.inner.socket.shutdown(how)
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_nodelay(nodelay)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.imp.inner.socket.nodelay()
+ }
+
+ pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.imp.inner.socket.set_recv_buffer_size(size)
+ }
+
+ pub fn recv_buffer_size(&self) -> io::Result<usize> {
+ self.imp.inner.socket.recv_buffer_size()
+ }
+
+ pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.imp.inner.socket.set_send_buffer_size(size)
+ }
+
+ pub fn send_buffer_size(&self) -> io::Result<usize> {
+ self.imp.inner.socket.send_buffer_size()
+ }
+
+ pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+ self.imp.inner.socket.set_keepalive(keepalive)
+ }
+
+ pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+ self.imp.inner.socket.keepalive()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.imp.inner.socket.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.imp.inner.socket.ttl()
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.imp.inner.socket.only_v6()
+ }
+
+ pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+ Net2TcpExt::set_linger(&self.imp.inner.socket, dur)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ Net2TcpExt::linger(&self.imp.inner.socket)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ if let Some(e) = self.imp.inner.socket.take_error()? {
+ return Ok(Some(e))
+ }
+
+ // If the syscall didn't return anything then also check to see if we've
+ // squirreled away an error elsewhere, for example as part of a connect
+ // operation.
+ //
+ // Typically this is used like so:
+ //
+ // 1. A `connect` is issued
+ // 2. Wait for the socket to be writable
+ // 3. Call `take_error` to see if the connect succeeded.
+ //
+ // Right now the `connect` operation finishes in `read_done` below and
+ // will fill in `State::Error` in the `read` slot if it fails, so we
+ // extract that here.
+ let mut me = self.inner();
+ match mem::replace(&mut me.read, State::Empty) {
+ State::Error(e) => {
+ self.imp.schedule_read(&mut me);
+ Ok(Some(e))
+ }
+ other => {
+ me.read = other;
+ Ok(None)
+ }
+ }
+ }
+
+ fn inner(&self) -> MutexGuard<StreamInner> {
+ self.imp.inner()
+ }
+
+ fn before_read(&self) -> io::Result<MutexGuard<StreamInner>> {
+ let mut me = self.inner();
+
+ match me.read {
+ // Empty == we're not associated yet, and Pending == an operation is
+ // in flight; both are cases where we return "would block"
+ State::Empty |
+ State::Pending(()) => return Err(io::ErrorKind::WouldBlock.into()),
+
+ // If we got a delayed error as part of a `read_overlapped` below,
+ // return that here. Also schedule another read in case it was
+ // transient.
+ State::Error(_) => {
+ let e = match mem::replace(&mut me.read, State::Empty) {
+ State::Error(e) => e,
+ _ => panic!(),
+ };
+ self.imp.schedule_read(&mut me);
+ return Err(e)
+ }
+
+ // If we're ready for a read then some previous 0-byte read has
+ // completed. In that case the OS's socket buffer has something for
+ // us, so we just keep pulling out bytes while we can in the loop
+ // below.
+ State::Ready(()) => {}
+ }
+
+ Ok(me)
+ }
+
+ fn post_register(&self, interest: Ready, me: &mut StreamInner) {
+ if interest.is_readable() {
+ self.imp.schedule_read(me);
+ }
+
+ // At least with epoll, if a socket is registered with an interest in
+ // writing and it's immediately writable then a writable event is
+ // generated immediately, so do so here.
+ if interest.is_writable() {
+ if let State::Empty = me.write {
+ self.imp.add_readiness(me, Ready::writable());
+ }
+ }
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ match IoVec::from_bytes_mut(buf) {
+ Some(vec) => self.readv(&mut [vec]),
+ None => Ok(0),
+ }
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut me = self.before_read()?;
+
+ match (&self.imp.inner.socket).peek(buf) {
+ Ok(n) => Ok(n),
+ Err(e) => {
+ me.read = State::Empty;
+ self.imp.schedule_read(&mut me);
+ Err(e)
+ }
+ }
+ }
+
+ pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ let mut me = self.before_read()?;
+
+ // TODO: Does WSARecv work on nonblocking sockets? We ideally want to
+ // call that instead of looping over all the buffers and calling
+ // `recv` on each buffer. I'm not sure, though, whether an overlapped
+ // socket in nonblocking mode would work with that use case, so for
+ // now we just call `recv`.
+
+ let mut amt = 0;
+ for buf in bufs {
+ match (&self.imp.inner.socket).read(buf) {
+ // If we did a partial read, then return what we've read so far
+ Ok(n) if n < buf.len() => return Ok(amt + n),
+
+ // Otherwise filled this buffer entirely, so try to fill the
+ // next one as well.
+ Ok(n) => amt += n,
+
+ // If we hit an error then things get tricky if we've already
+ // read some data. If the error is "would block" then we just
+ // return the data we've read so far while scheduling another
+ // 0-byte read.
+ //
+ // If we've read data and the error kind is not "would block",
+ // then we stash away the error to get returned later and return
+ // the data that we've read.
+ //
+ // Finally if we haven't actually read any data we just
+ // reschedule a 0-byte read to happen again and then return the
+ // error upwards.
+ Err(e) => {
+ if amt > 0 && e.kind() == io::ErrorKind::WouldBlock {
+ me.read = State::Empty;
+ self.imp.schedule_read(&mut me);
+ return Ok(amt)
+ } else if amt > 0 {
+ me.read = State::Error(e);
+ return Ok(amt)
+ } else {
+ me.read = State::Empty;
+ self.imp.schedule_read(&mut me);
+ return Err(e)
+ }
+ }
+ }
+ }
+
+ Ok(amt)
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ match IoVec::from_bytes(buf) {
+ Some(vec) => self.writev(&[vec]),
+ None => Ok(0),
+ }
+ }
+
+ pub fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ let mut me = self.inner();
+ let me = &mut *me;
+
+ match mem::replace(&mut me.write, State::Empty) {
+ State::Empty => {}
+ State::Error(e) => return Err(e),
+ other => {
+ me.write = other;
+ return Err(io::ErrorKind::WouldBlock.into())
+ }
+ }
+
+ if !me.iocp.registered() {
+ return Err(io::ErrorKind::WouldBlock.into())
+ }
+
+ if bufs.is_empty() {
+ return Ok(0)
+ }
+
+ let len = bufs.iter().map(|b| b.len()).fold(0, |a, b| a + b);
+ let mut intermediate = me.iocp.get_buffer(len);
+ for buf in bufs {
+ intermediate.extend_from_slice(buf);
+ }
+ self.imp.schedule_write(intermediate, 0, me);
+ Ok(len)
+ }
+
+ pub fn flush(&self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl StreamImp {
+ fn inner(&self) -> MutexGuard<StreamInner> {
+ self.inner.inner.lock().unwrap()
+ }
+
+ fn schedule_connect(&self, addr: &SocketAddr) -> io::Result<()> {
+ unsafe {
+ trace!("scheduling a connect");
+ self.inner.socket.connect_overlapped(addr, &[], self.inner.read.as_mut_ptr())?;
+ }
+ // see docs above on StreamImp.inner for rationale on forget
+ mem::forget(self.clone());
+ Ok(())
+ }
+
+ /// Schedule a read to happen on this socket, enqueuing us to receive a
+ /// notification when a read is ready.
+ ///
+ /// Note that this does *not* work with a buffer. When reading a TCP stream
+ /// we actually read into a 0-byte buffer so Windows will send us a
+ /// notification when the socket is otherwise ready for reading. This allows
+ /// us to avoid buffer allocations for in-flight reads.
+ fn schedule_read(&self, me: &mut StreamInner) {
+ match me.read {
+ State::Empty => {}
+ State::Ready(_) | State::Error(_) => {
+ self.add_readiness(me, Ready::readable());
+ return;
+ }
+ _ => return,
+ }
+
+ me.iocp.set_readiness(me.iocp.readiness() - Ready::readable());
+
+ trace!("scheduling a read");
+ let res = unsafe {
+ self.inner.socket.read_overlapped(&mut [], self.inner.read.as_mut_ptr())
+ };
+ match res {
+ // Note that `Ok(true)` means that this completed immediately and
+ // our socket is readable. This typically means that the caller of
+ // this function (likely `read` above) can try again as an
+ // optimization and return bytes quickly.
+ //
+ // Normally, even though the read completed immediately, there's
+ // still an IOCP completion packet enqueued that we're going to
+ // receive.
+ //
+ // You can configure this behavior (through miow) with
+ // SetFileCompletionNotificationModes to indicate that `Ok(true)`
+ // does **not** enqueue a completion packet. (This is the case
+ // for me.instant_notify)
+ //
+ // Note that apparently libuv has scary code to work around bugs in
+ // `WSARecv` for UDP sockets for handles which have had
+ // the `SetFileCompletionNotificationModes` function called on them,
+ // worth looking into!
+ Ok(Some(_)) if me.instant_notify => {
+ me.read = State::Ready(());
+ self.add_readiness(me, Ready::readable());
+ }
+ Ok(_) => {
+ // see docs above on StreamImp.inner for rationale on forget
+ me.read = State::Pending(());
+ mem::forget(self.clone());
+ }
+ Err(e) => {
+ me.read = State::Error(e);
+ self.add_readiness(me, Ready::readable());
+ }
+ }
+ }
+
+ /// Similar to `schedule_read`, except that this issues, well, writes.
+ ///
+ /// This function will continually attempt to write the entire contents of
+ /// the buffer `buf` until they have all been written. The `pos` argument is
+ /// the current offset within the buffer up to which the contents have
+ /// already been written.
+ ///
+ /// A new writable event (e.g. allowing another write) will only happen once
+ /// the buffer has been written completely (or hit an error).
+ fn schedule_write(&self,
+ buf: Vec<u8>,
+ mut pos: usize,
+ me: &mut StreamInner) {
+
+ // About to write, clear any pending level triggered events
+ me.iocp.set_readiness(me.iocp.readiness() - Ready::writable());
+
+ loop {
+ trace!("scheduling a write of {} bytes", buf[pos..].len());
+ let ret = unsafe {
+ self.inner.socket.write_overlapped(&buf[pos..], self.inner.write.as_mut_ptr())
+ };
+ match ret {
+ Ok(Some(transferred_bytes)) if me.instant_notify => {
+ trace!("done immediately with {} bytes", transferred_bytes);
+ if transferred_bytes == buf.len() - pos {
+ self.add_readiness(me, Ready::writable());
+ me.write = State::Empty;
+ break;
+ }
+ pos += transferred_bytes;
+ }
+ Ok(_) => {
+ trace!("scheduled for later");
+ // see docs above on StreamImp.inner for rationale on forget
+ me.write = State::Pending((buf, pos));
+ mem::forget(self.clone());
+ break;
+ }
+ Err(e) => {
+ trace!("write error: {}", e);
+ me.write = State::Error(e);
+ self.add_readiness(me, Ready::writable());
+ me.iocp.put_buffer(buf);
+ break;
+ }
+ }
+ }
+ }
+
+ /// Pushes an event for this socket onto the selector it's registered for.
+ ///
+ /// When an event is generated on this socket, if it happened after the
+ /// socket was closed then we don't want to actually push the event onto our
+ /// selector as otherwise it's just a spurious notification.
+ fn add_readiness(&self, me: &mut StreamInner, set: Ready) {
+ me.iocp.set_readiness(set | me.iocp.readiness());
+ }
+}
+
+fn read_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ let me2 = StreamImp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), StreamIo, read) },
+ };
+
+ let mut me = me2.inner();
+ match mem::replace(&mut me.read, State::Empty) {
+ State::Pending(()) => {
+ trace!("finished a read: {}", status.bytes_transferred());
+ assert_eq!(status.bytes_transferred(), 0);
+ me.read = State::Ready(());
+ return me2.add_readiness(&mut me, Ready::readable())
+ }
+ s => me.read = s,
+ }
+
+ // If a read didn't complete, then the connect must have just finished.
+ trace!("finished a connect");
+
+ // By guarding with socket.result(), we ensure that a connection
+ // was successfully made before performing operations requiring a
+ // connected socket.
+ match unsafe { me2.inner.socket.result(status.overlapped()) }
+ .and_then(|_| me2.inner.socket.connect_complete())
+ {
+ Ok(()) => {
+ me2.add_readiness(&mut me, Ready::writable());
+ me2.schedule_read(&mut me);
+ }
+ Err(e) => {
+ me2.add_readiness(&mut me, Ready::readable() | Ready::writable());
+ me.read = State::Error(e);
+ }
+ }
+}
+
+fn write_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ trace!("finished a write {}", status.bytes_transferred());
+ let me2 = StreamImp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), StreamIo, write) },
+ };
+ let mut me = me2.inner();
+ let (buf, pos) = match mem::replace(&mut me.write, State::Empty) {
+ State::Pending(pair) => pair,
+ _ => unreachable!(),
+ };
+ let new_pos = pos + (status.bytes_transferred() as usize);
+ if new_pos == buf.len() {
+ me2.add_readiness(&mut me, Ready::writable());
+ } else {
+ me2.schedule_write(buf, new_pos, &mut me);
+ }
+}
+
+impl Evented for TcpStream {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.register_socket(&self.imp.inner.socket, poll, token,
+ interest, opts, &self.registration)?;
+
+ unsafe {
+ super::no_notify_on_instant_completion(self.imp.inner.socket.as_raw_socket() as HANDLE)?;
+ me.instant_notify = true;
+ }
+
+ // If a connect was issued before this socket was registered, process
+ // that request here and carry on. Note that the callback for a
+ // successful connect will take care of generating writable/readable
+ // events and scheduling a new read.
+ if let Some(addr) = me.deferred_connect.take() {
+ return self.imp.schedule_connect(&addr).map(|_| ())
+ }
+ self.post_register(interest, &mut me);
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.reregister_socket(&self.imp.inner.socket, poll, token,
+ interest, opts, &self.registration)?;
+ self.post_register(interest, &mut me);
+ Ok(())
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner().iocp.deregister(&self.imp.inner.socket,
+ poll, &self.registration)
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("TcpStream")
+ .finish()
+ }
+}
+
+impl Drop for TcpStream {
+ fn drop(&mut self) {
+ // If we're still internally reading, we're no longer interested. Note
+ // though that we don't cancel any writes which may have been issued to
+ // preserve the same semantics as Unix.
+ //
+ // Note that "Empty" here may mean that a connect is pending, so we
+ // cancel in that case as well.
+ unsafe {
+ match self.inner().read {
+ State::Pending(_) | State::Empty => {
+ trace!("cancelling active TCP read");
+ drop(super::cancel(&self.imp.inner.socket,
+ &self.imp.inner.read));
+ }
+ State::Ready(_) | State::Error(_) => {}
+ }
+ }
+ }
+}
+
+impl TcpListener {
+ pub fn new(socket: net::TcpListener)
+ -> io::Result<TcpListener> {
+ let addr = socket.local_addr()?;
+ Ok(TcpListener::new_family(socket, match addr {
+ SocketAddr::V4(..) => Family::V4,
+ SocketAddr::V6(..) => Family::V6,
+ }))
+ }
+
+ fn new_family(socket: net::TcpListener, family: Family) -> TcpListener {
+ TcpListener {
+ registration: Mutex::new(None),
+ imp: ListenerImp {
+ inner: FromRawArc::new(ListenerIo {
+ accept: Overlapped::new(accept_done),
+ family: family,
+ socket: socket,
+ inner: Mutex::new(ListenerInner {
+ iocp: ReadyBinding::new(),
+ accept: State::Empty,
+ accept_buf: AcceptAddrsBuf::new(),
+ }),
+ }),
+ },
+ }
+ }
+
+ pub fn accept(&self) -> io::Result<(net::TcpStream, SocketAddr)> {
+ let mut me = self.inner();
+
+ let ret = match mem::replace(&mut me.accept, State::Empty) {
+ State::Empty => return Err(io::ErrorKind::WouldBlock.into()),
+ State::Pending(t) => {
+ me.accept = State::Pending(t);
+ return Err(io::ErrorKind::WouldBlock.into());
+ }
+ State::Ready((s, a)) => Ok((s, a)),
+ State::Error(e) => Err(e),
+ };
+
+ self.imp.schedule_accept(&mut me);
+
+ return ret
+ }
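
The `accept` method above is one instance of the four-state pattern this backend uses everywhere (`Empty`/`Pending`/`Ready`/`Error`). A self-contained sketch of that state machine, with placeholder type parameters standing in for the real sockets:

```rust
use std::io;
use std::mem;

enum State<T, U> {
    Empty,       // no operation scheduled
    Pending(T),  // overlapped operation in flight
    Ready(U),    // completed; value waiting for the caller
    Error(io::Error),
}

fn take<T, U>(state: &mut State<T, U>) -> io::Result<U> {
    match mem::replace(state, State::Empty) {
        // Nothing scheduled or still in flight: report WouldBlock,
        // exactly like `accept` above.
        State::Empty => Err(io::ErrorKind::WouldBlock.into()),
        State::Pending(t) => {
            *state = State::Pending(t); // put the in-flight op back
            Err(io::ErrorKind::WouldBlock.into())
        }
        State::Ready(u) => Ok(u),
        State::Error(e) => Err(e),
    }
}

fn main() {
    let mut st: State<(), &'static str> = State::Ready("hello");
    assert_eq!(take(&mut st).unwrap(), "hello");
    assert_eq!(take(&mut st).unwrap_err().kind(), io::ErrorKind::WouldBlock);
}
```
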
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.imp.inner.socket.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpListener> {
+ self.imp.inner.socket.try_clone().map(|s| {
+ TcpListener::new_family(s, self.imp.inner.family)
+ })
+ }
+
+ #[allow(deprecated)]
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_only_v6(only_v6)
+ }
+
+ #[allow(deprecated)]
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.imp.inner.socket.only_v6()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.imp.inner.socket.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.imp.inner.socket.ttl()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.imp.inner.socket.take_error()
+ }
+
+ fn inner(&self) -> MutexGuard<ListenerInner> {
+ self.imp.inner()
+ }
+}
+
+impl ListenerImp {
+ fn inner(&self) -> MutexGuard<ListenerInner> {
+ self.inner.inner.lock().unwrap()
+ }
+
+ fn schedule_accept(&self, me: &mut ListenerInner) {
+ match me.accept {
+ State::Empty => {}
+ _ => return
+ }
+
+ me.iocp.set_readiness(me.iocp.readiness() - Ready::readable());
+
+ let res = match self.inner.family {
+ Family::V4 => TcpBuilder::new_v4(),
+ Family::V6 => TcpBuilder::new_v6(),
+ }
+ .and_then(|builder| builder.to_tcp_stream())
+ .and_then(|stream| unsafe {
+ trace!("scheduling an accept");
+ self.inner
+ .socket
+ .accept_overlapped(&stream, &mut me.accept_buf, self.inner.accept.as_mut_ptr())
+ .map(|x| (stream, x))
+ });
+ match res {
+ Ok((socket, _)) => {
+ // see docs above on StreamImp.inner for rationale on forget
+ me.accept = State::Pending(socket);
+ mem::forget(self.clone());
+ }
+ Err(e) => {
+ me.accept = State::Error(e);
+ self.add_readiness(me, Ready::readable());
+ }
+ }
+ }
+
+ // See comments in StreamImp::add_readiness
+ fn add_readiness(&self, me: &mut ListenerInner, set: Ready) {
+ me.iocp.set_readiness(set | me.iocp.readiness());
+ }
+}
+
+fn accept_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ let me2 = ListenerImp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), ListenerIo, accept) },
+ };
+
+ let mut me = me2.inner();
+ let socket = match mem::replace(&mut me.accept, State::Empty) {
+ State::Pending(s) => s,
+ _ => unreachable!(),
+ };
+ trace!("finished an accept");
+ let result = me2.inner.socket.accept_complete(&socket).and_then(|()| {
+ me.accept_buf.parse(&me2.inner.socket)
+ }).and_then(|buf| {
+ buf.remote().ok_or_else(|| {
+ io::Error::new(ErrorKind::Other, "could not obtain remote address")
+ })
+ });
+ me.accept = match result {
+ Ok(remote_addr) => State::Ready((socket, remote_addr)),
+ Err(e) => State::Error(e),
+ };
+ me2.add_readiness(&mut me, Ready::readable());
+}
+
+impl Evented for TcpListener {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.register_socket(&self.imp.inner.socket, poll, token,
+ interest, opts, &self.registration)?;
+
+ unsafe {
+ super::no_notify_on_instant_completion(self.imp.inner.socket.as_raw_socket() as HANDLE)?;
+ }
+
+ self.imp.schedule_accept(&mut me);
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.reregister_socket(&self.imp.inner.socket, poll, token,
+ interest, opts, &self.registration)?;
+ self.imp.schedule_accept(&mut me);
+ Ok(())
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner().iocp.deregister(&self.imp.inner.socket,
+ poll, &self.registration)
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("TcpListener")
+ .finish()
+ }
+}
+
+impl Drop for TcpListener {
+ fn drop(&mut self) {
+ // If we're still internally reading, we're no longer interested.
+ unsafe {
+ match self.inner().accept {
+ State::Pending(_) => {
+ trace!("cancelling active TCP accept");
+ drop(super::cancel(&self.imp.inner.socket,
+ &self.imp.inner.accept));
+ }
+ State::Empty |
+ State::Ready(_) |
+ State::Error(_) => {}
+ }
+ }
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/udp.rs b/third_party/rust/mio-0.6.23/src/sys/windows/udp.rs
new file mode 100644
index 0000000000..f5ea96c324
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/udp.rs
@@ -0,0 +1,414 @@
+//! UDP for IOCP
+//!
+//! Note that most of this module is quite similar to the TCP module, so if
+//! something seems odd here you may also want to consult the docs over there.
+
+use std::fmt;
+use std::io::prelude::*;
+use std::io;
+use std::mem;
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+use std::sync::{Mutex, MutexGuard};
+
+#[allow(unused_imports)]
+use net2::{UdpBuilder, UdpSocketExt};
+use winapi::shared::winerror::WSAEMSGSIZE;
+use winapi::um::minwinbase::OVERLAPPED_ENTRY;
+use miow::iocp::CompletionStatus;
+use miow::net::SocketAddrBuf;
+use miow::net::UdpSocketExt as MiowUdpSocketExt;
+
+use {poll, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use sys::windows::from_raw_arc::FromRawArc;
+use sys::windows::selector::{Overlapped, ReadyBinding};
+
+pub struct UdpSocket {
+ imp: Imp,
+ registration: Mutex<Option<poll::Registration>>,
+}
+
+#[derive(Clone)]
+struct Imp {
+ inner: FromRawArc<Io>,
+}
+
+struct Io {
+ read: Overlapped,
+ write: Overlapped,
+ socket: net::UdpSocket,
+ inner: Mutex<Inner>,
+}
+
+struct Inner {
+ iocp: ReadyBinding,
+ read: State<Vec<u8>, Vec<u8>>,
+ write: State<Vec<u8>, (Vec<u8>, usize)>,
+ read_buf: SocketAddrBuf,
+}
+
+enum State<T, U> {
+ Empty,
+ Pending(T),
+ Ready(U),
+ Error(io::Error),
+}
+
+impl UdpSocket {
+ pub fn new(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ Ok(UdpSocket {
+ registration: Mutex::new(None),
+ imp: Imp {
+ inner: FromRawArc::new(Io {
+ read: Overlapped::new(recv_done),
+ write: Overlapped::new(send_done),
+ socket: socket,
+ inner: Mutex::new(Inner {
+ iocp: ReadyBinding::new(),
+ read: State::Empty,
+ write: State::Empty,
+ read_buf: SocketAddrBuf::new(),
+ }),
+ }),
+ },
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.imp.inner.socket.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.imp.inner.socket.try_clone().and_then(UdpSocket::new)
+ }
+
+ /// Note that unlike `TcpStream::write` this function will not attempt to
+ /// continue writing `buf` until it's entirely written.
+ ///
+ /// TODO: This... may be wrong in the long run. We're reporting that we
+ /// successfully wrote all of the bytes in `buf` but it's possible
+ /// that we don't actually end up writing all of them!
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr)
+ -> io::Result<usize> {
+ let mut me = self.inner();
+ let me = &mut *me;
+
+ match me.write {
+ State::Empty => {}
+ _ => return Err(io::ErrorKind::WouldBlock.into()),
+ }
+
+ if !me.iocp.registered() {
+ return Err(io::ErrorKind::WouldBlock.into())
+ }
+
+ let interest = me.iocp.readiness();
+ me.iocp.set_readiness(interest - Ready::writable());
+
+ let mut owned_buf = me.iocp.get_buffer(64 * 1024);
+ let amt = owned_buf.write(buf)?;
+ unsafe {
+ trace!("scheduling a send");
+ self.imp.inner.socket.send_to_overlapped(&owned_buf, target,
+ self.imp.inner.write.as_mut_ptr())
+ }?;
+ me.write = State::Pending(owned_buf);
+ mem::forget(self.imp.clone());
+ Ok(amt)
+ }
+
+ /// Note that unlike `TcpStream::write` this function will not attempt to
+ /// continue writing `buf` until it's entirely written.
+ ///
+ /// TODO: This... may be wrong in the long run. We're reporting that we
+ /// successfully wrote all of the bytes in `buf` but it's possible
+ /// that we don't actually end up writing all of them!
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ let mut me = self.inner();
+ let me = &mut *me;
+
+ match me.write {
+ State::Empty => {}
+ _ => return Err(io::ErrorKind::WouldBlock.into()),
+ }
+
+ if !me.iocp.registered() {
+ return Err(io::ErrorKind::WouldBlock.into())
+ }
+
+ let interest = me.iocp.readiness();
+ me.iocp.set_readiness(interest - Ready::writable());
+
+ let mut owned_buf = me.iocp.get_buffer(64 * 1024);
+ let amt = owned_buf.write(buf)?;
+ unsafe {
+ trace!("scheduling a send");
+ self.imp.inner.socket.send_overlapped(&owned_buf, self.imp.inner.write.as_mut_ptr())
+ }?;
+ me.write = State::Pending(owned_buf);
+ mem::forget(self.imp.clone());
+ Ok(amt)
+ }
+
+ pub fn recv_from(&self, mut buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ let mut me = self.inner();
+ match mem::replace(&mut me.read, State::Empty) {
+ State::Empty => Err(io::ErrorKind::WouldBlock.into()),
+ State::Pending(b) => { me.read = State::Pending(b); Err(io::ErrorKind::WouldBlock.into()) }
+ State::Ready(data) => {
+ // If we weren't provided enough space to receive the message
+ // then don't actually read any data, just return an error.
+ if buf.len() < data.len() {
+ me.read = State::Ready(data);
+ Err(io::Error::from_raw_os_error(WSAEMSGSIZE as i32))
+ } else {
+ let r = if let Some(addr) = me.read_buf.to_socket_addr() {
+ buf.write(&data).unwrap();
+ Ok((data.len(), addr))
+ } else {
+ Err(io::Error::new(io::ErrorKind::Other,
+ "failed to parse socket address"))
+ };
+ me.iocp.put_buffer(data);
+ self.imp.schedule_read_from(&mut me);
+ r
+ }
+ }
+ State::Error(e) => {
+ self.imp.schedule_read_from(&mut me);
+ Err(e)
+ }
+ }
+ }
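
From the caller's side, the `WSAEMSGSIZE` path above means a too-small buffer leaves the datagram parked in `State::Ready`, so the read can simply be retried with more room. A hedged caller-side sketch (the doubling strategy is illustrative, not part of mio):

```rust
use std::io;

const WSAEMSGSIZE: i32 = 10040; // Windows "message too long" os error

fn recv_growing<F>(mut recv: F) -> io::Result<Vec<u8>>
where
    F: FnMut(&mut [u8]) -> io::Result<usize>,
{
    let mut buf = vec![0u8; 2048];
    loop {
        match recv(&mut buf) {
            Ok(n) => { buf.truncate(n); return Ok(buf); }
            Err(ref e) if e.raw_os_error() == Some(WSAEMSGSIZE) => {
                buf.resize(buf.len() * 2, 0); // datagram kept; try again with more room
            }
            Err(e) => return Err(e),
        }
    }
}

fn main() {
    let datagram = vec![7u8; 4096]; // pretend a 4 KiB datagram arrived
    let got = recv_growing(|buf| {
        if buf.len() < datagram.len() {
            Err(io::Error::from_raw_os_error(WSAEMSGSIZE))
        } else {
            buf[..datagram.len()].copy_from_slice(&datagram);
            Ok(datagram.len())
        }
    })
    .unwrap();
    assert_eq!(got.len(), 4096);
}
```
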
+
+ pub fn recv(&self, buf: &mut [u8])
+ -> io::Result<usize> {
+ // Since recv_from can be used on connected sockets, just call it and drop the address.
+ self.recv_from(buf).map(|(size,_)| size)
+ }
+
+ pub fn connect(&self, addr: SocketAddr) -> io::Result<()> {
+ self.imp.inner.socket.connect(addr)
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.imp.inner.socket.broadcast()
+ }
+
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_broadcast(on)
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.imp.inner.socket.multicast_loop_v4()
+ }
+
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_multicast_loop_v4(on)
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.imp.inner.socket.multicast_ttl_v4()
+ }
+
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.imp.inner.socket.set_multicast_ttl_v4(ttl)
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.imp.inner.socket.multicast_loop_v6()
+ }
+
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_multicast_loop_v6(on)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.imp.inner.socket.ttl()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.imp.inner.socket.set_ttl(ttl)
+ }
+
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.imp.inner.socket.join_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.imp.inner.socket.join_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.imp.inner.socket.leave_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.imp.inner.socket.leave_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.imp.inner.socket.only_v6()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.imp.inner.socket.take_error()
+ }
+
+ fn inner(&self) -> MutexGuard<Inner> {
+ self.imp.inner()
+ }
+
+ fn post_register(&self, interest: Ready, me: &mut Inner) {
+ if interest.is_readable() {
+ // We use recv_from here since it is well specified for both
+ // connected and non-connected sockets, and we can discard the
+ // address when calling recv().
+ self.imp.schedule_read_from(me);
+ }
+ // See comments in TcpStream::post_register for what's going on here
+ if interest.is_writable() {
+ if let State::Empty = me.write {
+ self.imp.add_readiness(me, Ready::writable());
+ }
+ }
+ }
+}
+
+impl Imp {
+ fn inner(&self) -> MutexGuard<Inner> {
+ self.inner.inner.lock().unwrap()
+ }
+
+ fn schedule_read_from(&self, me: &mut Inner) {
+ match me.read {
+ State::Empty => {}
+ _ => return,
+ }
+
+ let interest = me.iocp.readiness();
+ me.iocp.set_readiness(interest - Ready::readable());
+
+ let mut buf = me.iocp.get_buffer(64 * 1024);
+ let res = unsafe {
+ trace!("scheduling a read");
+ let cap = buf.capacity();
+ buf.set_len(cap);
+ self.inner.socket.recv_from_overlapped(&mut buf, &mut me.read_buf,
+ self.inner.read.as_mut_ptr())
+ };
+ match res {
+ Ok(_) => {
+ me.read = State::Pending(buf);
+ mem::forget(self.clone());
+ }
+ Err(e) => {
+ me.read = State::Error(e);
+ self.add_readiness(me, Ready::readable());
+ me.iocp.put_buffer(buf);
+ }
+ }
+ }
+
+ // See comments in tcp::StreamImp::add_readiness
+ fn add_readiness(&self, me: &Inner, set: Ready) {
+ me.iocp.set_readiness(set | me.iocp.readiness());
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.register_socket(&self.imp.inner.socket,
+ poll, token, interest, opts,
+ &self.registration)?;
+ self.post_register(interest, &mut me);
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.reregister_socket(&self.imp.inner.socket,
+ poll, token, interest,
+ opts, &self.registration)?;
+ self.post_register(interest, &mut me);
+ Ok(())
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner().iocp.deregister(&self.imp.inner.socket,
+ poll, &self.registration)
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("UdpSocket")
+ .finish()
+ }
+}
+
+impl Drop for UdpSocket {
+ fn drop(&mut self) {
+ let inner = self.inner();
+
+ // If we're still internally reading, we're no longer interested. Note
+ // though that we don't cancel any writes which may have been issued to
+ // preserve the same semantics as Unix.
+ unsafe {
+ match inner.read {
+ State::Pending(_) => {
+ drop(super::cancel(&self.imp.inner.socket,
+ &self.imp.inner.read));
+ }
+ State::Empty |
+ State::Ready(_) |
+ State::Error(_) => {}
+ }
+ }
+ }
+}
+
+fn send_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ trace!("finished a send {}", status.bytes_transferred());
+ let me2 = Imp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), Io, write) },
+ };
+ let mut me = me2.inner();
+ me.write = State::Empty;
+ me2.add_readiness(&mut me, Ready::writable());
+}
+
+fn recv_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ trace!("finished a recv {}", status.bytes_transferred());
+ let me2 = Imp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), Io, read) },
+ };
+ let mut me = me2.inner();
+ let mut buf = match mem::replace(&mut me.read, State::Empty) {
+ State::Pending(buf) => buf,
+ _ => unreachable!(),
+ };
+ unsafe {
+ buf.set_len(status.bytes_transferred() as usize);
+ }
+ me.read = State::Ready(buf);
+ me2.add_readiness(&mut me, Ready::readable());
+}
diff --git a/third_party/rust/mio-0.6.23/src/timer.rs b/third_party/rust/mio-0.6.23/src/timer.rs
new file mode 100644
index 0000000000..c591be5e27
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/timer.rs
@@ -0,0 +1,516 @@
+//! Timer optimized for I/O related operations
+
+#![allow(deprecated, missing_debug_implementations)]
+
+use {convert, io, Ready, Poll, PollOpt, Registration, SetReadiness, Token};
+use event::Evented;
+use lazycell::LazyCell;
+use slab::Slab;
+use std::{cmp, error, fmt, u64, usize, iter, thread};
+use std::sync::Arc;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::time::{Duration, Instant};
+
+use self::TimerErrorKind::TimerOverflow;
+
+pub struct Timer<T> {
+ // Size of each tick in milliseconds
+ tick_ms: u64,
+ // Slab of timeout entries
+ entries: Slab<Entry<T>>,
+ // Timeout wheel. Each tick, the timer will look at the next slot for
+ // timeouts that match the current tick.
+ wheel: Vec<WheelEntry>,
+ // Tick 0's time instant
+ start: Instant,
+ // The current tick
+ tick: Tick,
+ // The next entry to possibly timeout
+ next: Token,
+ // Masks the target tick to get the slot
+ mask: u64,
+ // Set on registration with Poll
+ inner: LazyCell<Inner>,
+}
+
+pub struct Builder {
+ // Approximate duration of each tick
+ tick: Duration,
+ // Number of slots in the timer wheel
+ num_slots: usize,
+ // Max number of timeouts that can be in flight at a given time.
+ capacity: usize,
+}
+
+#[derive(Clone, Debug)]
+pub struct Timeout {
+ // Reference into the timer entry slab
+ token: Token,
+ // Tick that it should match up with
+ tick: u64,
+}
+
+struct Inner {
+ registration: Registration,
+ set_readiness: SetReadiness,
+ wakeup_state: WakeupState,
+ wakeup_thread: thread::JoinHandle<()>,
+}
+
+impl Drop for Inner {
+ fn drop(&mut self) {
+ // 1. Set wakeup state to TERMINATE_THREAD (https://github.com/carllerche/mio/blob/master/src/timer.rs#L451)
+ self.wakeup_state.store(TERMINATE_THREAD, Ordering::Release);
+ // 2. Wake it up
+ self.wakeup_thread.thread().unpark();
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+struct WheelEntry {
+ next_tick: Tick,
+ head: Token,
+}
+
+// Doubly linked list of timer entries. Allows for efficient insertion /
+// removal of timeouts.
+struct Entry<T> {
+ state: T,
+ links: EntryLinks,
+}
+
+#[derive(Copy, Clone)]
+struct EntryLinks {
+ tick: Tick,
+ prev: Token,
+ next: Token
+}
+
+type Tick = u64;
+
+const TICK_MAX: Tick = u64::MAX;
+
+// Manages communication with wakeup thread
+type WakeupState = Arc<AtomicUsize>;
+
+pub type Result<T> = ::std::result::Result<T, TimerError>;
+// TODO: remove
+pub type TimerResult<T> = Result<T>;
+
+
+/// Deprecated and unused.
+#[derive(Debug)]
+pub struct TimerError;
+
+/// Deprecated and unused.
+#[derive(Debug)]
+pub enum TimerErrorKind {
+ TimerOverflow,
+}
+
+// TODO: Remove
+pub type OldTimerResult<T> = Result<T>;
+
+const TERMINATE_THREAD: usize = 0;
+const EMPTY: Token = Token(usize::MAX);
+
+impl Builder {
+ pub fn tick_duration(mut self, duration: Duration) -> Builder {
+ self.tick = duration;
+ self
+ }
+
+ pub fn num_slots(mut self, num_slots: usize) -> Builder {
+ self.num_slots = num_slots;
+ self
+ }
+
+ pub fn capacity(mut self, capacity: usize) -> Builder {
+ self.capacity = capacity;
+ self
+ }
+
+ pub fn build<T>(self) -> Timer<T> {
+ Timer::new(convert::millis(self.tick), self.num_slots, self.capacity, Instant::now())
+ }
+}
+
+impl Default for Builder {
+ fn default() -> Builder {
+ Builder {
+ tick: Duration::from_millis(100),
+ num_slots: 256,
+ capacity: 65_536,
+ }
+ }
+}
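
A minimal usage sketch of the builder, assuming the type is reachable through the (deprecated) public `mio::timer` module; the specific tick duration, slot count, and capacity are arbitrary example values:

```rust
use mio::timer::Builder;
use std::time::Duration;

fn main() {
    let mut timer = Builder::default()
        .tick_duration(Duration::from_millis(50))
        .num_slots(512)
        .capacity(4_096)
        .build::<&'static str>();

    let timeout = timer.set_timeout(Duration::from_millis(200), "ping").unwrap();
    // Either poll() for the expired state later, or cancel and get it back:
    assert_eq!(timer.cancel_timeout(&timeout), Some("ping"));
}
```
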
+
+impl<T> Timer<T> {
+ fn new(tick_ms: u64, num_slots: usize, capacity: usize, start: Instant) -> Timer<T> {
+ let num_slots = num_slots.next_power_of_two();
+ let capacity = capacity.next_power_of_two();
+ let mask = (num_slots as u64) - 1;
+ let wheel = iter::repeat(WheelEntry { next_tick: TICK_MAX, head: EMPTY })
+ .take(num_slots).collect();
+
+ Timer {
+ tick_ms,
+ entries: Slab::with_capacity(capacity),
+ wheel,
+ start,
+ tick: 0,
+ next: EMPTY,
+ mask,
+ inner: LazyCell::new(),
+ }
+ }
+
+ pub fn set_timeout(&mut self, delay_from_now: Duration, state: T) -> Result<Timeout> {
+ let delay_from_start = self.start.elapsed() + delay_from_now;
+ self.set_timeout_at(delay_from_start, state)
+ }
+
+ fn set_timeout_at(&mut self, delay_from_start: Duration, state: T) -> Result<Timeout> {
+ let mut tick = duration_to_tick(delay_from_start, self.tick_ms);
+ trace!("setting timeout; delay={:?}; tick={:?}; current-tick={:?}", delay_from_start, tick, self.tick);
+
+ // Always target at least 1 tick in the future
+ if tick <= self.tick {
+ tick = self.tick + 1;
+ }
+
+ self.insert(tick, state)
+ }
+
+ fn insert(&mut self, tick: Tick, state: T) -> Result<Timeout> {
+ // Get the slot for the requested tick
+ let slot = (tick & self.mask) as usize;
+ let curr = self.wheel[slot];
+
+ // Insert the new entry
+ let entry = Entry::new(state, tick, curr.head);
+ let token = Token(self.entries.insert(entry));
+
+ if curr.head != EMPTY {
+ // If there was a previous entry, set its prev pointer to the new
+ // entry
+ self.entries[curr.head.into()].links.prev = token;
+ }
+
+ // Update the head slot
+ self.wheel[slot] = WheelEntry {
+ next_tick: cmp::min(tick, curr.next_tick),
+ head: token,
+ };
+
+ self.schedule_readiness(tick);
+
+ trace!("inserted timeout; slot={}; token={:?}", slot, token);
+
+ // Return the new timeout
+ Ok(Timeout {
+ token,
+ tick
+ })
+ }
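
Because `num_slots` is rounded up to a power of two, `tick & self.mask` is equivalent to `tick % num_slots`. A worked example of the slot computation shared by `insert` and `slot_for`:

```rust
fn main() {
    let num_slots: u64 = 256;           // already a power of two
    let mask = num_slots - 1;           // 0xFF
    assert_eq!(300 & mask, 44);         // tick 300 lands in slot 44
    assert_eq!((300 + 256) & mask, 44); // one full wheel turn later, same slot
}
```
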
+
+ pub fn cancel_timeout(&mut self, timeout: &Timeout) -> Option<T> {
+ let links = match self.entries.get(timeout.token.into()) {
+ Some(e) => e.links,
+ None => return None
+ };
+
+ // Sanity check
+ if links.tick != timeout.tick {
+ return None;
+ }
+
+ self.unlink(&links, timeout.token);
+ Some(self.entries.remove(timeout.token.into()).state)
+ }
+
+ pub fn poll(&mut self) -> Option<T> {
+ let target_tick = current_tick(self.start, self.tick_ms);
+ self.poll_to(target_tick)
+ }
+
+ fn poll_to(&mut self, mut target_tick: Tick) -> Option<T> {
+ trace!("tick_to; target_tick={}; current_tick={}", target_tick, self.tick);
+
+ if target_tick < self.tick {
+ target_tick = self.tick;
+ }
+
+ while self.tick <= target_tick {
+ let curr = self.next;
+
+ trace!("ticking; curr={:?}", curr);
+
+ if curr == EMPTY {
+ self.tick += 1;
+
+ let slot = self.slot_for(self.tick);
+ self.next = self.wheel[slot].head;
+
+ // Handle the case when a slot has a single timeout which gets
+ // canceled before the timeout expires. In this case, the
+ // slot's head is EMPTY but there is a value for next_tick. Not
+ // resetting next_tick here causes the timer to get stuck in a
+ // loop.
+ if self.next == EMPTY {
+ self.wheel[slot].next_tick = TICK_MAX;
+ }
+ } else {
+ let slot = self.slot_for(self.tick);
+
+ if curr == self.wheel[slot].head {
+ self.wheel[slot].next_tick = TICK_MAX;
+ }
+
+ let links = self.entries[curr.into()].links;
+
+ if links.tick <= self.tick {
+ trace!("triggering; token={:?}", curr);
+
+ // Unlink will also advance self.next
+ self.unlink(&links, curr);
+
+ // Remove and return the token
+ return Some(self.entries.remove(curr.into()).state);
+ } else {
+ let next_tick = self.wheel[slot].next_tick;
+ self.wheel[slot].next_tick = cmp::min(next_tick, links.tick);
+ self.next = links.next;
+ }
+ }
+ }
+
+ // No more timeouts to poll
+ if let Some(inner) = self.inner.borrow() {
+ trace!("unsetting readiness");
+ let _ = inner.set_readiness.set_readiness(Ready::empty());
+
+ if let Some(tick) = self.next_tick() {
+ self.schedule_readiness(tick);
+ }
+ }
+
+ None
+ }
+
+ fn unlink(&mut self, links: &EntryLinks, token: Token) {
+ trace!("unlinking timeout; slot={}; token={:?}",
+ self.slot_for(links.tick), token);
+
+ if links.prev == EMPTY {
+ let slot = self.slot_for(links.tick);
+ self.wheel[slot].head = links.next;
+ } else {
+ self.entries[links.prev.into()].links.next = links.next;
+ }
+
+ if links.next != EMPTY {
+ self.entries[links.next.into()].links.prev = links.prev;
+
+ if token == self.next {
+ self.next = links.next;
+ }
+ } else if token == self.next {
+ self.next = EMPTY;
+ }
+ }
+
+ fn schedule_readiness(&self, tick: Tick) {
+ if let Some(inner) = self.inner.borrow() {
+ // Coordinate setting readiness w/ the wakeup thread
+ let mut curr = inner.wakeup_state.load(Ordering::Acquire);
+
+ loop {
+ if curr as Tick <= tick {
+ // Nothing to do, wakeup is already scheduled
+ return;
+ }
+
+ // Attempt to move the wakeup time forward
+ trace!("advancing the wakeup time; target={}; curr={}", tick, curr);
+ let actual = inner.wakeup_state.compare_and_swap(curr, tick as usize, Ordering::Release);
+
+ if actual == curr {
+ // Signal to the wakeup thread that the wakeup time has
+ // been changed.
+ trace!("unparking wakeup thread");
+ inner.wakeup_thread.thread().unpark();
+ return;
+ }
+
+ curr = actual;
+ }
+ }
+ }
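
The loop above is a standard compare-and-swap retry: keep trying to move the shared wakeup tick earlier, re-reading the current value on contention. A standalone sketch of the same pattern (using `compare_exchange`, which newer Rust prefers over the `compare_and_swap` call in the original):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn move_earlier(state: &AtomicUsize, target: usize) {
    let mut curr = state.load(Ordering::Acquire);
    while target < curr {
        match state.compare_exchange(curr, target, Ordering::Release, Ordering::Relaxed) {
            Ok(_) => return,              // we won the race; unpark the other thread here
            Err(actual) => curr = actual, // lost the race; retry against the new value
        }
    }
}

fn main() {
    let state = AtomicUsize::new(usize::MAX); // "no wakeup scheduled"
    move_earlier(&state, 42);
    assert_eq!(state.load(Ordering::Acquire), 42);
    move_earlier(&state, 100); // later than 42: no change needed
    assert_eq!(state.load(Ordering::Acquire), 42);
}
```
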
+
+ // Next tick containing a timeout
+ fn next_tick(&self) -> Option<Tick> {
+ if self.next != EMPTY {
+ let slot = self.slot_for(self.entries[self.next.into()].links.tick);
+
+ if self.wheel[slot].next_tick == self.tick {
+ // There is data ready right now
+ return Some(self.tick);
+ }
+ }
+
+ self.wheel.iter().map(|e| e.next_tick).min()
+ }
+
+ fn slot_for(&self, tick: Tick) -> usize {
+ (self.mask & tick) as usize
+ }
+}
+
+impl<T> Default for Timer<T> {
+ fn default() -> Timer<T> {
+ Builder::default().build()
+ }
+}
+
+impl<T> Evented for Timer<T> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ if self.inner.borrow().is_some() {
+ return Err(io::Error::new(io::ErrorKind::Other, "timer already registered"));
+ }
+
+ let (registration, set_readiness) = Registration::new(poll, token, interest, opts);
+ let wakeup_state = Arc::new(AtomicUsize::new(usize::MAX));
+ let thread_handle = spawn_wakeup_thread(
+ wakeup_state.clone(),
+ set_readiness.clone(),
+ self.start, self.tick_ms);
+
+ self.inner.fill(Inner {
+ registration,
+ set_readiness,
+ wakeup_state,
+ wakeup_thread: thread_handle,
+ }).expect("timer already registered");
+
+ if let Some(next_tick) = self.next_tick() {
+ self.schedule_readiness(next_tick);
+ }
+
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ match self.inner.borrow() {
+ Some(inner) => inner.registration.update(poll, token, interest, opts),
+ None => Err(io::Error::new(io::ErrorKind::Other, "receiver not registered")),
+ }
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ match self.inner.borrow() {
+ Some(inner) => inner.registration.deregister(poll),
+ None => Err(io::Error::new(io::ErrorKind::Other, "receiver not registered")),
+ }
+ }
+}
+
+impl fmt::Debug for Inner {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Inner")
+ .field("registration", &self.registration)
+ .field("wakeup_state", &self.wakeup_state.load(Ordering::Relaxed))
+ .finish()
+ }
+}
+
+fn spawn_wakeup_thread(state: WakeupState, set_readiness: SetReadiness, start: Instant, tick_ms: u64) -> thread::JoinHandle<()> {
+ thread::spawn(move || {
+ let mut sleep_until_tick = state.load(Ordering::Acquire) as Tick;
+
+ loop {
+ if sleep_until_tick == TERMINATE_THREAD as Tick {
+ return;
+ }
+
+ let now_tick = current_tick(start, tick_ms);
+
+ trace!("wakeup thread: sleep_until_tick={:?}; now_tick={:?}", sleep_until_tick, now_tick);
+
+ if now_tick < sleep_until_tick {
+ // Calling park_timeout with u64::MAX leads to undefined
+ // behavior in pthread, causing the park to return immediately
+ // and the thread to spin tightly. So instead of passing u64::MAX
+ // for large values, simply use a blocking park.
+ match tick_ms.checked_mul(sleep_until_tick - now_tick) {
+ Some(sleep_duration) => {
+ trace!("sleeping; tick_ms={}; now_tick={}; sleep_until_tick={}; duration={:?}",
+ tick_ms, now_tick, sleep_until_tick, sleep_duration);
+ thread::park_timeout(Duration::from_millis(sleep_duration));
+ }
+ None => {
+ trace!("sleeping; tick_ms={}; now_tick={}; blocking sleep",
+ tick_ms, now_tick);
+ thread::park();
+ }
+ }
+ sleep_until_tick = state.load(Ordering::Acquire) as Tick;
+ } else {
+ let actual = state.compare_and_swap(sleep_until_tick as usize, usize::MAX, Ordering::AcqRel) as Tick;
+
+ if actual == sleep_until_tick {
+ trace!("setting readiness from wakeup thread");
+ let _ = set_readiness.set_readiness(Ready::readable());
+ sleep_until_tick = usize::MAX as Tick;
+ } else {
+ sleep_until_tick = actual as Tick;
+ }
+ }
+ }
+ })
+}
+
+fn duration_to_tick(elapsed: Duration, tick_ms: u64) -> Tick {
+ // Calculate the tick, rounding to the nearest tick boundary
+ let elapsed_ms = convert::millis(elapsed);
+ elapsed_ms.saturating_add(tick_ms / 2) / tick_ms
+}
+
+fn current_tick(start: Instant, tick_ms: u64) -> Tick {
+ duration_to_tick(start.elapsed(), tick_ms)
+}
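
A worked example of the rounding here: adding half a tick before dividing rounds to the nearest tick boundary, and `saturating_add` guards against overflow on huge durations.

```rust
fn main() {
    let tick_ms: u64 = 100;
    let to_tick = |elapsed_ms: u64| elapsed_ms.saturating_add(tick_ms / 2) / tick_ms;
    assert_eq!(to_tick(149), 1); // 149 ms rounds down to tick 1
    assert_eq!(to_tick(151), 2); // 151 ms rounds up to tick 2
    assert_eq!(to_tick(u64::MAX), u64::MAX / tick_ms); // saturating_add avoids overflow
}
```
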
+
+impl<T> Entry<T> {
+ fn new(state: T, tick: u64, next: Token) -> Entry<T> {
+ Entry {
+ state,
+ links: EntryLinks {
+ tick,
+ prev: EMPTY,
+ next,
+ },
+ }
+ }
+}
+
+impl fmt::Display for TimerError {
+ fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
+ // `TimerError` will never be constructed.
+ unreachable!();
+ }
+}
+
+impl error::Error for TimerError {
+ fn description(&self) -> &str {
+ // `TimerError` will never be constructed.
+ unreachable!();
+ }
+}
+
+impl fmt::Display for TimerErrorKind {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ TimerOverflow => write!(fmt, "TimerOverflow"),
+ }
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/token.rs b/third_party/rust/mio-0.6.23/src/token.rs
new file mode 100644
index 0000000000..09e42450bc
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/token.rs
@@ -0,0 +1,153 @@
+/// Associates readiness notifications with [`Evented`] handles.
+///
+/// `Token` is a wrapper around `usize` and is used as an argument to
+/// [`Poll::register`] and [`Poll::reregister`].
+///
+/// See [`Poll`] for more documentation on polling.
+///
+/// # Example
+///
+/// Using `Token` to track which socket generated the notification. In this
+/// example, `HashMap` is used, but usually something like [`slab`] is better.
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Ready, Poll, PollOpt, Token};
+/// use mio::net::TcpListener;
+///
+/// use std::thread;
+/// use std::io::{self, Read};
+/// use std::collections::HashMap;
+///
+/// // After this number of sockets is accepted, the server will shut down.
+/// const MAX_SOCKETS: usize = 32;
+///
+/// // Pick a token that will not be used by any other socket and use that one
+/// // for the listener.
+/// const LISTENER: Token = Token(1024);
+///
+/// // Used to store the sockets.
+/// let mut sockets = HashMap::new();
+///
+/// // This is used to generate a unique token for a socket
+/// let mut next_socket_index = 0;
+///
+/// // The `Poll` instance
+/// let poll = Poll::new()?;
+///
+/// // Tcp listener
+/// let listener = TcpListener::bind(&"127.0.0.1:0".parse()?)?;
+///
+/// // Register the listener
+/// poll.register(&listener,
+/// LISTENER,
+/// Ready::readable(),
+/// PollOpt::edge())?;
+///
+/// // Spawn a thread that will connect a bunch of sockets then close them
+/// let addr = listener.local_addr()?;
+/// thread::spawn(move || {
+/// use std::net::TcpStream;
+///
+/// // +1 here connects an extra socket, signalling the server to shut down
+/// for _ in 0..(MAX_SOCKETS+1) {
+/// // Connect then drop the socket
+/// let _ = TcpStream::connect(&addr).unwrap();
+/// }
+/// });
+///
+/// // Event storage
+/// let mut events = Events::with_capacity(1024);
+///
+/// // Read buffer, this will never actually get filled
+/// let mut buf = [0; 256];
+///
+/// // The main event loop
+/// loop {
+/// // Wait for events
+/// poll.poll(&mut events, None)?;
+///
+/// for event in &events {
+/// match event.token() {
+/// LISTENER => {
+/// // Perform operations in a loop until `WouldBlock` is
+/// // encountered.
+/// loop {
+/// match listener.accept() {
+/// Ok((socket, _)) => {
+/// // Shut down the server
+/// if next_socket_index == MAX_SOCKETS {
+/// return Ok(());
+/// }
+///
+/// // Get the token for the socket
+/// let token = Token(next_socket_index);
+/// next_socket_index += 1;
+///
+/// // Register the new socket w/ poll
+/// poll.register(&socket,
+/// token,
+/// Ready::readable(),
+/// PollOpt::edge())?;
+///
+/// // Store the socket
+/// sockets.insert(token, socket);
+/// }
+/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+/// // Socket is not ready anymore, stop accepting
+/// break;
+/// }
+/// e => panic!("err={:?}", e), // Unexpected error
+/// }
+/// }
+/// }
+/// token => {
+/// // Always operate in a loop
+/// loop {
+/// match sockets.get_mut(&token).unwrap().read(&mut buf) {
+/// Ok(0) => {
+/// // Socket is closed, remove it from the map
+/// sockets.remove(&token);
+/// break;
+/// }
+/// // Data is not actually sent in this example
+/// Ok(_) => unreachable!(),
+/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+/// // Socket is not ready anymore, stop reading
+/// break;
+/// }
+/// e => panic!("err={:?}", e), // Unexpected error
+/// }
+/// }
+/// }
+/// }
+/// }
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// [`Evented`]: event/trait.Evented.html
+/// [`Poll`]: struct.Poll.html
+/// [`Poll::register`]: struct.Poll.html#method.register
+/// [`Poll::reregister`]: struct.Poll.html#method.reregister
+/// [`slab`]: https://crates.io/crates/slab
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Token(pub usize);
+
+impl From<usize> for Token {
+ fn from(val: usize) -> Token {
+ Token(val)
+ }
+}
+
+impl From<Token> for usize {
+ fn from(val: Token) -> usize {
+ val.0
+ }
+}
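
These `From` impls make `Token` and `usize` interchangeable at call sites, which is convenient when tokens index into a `Slab` or `Vec`:

```rust
use mio::Token;

fn main() {
    let token = Token::from(7usize);
    let index: usize = token.into();
    assert_eq!(index, 7);
    assert_eq!(Token(7), token);
}
```
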
diff --git a/third_party/rust/mio-0.6.23/src/udp.rs b/third_party/rust/mio-0.6.23/src/udp.rs
new file mode 100644
index 0000000000..a71bd21914
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/udp.rs
@@ -0,0 +1,326 @@
+//! Primitives for working with UDP
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+#![allow(deprecated)]
+
+use {sys, Ready, Poll, PollOpt, Token};
+use io::{self, MapNonBlock};
+use event::Evented;
+use poll::SelectorId;
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+
+/// A User Datagram Protocol socket.
+///
+/// This is an implementation of a bound UDP socket. This supports both IPv4 and
+/// IPv6 addresses, and there is no corresponding notion of a server because UDP
+/// is a datagram protocol.
+#[derive(Debug)]
+pub struct UdpSocket {
+ sys: sys::UdpSocket,
+ selector_id: SelectorId,
+}
+
+impl UdpSocket {
+ /// Creates a UDP socket from the given address.
+ pub fn bind(addr: &SocketAddr) -> io::Result<UdpSocket> {
+ let socket = net::UdpSocket::bind(addr)?;
+ UdpSocket::from_socket(socket)
+ }
+
+ /// Creates a new mio-wrapped socket from an underlying and bound std
+ /// socket.
+ ///
+ /// This function requires that `socket` has previously been bound to an
+ /// address to work correctly, and returns an I/O object which can be used
+ /// with mio to send/receive UDP messages.
+ ///
+ /// This can be used in conjunction with net2's `UdpBuilder` interface to
+ /// configure a socket before it's handed off to mio, such as setting
+ /// options like `reuse_address` or binding to multiple addresses.
+ pub fn from_socket(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ Ok(UdpSocket {
+ sys: sys::UdpSocket::new(socket)?,
+ selector_id: SelectorId::new(),
+ })
+ }
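
A hedged sketch of the net2 hand-off described above, assuming net2 0.2's `UdpBuilder` API; `reuse_address` is just one example option to set before binding:

```rust
use mio::udp::UdpSocket;
use net2::UdpBuilder;
use std::io;

fn bind_reusable() -> io::Result<UdpSocket> {
    let builder = UdpBuilder::new_v4()?;
    builder.reuse_address(true)?;              // configure before binding
    let std_socket = builder.bind("127.0.0.1:0")?;
    UdpSocket::from_socket(std_socket)         // hand the bound socket to mio
}
```
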
+
+ /// Returns the socket address that this socket was created from.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.local_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `UdpSocket` is a reference to the same socket that this
+ /// object references. Both handles will read and write the same port, and
+ /// options set on one socket will be propagated to the other.
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.sys.try_clone()
+ .map(|s| {
+ UdpSocket {
+ sys: s,
+ selector_id: self.selector_id.clone(),
+ }
+ })
+ }
+
+ /// Sends data on the socket to the given address. On success, returns the
+ /// number of bytes written.
+ ///
+ /// Unlike `std::net::UdpSocket::send_to`, the target address here must be
+ /// a `SocketAddr` rather than any implementor of the `ToSocketAddrs` trait.
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr)
+ -> io::Result<Option<usize>> {
+ self.sys.send_to(buf, target).map_non_block()
+ }
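
Because `map_non_block` folds `WouldBlock` into `None`, a caller matches on three outcomes rather than two. A small caller-side sketch (`socket` and `target` are assumed to come from elsewhere):

```rust
use mio::udp::UdpSocket;
use std::io;
use std::net::SocketAddr;

fn try_send(socket: &UdpSocket, target: &SocketAddr) -> io::Result<()> {
    match socket.send_to(b"hello", target)? {
        Some(n) => println!("sent {} bytes", n),
        None => { /* not ready yet; retry on the next writable event */ }
    }
    Ok(())
}
```
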
+
+ /// Receives data from the socket and stores it in the supplied buffer `buf`.
+ /// On success, returns the number of bytes read and the address the data came from.
+ ///
+ /// The function must be called with a valid byte buffer `buf` large enough to
+ /// hold the message. If a message is too long to fit in the supplied buffer,
+ /// excess bytes may be discarded.
+ ///
+ /// The function does not read from `buf`; it overwrites the previous contents of `buf`.
+ ///
+ /// Assuming the function has read `n` bytes, slicing `&buf[..n]` provides
+ /// efficient access with iterators and boundary checks.
+ pub fn recv_from(&self, buf: &mut [u8])
+ -> io::Result<Option<(usize, SocketAddr)>> {
+ self.sys.recv_from(buf).map_non_block()
+ }
+
+ /// Sends data on the socket to the address to which it was previously
+ /// connected via connect(). On success, returns the number of bytes written.
+ pub fn send(&self, buf: &[u8])
+ -> io::Result<Option<usize>> {
+ self.sys.send(buf).map_non_block()
+ }
+
+ /// Receives data from the socket previously connected via connect() and stores
+ /// it in the supplied buffer `buf`. On success, returns the number of bytes read.
+ ///
+ /// The function must be called with a valid byte buffer `buf` large enough to
+ /// hold the message. If a message is too long to fit in the supplied buffer,
+ /// excess bytes may be discarded.
+ ///
+ /// The function does not read from `buf`; it overwrites the previous contents of `buf`.
+ ///
+ /// Assuming the function has read `n` bytes, slicing `&buf[..n]` provides
+ /// efficient access with iterators and boundary checks.
+ pub fn recv(&self, buf: &mut [u8])
+ -> io::Result<Option<usize>> {
+ self.sys.recv(buf).map_non_block()
+ }
+
+ /// Connects the UDP socket, setting the default destination for `send()`
+ /// and limiting packets read via `recv` to those coming from the address
+ /// specified in `addr`.
+ pub fn connect(&self, addr: SocketAddr)
+ -> io::Result<()> {
+ self.sys.connect(addr)
+ }
+
+ /// Gets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_broadcast`][link].
+ ///
+ /// [link]: #method.set_broadcast
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.sys.broadcast()
+ }
+
+ /// Sets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// When enabled, this socket is allowed to send packets to a broadcast
+ /// address.
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.sys.set_broadcast(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v4
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.sys.multicast_loop_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// If enabled, multicast packets will be looped back to the local socket.
+ /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.sys.set_multicast_loop_v4(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_ttl_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_ttl_v4
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.sys.multicast_ttl_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// Indicates the time-to-live value of outgoing multicast packets for
+ /// this socket. The default value is 1 which means that multicast packets
+ /// don't leave the local network unless explicitly requested.
+ ///
+ /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_multicast_ttl_v4(ttl)
+ }
+
+ /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v6`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v6
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.sys.multicast_loop_v6()
+ }
+
+ /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// Controls whether this socket sees the multicast packets it sends itself.
+ /// Note that this may not have any effect on IPv4 sockets.
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.sys.set_multicast_loop_v6(on)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.sys.ttl()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_ttl(ttl)
+ }
+
+ /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// address of the local interface with which the system should join the
+ /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+ /// interface is chosen by the system.
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.sys.join_multicast_v4(multiaddr, interface)
+ }
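
A short usage sketch; the group address is an arbitrary example and `0.0.0.0` plays the role of `INADDR_ANY`, letting the system pick the interface:

```rust
use mio::udp::UdpSocket;
use std::io;
use std::net::Ipv4Addr;

fn join_group(socket: &UdpSocket) -> io::Result<()> {
    let group = Ipv4Addr::new(224, 0, 0, 123); // example multicast group
    let any = Ipv4Addr::new(0, 0, 0, 0);       // INADDR_ANY
    socket.join_multicast_v4(&group, &any)
}
```
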
+
+ /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// index of the interface to join/leave (or 0 to indicate any interface).
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.sys.join_multicast_v6(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v4`][link].
+ ///
+ /// [link]: #method.join_multicast_v4
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.sys.leave_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v6`][link].
+ ///
+ /// [link]: #method.join_multicast_v6
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.sys.leave_multicast_v6(multiaddr, interface)
+ }
+
+ /// Gets the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.sys.take_error()
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.selector_id.associate_selector(poll)?;
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+/*
+ *
+ * ===== UNIX ext =====
+ *
+ */
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl IntoRawFd for UdpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for UdpSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl FromRawFd for UdpSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
+ UdpSocket {
+ sys: FromRawFd::from_raw_fd(fd),
+ selector_id: SelectorId::new(),
+ }
+ }
+}
diff --git a/third_party/rust/mio-extras/.cargo-checksum.json b/third_party/rust/mio-extras/.cargo-checksum.json
new file mode 100644
index 0000000000..cb15205f9b
--- /dev/null
+++ b/third_party/rust/mio-extras/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"499f8d84e8bee37198044301a5fdfe6a811dd19b62c2c771616185a7543b8334","Cargo.toml":"a4d1b4f5de2908b805a42b31472dfd5fee30a49dc4f575a174c581c7b57b25b7","LICENSE-APACHE":"406e5cbaa2ad1178c300cf28ac5258e8d0db0de4f061e78db559d30e6f38e25c","LICENSE-MIT":"8aa414e6c821efd8be6bade07368a5d9f51f5cc55718bc54e10a59eb826b8d58","README.md":"fa2642be7bd670014c5e25bafbee73b8be0667ddbd193c1cc344a71d7f59463f","src/channel.rs":"a9fb5bcf889b03766821011e94b30a351b80501523c4a9fe5c45796eae218968","src/lib.rs":"2ed1572d3255208681d017265df7f642eb4898b1c2dace91676935f55e03eb04","src/timer.rs":"a1e71e38ab983291557d534ce2454a0ba5872652f4e7c4161131ba5150ec8d57","test/mod.rs":"aa3afc2582f00e5e2a2e5b87d12eb9810b0eed3248b48abef7094fd8d02d9c41","test/test_poll_channel.rs":"508815e265ae44328fb3d7c98cdf210815a9946bde291dd896de81df0394de37","test/test_timer.rs":"d04b6f57e9a395ce190022c0158cc498805758101e9fdad18b63829eb9bb6510"},"package":"52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19"} \ No newline at end of file
diff --git a/third_party/rust/mio-extras/CHANGELOG.md b/third_party/rust/mio-extras/CHANGELOG.md
new file mode 100644
index 0000000000..354b87584c
--- /dev/null
+++ b/third_party/rust/mio-extras/CHANGELOG.md
@@ -0,0 +1,37 @@
+## 2.0.6 (7 Dec 2019)
+
+- fix license metadata in `Cargo.toml` (thanks @ignatenkobrain)
+
+## 2.0.5 (18 Jun 2018)
+
+- update `lazycell` from 0.6 -> 1.0
+
+## 2.0.4 (7 Apr 2018)
+
+- Bump mio dependency (fixes minimal-versions build)
+
+## 2.0.3 (28 Dec 2017)
+
+- update `log` from 0.3 -> 0.4
+
+## 2.0.2
+
+- More docs tidying.
+
+## 2.0.1
+
+- Another try at documenting the timer interface.
+
+## 2.0.0
+
+- Remove channel implementation details from the API. Specifically, the
+ following are no longer public:
+ - `ctl_pair()`
+ - `SenderCtl`
+ - `ReceiverCtl`
+- Document all APIs
+
+## 1.0.0
+
+- Initial release. Essentially identical to
+ [mio-more](https://github.com/carllerche/mio-more).
diff --git a/third_party/rust/mio-extras/Cargo.toml b/third_party/rust/mio-extras/Cargo.toml
new file mode 100644
index 0000000000..d902a46019
--- /dev/null
+++ b/third_party/rust/mio-extras/Cargo.toml
@@ -0,0 +1,40 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "mio-extras"
+version = "2.0.6"
+authors = ["Carl Lerche <me@carllerche.com>", "David Hotham"]
+exclude = [".gitignore"]
+description = "Extra components for use with Mio"
+documentation = "https://docs.rs/mio-extras"
+readme = "README.md"
+keywords = ["io", "async", "non-blocking"]
+categories = ["asynchronous"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/dimbleby/mio-extras"
+
+[[test]]
+name = "test"
+path = "test/mod.rs"
+[dependencies.lazycell]
+version = "1"
+
+[dependencies.log]
+version = "0.4"
+
+[dependencies.mio]
+version = "0.6.14"
+
+[dependencies.slab]
+version = "0.4"
diff --git a/third_party/rust/mio-extras/LICENSE-APACHE b/third_party/rust/mio-extras/LICENSE-APACHE
new file mode 100644
index 0000000000..a6e8ded657
--- /dev/null
+++ b/third_party/rust/mio-extras/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright 2017 Mio authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/rust/mio-extras/LICENSE-MIT b/third_party/rust/mio-extras/LICENSE-MIT
new file mode 100644
index 0000000000..4cf193e73e
--- /dev/null
+++ b/third_party/rust/mio-extras/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2017 Mio authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/mio-extras/README.md b/third_party/rust/mio-extras/README.md
new file mode 100644
index 0000000000..8ed136a079
--- /dev/null
+++ b/third_party/rust/mio-extras/README.md
@@ -0,0 +1,30 @@
+# mio-extras
+
+Extra components for use with [Mio](https://github.com/tokio-rs/mio):
+
+- a channel that implements `Evented`
+- a timer that implements `Evented`
+
+[![Build Status](https://travis-ci.org/dimbleby/mio-extras.svg?branch=master)](https://travis-ci.org/dimbleby/mio-extras)
+[![crates.io](http://meritbadge.herokuapp.com/mio-extras)](https://crates.io/crates/mio-extras)
+
+[Documentation](https://docs.rs/mio-extras).
+
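+## Example
+
+A minimal sketch of the channel half (token value is illustrative); the timer
+is used the same way, except that readiness fires when a timeout expires:
+
+```rust
+use mio::{Events, Poll, PollOpt, Ready, Token};
+use mio_extras::channel;
+
+let poll = Poll::new().unwrap();
+let mut events = Events::with_capacity(8);
+let (tx, rx) = channel::channel();
+
+// The receiver is `Evented`, so it registers like any other mio source.
+poll.register(&rx, Token(0), Ready::readable(), PollOpt::edge()).unwrap();
+tx.send("hello").unwrap();
+
+poll.poll(&mut events, None).unwrap();
+assert_eq!("hello", rx.try_recv().unwrap());
+```
+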
+## History and maintenance
+
+This repository is forked from
+[`mio-more`](https://github.com/carllerche/mio-more), which is unmaintained.
+
+I don't intend to do very much with this except for routine maintenance - bug
+fixes, updating dependencies, and suchlike.
+
+However, if you have some code that you think belongs here, then by all means
+raise an issue or open a pull request.
+
+## License
+
+`mio-extras` is primarily distributed under the terms of both the MIT license
+and the Apache License (Version 2.0), with portions covered by various BSD-like
+licenses.
+
+See LICENSE-APACHE and LICENSE-MIT for details.
diff --git a/third_party/rust/mio-extras/src/channel.rs b/third_party/rust/mio-extras/src/channel.rs
new file mode 100644
index 0000000000..561317ecbf
--- /dev/null
+++ b/third_party/rust/mio-extras/src/channel.rs
@@ -0,0 +1,431 @@
+//! Thread safe communication channel implementing `Evented`
+use lazycell::{AtomicLazyCell, LazyCell};
+use mio::{Evented, Poll, PollOpt, Ready, Registration, SetReadiness, Token};
+use std::any::Any;
+use std::error;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{mpsc, Arc};
+use std::{fmt, io};
+
+/// Creates a new asynchronous channel, where the `Receiver` can be registered
+/// with `Poll`.
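+///
+/// # Example
+///
+/// A minimal sketch (token value and poll timeout are illustrative):
+///
+/// ```no_run
+/// use mio::{Events, Poll, PollOpt, Ready, Token};
+/// use mio_extras::channel;
+/// use std::time::Duration;
+///
+/// let poll = Poll::new().unwrap();
+/// let mut events = Events::with_capacity(8);
+/// let (tx, rx) = channel::channel();
+///
+/// poll.register(&rx, Token(0), Ready::readable(), PollOpt::edge()).unwrap();
+/// tx.send("hello").unwrap();
+///
+/// // The receiver becomes readable once a message is pending.
+/// poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap();
+/// assert_eq!("hello", rx.try_recv().unwrap());
+/// ```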
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+ let (tx_ctl, rx_ctl) = ctl_pair();
+ let (tx, rx) = mpsc::channel();
+
+ let tx = Sender { tx, ctl: tx_ctl };
+
+ let rx = Receiver { rx, ctl: rx_ctl };
+
+ (tx, rx)
+}
+
+/// Creates a new synchronous, bounded channel where the `Receiver` can be
+/// registered with `Poll`.
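+///
+/// # Example
+///
+/// A sketch of the bounded behaviour, using an illustrative bound of 1:
+///
+/// ```
+/// use mio_extras::channel::{self, TrySendError};
+///
+/// let (tx, _rx) = channel::sync_channel(1);
+/// tx.try_send("first").unwrap();
+/// match tx.try_send("second") {
+///     Err(TrySendError::Full(_)) => {} // the buffer of one is full
+///     other => panic!("unexpected result: {:?}", other),
+/// }
+/// ```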
+pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>) {
+ let (tx_ctl, rx_ctl) = ctl_pair();
+ let (tx, rx) = mpsc::sync_channel(bound);
+
+ let tx = SyncSender { tx, ctl: tx_ctl };
+
+ let rx = Receiver { rx, ctl: rx_ctl };
+
+ (tx, rx)
+}
+
+fn ctl_pair() -> (SenderCtl, ReceiverCtl) {
+ let inner = Arc::new(Inner {
+ pending: AtomicUsize::new(0),
+ senders: AtomicUsize::new(1),
+ set_readiness: AtomicLazyCell::new(),
+ });
+
+ let tx = SenderCtl {
+ inner: Arc::clone(&inner),
+ };
+
+ let rx = ReceiverCtl {
+ registration: LazyCell::new(),
+ inner,
+ };
+
+ (tx, rx)
+}
+
+/// Tracks messages sent on a channel in order to update readiness.
+struct SenderCtl {
+ inner: Arc<Inner>,
+}
+
+/// Tracks messages received on a channel in order to track readiness.
+struct ReceiverCtl {
+ registration: LazyCell<Registration>,
+ inner: Arc<Inner>,
+}
+
+/// The sending half of a channel.
+pub struct Sender<T> {
+ tx: mpsc::Sender<T>,
+ ctl: SenderCtl,
+}
+
+/// The sending half of a synchronous channel.
+pub struct SyncSender<T> {
+ tx: mpsc::SyncSender<T>,
+ ctl: SenderCtl,
+}
+
+/// The receiving half of a channel.
+pub struct Receiver<T> {
+ rx: mpsc::Receiver<T>,
+ ctl: ReceiverCtl,
+}
+
+/// An error returned from the `Sender::send` or `SyncSender::send` function.
+pub enum SendError<T> {
+ /// An IO error.
+ Io(io::Error),
+
+ /// The receiving half of the channel has disconnected.
+ Disconnected(T),
+}
+
+/// An error returned from the `SyncSender::try_send` function.
+pub enum TrySendError<T> {
+ /// An IO error.
+ Io(io::Error),
+
+ /// Data could not be sent because it would require the callee to block.
+ Full(T),
+
+ /// The receiving half of the channel has disconnected.
+ Disconnected(T),
+}
+
+struct Inner {
+ // The number of outstanding messages for the receiver to read
+ pending: AtomicUsize,
+ // The number of sender handles
+ senders: AtomicUsize,
+ // The set readiness handle
+ set_readiness: AtomicLazyCell<SetReadiness>,
+}
+
+impl<T> Sender<T> {
+    /// Attempts to send a value on this channel, returning it if it could not be sent.
+ pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+ self.tx.send(t).map_err(SendError::from).and_then(|_| {
+ self.ctl.inc()?;
+ Ok(())
+ })
+ }
+}
+
+impl<T> Clone for Sender<T> {
+ fn clone(&self) -> Sender<T> {
+ Sender {
+ tx: self.tx.clone(),
+ ctl: self.ctl.clone(),
+ }
+ }
+}
+
+impl<T> SyncSender<T> {
+ /// Sends a value on this synchronous channel.
+ ///
+ /// This function will *block* until space in the internal buffer becomes
+ /// available or a receiver is available to hand off the message to.
+ pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+ self.tx.send(t).map_err(From::from).and_then(|_| {
+ self.ctl.inc()?;
+ Ok(())
+ })
+ }
+
+ /// Attempts to send a value on this channel without blocking.
+ ///
+ /// This method differs from `send` by returning immediately if the channel's
+ /// buffer is full or no receiver is waiting to acquire some data.
+ pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
+ self.tx.try_send(t).map_err(From::from).and_then(|_| {
+ self.ctl.inc()?;
+ Ok(())
+ })
+ }
+}
+
+impl<T> Clone for SyncSender<T> {
+ fn clone(&self) -> SyncSender<T> {
+ SyncSender {
+ tx: self.tx.clone(),
+ ctl: self.ctl.clone(),
+ }
+ }
+}
+
+impl<T> Receiver<T> {
+ /// Attempts to return a pending value on this receiver without blocking.
+ pub fn try_recv(&self) -> Result<T, mpsc::TryRecvError> {
+ self.rx.try_recv().and_then(|res| {
+ let _ = self.ctl.dec();
+ Ok(res)
+ })
+ }
+}
+
+impl<T> Evented for Receiver<T> {
+ fn register(
+ &self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt,
+ ) -> io::Result<()> {
+ self.ctl.register(poll, token, interest, opts)
+ }
+
+ fn reregister(
+ &self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt,
+ ) -> io::Result<()> {
+ self.ctl.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.ctl.deregister(poll)
+ }
+}
+
+/*
+ *
+ * ===== SenderCtl / ReceiverCtl =====
+ *
+ */
+
+impl SenderCtl {
+ /// Call to track that a message has been sent
+ fn inc(&self) -> io::Result<()> {
+ let cnt = self.inner.pending.fetch_add(1, Ordering::Acquire);
+
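+        // `cnt` is the value *before* the increment, so this branch fires
+        // only on the 0 -> 1 transition; later sends leave readiness alone
+        // until the receiver drains the queue.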
+ if 0 == cnt {
+ // Toggle readiness to readable
+ if let Some(set_readiness) = self.inner.set_readiness.borrow() {
+ set_readiness.set_readiness(Ready::readable())?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Clone for SenderCtl {
+ fn clone(&self) -> SenderCtl {
+ self.inner.senders.fetch_add(1, Ordering::Relaxed);
+ SenderCtl {
+ inner: Arc::clone(&self.inner),
+ }
+ }
+}
+
+impl Drop for SenderCtl {
+ fn drop(&mut self) {
+ if self.inner.senders.fetch_sub(1, Ordering::Release) == 1 {
+ let _ = self.inc();
+ }
+ }
+}
+
+impl ReceiverCtl {
+ fn dec(&self) -> io::Result<()> {
+ let first = self.inner.pending.load(Ordering::Acquire);
+
+ if first == 1 {
+ // Unset readiness
+ if let Some(set_readiness) = self.inner.set_readiness.borrow() {
+ set_readiness.set_readiness(Ready::empty())?;
+ }
+ }
+
+ // Decrement
+ let second = self.inner.pending.fetch_sub(1, Ordering::AcqRel);
+
+ if first == 1 && second > 1 {
+ // There are still pending messages. Since readiness was
+ // previously unset, it must be reset here
+ if let Some(set_readiness) = self.inner.set_readiness.borrow() {
+ set_readiness.set_readiness(Ready::readable())?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Evented for ReceiverCtl {
+ fn register(
+ &self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt,
+ ) -> io::Result<()> {
+ if self.registration.borrow().is_some() {
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ "receiver already registered",
+ ));
+ }
+
+ let (registration, set_readiness) = Registration::new2();
+ poll.register(&registration, token, interest, opts)?;
+
+ if self.inner.pending.load(Ordering::Relaxed) > 0 {
+ // TODO: Don't drop readiness
+ let _ = set_readiness.set_readiness(Ready::readable());
+ }
+
+ self.registration
+ .fill(registration)
+ .expect("unexpected state encountered");
+ self.inner
+ .set_readiness
+ .fill(set_readiness)
+ .expect("unexpected state encountered");
+
+ Ok(())
+ }
+
+ fn reregister(
+ &self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt,
+ ) -> io::Result<()> {
+ match self.registration.borrow() {
+ Some(registration) => poll.reregister(registration, token, interest, opts),
+ None => Err(io::Error::new(
+ io::ErrorKind::Other,
+ "receiver not registered",
+ )),
+ }
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ match self.registration.borrow() {
+ Some(registration) => poll.deregister(registration),
+ None => Err(io::Error::new(
+ io::ErrorKind::Other,
+ "receiver not registered",
+ )),
+ }
+ }
+}
+
+/*
+ *
+ * ===== Error conversions =====
+ *
+ */
+
+impl<T> From<mpsc::SendError<T>> for SendError<T> {
+ fn from(src: mpsc::SendError<T>) -> SendError<T> {
+ SendError::Disconnected(src.0)
+ }
+}
+
+impl<T> From<io::Error> for SendError<T> {
+ fn from(src: io::Error) -> SendError<T> {
+ SendError::Io(src)
+ }
+}
+
+impl<T> From<mpsc::TrySendError<T>> for TrySendError<T> {
+ fn from(src: mpsc::TrySendError<T>) -> TrySendError<T> {
+ match src {
+ mpsc::TrySendError::Full(v) => TrySendError::Full(v),
+ mpsc::TrySendError::Disconnected(v) => TrySendError::Disconnected(v),
+ }
+ }
+}
+
+impl<T> From<mpsc::SendError<T>> for TrySendError<T> {
+ fn from(src: mpsc::SendError<T>) -> TrySendError<T> {
+ TrySendError::Disconnected(src.0)
+ }
+}
+
+impl<T> From<io::Error> for TrySendError<T> {
+ fn from(src: io::Error) -> TrySendError<T> {
+ TrySendError::Io(src)
+ }
+}
+
+/*
+ *
+ * ===== Implement Error, Debug and Display for Errors =====
+ *
+ */
+
+impl<T: Any> error::Error for SendError<T> {
+ fn description(&self) -> &str {
+ match *self {
+ SendError::Io(ref io_err) => io_err.description(),
+ SendError::Disconnected(..) => "Disconnected",
+ }
+ }
+}
+
+impl<T: Any> error::Error for TrySendError<T> {
+ fn description(&self) -> &str {
+ match *self {
+ TrySendError::Io(ref io_err) => io_err.description(),
+ TrySendError::Full(..) => "Full",
+ TrySendError::Disconnected(..) => "Disconnected",
+ }
+ }
+}
+
+impl<T> fmt::Debug for SendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_send_error(self, f)
+ }
+}
+
+impl<T> fmt::Display for SendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_send_error(self, f)
+ }
+}
+
+impl<T> fmt::Debug for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_try_send_error(self, f)
+ }
+}
+
+impl<T> fmt::Display for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_try_send_error(self, f)
+ }
+}
+
+#[inline]
+fn format_send_error<T>(e: &SendError<T>, f: &mut fmt::Formatter) -> fmt::Result {
+ match *e {
+ SendError::Io(ref io_err) => write!(f, "{}", io_err),
+ SendError::Disconnected(..) => write!(f, "Disconnected"),
+ }
+}
+
+#[inline]
+fn format_try_send_error<T>(e: &TrySendError<T>, f: &mut fmt::Formatter) -> fmt::Result {
+ match *e {
+ TrySendError::Io(ref io_err) => write!(f, "{}", io_err),
+ TrySendError::Full(..) => write!(f, "Full"),
+ TrySendError::Disconnected(..) => write!(f, "Disconnected"),
+ }
+}
diff --git a/third_party/rust/mio-extras/src/lib.rs b/third_party/rust/mio-extras/src/lib.rs
new file mode 100644
index 0000000000..69a000556c
--- /dev/null
+++ b/third_party/rust/mio-extras/src/lib.rs
@@ -0,0 +1,33 @@
+//! Extra components for use with Mio.
+#![deny(missing_docs)]
+extern crate lazycell;
+extern crate mio;
+extern crate slab;
+
+#[macro_use]
+extern crate log;
+
+pub mod channel;
+pub mod timer;
+
+// Conversion utilities
+mod convert {
+ use std::time::Duration;
+
+ const NANOS_PER_MILLI: u32 = 1_000_000;
+ const MILLIS_PER_SEC: u64 = 1_000;
+
+ /// Convert a `Duration` to milliseconds, rounding up and saturating at
+ /// `u64::MAX`.
+ ///
+    /// Saturating is fine because `u64::MAX` milliseconds is still many
+    /// millions of years.
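+    ///
+    /// For example, with the rounding above, 1 ms + 1 ns converts to 2 ms,
+    /// while exactly 1 ms stays 1 ms.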
+ pub fn millis(duration: Duration) -> u64 {
+ // Round up.
+ let millis = (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
+ duration
+ .as_secs()
+ .saturating_mul(MILLIS_PER_SEC)
+ .saturating_add(u64::from(millis))
+ }
+}
diff --git a/third_party/rust/mio-extras/src/timer.rs b/third_party/rust/mio-extras/src/timer.rs
new file mode 100644
index 0000000000..876026c99c
--- /dev/null
+++ b/third_party/rust/mio-extras/src/timer.rs
@@ -0,0 +1,751 @@
+//! Timer optimized for I/O related operations
+use crate::convert;
+use lazycell::LazyCell;
+use mio::{Evented, Poll, PollOpt, Ready, Registration, SetReadiness, Token};
+use slab::Slab;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+use std::{cmp, fmt, io, iter, thread, u64, usize};
+
+/// A timer.
+///
+/// Typical usage goes like this:
+///
+/// * register the timer with a `mio::Poll`.
+/// * set a timeout by calling `Timer::set_timeout`, providing some state to
+///   be associated with it.
+/// * poll the `Poll` to learn when a timeout has occurred.
+/// * retrieve the state associated with the timeout by calling `Timer::poll`.
+///
+/// You can also omit the `Poll` altogether, if you like, and just poll the
+/// `Timer` directly; the sketch below uses both.
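+///
+/// # Example
+///
+/// A minimal sketch of the flow above (token and delay are illustrative):
+///
+/// ```no_run
+/// use mio::{Events, Poll, PollOpt, Ready, Token};
+/// use mio_extras::timer::Timer;
+/// use std::time::Duration;
+///
+/// let poll = Poll::new().unwrap();
+/// let mut events = Events::with_capacity(8);
+/// let mut timer = Timer::default();
+///
+/// poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge()).unwrap();
+/// timer.set_timeout(Duration::from_millis(200), "state");
+///
+/// // Blocks until the timeout fires; then the state can be retrieved.
+/// poll.poll(&mut events, None).unwrap();
+/// assert_eq!(Some("state"), timer.poll());
+/// ```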
+pub struct Timer<T> {
+ // Size of each tick in milliseconds
+ tick_ms: u64,
+ // Slab of timeout entries
+ entries: Slab<Entry<T>>,
+ // Timeout wheel. Each tick, the timer will look at the next slot for
+ // timeouts that match the current tick.
+ wheel: Vec<WheelEntry>,
+ // Tick 0's time instant
+ start: Instant,
+ // The current tick
+ tick: Tick,
+ // The next entry to possibly timeout
+ next: Token,
+ // Masks the target tick to get the slot
+ mask: u64,
+ // Set on registration with Poll
+ inner: LazyCell<Inner>,
+}
+
+/// Used to create a `Timer`.
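+///
+/// A sketch with illustrative parameters; defaults are noted on each method:
+///
+/// ```
+/// use mio_extras::timer::{Builder, Timer};
+/// use std::time::Duration;
+///
+/// let timer: Timer<&'static str> = Builder::default()
+///     .tick_duration(Duration::from_millis(50))
+///     .num_slots(1 << 10)
+///     .build();
+/// ```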
+pub struct Builder {
+ // Approximate duration of each tick
+ tick: Duration,
+ // Number of slots in the timer wheel
+ num_slots: usize,
+ // Max number of timeouts that can be in flight at a given time.
+ capacity: usize,
+}
+
+/// A timeout, as returned by `Timer::set_timeout`.
+///
+/// Pass this to `Timer::cancel_timeout` to cancel the timeout.
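+///
+/// A sketch of cancellation (delay and state are illustrative):
+///
+/// ```
+/// use mio_extras::timer::Timer;
+/// use std::time::Duration;
+///
+/// let mut timer = Timer::default();
+/// let timeout = timer.set_timeout(Duration::from_millis(100), "state");
+///
+/// // Cancelling before expiry hands the state back.
+/// assert_eq!(Some("state"), timer.cancel_timeout(&timeout));
+/// ```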
+#[derive(Clone, Debug)]
+pub struct Timeout {
+ // Reference into the timer entry slab
+ token: Token,
+ // Tick that it should match up with
+ tick: u64,
+}
+
+struct Inner {
+ registration: Registration,
+ set_readiness: SetReadiness,
+ wakeup_state: WakeupState,
+ wakeup_thread: thread::JoinHandle<()>,
+}
+
+impl Drop for Inner {
+ fn drop(&mut self) {
+ // 1. Set wakeup state to TERMINATE_THREAD
+ self.wakeup_state.store(TERMINATE_THREAD, Ordering::Release);
+        // 2. Wake the thread up so it observes the new state
+ self.wakeup_thread.thread().unpark();
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+struct WheelEntry {
+ next_tick: Tick,
+ head: Token,
+}
+
+// Doubly linked list of timer entries. Allows for efficient insertion /
+// removal of timeouts.
+struct Entry<T> {
+ state: T,
+ links: EntryLinks,
+}
+
+#[derive(Copy, Clone)]
+struct EntryLinks {
+ tick: Tick,
+ prev: Token,
+ next: Token,
+}
+
+type Tick = u64;
+
+const TICK_MAX: Tick = u64::MAX;
+
+// Manages communication with wakeup thread
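+// The atomic holds the tick at which the wakeup thread should next fire:
+// `usize::MAX` means nothing is scheduled, and `TERMINATE_THREAD` (0) asks
+// the thread to exit. `schedule_readiness` only ever moves the value down.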
+type WakeupState = Arc<AtomicUsize>;
+
+const TERMINATE_THREAD: usize = 0;
+const EMPTY: Token = Token(usize::MAX);
+
+impl Builder {
+ /// Set the tick duration. Default is 100ms.
+ pub fn tick_duration(mut self, duration: Duration) -> Builder {
+ self.tick = duration;
+ self
+ }
+
+ /// Set the number of slots. Default is 256.
+ pub fn num_slots(mut self, num_slots: usize) -> Builder {
+ self.num_slots = num_slots;
+ self
+ }
+
+ /// Set the capacity. Default is 65536.
+ pub fn capacity(mut self, capacity: usize) -> Builder {
+ self.capacity = capacity;
+ self
+ }
+
+ /// Build a `Timer` with the parameters set on this `Builder`.
+ pub fn build<T>(self) -> Timer<T> {
+ Timer::new(
+ convert::millis(self.tick),
+ self.num_slots,
+ self.capacity,
+ Instant::now(),
+ )
+ }
+}
+
+impl Default for Builder {
+ fn default() -> Builder {
+ Builder {
+ tick: Duration::from_millis(100),
+ num_slots: 1 << 8,
+ capacity: 1 << 16,
+ }
+ }
+}
+
+impl<T> Timer<T> {
+ fn new(tick_ms: u64, num_slots: usize, capacity: usize, start: Instant) -> Timer<T> {
+ let num_slots = num_slots.next_power_of_two();
+ let capacity = capacity.next_power_of_two();
+ let mask = (num_slots as u64) - 1;
+ let wheel = iter::repeat(WheelEntry {
+ next_tick: TICK_MAX,
+ head: EMPTY,
+ })
+ .take(num_slots)
+ .collect();
+
+ Timer {
+ tick_ms,
+ entries: Slab::with_capacity(capacity),
+ wheel,
+ start,
+ tick: 0,
+ next: EMPTY,
+ mask,
+ inner: LazyCell::new(),
+ }
+ }
+
+ /// Set a timeout.
+ ///
+ /// When the timeout occurs, the given state becomes available via `poll`.
+ pub fn set_timeout(&mut self, delay_from_now: Duration, state: T) -> Timeout {
+ let delay_from_start = self.start.elapsed() + delay_from_now;
+ self.set_timeout_at(delay_from_start, state)
+ }
+
+ fn set_timeout_at(&mut self, delay_from_start: Duration, state: T) -> Timeout {
+ let mut tick = duration_to_tick(delay_from_start, self.tick_ms);
+ trace!(
+ "setting timeout; delay={:?}; tick={:?}; current-tick={:?}",
+ delay_from_start,
+ tick,
+ self.tick
+ );
+
+ // Always target at least 1 tick in the future
+ if tick <= self.tick {
+ tick = self.tick + 1;
+ }
+
+ self.insert(tick, state)
+ }
+
+ fn insert(&mut self, tick: Tick, state: T) -> Timeout {
+ // Get the slot for the requested tick
+ let slot = (tick & self.mask) as usize;
+ let curr = self.wheel[slot];
+
+ // Insert the new entry
+ let entry = Entry::new(state, tick, curr.head);
+ let token = Token(self.entries.insert(entry));
+
+ if curr.head != EMPTY {
+ // If there was a previous entry, set its prev pointer to the new
+ // entry
+ self.entries[curr.head.into()].links.prev = token;
+ }
+
+ // Update the head slot
+ self.wheel[slot] = WheelEntry {
+ next_tick: cmp::min(tick, curr.next_tick),
+ head: token,
+ };
+
+ self.schedule_readiness(tick);
+
+ trace!("inserted timout; slot={}; token={:?}", slot, token);
+
+ // Return the new timeout
+ Timeout { token, tick }
+ }
+
+ /// Cancel a timeout.
+ ///
+ /// If the timeout has not yet occurred, the return value holds the
+ /// associated state.
+ pub fn cancel_timeout(&mut self, timeout: &Timeout) -> Option<T> {
+ let links = match self.entries.get(timeout.token.into()) {
+ Some(e) => e.links,
+ None => return None,
+ };
+
+ // Sanity check
+ if links.tick != timeout.tick {
+ return None;
+ }
+
+ self.unlink(&links, timeout.token);
+ Some(self.entries.remove(timeout.token.into()).state)
+ }
+
+ /// Poll for an expired timer.
+ ///
+ /// The return value holds the state associated with the first expired
+ /// timer, if any.
+ pub fn poll(&mut self) -> Option<T> {
+ let target_tick = current_tick(self.start, self.tick_ms);
+ self.poll_to(target_tick)
+ }
+
+ fn poll_to(&mut self, mut target_tick: Tick) -> Option<T> {
+ trace!(
+ "tick_to; target_tick={}; current_tick={}",
+ target_tick,
+ self.tick
+ );
+
+ if target_tick < self.tick {
+ target_tick = self.tick;
+ }
+
+ while self.tick <= target_tick {
+ let curr = self.next;
+
+ trace!("ticking; curr={:?}", curr);
+
+ if curr == EMPTY {
+ self.tick += 1;
+
+ let slot = self.slot_for(self.tick);
+ self.next = self.wheel[slot].head;
+
+ // Handle the case when a slot has a single timeout which gets
+ // canceled before the timeout expires. In this case, the
+ // slot's head is EMPTY but there is a value for next_tick. Not
+ // resetting next_tick here causes the timer to get stuck in a
+ // loop.
+ if self.next == EMPTY {
+ self.wheel[slot].next_tick = TICK_MAX;
+ }
+ } else {
+ let slot = self.slot_for(self.tick);
+
+ if curr == self.wheel[slot].head {
+ self.wheel[slot].next_tick = TICK_MAX;
+ }
+
+ let links = self.entries[curr.into()].links;
+
+ if links.tick <= self.tick {
+ trace!("triggering; token={:?}", curr);
+
+ // Unlink will also advance self.next
+ self.unlink(&links, curr);
+
+ // Remove and return the token
+ return Some(self.entries.remove(curr.into()).state);
+ } else {
+ let next_tick = self.wheel[slot].next_tick;
+ self.wheel[slot].next_tick = cmp::min(next_tick, links.tick);
+ self.next = links.next;
+ }
+ }
+ }
+
+ // No more timeouts to poll
+ if let Some(inner) = self.inner.borrow() {
+ trace!("unsetting readiness");
+ let _ = inner.set_readiness.set_readiness(Ready::empty());
+
+ if let Some(tick) = self.next_tick() {
+ self.schedule_readiness(tick);
+ }
+ }
+
+ None
+ }
+
+ fn unlink(&mut self, links: &EntryLinks, token: Token) {
+ trace!(
+ "unlinking timeout; slot={}; token={:?}",
+ self.slot_for(links.tick),
+ token
+ );
+
+ if links.prev == EMPTY {
+ let slot = self.slot_for(links.tick);
+ self.wheel[slot].head = links.next;
+ } else {
+ self.entries[links.prev.into()].links.next = links.next;
+ }
+
+ if links.next != EMPTY {
+ self.entries[links.next.into()].links.prev = links.prev;
+
+ if token == self.next {
+ self.next = links.next;
+ }
+ } else if token == self.next {
+ self.next = EMPTY;
+ }
+ }
+
+ fn schedule_readiness(&self, tick: Tick) {
+ if let Some(inner) = self.inner.borrow() {
+ // Coordinate setting readiness w/ the wakeup thread
+ let mut curr = inner.wakeup_state.load(Ordering::Acquire);
+
+ loop {
+ if curr as Tick <= tick {
+ // Nothing to do, wakeup is already scheduled
+ return;
+ }
+
+ // Attempt to move the wakeup time forward
+ trace!("advancing the wakeup time; target={}; curr={}", tick, curr);
+ let actual =
+ inner
+ .wakeup_state
+ .compare_and_swap(curr, tick as usize, Ordering::Release);
+
+ if actual == curr {
+ // Signal to the wakeup thread that the wakeup time has
+ // been changed.
+ trace!("unparking wakeup thread");
+ inner.wakeup_thread.thread().unpark();
+ return;
+ }
+
+ curr = actual;
+ }
+ }
+ }
+
+ // Next tick containing a timeout
+ fn next_tick(&self) -> Option<Tick> {
+ if self.next != EMPTY {
+ let slot = self.slot_for(self.entries[self.next.into()].links.tick);
+
+ if self.wheel[slot].next_tick == self.tick {
+ // There is data ready right now
+ return Some(self.tick);
+ }
+ }
+
+ self.wheel.iter().map(|e| e.next_tick).min()
+ }
+
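+    // Map a tick to its wheel slot. `mask` is `num_slots - 1` and
+    // `num_slots` is rounded up to a power of two, so this is equivalent
+    // to `tick % num_slots`.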
+ fn slot_for(&self, tick: Tick) -> usize {
+ (self.mask & tick) as usize
+ }
+}
+
+impl<T> Default for Timer<T> {
+ fn default() -> Timer<T> {
+ Builder::default().build()
+ }
+}
+
+impl<T> Evented for Timer<T> {
+ fn register(
+ &self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt,
+ ) -> io::Result<()> {
+ if self.inner.borrow().is_some() {
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ "timer already registered",
+ ));
+ }
+
+ let (registration, set_readiness) = Registration::new2();
+ poll.register(&registration, token, interest, opts)?;
+ let wakeup_state = Arc::new(AtomicUsize::new(usize::MAX));
+ let thread_handle = spawn_wakeup_thread(
+ Arc::clone(&wakeup_state),
+ set_readiness.clone(),
+ self.start,
+ self.tick_ms,
+ );
+
+ self.inner
+ .fill(Inner {
+ registration,
+ set_readiness,
+ wakeup_state,
+ wakeup_thread: thread_handle,
+ })
+ .expect("timer already registered");
+
+ if let Some(next_tick) = self.next_tick() {
+ self.schedule_readiness(next_tick);
+ }
+
+ Ok(())
+ }
+
+ fn reregister(
+ &self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt,
+ ) -> io::Result<()> {
+ match self.inner.borrow() {
+ Some(inner) => poll.reregister(&inner.registration, token, interest, opts),
+ None => Err(io::Error::new(
+ io::ErrorKind::Other,
+ "receiver not registered",
+ )),
+ }
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ match self.inner.borrow() {
+ Some(inner) => poll.deregister(&inner.registration),
+ None => Err(io::Error::new(
+ io::ErrorKind::Other,
+ "receiver not registered",
+ )),
+ }
+ }
+}
+
+impl fmt::Debug for Inner {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Inner")
+ .field("registration", &self.registration)
+ .field("wakeup_state", &self.wakeup_state.load(Ordering::Relaxed))
+ .finish()
+ }
+}
+
+fn spawn_wakeup_thread(
+ state: WakeupState,
+ set_readiness: SetReadiness,
+ start: Instant,
+ tick_ms: u64,
+) -> thread::JoinHandle<()> {
+ thread::spawn(move || {
+ let mut sleep_until_tick = state.load(Ordering::Acquire) as Tick;
+
+ loop {
+ if sleep_until_tick == TERMINATE_THREAD as Tick {
+ return;
+ }
+
+ let now_tick = current_tick(start, tick_ms);
+
+ trace!(
+ "wakeup thread: sleep_until_tick={:?}; now_tick={:?}",
+ sleep_until_tick,
+ now_tick
+ );
+
+ if now_tick < sleep_until_tick {
+ // Calling park_timeout with u64::MAX leads to undefined
+ // behavior in pthread, causing the park to return immediately
+                // and the thread to spin tightly. So when the sleep duration
+                // would overflow, fall back to a plain blocking park.
+ match tick_ms.checked_mul(sleep_until_tick - now_tick) {
+ Some(sleep_duration) => {
+ trace!(
+ "sleeping; tick_ms={}; now_tick={}; sleep_until_tick={}; duration={:?}",
+ tick_ms,
+ now_tick,
+ sleep_until_tick,
+ sleep_duration
+ );
+ thread::park_timeout(Duration::from_millis(sleep_duration));
+ }
+ None => {
+ trace!(
+ "sleeping; tick_ms={}; now_tick={}; blocking sleep",
+ tick_ms,
+ now_tick
+ );
+ thread::park();
+ }
+ }
+ sleep_until_tick = state.load(Ordering::Acquire) as Tick;
+ } else {
+ let actual =
+ state.compare_and_swap(sleep_until_tick as usize, usize::MAX, Ordering::AcqRel)
+ as Tick;
+
+ if actual == sleep_until_tick {
+ trace!("setting readiness from wakeup thread");
+ let _ = set_readiness.set_readiness(Ready::readable());
+ sleep_until_tick = usize::MAX as Tick;
+ } else {
+ sleep_until_tick = actual as Tick;
+ }
+ }
+ }
+ })
+}
+
+fn duration_to_tick(elapsed: Duration, tick_ms: u64) -> Tick {
+    // Convert the elapsed time to a tick, rounding half up to the nearest
+    // tick boundary.
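+    // e.g. with tick_ms = 100: 149 ms -> tick 1, 150 ms -> tick 2.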
+ let elapsed_ms = convert::millis(elapsed);
+ elapsed_ms.saturating_add(tick_ms / 2) / tick_ms
+}
+
+fn current_tick(start: Instant, tick_ms: u64) -> Tick {
+ duration_to_tick(start.elapsed(), tick_ms)
+}
+
+impl<T> Entry<T> {
+ fn new(state: T, tick: u64, next: Token) -> Entry<T> {
+ Entry {
+ state,
+ links: EntryLinks {
+ tick,
+ prev: EMPTY,
+ next,
+ },
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use std::time::{Duration, Instant};
+
+ #[test]
+ pub fn test_timeout_next_tick() {
+ let mut t = timer();
+ let mut tick;
+
+ t.set_timeout_at(Duration::from_millis(100), "a");
+
+ tick = ms_to_tick(&t, 50);
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 100);
+ assert_eq!(Some("a"), t.poll_to(tick));
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 150);
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 200);
+ assert_eq!(None, t.poll_to(tick));
+
+ assert_eq!(count(&t), 0);
+ }
+
+ #[test]
+ pub fn test_clearing_timeout() {
+ let mut t = timer();
+ let mut tick;
+
+ let to = t.set_timeout_at(Duration::from_millis(100), "a");
+ assert_eq!("a", t.cancel_timeout(&to).unwrap());
+
+ tick = ms_to_tick(&t, 100);
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 200);
+ assert_eq!(None, t.poll_to(tick));
+
+ assert_eq!(count(&t), 0);
+ }
+
+ #[test]
+ pub fn test_multiple_timeouts_same_tick() {
+ let mut t = timer();
+ let mut tick;
+
+ t.set_timeout_at(Duration::from_millis(100), "a");
+ t.set_timeout_at(Duration::from_millis(100), "b");
+
+ let mut rcv = vec![];
+
+ tick = ms_to_tick(&t, 100);
+ rcv.push(t.poll_to(tick).unwrap());
+ rcv.push(t.poll_to(tick).unwrap());
+
+ assert_eq!(None, t.poll_to(tick));
+
+ rcv.sort();
+ assert!(rcv == ["a", "b"], "actual={:?}", rcv);
+
+ tick = ms_to_tick(&t, 200);
+ assert_eq!(None, t.poll_to(tick));
+
+ assert_eq!(count(&t), 0);
+ }
+
+ #[test]
+ pub fn test_multiple_timeouts_diff_tick() {
+ let mut t = timer();
+ let mut tick;
+
+ t.set_timeout_at(Duration::from_millis(110), "a");
+ t.set_timeout_at(Duration::from_millis(220), "b");
+ t.set_timeout_at(Duration::from_millis(230), "c");
+ t.set_timeout_at(Duration::from_millis(440), "d");
+ t.set_timeout_at(Duration::from_millis(560), "e");
+
+ tick = ms_to_tick(&t, 100);
+ assert_eq!(Some("a"), t.poll_to(tick));
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 200);
+ assert_eq!(Some("c"), t.poll_to(tick));
+ assert_eq!(Some("b"), t.poll_to(tick));
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 300);
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 400);
+ assert_eq!(Some("d"), t.poll_to(tick));
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 500);
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 600);
+ assert_eq!(Some("e"), t.poll_to(tick));
+ assert_eq!(None, t.poll_to(tick));
+ }
+
+ #[test]
+ pub fn test_catching_up() {
+ let mut t = timer();
+
+ t.set_timeout_at(Duration::from_millis(110), "a");
+ t.set_timeout_at(Duration::from_millis(220), "b");
+ t.set_timeout_at(Duration::from_millis(230), "c");
+ t.set_timeout_at(Duration::from_millis(440), "d");
+
+ let tick = ms_to_tick(&t, 600);
+ assert_eq!(Some("a"), t.poll_to(tick));
+ assert_eq!(Some("c"), t.poll_to(tick));
+ assert_eq!(Some("b"), t.poll_to(tick));
+ assert_eq!(Some("d"), t.poll_to(tick));
+ assert_eq!(None, t.poll_to(tick));
+ }
+
+ #[test]
+ pub fn test_timeout_hash_collision() {
+ let mut t = timer();
+ let mut tick;
+
+ t.set_timeout_at(Duration::from_millis(100), "a");
+ t.set_timeout_at(Duration::from_millis(100 + TICK * SLOTS as u64), "b");
+
+ tick = ms_to_tick(&t, 100);
+ assert_eq!(Some("a"), t.poll_to(tick));
+ assert_eq!(1, count(&t));
+
+ tick = ms_to_tick(&t, 200);
+ assert_eq!(None, t.poll_to(tick));
+ assert_eq!(1, count(&t));
+
+ tick = ms_to_tick(&t, 100 + TICK * SLOTS as u64);
+ assert_eq!(Some("b"), t.poll_to(tick));
+ assert_eq!(0, count(&t));
+ }
+
+ #[test]
+ pub fn test_clearing_timeout_between_triggers() {
+ let mut t = timer();
+ let mut tick;
+
+ let a = t.set_timeout_at(Duration::from_millis(100), "a");
+ let _ = t.set_timeout_at(Duration::from_millis(100), "b");
+ let _ = t.set_timeout_at(Duration::from_millis(200), "c");
+
+ tick = ms_to_tick(&t, 100);
+ assert_eq!(Some("b"), t.poll_to(tick));
+ assert_eq!(2, count(&t));
+
+ t.cancel_timeout(&a);
+ assert_eq!(1, count(&t));
+
+ assert_eq!(None, t.poll_to(tick));
+
+ tick = ms_to_tick(&t, 200);
+ assert_eq!(Some("c"), t.poll_to(tick));
+ assert_eq!(0, count(&t));
+ }
+
+ const TICK: u64 = 100;
+ const SLOTS: usize = 16;
+ const CAPACITY: usize = 32;
+
+ fn count<T>(timer: &Timer<T>) -> usize {
+ timer.entries.len()
+ }
+
+ fn timer() -> Timer<&'static str> {
+ Timer::new(TICK, SLOTS, CAPACITY, Instant::now())
+ }
+
+ fn ms_to_tick<T>(timer: &Timer<T>, ms: u64) -> u64 {
+ ms / timer.tick_ms
+ }
+}
diff --git a/third_party/rust/mio-extras/test/mod.rs b/third_party/rust/mio-extras/test/mod.rs
new file mode 100644
index 0000000000..217069466a
--- /dev/null
+++ b/third_party/rust/mio-extras/test/mod.rs
@@ -0,0 +1,45 @@
+extern crate mio;
+extern crate mio_extras;
+
+use mio::event::Event;
+use mio::{Events, Poll};
+use std::time::Duration;
+
+mod test_poll_channel;
+mod test_timer;
+
+pub fn expect_events(
+ poll: &Poll,
+ event_buffer: &mut Events,
+ poll_try_count: usize,
+ mut expected: Vec<Event>,
+) {
+ const MS: u64 = 1_000;
+
+ for _ in 0..poll_try_count {
+ poll.poll(event_buffer, Some(Duration::from_millis(MS)))
+ .unwrap();
+ for event in event_buffer.iter() {
+            let pos_opt = expected.iter().position(|exp_event| {
+                (event.token() == exp_event.token())
+                    && event.readiness().contains(exp_event.readiness())
+            });
+ if let Some(pos) = pos_opt {
+ expected.remove(pos);
+ }
+ }
+
+ if expected.is_empty() {
+ break;
+ }
+ }
+
+ assert!(
+ expected.is_empty(),
+ "The following expected events were not found: {:?}",
+ expected
+ );
+}
diff --git a/third_party/rust/mio-extras/test/test_poll_channel.rs b/third_party/rust/mio-extras/test/test_poll_channel.rs
new file mode 100644
index 0000000000..7314f26661
--- /dev/null
+++ b/third_party/rust/mio-extras/test/test_poll_channel.rs
@@ -0,0 +1,362 @@
+use crate::expect_events;
+use mio::event::Event;
+use mio::{Events, Poll, PollOpt, Ready, Token};
+use mio_extras::channel;
+use std::sync::mpsc::TryRecvError;
+use std::thread;
+use std::time::Duration;
+
+#[test]
+pub fn test_poll_channel_edge() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let (tx, rx) = channel::channel();
+
+ poll.register(&rx, Token(123), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ // Wait, but nothing should happen
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ // Push the value
+ tx.send("hello").unwrap();
+
+ // Polling will contain the event
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(1, num);
+
+ let event = events.iter().next().unwrap();
+ assert_eq!(event.token(), Token(123));
+ assert_eq!(event.readiness(), Ready::readable());
+
+ // Poll again and there should be no events
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ // Read the value
+ assert_eq!("hello", rx.try_recv().unwrap());
+
+ // Poll again, nothing
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ // Push a value
+ tx.send("goodbye").unwrap();
+
+ // Have an event
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(1, num);
+
+ let event = events.iter().next().unwrap();
+ assert_eq!(event.token(), Token(123));
+ assert_eq!(event.readiness(), Ready::readable());
+
+ // Read the value
+ rx.try_recv().unwrap();
+
+ // Drop the sender half
+ drop(tx);
+
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(1, num);
+
+ let event = events.iter().next().unwrap();
+ assert_eq!(event.token(), Token(123));
+ assert_eq!(event.readiness(), Ready::readable());
+
+ match rx.try_recv() {
+ Err(TryRecvError::Disconnected) => {}
+ no => panic!("unexpected value {:?}", no),
+ }
+}
+
+#[test]
+pub fn test_poll_channel_oneshot() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let (tx, rx) = channel::channel();
+
+ poll.register(
+ &rx,
+ Token(123),
+ Ready::readable(),
+ PollOpt::edge() | PollOpt::oneshot(),
+ )
+ .unwrap();
+
+ // Wait, but nothing should happen
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ // Push the value
+ tx.send("hello").unwrap();
+
+ // Polling will contain the event
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(1, num);
+
+ let event = events.iter().next().unwrap();
+ assert_eq!(event.token(), Token(123));
+ assert_eq!(event.readiness(), Ready::readable());
+
+ // Poll again and there should be no events
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ // Read the value
+ assert_eq!("hello", rx.try_recv().unwrap());
+
+ // Poll again, nothing
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ // Push a value
+ tx.send("goodbye").unwrap();
+
+ // Poll again, nothing
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ // Reregistering will re-trigger the notification
+ for _ in 0..3 {
+ poll.reregister(
+ &rx,
+ Token(123),
+ Ready::readable(),
+ PollOpt::edge() | PollOpt::oneshot(),
+ )
+ .unwrap();
+
+ // Have an event
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(1, num);
+
+ let event = events.iter().next().unwrap();
+ assert_eq!(event.token(), Token(123));
+ assert_eq!(event.readiness(), Ready::readable());
+ }
+
+ // Get the value
+ assert_eq!("goodbye", rx.try_recv().unwrap());
+
+ poll.reregister(
+ &rx,
+ Token(123),
+ Ready::readable(),
+ PollOpt::edge() | PollOpt::oneshot(),
+ )
+ .unwrap();
+
+    // No event; the queued value was already received
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ poll.reregister(
+ &rx,
+ Token(123),
+ Ready::readable(),
+ PollOpt::edge() | PollOpt::oneshot(),
+ )
+ .unwrap();
+
+    // Still no event
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+}
+
+#[test]
+pub fn test_poll_channel_level() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let (tx, rx) = channel::channel();
+
+ poll.register(&rx, Token(123), Ready::readable(), PollOpt::level())
+ .unwrap();
+
+ // Wait, but nothing should happen
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ // Push the value
+ tx.send("hello").unwrap();
+
+ // Polling will contain the event
+ for i in 0..5 {
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert!(1 == num, "actually got {} on iteration {}", num, i);
+
+ let event = events.iter().next().unwrap();
+ assert_eq!(event.token(), Token(123));
+ assert_eq!(event.readiness(), Ready::readable());
+ }
+
+ // Read the value
+ assert_eq!("hello", rx.try_recv().unwrap());
+
+ // Wait, but nothing should happen
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+}
+
+#[test]
+pub fn test_poll_channel_writable() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let (tx, rx) = channel::channel();
+
+ poll.register(&rx, Token(123), Ready::writable(), PollOpt::edge())
+ .unwrap();
+
+ // Wait, but nothing should happen
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+
+ // Push the value
+ tx.send("hello").unwrap();
+
+ // Wait, but nothing should happen
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+}
+
+#[test]
+pub fn test_dropping_receive_before_poll() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let (tx, rx) = channel::channel();
+
+ poll.register(&rx, Token(123), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ // Push the value
+ tx.send("hello").unwrap();
+
+ // Drop the receive end
+ drop(rx);
+
+ // Wait, but nothing should happen
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(0, num);
+}
+
+#[test]
+pub fn test_mixing_channel_with_socket() {
+ use mio::net::{TcpListener, TcpStream};
+
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let (tx, rx) = channel::channel();
+
+ // Create the listener
+ let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+
+ // Register the listener with `Poll`
+ poll.register(&l, Token(0), Ready::readable(), PollOpt::edge())
+ .unwrap();
+ poll.register(&rx, Token(1), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ // Push a value onto the channel
+ tx.send("hello").unwrap();
+
+ // Connect a TCP socket
+ let s1 = TcpStream::connect(&l.local_addr().unwrap()).unwrap();
+
+ // Register the socket
+ poll.register(&s1, Token(2), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ // Sleep a bit to ensure it arrives at dest
+ thread::sleep(Duration::from_millis(250));
+
+ expect_events(
+ &poll,
+ &mut events,
+ 2,
+ vec![
+ Event::new(Ready::empty(), Token(0)),
+ Event::new(Ready::empty(), Token(1)),
+ ],
+ );
+}
+
+#[test]
+pub fn test_sending_from_other_thread_while_polling() {
+ const ITERATIONS: usize = 20;
+ const THREADS: usize = 5;
+
+ // Make sure to run multiple times
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+
+ for _ in 0..ITERATIONS {
+ let (tx, rx) = channel::channel();
+ poll.register(&rx, Token(0), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ for _ in 0..THREADS {
+ let tx = tx.clone();
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(50));
+ tx.send("ping").unwrap();
+ });
+ }
+
+ let mut recv = 0;
+
+ while recv < THREADS {
+ let num = poll.poll(&mut events, None).unwrap();
+
+ if num != 0 {
+ assert_eq!(1, num);
+ assert_eq!(events.iter().next().unwrap().token(), Token(0));
+
+ while let Ok(_) = rx.try_recv() {
+ recv += 1;
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/mio-extras/test/test_timer.rs b/third_party/rust/mio-extras/test/test_timer.rs
new file mode 100644
index 0000000000..ac49833523
--- /dev/null
+++ b/third_party/rust/mio-extras/test/test_timer.rs
@@ -0,0 +1,308 @@
+use mio::{Events, Poll, PollOpt, Ready, Token};
+use mio_extras::timer::{self, Timer};
+
+use std::thread;
+use std::time::Duration;
+
+#[test]
+fn test_basic_timer_without_poll() {
+ let mut timer = Timer::default();
+
+ // Set the timeout
+ timer.set_timeout(Duration::from_millis(200), "hello");
+
+ // Nothing when polled immediately
+ assert!(timer.poll().is_none());
+
+ // Wait for the timeout
+ thread::sleep(Duration::from_millis(250));
+
+ assert_eq!(Some("hello"), timer.poll());
+ assert!(timer.poll().is_none());
+}
+
+#[test]
+fn test_basic_timer_with_poll_edge_set_timeout_after_register() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let mut timer = Timer::default();
+
+ poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge())
+ .unwrap();
+ timer.set_timeout(Duration::from_millis(200), "hello");
+
+ let elapsed = elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+
+ assert_eq!(num, 1);
+ let event = events.iter().next().unwrap();
+ assert_eq!(Token(0), event.token());
+ assert_eq!(Ready::readable(), event.readiness());
+ });
+
+ assert!(is_about(200, elapsed), "actual={:?}", elapsed);
+ assert_eq!("hello", timer.poll().unwrap());
+ assert_eq!(None, timer.poll());
+}
+
+#[test]
+fn test_basic_timer_with_poll_edge_set_timeout_before_register() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let mut timer = Timer::default();
+
+ timer.set_timeout(Duration::from_millis(200), "hello");
+ poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ let elapsed = elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+
+ assert_eq!(num, 1);
+ let event = events.iter().next().unwrap();
+ assert_eq!(Token(0), event.token());
+ assert_eq!(Ready::readable(), event.readiness());
+ });
+
+ assert!(is_about(200, elapsed), "actual={:?}", elapsed);
+ assert_eq!("hello", timer.poll().unwrap());
+ assert_eq!(None, timer.poll());
+}
+
+#[test]
+fn test_setting_later_timeout_then_earlier_one() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let mut timer = Timer::default();
+
+ poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ timer.set_timeout(Duration::from_millis(600), "hello");
+ timer.set_timeout(Duration::from_millis(200), "world");
+
+ let elapsed = elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+
+ assert_eq!(num, 1);
+ let event = events.iter().next().unwrap();
+ assert_eq!(Token(0), event.token());
+ assert_eq!(Ready::readable(), event.readiness());
+ });
+
+ assert!(is_about(200, elapsed), "actual={:?}", elapsed);
+ assert_eq!("world", timer.poll().unwrap());
+ assert_eq!(None, timer.poll());
+
+ let elapsed = self::elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+
+ assert_eq!(num, 1);
+ let event = events.iter().next().unwrap();
+ assert_eq!(Token(0), event.token());
+ assert_eq!(Ready::readable(), event.readiness());
+ });
+
+ assert!(is_about(400, elapsed), "actual={:?}", elapsed);
+ assert_eq!("hello", timer.poll().unwrap());
+ assert_eq!(None, timer.poll());
+}
+
+#[test]
+fn test_timer_with_looping_wheel() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let mut timer = timer::Builder::default().num_slots(2).build();
+
+ poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ const TOKENS: &[&str] = &["hello", "world", "some", "thing"];
+
+ for (i, msg) in TOKENS.iter().enumerate() {
+ timer.set_timeout(Duration::from_millis(500 * (i as u64 + 1)), msg);
+ }
+
+ for msg in TOKENS {
+ let elapsed = elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+
+ assert_eq!(num, 1);
+ let event = events.iter().next().unwrap();
+ assert_eq!(Token(0), event.token());
+ assert_eq!(Ready::readable(), event.readiness());
+ });
+
+ assert!(
+ is_about(500, elapsed),
+ "actual={:?}; msg={:?}",
+ elapsed,
+ msg
+ );
+ assert_eq!(Some(msg), timer.poll());
+ assert_eq!(None, timer.poll());
+ }
+}
+
+#[test]
+fn test_edge_without_polling() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let mut timer = Timer::default();
+
+ poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ timer.set_timeout(Duration::from_millis(400), "hello");
+
+ let ms = elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+ assert_eq!(num, 1);
+ let event = events.iter().next().unwrap();
+ assert_eq!(Token(0), event.token());
+ assert_eq!(Ready::readable(), event.readiness());
+ });
+
+ assert!(is_about(400, ms), "actual={:?}", ms);
+
+ let ms = elapsed(|| {
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(num, 0);
+ });
+
+ assert!(is_about(300, ms), "actual={:?}", ms);
+}
+
+#[test]
+fn test_level_triggered() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let mut timer = Timer::default();
+
+ poll.register(&timer, Token(0), Ready::readable(), PollOpt::level())
+ .unwrap();
+
+ timer.set_timeout(Duration::from_millis(400), "hello");
+
+ let ms = elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+ assert_eq!(num, 1);
+ let event = events.iter().next().unwrap();
+ assert_eq!(Token(0), event.token());
+ assert_eq!(Ready::readable(), event.readiness());
+ });
+
+ assert!(is_about(400, ms), "actual={:?}", ms);
+
+ let ms = elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+ assert_eq!(num, 1);
+ let event = events.iter().next().unwrap();
+ assert_eq!(Token(0), event.token());
+ assert_eq!(Ready::readable(), event.readiness());
+ });
+
+ assert!(is_about(0, ms), "actual={:?}", ms);
+}
+
+#[test]
+fn test_edge_oneshot_triggered() {
+ let poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+ let mut timer = Timer::default();
+
+ poll.register(
+ &timer,
+ Token(0),
+ Ready::readable(),
+ PollOpt::edge() | PollOpt::oneshot(),
+ )
+ .unwrap();
+
+ timer.set_timeout(Duration::from_millis(200), "hello");
+
+ let ms = elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+ assert_eq!(num, 1);
+ });
+
+ assert!(is_about(200, ms), "actual={:?}", ms);
+
+ let ms = elapsed(|| {
+ let num = poll
+ .poll(&mut events, Some(Duration::from_millis(300)))
+ .unwrap();
+ assert_eq!(num, 0);
+ });
+
+ assert!(is_about(300, ms), "actual={:?}", ms);
+
+ poll.reregister(
+ &timer,
+ Token(0),
+ Ready::readable(),
+ PollOpt::edge() | PollOpt::oneshot(),
+ )
+ .unwrap();
+
+ let ms = elapsed(|| {
+ let num = poll.poll(&mut events, None).unwrap();
+ assert_eq!(num, 1);
+ });
+
+ assert!(is_about(0, ms));
+}
+
+#[test]
+fn test_cancel_timeout() {
+ use std::time::Instant;
+
+ let mut timer: Timer<u32> = Default::default();
+ let timeout = timer.set_timeout(Duration::from_millis(200), 1);
+ timer.cancel_timeout(&timeout);
+
+ let poll = Poll::new().unwrap();
+ poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge())
+ .unwrap();
+
+ let mut events = Events::with_capacity(16);
+
+ let now = Instant::now();
+ let dur = Duration::from_millis(500);
+ let mut i = 0;
+
+ while Instant::now() - now < dur {
+ if i > 10 {
+ panic!("iterated too many times");
+ }
+
+ i += 1;
+
+ let elapsed = Instant::now() - now;
+
+ poll.poll(&mut events, Some(dur - elapsed)).unwrap();
+
+ while let Some(_) = timer.poll() {
+ panic!("did not expect to receive timeout");
+ }
+ }
+}
+
+fn elapsed<F: FnMut()>(mut f: F) -> u64 {
+ use std::time::Instant;
+
+ let now = Instant::now();
+
+ f();
+
+ let elapsed = now.elapsed();
+ elapsed.as_secs() * 1000 + u64::from(elapsed.subsec_millis())
+}
+
+fn is_about(expect: u64, val: u64) -> bool {
+ const WINDOW: i64 = 200;
+
+ ((expect as i64) - (val as i64)).abs() <= WINDOW
+}
diff --git a/third_party/rust/mio/.cargo-checksum.json b/third_party/rust/mio/.cargo-checksum.json
new file mode 100644
index 0000000000..db0a0e67ae
--- /dev/null
+++ b/third_party/rust/mio/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"3122f4f3d0a9037b7751165fd0f85dc4cb5f6ac477b125209592677b46dfd3e5","Cargo.lock":"63104f5dc52f4db8f3f07baf3f864981d0c8e81ab2ce48c42f77e2133a599750","Cargo.toml":"ec9040c14bcf46f749e9be6b6ddcd526747f7eaa4a1f7a528a2ce4e54e508973","LICENSE":"07919255c7e04793d8ea760d6c2ce32d19f9ff02bdbdde3ce90b1e1880929a9b","README.md":"cbdf6b961e2e6d708e4ce724c0e59b63e41644a1604e9d143d2a9bfff63f9ffa","examples/tcp_server.rs":"18b63bbde45de60f6bd402f0255a5ce6bc8ae13bc8ec98f41b8c635ee1f7a810","examples/udp_server.rs":"697037952d7e7cf1a83c63b81208e3d50bd405c138ad6a8eb021d5ad016916fd","src/event/event.rs":"d77844aa192a732ac049a789ebaac496a5c1342b9f07846907f215503edfbdbd","src/event/events.rs":"5cad85e0d0c9e619b6017f868c681ed0dc023e7aae8c0c5c0c7337b2fa96a8a6","src/event/mod.rs":"c129c42114867e3e7337a6605952db8cef8a7ef0938f67a28fa68e73fc420c6a","src/event/source.rs":"7ffe6d1ba738b5c671d7c450b8833abf0983f7294e0edec49c4203c2d7f44ddb","src/interest.rs":"ecffc07ae17e7f73d69628dd94678f2df2f766d807dbef867505ef390c47bcdf","src/io_source.rs":"f9e68e1dc65b322fbc471f57eecb7052351c5c9919d0b0aed1771c0a8e938b71","src/lib.rs":"fed93472856ce7a0b526bb80e80b0ed2f2017268c3fa440710d6aa396495b8fb","src/macros.rs":"c484744396b163555c8593cf15929201dd23b3ff75a4be03b8dc34e46f6d9caa","src/net/mod.rs":"2b1efac93ebdb7352e44d07645e1d28e3fb28a0938d85a71c4ce89c979008200","src/net/tcp/listener.rs":"51afbf8e3d565289df6164a64b13abde257654050bd59693cfbc9157767f9444","src/net/tcp/mod.rs":"f2c50c1d338a69d704e18727142d8495be2927b390d7bbb77cc0cf4e945ed83a","src/net/tcp/stream.rs":"8bcb20d1874ebc3e16c972107059a23fa0df5e451605ba65a47b28d792765284","src/net/udp.rs":"221bdffe16cf8478ecd9fb95fcb07b2490f20cd1b1b2cf0baf7ea51169d9fb03","src/net/uds/datagram.rs":"980944654a03f61311a7208043011db4831d40b92bd440ed57515cac6104ff2f","src/net/uds/listener.rs":"714b1638a522cc95bb70b137441bc1282684fbcde4432b6872ad052f66670164","src/net/uds/mod.rs":"70d4368adae74652d46771b29e633396e5f1b70e9b7d1370cf5fec9a78605c04","src/net/uds/stream.rs":"fa6dd11e5736c8372037945f96c5b0c7adee1a29c2b194bc9bf9f7cc2db7f1f4","src/poll.rs":"0aa428f111810b62080653bdc76c4f73a33d0e47b0368aa687e366588c839b8d","src/sys/mod.rs":"58d6750b37d4bd94ac90e9aaeb88aa79a65f379931857691ae5e7df16a55c15d","src/sys/shell/mod.rs":"6c8b107397e110e916c02e0f1f1f03344974456a7e929a731eeb462d5ba13968","src/sys/shell/selector.rs":"9f6eee2b858c68012962cfcd7ed747acd198c518ddb807ada2a30eb1faf4c052","src/sys/shell/tcp.rs":"020d1885c3bd8f6d74e69f4d068a186fadff605fedca3e811209d857e41f575b","src/sys/shell/udp.rs":"fc830fea77935d141f87179c5ceec95318c67d24060051c155deb5ace14d7134","src/sys/shell/uds.rs":"10c2933423e251fca678322ff9fc7501fcaacdfa769307399cb5dfc7bef63ec3","src/sys/shell/waker.rs":"81bc18cf52345a8d17e8142291e43dd70a36dfd9f5eb486f3e787998ee4451dc","src/sys/unix/mod.rs":"d2b03e3d166d174f1ad283e6a2aa1c3287b4d6e0affb3ed7ea7e033fbbfc5f1d","src/sys/unix/net.rs":"32c15a72d04b76c0c18944de590eb271787442c88a718b6e5f03b0830c533cd3","src/sys/unix/pipe.rs":"a0d308087b9258963014e3b7e61ece41eee0a7b0ee2240fe8b24f2d750238029","src/sys/unix/selector/epoll.rs":"1dfc3b77752f1948b34680f86c77073756cc212d04c209d53cb8cd895b6558c4","src/sys/unix/selector/kqueue.rs":"ce92b47c5dabf38da32cba155cdba74a52c853eb7938ed9cdeca8dc23d0e8714","src/sys/unix/selector/mod.rs":"d4eedd1e067b726fc8e2eb5d662ce540f46675810934a57085f3777901a2e6ce","src/sys/unix/sourcefd.rs":"18292d551d78a4ae6900ee31c969909a784d42b49184b7e0581c08f170dabf04","src/sys/unix/tcp.rs":"369d5520416eb4a7af0cc180bb2ebd2b1b4176f1e7276676427f00d95d78b8fd","src/sys/
unix/udp.rs":"babf7d444eba0db29b130f0e254b0715e2566e7ea5b0f0e35cddf72d83f40b3b","src/sys/unix/uds/datagram.rs":"a7da7e4292ac42ccc2be5ad5909ced1d20377ace0198868fed89e086edc016f7","src/sys/unix/uds/listener.rs":"d98a223ed1c98c92cabe0124ae08fba2771cf801ed048a44408afb3afc459303","src/sys/unix/uds/mod.rs":"5b2cecdeada87caf1ef44dfe18d3536ac4883f4f72b3cae959bbf2044a6d2dcc","src/sys/unix/uds/socketaddr.rs":"c6784b61c6705c222e41db8abddc9458c05c70de3bc53e9a7b4aec0ea371058e","src/sys/unix/uds/stream.rs":"f785ff59f9e8ddec8519f5bfbd93150f26674efa91742909eb839d44250fe334","src/sys/unix/waker.rs":"10dda922b21ad0a32bf748bf9087121c4f57395452be0bb619503821f863dede","src/sys/windows/afd.rs":"27bf224b2626c5930a0a50043e95a13e3de29d73e360dc7dfa08ef581c670380","src/sys/windows/event.rs":"75539e2db54858605e565742a68242b2a69fe9b8cd8d60ff7a109cf43e5b5930","src/sys/windows/io_status_block.rs":"9920f2b12ebd90f1c42cd73fff775d015321dd2186d58fd10bca8f349489e5e0","src/sys/windows/mod.rs":"c324e7f2e07757563fa5e5ccf05a06ef0eac92394c23e36b155c70e895e53713","src/sys/windows/named_pipe.rs":"619101d86ca57a3a3a726ee465eed00840acef3d5b20e5fffefeaae1d1fa64d5","src/sys/windows/net.rs":"e1c526a18ea6319ac4b4736dbe0bd5ba85d6e601f708fed49174561d459617fb","src/sys/windows/overlapped.rs":"a041d2a6f03eb5e005ed14f23bc5cdf12dfc33036a7644dc925518ad944987f0","src/sys/windows/selector.rs":"d0718e495a85a67886ba3202b0a84df25aef23a4a8c53ff08b73e1795dded74e","src/sys/windows/tcp.rs":"2b90ec2950274bc4f15ba0166a6addbc447ad61e3eb4e66fbd8237c24983383b","src/sys/windows/udp.rs":"07b104a6be6cba40acf9933cd6283260a2e272b7d50743d836aa13f4bc4b07f3","src/sys/windows/waker.rs":"acae348778e4f60762b9e4cf083ff02305d9a7a14216456154a9be7f7af4c6c9","src/token.rs":"4e64c8e337fbee4e3a2f5e8661241b5261c9c5487e20fa73425d89e2e152e8de","src/waker.rs":"5faa61a35b413302d4c514abe4f4014abbe1665dff0f24a55feb62e9b521e26f"},"package":"ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2"} \ No newline at end of file
diff --git a/third_party/rust/mio/CHANGELOG.md b/third_party/rust/mio/CHANGELOG.md
new file mode 100644
index 0000000000..65dce4f328
--- /dev/null
+++ b/third_party/rust/mio/CHANGELOG.md
@@ -0,0 +1,526 @@
+# 0.8.0
+
+## Removed
+
+* Deprecated features (https://github.com/tokio-rs/mio/commit/105f8f2afb57b01ddea716a0aa9720f226c520e3):
+ * extra-docs (always enabled)
+ * tcp (replaced with "net" feature).
+ * udp (replaced with "net" feature).
+ * uds (replaced with "net" feature).
+ * pipe (replaced with "os-ext" feature).
+* `TcpSocket` type
+ (https://github.com/tokio-rs/mio/commit/02e9be41f27daf822575444fdd2b3067433a5996).
+ The socket2 crate provides all the functionality and more; a hedged migration sketch follows this list.
+* Support for Solaris, it never really worked anyway
+ (https://github.com/tokio-rs/mio/pull/1528).
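+
+A hedged migration sketch (not from the upstream changelog), assuming socket2
+0.4 or later is added as a dependency; the builder calls are socket2's API,
+not Mio's:
+
+```rust
+use std::net::SocketAddr;
+
+use mio::net::TcpListener;
+use socket2::{Domain, Socket, Type};
+
+fn bind_listener(addr: SocketAddr) -> std::io::Result<TcpListener> {
+    // socket2 exposes the options the removed `TcpSocket` used to provide.
+    let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    socket.set_reuse_address(true)?;
+    socket.bind(&addr.into())?;
+    socket.listen(1024)?;
+    // Mio requires sockets to be in non-blocking mode.
+    socket.set_nonblocking(true)?;
+    // Convert to a std listener, then wrap it for use with `Poll`.
+    Ok(TcpListener::from_std(socket.into()))
+}
+```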
+
+## Changes
+
+* Update minimum Rustc version (MSRV) to 1.46.0
+ (https://github.com/tokio-rs/mio/commit/5c577efecd23750a9a3e0f6ad080ab98f14a255d).
+
+## Added
+
+* `UdpSocket::peer_addr`
+ (https://github.com/tokio-rs/mio/commit/5fc104d08e0e74c8a19247f7cba0f058699fc438).
+
+# 0.7.14
+
+## Fixes
+
+* Remove use of an unsound internal macro (#1519).
+
+## Added
+
+* `sys::unix::SocketAddr::as_abstract_namespace()` (#1520).
+
+# 0.7.13
+
+## Fixes
+
+* Fix `Registry::try_clone` invalid usage of `F_DUPFD_CLOEXEC` (#1497,
+ https://github.com/tokio-rs/mio/commit/2883f5c1f35bf1a59682c5ffc4afe6b97d7d6e68).
+
+# 0.7.12 (yanked)
+
+## Fixes
+
+* Set `FD_CLOEXEC` when calling `Registry::try_clone`
+ (https://github.com/tokio-rs/mio/commit/d1617b567ff6bc669d71e367d22e0e93ff7e2e24 for epoll and
+ https://github.com/tokio-rs/mio/commit/b367a05e408ca90a26383c3aa16d8a16f019dc59 for kqueue).
+
+# 0.7.11
+
+## Fixes
+
+* Fix a missing winapi feature.
+ (https://github.com/tokio-rs/mio/commit/a7e61db9e3c2b929ef1a33532bfcc22045d163ce).
+
+# 0.7.10
+
+## Fixes
+
+* Fix an instance of a missing `doc(cfg(...))` attribute
+ (https://github.com/tokio-rs/mio/commit/25e8f911357c740034f10a170dfa4ea1b28234ce).
+
+# 0.7.9
+
+## Fixes
+
+* Fix error handling in `NamedPipe::write`
+ (https://github.com/tokio-rs/mio/commit/aec872be9732e5c6685100674278be27f54a271b).
+* Use `accept(2)` on x86 Android instead of `accept4(2)`
+ (https://github.com/tokio-rs/mio/commit/6f86b925d3e48f30905d5cfa54348acf3f1fa036,
+ https://github.com/tokio-rs/mio/commit/8d5414880ab82178305ac1d2c16d715e58633d3e).
+* Improve error message when opening AFD device
+ (https://github.com/tokio-rs/mio/commit/139f7c4422321eb4a17b14ae2c296fddd19a8804).
+
+# 0.7.8
+
+## Fixes
+
+* Fix `TcpStream::set_linger` on macOS
+ (https://github.com/tokio-rs/mio/commit/175773ce02e85977db81224c782c8d140aba8543).
+* Fix compilation on DragonFlyBSD
+ (https://github.com/tokio-rs/mio/commit/b51af46b28871f8dd3233b490ee62237ffed6a26).
+
+# 0.7.7
+
+## Added
+
+* `UdpSocket::only_v6`
+ (https://github.com/tokio-rs/mio/commit/0101e05a800f17fb88f4315d9b9fe0f08cca6e57).
+* `Clone` implementation for `Event`
+ (https://github.com/tokio-rs/mio/commit/26540ebbae89df6d4d08465c56f715d8f2addfc3).
+* `AsRawFd` implementation for `Registry`
+ (https://github.com/tokio-rs/mio/commit/f70daa72da0042b1880256164774c3286d315a02).
+* `Read` and `Write` implementations for `&unix::pipe::Sender` and
+ `&unix::pipe::Receiver`, that is, on references to them; implementations
+ already existed on the types themselves
+ (https://github.com/tokio-rs/mio/commit/1be481dcbbcb6906364008b5d61e7f53cddc3eb3).
+
+## Fixes
+
+* Underflow in `SocketAddr::address`
+ (https://github.com/tokio-rs/mio/commit/6d3fa69240cd4bb95e9d34605c660c30245a18bd).
+* Android build with the net feature enabled, but with os-poll disabled
+ (https://github.com/tokio-rs/mio/commit/49d8fd33e026ad6e2c055d05d6667180ba2af7be).
+* Solaris build with the net feature enabled, but with os-poll disabled
+ (https://github.com/tokio-rs/mio/commit/a6e025e9d9511639ec106ebedc0dd312bdc9be12).
+* Ensure that `Waker::wake` works on illumos systems with poor `pipe(2)` and
+ `epoll(2)` interaction using `EPOLLET`
+ (https://github.com/tokio-rs/mio/commit/943d4249dcc17cd8b4d2250c4fa19116097248fa).
+* Fix `unix::pipe` on illumos
+ (https://github.com/tokio-rs/mio/commit/0db49f6d5caf54b12176821363d154384357e70a).
+
+# 0.7.6
+
+## Added
+
+* `net` feature, replaces `tcp`, `udp` and `uds` features
+ (https://github.com/tokio-rs/mio/commit/a301ba520a8479b459c4acdcefa4a7c5eea818c7).
+* `os-ext` feature, replaces `os-util` and `pipe` features
+ (https://github.com/tokio-rs/mio/commit/f5017fae8a3d3bb4b4cada25b01a2d76a406badc).
+* Added keepalive support to `TcpSocket`
+ (https://github.com/tokio-rs/mio/commit/290c43a96662d54ab7c4b8814e5a9f9a9e523fda).
+* `TcpSocket::set_{send, recv}_buffer_size`
+ (https://github.com/tokio-rs/mio/commit/40c4af79bf5b32b8fbdbf6f2e5c16290e1d3d406).
+* `TcpSocket::get_linger`
+ (https://github.com/tokio-rs/mio/commit/13e82ced655bbb6e2729226e485a7de9f2c2ccd9).
+* Implement `IntoRawFd` for `TcpSocket`
+ (https://github.com/tokio-rs/mio/commit/50548ed45d0b2c98f1f2e003e210d14195284ef4).
+
+## Deprecated
+
+* The `tcp`, `udp` and `uds` features, replaced by a new `net` feature; see
+ the snippet after this list.
+ (https://github.com/tokio-rs/mio/commit/a301ba520a8479b459c4acdcefa4a7c5eea818c7).
+* The `extra-docs` feature, now enabled by default.
+ (https://github.com/tokio-rs/mio/commit/25731e8688a2d91c5c700674a2c2d3841240ece1).
+* The `os-util` and `pipe` features, replaced by a new `os-ext` feature.
+ (https://github.com/tokio-rs/mio/commit/f5017fae8a3d3bb4b4cada25b01a2d76a406badc).
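+
+A hedged illustration of the downstream `Cargo.toml` entry under the new
+feature names (the version pin is illustrative):
+
+```toml
+[dependencies]
+mio = { version = "0.7.6", features = ["os-poll", "os-ext", "net"] }
+```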
+
+## Fixes
+
+* Incorrect assumption of the layout of `std::net::SocketAddr`. Previously Mio
+ would assume that `SocketAddrV{4,6}` had the same layout as
+ `libc::sockaddr_in(6)`, however this is not guaranteed by the standard
+ library.
+ (https://github.com/tokio-rs/mio/commit/152e0751f0be1c9b0cbd6778645b76bcb0eba93c).
+* Also bumped the miow dependency to version 0.3.6 to solve the same problem as
+ above.
+
+# 0.7.5
+
+## Added
+
+* `TcpSocket::get_localaddr()` retrieves the local address
+ (https://github.com/tokio-rs/mio/commit/b41a022b2242eef1969c70c8ba93e04c528dba47).
+* `TcpSocket::set_reuseport()` & `TcpSocket::get_reuseport()` configures and reads `SO_REUSEPORT`
+ (https://github.com/tokio-rs/mio/commit/183bbe409ab69cbf9db41d0263b41ec86202d9a0).
+* `unix::pipe()`, a wrapper around the `pipe(2)` system call (a tiny sketch follows this list)
+ (https://github.com/tokio-rs/mio/commit/2b7c0967a7362303946deb3d4ca2ae507af6c72d).
+* Add a check that a single Waker is active per Poll instance (only in debug mode)
+ (https://github.com/tokio-rs/mio/commit/f4874f28b32efcf4841691884c65a89734d96a56).
+* Added `Interest::remove()`
+ (https://github.com/tokio-rs/mio/commit/b8639c3d9ac07bb7e2e27685680c8a6510fa1357).
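+
+A hedged sketch of the wrapper (in 0.7.5 this sat behind the `pipe` feature,
+later replaced by `os-ext`); Unix only:
+
+```rust
+use std::io::{Read, Write};
+
+fn main() -> std::io::Result<()> {
+    // Create the non-blocking sender and receiver halves of a pipe.
+    let (mut sender, mut receiver) = mio::unix::pipe::new()?;
+    sender.write_all(b"ping")?;
+    let mut buf = [0; 4];
+    // Data is already queued in the pipe, so this non-blocking read succeeds.
+    receiver.read_exact(&mut buf)?;
+    assert_eq!(&buf, b"ping");
+    Ok(())
+}
+```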
+
+# 0.7.4
+
+## Fixes
+
+* lost "socket closed" events on windows
+ (https://github.com/tokio-rs/mio/commit/50c299aca56c4a26e5ed20c283007239fbe6a7a7).
+
+## Added
+
+* `TcpSocket::set_linger()` configures `SO_LINGER`
+ (https://github.com/tokio-rs/mio/commit/3b4096565c1a879f651b8f8282ecdcbdbd5c92d3).
+
+# 0.7.3
+
+## Added
+
+* `TcpSocket` for configuring a TCP socket before connecting or listening
+ (https://github.com/tokio-rs/mio/commit/5b09e60d0f64419b989bda88c86a3147334a03b3).
+
+# 0.7.2
+
+## Added
+
+* Windows named pipe support.
+ (https://github.com/tokio-rs/mio/commit/52e8c2220e87696d20f13561402bcaabba4136ed).
+
+# 0.7.1
+
+## Reduced support for 32-bit Apple targets
+
+In January 2020 Rust reduced its support for 32-bit Apple targets
+(https://blog.rust-lang.org/2020/01/03/reducing-support-for-32-bit-apple-targets.html).
+Starting with v0.7.1 Mio will do the same, as we no longer check 32-bit
+iOS/macOS targets on our CI.
+
+## Added
+
+* Support for illumos
+ (https://github.com/tokio-rs/mio/commit/976f2354d0e8fbbb64fba3bf017d7131f9c369a0).
+* Report `epoll(2)`'s `EPOLLERR` event as `Event::is_write_closed` if it's the
+ only event
+ (https://github.com/tokio-rs/mio/commit/0c77b5712d675eeb9bd43928b5dd7d22b2c7ac0c).
+* Optimised `event::Iter::{size_hint, count}`
+ (https://github.com/tokio-rs/mio/commit/40df934a11b05233a7796c4de19a4ee06bc4e03e).
+
+## Fixed
+
+* Work around a Linux kernel < 2.6.37 bug on 32-bit systems making timeouts
+ longer than ~30 minutes effectively infinite
+ (https://github.com/tokio-rs/mio/commit/d555991f5ee81f6c1eec0fe481557d3d5b8d5ff4).
+* Set `SO_NOSIGPIPE` on all sockets (not just UDP) for Apple targets
+ (https://github.com/tokio-rs/mio/commit/b8bbdcb0d3236f4c4acb257996d42a88dc9987d9).
+* Properly handle `POLL_ABORT` on Windows
+ (https://github.com/tokio-rs/mio/commit/a98da62b3ed1eeed1770aaca12f46d647e4fa749).
+* Improved error handling around failing `SIO_BASE_HANDLE` calls on Windows
+ (https://github.com/tokio-rs/mio/commit/b15fc18458a79ef8a51f73effa92548650f4e5dc).
+
+## Changed
+
+* On NetBSD we now use `accept4(2)`
+ (https://github.com/tokio-rs/mio/commit/4e306addc7144f2e02a7e8397c220b179a006a19).
+* The package uploaded to crates.io should be slightly smaller
+ (https://github.com/tokio-rs/mio/commit/eef8d3b9500bc0db957cd1ac68ee128ebc68351f).
+
+## Removed
+
+* Dependency on `lazy_static` on Windows
+ (https://github.com/tokio-rs/mio/commit/57e4c2a8ac153bc7bb87829e22cf0a21e3927e8a).
+
+# 0.7.0
+
+Version 0.7 of Mio contains various major changes compared to version 0.6.
+Overall a large number of API changes have been made to reduce the complexity of
+the implementation and remove overhead where possible.
+
+Please refer to the [blog post about
+0.7-alpha.1](https://tokio.rs/blog/2019-12-mio-v0.7-alpha.1/) for additional
+information.
+
+## Added
+
+* `Interest` structure that replaces `Ready` in registering event sources.
+* `Registry` structure that separates the registering and polling functionality.
+* `Waker` structure that allows another thread to wake a thread polling `Poll`
+ (a sketch of the new flow follows this list).
+* Unix Domain Socket (UDS) types: `UnixDatagram`, `UnixListener` and
+ `UnixStream`.
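+
+A minimal sketch of the new registration and wakeup flow (assuming the
+`os-poll` and `net` features are enabled; the token names are illustrative):
+
+```rust
+use std::thread;
+
+use mio::net::TcpListener;
+use mio::{Events, Interest, Poll, Token, Waker};
+
+const SERVER: Token = Token(0);
+const WAKE: Token = Token(1);
+
+fn main() -> std::io::Result<()> {
+    let mut poll = Poll::new()?;
+    // `Registry` now owns registration, separate from polling itself.
+    let mut server = TcpListener::bind("127.0.0.1:0".parse().unwrap())?;
+    poll.registry()
+        .register(&mut server, SERVER, Interest::READABLE)?;
+
+    // `Waker` lets another thread interrupt a blocking `poll`.
+    let waker = Waker::new(poll.registry(), WAKE)?;
+    thread::spawn(move || waker.wake().expect("unable to wake"));
+
+    let mut events = Events::with_capacity(8);
+    // Blocks until the spawned thread calls `wake` (or the listener is ready).
+    poll.poll(&mut events, None)?;
+    Ok(())
+}
+```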
+
+## Removed
+
+* All code deprecated in 0.6 was removed in 0.7.
+* Support for Fuchsia was removed as the code was unmaintained.
+* Support for Bitrig was removed; rustc has also dropped support for it.
+* `UnixReady` was merged into `Ready`.
+* The custom user-space readiness queue was removed; this includes the public
+ `Registration` and `SetReadiness` types.
+* `PollOpt` was removed and all registrations use edge-triggers. See the
+ upgrade guide on how to process events using edge-triggers; a minimal
+ drain-loop sketch follows this list.
+* The network types (types in the `net` module) now support only the same API
+ as found in the standard library; various methods on the types were removed.
+* `TcpStream` now supports vectored I/O.
+* `Poll::poll_interruptible` was removed. Instead, `Poll::poll` will now
+ return an error if one occurs.
+* `From<usize>` was removed from `Token`; the internal field is still public,
+ so `Token(my_token)` can still be used.
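+
+A minimal drain-loop sketch for edge-triggered processing (assuming a
+connected `mio::net::TcpStream`; the helper name is illustrative):
+
+```rust
+use std::io::{self, Read};
+
+// With edge triggers, an event may not fire again until the source is
+// drained, so keep reading until the OS reports `WouldBlock`.
+fn drain(stream: &mut mio::net::TcpStream, buf: &mut Vec<u8>) -> io::Result<()> {
+    let mut chunk = [0; 4096];
+    loop {
+        match stream.read(&mut chunk) {
+            Ok(0) => return Ok(()), // the peer closed the connection
+            Ok(n) => buf.extend_from_slice(&chunk[..n]),
+            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(()),
+            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
+            Err(e) => return Err(e),
+        }
+    }
+}
+```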
+
+## Changed
+
+* Various documentation improvements were made around correct usage of `Poll`
+ and registered event sources. It is recommended to reread the documentation of
+ at least `event::Source` and `Poll`.
+* Mio now uses Rust 2018 and rustfmt for all code.
+* `Event` was changed to be a wrapper around the OS event. This means it can be
+ significantly larger on some OSes.
+* `Ready` was removed and replaced with various `is_*` methods on `Event`. For
+ example, instead of checking for readable readiness using
+ `Event::ready().is_readable()`, you would call `Event::is_readable()`.
+* `Ready::is_hup` was removed in favour of `Event::is_read_closed` and
+ `Event::is_write_closed`.
+* The Iterator implementation of `Events` was changed to return `&Event`.
+* `Evented` was renamed to `event::Source` and now takes a mutable reference
+ to the source.
+* Minimum supported Rust version was increased to 1.39.
+* By default Mio now uses a shim implementation. To enable the full
+ implementation that uses the OS, enable the `os-poll` feature. To enable the
+ network types use `tcp`, `udp` and/or `uds`. For more documentation on the
+ features see the `feature` module in the API documentation (requires the
+ `extra-docs` feature).
+* The entire Windows implementation was rewritten.
+* Various optimisations were made to reduce the number of system calls in
+ creating and using sockets, e.g. making use of `accept4(2)`.
+* The `fmt::Debug` implementation of `Events` is now actually useful as it
+ prints all `Event`s.
+
+# 0.6.23 (Dec 01, 2020)
+
+### Changed
+- **MSRV**: Increased the MSRV from 1.18.0 (Jun 8, 2017) to 1.31.0 (Dec 6,
+ 2018)
+ (https://github.com/tokio-rs/mio/commit/4879e0d32ddfd98e762fc87240e594a3ad8fca30).
+
+### Fixed
+- Work around a Linux kernel < 2.6.37 bug on 32-bit systems making timeouts
+ longer than ~30 minutes effectively infinite
+ (https://github.com/tokio-rs/mio/commit/e7cba59950e9c9fa6194e29b5b1e72029e3df455).
+- Update miow and net2 dependencies to get rid of an invalid memory layout assumption
+ (https://github.com/tokio-rs/mio/commit/13f02ac0a86d7c0c0001e5ff8960a0b4340d075c).
+
+# 0.6.22 (May 01, 2020)
+
+### Added
+- Add support for illumos target (#1294)
+
+# 0.6.21 (November 27, 2019)
+
+### Fixed
+- Remove the `=` version requirement on `cfg-if`.
+
+# 0.6.20 (November 21, 2019)
+
+### Fixed
+- Use default IOCP concurrency value (#1161).
+- Set `FD_CLOEXEC` in pipe (#1095).
+
+# 0.6.19 (May 28, 2019)
+
+### Fixed
+- Do not trigger HUP events on kqueue platforms (#958).
+
+# 0.6.18 (May 24, 2019)
+
+### Fixed
+- Fix compilation on kqueue platforms with 32-bit C long (#948).
+
+# 0.6.17 (May 15, 2019)
+
+### Fixed
+- Don't report `RDHUP` as `HUP` (#939)
+- Fix lazycell-related compilation issues.
+- Fix `EPOLLPRI` conflicting with `READABLE`.
+- Abort the process on ref count overflows.
+
+### Added
+- Define PRI on all targets
+
+# 0.6.16 (September 5, 2018)
+
+* Add EPOLLPRI readiness to UnixReady on supported platforms (#867)
+* Reduce spurious awaken calls (#875)
+
+# 0.6.15 (July 3, 2018)
+
+* Implement `Evented` for containers (#840).
+* Fix android-aarch64 build (#850).
+
+# 0.6.14 (March 8, 2018)
+
+* Add `Poll::poll_interruptible` (#811)
+* Add `Ready::all` and `usize` conversions (#825)
+
+# 0.6.13 (February 5, 2018)
+
+* Fix build on DragonFlyBSD.
+* Add `TcpListener::from_std` that does not require the socket addr.
+* Deprecate `TcpListener::from_listener` in favor of `from_std`.
+
+# 0.6.12 (January 5, 2018)
+
+* Add `TcpStream::peek` function (#773).
+* Raise minimum Rust version to 1.18.0.
+* `Poll`: retry select() when interrupted by a signal (#742).
+* Deprecate `Events` index access (#713).
+* Add `Events::clear` (#782).
+* Add support for `lio_listio` (#780).
+
+# 0.6.11 (October 25, 2017)
+
+* Allow `register` to take an empty interest (#640).
+* Fix a bug with TCP errors on Windows (#725).
+* Add `TcpListener::accept_std` (#733).
+* Update `IoVec` to fix a soundness bug; includes a behavior change (#747).
+* Minimum Rust version is now 1.14.0.
+* Fix Android x86_64 build.
+* Misc API & doc polish.
+
+# 0.6.10 (July 27, 2017)
+
+* Experimental support for Fuchsia
+* Add `only_v6` option for UDP sockets
+* Fix build on NetBSD
+* Minimum Rust version is now 1.13.0
+* Assignment operators (e.g. `|=`) are now implemented for `Ready`
+
+# 0.6.9 (June 7, 2017)
+
+* More socket options are exposed through the TCP types, brought in through the
+ `net2` crate.
+
+# 0.6.8 (May 26, 2017)
+
+* Support Fuchsia
+* POSIX AIO support
+* Fix a memory leak caused by `Register::new2`
+* Windows: fix handling failed TCP connections
+* Fix build on aarch64-linux-android
+* Fix usage of `O_CLOEXEC` with `SETFL`
+
+# 0.6.7 (April 27, 2017)
+
+* Ignore EPIPE coming out of `kevent`
+* Timer thread should exit when timer is dropped.
+
+# 0.6.6 (March 22, 2017)
+
+* Add `send()`, `recv()` and `connect()` to `UdpSocket`.
+* Fix bug in custom readiness queue
+* Move net types into `net` module
+
+# 0.6.5 (March 14, 2017)
+
+* Misc improvements to kqueue bindings
+* Add official support for iOS, Android, BSD
+* Reimplement custom readiness queue
+* `Poll` is now `Sync`
+* Officially deprecate non-core functionality (timers, channel, etc...)
+* `Registration` now implements `Evented`
+* Fix bug around error conditions with `connect` on windows.
+* Use iovec crate for scatter / gather operations
+* Only support readable and writable readiness on all platforms
+* Expose additional readiness in a platform specific capacity
+
+# 0.6.4 (January 24, 2017)
+
+* Fix compilation on musl
+* Add `TcpStream::from_stream` which converts a std TCP stream to Mio.
+
+# 0.6.3 (January 22, 2017)
+
+* Implement readv/writev for `TcpStream`, allowing vectored reads/writes to
+ work across platforms
+* Remove `nix` dependency
+* Implement `Display` and `Error` for some channel error types.
+* Optimize TCP on Windows through `SetFileCompletionNotificationModes`
+
+# 0.6.2 (December 18, 2016)
+
+* Allow registration of custom handles on Windows (like `EventedFd` on Unix)
+* Send only one byte for the awakener on Unix instead of four
+* Fix a bug in the timer implementation which caused an infinite loop
+
+# 0.6.1 (October 30, 2016)
+
+* Update dependency of `libc` to 0.2.16
+* Fix channel `dec` logic
+* Fix a timer bug around timeout cancellation
+* Don't allocate buffers for TCP reads on Windows
+* Touched up documentation in a few places
+* Fix an infinite looping timer thread on OSX
+* Fix compile on 32-bit OSX
+* Fix compile on FreeBSD
+
+# 0.6.0 (September 2, 2016)
+
+* Shift primary API towards `Poll`
+* Moved `EventLoop` and related types to the `deprecated` mod. All contents of the
+ `deprecated` mod will be removed by Mio 1.0.
+* Increase minimum supported Rust version to 1.9.0
+* Deprecate unix domain socket implementation in favor of using a
+ version external to Mio. For example: https://github.com/alexcrichton/mio-uds.
+* Remove various types now included in `std`
+* Updated TCP & UDP APIs to match the versions in `std`
+* Enable implementing `Evented` for any type via `Registration`
+* Rename `IoEvent` -> `Event`
+* Access `Event` data via functions vs. public fields.
+* Expose `Events` as a public type that is passed into `Poll`
+* Use `std::time::Duration` for all APIs that require a time duration.
+* Polled events are now retrieved via `Events` type.
+* Implement `std::error::Error` for `TimerError`
+* Relax `Send` bound on notify messages.
+* Remove `Clone` impl for `Timeout` (future proof)
+* Remove `mio::prelude`
+* Remove `mio::util`
+* Remove dependency on bytes
+
+# 0.5.0 (December 3, 2015)
+
+* Windows support (#239)
+* NetBSD support (#306)
+* Android support (#295)
+* Don't re-export bytes types
+* Renamed `EventLoop::register_opt` to `EventLoop::register` (#257)
+* `EventLoopConfig` is now a builder instead of having public struct fields. It
+ is also no longer `Copy`. (#259)
+* `TcpSocket` is no longer exported in the public API (#262)
+* Integrate with net2. (#262)
+* `TcpListener` now returns the remote peer address from `accept` as well (#275)
+* The `UdpSocket::{send_to, recv_from}` methods are no longer generic over `Buf`
+ or `MutBuf` but instead take slices directly. The return types have also been
+ updated to return the number of bytes transferred. (#260)
+* Fix bug with kqueue where an error on registration prevented the
+ changelist from getting flushed (#276)
+* Support sending/receiving FDs over UNIX sockets (#291)
+* Mio's socket types are permanently associated with an EventLoop (#308)
+* Reduce unnecessary poll wakeups (#314)
+
+
+# 0.4.1 (July 21, 2015)
+
+* [BUGFIX] Fix notify channel concurrency bug (#216)
+
+# 0.4.0 (July 16, 2015)
+
+* [BUGFIX] EventLoop::register requests all events, not just readable.
+* [BUGFIX] Attempting to send a message to a shutdown event loop fails correctly.
+* [FEATURE] Expose TCP shutdown
+* [IMPROVEMENT] Coalesce readable & writable into `ready` event (#184)
+* [IMPROVEMENT] Rename TryRead & TryWrite function names to avoid conflict with std.
+* [IMPROVEMENT] Provide TCP and UDP types in Mio (path to windows #155)
+* [IMPROVEMENT] Use clock_ticks crate instead of time (path to windows #155)
+* [IMPROVEMENT] Move unix specific features into mio::unix module
+* [IMPROVEMENT] TcpListener sets SO_REUSEADDR by default
diff --git a/third_party/rust/mio/Cargo.lock b/third_party/rust/mio/Cargo.lock
new file mode 100644
index 0000000000..6f63e4a68e
--- /dev/null
+++ b/third_party/rust/mio/Cargo.lock
@@ -0,0 +1,147 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "env_logger"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3"
+dependencies = [
+ "log",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.107"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219"
+
+[[package]]
+name = "log"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "mio"
+version = "0.8.0"
+dependencies = [
+ "env_logger",
+ "libc",
+ "log",
+ "miow",
+ "ntapi",
+ "rand",
+ "winapi",
+]
+
+[[package]]
+name = "miow"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "ntapi"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba"
+
+[[package]]
+name = "rand"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+ "rand_hc",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "wasi"
+version = "0.10.2+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/third_party/rust/mio/Cargo.toml b/third_party/rust/mio/Cargo.toml
new file mode 100644
index 0000000000..78b6418022
--- /dev/null
+++ b/third_party/rust/mio/Cargo.toml
@@ -0,0 +1,64 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "mio"
+version = "0.8.0"
+authors = ["Carl Lerche <me@carllerche.com>", "Thomas de Zeeuw <thomasdezeeuw@gmail.com>", "Tokio Contributors <team@tokio.rs>"]
+include = ["Cargo.toml", "LICENSE", "README.md", "CHANGELOG.md", "src/**/*.rs", "examples/**/*.rs"]
+description = "Lightweight non-blocking IO"
+homepage = "https://github.com/tokio-rs/mio"
+readme = "README.md"
+keywords = ["io", "async", "non-blocking"]
+categories = ["asynchronous"]
+license = "MIT"
+repository = "https://github.com/tokio-rs/mio"
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+targets = ["aarch64-apple-ios", "aarch64-linux-android", "x86_64-apple-darwin", "x86_64-pc-windows-msvc", "x86_64-unknown-dragonfly", "x86_64-unknown-freebsd", "x86_64-unknown-illumos", "x86_64-unknown-linux-gnu", "x86_64-unknown-netbsd", "x86_64-unknown-openbsd"]
+
+[package.metadata.playground]
+features = ["os-poll", "os-ext", "net"]
+
+[[example]]
+name = "tcp_server"
+required-features = ["os-poll", "net"]
+
+[[example]]
+name = "udp_server"
+required-features = ["os-poll", "net"]
+[dependencies.log]
+version = "0.4.8"
+[dev-dependencies.env_logger]
+version = "0.8.4"
+default-features = false
+
+[dev-dependencies.rand]
+version = "0.8"
+
+[features]
+default = []
+net = []
+os-ext = ["os-poll"]
+os-poll = []
+[target."cfg(unix)".dependencies.libc]
+version = "0.2.86"
+[target."cfg(windows)".dependencies.miow]
+version = "0.3.6"
+
+[target."cfg(windows)".dependencies.ntapi]
+version = "0.3"
+
+[target."cfg(windows)".dependencies.winapi]
+version = "0.3"
+features = ["winsock2", "mswsock"]
diff --git a/third_party/rust/mio/LICENSE b/third_party/rust/mio/LICENSE
new file mode 100644
index 0000000000..3516413824
--- /dev/null
+++ b/third_party/rust/mio/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014 Carl Lerche and other MIO contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/third_party/rust/mio/README.md b/third_party/rust/mio/README.md
new file mode 100644
index 0000000000..c18b300ab8
--- /dev/null
+++ b/third_party/rust/mio/README.md
@@ -0,0 +1,179 @@
+# Mio – Metal IO
+
+Mio is a fast, low-level I/O library for Rust focusing on non-blocking APIs and
+event notification for building high performance I/O apps with as little
+overhead as possible over the OS abstractions.
+
+[![Crates.io][crates-badge]][crates-url]
+[![MIT licensed][mit-badge]][mit-url]
+[![Build Status][azure-badge]][azure-url]
+[![Build Status][cirrus-badge]][cirrus-url]
+
+[crates-badge]: https://img.shields.io/crates/v/mio.svg
+[crates-url]: https://crates.io/crates/mio
+[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg
+[mit-url]: LICENSE
+[azure-badge]: https://dev.azure.com/tokio-rs/Tokio/_apis/build/status/tokio-rs.mio?branchName=master
+[azure-url]: https://dev.azure.com/tokio-rs/Tokio/_build/latest?definitionId=2&branchName=master
+[cirrus-badge]: https://api.cirrus-ci.com/github/tokio-rs/mio.svg
+[cirrus-url]: https://cirrus-ci.com/github/tokio-rs/mio
+
+**API documentation**
+
+* [master](https://tokio-rs.github.io/mio/doc/mio/)
+* [v0.7](https://docs.rs/mio/^0.7)
+* [v0.6](https://docs.rs/mio/^0.6)
+
+This is a low-level library; if you are looking for something easier to get
+started with, see [Tokio](https://tokio.rs).
+
+## Usage
+
+To use `mio`, first add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+mio = "0.7"
+```
+
+Next we can start using Mio. The following is a quick introduction using
+`TcpListener` and `TcpStream`. Note that the `os-poll` and `net` features must
+be enabled for this example, as shown above.
+
+```rust
+use std::error::Error;
+
+use mio::net::{TcpListener, TcpStream};
+use mio::{Events, Interest, Poll, Token};
+
+// Some tokens to allow us to identify which event is for which socket.
+const SERVER: Token = Token(0);
+const CLIENT: Token = Token(1);
+
+fn main() -> Result<(), Box<dyn Error>> {
+ // Create a poll instance.
+ let mut poll = Poll::new()?;
+ // Create storage for events.
+ let mut events = Events::with_capacity(128);
+
+ // Setup the server socket.
+ let addr = "127.0.0.1:13265".parse()?;
+ let mut server = TcpListener::bind(addr)?;
+ // Start listening for incoming connections.
+ poll.registry()
+ .register(&mut server, SERVER, Interest::READABLE)?;
+
+ // Setup the client socket.
+ let mut client = TcpStream::connect(addr)?;
+ // Register the socket.
+ poll.registry()
+ .register(&mut client, CLIENT, Interest::READABLE | Interest::WRITABLE)?;
+
+ // Start an event loop.
+ loop {
+ // Poll Mio for events, blocking until we get an event.
+ poll.poll(&mut events, None)?;
+
+ // Process each event.
+ for event in events.iter() {
+ // We can use the token we previously provided to `register` to
+ // determine for which socket the event is.
+ match event.token() {
+ SERVER => {
+ // If this is an event for the server, it means a connection
+ // is ready to be accepted.
+ //
+ // Accept the connection and drop it immediately. This will
+ // close the socket and notify the client of the EOF.
+ let connection = server.accept();
+ drop(connection);
+ }
+ CLIENT => {
+ if event.is_writable() {
+ // We can (likely) write to the socket without blocking.
+ }
+
+ if event.is_readable() {
+ // We can (likely) read from the socket without blocking.
+ }
+
+ // Since the server just shuts down the connection, let's
+ // just exit from our event loop.
+ return Ok(());
+ }
+ // We don't expect any events with tokens other than those we provided.
+ _ => unreachable!(),
+ }
+ }
+ }
+}
+```
+
+## Features
+
+* Non-blocking TCP, UDP
+* I/O event queue backed by epoll, kqueue, and IOCP
+* Zero allocations at runtime
+* Platform specific extensions
+
+## Non-goals
+
+The following are specifically omitted from Mio and are left to the user
+or higher-level libraries.
+
+* File operations
+* Thread pools / multi-threaded event loop
+* Timers
+
+## Platforms
+
+Currently supported platforms:
+
+* Android (API level 21)
+* DragonFly BSD
+* FreeBSD
+* Linux
+* NetBSD
+* OpenBSD
+* Windows
+* iOS
+* macOS
+* Wine (version 6.11+, see [issue #1444])
+
+There are potentially others. If you find that Mio works on another
+platform, submit a PR to update the list!
+
+Mio can handle interfacing with each of the event systems of the aforementioned
+platforms. The details of their implementation are further discussed in the
+`Poll` type of the API documentation (see above).
+
+The Windows implementation for polling sockets uses the [wepoll] strategy.
+This uses the Windows AFD system to access socket readiness events.
+
+[wepoll]: https://github.com/piscisaureus/wepoll
+[issue #1444]: https://github.com/tokio-rs/mio/issues/1444
+
+### Unsupported
+
+* Haiku, see [issue #1472]
+
+[issue #1472]: https://github.com/tokio-rs/mio/issues/1472
+
+## Community
+
+A group of Mio users hang out on [Discord]; it can be a good place to go for
+questions.
+
+[Discord]: https://discord.gg/tokio
+
+## Contributing
+
+Interested in getting involved? We would love to help you! For simple
+bug fixes, just submit a PR with the fix and we can discuss it directly
+in the PR. If the fix is more complex, start with an issue.
+
+If you want to propose an API change, create an issue to start a
+discussion with the community. Also, feel free to talk with us in Discord.
+
+Finally, be kind. We support the [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct).
diff --git a/third_party/rust/mio/examples/tcp_server.rs b/third_party/rust/mio/examples/tcp_server.rs
new file mode 100644
index 0000000000..6347ab6de0
--- /dev/null
+++ b/third_party/rust/mio/examples/tcp_server.rs
@@ -0,0 +1,183 @@
+// You can run this example from the root of the mio repo:
+// cargo run --example tcp_server --features="os-poll net"
+use mio::event::Event;
+use mio::net::{TcpListener, TcpStream};
+use mio::{Events, Interest, Poll, Registry, Token};
+use std::collections::HashMap;
+use std::io::{self, Read, Write};
+use std::str::from_utf8;
+
+// Setup some tokens to allow us to identify which event is for which socket.
+const SERVER: Token = Token(0);
+
+// Some data we'll send over the connection.
+const DATA: &[u8] = b"Hello world!\n";
+
+fn main() -> io::Result<()> {
+ env_logger::init();
+
+ // Create a poll instance.
+ let mut poll = Poll::new()?;
+ // Create storage for events.
+ let mut events = Events::with_capacity(128);
+
+ // Setup the TCP server socket.
+ let addr = "127.0.0.1:9000".parse().unwrap();
+ let mut server = TcpListener::bind(addr)?;
+
+ // Register the server with poll so we can receive events for it.
+ poll.registry()
+ .register(&mut server, SERVER, Interest::READABLE)?;
+
+ // Map of `Token` -> `TcpStream`.
+ let mut connections = HashMap::new();
+ // Unique token for each incoming connection.
+ let mut unique_token = Token(SERVER.0 + 1);
+
+ println!("You can connect to the server using `nc`:");
+ println!(" $ nc 127.0.0.1 9000");
+ println!("You'll see our welcome message and anything you type will be printed here.");
+
+ loop {
+ poll.poll(&mut events, None)?;
+
+ for event in events.iter() {
+ match event.token() {
+ SERVER => loop {
+ // Received an event for the TCP server socket, which
+ // indicates we can accept a connection.
+ let (mut connection, address) = match server.accept() {
+ Ok((connection, address)) => (connection, address),
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ // If we get a `WouldBlock` error we know our
+ // listener has no more incoming connections queued,
+ // so we can return to polling and wait for some
+ // more.
+ break;
+ }
+ Err(e) => {
+ // If it was any other kind of error, something went
+ // wrong and we terminate with an error.
+ return Err(e);
+ }
+ };
+
+ println!("Accepted connection from: {}", address);
+
+ let token = next(&mut unique_token);
+ poll.registry().register(
+ &mut connection,
+ token,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )?;
+
+ connections.insert(token, connection);
+ },
+ token => {
+ // Maybe received an event for a TCP connection.
+ let done = if let Some(connection) = connections.get_mut(&token) {
+ handle_connection_event(poll.registry(), connection, event)?
+ } else {
+ // Sporadic events happen, we can safely ignore them.
+ false
+ };
+ if done {
+ if let Some(mut connection) = connections.remove(&token) {
+ poll.registry().deregister(&mut connection)?;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+fn next(current: &mut Token) -> Token {
+ let next = current.0;
+ current.0 += 1;
+ Token(next)
+}
+
+/// Returns `true` if the connection is done.
+fn handle_connection_event(
+ registry: &Registry,
+ connection: &mut TcpStream,
+ event: &Event,
+) -> io::Result<bool> {
+ if event.is_writable() {
+ // We can (maybe) write to the connection.
+ match connection.write(DATA) {
+ // We want to write the entire `DATA` buffer in a single go. If we
+ // write less we'll return a short write error (same as
+ // `io::Write::write_all` does).
+ Ok(n) if n < DATA.len() => return Err(io::ErrorKind::WriteZero.into()),
+ Ok(_) => {
+ // After we've written something we'll reregister the connection
+ // to only respond to readable events.
+ registry.reregister(connection, event.token(), Interest::READABLE)?
+ }
+ // Would block "errors" are the OS's way of saying that the
+ // connection is not actually ready to perform this I/O operation.
+ Err(ref err) if would_block(err) => {}
+ // Got interrupted (how rude!), we'll try again.
+ Err(ref err) if interrupted(err) => {
+ return handle_connection_event(registry, connection, event)
+ }
+ // Other errors we'll consider fatal.
+ Err(err) => return Err(err),
+ }
+ }
+
+ if event.is_readable() {
+ let mut connection_closed = false;
+ let mut received_data = vec![0; 4096];
+ let mut bytes_read = 0;
+ // We can (maybe) read from the connection.
+ loop {
+ match connection.read(&mut received_data[bytes_read..]) {
+ Ok(0) => {
+ // Reading 0 bytes means the other side has closed the
+ // connection or is done writing, and so are we.
+ connection_closed = true;
+ break;
+ }
+ Ok(n) => {
+ bytes_read += n;
+ if bytes_read == received_data.len() {
+ received_data.resize(received_data.len() + 1024, 0);
+ }
+ }
+ // Would block "errors" are the OS's way of saying that the
+ // connection is not actually ready to perform this I/O operation.
+ Err(ref err) if would_block(err) => break,
+ Err(ref err) if interrupted(err) => continue,
+ // Other errors we'll consider fatal.
+ Err(err) => return Err(err),
+ }
+ }
+
+ if bytes_read != 0 {
+ let received_data = &received_data[..bytes_read];
+ if let Ok(str_buf) = from_utf8(received_data) {
+ println!("Received data: {}", str_buf.trim_end());
+ } else {
+ println!("Received (none UTF-8) data: {:?}", received_data);
+ }
+ }
+
+ if connection_closed {
+ println!("Connection closed");
+ return Ok(true);
+ }
+ }
+
+ Ok(false)
+}
+
+fn would_block(err: &io::Error) -> bool {
+ err.kind() == io::ErrorKind::WouldBlock
+}
+
+fn interrupted(err: &io::Error) -> bool {
+ err.kind() == io::ErrorKind::Interrupted
+}
diff --git a/third_party/rust/mio/examples/udp_server.rs b/third_party/rust/mio/examples/udp_server.rs
new file mode 100644
index 0000000000..ed6881d99c
--- /dev/null
+++ b/third_party/rust/mio/examples/udp_server.rs
@@ -0,0 +1,77 @@
+// You can run this example from the root of the mio repo:
+// cargo run --example udp_server --features="os-poll net"
+use log::warn;
+use mio::net::UdpSocket;
+use mio::{Events, Interest, Poll, Token};
+use std::io;
+
+// A token to allow us to identify which event is for the `UdpSocket`.
+const UDP_SOCKET: Token = Token(0);
+
+fn main() -> io::Result<()> {
+ env_logger::init();
+
+ // Create a poll instance.
+ let mut poll = Poll::new()?;
+ // Create storage for events. Since we will only register a single socket, a
+ // capacity of 1 will do.
+ let mut events = Events::with_capacity(1);
+
+ // Setup the UDP socket.
+ let addr = "127.0.0.1:9000".parse().unwrap();
+ let mut socket = UdpSocket::bind(addr)?;
+
+ // Register our socket with the token defined above and an interest in being
+ // `READABLE`.
+ poll.registry()
+ .register(&mut socket, UDP_SOCKET, Interest::READABLE)?;
+
+ println!("You can connect to the server using `nc`:");
+ println!(" $ nc -u 127.0.0.1 9000");
+ println!("Anything you type will be echoed back to you.");
+
+ // Initialize a buffer for the UDP packet. We use the maximum size of a UDP
+ // packet, which is the maximum value of a 16-bit integer.
+ let mut buf = [0; 1 << 16];
+
+ // Our event loop.
+ loop {
+ // Poll to check if we have events waiting for us.
+ poll.poll(&mut events, None)?;
+
+ // Process each event.
+ for event in events.iter() {
+ // Validate the token we registered our socket with;
+ // in this example it will only ever be one, but we
+ // make sure it's valid nonetheless.
+ match event.token() {
+ UDP_SOCKET => loop {
+ // In this loop we receive all packets queued for the socket.
+ match socket.recv_from(&mut buf) {
+ Ok((packet_size, source_address)) => {
+ // Echo the data.
+ socket.send_to(&buf[..packet_size], source_address)?;
+ }
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ // If we get a `WouldBlock` error we know our socket
+ // has no more packets queued, so we can return to
+ // polling and wait for some more.
+ break;
+ }
+ Err(e) => {
+ // If it was any other kind of error, something went
+ // wrong and we terminate with an error.
+ return Err(e);
+ }
+ }
+ },
+ _ => {
+ // This should never happen as we only registered our
+ // `UdpSocket` using the `UDP_SOCKET` token, but if it ever
+ // does we'll log it.
+ warn!("Got event for unexpected token: {:?}", event);
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/event/event.rs b/third_party/rust/mio/src/event/event.rs
new file mode 100644
index 0000000000..1b4f7b7fbc
--- /dev/null
+++ b/third_party/rust/mio/src/event/event.rs
@@ -0,0 +1,230 @@
+use crate::{sys, Token};
+
+use std::fmt;
+
+/// A readiness event.
+///
+/// `Event` is a readiness state paired with a [`Token`]. It is returned by
+/// [`Poll::poll`].
+///
+/// For more documentation on polling and events, see [`Poll`].
+///
+/// [`Poll::poll`]: ../struct.Poll.html#method.poll
+/// [`Poll`]: ../struct.Poll.html
+/// [`Token`]: ../struct.Token.html
+#[derive(Clone)]
+#[repr(transparent)]
+pub struct Event {
+ inner: sys::Event,
+}
+
+impl Event {
+ /// Returns the event's token.
+ pub fn token(&self) -> Token {
+ sys::event::token(&self.inner)
+ }
+
+ /// Returns true if the event contains readable readiness.
+ ///
+ /// # Notes
+ ///
+ /// Out-of-band (OOB) data also triggers readable events. But since most
+ /// applications don't actually read OOB data, this could leave an
+ /// application open to a Denial-of-Service (DoS) attack, see
+ /// <https://github.com/sandstorm-io/sandstorm-website/blob/58f93346028c0576e8147627667328eaaf4be9fa/_posts/2015-04-08-osx-security-bug.md>.
+ /// However, because Mio uses edge-triggers, it will not result in an infinite
+ /// loop as described in the article above.
+ pub fn is_readable(&self) -> bool {
+ sys::event::is_readable(&self.inner)
+ }
+
+ /// Returns true if the event contains writable readiness.
+ pub fn is_writable(&self) -> bool {
+ sys::event::is_writable(&self.inner)
+ }
+
+ /// Returns true if the event contains error readiness.
+ ///
+ /// Error events occur when the socket enters an error state. In this case,
+ /// the socket will also receive a readable or writable event. Reading or
+ /// writing to the socket will result in an error.
+ ///
+ /// # Notes
+ ///
+ /// Method is available on all platforms, but not all platforms trigger the
+ /// error event.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | `EPOLLERR` |
+ /// | [kqueue] | `EV_ERROR` and `EV_EOF` with `fflags` set to `0`. |
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ pub fn is_error(&self) -> bool {
+ sys::event::is_error(&self.inner)
+ }
+
+ /// Returns true if the event contains read closed readiness.
+ ///
+ /// # Notes
+ ///
+ /// Read closed readiness can be expected after any of the following have
+ /// occurred:
+ /// * The local stream has shutdown the read half of its socket
+ /// * The local stream has shutdown both the read half and the write half
+ /// of its socket
+ /// * The peer stream has shutdown the write half of its socket; this sends a
+ /// `FIN` packet that has been received by the local stream
+ ///
+ /// This method is a best-effort implementation. While some platforms may not
+ /// return readiness when the read half is closed, it is guaranteed that
+ /// false positives will not occur.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | `EPOLLHUP`, or |
+ /// | | `EPOLLIN` and `EPOLLRDHUP` |
+ /// | [kqueue] | `EV_EOF` |
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ pub fn is_read_closed(&self) -> bool {
+ sys::event::is_read_closed(&self.inner)
+ }
+
+ /// Returns true if the event contains write closed readiness.
+ ///
+ /// # Notes
+ ///
+ /// On [epoll] this is essentially a check for the `EPOLLHUP` flag, as the
+ /// local stream shutting down its write half does not trigger this event.
+ ///
+ /// On [kqueue] the local stream shutting down the write half of its
+ /// socket will trigger this event.
+ ///
+ /// This method is a best-effort implementation. While some platforms may not
+ /// return readiness when the write half is closed, it is guaranteed that
+ /// false positives will not occur.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | `EPOLLHUP`, or |
+ /// | | only `EPOLLERR`, or |
+ /// | | `EPOLLOUT` and `EPOLLERR` |
+ /// | [kqueue] | `EV_EOF` |
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ pub fn is_write_closed(&self) -> bool {
+ sys::event::is_write_closed(&self.inner)
+ }
+
+ /// Returns true if the event contains priority readiness.
+ ///
+ /// # Notes
+ ///
+ /// Method is available on all platforms, but not all platforms trigger the
+ /// priority event.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | `EPOLLPRI` |
+ /// | [kqueue] | *Not supported* |
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ #[inline]
+ pub fn is_priority(&self) -> bool {
+ sys::event::is_priority(&self.inner)
+ }
+
+ /// Returns true if the event contains AIO readiness.
+ ///
+ /// # Notes
+ ///
+ /// Method is available on all platforms, but not all platforms support AIO.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | *Not supported* |
+ /// | [kqueue]<sup>1</sup> | `EVFILT_AIO` |
+ ///
+ /// 1: Only supported on DragonFly BSD, FreeBSD, iOS and macOS.
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ pub fn is_aio(&self) -> bool {
+ sys::event::is_aio(&self.inner)
+ }
+
+ /// Returns true if the event contains LIO readiness.
+ ///
+ /// # Notes
+ ///
+ /// Method is available on all platforms, but only FreeBSD supports LIO. On
+ /// FreeBSD this method checks the `EVFILT_LIO` flag.
+ pub fn is_lio(&self) -> bool {
+ sys::event::is_lio(&self.inner)
+ }
+
+ /// Create a reference to an `Event` from a platform specific event.
+ pub(crate) fn from_sys_event_ref(sys_event: &sys::Event) -> &Event {
+ unsafe {
+ // This is safe because the memory layout of `Event` is
+ // the same as `sys::Event` due to the `repr(transparent)` attribute.
+ &*(sys_event as *const sys::Event as *const Event)
+ }
+ }
+}
+
+/// When the [alternate] flag is enabled this will print platform specific
+/// details, for example the fields of the `kevent` structure on platforms that
+/// use `kqueue(2)`. Note however that the output of this implementation is
+/// **not** considered a part of the stable API.
+///
+/// [alternate]: fmt::Formatter::alternate
+impl fmt::Debug for Event {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let alternate = f.alternate();
+ let mut d = f.debug_struct("Event");
+ d.field("token", &self.token())
+ .field("readable", &self.is_readable())
+ .field("writable", &self.is_writable())
+ .field("error", &self.is_error())
+ .field("read_closed", &self.is_read_closed())
+ .field("write_closed", &self.is_write_closed())
+ .field("priority", &self.is_priority())
+ .field("aio", &self.is_aio())
+ .field("lio", &self.is_lio());
+
+ if alternate {
+ struct EventDetails<'a>(&'a sys::Event);
+
+ impl<'a> fmt::Debug for EventDetails<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ sys::event::debug_details(f, self.0)
+ }
+ }
+
+ d.field("details", &EventDetails(&self.inner)).finish()
+ } else {
+ d.finish()
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/event/events.rs b/third_party/rust/mio/src/event/events.rs
new file mode 100644
index 0000000000..f3c5a2f02f
--- /dev/null
+++ b/third_party/rust/mio/src/event/events.rs
@@ -0,0 +1,230 @@
+use crate::event::Event;
+use crate::sys;
+
+use std::fmt;
+
+/// A collection of readiness events.
+///
+/// `Events` is passed as an argument to [`Poll::poll`] and will be used to
+/// receive any new readiness events received since the last poll. Usually, a
+/// single `Events` instance is created at the same time as a [`Poll`] and
+/// reused on each call to [`Poll::poll`].
+///
+/// See [`Poll`] for more documentation on polling.
+///
+/// [`Poll::poll`]: ../struct.Poll.html#method.poll
+/// [`Poll`]: ../struct.Poll.html
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let mut poll = Poll::new()?;
+/// #
+/// # assert!(events.is_empty());
+///
+/// // Register `event::Source`s with `poll`.
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in events.iter() {
+/// println!("Got an event for {:?}", event.token());
+/// }
+/// # Ok(())
+/// # }
+/// ```
+pub struct Events {
+ inner: sys::Events,
+}
+
+/// [`Events`] iterator.
+///
+/// This struct is created by the [`iter`] method on [`Events`].
+///
+/// [`Events`]: struct.Events.html
+/// [`iter`]: struct.Events.html#method.iter
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let mut poll = Poll::new()?;
+///
+/// // Register handles with `poll`.
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in events.iter() {
+/// println!("Got an event for {:?}", event.token());
+/// }
+/// # Ok(())
+/// # }
+/// ```
+#[derive(Debug, Clone)]
+pub struct Iter<'a> {
+ inner: &'a Events,
+ pos: usize,
+}
+
+impl Events {
+ /// Return a new `Events` capable of holding up to `capacity` events.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ /// assert_eq!(1024, events.capacity());
+ /// ```
+ pub fn with_capacity(capacity: usize) -> Events {
+ Events {
+ inner: sys::Events::with_capacity(capacity),
+ }
+ }
+
+ /// Returns the number of `Event` values that `self` can hold.
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ /// assert_eq!(1024, events.capacity());
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.inner.capacity()
+ }
+
+ /// Returns `true` if `self` contains no `Event` values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ /// assert!(events.is_empty());
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+
+ /// Returns an iterator over the `Event` values.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll};
+ /// use std::time::Duration;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let mut poll = Poll::new()?;
+ ///
+ /// // Register handles with `poll`.
+ ///
+ /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ ///
+ /// for event in events.iter() {
+ /// println!("Got an event for {:?}", event.token());
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn iter(&self) -> Iter<'_> {
+ Iter {
+ inner: self,
+ pos: 0,
+ }
+ }
+
+ /// Clears all `Event` values from the container explicitly.
+ ///
+ /// # Notes
+ ///
+ /// Events are cleared before every `poll`, so it is not required to call
+ /// this manually.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll};
+ /// use std::time::Duration;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let mut poll = Poll::new()?;
+ ///
+ /// // Register handles with `poll`.
+ ///
+ /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ ///
+ /// // Clear all events.
+ /// events.clear();
+ /// assert!(events.is_empty());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn clear(&mut self) {
+ self.inner.clear();
+ }
+
+ /// Returns the inner `sys::Events`.
+ pub(crate) fn sys(&mut self) -> &mut sys::Events {
+ &mut self.inner
+ }
+}
+
+impl<'a> IntoIterator for &'a Events {
+ type Item = &'a Event;
+ type IntoIter = Iter<'a>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a> Iterator for Iter<'a> {
+ type Item = &'a Event;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let ret = self
+ .inner
+ .inner
+ .get(self.pos)
+ .map(Event::from_sys_event_ref);
+ self.pos += 1;
+ ret
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let size = self.inner.inner.len();
+ (size, Some(size))
+ }
+
+ fn count(self) -> usize {
+ self.inner.inner.len()
+ }
+}
+
+impl fmt::Debug for Events {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self).finish()
+ }
+}
diff --git a/third_party/rust/mio/src/event/mod.rs b/third_party/rust/mio/src/event/mod.rs
new file mode 100644
index 0000000000..8e17f82ee5
--- /dev/null
+++ b/third_party/rust/mio/src/event/mod.rs
@@ -0,0 +1,10 @@
+//! Readiness event types and utilities.
+
+#[allow(clippy::module_inception)]
+mod event;
+mod events;
+mod source;
+
+pub use self::event::Event;
+pub use self::events::{Events, Iter};
+pub use self::source::Source;
diff --git a/third_party/rust/mio/src/event/source.rs b/third_party/rust/mio/src/event/source.rs
new file mode 100644
index 0000000000..4f9c6635a1
--- /dev/null
+++ b/third_party/rust/mio/src/event/source.rs
@@ -0,0 +1,139 @@
+use crate::{Interest, Registry, Token};
+
+use std::io;
+
+/// An event source that may be registered with [`Registry`].
+///
+/// Types that implement `event::Source` can be registered with
+/// `Registry`. Users of Mio **should not** use the `event::Source` trait
+/// functions directly. Instead, the equivalent functions on `Registry` should
+/// be used.
+///
+/// See [`Registry`] for more details.
+///
+/// [`Registry`]: ../struct.Registry.html
+///
+/// # Implementing `event::Source`
+///
+/// Event sources are always backed by system handles, such as sockets or other
+/// OS handles. These `event::Source`s will be monitored by the system
+/// selector. An implementation of `Source` will almost always delegate to a
+/// lower level handle. Examples of this are [`TcpStream`]s, or the *Unix only*
+/// [`SourceFd`].
+///
+/// [`TcpStream`]: ../net/struct.TcpStream.html
+/// [`SourceFd`]: ../unix/struct.SourceFd.html
+///
+/// # Dropping `event::Source`s
+///
+/// All `event::Source`s, unless otherwise specified, need to be [deregistered]
+/// before being dropped, otherwise they leak resources. This goes against the
+/// normal drop behaviour of types in Rust, which clean up after themselves,
+/// e.g. a `File` will close itself. However, since deregistering needs access
+/// to [`Registry`], this cannot be done while being dropped.
+///
+/// [deregistered]: ../struct.Registry.html#method.deregister
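+///
+/// A brief sketch of the pattern (a hedged example; `source` and `registry`
+/// are assumed to have been set up and registered earlier):
+///
+/// ```ignore
+/// // Deregister first, so the OS selector stops monitoring the handle...
+/// registry.deregister(&mut source)?;
+/// // ...then the source can be dropped without leaking resources.
+/// drop(source);
+/// ```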
+///
+/// # Examples
+///
+/// Implementing `Source` on a struct containing a socket:
+///
+#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+/// use mio::{Interest, Registry, Token};
+/// use mio::event::Source;
+/// use mio::net::TcpStream;
+///
+/// use std::io;
+///
+/// # #[allow(dead_code)]
+/// pub struct MySource {
+/// socket: TcpStream,
+/// }
+///
+/// impl Source for MySource {
+/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// // Delegate the `register` call to `socket`
+/// self.socket.register(registry, token, interests)
+/// }
+///
+/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// // Delegate the `reregister` call to `socket`
+/// self.socket.reregister(registry, token, interests)
+/// }
+///
+/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+/// // Delegate the `deregister` call to `socket`
+/// self.socket.deregister(registry)
+/// }
+/// }
+/// ```
+pub trait Source {
+ /// Register `self` with the given `Registry` instance.
+ ///
+ /// This function should not be called directly. Use [`Registry::register`]
+ /// instead. Implementors should handle registration by delegating the call
+ /// to another `Source` type.
+ ///
+ /// [`Registry::register`]: ../struct.Registry.html#method.register
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()>;
+
+ /// Re-register `self` with the given `Registry` instance.
+ ///
+ /// This function should not be called directly. Use
+    /// [`Registry::reregister`] instead. Implementors should handle
+    /// re-registration by delegating the call to another `Source` type.
+ ///
+ /// [`Registry::reregister`]: ../struct.Registry.html#method.reregister
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()>;
+
+ /// Deregister `self` from the given `Registry` instance.
+ ///
+ /// This function should not be called directly. Use
+ /// [`Registry::deregister`] instead. Implementors should handle
+ /// deregistration by delegating the call to another `Source` type.
+ ///
+ /// [`Registry::deregister`]: ../struct.Registry.html#method.deregister
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()>;
+}
+
+impl<T> Source for Box<T>
+where
+ T: Source + ?Sized,
+{
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ (&mut **self).register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ (&mut **self).reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ (&mut **self).deregister(registry)
+ }
+}
diff --git a/third_party/rust/mio/src/interest.rs b/third_party/rust/mio/src/interest.rs
new file mode 100644
index 0000000000..0aa0bda895
--- /dev/null
+++ b/third_party/rust/mio/src/interest.rs
@@ -0,0 +1,179 @@
+use std::num::NonZeroU8;
+use std::{fmt, ops};
+
+/// Interest used in registering.
+///
+/// Interests are used when [registering] [`event::Source`]s with [`Poll`]; they
+/// indicate what readiness should be monitored for. For example, if a socket is
+/// registered with [readable] interest and the socket becomes writable, no
+/// event will be returned from a call to [`poll`].
+///
+/// [registering]: struct.Registry.html#method.register
+/// [`event::Source`]: ./event/trait.Source.html
+/// [`Poll`]: struct.Poll.html
+/// [readable]: struct.Interest.html#associatedconstant.READABLE
+/// [`poll`]: struct.Poll.html#method.poll
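+///
+/// # Examples
+///
+/// Interests can be combined with `|` (or the `const` [`add`] method); a small
+/// sketch:
+///
+/// ```
+/// use mio::Interest;
+///
+/// let interests = Interest::READABLE | Interest::WRITABLE;
+/// assert!(interests.is_readable());
+/// assert!(interests.is_writable());
+/// ```
+///
+/// [`add`]: struct.Interest.html#method.add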
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct Interest(NonZeroU8);
+
+// These must be unique.
+const READABLE: u8 = 0b0001;
+const WRITABLE: u8 = 0b0010;
+// The following are not available on all platforms.
+#[cfg_attr(
+ not(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ )),
+ allow(dead_code)
+)]
+const AIO: u8 = 0b0100;
+#[cfg_attr(not(target_os = "freebsd"), allow(dead_code))]
+const LIO: u8 = 0b1000;
+
+impl Interest {
+    /// Returns an `Interest` set representing readable interests.
+ pub const READABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(READABLE) });
+
+    /// Returns an `Interest` set representing writable interests.
+ pub const WRITABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(WRITABLE) });
+
+    /// Returns an `Interest` set representing AIO completion interests.
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ pub const AIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(AIO) });
+
+    /// Returns an `Interest` set representing LIO completion interests.
+ #[cfg(target_os = "freebsd")]
+ pub const LIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(LIO) });
+
+    /// Adds together two `Interest`s.
+ ///
+ /// This does the same thing as the `BitOr` implementation, but is a
+ /// constant function.
+ ///
+ /// ```
+ /// use mio::Interest;
+ ///
+ /// const INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE);
+ /// # fn silent_dead_code_warning(_: Interest) { }
+ /// # silent_dead_code_warning(INTERESTS)
+ /// ```
+ #[allow(clippy::should_implement_trait)]
+ pub const fn add(self, other: Interest) -> Interest {
+ Interest(unsafe { NonZeroU8::new_unchecked(self.0.get() | other.0.get()) })
+ }
+
+ /// Removes `other` `Interest` from `self`.
+ ///
+ /// Returns `None` if the set would be empty after removing `other`.
+ ///
+ /// ```
+ /// use mio::Interest;
+ ///
+ /// const RW_INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE);
+ ///
+    /// // As long as one interest remains this will return `Some`.
+ /// let w_interest = RW_INTERESTS.remove(Interest::READABLE).unwrap();
+ /// assert!(!w_interest.is_readable());
+ /// assert!(w_interest.is_writable());
+ ///
+ /// // Removing all interests from the set will return `None`.
+ /// assert_eq!(w_interest.remove(Interest::WRITABLE), None);
+ ///
+    /// // It's also possible to remove multiple interests at once.
+ /// assert_eq!(RW_INTERESTS.remove(RW_INTERESTS), None);
+ /// ```
+ pub fn remove(self, other: Interest) -> Option<Interest> {
+ NonZeroU8::new(self.0.get() & !other.0.get()).map(Interest)
+ }
+
+ /// Returns true if the value includes readable readiness.
+ pub const fn is_readable(self) -> bool {
+ (self.0.get() & READABLE) != 0
+ }
+
+ /// Returns true if the value includes writable readiness.
+ pub const fn is_writable(self) -> bool {
+ (self.0.get() & WRITABLE) != 0
+ }
+
+    /// Returns true if `Interest` contains AIO readiness.
+ pub const fn is_aio(self) -> bool {
+ (self.0.get() & AIO) != 0
+ }
+
+    /// Returns true if `Interest` contains LIO readiness.
+ pub const fn is_lio(self) -> bool {
+ (self.0.get() & LIO) != 0
+ }
+}
+
+impl ops::BitOr for Interest {
+ type Output = Self;
+
+ #[inline]
+ fn bitor(self, other: Self) -> Self {
+ self.add(other)
+ }
+}
+
+impl ops::BitOrAssign for Interest {
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ self.0 = (*self | other).0;
+ }
+}
+
+impl fmt::Debug for Interest {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut one = false;
+ if self.is_readable() {
+ if one {
+ write!(fmt, " | ")?
+ }
+ write!(fmt, "READABLE")?;
+ one = true
+ }
+ if self.is_writable() {
+ if one {
+ write!(fmt, " | ")?
+ }
+ write!(fmt, "WRITABLE")?;
+ one = true
+ }
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ {
+ if self.is_aio() {
+ if one {
+ write!(fmt, " | ")?
+ }
+ write!(fmt, "AIO")?;
+ one = true
+ }
+ }
+ #[cfg(any(target_os = "freebsd"))]
+ {
+ if self.is_lio() {
+ if one {
+ write!(fmt, " | ")?
+ }
+ write!(fmt, "LIO")?;
+ one = true
+ }
+ }
+ debug_assert!(one, "printing empty interests");
+ Ok(())
+ }
+}
diff --git a/third_party/rust/mio/src/io_source.rs b/third_party/rust/mio/src/io_source.rs
new file mode 100644
index 0000000000..ae1cbf3767
--- /dev/null
+++ b/third_party/rust/mio/src/io_source.rs
@@ -0,0 +1,294 @@
+use std::ops::{Deref, DerefMut};
+#[cfg(unix)]
+use std::os::unix::io::AsRawFd;
+#[cfg(windows)]
+use std::os::windows::io::AsRawSocket;
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::{fmt, io};
+
+use crate::sys::IoSourceState;
+use crate::{event, Interest, Registry, Token};
+
+/// Adapter for a [`RawFd`] or [`RawSocket`] providing an [`event::Source`]
+/// implementation.
+///
+/// `IoSource` enables registering any FD or socket wrapper with [`Poll`].
+///
+/// While only implementations for TCP, UDP, and UDS (Unix only) are provided,
+/// Mio supports registering any FD or socket that can be registered with the
+/// underlying OS selector. `IoSource` provides the necessary bridge.
+///
+/// [`RawFd`]: std::os::unix::io::RawFd
+/// [`RawSocket`]: std::os::windows::io::RawSocket
+///
+/// # Notes
+///
+/// To handle the registrations and events properly **all** I/O operations (such
+/// as `read`, `write`, etc.) must go through the [`do_io`] method to ensure the
+/// internal state is updated accordingly.
+///
+/// [`Poll`]: crate::Poll
+/// [`do_io`]: IoSource::do_io
+/*
+///
+/// # Examples
+///
+/// Basic usage.
+///
+/// ```
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Interest, Poll, Token};
+/// use mio::IoSource;
+///
+/// use std::net;
+///
+/// let poll = Poll::new()?;
+///
+/// // Bind a std TCP listener.
+/// let listener = net::TcpListener::bind("127.0.0.1:0")?;
+/// // Wrap it in the `IoSource` type.
+/// let mut listener = IoSource::new(listener);
+///
+/// // Register the listener.
+/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?;
+/// # Ok(())
+/// # }
+/// ```
+*/
+pub struct IoSource<T> {
+ state: IoSourceState,
+ inner: T,
+ #[cfg(debug_assertions)]
+ selector_id: SelectorId,
+}
+
+impl<T> IoSource<T> {
+ /// Create a new `IoSource`.
+ pub fn new(io: T) -> IoSource<T> {
+ IoSource {
+ state: IoSourceState::new(),
+ inner: io,
+ #[cfg(debug_assertions)]
+ selector_id: SelectorId::new(),
+ }
+ }
+
+    /// Execute an I/O operation, ensuring that the socket receives more events
+ /// if it hits a [`WouldBlock`] error.
+ ///
+ /// # Notes
+ ///
+ /// This method is required to be called for **all** I/O operations to
+ /// ensure the user will receive events once the socket is ready again after
+ /// returning a [`WouldBlock`] error.
+ ///
+ /// [`WouldBlock`]: io::ErrorKind::WouldBlock
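+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (kept out of the test run, since it assumes `IoSource`
+    /// is publicly re-exported, as in the commented-out example above):
+    ///
+    /// ```ignore
+    /// use mio::IoSource;
+    /// use std::io;
+    /// use std::net;
+    ///
+    /// let socket = net::UdpSocket::bind("127.0.0.1:0")?;
+    /// socket.set_nonblocking(true)?;
+    /// let source = IoSource::new(socket);
+    ///
+    /// let mut buf = [0; 1024];
+    /// // Routing the call through `do_io` keeps the internal state in sync,
+    /// // so events keep coming after a `WouldBlock` error.
+    /// match source.do_io(|socket| socket.recv_from(&mut buf)) {
+    ///     Ok((n, from)) => println!("read {} bytes from {}", n, from),
+    ///     Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
+    ///     Err(e) => return Err(e),
+    /// }
+    /// ```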
+ pub fn do_io<F, R>(&self, f: F) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ self.state.do_io(f, &self.inner)
+ }
+
+ /// Returns the I/O source, dropping the state.
+ ///
+ /// # Notes
+ ///
+    /// To ensure no more events are received for this I/O source, first
+    /// [`deregister`] it.
+ ///
+ /// [`deregister`]: Registry::deregister
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+}
+
+/// Be careful when using this method. All I/O operations that may block must go
+/// through the [`do_io`] method.
+///
+/// [`do_io`]: IoSource::do_io
+impl<T> Deref for IoSource<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+/// Be careful when using this method. All I/O operations that may block must go
+/// through the [`do_io`] method.
+///
+/// [`do_io`]: IoSource::do_io
+impl<T> DerefMut for IoSource<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+#[cfg(unix)]
+impl<T> event::Source for IoSource<T>
+where
+ T: AsRawFd,
+{
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.associate(registry)?;
+ registry
+ .selector()
+ .register(self.inner.as_raw_fd(), token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.check_association(registry)?;
+ registry
+ .selector()
+ .reregister(self.inner.as_raw_fd(), token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.remove_association(registry)?;
+ registry.selector().deregister(self.inner.as_raw_fd())
+ }
+}
+
+#[cfg(windows)]
+impl<T> event::Source for IoSource<T>
+where
+ T: AsRawSocket,
+{
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.associate(registry)?;
+ self.state
+ .register(registry, token, interests, self.inner.as_raw_socket())
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.check_association(registry)?;
+ self.state.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, _registry: &Registry) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.remove_association(_registry)?;
+ self.state.deregister()
+ }
+}
+
+impl<T> fmt::Debug for IoSource<T>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+/// Used to associate an `IoSource` with a `sys::Selector`.
+#[cfg(debug_assertions)]
+#[derive(Debug)]
+struct SelectorId {
+ id: AtomicUsize,
+}
+
+#[cfg(debug_assertions)]
+impl SelectorId {
+ /// Value of `id` if `SelectorId` is not associated with any
+ /// `sys::Selector`. Valid selector ids start at 1.
+ const UNASSOCIATED: usize = 0;
+
+ /// Create a new `SelectorId`.
+ const fn new() -> SelectorId {
+ SelectorId {
+ id: AtomicUsize::new(Self::UNASSOCIATED),
+ }
+ }
+
+    /// Associate an I/O source with `registry`, returning an error if it's
+ /// already registered.
+ fn associate(&self, registry: &Registry) -> io::Result<()> {
+ let registry_id = registry.selector().id();
+ let previous_id = self.id.swap(registry_id, Ordering::AcqRel);
+
+ if previous_id == Self::UNASSOCIATED {
+ Ok(())
+ } else {
+ Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a `Registry`",
+ ))
+ }
+ }
+
+ /// Check the association of an I/O source with `registry`, returning an
+    /// error if it's registered with a different `Registry` or not registered at
+ /// all.
+ fn check_association(&self, registry: &Registry) -> io::Result<()> {
+ let registry_id = registry.selector().id();
+ let id = self.id.load(Ordering::Acquire);
+
+ if id == registry_id {
+ Ok(())
+ } else if id == Self::UNASSOCIATED {
+ Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ ))
+ } else {
+ Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a different `Registry`",
+ ))
+ }
+ }
+
+    /// Remove a previously made association from `registry`, returning an
+    /// error if it was not previously associated with `registry`.
+ fn remove_association(&self, registry: &Registry) -> io::Result<()> {
+ let registry_id = registry.selector().id();
+ let previous_id = self.id.swap(Self::UNASSOCIATED, Ordering::AcqRel);
+
+ if previous_id == registry_id {
+ Ok(())
+ } else {
+ Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ ))
+ }
+ }
+}
+
+#[cfg(debug_assertions)]
+impl Clone for SelectorId {
+ fn clone(&self) -> SelectorId {
+ SelectorId {
+ id: AtomicUsize::new(self.id.load(Ordering::Acquire)),
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/lib.rs b/third_party/rust/mio/src/lib.rs
new file mode 100644
index 0000000000..7a0797c77f
--- /dev/null
+++ b/third_party/rust/mio/src/lib.rs
@@ -0,0 +1,264 @@
+#![deny(
+ missing_docs,
+ missing_debug_implementations,
+ rust_2018_idioms,
+ unused_imports,
+ dead_code
+)]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+// Disallow warnings when running tests.
+#![cfg_attr(test, deny(warnings))]
+// Disallow warnings in examples.
+#![doc(test(attr(deny(warnings))))]
+
+//! Mio is a fast, low-level I/O library for Rust focusing on non-blocking APIs
+//! and event notification for building high performance I/O apps with as little
+//! overhead as possible over the OS abstractions.
+//!
+//! # Usage
+//!
+//! Using Mio starts by creating a [`Poll`], which reads events from the OS and
+//! puts them into [`Events`]. From there you can handle the I/O events the OS
+//! reported.
+//!
+//! For more detail, see [`Poll`].
+//!
+//! [`Poll`]: ../mio/struct.Poll.html
+//! [`Events`]: ../mio/event/struct.Events.html
+//!
+//! ## Examples
+//!
+//! Examples can be found in the `examples` directory of the source code, or [on
+//! GitHub].
+//!
+//! [on GitHub]: https://github.com/tokio-rs/mio/tree/master/examples
+//!
+//! ## Guide
+//!
+//! A getting started guide is available in the [`guide`] module.
+//!
+//! ## Available features
+//!
+//! The available features are described in the [`features`] module.
+
+// macros used internally
+#[macro_use]
+mod macros;
+
+mod interest;
+mod poll;
+mod sys;
+mod token;
+mod waker;
+
+pub mod event;
+
+cfg_io_source! {
+ mod io_source;
+}
+
+cfg_net! {
+ pub mod net;
+}
+
+#[doc(no_inline)]
+pub use event::Events;
+pub use interest::Interest;
+pub use poll::{Poll, Registry};
+pub use token::Token;
+pub use waker::Waker;
+
+#[cfg(all(unix, feature = "os-ext"))]
+#[cfg_attr(docsrs, doc(cfg(all(unix, feature = "os-ext"))))]
+pub mod unix {
+ //! Unix only extensions.
+
+ pub mod pipe {
+ //! Unix pipe.
+ //!
+ //! See the [`new`] function for documentation.
+
+ pub use crate::sys::pipe::{new, Receiver, Sender};
+ }
+
+ pub use crate::sys::SourceFd;
+}
+
+#[cfg(all(windows, feature = "os-ext"))]
+#[cfg_attr(docsrs, doc(cfg(all(windows, feature = "os-ext"))))]
+pub mod windows {
+ //! Windows only extensions.
+
+ pub use crate::sys::named_pipe::NamedPipe;
+}
+
+pub mod features {
+ //! # Mio's optional features.
+ //!
+ //! This document describes the available features in Mio.
+ //!
+ #![cfg_attr(feature = "os-poll", doc = "## `os-poll` (enabled)")]
+ #![cfg_attr(not(feature = "os-poll"), doc = "## `os-poll` (disabled)")]
+ //!
+    //! By default Mio only provides a shell implementation that `panic!`s the
+    //! moment it is actually run. Running it requires OS support, which is
+    //! enabled by activating the `os-poll` feature.
+ //!
+ //! This makes `Poll`, `Registry` and `Waker` functional.
+ //!
+ #![cfg_attr(feature = "os-ext", doc = "## `os-ext` (enabled)")]
+ #![cfg_attr(not(feature = "os-ext"), doc = "## `os-ext` (disabled)")]
+ //!
+ //! `os-ext` enables additional OS specific facilities. These facilities can
+    //! be found in the `unix` and `windows` modules.
+ //!
+ #![cfg_attr(feature = "net", doc = "## Network types (enabled)")]
+ #![cfg_attr(not(feature = "net"), doc = "## Network types (disabled)")]
+ //!
+ //! The `net` feature enables networking primitives in the `net` module.
+}
+
+pub mod guide {
+ //! # Getting started guide.
+ //!
+ //! In this guide we'll do the following:
+ //!
+ //! 1. Create a [`Poll`] instance (and learn what it is).
+ //! 2. Register an [event source].
+ //! 3. Create an event loop.
+ //!
+ //! At the end you'll have a very small (but quick) TCP server that accepts
+ //! connections and then drops (disconnects) them.
+ //!
+ //! ## 1. Creating a `Poll` instance
+ //!
+ //! Using Mio starts by creating a [`Poll`] instance, which monitors events
+ //! from the OS and puts them into [`Events`]. This allows us to execute I/O
+ //! operations based on what operations are ready.
+ //!
+ //! [`Poll`]: ../struct.Poll.html
+ //! [`Events`]: ../event/struct.Events.html
+ //!
+ #![cfg_attr(feature = "os-poll", doc = "```")]
+ #![cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ //! # use mio::{Poll, Events};
+ //! # fn main() -> std::io::Result<()> {
+ //! // `Poll` allows for polling of readiness events.
+ //! let poll = Poll::new()?;
+    //! // `Events` is a collection of readiness `Event`s and can be filled by
+ //! // calling `Poll::poll`.
+ //! let events = Events::with_capacity(128);
+ //! # drop((poll, events));
+ //! # Ok(())
+ //! # }
+ //! ```
+ //!
+    //! For example, if we're using a [`TcpListener`] we only want to attempt
+    //! to accept an incoming connection if any connections are queued and
+    //! ready to be accepted. We don't want to waste our time if no
+    //! connections are ready.
+ //!
+ //! [`TcpListener`]: ../net/struct.TcpListener.html
+ //!
+    //! ## 2. Registering an event source
+ //!
+ //! After we've created a [`Poll`] instance that monitors events from the OS
+ //! for us, we need to provide it with a source of events. This is done by
+    //! registering an [event source]. As the name “event source” suggests, it
+    //! is a source of events which can be polled using a `Poll` instance. On
+    //! Unix systems this is usually a file descriptor; on Windows it is a
+    //! socket or handle.
+ //!
+ //! In the example below we'll use a [`TcpListener`] for which we'll receive
+ //! an event (from [`Poll`]) once a connection is ready to be accepted.
+ //!
+ //! [event source]: ../event/trait.Source.html
+ //!
+ #![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ //! # use mio::net::TcpListener;
+ //! # use mio::{Poll, Token, Interest};
+ //! # fn main() -> std::io::Result<()> {
+ //! # let poll = Poll::new()?;
+ //! # let address = "127.0.0.1:0".parse().unwrap();
+ //! // Create a `TcpListener`, binding it to `address`.
+ //! let mut listener = TcpListener::bind(address)?;
+ //!
+ //! // Next we register it with `Poll` to receive events for it. The `SERVER`
+ //! // `Token` is used to determine that we received an event for the listener
+ //! // later on.
+ //! const SERVER: Token = Token(0);
+ //! poll.registry().register(&mut listener, SERVER, Interest::READABLE)?;
+ //! # Ok(())
+ //! # }
+ //! ```
+ //!
+ //! Multiple event sources can be [registered] (concurrently), so we can
+ //! monitor multiple sources at a time.
+ //!
+ //! [registered]: ../struct.Registry.html#method.register
+ //!
+ //! ## 3. Creating the event loop
+ //!
+ //! After we've created a [`Poll`] instance and registered one or more
+    //! [event sources] with it, we can [poll] it for events. Polling for events
+    //! is simple: we need a container to store the events, [`Events`], and we
+    //! need to do something based on the polled events (this part is up to
+    //! you, we can't do it all!). If we do this in a loop we've got ourselves
+    //! an event loop.
+ //!
+ //! The example below shows the event loop in action, completing our small
+ //! TCP server.
+ //!
+ //! [poll]: ../struct.Poll.html#method.poll
+ //! [event sources]: ../event/trait.Source.html
+ //!
+ #![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ //! # use std::io;
+ //! # use std::time::Duration;
+ //! # use mio::net::TcpListener;
+ //! # use mio::{Poll, Token, Interest, Events};
+ //! # fn main() -> io::Result<()> {
+ //! # let mut poll = Poll::new()?;
+ //! # let mut events = Events::with_capacity(128);
+ //! # let address = "127.0.0.1:0".parse().unwrap();
+ //! # let mut listener = TcpListener::bind(address)?;
+ //! # const SERVER: Token = Token(0);
+ //! # poll.registry().register(&mut listener, SERVER, Interest::READABLE)?;
+ //! // Start our event loop.
+ //! loop {
+ //! // Poll the OS for events, waiting at most 100 milliseconds.
+ //! poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ //!
+ //! // Process each event.
+ //! for event in events.iter() {
+ //! // We can use the token we previously provided to `register` to
+    //!         // determine which source the event is for.
+ //! match event.token() {
+ //! SERVER => loop {
+ //! // One or more connections are ready, so we'll attempt to
+ //! // accept them (in a loop).
+ //! match listener.accept() {
+ //! Ok((connection, address)) => {
+ //! println!("Got a connection from: {}", address);
+ //! # drop(connection);
+ //! },
+ //! // A "would block error" is returned if the operation
+ //! // is not ready, so we'll stop trying to accept
+ //! // connections.
+ //! Err(ref err) if would_block(err) => break,
+ //! Err(err) => return Err(err),
+ //! }
+ //! }
+ //! # _ => unreachable!(),
+ //! }
+ //! }
+ //! # return Ok(());
+ //! }
+ //!
+ //! fn would_block(err: &io::Error) -> bool {
+ //! err.kind() == io::ErrorKind::WouldBlock
+ //! }
+ //! # }
+ //! ```
+}
diff --git a/third_party/rust/mio/src/macros.rs b/third_party/rust/mio/src/macros.rs
new file mode 100644
index 0000000000..f97f90911f
--- /dev/null
+++ b/third_party/rust/mio/src/macros.rs
@@ -0,0 +1,70 @@
+//! Macros to ease conditional code based on enabled features.
+
+// Depending on the features not all macros are used.
+#![allow(unused_macros)]
+
+/// The `os-poll` feature is enabled.
+macro_rules! cfg_os_poll {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "os-poll")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "os-poll")))]
+ $item
+ )*
+ }
+}
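+
+// A usage sketch (hypothetical module name; mirrors how `lib.rs` uses these
+// macros): the wrapped items only exist when the `os-poll` feature is enabled.
+//
+// cfg_os_poll! {
+//     mod needs_os_poll;
+// }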
+
+/// The `os-poll` feature is disabled.
+macro_rules! cfg_not_os_poll {
+ ($($item:item)*) => {
+ $(
+ #[cfg(not(feature = "os-poll"))]
+ $item
+ )*
+ }
+}
+
+/// The `os-ext` feature is enabled.
+macro_rules! cfg_os_ext {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "os-ext")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "os-ext")))]
+ $item
+ )*
+ }
+}
+
+/// The `net` feature is enabled.
+macro_rules! cfg_net {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "net")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "net")))]
+ $item
+ )*
+ }
+}
+
+/// One of the features that needs `IoSource` is enabled. That is `net`, or
+/// `os-ext` on Unix (for `pipe`).
+macro_rules! cfg_io_source {
+ ($($item:item)*) => {
+ $(
+ #[cfg(any(feature = "net", all(unix, feature = "os-ext")))]
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "net", all(unix, feature = "os-ext")))))]
+ $item
+ )*
+ }
+}
+
+/// The `os-ext` feature is enabled, or one of the features that need `os-ext`.
+macro_rules! cfg_any_os_ext {
+ ($($item:item)*) => {
+ $(
+ #[cfg(any(feature = "os-ext", feature = "net"))]
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "os-ext", feature = "net"))))]
+ $item
+ )*
+ }
+}
diff --git a/third_party/rust/mio/src/net/mod.rs b/third_party/rust/mio/src/net/mod.rs
new file mode 100644
index 0000000000..c8cef17e98
--- /dev/null
+++ b/third_party/rust/mio/src/net/mod.rs
@@ -0,0 +1,37 @@
+//! Networking primitives.
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+//!
+//! # Notes
+//!
+//! When using a datagram based socket, i.e. [`UdpSocket`] or [`UnixDatagram`],
+//! it's only possible to receive a packet once. This means that if you provide
+//! a buffer that is too small you won't be able to receive the rest of the
+//! data. How OSs deal with this situation differs per OS:
+//! * Unixes, such as Linux, FreeBSD and macOS, will simply fill the buffer and
+//!   return the number of bytes written. This means that if the returned value
+//!   is equal to the size of the buffer it may have only written a part of the
+//!   packet (or the packet has the same size as the buffer).
+//! * Windows returns a `WSAEMSGSIZE` error.
+//!
+//! Mio does not change the value (either ok or error) returned by the OS; it's
+//! up to the user to handle this. How to deal with these differences is still
+//! up for debate, specifically in
+//! <https://github.com/rust-lang/rust/issues/55794>. The best advice we can
+//! give is to always call receive with a large enough buffer.
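+//!
+//! As a hedged illustration (using only standard library types, so it is
+//! feature-independent): size the buffer for the largest datagram you expect,
+//! e.g. 64 KiB for arbitrary UDP.
+//!
+//! ```no_run
+//! use std::net::UdpSocket;
+//!
+//! # fn main() -> std::io::Result<()> {
+//! let socket = UdpSocket::bind("127.0.0.1:0")?;
+//! // 64 KiB covers the maximum UDP payload, so no datagram is truncated.
+//! let mut buf = vec![0u8; 65536];
+//! let (n, from) = socket.recv_from(&mut buf)?;
+//! println!("received {} bytes from {}", n, from);
+//! # Ok(())
+//! # }
+//! ```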
+
+mod tcp;
+pub use self::tcp::{TcpListener, TcpStream};
+
+mod udp;
+pub use self::udp::UdpSocket;
+
+#[cfg(unix)]
+mod uds;
+#[cfg(unix)]
+pub use self::uds::{SocketAddr, UnixDatagram, UnixListener, UnixStream};
diff --git a/third_party/rust/mio/src/net/tcp/listener.rs b/third_party/rust/mio/src/net/tcp/listener.rs
new file mode 100644
index 0000000000..21bffbaffc
--- /dev/null
+++ b/third_party/rust/mio/src/net/tcp/listener.rs
@@ -0,0 +1,217 @@
+use std::net::{self, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+#[cfg(windows)]
+use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+use std::{fmt, io};
+
+use crate::io_source::IoSource;
+use crate::net::TcpStream;
+#[cfg(unix)]
+use crate::sys::tcp::set_reuseaddr;
+use crate::sys::tcp::{bind, listen, new_for_addr};
+use crate::{event, sys, Interest, Registry, Token};
+
+/// A structure representing a socket server.
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Interest, Poll, Token};
+/// use mio::net::TcpListener;
+/// use std::time::Duration;
+///
+/// let mut listener = TcpListener::bind("127.0.0.1:34255".parse()?)?;
+///
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register the socket with `Poll`
+/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?;
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// // There may be a socket ready to be accepted
+/// # Ok(())
+/// # }
+/// ```
+pub struct TcpListener {
+ inner: IoSource<net::TcpListener>,
+}
+
+impl TcpListener {
+ /// Convenience method to bind a new TCP listener to the specified address
+ /// to receive new connections.
+ ///
+ /// This function will take the following steps:
+ ///
+ /// 1. Create a new TCP socket.
+ /// 2. Set the `SO_REUSEADDR` option on the socket on Unix.
+ /// 3. Bind the socket to the specified address.
+    /// 4. Call `listen` on the socket to prepare it to receive new connections.
+ pub fn bind(addr: SocketAddr) -> io::Result<TcpListener> {
+ let socket = new_for_addr(addr)?;
+ #[cfg(unix)]
+ let listener = unsafe { TcpListener::from_raw_fd(socket) };
+ #[cfg(windows)]
+ let listener = unsafe { TcpListener::from_raw_socket(socket as _) };
+
+        // On platforms with Berkeley-derived sockets, this allows a socket to
+        // be quickly rebound, without needing to wait for the OS to clean up
+        // the previous one.
+ //
+ // On Windows, this allows rebinding sockets which are actively in use,
+ // which allows “socket hijacking”, so we explicitly don't set it here.
+ // https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
+ #[cfg(not(windows))]
+ set_reuseaddr(&listener.inner, true)?;
+
+ bind(&listener.inner, addr)?;
+ listen(&listener.inner, 1024)?;
+ Ok(listener)
+ }
+
+ /// Creates a new `TcpListener` from a standard `net::TcpListener`.
+ ///
+ /// This function is intended to be used to wrap a TCP listener from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+    /// about the underlying listener; it is left up to the user to set it
+ /// in non-blocking mode.
+ pub fn from_std(listener: net::TcpListener) -> TcpListener {
+ TcpListener {
+ inner: IoSource::new(listener),
+ }
+ }
+
+ /// Accepts a new `TcpStream`.
+ ///
+ /// This may return an `Err(e)` where `e.kind()` is
+ /// `io::ErrorKind::WouldBlock`. This means a stream may be ready at a later
+ /// point and one should wait for an event before calling `accept` again.
+ ///
+ /// If an accepted stream is returned, the remote address of the peer is
+ /// returned along with it.
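+    ///
+    /// A brief sketch of the calling pattern (hedged; assumes a readable event
+    /// for the listener was just received):
+    ///
+    /// ```ignore
+    /// match listener.accept() {
+    ///     Ok((stream, addr)) => println!("accepted a connection from {}", addr),
+    ///     // Not ready after all; wait for the next readable event.
+    ///     Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {}
+    ///     Err(e) => return Err(e),
+    /// }
+    /// ```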
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ self.inner.do_io(|inner| {
+ sys::tcp::accept(inner).map(|(stream, addr)| (TcpStream::from_std(stream), addr))
+ })
+ }
+
+ /// Returns the local socket address of this listener.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+}
+
+impl event::Source for TcpListener {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+#[cfg(unix)]
+impl IntoRawFd for TcpListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for TcpListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl FromRawFd for TcpListener {
+ /// Converts a `RawFd` to a `TcpListener`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpListener {
+ TcpListener::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
+
+#[cfg(windows)]
+impl IntoRawSocket for TcpListener {
+ fn into_raw_socket(self) -> RawSocket {
+ self.inner.into_inner().into_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl AsRawSocket for TcpListener {
+ fn as_raw_socket(&self) -> RawSocket {
+ self.inner.as_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl FromRawSocket for TcpListener {
+ /// Converts a `RawSocket` to a `TcpListener`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_socket(socket: RawSocket) -> TcpListener {
+ TcpListener::from_std(FromRawSocket::from_raw_socket(socket))
+ }
+}
diff --git a/third_party/rust/mio/src/net/tcp/mod.rs b/third_party/rust/mio/src/net/tcp/mod.rs
new file mode 100644
index 0000000000..94af5c10e8
--- /dev/null
+++ b/third_party/rust/mio/src/net/tcp/mod.rs
@@ -0,0 +1,5 @@
+mod listener;
+pub use self::listener::TcpListener;
+
+mod stream;
+pub use self::stream::TcpStream;
diff --git a/third_party/rust/mio/src/net/tcp/stream.rs b/third_party/rust/mio/src/net/tcp/stream.rs
new file mode 100644
index 0000000000..029f186b42
--- /dev/null
+++ b/third_party/rust/mio/src/net/tcp/stream.rs
@@ -0,0 +1,334 @@
+use std::fmt;
+use std::io::{self, IoSlice, IoSliceMut, Read, Write};
+use std::net::{self, Shutdown, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+#[cfg(windows)]
+use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+
+use crate::io_source::IoSource;
+use crate::sys::tcp::{connect, new_for_addr};
+use crate::{event, Interest, Registry, Token};
+
+/// A non-blocking TCP stream between a local socket and a remote socket.
+///
+/// The socket will be closed when the value is dropped.
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::net::{TcpListener, SocketAddr};
+/// # use std::error::Error;
+/// #
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// let address: SocketAddr = "127.0.0.1:0".parse()?;
+/// let listener = TcpListener::bind(address)?;
+/// use mio::{Events, Interest, Poll, Token};
+/// use mio::net::TcpStream;
+/// use std::time::Duration;
+///
+/// let mut stream = TcpStream::connect(listener.local_addr()?)?;
+///
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register the socket with `Poll`
+/// poll.registry().register(&mut stream, Token(0), Interest::WRITABLE)?;
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// // The socket might be ready at this point
+/// # Ok(())
+/// # }
+/// ```
+pub struct TcpStream {
+ inner: IoSource<net::TcpStream>,
+}
+
+impl TcpStream {
+ /// Create a new TCP stream and issue a non-blocking connect to the
+ /// specified address.
+ ///
+ /// # Notes
+ ///
+ /// The returned `TcpStream` may not be connected (and thus usable), unlike
+ /// the API found in `std::net::TcpStream`. Because Mio issues a
+ /// *non-blocking* connect it will not block the thread and instead return
+ /// an unconnected `TcpStream`.
+ ///
+ /// Ensuring the returned stream is connected is surprisingly complex when
+ /// considering cross-platform support. Doing this properly should follow
+ /// the steps below, an example implementation can be found
+ /// [here](https://github.com/Thomasdezeeuw/heph/blob/0c4f1ab3eaf08bea1d65776528bfd6114c9f8374/src/net/tcp/stream.rs#L560-L622).
+ ///
+ /// 1. Call `TcpStream::connect`
+ /// 2. Register the returned stream with at least [read interest].
+ /// 3. Wait for a (readable) event.
+    /// 4. Check `TcpStream::peer_addr`. If it returns `libc::EINPROGRESS` or
+    /// `ErrorKind::NotConnected` it means the stream is not yet connected,
+    /// go back to step 3. If it returns an address it means the stream is
+    /// connected, go to step 5 (see the sketch below). If another error is
+    /// returned, something went wrong.
+ /// 5. Now the stream can be used.
+ ///
+ /// [read interest]: Interest::READABLE
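+    ///
+    /// A brief sketch of steps 3 and 4 (hedged; assumes `stream` was
+    /// registered with readable interest and an event for it was received):
+    ///
+    /// ```ignore
+    /// match stream.peer_addr() {
+    ///     // An address was returned: the stream is connected (step 5).
+    ///     Ok(..) => { /* use the stream */ }
+    ///     // Not yet connected: wait for another event (back to step 3).
+    ///     // A raw `EINPROGRESS` error should be treated the same way.
+    ///     Err(ref e) if e.kind() == std::io::ErrorKind::NotConnected => {}
+    ///     Err(e) => return Err(e),
+    /// }
+    /// ```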
+ pub fn connect(addr: SocketAddr) -> io::Result<TcpStream> {
+ let socket = new_for_addr(addr)?;
+ #[cfg(unix)]
+ let stream = unsafe { TcpStream::from_raw_fd(socket) };
+ #[cfg(windows)]
+ let stream = unsafe { TcpStream::from_raw_socket(socket as _) };
+ connect(&stream.inner, addr)?;
+ Ok(stream)
+ }
+
+ /// Creates a new `TcpStream` from a standard `net::TcpStream`.
+ ///
+ /// This function is intended to be used to wrap a TCP stream from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying stream; it is left up to the user to set it in
+ /// non-blocking mode.
+ ///
+ /// # Note
+ ///
+ /// The TCP stream here will not have `connect` called on it, so it
+ /// should already be connected via some other means (be it manually, or
+ /// the standard library).
+ pub fn from_std(stream: net::TcpStream) -> TcpStream {
+ TcpStream {
+ inner: IoSource::new(stream),
+ }
+ }
+
+ /// Returns the socket address of the remote peer of this TCP connection.
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.peer_addr()
+ }
+
+ /// Returns the socket address of the local half of this TCP connection.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O on the specified
+ /// portions to return immediately with an appropriate value (see the
+ /// documentation of `Shutdown`).
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.inner.shutdown(how)
+ }
+
+ /// Sets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// If set, this option disables the Nagle algorithm. This means that
+ /// segments are always sent as soon as possible, even if there is only a
+ /// small amount of data. When not set, data is buffered until there is a
+ /// sufficient amount to send out, thereby avoiding the frequent sending of
+ /// small packets.
+ ///
+ /// # Notes
+ ///
+ /// On Windows make sure the stream is connected before calling this method,
+    /// by receiving a (writable) event. Trying to set `nodelay` on an
+ /// unconnected `TcpStream` is unspecified behavior.
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.inner.set_nodelay(nodelay)
+ }
+
+ /// Gets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// For more information about this option, see [`set_nodelay`][link].
+ ///
+ /// [link]: #method.set_nodelay
+ ///
+ /// # Notes
+ ///
+ /// On Windows make sure the stream is connected before calling this method,
+    /// by receiving a (writable) event. Trying to get `nodelay` on an
+ /// unconnected `TcpStream` is unspecified behavior.
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.inner.nodelay()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Notes
+ ///
+ /// On Windows make sure the stream is connected before calling this method,
+    /// by receiving a (writable) event. Trying to set `ttl` on an
+ /// unconnected `TcpStream` is unspecified behavior.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// # Notes
+ ///
+ /// On Windows make sure the stream is connected before calling this method,
+    /// by receiving a (writable) event. Trying to get `ttl` on an
+ /// unconnected `TcpStream` is unspecified behavior.
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying recv system call.
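+    ///
+    /// A brief sketch (hedged; assumes `stream` is a connected `TcpStream`
+    /// that just became readable):
+    ///
+    /// ```ignore
+    /// let mut buf = [0; 16];
+    /// // The peeked bytes stay in the queue; a following `read` call will
+    /// // return the same data again.
+    /// let peeked = stream.peek(&mut buf)?;
+    /// ```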
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.peek(buf)
+ }
+}
+
+impl Read for TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read_vectored(bufs))
+ }
+}
+
+impl<'a> Read for &'a TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read_vectored(bufs))
+ }
+}
+
+impl Write for TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|inner| (&*inner).flush())
+ }
+}
+
+impl<'a> Write for &'a TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|inner| (&*inner).flush())
+ }
+}
+
+impl event::Source for TcpStream {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+#[cfg(unix)]
+impl IntoRawFd for TcpStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for TcpStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl FromRawFd for TcpStream {
+ /// Converts a `RawFd` to a `TcpStream`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpStream {
+ TcpStream::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
+
+#[cfg(windows)]
+impl IntoRawSocket for TcpStream {
+ fn into_raw_socket(self) -> RawSocket {
+ self.inner.into_inner().into_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl AsRawSocket for TcpStream {
+ fn as_raw_socket(&self) -> RawSocket {
+ self.inner.as_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl FromRawSocket for TcpStream {
+ /// Converts a `RawSocket` to a `TcpStream`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_socket(socket: RawSocket) -> TcpStream {
+ TcpStream::from_std(FromRawSocket::from_raw_socket(socket))
+ }
+}
diff --git a/third_party/rust/mio/src/net/udp.rs b/third_party/rust/mio/src/net/udp.rs
new file mode 100644
index 0000000000..8cfe4e456c
--- /dev/null
+++ b/third_party/rust/mio/src/net/udp.rs
@@ -0,0 +1,635 @@
+//! Primitives for working with UDP.
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+use crate::io_source::IoSource;
+use crate::{event, sys, Interest, Registry, Token};
+
+use std::fmt;
+use std::io;
+use std::net;
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+#[cfg(windows)]
+use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+
+/// A User Datagram Protocol socket.
+///
+/// This is an implementation of a bound UDP socket. This supports both IPv4 and
+/// IPv6 addresses, and there is no corresponding notion of a server because UDP
+/// is a datagram protocol.
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// #
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// // An Echo program:
+/// // SENDER -> sends a message.
+/// // ECHOER -> listens and prints the message received.
+///
+/// use mio::net::UdpSocket;
+/// use mio::{Events, Interest, Poll, Token};
+/// use std::time::Duration;
+///
+/// const SENDER: Token = Token(0);
+/// const ECHOER: Token = Token(1);
+///
+/// // Binding to port 0 lets the OS assign a free port, so each socket gets
+/// // its own address and the binds won't conflict.
+/// let mut sender_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+/// let mut echoer_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+///
+/// // If we do not use connect here, SENDER and ECHOER would need to call send_to and recv_from
+/// // respectively.
+/// sender_socket.connect(echoer_socket.local_addr()?)?;
+///
+/// // We need a Poll to check if SENDER is ready to be written into, and if ECHOER is ready to be
+/// // read from.
+/// let mut poll = Poll::new()?;
+///
+/// // We register our sockets here so that we can check if they are ready to be written/read.
+/// poll.registry().register(&mut sender_socket, SENDER, Interest::WRITABLE)?;
+/// poll.registry().register(&mut echoer_socket, ECHOER, Interest::READABLE)?;
+///
+/// let msg_to_send = [9; 9];
+/// let mut buffer = [0; 9];
+///
+/// let mut events = Events::with_capacity(128);
+/// loop {
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+/// for event in events.iter() {
+/// match event.token() {
+/// // Our SENDER is ready to be written into.
+/// SENDER => {
+/// let bytes_sent = sender_socket.send(&msg_to_send)?;
+/// assert_eq!(bytes_sent, 9);
+/// println!("sent {:?} -> {:?} bytes", msg_to_send, bytes_sent);
+/// },
+/// // Our ECHOER is ready to be read from.
+/// ECHOER => {
+/// let num_recv = echoer_socket.recv(&mut buffer)?;
+/// println!("echo {:?} -> {:?}", buffer, num_recv);
+/// buffer = [0; 9];
+/// # drop(buffer); // Silence unused assignment warning.
+/// # return Ok(());
+/// }
+/// _ => unreachable!()
+/// }
+/// }
+/// }
+/// # }
+/// ```
+pub struct UdpSocket {
+ inner: IoSource<net::UdpSocket>,
+}
+
+impl UdpSocket {
+ /// Creates a UDP socket from the given address.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// // We must bind it to an open address.
+ /// let socket = match UdpSocket::bind("127.0.0.1:0".parse()?) {
+ /// Ok(new_socket) => new_socket,
+ /// Err(fail) => {
+ /// // We panic! here, but you could try to bind it again on another address.
+ /// panic!("Failed to bind socket. {:?}", fail);
+ /// }
+ /// };
+ ///
+    /// // Our socket was created, but we should not use it before checking its readiness.
+ /// # drop(socket); // Silence unused variable warning.
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn bind(addr: SocketAddr) -> io::Result<UdpSocket> {
+ sys::udp::bind(addr).map(UdpSocket::from_std)
+ }
+
+ /// Creates a new `UdpSocket` from a standard `net::UdpSocket`.
+ ///
+ /// This function is intended to be used to wrap a UDP socket from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying socket; it is left up to the user to set it in
+ /// non-blocking mode.
+ pub fn from_std(socket: net::UdpSocket) -> UdpSocket {
+ UdpSocket {
+ inner: IoSource::new(socket),
+ }
+ }
+
+ /// Returns the socket address that this socket was created from.
+ ///
+ /// # Examples
+ ///
+ // This assertion is almost, but not quite, universal. It fails on
+ // shared-IP FreeBSD jails. It's hard for mio to know whether we're jailed,
+ // so simply disable the test on FreeBSD.
+ #[cfg_attr(all(feature = "os-poll", not(target_os = "freebsd")), doc = "```")]
+ #[cfg_attr(
+ any(not(feature = "os-poll"), target_os = "freebsd"),
+ doc = "```ignore"
+ )]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let addr = "127.0.0.1:0".parse()?;
+ /// let socket = UdpSocket::bind(addr)?;
+ /// assert_eq!(socket.local_addr()?.ip(), addr.ip());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ /// Returns the socket address of the remote peer this socket was connected to.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let addr = "127.0.0.1:0".parse()?;
+ /// let peer_addr = "127.0.0.1:11100".parse()?;
+ /// let socket = UdpSocket::bind(addr)?;
+ /// socket.connect(peer_addr)?;
+ /// assert_eq!(socket.peer_addr()?.ip(), peer_addr.ip());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.peer_addr()
+ }
+
+ /// Sends data on the socket to the given address. On success, returns the
+ /// number of bytes written.
+ ///
+    /// The target address must be a `SocketAddr`; unlike the standard
+    /// library's `send_to`, this method does not accept a `ToSocketAddrs`
+    /// implementor.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is writable before calling send_to,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let bytes_sent = socket.send_to(&[9; 9], "127.0.0.1:11100".parse()?)?;
+ /// assert_eq!(bytes_sent, 9);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.send_to(buf, target))
+ }
+
+ /// Receives data from the socket. On success, returns the number of bytes
+ /// read and the address from whence the data came.
+ ///
+ /// # Notes
+ ///
+ /// On Windows, if the data is larger than the buffer specified, the buffer
+    /// is filled with the first part of the data, and `recv_from` returns the
+    /// error `WSAEMSGSIZE` (10040). The excess data is lost.
+ /// Make sure to always use a sufficiently large buffer to hold the
+ /// maximum UDP packet size, which can be up to 65536 bytes in size.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is readable before calling recv_from,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let mut buf = [0; 9];
+ /// let (num_recv, from_addr) = socket.recv_from(&mut buf)?;
+ /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.inner.do_io(|inner| inner.recv_from(buf))
+ }
+
+ /// Receives data from the socket, without removing it from the input queue.
+ /// On success, returns the number of bytes read and the address the data
+ /// came from.
+ ///
+ /// # Notes
+ ///
+ /// On Windows, if the data is larger than the buffer specified, the buffer
+ /// is filled with the first part of the data, and `peek_from` returns the
+ /// error `WSAEMSGSIZE` (10040). The excess data is lost. Make sure to
+ /// always use a buffer large enough to hold the maximum UDP payload, which
+ /// can be up to 65,507 bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is readable before calling peek_from,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let mut buf = [0; 9];
+ /// let (num_recv, from_addr) = socket.peek_from(&mut buf)?;
+ /// println!("Received {} bytes from {}: {:?}", num_recv, from_addr, &buf[..num_recv]);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.inner.do_io(|inner| inner.peek_from(buf))
+ }
+
+ /// Sends data on the socket to the connected peer set via `connect()`. On
+ /// success, returns the number of bytes written.
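+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; the peer address is illustrative and, as with
+ /// `send_to`, the socket should be checked for writability first:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// socket.connect("127.0.0.1:11100".parse()?)?;
+ ///
+ /// // We must check if the socket is writable before calling send,
+ /// // or we could run into a WouldBlock error.
+ /// let bytes_sent = socket.send(&[9; 9])?;
+ /// assert_eq!(bytes_sent, 9);
+ /// # Ok(())
+ /// # }
+ /// ```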
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.send(buf))
+ }
+
+ /// Receives data from the connected peer set via `connect()`. On success,
+ /// returns the number of bytes read.
+ ///
+ /// # Notes
+ ///
+ /// On Windows, if the data is larger than the buffer specified, the buffer
+ /// is filled with the first part of the data, and `recv` returns the error
+ /// `WSAEMSGSIZE` (10040). The excess data is lost. Make sure to always use
+ /// a buffer large enough to hold the maximum UDP payload, which can be up
+ /// to 65,507 bytes.
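+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; the peer address is illustrative and the socket
+ /// should be checked for readability first:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// socket.connect("127.0.0.1:11100".parse()?)?;
+ ///
+ /// // We must check if the socket is readable before calling recv,
+ /// // or we could run into a WouldBlock error.
+ /// let mut buf = [0; 9];
+ /// let num_recv = socket.recv(&mut buf)?;
+ /// println!("Received {} bytes: {:?}", num_recv, &buf[..num_recv]);
+ /// # Ok(())
+ /// # }
+ /// ```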
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.recv(buf))
+ }
+
+ /// Receives data from the socket, without removing it from the input queue.
+ /// On success, returns the number of bytes read.
+ ///
+ /// # Notes
+ ///
+ /// On Windows, if the data is larger than the buffer specified, the buffer
+ /// is filled with the first part of the data, and `peek` returns the error
+ /// `WSAEMSGSIZE` (10040). The excess data is lost. Make sure to always use
+ /// a buffer large enough to hold the maximum UDP payload, which can be up
+ /// to 65,507 bytes.
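+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; the peer address is illustrative. Peeking leaves the
+ /// datagram queued, so a subsequent `recv` returns the same bytes:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// socket.connect("127.0.0.1:11100".parse()?)?;
+ ///
+ /// let mut buf = [0; 9];
+ /// let peeked = socket.peek(&mut buf)?;
+ /// let read = socket.recv(&mut buf)?;
+ /// assert_eq!(peeked, read);
+ /// # Ok(())
+ /// # }
+ /// ```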
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.peek(buf))
+ }
+
+ /// Connects the UDP socket by setting the default destination for `send()`
+ /// and limiting packets that are read via `recv()` to those coming from the
+ /// address specified in `addr`.
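+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; the peer address is illustrative:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// socket.connect("127.0.0.1:11100".parse()?)?;
+ ///
+ /// // `send` and `recv` now operate on the connected peer address.
+ /// socket.send(&[0; 4])?;
+ /// # Ok(())
+ /// # }
+ /// ```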
+ pub fn connect(&self, addr: SocketAddr) -> io::Result<()> {
+ self.inner.connect(addr)
+ }
+
+ /// Sets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// When enabled, this socket is allowed to send packets to a broadcast
+ /// address.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// if !broadcast_socket.broadcast()? {
+ ///     broadcast_socket.set_broadcast(true)?;
+ /// }
+ ///
+ /// assert!(broadcast_socket.broadcast()?);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.inner.set_broadcast(on)
+ }
+
+ /// Gets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_broadcast`][link].
+ ///
+ /// [link]: #method.set_broadcast
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// assert!(!broadcast_socket.broadcast()?);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.inner.broadcast()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// If enabled, multicast packets will be looped back to the local socket.
+ /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.inner.set_multicast_loop_v4(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v4
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.inner.multicast_loop_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// Indicates the time-to-live value of outgoing multicast packets for
+ /// this socket. The default value is 1, which means that multicast packets
+ /// don't leave the local network unless explicitly requested.
+ ///
+ /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_multicast_ttl_v4(ttl)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_ttl_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_ttl_v4
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.inner.multicast_ttl_v4()
+ }
+
+ /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// Controls whether this socket sees the multicast packets it sends itself.
+ /// Note that this may not have any effect on IPv4 sockets.
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.inner.set_multicast_loop_v6(on)
+ }
+
+ /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v6`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v6
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.inner.multicast_loop_v6()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// if socket.ttl()? < 255 {
+ /// socket.set_ttl(255)?;
+ /// }
+ ///
+ /// assert_eq!(socket.ttl()?, 255);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// socket.set_ttl(255)?;
+ ///
+ /// assert_eq!(socket.ttl()?, 255);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// address of the local interface with which the system should join the
+ /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+ /// interface is chosen by the system.
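+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; `224.0.0.123` is an illustrative multicast group, and
+ /// passing the unspecified address lets the system pick the interface:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let socket = UdpSocket::bind("0.0.0.0:0".parse()?)?;
+ /// let group: Ipv4Addr = "224.0.0.123".parse()?;
+ /// socket.join_multicast_v4(&group, &Ipv4Addr::UNSPECIFIED)?;
+ /// # Ok(())
+ /// # }
+ /// ```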
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+ self.inner.join_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// index of the interface to join/leave (or 0 to indicate any interface).
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ self.inner.join_multicast_v6(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v4`][link].
+ ///
+ /// [link]: #method.join_multicast_v4
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+ self.inner.leave_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v6`][link].
+ ///
+ /// [link]: #method.join_multicast_v6
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ self.inner.leave_multicast_v6(multiaddr, interface)
+ }
+
+ /// Get the value of the `IPV6_V6ONLY` option on this socket.
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn only_v6(&self) -> io::Result<bool> {
+ sys::udp::only_v6(&self.inner)
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
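+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of draining a pending socket error:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// if let Some(err) = socket.take_error()? {
+ ///     println!("pending socket error: {}", err);
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```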
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+}
+
+impl event::Source for UdpSocket {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+#[cfg(unix)]
+impl IntoRawFd for UdpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for UdpSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl FromRawFd for UdpSocket {
+ /// Converts a `RawFd` to a `UdpSocket`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
+ UdpSocket::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
+
+#[cfg(windows)]
+impl IntoRawSocket for UdpSocket {
+ fn into_raw_socket(self) -> RawSocket {
+ self.inner.into_inner().into_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl AsRawSocket for UdpSocket {
+ fn as_raw_socket(&self) -> RawSocket {
+ self.inner.as_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl FromRawSocket for UdpSocket {
+ /// Converts a `RawSocket` to a `UdpSocket`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_socket(socket: RawSocket) -> UdpSocket {
+ UdpSocket::from_std(FromRawSocket::from_raw_socket(socket))
+ }
+}
diff --git a/third_party/rust/mio/src/net/uds/datagram.rs b/third_party/rust/mio/src/net/uds/datagram.rs
new file mode 100644
index 0000000000..0c8f5ffa6a
--- /dev/null
+++ b/third_party/rust/mio/src/net/uds/datagram.rs
@@ -0,0 +1,165 @@
+use crate::io_source::IoSource;
+use crate::{event, sys, Interest, Registry, Token};
+
+use std::net::Shutdown;
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::os::unix::net;
+use std::path::Path;
+use std::{fmt, io};
+
+/// A Unix datagram socket.
+pub struct UnixDatagram {
+ inner: IoSource<net::UnixDatagram>,
+}
+
+impl UnixDatagram {
+ /// Creates a Unix datagram socket bound to the given path.
+ pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixDatagram> {
+ sys::uds::datagram::bind(path.as_ref()).map(UnixDatagram::from_std)
+ }
+
+ /// Creates a new `UnixDatagram` from a standard `net::UnixDatagram`.
+ ///
+ /// This function is intended to be used to wrap a Unix datagram from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying datagram; it is left up to the user to set it
+ /// in non-blocking mode.
+ pub fn from_std(socket: net::UnixDatagram) -> UnixDatagram {
+ UnixDatagram {
+ inner: IoSource::new(socket),
+ }
+ }
+
+ /// Connects the socket to the specified address.
+ pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
+ self.inner.connect(path)
+ }
+
+ /// Creates a Unix Datagram socket which is not bound to any address.
+ pub fn unbound() -> io::Result<UnixDatagram> {
+ sys::uds::datagram::unbound().map(UnixDatagram::from_std)
+ }
+
+ /// Create an unnamed pair of connected sockets.
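+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; a real program would wait for writable/readable
+ /// events before sending and receiving:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UnixDatagram;
+ ///
+ /// let (sender, receiver) = UnixDatagram::pair()?;
+ /// sender.send(b"ping")?;
+ /// let mut buf = [0; 4];
+ /// let n = receiver.recv(&mut buf)?;
+ /// assert_eq!(&buf[..n], b"ping");
+ /// # Ok(())
+ /// # }
+ /// ```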
+ pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> {
+ sys::uds::datagram::pair().map(|(socket1, socket2)| {
+ (
+ UnixDatagram::from_std(socket1),
+ UnixDatagram::from_std(socket2),
+ )
+ })
+ }
+
+ /// Returns the address of this socket.
+ pub fn local_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::datagram::local_addr(&self.inner)
+ }
+
+ /// Returns the address of this socket's peer.
+ ///
+ /// The `connect` method will connect the socket to a peer.
+ pub fn peer_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::datagram::peer_addr(&self.inner)
+ }
+
+ /// Receives data from the socket.
+ ///
+ /// On success, returns the number of bytes read and the address from
+ /// whence the data came.
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, sys::SocketAddr)> {
+ self.inner
+ .do_io(|inner| sys::uds::datagram::recv_from(inner, buf))
+ }
+
+ /// Receives data from the socket.
+ ///
+ /// On success, returns the number of bytes read.
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.recv(buf))
+ }
+
+ /// Sends data on the socket to the specified address.
+ ///
+ /// On success, returns the number of bytes written.
+ pub fn send_to<P: AsRef<Path>>(&self, buf: &[u8], path: P) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.send_to(buf, path))
+ }
+
+ /// Sends data on the socket to the socket's peer.
+ ///
+ /// The peer address may be set by the `connect` method, and this method
+ /// will return an error if the socket has not already been connected.
+ ///
+ /// On success, returns the number of bytes written.
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.send(buf))
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ /// Shut down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O calls on the
+ /// specified portions to immediately return with an appropriate value
+ /// (see the documentation of `Shutdown`).
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.inner.shutdown(how)
+ }
+}
+
+impl event::Source for UnixDatagram {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for UnixDatagram {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+impl IntoRawFd for UnixDatagram {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixDatagram {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixDatagram {
+ /// Converts a `RawFd` to a `UnixDatagram`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixDatagram {
+ UnixDatagram::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
diff --git a/third_party/rust/mio/src/net/uds/listener.rs b/third_party/rust/mio/src/net/uds/listener.rs
new file mode 100644
index 0000000000..37e8106d89
--- /dev/null
+++ b/third_party/rust/mio/src/net/uds/listener.rs
@@ -0,0 +1,104 @@
+use crate::io_source::IoSource;
+use crate::net::{SocketAddr, UnixStream};
+use crate::{event, sys, Interest, Registry, Token};
+
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::os::unix::net;
+use std::path::Path;
+use std::{fmt, io};
+
+/// A non-blocking Unix domain socket server.
+pub struct UnixListener {
+ inner: IoSource<net::UnixListener>,
+}
+
+impl UnixListener {
+ /// Creates a new `UnixListener` bound to the specified socket.
+ pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> {
+ sys::uds::listener::bind(path.as_ref()).map(UnixListener::from_std)
+ }
+
+ /// Creates a new `UnixListener` from a standard `net::UnixListener`.
+ ///
+ /// This function is intended to be used to wrap a Unix listener from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying listener; it is left up to the user to set it in
+ /// non-blocking mode.
+ pub fn from_std(listener: net::UnixListener) -> UnixListener {
+ UnixListener {
+ inner: IoSource::new(listener),
+ }
+ }
+
+ /// Accepts a new incoming connection to this listener.
+ ///
+ /// The caller is responsible for ensuring that the listening socket is in
+ /// non-blocking mode.
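+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; the socket path is illustrative, and a real program
+ /// would wait for a readable event before accepting:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UnixListener;
+ ///
+ /// let listener = UnixListener::bind("/tmp/mio-example.sock")?;
+ /// let (_stream, addr) = listener.accept()?;
+ /// println!("accepted connection from {:?}", addr);
+ /// # Ok(())
+ /// # }
+ /// ```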
+ pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
+ sys::uds::listener::accept(&self.inner)
+ }
+
+ /// Returns the local socket address of this listener.
+ pub fn local_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::listener::local_addr(&self.inner)
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+}
+
+impl event::Source for UnixListener {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for UnixListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+impl IntoRawFd for UnixListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixListener {
+ /// Converts a `RawFd` to a `UnixListener`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixListener {
+ UnixListener::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
diff --git a/third_party/rust/mio/src/net/uds/mod.rs b/third_party/rust/mio/src/net/uds/mod.rs
new file mode 100644
index 0000000000..6b4ffdc430
--- /dev/null
+++ b/third_party/rust/mio/src/net/uds/mod.rs
@@ -0,0 +1,10 @@
+mod datagram;
+pub use self::datagram::UnixDatagram;
+
+mod listener;
+pub use self::listener::UnixListener;
+
+mod stream;
+pub use self::stream::UnixStream;
+
+pub use crate::sys::SocketAddr;
diff --git a/third_party/rust/mio/src/net/uds/stream.rs b/third_party/rust/mio/src/net/uds/stream.rs
new file mode 100644
index 0000000000..f21d9e7ba5
--- /dev/null
+++ b/third_party/rust/mio/src/net/uds/stream.rs
@@ -0,0 +1,174 @@
+use crate::io_source::IoSource;
+use crate::{event, sys, Interest, Registry, Token};
+
+use std::fmt;
+use std::io::{self, IoSlice, IoSliceMut, Read, Write};
+use std::net::Shutdown;
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::os::unix::net;
+use std::path::Path;
+
+/// A non-blocking Unix stream socket.
+pub struct UnixStream {
+ inner: IoSource<net::UnixStream>,
+}
+
+impl UnixStream {
+ /// Connects to the socket named by `path`.
+ pub fn connect<P: AsRef<Path>>(path: P) -> io::Result<UnixStream> {
+ sys::uds::stream::connect(path.as_ref()).map(UnixStream::from_std)
+ }
+
+ /// Creates a new `UnixStream` from a standard `net::UnixStream`.
+ ///
+ /// This function is intended to be used to wrap a Unix stream from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying stream; it is left up to the user to set it in
+ /// non-blocking mode.
+ ///
+ /// # Note
+ ///
+ /// The Unix stream here will not have `connect` called on it, so it
+ /// should already be connected via some other means (be it manually, or
+ /// the standard library).
+ pub fn from_std(stream: net::UnixStream) -> UnixStream {
+ UnixStream {
+ inner: IoSource::new(stream),
+ }
+ }
+
+ /// Creates an unnamed pair of connected sockets.
+ ///
+ /// Returns two `UnixStream`s which are connected to each other.
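+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; a real program would wait for writable/readable
+ /// events before performing I/O:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UnixStream;
+ /// use std::io::{Read, Write};
+ ///
+ /// let (mut a, mut b) = UnixStream::pair()?;
+ /// a.write_all(b"hello")?;
+ /// let mut buf = [0; 5];
+ /// b.read_exact(&mut buf)?;
+ /// assert_eq!(&buf, b"hello");
+ /// # Ok(())
+ /// # }
+ /// ```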
+ pub fn pair() -> io::Result<(UnixStream, UnixStream)> {
+ sys::uds::stream::pair().map(|(stream1, stream2)| {
+ (UnixStream::from_std(stream1), UnixStream::from_std(stream2))
+ })
+ }
+
+ /// Returns the socket address of the local half of this connection.
+ pub fn local_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::stream::local_addr(&self.inner)
+ }
+
+ /// Returns the socket address of the remote half of this connection.
+ pub fn peer_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::stream::peer_addr(&self.inner)
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O calls on the
+ /// specified portions to immediately return with an appropriate value
+ /// (see the documentation of `Shutdown`).
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.inner.shutdown(how)
+ }
+}
+
+impl Read for UnixStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read_vectored(bufs))
+ }
+}
+
+impl<'a> Read for &'a UnixStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read_vectored(bufs))
+ }
+}
+
+impl Write for UnixStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|inner| (&*inner).flush())
+ }
+}
+
+impl<'a> Write for &'a UnixStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|inner| (&*inner).flush())
+ }
+}
+
+impl event::Source for UnixStream {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for UnixStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+impl IntoRawFd for UnixStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixStream {
+ /// Converts a `RawFd` to a `UnixStream`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixStream {
+ UnixStream::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
diff --git a/third_party/rust/mio/src/poll.rs b/third_party/rust/mio/src/poll.rs
new file mode 100644
index 0000000000..fd643fdd00
--- /dev/null
+++ b/third_party/rust/mio/src/poll.rs
@@ -0,0 +1,682 @@
+use crate::{event, sys, Events, Interest, Token};
+use log::trace;
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::time::Duration;
+use std::{fmt, io};
+
+/// Polls for readiness events on all registered values.
+///
+/// `Poll` allows a program to monitor a large number of [`event::Source`]s,
+/// waiting until one or more become "ready" for some class of operations; e.g.
+/// reading and writing. An event source is considered ready if it is possible
+/// to immediately perform a corresponding operation; e.g. [`read`] or
+/// [`write`].
+///
+/// To use `Poll`, an `event::Source` must first be registered with the `Poll`
+/// instance using the [`register`] method on its associated `Register`,
+/// supplying readiness interest. The readiness interest tells `Poll` which
+/// specific operations on the handle to monitor for readiness. A `Token` is
+/// also passed to the [`register`] function. When `Poll` returns a readiness
+/// event, it will include this token. This associates the event with the
+/// event source that generated the event.
+///
+/// [`event::Source`]: ./event/trait.Source.html
+/// [`read`]: ./net/struct.TcpStream.html#method.read
+/// [`write`]: ./net/struct.TcpStream.html#method.write
+/// [`register`]: struct.Registry.html#method.register
+///
+/// # Examples
+///
+/// A basic example -- establishing a `TcpStream` connection.
+///
+#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Poll, Interest, Token};
+/// use mio::net::TcpStream;
+///
+/// use std::net::{self, SocketAddr};
+///
+/// // Bind a server socket to connect to.
+/// let addr: SocketAddr = "127.0.0.1:0".parse()?;
+/// let server = net::TcpListener::bind(addr)?;
+///
+/// // Construct a new `Poll` handle as well as the `Events` we'll store into
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(1024);
+///
+/// // Connect the stream
+/// let mut stream = TcpStream::connect(server.local_addr()?)?;
+///
+/// // Register the stream with `Poll`
+/// poll.registry().register(&mut stream, Token(0), Interest::READABLE | Interest::WRITABLE)?;
+///
+/// // Wait for the socket to become ready. This has to happen in a loop to
+/// // handle spurious wakeups.
+/// loop {
+/// poll.poll(&mut events, None)?;
+///
+/// for event in &events {
+/// if event.token() == Token(0) && event.is_writable() {
+/// // The socket connected (probably, it could still be a spurious
+/// // wakeup)
+/// return Ok(());
+/// }
+/// }
+/// }
+/// # }
+/// ```
+///
+/// # Portability
+///
+/// Using `Poll` provides a portable interface across supported platforms as
+/// long as the caller takes the following into consideration:
+///
+/// ### Spurious events
+///
+/// [`Poll::poll`] may return readiness events even if the associated
+/// event source is not actually ready. Given the same code, this may
+/// happen more on some platforms than others. It is important to never assume
+/// that, just because a readiness event was received, that the associated
+/// operation will succeed as well.
+///
+/// If an operation fails with [`WouldBlock`], then the caller should not treat
+/// this as an error, but instead should wait until another readiness event is
+/// received.
+///
+/// ### Draining readiness
+///
+/// Once a readiness event is received, the corresponding operation must be
+/// performed repeatedly until it returns [`WouldBlock`]. Unless this is done,
+/// there is no guarantee that another readiness event will be delivered, even
+/// if further data is received for the event source.
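+///
+/// For example, a drain loop for a readable source might look like the
+/// following sketch, where `stream`, `buf` and `process` are placeholders:
+///
+/// ```ignore
+/// loop {
+///     match stream.read(&mut buf) {
+///         Ok(0) => break, // Connection closed.
+///         Ok(n) => process(&buf[..n]),
+///         // Drained; wait for the next readiness event.
+///         Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break,
+///         Err(err) => return Err(err),
+///     }
+/// }
+/// ```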
+///
+/// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
+///
+/// ### Readiness operations
+///
+/// The only readiness operations that are guaranteed to be present on all
+/// supported platforms are [`readable`] and [`writable`]. All other readiness
+/// operations may have false negatives and as such should be considered
+/// **hints**. This means that if a socket is registered with [`readable`]
+/// interest and either an error or close is received, a readiness event will
+/// be generated for the socket, but it **may** only include `readable`
+/// readiness. Also note that, given the potential for spurious events,
+/// receiving a readiness event with `read_closed`, `write_closed`, or `error`
+/// doesn't actually mean that a `read` on the socket will return a result
+/// matching the readiness event.
+///
+/// In other words, portable programs that explicitly check for [`read_closed`],
+/// [`write_closed`], or [`error`] readiness should be doing so as an
+/// **optimization** and always be able to handle an error or close situation
+/// when performing the actual read operation.
+///
+/// [`readable`]: ./event/struct.Event.html#method.is_readable
+/// [`writable`]: ./event/struct.Event.html#method.is_writable
+/// [`error`]: ./event/struct.Event.html#method.is_error
+/// [`read_closed`]: ./event/struct.Event.html#method.is_read_closed
+/// [`write_closed`]: ./event/struct.Event.html#method.is_write_closed
+///
+/// ### Registering handles
+///
+/// Unless otherwise noted, it should be assumed that types implementing
+/// [`event::Source`] will never become ready unless they are registered with
+/// `Poll`.
+///
+/// For example:
+///
+#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+/// # use std::error::Error;
+/// # use std::net;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Poll, Interest, Token};
+/// use mio::net::TcpStream;
+/// use std::net::SocketAddr;
+/// use std::time::Duration;
+/// use std::thread;
+///
+/// let address: SocketAddr = "127.0.0.1:0".parse()?;
+/// let listener = net::TcpListener::bind(address)?;
+/// let mut sock = TcpStream::connect(listener.local_addr()?)?;
+///
+/// thread::sleep(Duration::from_secs(1));
+///
+/// let poll = Poll::new()?;
+///
+/// // The connect is not guaranteed to have started until the stream is
+/// // registered, which happens at this point.
+/// poll.registry().register(&mut sock, Token(0), Interest::READABLE | Interest::WRITABLE)?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// ### Dropping `Poll`
+///
+/// When the `Poll` instance is dropped it may cancel in-flight operations for
+/// the registered [event sources], meaning that no further events for them may
+/// be received. It also means operations on the registered event sources may no
+/// longer work. It is up to the user to keep the `Poll` instance alive while
+/// registered event sources are being used.
+///
+/// [event sources]: ./event/trait.Source.html
+///
+/// ### Accessing raw fd/socket/handle
+///
+/// Mio makes it possible for many types to be converted into a raw file
+/// descriptor (fd, Unix), socket (Windows) or handle (Windows). This makes it
+/// possible to support more operations on the type than Mio supports, for
+/// example it makes [mio-aio] possible. However, accessing the raw fd is not
+/// without its pitfalls.
+///
+/// Specifically performing I/O operations outside of Mio on these types (via
+/// the raw fd) has unspecified behaviour. It could cause no more events to be
+/// generated for the type even though it returned `WouldBlock` (in an operation
+/// directly accessing the fd). The behaviour is OS specific and Mio can only
+/// guarantee cross-platform behaviour if it can control the I/O.
+///
+/// [mio-aio]: https://github.com/asomers/mio-aio
+///
+/// *The following is **not** guaranteed, just a description of the current
+/// situation!* Mio may change this behaviour without considering it a
+/// breaking change; don't depend on it, it is noted here only to inform the
+/// user. Currently the kqueue and epoll implementations support direct I/O
+/// operations on the fd without Mio's knowledge. Windows, however, needs
+/// **all** I/O operations to go through Mio, otherwise it is not able to
+/// update its internal state properly and won't generate events.
+///
+/// # Implementation notes
+///
+/// `Poll` is backed by the selector provided by the operating system.
+///
+/// | OS | Selector |
+/// |---------------|-----------|
+/// | Android | [epoll] |
+/// | DragonFly BSD | [kqueue] |
+/// | FreeBSD | [kqueue] |
+/// | iOS | [kqueue] |
+/// | illumos | [epoll] |
+/// | Linux | [epoll] |
+/// | NetBSD | [kqueue] |
+/// | OpenBSD | [kqueue] |
+/// | Windows | [IOCP] |
+/// | macOS | [kqueue] |
+///
+/// On all supported platforms, socket operations are handled by using the
+/// system selector. Platform specific extensions (e.g. [`SourceFd`]) allow
+/// accessing other features provided by individual system selectors. For
+/// example, Linux's [`signalfd`] feature can be used by registering the FD with
+/// `Poll` via [`SourceFd`].
+///
+/// On all platforms except Windows, a call to [`Poll::poll`] is mostly just a
+/// direct call to the system selector. However, [IOCP] uses a completion model
+/// instead of a readiness model. In this case, `Poll` must adapt the completion
+/// model to Mio's API. While non-trivial, the bridge layer is still quite
+/// efficient. The most expensive part is that calls to `read` and `write`
+/// require data to be copied into an intermediate buffer before it is passed
+/// to the kernel.
+///
+/// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+/// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+/// [IOCP]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198(v=vs.85).aspx
+/// [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
+/// [`SourceFd`]: unix/struct.SourceFd.html
+/// [`Poll::poll`]: struct.Poll.html#method.poll
+pub struct Poll {
+ registry: Registry,
+}
+
+/// Registers I/O resources.
+pub struct Registry {
+ selector: sys::Selector,
+}
+
+impl Poll {
+ /// Returns a reference to the `Registry`, which can be used to register
+ /// `event::Source`s.
+ pub fn registry(&self) -> &Registry {
+ &self.registry
+ }
+
+ /// Wait for readiness events
+ ///
+ /// Blocks the current thread and waits for readiness events for any of the
+ /// [`event::Source`]s that have been registered with this `Poll` instance.
+ /// The function will block until either at least one readiness event has
+ /// been received or `timeout` has elapsed. A `timeout` of `None` means that
+ /// `poll` will block until a readiness event has been received.
+ ///
+ /// The supplied `events` will be cleared and newly received readiness events
+ /// will be pushed onto the end. At most `events.capacity()` events will be
+ /// returned. If there are further pending readiness events, they will be
+ /// returned on the next call to `poll`.
+ ///
+ /// A single call to `poll` may result in multiple readiness events being
+ /// returned for a single event source. For example, if a TCP socket becomes
+ /// both readable and writable, it may be possible for a single readiness
+ /// event to be returned with both [`readable`] and [`writable`] readiness
+ /// **OR** two separate events may be returned, one with [`readable`] set
+ /// and one with [`writable`] set.
+ ///
+ /// Note that the `timeout` will be rounded up to the system clock
+ /// granularity (usually 1ms), and kernel scheduling delays mean that
+ /// the blocking interval may be overrun by a small amount.
+ ///
+ /// See the [struct] level documentation for a higher level discussion of
+ /// polling.
+ ///
+ /// [`event::Source`]: ./event/trait.Source.html
+ /// [`readable`]: struct.Interest.html#associatedconstant.READABLE
+ /// [`writable`]: struct.Interest.html#associatedconstant.WRITABLE
+ /// [struct]: struct.Poll.html
+ /// [`iter`]: ./event/struct.Events.html#method.iter
+ ///
+ /// # Notes
+ ///
+ /// This returns any errors without attempting to retry, previous versions
+ /// of Mio would automatically retry the poll call if it was interrupted
+ /// (if `EINTR` was returned).
+ ///
+ /// # Examples
+ ///
+ /// A basic example -- establishing a `TcpStream` connection.
+ ///
+ #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll, Interest, Token};
+ /// use mio::net::TcpStream;
+ ///
+ /// use std::net::{TcpListener, SocketAddr};
+ /// use std::thread;
+ ///
+ /// // Bind a server socket to connect to.
+ /// let addr: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let server = TcpListener::bind(addr)?;
+ /// let addr = server.local_addr()?;
+ ///
+ /// // Spawn a thread to accept the socket
+ /// thread::spawn(move || {
+ /// let _ = server.accept();
+ /// });
+ ///
+ /// // Construct a new `Poll` handle as well as the `Events` we'll store into
+ /// let mut poll = Poll::new()?;
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Connect the stream
+ /// let mut stream = TcpStream::connect(addr)?;
+ ///
+ /// // Register the stream with `Poll`
+ /// poll.registry().register(
+ /// &mut stream,
+ /// Token(0),
+ /// Interest::READABLE | Interest::WRITABLE)?;
+ ///
+ /// // Wait for the socket to become ready. This has to happen in a loop to
+ /// // handle spurious wakeups.
+ /// loop {
+ /// poll.poll(&mut events, None)?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) && event.is_writable() {
+ /// // The socket connected (probably, it could still be a spurious
+ /// // wakeup)
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// [struct]: #
+ pub fn poll(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ self.registry.selector.select(events.sys(), timeout)
+ }
+}
+
+cfg_os_poll! {
+ impl Poll {
+ /// Return a new `Poll` handle.
+ ///
+ /// This function will make a syscall to the operating system to create
+ /// the system selector. If this syscall fails, `Poll::new` will return
+ /// with the error.
+ ///
+ /// See [struct] level docs for more details.
+ ///
+ /// [struct]: struct.Poll.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Poll, Events};
+ /// use std::time::Duration;
+ ///
+ /// let mut poll = match Poll::new() {
+ /// Ok(poll) => poll,
+ /// Err(e) => panic!("failed to create Poll instance; err={:?}", e),
+ /// };
+ ///
+ /// // Create a structure to receive polled events
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Wait for events, but none will be received because no
+ /// // `event::Source`s have been registered with this `Poll` instance.
+ /// poll.poll(&mut events, Some(Duration::from_millis(500)))?;
+ /// assert!(events.is_empty());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn new() -> io::Result<Poll> {
+ sys::Selector::new().map(|selector| Poll {
+ registry: Registry { selector },
+ })
+ }
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for Poll {
+ fn as_raw_fd(&self) -> RawFd {
+ self.registry.as_raw_fd()
+ }
+}
+
+impl fmt::Debug for Poll {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Poll").finish()
+ }
+}
+
+impl Registry {
+ /// Register an [`event::Source`] with the `Poll` instance.
+ ///
+ /// Once registered, the `Poll` instance will monitor the event source for
+ /// readiness state changes. When it notices a state change, it will return
+ /// a readiness event for the handle the next time [`poll`] is called.
+ ///
+ /// See [`Poll`] docs for a high level overview.
+ ///
+ /// # Arguments
+ ///
+ /// `source: &mut S: event::Source`: This is the source of events that the
+ /// `Poll` instance should monitor for readiness state changes.
+ ///
+ /// `token: Token`: The caller picks a token to associate with the socket.
+ /// When [`poll`] returns an event for the handle, this token is included.
+ /// This allows the caller to map the event to its source. The token
+ /// associated with the `event::Source` can be changed at any time by
+ /// calling [`reregister`].
+ ///
+ /// See documentation on [`Token`] for an example showing how to pick
+ /// [`Token`] values.
+ ///
+ /// `interests: Interest`: Specifies which operations `Poll` should monitor
+ /// for readiness. `Poll` will only return readiness events for operations
+ /// specified by this argument.
+ ///
+ /// If a socket is registered with readable interest and the socket becomes
+ /// writable, no event will be returned from [`poll`].
+ ///
+ /// The readiness interest for an `event::Source` can be changed at any time
+ /// by calling [`reregister`].
+ ///
+ /// # Notes
+ ///
+ /// Callers must ensure that if a source being registered with a `Poll`
+ /// instance was previously registered with that `Poll` instance, then a
+ /// call to [`deregister`] has already occurred. Consecutive calls to
+ /// `register` result in unspecified behavior.
+ ///
+ /// Unless otherwise specified, the caller should assume that once an event
+ /// source is registered with a `Poll` instance, it is bound to that `Poll`
+ /// instance for the lifetime of the event source. This remains true even
+ /// if the event source is deregistered from the poll instance using
+ /// [`deregister`].
+ ///
+ /// [`event::Source`]: ./event/trait.Source.html
+ /// [`poll`]: struct.Poll.html#method.poll
+ /// [`reregister`]: struct.Registry.html#method.reregister
+ /// [`deregister`]: struct.Registry.html#method.deregister
+ /// [`Token`]: struct.Token.html
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # use std::net;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll, Interest, Token};
+ /// use mio::net::TcpStream;
+ /// use std::net::SocketAddr;
+ /// use std::time::{Duration, Instant};
+ ///
+ /// let mut poll = Poll::new()?;
+ ///
+ /// let address: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let listener = net::TcpListener::bind(address)?;
+ /// let mut socket = TcpStream::connect(listener.local_addr()?)?;
+ ///
+ /// // Register the socket with `poll`
+ /// poll.registry().register(
+ /// &mut socket,
+ /// Token(0),
+ /// Interest::READABLE | Interest::WRITABLE)?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let start = Instant::now();
+ /// let timeout = Duration::from_millis(500);
+ ///
+ /// loop {
+ /// let elapsed = start.elapsed();
+ ///
+ /// if elapsed >= timeout {
+ /// // Connection timed out
+ /// return Ok(());
+ /// }
+ ///
+ /// let remaining = timeout - elapsed;
+ /// poll.poll(&mut events, Some(remaining))?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) {
+ /// // Something (probably) happened on the socket.
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # }
+ /// ```
+ pub fn register<S>(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()>
+ where
+ S: event::Source + ?Sized,
+ {
+ trace!(
+ "registering event source with poller: token={:?}, interests={:?}",
+ token,
+ interests
+ );
+ source.register(self, token, interests)
+ }
+
+ /// Re-register an [`event::Source`] with the `Poll` instance.
+ ///
+ /// Re-registering an event source allows changing the details of the
+ /// registration. Specifically, it allows updating the associated `token`
+ /// and `interests` specified in previous `register` and `reregister` calls.
+ ///
+ /// The `reregister` arguments fully override the previous values. In other
+ /// words, if a socket is registered with [`readable`] interest and the call
+ /// to `reregister` specifies [`writable`], then read interest is no longer
+ /// requested for the handle.
+ ///
+ /// The event source must have previously been registered with this instance
+ /// of `Poll`, otherwise the behavior is unspecified.
+ ///
+ /// See the [`register`] documentation for details about the function
+ /// arguments and see the [`struct`] docs for a high level overview of
+ /// polling.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # use std::net;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Poll, Interest, Token};
+ /// use mio::net::TcpStream;
+ /// use std::net::SocketAddr;
+ ///
+ /// let poll = Poll::new()?;
+ ///
+ /// let address: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let listener = net::TcpListener::bind(address)?;
+ /// let mut socket = TcpStream::connect(listener.local_addr()?)?;
+ ///
+ /// // Register the socket with `poll`, requesting readable
+ /// poll.registry().register(
+ /// &mut socket,
+ /// Token(0),
+ /// Interest::READABLE)?;
+ ///
+ /// // Reregister the socket specifying write interest instead. Even though
+ /// // the token is the same it must be specified.
+ /// poll.registry().reregister(
+ /// &mut socket,
+ /// Token(0),
+ /// Interest::WRITABLE)?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ ///
+ /// [`event::Source`]: ./event/trait.Source.html
+ /// [`struct`]: struct.Poll.html
+ /// [`register`]: struct.Registry.html#method.register
+ /// [`readable`]: ./event/struct.Event.html#is_readable
+ /// [`writable`]: ./event/struct.Event.html#is_writable
+ pub fn reregister<S>(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()>
+ where
+ S: event::Source + ?Sized,
+ {
+ trace!(
+ "reregistering event source with poller: token={:?}, interests={:?}",
+ token,
+ interests
+ );
+ source.reregister(self, token, interests)
+ }
+
+ /// Deregister an [`event::Source`] with the `Poll` instance.
+ ///
+ /// When an event source is deregistered, the `Poll` instance will no longer
+ /// monitor it for readiness state changes. Deregistering frees any
+ /// internal resources needed to track the handle. After an explicit call
+ /// to this method completes, it is guaranteed that the token previously
+ /// registered to this handle will not be returned by a future poll, so long
+ /// as a happens-before relationship is established between this call and
+ /// the poll.
+ ///
+ /// The event source must have previously been registered with this instance
+ /// of `Poll`, otherwise the behavior is unspecified.
+ ///
+ /// A handle can be passed back to `register` after it has been
+ /// deregistered; however, it must be passed back to the **same** `Poll`
+ /// instance, otherwise the behavior is unspecified.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # use std::net;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll, Interest, Token};
+ /// use mio::net::TcpStream;
+ /// use std::net::SocketAddr;
+ /// use std::time::Duration;
+ ///
+ /// let mut poll = Poll::new()?;
+ ///
+ /// let address: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let listener = net::TcpListener::bind(address)?;
+ /// let mut socket = TcpStream::connect(listener.local_addr()?)?;
+ ///
+ /// // Register the socket with `poll`
+ /// poll.registry().register(
+ /// &mut socket,
+ /// Token(0),
+ /// Interest::READABLE)?;
+ ///
+ /// poll.registry().deregister(&mut socket)?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Set a timeout because this poll should never receive any events.
+ /// poll.poll(&mut events, Some(Duration::from_secs(1)))?;
+ /// assert!(events.is_empty());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn deregister<S>(&self, source: &mut S) -> io::Result<()>
+ where
+ S: event::Source + ?Sized,
+ {
+ trace!("deregistering event source from poller");
+ source.deregister(self)
+ }
+
+ /// Creates a new independently owned `Registry`.
+ ///
+ /// Event sources registered with this `Registry` will be registered with
+ /// the original `Registry` and `Poll` instance.
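+ ///
+ /// # Examples
+ ///
+ /// A sketch of registering from another thread; `poll` and `socket` are
+ /// assumed to exist, and the snippet is not compiled as a doctest:
+ ///
+ /// ```ignore
+ /// let registry = poll.registry().try_clone()?;
+ /// std::thread::spawn(move || {
+ ///     // Events for sources registered here are still delivered to the
+ ///     // original `Poll` instance.
+ ///     registry.register(&mut socket, Token(0), Interest::READABLE).unwrap();
+ /// });
+ /// ```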
+ pub fn try_clone(&self) -> io::Result<Registry> {
+ self.selector
+ .try_clone()
+ .map(|selector| Registry { selector })
+ }
+
+ /// Internal check to ensure only a single `Waker` is active per [`Poll`]
+ /// instance.
+ #[cfg(debug_assertions)]
+ pub(crate) fn register_waker(&self) {
+ assert!(
+ !self.selector.register_waker(),
+ "Only a single `Waker` can be active per `Poll` instance"
+ );
+ }
+
+ /// Get access to the `sys::Selector`.
+ pub(crate) fn selector(&self) -> &sys::Selector {
+ &self.selector
+ }
+}
+
+impl fmt::Debug for Registry {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Registry").finish()
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for Registry {
+ fn as_raw_fd(&self) -> RawFd {
+ self.selector.as_raw_fd()
+ }
+}
+
+cfg_os_poll! {
+ #[cfg(unix)]
+ #[test]
+ pub fn as_raw_fd() {
+ let poll = Poll::new().unwrap();
+ assert!(poll.as_raw_fd() > 0);
+ }
+}
diff --git a/third_party/rust/mio/src/sys/mod.rs b/third_party/rust/mio/src/sys/mod.rs
new file mode 100644
index 0000000000..81ae6d2e61
--- /dev/null
+++ b/third_party/rust/mio/src/sys/mod.rs
@@ -0,0 +1,80 @@
+//! Module with system specific types.
+//!
+//! Required types:
+//!
+//! * `Event`: a type alias for the system specific event, e.g. `kevent` or
+//! `epoll_event`.
+//! * `event`: a module with various helper functions for `Event`, see
+//! [`crate::event::Event`] for the required functions.
+//! * `Events`: collection of `Event`s, see [`crate::Events`].
+//! * `IoSourceState`: state for the `IoSource` type.
+//! * `Selector`: selector used to register event sources and poll for events,
+//! see [`crate::Poll`] and [`crate::Registry`] for required
+//! methods.
+//! * `tcp` and `udp` modules: see the [`crate::net`] module.
+//! * `Waker`: see [`crate::Waker`].
+
+cfg_os_poll! {
+ macro_rules! debug_detail {
+ (
+ $type: ident ($event_type: ty), $test: path,
+ $($(#[$target: meta])* $libc: ident :: $flag: ident),+ $(,)*
+ ) => {
+ struct $type($event_type);
+
+ impl fmt::Debug for $type {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut written_one = false;
+ $(
+ $(#[$target])*
+ #[allow(clippy::bad_bit_mask)] // Apparently some flags are zero.
+ {
+ // Windows doesn't use `libc` but the `afd` module.
+ if $test(&self.0, &$libc :: $flag) {
+ if !written_one {
+ write!(f, "{}", stringify!($flag))?;
+ written_one = true;
+ } else {
+ write!(f, "|{}", stringify!($flag))?;
+ }
+ }
+ }
+ )+
+ if !written_one {
+ write!(f, "(empty)")
+ } else {
+ Ok(())
+ }
+ }
+ }
+ };
+ }
+}
+
+#[cfg(unix)]
+cfg_os_poll! {
+ mod unix;
+ pub use self::unix::*;
+}
+
+#[cfg(windows)]
+cfg_os_poll! {
+ mod windows;
+ pub use self::windows::*;
+}
+
+cfg_not_os_poll! {
+ mod shell;
+ pub(crate) use self::shell::*;
+
+ #[cfg(unix)]
+ cfg_any_os_ext! {
+ mod unix;
+ pub use self::unix::SourceFd;
+ }
+
+ #[cfg(unix)]
+ cfg_net! {
+ pub use self::unix::SocketAddr;
+ }
+}
diff --git a/third_party/rust/mio/src/sys/shell/mod.rs b/third_party/rust/mio/src/sys/shell/mod.rs
new file mode 100644
index 0000000000..7e1533f452
--- /dev/null
+++ b/third_party/rust/mio/src/sys/shell/mod.rs
@@ -0,0 +1,70 @@
+macro_rules! os_required {
+ () => {
+ panic!("mio must be compiled with `os-poll` to run.")
+ };
+}
+
+mod selector;
+pub(crate) use self::selector::{event, Event, Events, Selector};
+
+mod waker;
+pub(crate) use self::waker::Waker;
+
+cfg_net! {
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+ #[cfg(unix)]
+ pub(crate) mod uds;
+}
+
+cfg_io_source! {
+ use std::io;
+ #[cfg(windows)]
+ use std::os::windows::io::RawSocket;
+
+ #[cfg(windows)]
+ use crate::{Registry, Token, Interest};
+
+ pub(crate) struct IoSourceState;
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState
+ }
+
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ // We don't hold state, so we can just call the function and
+ // return.
+ f(io)
+ }
+ }
+
+ #[cfg(windows)]
+ impl IoSourceState {
+ pub fn register(
+ &mut self,
+ _: &Registry,
+ _: Token,
+ _: Interest,
+ _: RawSocket,
+ ) -> io::Result<()> {
+ os_required!()
+ }
+
+ pub fn reregister(
+ &mut self,
+ _: &Registry,
+ _: Token,
+ _: Interest,
+ ) -> io::Result<()> {
+ os_required!()
+ }
+
+ pub fn deregister(&mut self) -> io::Result<()> {
+ os_required!()
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/sys/shell/selector.rs b/third_party/rust/mio/src/sys/shell/selector.rs
new file mode 100644
index 0000000000..91fc0bf47c
--- /dev/null
+++ b/third_party/rust/mio/src/sys/shell/selector.rs
@@ -0,0 +1,108 @@
+use std::io;
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::time::Duration;
+
+pub type Event = usize;
+
+pub type Events = Vec<Event>;
+
+#[derive(Debug)]
+pub struct Selector {}
+
+impl Selector {
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ os_required!();
+ }
+
+ pub fn select(&self, _: &mut Events, _: Option<Duration>) -> io::Result<()> {
+ os_required!();
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn register_waker(&self) -> bool {
+ os_required!();
+ }
+}
+
+#[cfg(unix)]
+cfg_any_os_ext! {
+ use crate::{Interest, Token};
+
+ impl Selector {
+ pub fn register(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> {
+ os_required!();
+ }
+
+ pub fn reregister(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> {
+ os_required!();
+ }
+
+ pub fn deregister(&self, _: RawFd) -> io::Result<()> {
+ os_required!();
+ }
+ }
+}
+
+cfg_io_source! {
+ #[cfg(debug_assertions)]
+ impl Selector {
+ pub fn id(&self) -> usize {
+ os_required!();
+ }
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ os_required!()
+ }
+}
+
+#[allow(clippy::trivially_copy_pass_by_ref)]
+pub mod event {
+ use crate::sys::Event;
+ use crate::Token;
+ use std::fmt;
+
+ pub fn token(_: &Event) -> Token {
+ os_required!();
+ }
+
+ pub fn is_readable(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_writable(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_error(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_read_closed(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_write_closed(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_priority(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_aio(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_lio(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn debug_details(_: &mut fmt::Formatter<'_>, _: &Event) -> fmt::Result {
+ os_required!();
+ }
+}
diff --git a/third_party/rust/mio/src/sys/shell/tcp.rs b/third_party/rust/mio/src/sys/shell/tcp.rs
new file mode 100644
index 0000000000..60dfe70f67
--- /dev/null
+++ b/third_party/rust/mio/src/sys/shell/tcp.rs
@@ -0,0 +1,27 @@
+use std::io;
+use std::net::{self, SocketAddr};
+
+pub(crate) fn new_for_addr(_: SocketAddr) -> io::Result<i32> {
+ os_required!();
+}
+
+pub(crate) fn bind(_: &net::TcpListener, _: SocketAddr) -> io::Result<()> {
+ os_required!();
+}
+
+pub(crate) fn connect(_: &net::TcpStream, _: SocketAddr) -> io::Result<()> {
+ os_required!();
+}
+
+pub(crate) fn listen(_: &net::TcpListener, _: u32) -> io::Result<()> {
+ os_required!();
+}
+
+#[cfg(unix)]
+pub(crate) fn set_reuseaddr(_: &net::TcpListener, _: bool) -> io::Result<()> {
+ os_required!();
+}
+
+pub(crate) fn accept(_: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ os_required!();
+}
diff --git a/third_party/rust/mio/src/sys/shell/udp.rs b/third_party/rust/mio/src/sys/shell/udp.rs
new file mode 100644
index 0000000000..48ccac740f
--- /dev/null
+++ b/third_party/rust/mio/src/sys/shell/udp.rs
@@ -0,0 +1,10 @@
+use std::io;
+use std::net::{self, SocketAddr};
+
+pub fn bind(_: SocketAddr) -> io::Result<net::UdpSocket> {
+ os_required!()
+}
+
+pub(crate) fn only_v6(_: &net::UdpSocket) -> io::Result<bool> {
+ os_required!()
+}
diff --git a/third_party/rust/mio/src/sys/shell/uds.rs b/third_party/rust/mio/src/sys/shell/uds.rs
new file mode 100644
index 0000000000..c18aca042f
--- /dev/null
+++ b/third_party/rust/mio/src/sys/shell/uds.rs
@@ -0,0 +1,75 @@
+pub(crate) mod datagram {
+ use crate::net::SocketAddr;
+ use std::io;
+ use std::os::unix::net;
+ use std::path::Path;
+
+ pub(crate) fn bind(_: &Path) -> io::Result<net::UnixDatagram> {
+ os_required!()
+ }
+
+ pub(crate) fn unbound() -> io::Result<net::UnixDatagram> {
+ os_required!()
+ }
+
+ pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> {
+ os_required!()
+ }
+
+ pub(crate) fn local_addr(_: &net::UnixDatagram) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+
+ pub(crate) fn peer_addr(_: &net::UnixDatagram) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+
+ pub(crate) fn recv_from(
+ _: &net::UnixDatagram,
+ _: &mut [u8],
+ ) -> io::Result<(usize, SocketAddr)> {
+ os_required!()
+ }
+}
+
+pub(crate) mod listener {
+ use crate::net::{SocketAddr, UnixStream};
+ use std::io;
+ use std::os::unix::net;
+ use std::path::Path;
+
+ pub(crate) fn bind(_: &Path) -> io::Result<net::UnixListener> {
+ os_required!()
+ }
+
+ pub(crate) fn accept(_: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> {
+ os_required!()
+ }
+
+ pub(crate) fn local_addr(_: &net::UnixListener) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+}
+
+pub(crate) mod stream {
+ use crate::net::SocketAddr;
+ use std::io;
+ use std::os::unix::net;
+ use std::path::Path;
+
+ pub(crate) fn connect(_: &Path) -> io::Result<net::UnixStream> {
+ os_required!()
+ }
+
+ pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> {
+ os_required!()
+ }
+
+ pub(crate) fn local_addr(_: &net::UnixStream) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+
+ pub(crate) fn peer_addr(_: &net::UnixStream) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+}
diff --git a/third_party/rust/mio/src/sys/shell/waker.rs b/third_party/rust/mio/src/sys/shell/waker.rs
new file mode 100644
index 0000000000..bbdd7c33af
--- /dev/null
+++ b/third_party/rust/mio/src/sys/shell/waker.rs
@@ -0,0 +1,16 @@
+use crate::sys::Selector;
+use crate::Token;
+use std::io;
+
+#[derive(Debug)]
+pub struct Waker {}
+
+impl Waker {
+ pub fn new(_: &Selector, _: Token) -> io::Result<Waker> {
+ os_required!();
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ os_required!();
+ }
+}
diff --git a/third_party/rust/mio/src/sys/unix/mod.rs b/third_party/rust/mio/src/sys/unix/mod.rs
new file mode 100644
index 0000000000..231480a5da
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/mod.rs
@@ -0,0 +1,72 @@
+/// Helper macro to execute a system call that returns an `io::Result`.
+//
+// This macro must be defined before any module that uses it.
+#[allow(unused_macros)]
+macro_rules! syscall {
+ ($fn: ident ( $($arg: expr),* $(,)* ) ) => {{
+ let res = unsafe { libc::$fn($($arg, )*) };
+ if res == -1 {
+ Err(std::io::Error::last_os_error())
+ } else {
+ Ok(res)
+ }
+ }};
+}
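+
+// An illustrative sketch of the expansion: `syscall!(close(fd))` becomes,
+// roughly,
+//
+//     let res = unsafe { libc::close(fd) };
+//     if res == -1 { Err(std::io::Error::last_os_error()) } else { Ok(res) }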
+
+cfg_os_poll! {
+ mod selector;
+ pub(crate) use self::selector::{event, Event, Events, Selector};
+
+ mod sourcefd;
+ pub use self::sourcefd::SourceFd;
+
+ mod waker;
+ pub(crate) use self::waker::Waker;
+
+ cfg_net! {
+ mod net;
+
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+ pub(crate) mod uds;
+ pub use self::uds::SocketAddr;
+ }
+
+ cfg_io_source! {
+ use std::io;
+
+ // Neither `kqueue` nor `epoll` needs to hold any user-space state.
+ pub(crate) struct IoSourceState;
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState
+ }
+
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ // We don't hold state, so we can just call the function and
+ // return.
+ f(io)
+ }
+ }
+ }
+
+ cfg_os_ext! {
+ pub(crate) mod pipe;
+ }
+}
+
+cfg_not_os_poll! {
+ cfg_net! {
+ mod uds;
+ pub use self::uds::SocketAddr;
+ }
+
+ cfg_any_os_ext! {
+ mod sourcefd;
+ pub use self::sourcefd::SourceFd;
+ }
+}
diff --git a/third_party/rust/mio/src/sys/unix/net.rs b/third_party/rust/mio/src/sys/unix/net.rs
new file mode 100644
index 0000000000..78f1387b1f
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/net.rs
@@ -0,0 +1,168 @@
+use std::io;
+use std::mem::size_of;
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
+
+pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: libc::c_int) -> io::Result<libc::c_int> {
+ let domain = match addr {
+ SocketAddr::V4(..) => libc::AF_INET,
+ SocketAddr::V6(..) => libc::AF_INET6,
+ };
+
+ new_socket(domain, socket_type)
+}
+
+/// Create a new non-blocking socket.
+pub(crate) fn new_socket(domain: libc::c_int, socket_type: libc::c_int) -> io::Result<libc::c_int> {
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ let socket_type = socket_type | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
+
+ // Gives a warning for platforms without SOCK_NONBLOCK.
+ #[allow(clippy::let_and_return)]
+ let socket = syscall!(socket(domain, socket_type, 0));
+
+ // Mimic `libstd` and set `SO_NOSIGPIPE` on Apple systems.
+ #[cfg(target_vendor = "apple")]
+ let socket = socket.and_then(|socket| {
+ syscall!(setsockopt(
+ socket,
+ libc::SOL_SOCKET,
+ libc::SO_NOSIGPIPE,
+ &1 as *const libc::c_int as *const libc::c_void,
+ size_of::<libc::c_int>() as libc::socklen_t
+ ))
+ .map(|_| socket)
+ });
+
+ // Darwin doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC.
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ let socket = socket.and_then(|socket| {
+ // For platforms that don't support these flags in `socket(2)` we
+ // need to set them ourselves.
+ syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK))
+ .and_then(|_| syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC)).map(|_| socket))
+ .map_err(|e| {
+ // If either of the `fcntl` calls failed, ensure the socket is
+ // closed and return the error.
+ let _ = syscall!(close(socket));
+ e
+ })
+ });
+
+ socket
+}
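+
+// Illustrative usage, mirroring the callers elsewhere in this crate:
+// `tcp::new_for_addr` calls `new_socket(domain, libc::SOCK_STREAM)` and
+// `udp::bind` calls `new_ip_socket(addr, libc::SOCK_DGRAM)`; both get back a
+// raw fd that is already non-blocking and close-on-exec.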
+
+/// A type with the same memory layout as `libc::sockaddr`, used to convert Rust-level
+/// `SocketAddr*` types into their system representation. The benefit of this type
+/// over `libc::sockaddr_storage` is that it is exactly as large as it needs to be,
+/// and it can be initialized more cleanly from Rust.
+#[repr(C)]
+pub(crate) union SocketAddrCRepr {
+ v4: libc::sockaddr_in,
+ v6: libc::sockaddr_in6,
+}
+
+impl SocketAddrCRepr {
+ pub(crate) fn as_ptr(&self) -> *const libc::sockaddr {
+ self as *const _ as *const libc::sockaddr
+ }
+}
+
+/// Converts a Rust `SocketAddr` into the system representation.
+pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, libc::socklen_t) {
+ match addr {
+ SocketAddr::V4(ref addr) => {
+ // `s_addr` is stored as big-endian on all machines, and the octet array is
+ // already in big-endian order, so the native-endian conversion is used to
+ // avoid any byte swapping.
+ let sin_addr = libc::in_addr {
+ s_addr: u32::from_ne_bytes(addr.ip().octets()),
+ };
+
+ let sockaddr_in = libc::sockaddr_in {
+ sin_family: libc::AF_INET as libc::sa_family_t,
+ sin_port: addr.port().to_be(),
+ sin_addr,
+ sin_zero: [0; 8],
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ sin_len: 0,
+ };
+
+ let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
+ let socklen = size_of::<libc::sockaddr_in>() as libc::socklen_t;
+ (sockaddr, socklen)
+ }
+ SocketAddr::V6(ref addr) => {
+ let sockaddr_in6 = libc::sockaddr_in6 {
+ sin6_family: libc::AF_INET6 as libc::sa_family_t,
+ sin6_port: addr.port().to_be(),
+ sin6_addr: libc::in6_addr {
+ s6_addr: addr.ip().octets(),
+ },
+ sin6_flowinfo: addr.flowinfo(),
+ sin6_scope_id: addr.scope_id(),
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ sin6_len: 0,
+ #[cfg(target_os = "illumos")]
+ __sin6_src_id: 0,
+ };
+
+ let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
+ let socklen = size_of::<libc::sockaddr_in6>() as libc::socklen_t;
+ (sockaddr, socklen)
+ }
+ }
+}
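+
+// Illustrative usage, as in `bind` in sys/unix/tcp.rs:
+//
+//     let (raw_addr, raw_addr_length) = socket_addr(&addr);
+//     syscall!(bind(socket.as_raw_fd(), raw_addr.as_ptr(), raw_addr_length))?;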
+
+/// Converts a `libc::sockaddr` compatible struct into a native Rust `SocketAddr`.
+///
+/// # Safety
+///
+/// `storage` must have the `ss_family` field correctly initialized and must
+/// point to an initialized `sockaddr_in` or `sockaddr_in6`.
+pub(crate) unsafe fn to_socket_addr(
+ storage: *const libc::sockaddr_storage,
+) -> io::Result<SocketAddr> {
+ match (*storage).ss_family as libc::c_int {
+ libc::AF_INET => {
+ // Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in.
+ let addr: &libc::sockaddr_in = &*(storage as *const libc::sockaddr_in);
+ let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes());
+ let port = u16::from_be(addr.sin_port);
+ Ok(SocketAddr::V4(SocketAddrV4::new(ip, port)))
+ }
+ libc::AF_INET6 => {
+ // Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6.
+ let addr: &libc::sockaddr_in6 = &*(storage as *const libc::sockaddr_in6);
+ let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr);
+ let port = u16::from_be(addr.sin6_port);
+ Ok(SocketAddr::V6(SocketAddrV6::new(
+ ip,
+ port,
+ addr.sin6_flowinfo,
+ addr.sin6_scope_id,
+ )))
+ }
+ _ => Err(io::ErrorKind::InvalidInput.into()),
+ }
+}
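+
+// Illustrative round-trip, as in `accept` in sys/unix/tcp.rs: the kernel fills
+// a `MaybeUninit<libc::sockaddr_storage>` during the accept call, after which
+// the storage is converted back with `unsafe { to_socket_addr(addr.as_ptr()) }`.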
diff --git a/third_party/rust/mio/src/sys/unix/pipe.rs b/third_party/rust/mio/src/sys/unix/pipe.rs
new file mode 100644
index 0000000000..c899dfb2da
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/pipe.rs
@@ -0,0 +1,431 @@
+//! Unix pipe.
+//!
+//! See the [`new`] function for documentation.
+
+use std::fs::File;
+use std::io::{self, IoSlice, IoSliceMut, Read, Write};
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::process::{ChildStderr, ChildStdin, ChildStdout};
+
+use crate::io_source::IoSource;
+use crate::{event, Interest, Registry, Token};
+
+/// Create a new non-blocking Unix pipe.
+///
+/// This is a wrapper around Unix's [`pipe(2)`] system call and can be used as
+/// an inter-process or inter-thread communication channel.
+///
+/// This channel may be created before forking the process, with one end then
+/// used in each process, e.g. the parent process keeps the sending end to send
+/// commands to the child process.
+///
+/// [`pipe(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pipe.html
+///
+/// # Events
+///
+/// The [`Sender`] can be registered with [`WRITABLE`] interest to receive
+/// [writable events], the [`Receiver`] with [`READABLE`] interest. Once data is
+/// written to the `Sender`, the `Receiver` will receive a [readable event].
+///
+/// In addition to those events, events will also be generated if the other side
+/// is dropped. To check whether the `Sender` was dropped, check
+/// [`is_read_closed`] on events for the `Receiver`; if it returns true, the
+/// `Sender` was dropped. On the `Sender` end, check [`is_write_closed`]; if it
+/// returns true, the `Receiver` was dropped. Also see the second example below.
+///
+/// [`WRITABLE`]: Interest::WRITABLE
+/// [writable events]: event::Event::is_writable
+/// [`READABLE`]: Interest::READABLE
+/// [readable event]: event::Event::is_readable
+/// [`is_read_closed`]: event::Event::is_read_closed
+/// [`is_write_closed`]: event::Event::is_write_closed
+///
+/// # Deregistering
+///
+/// Both `Sender` and `Receiver` will deregister themselves when dropped,
+/// **iff** the file descriptors are not duplicated (via [`dup(2)`]).
+///
+/// [`dup(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/dup.html
+///
+/// # Examples
+///
+/// Simple example that writes data into the sending end and reads it from the
+/// receiving end.
+///
+/// ```
+/// use std::io::{self, Read, Write};
+///
+/// use mio::{Poll, Events, Interest, Token};
+/// use mio::unix::pipe;
+///
+/// // Unique tokens for the two ends of the channel.
+/// const PIPE_RECV: Token = Token(0);
+/// const PIPE_SEND: Token = Token(1);
+///
+/// # fn main() -> io::Result<()> {
+/// // Create our `Poll` instance and the `Events` container.
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(8);
+///
+/// // Create a new pipe.
+/// let (mut sender, mut receiver) = pipe::new()?;
+///
+/// // Register both ends of the channel.
+/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?;
+/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?;
+///
+/// const MSG: &[u8; 11] = b"Hello world";
+///
+/// loop {
+/// poll.poll(&mut events, None)?;
+///
+/// for event in events.iter() {
+/// match event.token() {
+/// PIPE_SEND => sender.write(MSG)
+/// .and_then(|n| if n != MSG.len() {
+/// // We'll consider a short write an error in this
+/// // example. NOTE: we can't use `write_all` with
+/// // non-blocking I/O.
+/// Err(io::ErrorKind::WriteZero.into())
+/// } else {
+/// Ok(())
+/// })?,
+/// PIPE_RECV => {
+/// let mut buf = [0; 11];
+/// let n = receiver.read(&mut buf)?;
+/// println!("received: {:?}", &buf[0..n]);
+/// assert_eq!(n, MSG.len());
+/// assert_eq!(&buf, &*MSG);
+/// return Ok(());
+/// },
+/// _ => unreachable!(),
+/// }
+/// }
+/// }
+/// # }
+/// ```
+///
+/// Example that receives an event once the `Sender` is dropped.
+///
+/// ```
+/// # use std::io;
+/// #
+/// # use mio::{Poll, Events, Interest, Token};
+/// # use mio::unix::pipe;
+/// #
+/// # const PIPE_RECV: Token = Token(0);
+/// # const PIPE_SEND: Token = Token(1);
+/// #
+/// # fn main() -> io::Result<()> {
+/// // Same setup as in the example above.
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(8);
+///
+/// let (mut sender, mut receiver) = pipe::new()?;
+///
+/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?;
+/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?;
+///
+/// // Drop the sender.
+/// drop(sender);
+///
+/// poll.poll(&mut events, None)?;
+///
+/// for event in events.iter() {
+/// match event.token() {
+/// PIPE_RECV if event.is_read_closed() => {
+/// // Detected that the sender was dropped.
+/// println!("Sender dropped!");
+/// return Ok(());
+/// },
+/// _ => unreachable!(),
+/// }
+/// }
+/// # unreachable!();
+/// # }
+/// ```
+pub fn new() -> io::Result<(Sender, Receiver)> {
+ let mut fds: [RawFd; 2] = [-1, -1];
+
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "illumos",
+ ))]
+ unsafe {
+ if libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC | libc::O_NONBLOCK) != 0 {
+ return Err(io::Error::last_os_error());
+ }
+ }
+
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ unsafe {
+ // For platforms that don't have `pipe2(2)` we need to manually set the
+ // correct flags on the file descriptor.
+ if libc::pipe(fds.as_mut_ptr()) != 0 {
+ return Err(io::Error::last_os_error());
+ }
+
+ for fd in &fds {
+ if libc::fcntl(*fd, libc::F_SETFL, libc::O_NONBLOCK) != 0
+ || libc::fcntl(*fd, libc::F_SETFD, libc::FD_CLOEXEC) != 0
+ {
+ let err = io::Error::last_os_error();
+ // Don't leak file descriptors. Can't handle error though.
+ let _ = libc::close(fds[0]);
+ let _ = libc::close(fds[1]);
+ return Err(err);
+ }
+ }
+ }
+
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "illumos",
+ )))]
+ compile_error!("unsupported target for `mio::unix::pipe`");
+
+ // Safety: we just initialised the `fds` above.
+ let r = unsafe { Receiver::from_raw_fd(fds[0]) };
+ let w = unsafe { Sender::from_raw_fd(fds[1]) };
+ Ok((w, r))
+}
+
+/// Sending end of a Unix pipe.
+///
+/// See [`new`] for documentation, including examples.
+#[derive(Debug)]
+pub struct Sender {
+ inner: IoSource<File>,
+}
+
+impl Sender {
+ /// Set the `Sender` into or out of non-blocking mode.
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ set_nonblocking(self.inner.as_raw_fd(), nonblocking)
+ }
+}
+
+impl event::Source for Sender {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl Write for Sender {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|sender| (&*sender).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|sender| (&*sender).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|sender| (&*sender).flush())
+ }
+}
+
+impl Write for &Sender {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|sender| (&*sender).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|sender| (&*sender).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|sender| (&*sender).flush())
+ }
+}
+
+/// # Notes
+///
+/// The underlying pipe is **not** set to non-blocking.
+impl From<ChildStdin> for Sender {
+ fn from(stdin: ChildStdin) -> Sender {
+ // Safety: `ChildStdin` is guaranteed to be a valid file descriptor.
+ unsafe { Sender::from_raw_fd(stdin.into_raw_fd()) }
+ }
+}
+
+impl FromRawFd for Sender {
+ unsafe fn from_raw_fd(fd: RawFd) -> Sender {
+ Sender {
+ inner: IoSource::new(File::from_raw_fd(fd)),
+ }
+ }
+}
+
+impl AsRawFd for Sender {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for Sender {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+/// Receiving end of a Unix pipe.
+///
+/// See [`new`] for documentation, including examples.
+#[derive(Debug)]
+pub struct Receiver {
+ inner: IoSource<File>,
+}
+
+impl Receiver {
+ /// Set the `Receiver` into or out of non-blocking mode.
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ set_nonblocking(self.inner.as_raw_fd(), nonblocking)
+ }
+}
+
+impl event::Source for Receiver {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl Read for Receiver {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|receiver| (&*receiver).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|receiver| (&*receiver).read_vectored(bufs))
+ }
+}
+
+impl Read for &Receiver {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|receiver| (&*receiver).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|receiver| (&*receiver).read_vectored(bufs))
+ }
+}
+
+/// # Notes
+///
+/// The underlying pipe is **not** set to non-blocking.
+impl From<ChildStdout> for Receiver {
+ fn from(stdout: ChildStdout) -> Receiver {
+ // Safety: `ChildStdout` is guaranteed to be a valid file descriptor.
+ unsafe { Receiver::from_raw_fd(stdout.into_raw_fd()) }
+ }
+}
+
+/// # Notes
+///
+/// The underlying pipe is **not** set to non-blocking.
+impl From<ChildStderr> for Receiver {
+ fn from(stderr: ChildStderr) -> Receiver {
+ // Safety: `ChildStderr` is guaranteed to be a valid file descriptor.
+ unsafe { Receiver::from_raw_fd(stderr.into_raw_fd()) }
+ }
+}
+
+impl FromRawFd for Receiver {
+ unsafe fn from_raw_fd(fd: RawFd) -> Receiver {
+ Receiver {
+ inner: IoSource::new(File::from_raw_fd(fd)),
+ }
+ }
+}
+
+impl AsRawFd for Receiver {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for Receiver {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+#[cfg(not(target_os = "illumos"))]
+fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> {
+ let value = nonblocking as libc::c_int;
+ if unsafe { libc::ioctl(fd, libc::FIONBIO, &value) } == -1 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+}
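+
+// `FIONBIO` sets or clears non-blocking mode in a single ioctl; the illumos
+// fallback below instead has to read the flags with `F_GETFL` and write them
+// back with `F_SETFL`.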
+
+#[cfg(target_os = "illumos")]
+fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> {
+ let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) };
+ if flags < 0 {
+ return Err(io::Error::last_os_error());
+ }
+
+ let nflags = if nonblocking {
+ flags | libc::O_NONBLOCK
+ } else {
+ flags & !libc::O_NONBLOCK
+ };
+
+ if flags != nflags {
+ if unsafe { libc::fcntl(fd, libc::F_SETFL, nflags) } < 0 {
+ return Err(io::Error::last_os_error());
+ }
+ }
+
+ Ok(())
+}
diff --git a/third_party/rust/mio/src/sys/unix/selector/epoll.rs b/third_party/rust/mio/src/sys/unix/selector/epoll.rs
new file mode 100644
index 0000000000..f4430909b0
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/selector/epoll.rs
@@ -0,0 +1,246 @@
+use crate::{Interest, Token};
+
+use libc::{EPOLLET, EPOLLIN, EPOLLOUT, EPOLLRDHUP};
+use log::error;
+use std::os::unix::io::{AsRawFd, RawFd};
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::time::Duration;
+use std::{cmp, i32, io, ptr};
+
+/// Unique id for use as `SelectorId`.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
+
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ ep: RawFd,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // According to libuv, `EPOLL_CLOEXEC` is not defined on Android API <
+ // 21. But `EPOLL_CLOEXEC` is an alias for `O_CLOEXEC` on that platform,
+ // so we use it instead.
+ #[cfg(target_os = "android")]
+ let flag = libc::O_CLOEXEC;
+ #[cfg(not(target_os = "android"))]
+ let flag = libc::EPOLL_CLOEXEC;
+
+ syscall!(epoll_create1(flag)).map(|ep| Selector {
+ #[cfg(debug_assertions)]
+ id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ ep,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(false),
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ syscall!(fcntl(self.ep, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|ep| Selector {
+ // It's the same selector, so we use the same id.
+ #[cfg(debug_assertions)]
+ id: self.id,
+ ep,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)),
+ })
+ }
+
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ // A bug in kernels < 2.6.37 makes timeouts larger than LONG_MAX / CONFIG_HZ
+ // (approx. 30 minutes with CONFIG_HZ=1200) effectively infinite on 32-bit
+ // architectures. The magic number is the same constant used by libuv.
+ #[cfg(target_pointer_width = "32")]
+ const MAX_SAFE_TIMEOUT: u128 = 1789569;
+ #[cfg(not(target_pointer_width = "32"))]
+ const MAX_SAFE_TIMEOUT: u128 = libc::c_int::max_value() as u128;
+
+ let timeout = timeout
+ .map(|to| cmp::min(to.as_millis(), MAX_SAFE_TIMEOUT) as libc::c_int)
+ .unwrap_or(-1);
+
+ events.clear();
+ syscall!(epoll_wait(
+ self.ep,
+ events.as_mut_ptr(),
+ events.capacity() as i32,
+ timeout,
+ ))
+ .map(|n_events| {
+ // This is safe because `epoll_wait` ensures that the first
+ // `n_events` entries are initialised.
+ unsafe { events.set_len(n_events as usize) };
+ })
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let mut event = libc::epoll_event {
+ events: interests_to_epoll(interests),
+ u64: usize::from(token) as u64,
+ };
+
+ syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_ADD, fd, &mut event)).map(|_| ())
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let mut event = libc::epoll_event {
+ events: interests_to_epoll(interests),
+ u64: usize::from(token) as u64,
+ };
+
+ syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_MOD, fd, &mut event)).map(|_| ())
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_DEL, fd, ptr::null_mut())).map(|_| ())
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn register_waker(&self) -> bool {
+ self.has_waker.swap(true, Ordering::AcqRel)
+ }
+}
+
+cfg_io_source! {
+ impl Selector {
+ #[cfg(debug_assertions)]
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.ep
+ }
+}
+
+impl Drop for Selector {
+ fn drop(&mut self) {
+ if let Err(err) = syscall!(close(self.ep)) {
+ error!("error closing epoll: {}", err);
+ }
+ }
+}
+
+fn interests_to_epoll(interests: Interest) -> u32 {
+ let mut kind = EPOLLET;
+
+ if interests.is_readable() {
+ kind |= EPOLLIN | EPOLLRDHUP;
+ }
+
+ if interests.is_writable() {
+ kind |= EPOLLOUT;
+ }
+
+ kind as u32
+}
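+
+// For example, an interest of `Interest::READABLE | Interest::WRITABLE` maps
+// to `EPOLLET | EPOLLIN | EPOLLRDHUP | EPOLLOUT`; edge-triggered mode
+// (`EPOLLET`) is always included.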
+
+pub type Event = libc::epoll_event;
+pub type Events = Vec<Event>;
+
+pub mod event {
+ use std::fmt;
+
+ use crate::sys::Event;
+ use crate::Token;
+
+ pub fn token(event: &Event) -> Token {
+ Token(event.u64 as usize)
+ }
+
+ pub fn is_readable(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLIN) != 0
+ || (event.events as libc::c_int & libc::EPOLLPRI) != 0
+ }
+
+ pub fn is_writable(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLOUT) != 0
+ }
+
+ pub fn is_error(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLERR) != 0
+ }
+
+ pub fn is_read_closed(event: &Event) -> bool {
+ // Both halves of the socket have closed
+ event.events as libc::c_int & libc::EPOLLHUP != 0
+ // Socket has received FIN or called shutdown(SHUT_RD)
+ || (event.events as libc::c_int & libc::EPOLLIN != 0
+ && event.events as libc::c_int & libc::EPOLLRDHUP != 0)
+ }
+
+ pub fn is_write_closed(event: &Event) -> bool {
+ // Both halves of the socket have closed
+ event.events as libc::c_int & libc::EPOLLHUP != 0
+ // Unix pipe write end has closed
+ || (event.events as libc::c_int & libc::EPOLLOUT != 0
+ && event.events as libc::c_int & libc::EPOLLERR != 0)
+ // The other side (read end) of a Unix pipe has closed.
+ || event.events as libc::c_int == libc::EPOLLERR
+ }
+
+ pub fn is_priority(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLPRI) != 0
+ }
+
+ pub fn is_aio(_: &Event) -> bool {
+ // Not supported in the kernel, only in libc.
+ false
+ }
+
+ pub fn is_lio(_: &Event) -> bool {
+ // Not supported.
+ false
+ }
+
+ pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_events(got: &u32, want: &libc::c_int) -> bool {
+ (*got as libc::c_int & want) != 0
+ }
+ debug_detail!(
+ EventsDetails(u32),
+ check_events,
+ libc::EPOLLIN,
+ libc::EPOLLPRI,
+ libc::EPOLLOUT,
+ libc::EPOLLRDNORM,
+ libc::EPOLLRDBAND,
+ libc::EPOLLWRNORM,
+ libc::EPOLLWRBAND,
+ libc::EPOLLMSG,
+ libc::EPOLLERR,
+ libc::EPOLLHUP,
+ libc::EPOLLET,
+ libc::EPOLLRDHUP,
+ libc::EPOLLONESHOT,
+ #[cfg(target_os = "linux")]
+ libc::EPOLLEXCLUSIVE,
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ libc::EPOLLWAKEUP,
+ libc::EPOLL_CLOEXEC,
+ );
+
+ // Can't reference fields in packed structures.
+ let e_u64 = event.u64;
+ f.debug_struct("epoll_event")
+ .field("events", &EventsDetails(event.events))
+ .field("u64", &e_u64)
+ .finish()
+ }
+}
+
+#[cfg(target_os = "android")]
+#[test]
+fn assert_close_on_exec_flag() {
+ // This assertion needs to hold for `Selector::new`.
+ assert_eq!(libc::O_CLOEXEC, libc::EPOLL_CLOEXEC);
+}
diff --git a/third_party/rust/mio/src/sys/unix/selector/kqueue.rs b/third_party/rust/mio/src/sys/unix/selector/kqueue.rs
new file mode 100644
index 0000000000..b7a01a51c6
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/selector/kqueue.rs
@@ -0,0 +1,688 @@
+use crate::{Interest, Token};
+use log::error;
+use std::mem::MaybeUninit;
+use std::ops::{Deref, DerefMut};
+use std::os::unix::io::{AsRawFd, RawFd};
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::time::Duration;
+use std::{cmp, io, ptr, slice};
+
+/// Unique id for use as `SelectorId`.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
+
+// Type of the `nchanges` and `nevents` parameters in the `kevent` function.
+#[cfg(not(target_os = "netbsd"))]
+type Count = libc::c_int;
+#[cfg(target_os = "netbsd")]
+type Count = libc::size_t;
+
+// Type of the `filter` field in the `kevent` structure.
+#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))]
+type Filter = libc::c_short;
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+type Filter = i16;
+#[cfg(target_os = "netbsd")]
+type Filter = u32;
+
+// Type of the `flags` field in the `kevent` structure.
+#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))]
+type Flags = libc::c_ushort;
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+type Flags = u16;
+#[cfg(target_os = "netbsd")]
+type Flags = u32;
+
+// Type of the `data` field in the `kevent` structure.
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+))]
+type Data = libc::intptr_t;
+#[cfg(any(target_os = "netbsd", target_os = "openbsd"))]
+type Data = i64;
+
+// Type of the `udata` field in the `kevent` structure.
+#[cfg(not(target_os = "netbsd"))]
+type UData = *mut libc::c_void;
+#[cfg(target_os = "netbsd")]
+type UData = libc::intptr_t;
+
+macro_rules! kevent {
+ ($id: expr, $filter: expr, $flags: expr, $data: expr) => {
+ libc::kevent {
+ ident: $id as libc::uintptr_t,
+ filter: $filter as Filter,
+ flags: $flags,
+ fflags: 0,
+ data: 0,
+ udata: $data as UData,
+ }
+ };
+}
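+
+// Illustrative expansion: `kevent!(fd, libc::EVFILT_READ, flags, token.0)`
+// builds a `libc::kevent` with `ident: fd`, the given filter and flags, the
+// token smuggled through `udata`, and `fflags` and `data` zeroed.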
+
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ kq: RawFd,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ syscall!(kqueue())
+ .and_then(|kq| syscall!(fcntl(kq, libc::F_SETFD, libc::FD_CLOEXEC)).map(|_| kq))
+ .map(|kq| Selector {
+ #[cfg(debug_assertions)]
+ id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ kq,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(false),
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ syscall!(fcntl(self.kq, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|kq| Selector {
+ // It's the same selector, so we use the same id.
+ #[cfg(debug_assertions)]
+ id: self.id,
+ kq,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)),
+ })
+ }
+
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ let timeout = timeout.map(|to| libc::timespec {
+ tv_sec: cmp::min(to.as_secs(), libc::time_t::max_value() as u64) as libc::time_t,
+ // `Duration::subsec_nanos` is guaranteed to be less than one
+ // billion (the number of nanoseconds in a second), making the
+ // cast to i32 safe. The cast itself is needed for platforms
+ // where C's long is only 32 bits.
+ tv_nsec: libc::c_long::from(to.subsec_nanos() as i32),
+ });
+ let timeout = timeout
+ .as_ref()
+ .map(|s| s as *const _)
+ .unwrap_or(ptr::null_mut());
+
+ events.clear();
+ syscall!(kevent(
+ self.kq,
+ ptr::null(),
+ 0,
+ events.as_mut_ptr(),
+ events.capacity() as Count,
+ timeout,
+ ))
+ .map(|n_events| {
+ // This is safe because `kevent` ensures that the first
+ // `n_events` entries are initialised.
+ unsafe { events.set_len(n_events as usize) };
+ })
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let flags = libc::EV_CLEAR | libc::EV_RECEIPT | libc::EV_ADD;
+ // At most we need two changes, but we may only need one.
+ let mut changes: [MaybeUninit<libc::kevent>; 2] =
+ [MaybeUninit::uninit(), MaybeUninit::uninit()];
+ let mut n_changes = 0;
+
+ if interests.is_writable() {
+ let kevent = kevent!(fd, libc::EVFILT_WRITE, flags, token.0);
+ changes[n_changes] = MaybeUninit::new(kevent);
+ n_changes += 1;
+ }
+
+ if interests.is_readable() {
+ let kevent = kevent!(fd, libc::EVFILT_READ, flags, token.0);
+ changes[n_changes] = MaybeUninit::new(kevent);
+ n_changes += 1;
+ }
+
+ // Older versions of macOS (OS X 10.11 and 10.10 have been witnessed)
+ // can return EPIPE when registering a pipe file descriptor where the
+ // other end has already disappeared. For example, code that creates a
+ // pipe, closes a file descriptor, and then registers the other end will
+ // see an EPIPE returned from `register`.
+ //
+ // It also turns out that kevent will still report events on the file
+ // descriptor, telling us that it's readable/hup at least after we've
+ // done this registration. As a result we just ignore `EPIPE` here
+ // instead of propagating it.
+ //
+ // More info can be found at tokio-rs/mio#582.
+ let changes = unsafe {
+ // This is safe because we ensured above that the first `n_changes`
+ // elements of the array are initialised.
+ slice::from_raw_parts_mut(changes[0].as_mut_ptr(), n_changes)
+ };
+ kevent_register(self.kq, changes, &[libc::EPIPE as Data])
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let flags = libc::EV_CLEAR | libc::EV_RECEIPT;
+ let write_flags = if interests.is_writable() {
+ flags | libc::EV_ADD
+ } else {
+ flags | libc::EV_DELETE
+ };
+ let read_flags = if interests.is_readable() {
+ flags | libc::EV_ADD
+ } else {
+ flags | libc::EV_DELETE
+ };
+
+ let mut changes: [libc::kevent; 2] = [
+ kevent!(fd, libc::EVFILT_WRITE, write_flags, token.0),
+ kevent!(fd, libc::EVFILT_READ, read_flags, token.0),
+ ];
+
+ // Since there is no way to check with which interests the fd was
+ // registered, we modify both the read and the write filter, adding each
+ // when required and removing it otherwise, ignoring any ENOENT error
+ // that comes up. An ENOENT error informs us that a filter we're trying
+ // to remove wasn't there in the first place, but we don't really care
+ // since our goal is accomplished either way.
+ //
+ // For the explanation of ignoring `EPIPE` see `register`.
+ kevent_register(
+ self.kq,
+ &mut changes,
+ &[libc::ENOENT as Data, libc::EPIPE as Data],
+ )
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ let flags = libc::EV_DELETE | libc::EV_RECEIPT;
+ let mut changes: [libc::kevent; 2] = [
+ kevent!(fd, libc::EVFILT_WRITE, flags, 0),
+ kevent!(fd, libc::EVFILT_READ, flags, 0),
+ ];
+
+ // Since there is no way to check with which interests the fd was
+ // registered, we remove both filters (read and write) and ignore any
+ // ENOENT error that comes up. An ENOENT error informs us that the
+ // filter wasn't there in the first place, but we don't really care
+ // since our goal is to remove it.
+ kevent_register(self.kq, &mut changes, &[libc::ENOENT as Data])
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn register_waker(&self) -> bool {
+ self.has_waker.swap(true, Ordering::AcqRel)
+ }
+
+ // Used by `Waker`.
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ pub fn setup_waker(&self, token: Token) -> io::Result<()> {
+ // First attempt to accept user space notifications.
+ let mut kevent = kevent!(
+ 0,
+ libc::EVFILT_USER,
+ libc::EV_ADD | libc::EV_CLEAR | libc::EV_RECEIPT,
+ token.0
+ );
+
+ syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| {
+ if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 {
+ Err(io::Error::from_raw_os_error(kevent.data as i32))
+ } else {
+ Ok(())
+ }
+ })
+ }
+
+ // Used by `Waker`.
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ pub fn wake(&self, token: Token) -> io::Result<()> {
+ let mut kevent = kevent!(
+ 0,
+ libc::EVFILT_USER,
+ libc::EV_ADD | libc::EV_RECEIPT,
+ token.0
+ );
+ kevent.fflags = libc::NOTE_TRIGGER;
+
+ syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| {
+ if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 {
+ Err(io::Error::from_raw_os_error(kevent.data as i32))
+ } else {
+ Ok(())
+ }
+ })
+ }
+}
+
+/// Register `changes` with the kqueue `kq`.
+fn kevent_register(
+ kq: RawFd,
+ changes: &mut [libc::kevent],
+ ignored_errors: &[Data],
+) -> io::Result<()> {
+ syscall!(kevent(
+ kq,
+ changes.as_ptr(),
+ changes.len() as Count,
+ changes.as_mut_ptr(),
+ changes.len() as Count,
+ ptr::null(),
+ ))
+ .map(|_| ())
+ .or_else(|err| {
+ // According to the manual page of FreeBSD: "When kevent() call fails
+ // with EINTR error, all changes in the changelist have been applied",
+ // so we can safely ignore it.
+ if err.raw_os_error() == Some(libc::EINTR) {
+ Ok(())
+ } else {
+ Err(err)
+ }
+ })
+ .and_then(|()| check_errors(changes, ignored_errors))
+}
+
+/// Check all events for possible errors; returns the first error found.
+fn check_errors(events: &[libc::kevent], ignored_errors: &[Data]) -> io::Result<()> {
+ for event in events {
+ // We can't take references to fields of packed structures (needed when
+ // checking the ignored errors), so we copy the data out before use.
+ let data = event.data;
+ // Check for the error flag, the actual error will be in the `data`
+ // field.
+ if (event.flags & libc::EV_ERROR != 0) && data != 0 && !ignored_errors.contains(&data) {
+ return Err(io::Error::from_raw_os_error(data as i32));
+ }
+ }
+ Ok(())
+}
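+
+// Note on the flow above: every change is submitted with `EV_RECEIPT`, which
+// makes the kernel report a per-change result in the event list with
+// `EV_ERROR` set and the error (or 0) in `data`; `check_errors` then surfaces
+// the first result that isn't in `ignored_errors`.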
+
+cfg_io_source! {
+ #[cfg(debug_assertions)]
+ impl Selector {
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.kq
+ }
+}
+
+impl Drop for Selector {
+ fn drop(&mut self) {
+ if let Err(err) = syscall!(close(self.kq)) {
+ error!("error closing kqueue: {}", err);
+ }
+ }
+}
+
+pub type Event = libc::kevent;
+pub struct Events(Vec<libc::kevent>);
+
+impl Events {
+ pub fn with_capacity(capacity: usize) -> Events {
+ Events(Vec::with_capacity(capacity))
+ }
+}
+
+impl Deref for Events {
+ type Target = Vec<libc::kevent>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for Events {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+// `Events` cannot derive `Send` or `Sync` because of the
+// `udata: *mut ::c_void` field in `libc::kevent`. However, `Events`'s public
+// API treats the `udata` field as a `uintptr_t` which is `Send`. `Sync` is
+// safe because with a `events: &Events` value, the only access to the `udata`
+// field is through `fn token(event: &Event)` which cannot mutate the field.
+unsafe impl Send for Events {}
+unsafe impl Sync for Events {}
+
+pub mod event {
+ use std::fmt;
+
+ use crate::sys::Event;
+ use crate::Token;
+
+ use super::{Filter, Flags};
+
+ pub fn token(event: &Event) -> Token {
+ Token(event.udata as usize)
+ }
+
+ pub fn is_readable(event: &Event) -> bool {
+ event.filter == libc::EVFILT_READ || {
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ // Used by the `Waker`. On platforms that use `eventfd` or a Unix
+ // pipe it will emit a readable event, so we fake that here as
+ // well.
+ {
+ event.filter == libc::EVFILT_USER
+ }
+ #[cfg(not(any(target_os = "freebsd", target_os = "ios", target_os = "macos")))]
+ {
+ false
+ }
+ }
+ }
+
+ pub fn is_writable(event: &Event) -> bool {
+ event.filter == libc::EVFILT_WRITE
+ }
+
+ pub fn is_error(event: &Event) -> bool {
+ (event.flags & libc::EV_ERROR) != 0 ||
+ // When the read end of the socket is closed, EV_EOF is set on
+ // flags, and fflags contains the error if there is one.
+ (event.flags & libc::EV_EOF) != 0 && event.fflags != 0
+ }
+
+ pub fn is_read_closed(event: &Event) -> bool {
+ event.filter == libc::EVFILT_READ && event.flags & libc::EV_EOF != 0
+ }
+
+ pub fn is_write_closed(event: &Event) -> bool {
+ event.filter == libc::EVFILT_WRITE && event.flags & libc::EV_EOF != 0
+ }
+
+ pub fn is_priority(_: &Event) -> bool {
+ // kqueue doesn't have priority indicators.
+ false
+ }
+
+ #[allow(unused_variables)] // `event` is not used on some platforms.
+ pub fn is_aio(event: &Event) -> bool {
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ {
+ event.filter == libc::EVFILT_AIO
+ }
+ #[cfg(not(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ )))]
+ {
+ false
+ }
+ }
+
+ #[allow(unused_variables)] // `event` is only used on FreeBSD.
+ pub fn is_lio(event: &Event) -> bool {
+ #[cfg(target_os = "freebsd")]
+ {
+ event.filter == libc::EVFILT_LIO
+ }
+ #[cfg(not(target_os = "freebsd"))]
+ {
+ false
+ }
+ }
+
+ pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ debug_detail!(
+ FilterDetails(Filter),
+ PartialEq::eq,
+ libc::EVFILT_READ,
+ libc::EVFILT_WRITE,
+ libc::EVFILT_AIO,
+ libc::EVFILT_VNODE,
+ libc::EVFILT_PROC,
+ libc::EVFILT_SIGNAL,
+ libc::EVFILT_TIMER,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_PROCDESC,
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "dragonfly",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::EVFILT_FS,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_LIO,
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "dragonfly",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::EVFILT_USER,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_SENDFILE,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_EMPTY,
+ #[cfg(target_os = "dragonfly")]
+ libc::EVFILT_EXCEPT,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EVFILT_MACHPORT,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EVFILT_VM,
+ );
+
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_flag(got: &Flags, want: &Flags) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FlagsDetails(Flags),
+ check_flag,
+ libc::EV_ADD,
+ libc::EV_DELETE,
+ libc::EV_ENABLE,
+ libc::EV_DISABLE,
+ libc::EV_ONESHOT,
+ libc::EV_CLEAR,
+ libc::EV_RECEIPT,
+ libc::EV_DISPATCH,
+ #[cfg(target_os = "freebsd")]
+ libc::EV_DROP,
+ libc::EV_FLAG1,
+ libc::EV_ERROR,
+ libc::EV_EOF,
+ libc::EV_SYSFLAGS,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EV_FLAG0,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EV_POLL,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EV_OOBAND,
+ #[cfg(target_os = "dragonfly")]
+ libc::EV_NODATA,
+ );
+
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_fflag(got: &u32, want: &u32) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FflagsDetails(u32),
+ check_fflag,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_TRIGGER,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFNOP,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFAND,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFOR,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFCOPY,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFCTRLMASK,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFLAGSMASK,
+ libc::NOTE_LOWAT,
+ libc::NOTE_DELETE,
+ libc::NOTE_WRITE,
+ #[cfg(target_os = "dragonfly")]
+ libc::NOTE_OOB,
+ #[cfg(target_os = "openbsd")]
+ libc::NOTE_EOF,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXTEND,
+ libc::NOTE_ATTRIB,
+ libc::NOTE_LINK,
+ libc::NOTE_RENAME,
+ libc::NOTE_REVOKE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_NONE,
+ #[cfg(any(target_os = "openbsd"))]
+ libc::NOTE_TRUNCATE,
+ libc::NOTE_EXIT,
+ libc::NOTE_FORK,
+ libc::NOTE_EXEC,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_SIGNAL,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXITSTATUS,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_DETAIL,
+ libc::NOTE_PDATAMASK,
+ libc::NOTE_PCTRLMASK,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ libc::NOTE_TRACK,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ libc::NOTE_TRACKERR,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ libc::NOTE_CHILD,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_DETAIL_MASK,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_DECRYPTFAIL,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_MEMORY,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_CSERROR,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_VM_PRESSURE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_VM_PRESSURE_TERMINATE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_VM_PRESSURE_SUDDEN_TERMINATE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_VM_ERROR,
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ libc::NOTE_SECONDS,
+ #[cfg(any(target_os = "freebsd"))]
+ libc::NOTE_MSECONDS,
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ libc::NOTE_USECONDS,
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ libc::NOTE_NSECONDS,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ libc::NOTE_ABSOLUTE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_LEEWAY,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_CRITICAL,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_BACKGROUND,
+ );
+
+ // Can't reference fields in packed structures.
+ let ident = event.ident;
+ let data = event.data;
+ let udata = event.udata;
+ f.debug_struct("kevent")
+ .field("ident", &ident)
+ .field("filter", &FilterDetails(event.filter))
+ .field("flags", &FlagsDetails(event.flags))
+ .field("fflags", &FflagsDetails(event.fflags))
+ .field("data", &data)
+ .field("udata", &udata)
+ .finish()
+ }
+}
+
+#[test]
+#[cfg(feature = "os-ext")]
+fn does_not_register_rw() {
+ use crate::unix::SourceFd;
+ use crate::{Poll, Token};
+
+ let kq = unsafe { libc::kqueue() };
+ let mut kqf = SourceFd(&kq);
+ let poll = Poll::new().unwrap();
+
+ // Registering the kqueue fd will fail if write interest is requested (on
+ // anything but some versions of macOS).
+ poll.registry()
+ .register(&mut kqf, Token(1234), Interest::READABLE)
+ .unwrap();
+}
diff --git a/third_party/rust/mio/src/sys/unix/selector/mod.rs b/third_party/rust/mio/src/sys/unix/selector/mod.rs
new file mode 100644
index 0000000000..da61e14d7e
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/selector/mod.rs
@@ -0,0 +1,35 @@
+#[cfg(any(target_os = "android", target_os = "illumos", target_os = "linux"))]
+mod epoll;
+
+#[cfg(any(target_os = "android", target_os = "illumos", target_os = "linux"))]
+pub(crate) use self::epoll::{event, Event, Events, Selector};
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+mod kqueue;
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+pub(crate) use self::kqueue::{event, Event, Events, Selector};
+
+/// Lowest file descriptor used in `Selector::try_clone`.
+///
+/// # Notes
+///
+/// Usually fds 0, 1 and 2 are standard in, out and error. Some applications
+/// blindly assume this to be true, which means that using any one of them for
+/// a selector could result in some interesting and unexpected errors. Avoid
+/// that by using an fd that doesn't have a pre-determined usage.
+const LOWEST_FD: libc::c_int = 3;
diff --git a/third_party/rust/mio/src/sys/unix/sourcefd.rs b/third_party/rust/mio/src/sys/unix/sourcefd.rs
new file mode 100644
index 0000000000..84e776d21d
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/sourcefd.rs
@@ -0,0 +1,116 @@
+use crate::{event, Interest, Registry, Token};
+
+use std::io;
+use std::os::unix::io::RawFd;
+
+/// Adapter for [`RawFd`] providing an [`event::Source`] implementation.
+///
+/// `SourceFd` enables registering any type with an FD with [`Poll`].
+///
+/// While only implementations for TCP and UDP are provided, Mio supports
+/// registering any FD that can be registered with the underlying OS selector.
+/// `SourceFd` provides the necessary bridge.
+///
+/// Note that `SourceFd` takes a `&RawFd`. This is because `SourceFd` **does
+/// not** take ownership of the FD. Specifically, it will not manage any
+/// lifecycle related operations, such as closing the FD on drop. It is expected
+/// that the `SourceFd` is constructed right before a call to
+/// [`Registry::register`]. See the examples for more detail.
+///
+/// [`event::Source`]: ../event/trait.Source.html
+/// [`Poll`]: ../struct.Poll.html
+/// [`Registry::register`]: ../struct.Registry.html#method.register
+///
+/// # Examples
+///
+/// Basic usage.
+///
+#[cfg_attr(
+ all(feature = "os-poll", feature = "net", feature = "os-ext"),
+ doc = "```"
+)]
+#[cfg_attr(
+ not(all(feature = "os-poll", feature = "net", feature = "os-ext")),
+ doc = "```ignore"
+)]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Interest, Poll, Token};
+/// use mio::unix::SourceFd;
+///
+/// use std::os::unix::io::AsRawFd;
+/// use std::net::TcpListener;
+///
+/// // Bind a std listener
+/// let listener = TcpListener::bind("127.0.0.1:0")?;
+///
+/// let poll = Poll::new()?;
+///
+/// // Register the listener
+/// poll.registry().register(
+/// &mut SourceFd(&listener.as_raw_fd()),
+/// Token(0),
+/// Interest::READABLE)?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Implementing [`event::Source`] for a custom type backed by a [`RawFd`].
+///
+#[cfg_attr(all(feature = "os-poll", feature = "os-ext"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "os-ext")), doc = "```ignore")]
+/// use mio::{event, Interest, Registry, Token};
+/// use mio::unix::SourceFd;
+///
+/// use std::os::unix::io::RawFd;
+/// use std::io;
+///
+/// # #[allow(dead_code)]
+/// pub struct MyIo {
+/// fd: RawFd,
+/// }
+///
+/// impl event::Source for MyIo {
+/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// SourceFd(&self.fd).register(registry, token, interests)
+/// }
+///
+/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// SourceFd(&self.fd).reregister(registry, token, interests)
+/// }
+///
+/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+/// SourceFd(&self.fd).deregister(registry)
+/// }
+/// }
+/// ```
+#[derive(Debug)]
+pub struct SourceFd<'a>(pub &'a RawFd);
+
+impl<'a> event::Source for SourceFd<'a> {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ registry.selector().register(*self.0, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ registry.selector().reregister(*self.0, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ registry.selector().deregister(*self.0)
+ }
+}
diff --git a/third_party/rust/mio/src/sys/unix/tcp.rs b/third_party/rust/mio/src/sys/unix/tcp.rs
new file mode 100644
index 0000000000..5b02cfcb52
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/tcp.rs
@@ -0,0 +1,113 @@
+use std::convert::TryInto;
+use std::io;
+use std::mem::{size_of, MaybeUninit};
+use std::net::{self, SocketAddr};
+use std::os::unix::io::{AsRawFd, FromRawFd};
+
+use crate::sys::unix::net::{new_socket, socket_addr, to_socket_addr};
+
+pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<libc::c_int> {
+ let domain = match address {
+ SocketAddr::V4(_) => libc::AF_INET,
+ SocketAddr::V6(_) => libc::AF_INET6,
+ };
+ new_socket(domain, libc::SOCK_STREAM)
+}
+
+pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(bind(socket.as_raw_fd(), raw_addr.as_ptr(), raw_addr_length))?;
+ Ok(())
+}
+
+pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+
+ match syscall!(connect(
+ socket.as_raw_fd(),
+ raw_addr.as_ptr(),
+ raw_addr_length
+ )) {
+ Err(err) if err.raw_os_error() != Some(libc::EINPROGRESS) => Err(err),
+ _ => Ok(()),
+ }
+}
+
+pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> {
+ let backlog = backlog.try_into().unwrap_or(i32::max_value());
+ syscall!(listen(socket.as_raw_fd(), backlog))?;
+ Ok(())
+}
+
+pub(crate) fn set_reuseaddr(socket: &net::TcpListener, reuseaddr: bool) -> io::Result<()> {
+ let val: libc::c_int = if reuseaddr { 1 } else { 0 };
+ syscall!(setsockopt(
+ socket.as_raw_fd(),
+ libc::SOL_SOCKET,
+ libc::SO_REUSEADDR,
+ &val as *const libc::c_int as *const libc::c_void,
+ size_of::<libc::c_int>() as libc::socklen_t,
+ ))?;
+ Ok(())
+}
+
+pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ let mut addr: MaybeUninit<libc::sockaddr_storage> = MaybeUninit::uninit();
+ let mut length = size_of::<libc::sockaddr_storage>() as libc::socklen_t;
+
+ // On platforms that support it we can use `accept4(2)` to set `NONBLOCK`
+ // and `CLOEXEC` in the call to accept the connection.
+ #[cfg(any(
+ // Android x86's seccomp profile forbids calls to `accept4(2)`
+ // See https://github.com/tokio-rs/mio/issues/1445 for details
+ all(
+ not(target_arch="x86"),
+ target_os = "android"
+ ),
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ let stream = {
+ syscall!(accept4(
+ listener.as_raw_fd(),
+ addr.as_mut_ptr() as *mut _,
+ &mut length,
+ libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK,
+ ))
+ .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) })
+ }?;
+
+ // But not all platforms have the `accept4(2)` call. Luckily BSD (derived)
+ // OSes inherit the non-blocking flag from the listener, so we just have to
+ // set `CLOEXEC`.
+ #[cfg(any(
+ all(target_arch = "x86", target_os = "android"),
+ target_os = "ios",
+ target_os = "macos",
+ ))]
+ let stream = {
+ syscall!(accept(
+ listener.as_raw_fd(),
+ addr.as_mut_ptr() as *mut _,
+ &mut length
+ ))
+ .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) })
+ .and_then(|s| {
+ syscall!(fcntl(s.as_raw_fd(), libc::F_SETFD, libc::FD_CLOEXEC))?;
+
+ // See https://github.com/tokio-rs/mio/issues/1450
+ #[cfg(all(target_arch = "x86", target_os = "android"))]
+ syscall!(fcntl(s.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK))?;
+
+ Ok(s)
+ })
+ }?;
+
+    // This is safe because the `accept` calls above ensure the address is
+    // initialised.
+ unsafe { to_socket_addr(addr.as_ptr()) }.map(|addr| (stream, addr))
+}
diff --git a/third_party/rust/mio/src/sys/unix/udp.rs b/third_party/rust/mio/src/sys/unix/udp.rs
new file mode 100644
index 0000000000..5a97cbd897
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/udp.rs
@@ -0,0 +1,39 @@
+use crate::sys::unix::net::{new_ip_socket, socket_addr};
+
+use std::io;
+use std::mem;
+use std::net::{self, SocketAddr};
+use std::os::unix::io::{AsRawFd, FromRawFd};
+
+pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
+    // Gives a warning for non-Apple platforms.
+ #[allow(clippy::let_and_return)]
+ let socket = new_ip_socket(addr, libc::SOCK_DGRAM);
+
+ socket.and_then(|socket| {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(bind(socket, raw_addr.as_ptr(), raw_addr_length))
+ .map_err(|err| {
+ // Close the socket if we hit an error, ignoring the error
+ // from closing since we can't pass back two errors.
+ let _ = unsafe { libc::close(socket) };
+ err
+ })
+ .map(|_| unsafe { net::UdpSocket::from_raw_fd(socket) })
+ })
+}
+
+pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
+ let mut optval: libc::c_int = 0;
+ let mut optlen = mem::size_of::<libc::c_int>() as libc::socklen_t;
+
+ syscall!(getsockopt(
+ socket.as_raw_fd(),
+ libc::IPPROTO_IPV6,
+ libc::IPV6_V6ONLY,
+ &mut optval as *mut _ as *mut _,
+ &mut optlen,
+ ))?;
+
+ Ok(optval != 0)
+}
diff --git a/third_party/rust/mio/src/sys/unix/uds/datagram.rs b/third_party/rust/mio/src/sys/unix/uds/datagram.rs
new file mode 100644
index 0000000000..d3e5314fe3
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/uds/datagram.rs
@@ -0,0 +1,56 @@
+use super::{socket_addr, SocketAddr};
+use crate::sys::unix::net::new_socket;
+
+use std::io;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::os::unix::net;
+use std::path::Path;
+
+pub(crate) fn bind(path: &Path) -> io::Result<net::UnixDatagram> {
+ let fd = new_socket(libc::AF_UNIX, libc::SOCK_DGRAM)?;
+ // Ensure the fd is closed.
+ let socket = unsafe { net::UnixDatagram::from_raw_fd(fd) };
+ let (sockaddr, socklen) = socket_addr(path)?;
+ let sockaddr = &sockaddr as *const libc::sockaddr_un as *const _;
+ syscall!(bind(fd, sockaddr, socklen))?;
+ Ok(socket)
+}
+
+pub(crate) fn unbound() -> io::Result<net::UnixDatagram> {
+ new_socket(libc::AF_UNIX, libc::SOCK_DGRAM)
+ .map(|socket| unsafe { net::UnixDatagram::from_raw_fd(socket) })
+}
+
+pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> {
+ super::pair(libc::SOCK_DGRAM)
+}
+
+pub(crate) fn local_addr(socket: &net::UnixDatagram) -> io::Result<SocketAddr> {
+ super::local_addr(socket.as_raw_fd())
+}
+
+pub(crate) fn peer_addr(socket: &net::UnixDatagram) -> io::Result<SocketAddr> {
+ super::peer_addr(socket.as_raw_fd())
+}
+
+pub(crate) fn recv_from(
+ socket: &net::UnixDatagram,
+ dst: &mut [u8],
+) -> io::Result<(usize, SocketAddr)> {
+ let mut count = 0;
+ let socketaddr = SocketAddr::new(|sockaddr, socklen| {
+ syscall!(recvfrom(
+ socket.as_raw_fd(),
+ dst.as_mut_ptr() as *mut _,
+ dst.len(),
+ 0,
+ sockaddr,
+ socklen,
+ ))
+ .map(|c| {
+ count = c;
+ c as libc::c_int
+ })
+ })?;
+ Ok((count as usize, socketaddr))
+}
diff --git a/third_party/rust/mio/src/sys/unix/uds/listener.rs b/third_party/rust/mio/src/sys/unix/uds/listener.rs
new file mode 100644
index 0000000000..b6218427f2
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/uds/listener.rs
@@ -0,0 +1,94 @@
+use super::socket_addr;
+use crate::net::{SocketAddr, UnixStream};
+use crate::sys::unix::net::new_socket;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::os::unix::net;
+use std::path::Path;
+use std::{io, mem};
+
+pub(crate) fn bind(path: &Path) -> io::Result<net::UnixListener> {
+ let socket = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ let (sockaddr, socklen) = socket_addr(path)?;
+ let sockaddr = &sockaddr as *const libc::sockaddr_un as *const libc::sockaddr;
+
+ syscall!(bind(socket, sockaddr, socklen))
+ .and_then(|_| syscall!(listen(socket, 1024)))
+ .map_err(|err| {
+ // Close the socket if we hit an error, ignoring the error from
+ // closing since we can't pass back two errors.
+ let _ = unsafe { libc::close(socket) };
+ err
+ })
+ .map(|_| unsafe { net::UnixListener::from_raw_fd(socket) })
+}
+
+pub(crate) fn accept(listener: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> {
+ let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed();
+
+ // This is safe to assume because a `libc::sockaddr_un` filled with `0`
+ // bytes is properly initialized.
+ //
+ // `0` is a valid value for `sockaddr_un::sun_family`; it is
+ // `libc::AF_UNSPEC`.
+ //
+ // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an
+ // abstract path.
+ let mut sockaddr = unsafe { sockaddr.assume_init() };
+
+ sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+ let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t;
+
+ #[cfg(not(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ // Android x86's seccomp profile forbids calls to `accept4(2)`
+ // See https://github.com/tokio-rs/mio/issues/1445 for details
+ all(
+ target_arch = "x86",
+ target_os = "android"
+ )
+ )))]
+ let socket = {
+ let flags = libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
+ syscall!(accept4(
+ listener.as_raw_fd(),
+ &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr,
+ &mut socklen,
+ flags
+ ))
+ .map(|socket| unsafe { net::UnixStream::from_raw_fd(socket) })
+ };
+
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ all(target_arch = "x86", target_os = "android")
+ ))]
+ let socket = syscall!(accept(
+ listener.as_raw_fd(),
+ &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr,
+ &mut socklen,
+ ))
+ .and_then(|socket| {
+ // Ensure the socket is closed if either of the `fcntl` calls
+ // error below.
+ let s = unsafe { net::UnixStream::from_raw_fd(socket) };
+ syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC))?;
+
+ // See https://github.com/tokio-rs/mio/issues/1450
+ #[cfg(all(target_arch = "x86", target_os = "android"))]
+ syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK))?;
+
+ Ok(s)
+ });
+
+ socket
+ .map(UnixStream::from_std)
+ .map(|stream| (stream, SocketAddr::from_parts(sockaddr, socklen)))
+}
+
+pub(crate) fn local_addr(listener: &net::UnixListener) -> io::Result<SocketAddr> {
+ super::local_addr(listener.as_raw_fd())
+}
diff --git a/third_party/rust/mio/src/sys/unix/uds/mod.rs b/third_party/rust/mio/src/sys/unix/uds/mod.rs
new file mode 100644
index 0000000000..8e28a9573a
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/uds/mod.rs
@@ -0,0 +1,149 @@
+mod socketaddr;
+pub use self::socketaddr::SocketAddr;
+
+/// Get the `sun_path` field offset of `sockaddr_un` for the target OS.
+///
+/// On Linux, this function evaluates to the same value as
+/// `size_of::<sa_family_t>()`, but some other implementations include
+/// other fields before `sun_path`, so the expression more portably
+/// describes the size of the address structure.
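+///
+/// A hedged illustration: on Linux, where `sun_family` is the only field
+/// before `sun_path`, this reduces to:
+///
+/// ```ignore
+/// let addr: libc::sockaddr_un = unsafe { std::mem::zeroed() };
+/// assert_eq!(path_offset(&addr), std::mem::size_of::<libc::sa_family_t>());
+/// ```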
+pub(in crate::sys) fn path_offset(sockaddr: &libc::sockaddr_un) -> usize {
+ let base = sockaddr as *const _ as usize;
+ let path = &sockaddr.sun_path as *const _ as usize;
+ path - base
+}
+
+cfg_os_poll! {
+ use std::cmp::Ordering;
+ use std::os::unix::ffi::OsStrExt;
+ use std::os::unix::io::{RawFd, FromRawFd};
+ use std::path::Path;
+ use std::{io, mem};
+
+ pub(crate) mod datagram;
+ pub(crate) mod listener;
+ pub(crate) mod stream;
+
+ pub(in crate::sys) fn socket_addr(path: &Path) -> io::Result<(libc::sockaddr_un, libc::socklen_t)> {
+ let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed();
+
+ // This is safe to assume because a `libc::sockaddr_un` filled with `0`
+ // bytes is properly initialized.
+ //
+ // `0` is a valid value for `sockaddr_un::sun_family`; it is
+ // `libc::AF_UNSPEC`.
+ //
+ // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an
+ // abstract path.
+ let mut sockaddr = unsafe { sockaddr.assume_init() };
+
+ sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+
+ let bytes = path.as_os_str().as_bytes();
+ match (bytes.get(0), bytes.len().cmp(&sockaddr.sun_path.len())) {
+ // Abstract paths don't need a null terminator
+ (Some(&0), Ordering::Greater) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "path must be no longer than libc::sockaddr_un.sun_path",
+ ));
+ }
+ (_, Ordering::Greater) | (_, Ordering::Equal) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "path must be shorter than libc::sockaddr_un.sun_path",
+ ));
+ }
+ _ => {}
+ }
+
+ for (dst, src) in sockaddr.sun_path.iter_mut().zip(bytes.iter()) {
+ *dst = *src as libc::c_char;
+ }
+
+ let offset = path_offset(&sockaddr);
+ let mut socklen = offset + bytes.len();
+
+ match bytes.get(0) {
+        // The struct has already been zeroed, so the null byte for pathname
+ // addresses is already there.
+ Some(&0) | None => {}
+ Some(_) => socklen += 1,
+ }
+
+ Ok((sockaddr, socklen as libc::socklen_t))
+ }
+
+ fn pair<T>(flags: libc::c_int) -> io::Result<(T, T)>
+ where T: FromRawFd,
+ {
+ #[cfg(not(any(target_os = "ios", target_os = "macos")))]
+ let flags = flags | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
+
+ let mut fds = [-1; 2];
+ syscall!(socketpair(libc::AF_UNIX, flags, 0, fds.as_mut_ptr()))?;
+ let pair = unsafe { (T::from_raw_fd(fds[0]), T::from_raw_fd(fds[1])) };
+
+ // Darwin doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC.
+ //
+ // In order to set those flags, additional `fcntl` sys calls must be
+ // performed. If a `fnctl` fails after the sockets have been created,
+ // the file descriptors will leak. Creating `pair` above ensures that if
+ // there is an error, the file descriptors are closed.
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ {
+ syscall!(fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK))?;
+ syscall!(fcntl(fds[0], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ syscall!(fcntl(fds[1], libc::F_SETFL, libc::O_NONBLOCK))?;
+ syscall!(fcntl(fds[1], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ }
+ Ok(pair)
+ }
+
+ // The following functions can't simply be replaced with a call to
+ // `net::UnixDatagram` because of our `SocketAddr` type.
+
+ fn local_addr(socket: RawFd) -> io::Result<SocketAddr> {
+ SocketAddr::new(|sockaddr, socklen| syscall!(getsockname(socket, sockaddr, socklen)))
+ }
+
+ fn peer_addr(socket: RawFd) -> io::Result<SocketAddr> {
+ SocketAddr::new(|sockaddr, socklen| syscall!(getpeername(socket, sockaddr, socklen)))
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use super::{path_offset, socket_addr};
+ use std::path::Path;
+ use std::str;
+
+ #[test]
+ fn pathname_address() {
+ const PATH: &str = "./foo/bar.txt";
+ const PATH_LEN: usize = 13;
+
+ // Pathname addresses do have a null terminator, so `socklen` is
+ // expected to be `PATH_LEN` + `offset` + 1.
+ let path = Path::new(PATH);
+ let (sockaddr, actual) = socket_addr(path).unwrap();
+ let offset = path_offset(&sockaddr);
+ let expected = PATH_LEN + offset + 1;
+ assert_eq!(expected as libc::socklen_t, actual)
+ }
+
+ #[test]
+ fn abstract_address() {
+ const PATH: &[u8] = &[0, 116, 111, 107, 105, 111];
+ const PATH_LEN: usize = 6;
+
+ // Abstract addresses do not have a null terminator, so `socklen` is
+ // expected to be `PATH_LEN` + `offset`.
+ let abstract_path = str::from_utf8(PATH).unwrap();
+ let path = Path::new(abstract_path);
+ let (sockaddr, actual) = socket_addr(path).unwrap();
+ let offset = path_offset(&sockaddr);
+ let expected = PATH_LEN + offset;
+ assert_eq!(expected as libc::socklen_t, actual)
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/sys/unix/uds/socketaddr.rs b/third_party/rust/mio/src/sys/unix/uds/socketaddr.rs
new file mode 100644
index 0000000000..4c7c411618
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/uds/socketaddr.rs
@@ -0,0 +1,130 @@
+use super::path_offset;
+use std::ffi::OsStr;
+use std::os::unix::ffi::OsStrExt;
+use std::path::Path;
+use std::{ascii, fmt};
+
+/// An address associated with a `mio` specific Unix socket.
+///
+/// This is implemented instead of imported from [`net::SocketAddr`] because
+/// there is no way to create a [`net::SocketAddr`]. One must be returned by
+/// [`accept`], so this is returned instead.
+///
+/// [`net::SocketAddr`]: std::os::unix::net::SocketAddr
+/// [`accept`]: #method.accept
+pub struct SocketAddr {
+ sockaddr: libc::sockaddr_un,
+ socklen: libc::socklen_t,
+}
+
+struct AsciiEscaped<'a>(&'a [u8]);
+
+enum AddressKind<'a> {
+ Unnamed,
+ Pathname(&'a Path),
+ Abstract(&'a [u8]),
+}
+
+impl SocketAddr {
+ fn address(&self) -> AddressKind<'_> {
+ let offset = path_offset(&self.sockaddr);
+ // Don't underflow in `len` below.
+ if (self.socklen as usize) < offset {
+ return AddressKind::Unnamed;
+ }
+ let len = self.socklen as usize - offset;
+ let path = unsafe { &*(&self.sockaddr.sun_path as *const [libc::c_char] as *const [u8]) };
+
+        // macOS seems to return a len of 16 and a zeroed sun_path for unnamed addresses.
+ if len == 0
+ || (cfg!(not(any(target_os = "linux", target_os = "android")))
+ && self.sockaddr.sun_path[0] == 0)
+ {
+ AddressKind::Unnamed
+ } else if self.sockaddr.sun_path[0] == 0 {
+ AddressKind::Abstract(&path[1..len])
+ } else {
+ AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref())
+ }
+ }
+}
+
+cfg_os_poll! {
+ use std::{io, mem};
+
+ impl SocketAddr {
+ pub(crate) fn new<F>(f: F) -> io::Result<SocketAddr>
+ where
+ F: FnOnce(*mut libc::sockaddr, &mut libc::socklen_t) -> io::Result<libc::c_int>,
+ {
+ let mut sockaddr = {
+ let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed();
+ unsafe { sockaddr.assume_init() }
+ };
+
+ let raw_sockaddr = &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr;
+ let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t;
+
+ f(raw_sockaddr, &mut socklen)?;
+ Ok(SocketAddr::from_parts(sockaddr, socklen))
+ }
+
+ pub(crate) fn from_parts(sockaddr: libc::sockaddr_un, socklen: libc::socklen_t) -> SocketAddr {
+ SocketAddr { sockaddr, socklen }
+ }
+
+ /// Returns `true` if the address is unnamed.
+ ///
+ /// Documentation reflected in [`SocketAddr`]
+ ///
+ /// [`SocketAddr`]: std::os::unix::net::SocketAddr
+ pub fn is_unnamed(&self) -> bool {
+ matches!(self.address(), AddressKind::Unnamed)
+ }
+
+ /// Returns the contents of this address if it is a `pathname` address.
+ ///
+ /// Documentation reflected in [`SocketAddr`]
+ ///
+ /// [`SocketAddr`]: std::os::unix::net::SocketAddr
+ pub fn as_pathname(&self) -> Option<&Path> {
+ if let AddressKind::Pathname(path) = self.address() {
+ Some(path)
+ } else {
+ None
+ }
+ }
+
+ /// Returns the contents of this address if it is an abstract namespace
+ /// without the leading null byte.
+ // Link to std::os::unix::net::SocketAddr pending
+ // https://github.com/rust-lang/rust/issues/85410.
+ pub fn as_abstract_namespace(&self) -> Option<&[u8]> {
+ if let AddressKind::Abstract(path) = self.address() {
+ Some(path)
+ } else {
+ None
+ }
+ }
+ }
+}
+
+impl fmt::Debug for SocketAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.address() {
+ AddressKind::Unnamed => write!(fmt, "(unnamed)"),
+ AddressKind::Abstract(name) => write!(fmt, "{} (abstract)", AsciiEscaped(name)),
+ AddressKind::Pathname(path) => write!(fmt, "{:?} (pathname)", path),
+ }
+ }
+}
+
+impl<'a> fmt::Display for AsciiEscaped<'a> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "\"")?;
+ for byte in self.0.iter().cloned().flat_map(ascii::escape_default) {
+ write!(fmt, "{}", byte as char)?;
+ }
+ write!(fmt, "\"")
+ }
+}
diff --git a/third_party/rust/mio/src/sys/unix/uds/stream.rs b/third_party/rust/mio/src/sys/unix/uds/stream.rs
new file mode 100644
index 0000000000..149dd14e1d
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/uds/stream.rs
@@ -0,0 +1,39 @@
+use super::{socket_addr, SocketAddr};
+use crate::sys::unix::net::new_socket;
+
+use std::io;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::os::unix::net;
+use std::path::Path;
+
+pub(crate) fn connect(path: &Path) -> io::Result<net::UnixStream> {
+ let socket = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ let (sockaddr, socklen) = socket_addr(path)?;
+ let sockaddr = &sockaddr as *const libc::sockaddr_un as *const libc::sockaddr;
+
+ match syscall!(connect(socket, sockaddr, socklen)) {
+ Ok(_) => {}
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
+ Err(e) => {
+ // Close the socket if we hit an error, ignoring the error
+ // from closing since we can't pass back two errors.
+ let _ = unsafe { libc::close(socket) };
+
+ return Err(e);
+ }
+ }
+
+ Ok(unsafe { net::UnixStream::from_raw_fd(socket) })
+}
+
+pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> {
+ super::pair(libc::SOCK_STREAM)
+}
+
+pub(crate) fn local_addr(socket: &net::UnixStream) -> io::Result<SocketAddr> {
+ super::local_addr(socket.as_raw_fd())
+}
+
+pub(crate) fn peer_addr(socket: &net::UnixStream) -> io::Result<SocketAddr> {
+ super::peer_addr(socket.as_raw_fd())
+}
diff --git a/third_party/rust/mio/src/sys/unix/waker.rs b/third_party/rust/mio/src/sys/unix/waker.rs
new file mode 100644
index 0000000000..684fee981e
--- /dev/null
+++ b/third_party/rust/mio/src/sys/unix/waker.rs
@@ -0,0 +1,178 @@
+#[cfg(any(target_os = "linux", target_os = "android"))]
+mod eventfd {
+ use crate::sys::Selector;
+ use crate::{Interest, Token};
+
+ use std::fs::File;
+ use std::io::{self, Read, Write};
+ use std::os::unix::io::FromRawFd;
+
+ /// Waker backed by `eventfd`.
+ ///
+    /// `eventfd` is effectively a 64-bit counter. All writes must be of 8
+    /// bytes (64 bits) and are converted (native endian) into a 64-bit
+ /// unsigned integer and added to the count. Reads must also be 8 bytes and
+ /// reset the count to 0, returning the count.
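+    ///
+    /// A hedged sketch of that protocol, assuming `file` wraps a raw
+    /// `eventfd` descriptor:
+    ///
+    /// ```ignore
+    /// use std::io::{Read, Write};
+    /// // Add 1 to the counter; this wakes a blocked or polled reader.
+    /// (&file).write(&1u64.to_ne_bytes())?;
+    /// // Read the current count and reset it to 0.
+    /// let mut buf = [0u8; 8];
+    /// (&file).read(&mut buf)?;
+    /// let count = u64::from_ne_bytes(buf);
+    /// ```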
+ #[derive(Debug)]
+ pub struct Waker {
+ fd: File,
+ }
+
+ impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ syscall!(eventfd(0, libc::EFD_CLOEXEC | libc::EFD_NONBLOCK)).and_then(|fd| {
+                // Turn the file descriptor into a file first, so we ensure
+ // it's closed when dropped, e.g. when register below fails.
+ let file = unsafe { File::from_raw_fd(fd) };
+ selector
+ .register(fd, token, Interest::READABLE)
+ .map(|()| Waker { fd: file })
+ })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ let buf: [u8; 8] = 1u64.to_ne_bytes();
+ match (&self.fd).write(&buf) {
+ Ok(_) => Ok(()),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
+ // Writing only blocks if the counter is going to overflow.
+ // So we'll reset the counter to 0 and wake it again.
+ self.reset()?;
+ self.wake()
+ }
+ Err(err) => Err(err),
+ }
+ }
+
+        /// Reset the eventfd object; this only needs to be called if `wake` fails.
+ fn reset(&self) -> io::Result<()> {
+ let mut buf: [u8; 8] = 0u64.to_ne_bytes();
+ match (&self.fd).read(&mut buf) {
+ Ok(_) => Ok(()),
+ // If the `Waker` hasn't been awoken yet this will return a
+ // `WouldBlock` error which we can safely ignore.
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Ok(()),
+ Err(err) => Err(err),
+ }
+ }
+ }
+}
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub use self::eventfd::Waker;
+
+#[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+mod kqueue {
+ use crate::sys::Selector;
+ use crate::Token;
+
+ use std::io;
+
+ /// Waker backed by kqueue user space notifications (`EVFILT_USER`).
+ ///
+    /// The implementation is fairly simple: first the kqueue must be set up to
+    /// receive waker events, which is done by calling `Selector.setup_waker`.
+    /// Next we need access to the kqueue, so we duplicate its file descriptor.
+ /// Now waking is as simple as adding an event to the kqueue.
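+    ///
+    /// A minimal usage sketch within the crate (token value illustrative):
+    ///
+    /// ```ignore
+    /// let waker = Waker::new(&selector, Token(10))?;
+    /// waker.wake()?; // posts an `EVFILT_USER` event to the kqueue
+    /// ```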
+ #[derive(Debug)]
+ pub struct Waker {
+ selector: Selector,
+ token: Token,
+ }
+
+ impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ selector.try_clone().and_then(|selector| {
+ selector
+ .setup_waker(token)
+ .map(|()| Waker { selector, token })
+ })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ self.selector.wake(self.token)
+ }
+ }
+}
+
+#[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+pub use self::kqueue::Waker;
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "illumos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+))]
+mod pipe {
+ use crate::sys::unix::Selector;
+ use crate::{Interest, Token};
+
+ use std::fs::File;
+ use std::io::{self, Read, Write};
+ use std::os::unix::io::FromRawFd;
+
+ /// Waker backed by a unix pipe.
+ ///
+ /// Waker controls both the sending and receiving ends and empties the pipe
+ /// if writing to it (waking) fails.
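+    ///
+    /// A minimal sketch of the underlying mechanism, assuming a non-blocking
+    /// `sender`/`receiver` pipe pair:
+    ///
+    /// ```ignore
+    /// use std::io::{Read, Write};
+    /// // Wake: any byte makes the read side readable.
+    /// (&sender).write(&[1])?;
+    /// // Drain: empty the pipe so the next wake is observable again.
+    /// let mut buf = [0u8; 4096];
+    /// while let Ok(n) = (&receiver).read(&mut buf) {
+    ///     if n == 0 { break; }
+    /// }
+    /// ```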
+ #[derive(Debug)]
+ pub struct Waker {
+ sender: File,
+ receiver: File,
+ }
+
+ impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ let mut fds = [-1; 2];
+ syscall!(pipe2(fds.as_mut_ptr(), libc::O_NONBLOCK | libc::O_CLOEXEC))?;
+            // Turn the file descriptors into files first, so we ensure
+ // they're closed when dropped, e.g. when register below fails.
+ let sender = unsafe { File::from_raw_fd(fds[1]) };
+ let receiver = unsafe { File::from_raw_fd(fds[0]) };
+ selector
+ .register(fds[0], token, Interest::READABLE)
+ .map(|()| Waker { sender, receiver })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ // The epoll emulation on some illumos systems currently requires
+ // the pipe buffer to be completely empty for an edge-triggered
+ // wakeup on the pipe read side.
+ #[cfg(target_os = "illumos")]
+ self.empty();
+
+ match (&self.sender).write(&[1]) {
+ Ok(_) => Ok(()),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
+ // The reading end is full so we'll empty the buffer and try
+ // again.
+ self.empty();
+ self.wake()
+ }
+ Err(ref err) if err.kind() == io::ErrorKind::Interrupted => self.wake(),
+ Err(err) => Err(err),
+ }
+ }
+
+        /// Empty the pipe's buffer; this only needs to be called if `wake` fails.
+ /// This ignores any errors.
+ fn empty(&self) {
+ let mut buf = [0; 4096];
+ loop {
+ match (&self.receiver).read(&mut buf) {
+ Ok(n) if n > 0 => continue,
+ _ => return,
+ }
+ }
+ }
+ }
+}
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "illumos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+))]
+pub use self::pipe::Waker;
diff --git a/third_party/rust/mio/src/sys/windows/afd.rs b/third_party/rust/mio/src/sys/windows/afd.rs
new file mode 100644
index 0000000000..6eae3bc035
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/afd.rs
@@ -0,0 +1,237 @@
+use ntapi::ntioapi::{IO_STATUS_BLOCK_u, IO_STATUS_BLOCK};
+use ntapi::ntioapi::{NtCancelIoFileEx, NtDeviceIoControlFile};
+use ntapi::ntrtl::RtlNtStatusToDosError;
+use std::fmt;
+use std::fs::File;
+use std::io;
+use std::mem::size_of;
+use std::os::windows::io::AsRawHandle;
+use std::ptr::null_mut;
+use winapi::shared::ntdef::{HANDLE, LARGE_INTEGER, NTSTATUS, PVOID, ULONG};
+use winapi::shared::ntstatus::{STATUS_NOT_FOUND, STATUS_PENDING, STATUS_SUCCESS};
+
+const IOCTL_AFD_POLL: ULONG = 0x00012024;
+
+/// Winsock2 AFD driver instance.
+///
+/// All operations are unsafe because the `IO_STATUS_BLOCK` parameter is used
+/// by the AFD driver while the status is `STATUS_PENDING`, i.e. before the
+/// I/O Completion Port returns its result.
+#[derive(Debug)]
+pub struct Afd {
+ fd: File,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct AfdPollHandleInfo {
+ pub handle: HANDLE,
+ pub events: ULONG,
+ pub status: NTSTATUS,
+}
+
+unsafe impl Send for AfdPollHandleInfo {}
+
+#[repr(C)]
+pub struct AfdPollInfo {
+ pub timeout: LARGE_INTEGER,
+    // Can only have the value 1.
+ pub number_of_handles: ULONG,
+ pub exclusive: ULONG,
+ pub handles: [AfdPollHandleInfo; 1],
+}
+
+impl fmt::Debug for AfdPollInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AfdPollInfo").finish()
+ }
+}
+
+impl Afd {
+ /// Poll `Afd` instance with `AfdPollInfo`.
+ ///
+ /// # Unsafety
+ ///
+    /// This function is unsafe because the memory of the `IO_STATUS_BLOCK` is
+    /// still used by the `Afd` instance while the result is `Ok(false)`
+    /// (`STATUS_PENDING`). `iosb` must remain untouched for as long as the
+    /// operation is in effect, except by the `cancel` method, so be careful
+    /// not to call `poll` again while a poll is still pending. Callers should
+    /// deallocate their overlapped value on error to prevent a memory leak.
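+    ///
+    /// A hedged usage sketch (`iosb_ptr` and `overlapped_ptr` are illustrative
+    /// names; `info` and the `IO_STATUS_BLOCK` must stay alive until the
+    /// operation completes):
+    ///
+    /// ```ignore
+    /// let mut info = AfdPollInfo::zeroed();
+    /// let finished = unsafe { afd.poll(&mut info, iosb_ptr, overlapped_ptr)? };
+    /// if !finished {
+    ///     // `STATUS_PENDING`: the result arrives on the completion port.
+    /// }
+    /// ```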
+ pub unsafe fn poll(
+ &self,
+ info: &mut AfdPollInfo,
+ iosb: *mut IO_STATUS_BLOCK,
+ overlapped: PVOID,
+ ) -> io::Result<bool> {
+ let info_ptr: PVOID = info as *mut _ as PVOID;
+ (*iosb).u.Status = STATUS_PENDING;
+ let status = NtDeviceIoControlFile(
+ self.fd.as_raw_handle(),
+ null_mut(),
+ None,
+ overlapped,
+ iosb,
+ IOCTL_AFD_POLL,
+ info_ptr,
+ size_of::<AfdPollInfo>() as u32,
+ info_ptr,
+ size_of::<AfdPollInfo>() as u32,
+ );
+ match status {
+ STATUS_SUCCESS => Ok(true),
+ STATUS_PENDING => Ok(false),
+ _ => Err(io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ )),
+ }
+ }
+
+ /// Cancel previous polled request of `Afd`.
+ ///
+    /// `iosb` must have been used by `poll` first for `cancel` to be valid.
+ ///
+ /// # Unsafety
+ ///
+    /// This function is unsafe because the memory of the `IO_STATUS_BLOCK` is
+    /// still used by the `Afd` instance while the result is `Ok(false)`
+    /// (`STATUS_PENDING`). Use it only while the request is still being
+    /// polled, so that the `IO_STATUS_BLOCK` is valid. Callers should NOT
+    /// deallocate their overlapped value after `cancel`, to prevent a double
+    /// free.
+ pub unsafe fn cancel(&self, iosb: *mut IO_STATUS_BLOCK) -> io::Result<()> {
+ if (*iosb).u.Status != STATUS_PENDING {
+ return Ok(());
+ }
+
+ let mut cancel_iosb = IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_u { Status: 0 },
+ Information: 0,
+ };
+ let status = NtCancelIoFileEx(self.fd.as_raw_handle(), iosb, &mut cancel_iosb);
+ if status == STATUS_SUCCESS || status == STATUS_NOT_FOUND {
+ return Ok(());
+ }
+ Err(io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ ))
+ }
+}
+
+cfg_io_source! {
+ use std::mem::zeroed;
+ use std::os::windows::io::{FromRawHandle, RawHandle};
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ use miow::iocp::CompletionPort;
+ use ntapi::ntioapi::{NtCreateFile, FILE_OPEN};
+ use winapi::shared::ntdef::{OBJECT_ATTRIBUTES, UNICODE_STRING, USHORT, WCHAR};
+ use winapi::um::handleapi::INVALID_HANDLE_VALUE;
+ use winapi::um::winbase::{SetFileCompletionNotificationModes, FILE_SKIP_SET_EVENT_ON_HANDLE};
+ use winapi::um::winnt::{SYNCHRONIZE, FILE_SHARE_READ, FILE_SHARE_WRITE};
+
+ const AFD_HELPER_ATTRIBUTES: OBJECT_ATTRIBUTES = OBJECT_ATTRIBUTES {
+ Length: size_of::<OBJECT_ATTRIBUTES>() as ULONG,
+ RootDirectory: null_mut(),
+ ObjectName: &AFD_OBJ_NAME as *const _ as *mut _,
+ Attributes: 0,
+ SecurityDescriptor: null_mut(),
+ SecurityQualityOfService: null_mut(),
+ };
+
+ const AFD_OBJ_NAME: UNICODE_STRING = UNICODE_STRING {
+ Length: (AFD_HELPER_NAME.len() * size_of::<WCHAR>()) as USHORT,
+ MaximumLength: (AFD_HELPER_NAME.len() * size_of::<WCHAR>()) as USHORT,
+ Buffer: AFD_HELPER_NAME.as_ptr() as *mut _,
+ };
+
+ const AFD_HELPER_NAME: &[WCHAR] = &[
+ '\\' as _,
+ 'D' as _,
+ 'e' as _,
+ 'v' as _,
+ 'i' as _,
+ 'c' as _,
+ 'e' as _,
+ '\\' as _,
+ 'A' as _,
+ 'f' as _,
+ 'd' as _,
+ '\\' as _,
+ 'M' as _,
+ 'i' as _,
+ 'o' as _
+ ];
+
+ static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(0);
+
+ impl AfdPollInfo {
+ pub fn zeroed() -> AfdPollInfo {
+ unsafe { zeroed() }
+ }
+ }
+
+ impl Afd {
+ /// Create new Afd instance.
+ pub fn new(cp: &CompletionPort) -> io::Result<Afd> {
+ let mut afd_helper_handle: HANDLE = INVALID_HANDLE_VALUE;
+ let mut iosb = IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_u { Status: 0 },
+ Information: 0,
+ };
+
+ unsafe {
+ let status = NtCreateFile(
+ &mut afd_helper_handle as *mut _,
+ SYNCHRONIZE,
+ &AFD_HELPER_ATTRIBUTES as *const _ as *mut _,
+ &mut iosb,
+ null_mut(),
+ 0,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ FILE_OPEN,
+ 0,
+ null_mut(),
+ 0,
+ );
+ if status != STATUS_SUCCESS {
+ let raw_err = io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ );
+ let msg = format!("Failed to open \\Device\\Afd\\Mio: {}", raw_err);
+ return Err(io::Error::new(raw_err.kind(), msg));
+ }
+ let fd = File::from_raw_handle(afd_helper_handle as RawHandle);
+ // Increment by 2 to reserve space for other types of handles.
+                // Non-AFD types (currently only `NamedPipe`) use odd-numbered
+                // tokens. This allows the selector to differentiate between them
+ // and dispatch events accordingly.
+ let token = NEXT_TOKEN.fetch_add(2, Ordering::Relaxed) + 2;
+ let afd = Afd { fd };
+ cp.add_handle(token, &afd.fd)?;
+ match SetFileCompletionNotificationModes(
+ afd_helper_handle,
+ FILE_SKIP_SET_EVENT_ON_HANDLE,
+ ) {
+ 0 => Err(io::Error::last_os_error()),
+ _ => Ok(afd),
+ }
+ }
+ }
+ }
+}
+
+pub const POLL_RECEIVE: u32 = 0b0_0000_0001;
+pub const POLL_RECEIVE_EXPEDITED: u32 = 0b0_0000_0010;
+pub const POLL_SEND: u32 = 0b0_0000_0100;
+pub const POLL_DISCONNECT: u32 = 0b0_0000_1000;
+pub const POLL_ABORT: u32 = 0b0_0001_0000;
+pub const POLL_LOCAL_CLOSE: u32 = 0b0_0010_0000;
+// Not used, as it is indicated in every event while a connection is
+// connected, not just the first time a connection is established.
+// Also see https://github.com/piscisaureus/wepoll/commit/8b7b340610f88af3d83f40fb728e7b850b090ece.
+pub const POLL_CONNECT: u32 = 0b0_0100_0000;
+pub const POLL_ACCEPT: u32 = 0b0_1000_0000;
+pub const POLL_CONNECT_FAIL: u32 = 0b1_0000_0000;
+
+pub const KNOWN_EVENTS: u32 = POLL_RECEIVE
+ | POLL_RECEIVE_EXPEDITED
+ | POLL_SEND
+ | POLL_DISCONNECT
+ | POLL_ABORT
+ | POLL_LOCAL_CLOSE
+ | POLL_ACCEPT
+ | POLL_CONNECT_FAIL;
diff --git a/third_party/rust/mio/src/sys/windows/event.rs b/third_party/rust/mio/src/sys/windows/event.rs
new file mode 100644
index 0000000000..a49252a296
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/event.rs
@@ -0,0 +1,162 @@
+use std::fmt;
+
+use miow::iocp::CompletionStatus;
+
+use super::afd;
+use crate::Token;
+
+#[derive(Clone)]
+pub struct Event {
+ pub flags: u32,
+ pub data: u64,
+}
+
+pub fn token(event: &Event) -> Token {
+ Token(event.data as usize)
+}
+
+impl Event {
+ pub(super) fn new(token: Token) -> Event {
+ Event {
+ flags: 0,
+ data: usize::from(token) as u64,
+ }
+ }
+
+ pub(super) fn set_readable(&mut self) {
+ self.flags |= afd::POLL_RECEIVE
+ }
+
+ #[cfg(feature = "os-ext")]
+ pub(super) fn set_writable(&mut self) {
+ self.flags |= afd::POLL_SEND;
+ }
+
+ pub(super) fn from_completion_status(status: &CompletionStatus) -> Event {
+ Event {
+ flags: status.bytes_transferred(),
+ data: status.token() as u64,
+ }
+ }
+
+ pub(super) fn to_completion_status(&self) -> CompletionStatus {
+ CompletionStatus::new(self.flags, self.data as usize, std::ptr::null_mut())
+ }
+}
+
+pub(crate) const READABLE_FLAGS: u32 = afd::POLL_RECEIVE
+ | afd::POLL_DISCONNECT
+ | afd::POLL_ACCEPT
+ | afd::POLL_ABORT
+ | afd::POLL_CONNECT_FAIL;
+pub(crate) const WRITABLE_FLAGS: u32 = afd::POLL_SEND | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+pub(crate) const ERROR_FLAGS: u32 = afd::POLL_CONNECT_FAIL;
+pub(crate) const READ_CLOSED_FLAGS: u32 =
+ afd::POLL_DISCONNECT | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+pub(crate) const WRITE_CLOSED_FLAGS: u32 = afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+
+pub fn is_readable(event: &Event) -> bool {
+ event.flags & READABLE_FLAGS != 0
+}
+
+pub fn is_writable(event: &Event) -> bool {
+ event.flags & WRITABLE_FLAGS != 0
+}
+
+pub fn is_error(event: &Event) -> bool {
+ event.flags & ERROR_FLAGS != 0
+}
+
+pub fn is_read_closed(event: &Event) -> bool {
+ event.flags & READ_CLOSED_FLAGS != 0
+}
+
+pub fn is_write_closed(event: &Event) -> bool {
+ event.flags & WRITE_CLOSED_FLAGS != 0
+}
+
+pub fn is_priority(event: &Event) -> bool {
+ event.flags & afd::POLL_RECEIVE_EXPEDITED != 0
+}
+
+pub fn is_aio(_: &Event) -> bool {
+ // Not supported.
+ false
+}
+
+pub fn is_lio(_: &Event) -> bool {
+ // Not supported.
+ false
+}
+
+pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_flags(got: &u32, want: &u32) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FlagsDetails(u32),
+ check_flags,
+ afd::POLL_RECEIVE,
+ afd::POLL_RECEIVE_EXPEDITED,
+ afd::POLL_SEND,
+ afd::POLL_DISCONNECT,
+ afd::POLL_ABORT,
+ afd::POLL_LOCAL_CLOSE,
+ afd::POLL_CONNECT,
+ afd::POLL_ACCEPT,
+ afd::POLL_CONNECT_FAIL,
+ );
+
+ f.debug_struct("event")
+ .field("flags", &FlagsDetails(event.flags))
+ .field("data", &event.data)
+ .finish()
+}
+
+pub struct Events {
+ /// Raw I/O event completions are filled in here by the call to `get_many`
+ /// on the completion port above. These are then processed to run callbacks
+ /// which figure out what to do after the event is done.
+ pub statuses: Box<[CompletionStatus]>,
+
+    /// Literal events returned by `get` upwards to the `EventLoop`. This file
+    /// doesn't really modify this (except for the waker); instead, almost all
+    /// events are filled in by the `ReadinessQueue` from the `poll` module.
+ pub events: Vec<Event>,
+}
+
+impl Events {
+ pub fn with_capacity(cap: usize) -> Events {
+ // Note that it's possible for the output `events` to grow beyond the
+ // capacity as it can also include deferred events, but that's certainly
+ // not the end of the world!
+ Events {
+ statuses: vec![CompletionStatus::zero(); cap].into_boxed_slice(),
+ events: Vec::with_capacity(cap),
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ pub fn get(&self, idx: usize) -> Option<&Event> {
+ self.events.get(idx)
+ }
+
+ pub fn clear(&mut self) {
+ self.events.clear();
+ for status in self.statuses.iter_mut() {
+ *status = CompletionStatus::zero();
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/sys/windows/io_status_block.rs b/third_party/rust/mio/src/sys/windows/io_status_block.rs
new file mode 100644
index 0000000000..3e60334961
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/io_status_block.rs
@@ -0,0 +1,40 @@
+use std::fmt;
+use std::ops::{Deref, DerefMut};
+
+use ntapi::ntioapi::IO_STATUS_BLOCK;
+
+pub struct IoStatusBlock(IO_STATUS_BLOCK);
+
+cfg_io_source! {
+ use ntapi::ntioapi::IO_STATUS_BLOCK_u;
+
+ impl IoStatusBlock {
+ pub fn zeroed() -> Self {
+ Self(IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_u { Status: 0 },
+ Information: 0,
+ })
+ }
+ }
+}
+
+unsafe impl Send for IoStatusBlock {}
+
+impl Deref for IoStatusBlock {
+ type Target = IO_STATUS_BLOCK;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for IoStatusBlock {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl fmt::Debug for IoStatusBlock {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("IoStatusBlock").finish()
+ }
+}
diff --git a/third_party/rust/mio/src/sys/windows/mod.rs b/third_party/rust/mio/src/sys/windows/mod.rs
new file mode 100644
index 0000000000..7048351355
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/mod.rs
@@ -0,0 +1,147 @@
+mod afd;
+mod io_status_block;
+
+pub mod event;
+pub use event::{Event, Events};
+
+mod selector;
+pub use selector::{Selector, SelectorInner, SockState};
+
+mod overlapped;
+use overlapped::Overlapped;
+
+// Macros must be defined before the modules that use them
+cfg_net! {
+ /// Helper macro to execute a system call that returns an `io::Result`.
+ //
+    // The macro must be defined before any modules that use it.
+ macro_rules! syscall {
+ ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{
+ let res = unsafe { $fn($($arg, )*) };
+ if $err_test(&res, &$err_value) {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(res)
+ }
+ }};
+ }
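+
+    // An illustrative invocation (the error test and value are assumptions
+    // about the callee's convention):
+    //
+    //     syscall!(closesocket(socket), PartialEq::eq, SOCKET_ERROR)
+    //
+    // runs the call and returns `io::Error::last_os_error()` when the result
+    // equals `SOCKET_ERROR`.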
+
+ mod net;
+
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+}
+
+cfg_os_ext! {
+ pub(crate) mod named_pipe;
+}
+
+mod waker;
+pub(crate) use waker::Waker;
+
+cfg_io_source! {
+ use std::io;
+ use std::os::windows::io::RawSocket;
+ use std::pin::Pin;
+ use std::sync::{Arc, Mutex};
+
+ use crate::{Interest, Registry, Token};
+
+ struct InternalState {
+ selector: Arc<SelectorInner>,
+ token: Token,
+ interests: Interest,
+ sock_state: Pin<Arc<Mutex<SockState>>>,
+ }
+
+ impl Drop for InternalState {
+ fn drop(&mut self) {
+ let mut sock_state = self.sock_state.lock().unwrap();
+ sock_state.mark_delete();
+ }
+ }
+
+ pub struct IoSourceState {
+ // This is `None` if the socket has not yet been registered.
+ //
+ // We box the internal state to not increase the size on the stack as the
+ // type might move around a lot.
+ inner: Option<Box<InternalState>>,
+ }
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState { inner: None }
+ }
+
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ let result = f(io);
+ if let Err(ref e) = result {
+ if e.kind() == io::ErrorKind::WouldBlock {
+ self.inner.as_ref().map_or(Ok(()), |state| {
+ state
+ .selector
+ .reregister(state.sock_state.clone(), state.token, state.interests)
+ })?;
+ }
+ }
+ result
+ }
+
+ pub fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ socket: RawSocket,
+ ) -> io::Result<()> {
+ if self.inner.is_some() {
+ Err(io::ErrorKind::AlreadyExists.into())
+ } else {
+ registry
+ .selector()
+ .register(socket, token, interests)
+ .map(|state| {
+ self.inner = Some(Box::new(state));
+ })
+ }
+ }
+
+ pub fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ match self.inner.as_mut() {
+ Some(state) => {
+ registry
+ .selector()
+ .reregister(state.sock_state.clone(), token, interests)
+ .map(|()| {
+ state.token = token;
+ state.interests = interests;
+ })
+ }
+ None => Err(io::ErrorKind::NotFound.into()),
+ }
+ }
+
+ pub fn deregister(&mut self) -> io::Result<()> {
+ match self.inner.as_mut() {
+ Some(state) => {
+ {
+ let mut sock_state = state.sock_state.lock().unwrap();
+ sock_state.mark_delete();
+ }
+ self.inner = None;
+ Ok(())
+ }
+ None => Err(io::ErrorKind::NotFound.into()),
+ }
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/sys/windows/named_pipe.rs b/third_party/rust/mio/src/sys/windows/named_pipe.rs
new file mode 100644
index 0000000000..adda51f23c
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/named_pipe.rs
@@ -0,0 +1,782 @@
+use std::ffi::OsStr;
+use std::io::{self, Read, Write};
+use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle};
+use std::sync::atomic::Ordering::{Relaxed, SeqCst};
+use std::sync::atomic::{AtomicBool, AtomicUsize};
+use std::sync::{Arc, Mutex};
+use std::{fmt, mem, slice};
+
+use miow::iocp::{CompletionPort, CompletionStatus};
+use miow::pipe;
+use winapi::shared::winerror::{ERROR_BROKEN_PIPE, ERROR_PIPE_LISTENING};
+use winapi::um::ioapiset::CancelIoEx;
+use winapi::um::minwinbase::{OVERLAPPED, OVERLAPPED_ENTRY};
+
+use crate::event::Source;
+use crate::sys::windows::{Event, Overlapped};
+use crate::Registry;
+use crate::{Interest, Token};
+
+/// Non-blocking windows named pipe.
+///
+/// This structure internally contains a `HANDLE` which represents the named
+/// pipe, and also maintains state associated with the mio event loop and active
+/// I/O operations that have been scheduled to translate IOCP to a readiness
+/// model.
+///
+/// Note, IOCP is a *completion* based model whereas mio is a *readiness* based
+/// model. To bridge this, `NamedPipe` performs internal buffering. Writes are
+/// written to an internal buffer and the buffer is submitted to IOCP. IOCP
+/// reads are submitted using internal buffers and `NamedPipe::read` reads from
+/// this internal buffer.
+///
+/// # Trait implementations
+///
+/// The `Read` and `Write` traits are implemented for `NamedPipe` and for
+/// `&NamedPipe`. This reflects that a named pipe can be read from and written
+/// to concurrently, even through a shared reference. Typically a named
+/// pipe needs to be connected to a client before it can be read or written,
+/// however.
+///
+/// Note that for I/O operations on a named pipe to succeed then the named pipe
+/// needs to be associated with an event loop. Until this happens all I/O
+/// operations will return a "would block" error.
+///
+/// # Managing connections
+///
+/// The `NamedPipe` type supports a `connect` method to connect to a client and
+/// a `disconnect` method to disconnect from that client. These two methods only
+/// work once a named pipe is associated with an event loop.
+///
+/// The `connect` method will succeed asynchronously and a completion can be
+/// detected once the object receives a writable notification.
+///
+/// # Named pipe clients
+///
+/// Currently, to create a client of a named pipe server, you can use the
+/// `OpenOptions` type in the standard library to create a `File` that connects
+/// to a named pipe. Afterwards you can use the `into_raw_handle` method coupled
+/// with the `NamedPipe::from_raw_handle` method to convert that to a named pipe
+/// that can operate asynchronously. Don't forget to pass the
+/// `FILE_FLAG_OVERLAPPED` flag when opening the `File`.
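+///
+/// A hedged sketch of that client setup (the pipe name is illustrative):
+///
+/// ```ignore
+/// use std::fs::OpenOptions;
+/// use std::os::windows::fs::OpenOptionsExt;
+/// use std::os::windows::io::{FromRawHandle, IntoRawHandle};
+/// use winapi::um::winbase::FILE_FLAG_OVERLAPPED;
+///
+/// let file = OpenOptions::new()
+///     .read(true)
+///     .write(true)
+///     .custom_flags(FILE_FLAG_OVERLAPPED)
+///     .open(r"\\.\pipe\mio-example")?;
+/// let client = unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) };
+/// ```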
+pub struct NamedPipe {
+ inner: Arc<Inner>,
+}
+
+/// # Notes
+///
+/// The memory layout of this structure must be fixed as the
+/// `ptr_from_*_overlapped` methods depend on it, see the `ptr_from` test.
+#[repr(C)]
+struct Inner {
+ // NOTE: careful modifying the order of these three fields, the `ptr_from_*`
+ // methods depend on the layout!
+ connect: Overlapped,
+ read: Overlapped,
+ write: Overlapped,
+ // END NOTE.
+ handle: pipe::NamedPipe,
+ connecting: AtomicBool,
+ io: Mutex<Io>,
+ pool: Mutex<BufferPool>,
+}
+
+impl Inner {
+ /// Converts a pointer to `Inner.connect` to a pointer to `Inner`.
+ ///
+ /// # Unsafety
+ ///
+ /// Caller must ensure `ptr` is pointing to `Inner.connect`.
+ unsafe fn ptr_from_conn_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+        // `connect` is the first field, so the pointers are the same.
+ ptr.cast()
+ }
+
+ /// Same as [`ptr_from_conn_overlapped`] but for `Inner.read`.
+ unsafe fn ptr_from_read_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `read` is after `connect: Overlapped`.
+ (ptr as *mut Overlapped).wrapping_sub(1) as *const Inner
+ }
+
+ /// Same as [`ptr_from_conn_overlapped`] but for `Inner.write`.
+ unsafe fn ptr_from_write_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+        // `write` is after `connect: Overlapped` and `read: Overlapped`.
+ (ptr as *mut Overlapped).wrapping_sub(2) as *const Inner
+ }
+}
+
+#[test]
+fn ptr_from() {
+ use std::mem::ManuallyDrop;
+ use std::ptr;
+
+ let pipe = unsafe { ManuallyDrop::new(NamedPipe::from_raw_handle(ptr::null_mut())) };
+ let inner: &Inner = &pipe.inner;
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_conn_overlapped(&inner.connect as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_conn_overlapped` incorrect"
+ );
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_read_overlapped(&inner.read as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_read_overlapped` incorrect"
+ );
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_write_overlapped(&inner.write as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_write_overlapped` incorrect"
+ );
+}
+
+struct Io {
+ // Uniquely identifies the selector associated with this named pipe
+ cp: Option<Arc<CompletionPort>>,
+ // Token used to identify events
+ token: Option<Token>,
+ read: State,
+ write: State,
+ connect_error: Option<io::Error>,
+}
+
+#[derive(Debug)]
+enum State {
+ None,
+ Pending(Vec<u8>, usize),
+ Ok(Vec<u8>, usize),
+ Err(io::Error),
+}
+
+// Odd tokens are for named pipes
+static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(1);
+
+fn would_block() -> io::Error {
+ io::ErrorKind::WouldBlock.into()
+}
+
+impl NamedPipe {
+ /// Creates a new named pipe at the specified `addr` given a "reasonable
+ /// set" of initial configuration options.
+ pub fn new<A: AsRef<OsStr>>(addr: A) -> io::Result<NamedPipe> {
+ let pipe = pipe::NamedPipe::new(addr)?;
+ // Safety: nothing actually unsafe about this. The trait fn includes
+ // `unsafe`.
+ Ok(unsafe { NamedPipe::from_raw_handle(pipe.into_raw_handle()) })
+ }
+
+ /// Attempts to call `ConnectNamedPipe`, if possible.
+ ///
+ /// This function will attempt to connect this pipe to a client in an
+ /// asynchronous fashion. If the function immediately establishes a
+ /// connection to a client then `Ok(())` is returned. Otherwise if a
+ /// connection attempt was issued and is now in progress then a "would
+ /// block" error is returned.
+ ///
+    /// When the connection is finished, this object will be flagged as
+ /// being ready for a write, or otherwise in the writable state.
+ ///
+ /// # Errors
+ ///
+ /// This function will return a "would block" error if the pipe has not yet
+ /// been registered with an event loop, if the connection operation has
+ /// previously been issued but has not yet completed, or if the connect
+ /// itself was issued and didn't finish immediately.
+ ///
+ /// Normal I/O errors from the call to `ConnectNamedPipe` are returned
+ /// immediately.
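+    ///
+    /// A hedged usage sketch tying this together with `take_error`:
+    ///
+    /// ```ignore
+    /// match pipe.connect() {
+    ///     Ok(()) => { /* a client connected immediately */ }
+    ///     Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+    ///         // Wait for a writable event, then call `take_error` to see
+    ///         // whether the connection actually succeeded.
+    ///     }
+    ///     Err(e) => return Err(e),
+    /// }
+    /// ```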
+ pub fn connect(&self) -> io::Result<()> {
+ // "Acquire the connecting lock" or otherwise just make sure we're the
+ // only operation that's using the `connect` overlapped instance.
+ if self.inner.connecting.swap(true, SeqCst) {
+ return Err(would_block());
+ }
+
+ // Now that we've flagged ourselves in the connecting state, issue the
+ // connection attempt. Afterwards interpret the return value and set
+ // internal state accordingly.
+ let res = unsafe {
+ let overlapped = self.inner.connect.as_ptr() as *mut _;
+ self.inner.handle.connect_overlapped(overlapped)
+ };
+
+ match res {
+ // The connection operation finished immediately, so let's schedule
+ // reads/writes and such.
+ Ok(true) => {
+ self.inner.connecting.store(false, SeqCst);
+ Inner::post_register(&self.inner, None);
+ Ok(())
+ }
+
+ // If the overlapped operation was successful and didn't finish
+ // immediately then we forget a copy of the arc we hold
+ // internally. This ensures that when the completion status comes
+ // in for the I/O operation finishing it'll have a reference
+ // associated with it and our data will still be valid. The
+ // `connect_done` function will "reify" this forgotten pointer to
+ // drop the refcount on the other side.
+ Ok(false) => {
+ mem::forget(self.inner.clone());
+ Err(would_block())
+ }
+
+ Err(e) => {
+ self.inner.connecting.store(false, SeqCst);
+ Err(e)
+ }
+ }
+ }
+
+ /// Takes any internal error that has happened after the last I/O operation
+ /// which hasn't been retrieved yet.
+ ///
+ /// This is particularly useful when detecting failed attempts to `connect`.
+ /// After a completed `connect` flags this pipe as writable then callers
+ /// must invoke this method to determine whether the connection actually
+ /// succeeded. If this function returns `None` then a client is connected,
+ /// otherwise it returns an error of what happened and a client shouldn't be
+ /// connected.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ Ok(self.inner.io.lock().unwrap().connect_error.take())
+ }
+
+ /// Disconnects this named pipe from a connected client.
+ ///
+ /// This function will disconnect the pipe from a connected client, if any,
+ /// transitively calling the `DisconnectNamedPipe` function.
+ ///
+ /// After a `disconnect` is issued, then a `connect` may be called again to
+ /// connect to another client.
+ pub fn disconnect(&self) -> io::Result<()> {
+ self.inner.handle.disconnect()
+ }
+}
+
+impl FromRawHandle for NamedPipe {
+ unsafe fn from_raw_handle(handle: RawHandle) -> NamedPipe {
+ NamedPipe {
+ inner: Arc::new(Inner {
+ // Safety: not really unsafe
+ handle: pipe::NamedPipe::from_raw_handle(handle),
+ // transmutes to straddle winapi versions (mio 0.6 is on an
+ // older winapi)
+ connect: Overlapped::new(connect_done),
+ connecting: AtomicBool::new(false),
+ read: Overlapped::new(read_done),
+ write: Overlapped::new(write_done),
+ io: Mutex::new(Io {
+ cp: None,
+ token: None,
+ read: State::None,
+ write: State::None,
+ connect_error: None,
+ }),
+ pool: Mutex::new(BufferPool::with_capacity(2)),
+ }),
+ }
+ }
+}
+
+impl Read for NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ <&NamedPipe as Read>::read(&mut &*self, buf)
+ }
+}
+
+impl Write for NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ <&NamedPipe as Write>::write(&mut &*self, buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ <&NamedPipe as Write>::flush(&mut &*self)
+ }
+}
+
+impl<'a> Read for &'a NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut state = self.inner.io.lock().unwrap();
+
+ if state.token.is_none() {
+ return Err(would_block());
+ }
+
+ match mem::replace(&mut state.read, State::None) {
+ // In theory not possible with `token` checked above,
+            // but return "would block" for now.
+ State::None => Err(would_block()),
+
+ // A read is in flight, still waiting for it to finish
+ State::Pending(buf, amt) => {
+ state.read = State::Pending(buf, amt);
+ Err(would_block())
+ }
+
+ // We previously read something into `data`, try to copy out some
+ // data. If we copy out all the data schedule a new read and
+ // otherwise store the buffer to get read later.
+ State::Ok(data, cur) => {
+ let n = {
+ let mut remaining = &data[cur..];
+ remaining.read(buf)?
+ };
+ let next = cur + n;
+ if next != data.len() {
+ state.read = State::Ok(data, next);
+ } else {
+ self.inner.put_buffer(data);
+ Inner::schedule_read(&self.inner, &mut state, None);
+ }
+ Ok(n)
+ }
+
+ // Looks like an in-flight read hit an error, return that here while
+ // we schedule a new one.
+ State::Err(e) => {
+ Inner::schedule_read(&self.inner, &mut state, None);
+ if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) {
+ Ok(0)
+ } else {
+ Err(e)
+ }
+ }
+ }
+ }
+}
+
+impl<'a> Write for &'a NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ // Make sure there's no writes pending
+ let mut io = self.inner.io.lock().unwrap();
+
+ if io.token.is_none() {
+ return Err(would_block());
+ }
+
+ match io.write {
+ State::None => {}
+ State::Err(_) => match mem::replace(&mut io.write, State::None) {
+ State::Err(e) => return Err(e),
+ // `io` is locked, so this branch is unreachable
+ _ => unreachable!(),
+ },
+ // any other state should be handled in `write_done`
+ _ => {
+ return Err(would_block());
+ }
+ }
+
+ // Move `buf` onto the heap and fire off the write
+ let mut owned_buf = self.inner.get_buffer();
+ owned_buf.extend(buf);
+ match Inner::maybe_schedule_write(&self.inner, owned_buf, 0, &mut io)? {
+ // Some bytes are written immediately
+ Some(n) => Ok(n),
+            // Write operation is enqueued for the whole buffer
+ None => Ok(buf.len()),
+ }
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Source for NamedPipe {
+ fn register(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, false)?;
+
+ if io.token.is_some() {
+ return Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a `Registry`",
+ ));
+ }
+
+ if io.cp.is_none() {
+ let selector = registry.selector();
+
+ io.cp = Some(selector.clone_port());
+
+ let inner_token = NEXT_TOKEN.fetch_add(2, Relaxed) + 2;
+ selector
+ .inner
+ .cp
+ .add_handle(inner_token, &self.inner.handle)?;
+ }
+
+ io.token = Some(token);
+ drop(io);
+
+ Inner::post_register(&self.inner, None);
+
+ Ok(())
+ }
+
+ fn reregister(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, true)?;
+
+ io.token = Some(token);
+ drop(io);
+
+ Inner::post_register(&self.inner, None);
+
+ Ok(())
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, true)?;
+
+ if io.token.is_none() {
+ return Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ ));
+ }
+
+ io.token = None;
+ Ok(())
+ }
+}
+
+impl AsRawHandle for NamedPipe {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.inner.handle.as_raw_handle()
+ }
+}
+
+impl fmt::Debug for NamedPipe {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.handle.fmt(f)
+ }
+}
+
+impl Drop for NamedPipe {
+ fn drop(&mut self) {
+ // Cancel pending reads/connects, but don't cancel writes to ensure that
+ // everything is flushed out.
+ unsafe {
+ if self.inner.connecting.load(SeqCst) {
+ drop(cancel(&self.inner.handle, &self.inner.connect));
+ }
+
+ let io = self.inner.io.lock().unwrap();
+ if let State::Pending(..) = io.read {
+ drop(cancel(&self.inner.handle, &self.inner.read));
+ }
+ }
+ }
+}
+
+impl Inner {
+ /// Schedules a read to happen in the background, executing an overlapped
+ /// operation.
+ ///
+ /// This function returns `true` if a normal error happens or if the read
+ /// is scheduled in the background. If the pipe is no longer connected
+ /// (ERROR_PIPE_LISTENING) then `false` is returned and no read is
+ /// scheduled.
+ fn schedule_read(me: &Arc<Inner>, io: &mut Io, events: Option<&mut Vec<Event>>) -> bool {
+ // Check to see if a read is already scheduled/completed
+ match io.read {
+ State::None => {}
+ _ => return true,
+ }
+
+ // Allocate a buffer and schedule the read.
+ let mut buf = me.get_buffer();
+ let e = unsafe {
+ let overlapped = me.read.as_ptr() as *mut _;
+ let slice = slice::from_raw_parts_mut(buf.as_mut_ptr(), buf.capacity());
+ me.handle.read_overlapped(slice, overlapped)
+ };
+
+ match e {
+ // See `NamedPipe::connect` above for the rationale behind `forget`
+ Ok(_) => {
+ io.read = State::Pending(buf, 0); // 0 is ignored on read side
+ mem::forget(me.clone());
+ true
+ }
+
+ // If ERROR_PIPE_LISTENING happens then it's not a real read error,
+ // we just need to wait for a connect.
+ Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_LISTENING as i32) => false,
+
+ // If some other error happened, though, we're now readable to give
+ // out the error.
+ Err(e) => {
+ io.read = State::Err(e);
+ io.notify_readable(events);
+ true
+ }
+ }
+ }
+
+ /// Maybe schedules overlapped write operation.
+ ///
+    /// * `None` means that the overlapped operation was enqueued.
+    /// * `Some(n)` means that `n` bytes were immediately written.
+    ///   Note that `write_done` will fire anyway to clean up the state.
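+    ///
+    /// A sketch of how callers interpret the result (mirroring `write` above):
+    ///
+    /// ```ignore
+    /// match Inner::maybe_schedule_write(&me, buf, 0, &mut io)? {
+    ///     Some(n) => { /* `n` bytes were written immediately */ }
+    ///     None => { /* the whole buffer was enqueued */ }
+    /// }
+    /// ```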
+ fn maybe_schedule_write(
+ me: &Arc<Inner>,
+ buf: Vec<u8>,
+ pos: usize,
+ io: &mut Io,
+ ) -> io::Result<Option<usize>> {
+ // Very similar to `schedule_read` above, just done for the write half.
+ let e = unsafe {
+ let overlapped = me.write.as_ptr() as *mut _;
+ me.handle.write_overlapped(&buf[pos..], overlapped)
+ };
+
+ // See `connect` above for the rationale behind `forget`
+ match e {
+ // `n` bytes are written immediately
+ Ok(Some(n)) => {
+ io.write = State::Ok(buf, pos);
+ mem::forget(me.clone());
+ Ok(Some(n))
+ }
+ // write operation is enqueued
+ Ok(None) => {
+ io.write = State::Pending(buf, pos);
+ mem::forget(me.clone());
+ Ok(None)
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ fn schedule_write(
+ me: &Arc<Inner>,
+ buf: Vec<u8>,
+ pos: usize,
+ io: &mut Io,
+ events: Option<&mut Vec<Event>>,
+ ) {
+ match Inner::maybe_schedule_write(me, buf, pos, io) {
+ Ok(Some(_)) => {
+ // immediate result will be handled in `write_done`,
+ // so we'll reinterpret the `Ok` state
+ let state = mem::replace(&mut io.write, State::None);
+ io.write = match state {
+ State::Ok(buf, pos) => State::Pending(buf, pos),
+ // io is locked, so this branch is unreachable
+ _ => unreachable!(),
+ };
+ mem::forget(me.clone());
+ }
+ Ok(None) => (),
+ Err(e) => {
+ io.write = State::Err(e);
+ io.notify_writable(events);
+ }
+ }
+ }
+
+ fn post_register(me: &Arc<Inner>, mut events: Option<&mut Vec<Event>>) {
+ let mut io = me.io.lock().unwrap();
+ #[allow(clippy::needless_option_as_deref)]
+ if Inner::schedule_read(me, &mut io, events.as_deref_mut()) {
+ if let State::None = io.write {
+ io.notify_writable(events);
+ }
+ }
+ }
+
+ fn get_buffer(&self) -> Vec<u8> {
+ self.pool.lock().unwrap().get(4 * 1024)
+ }
+
+ fn put_buffer(&self, buf: Vec<u8>) {
+ self.pool.lock().unwrap().put(buf)
+ }
+}
+
+unsafe fn cancel<T: AsRawHandle>(handle: &T, overlapped: &Overlapped) -> io::Result<()> {
+ let ret = CancelIoEx(handle.as_raw_handle(), overlapped.as_ptr() as *mut _);
+ // `CancelIoEx` returns 0 on error:
+ // https://docs.microsoft.com/en-us/windows/win32/fileio/cancelioex-func
+ if ret == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+}
+
+fn connect_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `connect` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_conn_overlapped(status.overlapped())) };
+
+ // Flag ourselves as no longer using the `connect` overlapped instances.
+ let prev = me.connecting.swap(false, SeqCst);
+ assert!(prev, "NamedPipe was not previously connecting");
+
+ // Stash away our connect error if one happened
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ unsafe {
+ match me.handle.result(status.overlapped()) {
+ Ok(n) => debug_assert_eq!(n, 0),
+ Err(e) => me.io.lock().unwrap().connect_error = Some(e),
+ }
+ }
+
+ // We essentially just finished a registration, so kick off a
+ // read and register write readiness.
+ Inner::post_register(&me, events);
+}
+
+fn read_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+    // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `schedule_read` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_read_overlapped(status.overlapped())) };
+
+ // Move from the `Pending` to `Ok` state.
+ let mut io = me.io.lock().unwrap();
+ let mut buf = match mem::replace(&mut io.read, State::None) {
+ State::Pending(buf, _) => buf,
+ _ => unreachable!(),
+ };
+ unsafe {
+ match me.handle.result(status.overlapped()) {
+ Ok(n) => {
+ debug_assert_eq!(status.bytes_transferred() as usize, n);
+ buf.set_len(status.bytes_transferred() as usize);
+ io.read = State::Ok(buf, 0);
+ }
+ Err(e) => {
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ io.read = State::Err(e);
+ }
+ }
+ }
+
+ // Flag our readiness that we've got data.
+ io.notify_readable(events);
+}
+
+fn write_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `schedule_write` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_write_overlapped(status.overlapped())) };
+
+ // Make the state change out of `Pending`. If we wrote the entire buffer
+ // then we're writable again and otherwise we schedule another write.
+ let mut io = me.io.lock().unwrap();
+ let (buf, pos) = match mem::replace(&mut io.write, State::None) {
+        // `Ok` here means that the operation completed immediately and
+        // `bytes_transferred` was already reported to the client.
+ State::Ok(..) => {
+ io.notify_writable(events);
+ return;
+ }
+ State::Pending(buf, pos) => (buf, pos),
+ _ => unreachable!(),
+ };
+
+ unsafe {
+ match me.handle.result(status.overlapped()) {
+ Ok(n) => {
+ debug_assert_eq!(status.bytes_transferred() as usize, n);
+ let new_pos = pos + (status.bytes_transferred() as usize);
+ if new_pos == buf.len() {
+ me.put_buffer(buf);
+ io.notify_writable(events);
+ } else {
+ Inner::schedule_write(&me, buf, new_pos, &mut io, events);
+ }
+ }
+ Err(e) => {
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ io.write = State::Err(e);
+ io.notify_writable(events);
+ }
+ }
+ }
+}
+
+impl Io {
+ fn check_association(&self, registry: &Registry, required: bool) -> io::Result<()> {
+ match self.cp {
+ Some(ref cp) if !registry.selector().same_port(cp) => Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a different `Registry`",
+ )),
+ None if required => Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ )),
+ _ => Ok(()),
+ }
+ }
+
+ fn notify_readable(&self, events: Option<&mut Vec<Event>>) {
+ if let Some(token) = self.token {
+ let mut ev = Event::new(token);
+ ev.set_readable();
+
+ if let Some(events) = events {
+ events.push(ev);
+ } else {
+ let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status());
+ }
+ }
+ }
+
+ fn notify_writable(&self, events: Option<&mut Vec<Event>>) {
+ if let Some(token) = self.token {
+ let mut ev = Event::new(token);
+ ev.set_writable();
+
+ if let Some(events) = events {
+ events.push(ev);
+ } else {
+ let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status());
+ }
+ }
+ }
+}
+
+struct BufferPool {
+ pool: Vec<Vec<u8>>,
+}
+
+impl BufferPool {
+ fn with_capacity(cap: usize) -> BufferPool {
+ BufferPool {
+ pool: Vec::with_capacity(cap),
+ }
+ }
+
+ fn get(&mut self, default_cap: usize) -> Vec<u8> {
+ self.pool
+ .pop()
+ .unwrap_or_else(|| Vec::with_capacity(default_cap))
+ }
+
+ fn put(&mut self, mut buf: Vec<u8>) {
+ if self.pool.len() < self.pool.capacity() {
+ unsafe {
+ buf.set_len(0);
+ }
+ self.pool.push(buf);
+ }
+ }
+}
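
The `mem::forget(me.clone())` calls in `schedule_read` and `schedule_write` above, paired with the `Arc::from_raw` calls in the `*_done` callbacks, implement this file's central ownership trick: one strong reference is leaked to the kernel alongside each overlapped operation and reclaimed when the completion fires. A minimal sketch of that transfer, using plain `Arc` with illustrative stand-ins (`Inner`, `schedule`, and `completion` here are not mio's actual items):

```rust
use std::sync::Arc;

struct Inner {
    name: &'static str,
}

// Simulate handing a reference to the kernel: leak one refcount as a raw
// pointer. The kernel-side completion later gives the pointer back.
fn schedule(me: &Arc<Inner>) -> *const Inner {
    // Equivalent of `mem::forget(me.clone())` plus passing `overlapped`:
    // the clone's refcount is now owned by the pointer we hand out.
    Arc::into_raw(Arc::clone(me))
}

// Simulate the completion callback: reconstruct the Arc, which takes the
// leaked refcount back and releases it when dropped.
fn completion(ptr: *const Inner) {
    let me = unsafe { Arc::from_raw(ptr) };
    println!("completed I/O on {}", me.name);
    // `me` drops here, balancing the refcount taken in `schedule`.
}

fn main() {
    let inner = Arc::new(Inner { name: "pipe" });
    let ptr = schedule(&inner);
    assert_eq!(Arc::strong_count(&inner), 2); // one count owned by the pointer
    completion(ptr);
    assert_eq!(Arc::strong_count(&inner), 1); // balance restored
}
```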
diff --git a/third_party/rust/mio/src/sys/windows/net.rs b/third_party/rust/mio/src/sys/windows/net.rs
new file mode 100644
index 0000000000..db1896f198
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/net.rs
@@ -0,0 +1,108 @@
+use std::io;
+use std::mem;
+use std::net::SocketAddr;
+use std::sync::Once;
+
+use winapi::ctypes::c_int;
+use winapi::shared::in6addr::{in6_addr_u, IN6_ADDR};
+use winapi::shared::inaddr::{in_addr_S_un, IN_ADDR};
+use winapi::shared::ws2def::{ADDRESS_FAMILY, AF_INET, AF_INET6, SOCKADDR, SOCKADDR_IN};
+use winapi::shared::ws2ipdef::{SOCKADDR_IN6_LH_u, SOCKADDR_IN6_LH};
+use winapi::um::winsock2::{ioctlsocket, socket, FIONBIO, INVALID_SOCKET, SOCKET};
+
+/// Initialise the network stack for Windows.
+pub(crate) fn init() {
+ static INIT: Once = Once::new();
+ INIT.call_once(|| {
+        // Let the standard library call `WSAStartup` for us; we can't do it
+        // ourselves because otherwise using any type in `std::net` would panic
+        // when it tries to call `WSAStartup` a second time.
+ drop(std::net::UdpSocket::bind("127.0.0.1:0"));
+ });
+}
+
+/// Create a new non-blocking socket.
+pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: c_int) -> io::Result<SOCKET> {
+ use winapi::um::winsock2::{PF_INET, PF_INET6};
+
+ let domain = match addr {
+ SocketAddr::V4(..) => PF_INET,
+ SocketAddr::V6(..) => PF_INET6,
+ };
+
+ new_socket(domain, socket_type)
+}
+
+pub(crate) fn new_socket(domain: c_int, socket_type: c_int) -> io::Result<SOCKET> {
+ syscall!(
+ socket(domain, socket_type, 0),
+ PartialEq::eq,
+ INVALID_SOCKET
+ )
+ .and_then(|socket| {
+ syscall!(ioctlsocket(socket, FIONBIO, &mut 1), PartialEq::ne, 0).map(|_| socket as SOCKET)
+ })
+}
+
+/// A type with the same memory layout as `SOCKADDR`. Used to convert Rust-level
+/// `SocketAddr*` types into their system representation. The benefit of this
+/// type over `SOCKADDR_STORAGE` is that it is exactly as large as it needs to
+/// be, rather than much larger, and it can be initialized more cleanly from Rust.
+#[repr(C)]
+pub(crate) union SocketAddrCRepr {
+ v4: SOCKADDR_IN,
+ v6: SOCKADDR_IN6_LH,
+}
+
+impl SocketAddrCRepr {
+ pub(crate) fn as_ptr(&self) -> *const SOCKADDR {
+ self as *const _ as *const SOCKADDR
+ }
+}
+
+pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, c_int) {
+ match addr {
+ SocketAddr::V4(ref addr) => {
+            // `s_addr` is stored as big-endian on all machines, and the octet
+            // array is already in big-endian order, so the native-endian
+            // conversion method is used to avoid any byte swapping.
+ let sin_addr = unsafe {
+ let mut s_un = mem::zeroed::<in_addr_S_un>();
+ *s_un.S_addr_mut() = u32::from_ne_bytes(addr.ip().octets());
+ IN_ADDR { S_un: s_un }
+ };
+
+ let sockaddr_in = SOCKADDR_IN {
+ sin_family: AF_INET as ADDRESS_FAMILY,
+ sin_port: addr.port().to_be(),
+ sin_addr,
+ sin_zero: [0; 8],
+ };
+
+ let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
+ (sockaddr, mem::size_of::<SOCKADDR_IN>() as c_int)
+ }
+ SocketAddr::V6(ref addr) => {
+ let sin6_addr = unsafe {
+ let mut u = mem::zeroed::<in6_addr_u>();
+ *u.Byte_mut() = addr.ip().octets();
+ IN6_ADDR { u }
+ };
+ let u = unsafe {
+ let mut u = mem::zeroed::<SOCKADDR_IN6_LH_u>();
+ *u.sin6_scope_id_mut() = addr.scope_id();
+ u
+ };
+
+ let sockaddr_in6 = SOCKADDR_IN6_LH {
+ sin6_family: AF_INET6 as ADDRESS_FAMILY,
+ sin6_port: addr.port().to_be(),
+ sin6_addr,
+ sin6_flowinfo: addr.flowinfo(),
+ u,
+ };
+
+ let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
+ (sockaddr, mem::size_of::<SOCKADDR_IN6_LH>() as c_int)
+ }
+ }
+}
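
The native-endian conversion above is subtle enough to deserve a standalone check. The sketch below (plain Rust, no winapi needed) shows why `from_ne_bytes` is the right call here: it preserves the network byte order already present in the octet array on any host.

```rust
fn main() {
    // 127.0.0.1 as its four network-order bytes.
    let octets = [127u8, 0, 0, 1];

    // `from_ne_bytes` reinterprets the bytes without swapping, so the
    // in-memory layout of `s_addr` stays exactly the network order the
    // kernel expects, regardless of host endianness.
    let s_addr = u32::from_ne_bytes(octets);

    // Round-trip: the bytes come back out unchanged.
    assert_eq!(s_addr.to_ne_bytes(), octets);

    // By contrast, `from_be_bytes` would produce a host-order integer
    // that would still need an `htonl`-style swap before hitting the
    // wire on little-endian machines.
    let host_order = u32::from_be_bytes(octets);
    assert_eq!(host_order, 0x7f00_0001);
}
```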
diff --git a/third_party/rust/mio/src/sys/windows/overlapped.rs b/third_party/rust/mio/src/sys/windows/overlapped.rs
new file mode 100644
index 0000000000..837b78b60a
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/overlapped.rs
@@ -0,0 +1,37 @@
+use crate::sys::windows::Event;
+
+use std::cell::UnsafeCell;
+use std::fmt;
+
+#[cfg(feature = "os-ext")]
+use winapi::um::minwinbase::OVERLAPPED;
+use winapi::um::minwinbase::OVERLAPPED_ENTRY;
+
+#[repr(C)]
+pub(crate) struct Overlapped {
+ inner: UnsafeCell<miow::Overlapped>,
+ pub(crate) callback: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>),
+}
+
+#[cfg(feature = "os-ext")]
+impl Overlapped {
+ pub(crate) fn new(cb: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>)) -> Overlapped {
+ Overlapped {
+ inner: UnsafeCell::new(miow::Overlapped::zero()),
+ callback: cb,
+ }
+ }
+
+ pub(crate) fn as_ptr(&self) -> *const OVERLAPPED {
+ unsafe { (*self.inner.get()).raw() }
+ }
+}
+
+impl fmt::Debug for Overlapped {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Overlapped").finish()
+ }
+}
+
+unsafe impl Send for Overlapped {}
+unsafe impl Sync for Overlapped {}
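
The `#[repr(C)]` on `Overlapped`, with the `miow::Overlapped` (and thus the OS-visible `OVERLAPPED`) as its first field, is what lets the selector cast a completion's `*mut OVERLAPPED` back to `*mut Overlapped` and invoke the stored callback. A minimal sketch of that first-field cast, with hypothetical types standing in for the Windows ones:

```rust
// `KernelHeader` stands in for the OVERLAPPED structure the OS sees;
// the names here are illustrative, not Windows API types.
#[repr(C)]
struct KernelHeader {
    reserved: [usize; 2],
}

#[repr(C)]
struct Wrapper {
    header: KernelHeader, // must be the first field
    callback: fn(&KernelHeader),
}

fn on_complete(_: &KernelHeader) {
    println!("completion dispatched through first-field cast");
}

fn main() {
    let wrapper = Wrapper {
        header: KernelHeader { reserved: [0; 2] },
        callback: on_complete,
    };

    // The OS-style code only ever sees a pointer to the header...
    let header_ptr: *const KernelHeader = &wrapper.header;

    // ...but because `Wrapper` is #[repr(C)] and `header` is its first
    // field, the header pointer is also a valid pointer to the wrapper.
    let wrapper_ptr = header_ptr as *const Wrapper;
    let cb = unsafe { (*wrapper_ptr).callback };
    cb(&wrapper.header);
}
```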
diff --git a/third_party/rust/mio/src/sys/windows/selector.rs b/third_party/rust/mio/src/sys/windows/selector.rs
new file mode 100644
index 0000000000..133fefe895
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/selector.rs
@@ -0,0 +1,748 @@
+use super::afd::{self, Afd, AfdPollInfo};
+use super::io_status_block::IoStatusBlock;
+use super::Event;
+use crate::sys::Events;
+
+cfg_net! {
+ use crate::sys::event::{
+ ERROR_FLAGS, READABLE_FLAGS, READ_CLOSED_FLAGS, WRITABLE_FLAGS, WRITE_CLOSED_FLAGS,
+ };
+ use crate::Interest;
+}
+
+use miow::iocp::{CompletionPort, CompletionStatus};
+use std::collections::VecDeque;
+use std::io;
+use std::marker::PhantomPinned;
+use std::os::windows::io::RawSocket;
+use std::pin::Pin;
+#[cfg(debug_assertions)]
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+use winapi::shared::ntdef::NT_SUCCESS;
+use winapi::shared::ntdef::{HANDLE, PVOID};
+use winapi::shared::ntstatus::STATUS_CANCELLED;
+use winapi::shared::winerror::{ERROR_INVALID_HANDLE, ERROR_IO_PENDING, WAIT_TIMEOUT};
+use winapi::um::minwinbase::OVERLAPPED;
+
+#[derive(Debug)]
+struct AfdGroup {
+ #[cfg_attr(not(feature = "net"), allow(dead_code))]
+ cp: Arc<CompletionPort>,
+ afd_group: Mutex<Vec<Arc<Afd>>>,
+}
+
+impl AfdGroup {
+ pub fn new(cp: Arc<CompletionPort>) -> AfdGroup {
+ AfdGroup {
+ afd_group: Mutex::new(Vec::new()),
+ cp,
+ }
+ }
+
+ pub fn release_unused_afd(&self) {
+ let mut afd_group = self.afd_group.lock().unwrap();
+ afd_group.retain(|g| Arc::strong_count(g) > 1);
+ }
+}
+
+cfg_io_source! {
+ const POLL_GROUP__MAX_GROUP_SIZE: usize = 32;
+
+ impl AfdGroup {
+ pub fn acquire(&self) -> io::Result<Arc<Afd>> {
+ let mut afd_group = self.afd_group.lock().unwrap();
+ if afd_group.len() == 0 {
+ self._alloc_afd_group(&mut afd_group)?;
+ } else {
+                // + 1 for the reference held by the Vec itself
+ if Arc::strong_count(afd_group.last().unwrap()) > POLL_GROUP__MAX_GROUP_SIZE {
+ self._alloc_afd_group(&mut afd_group)?;
+ }
+ }
+
+ match afd_group.last() {
+ Some(arc) => Ok(arc.clone()),
+ None => unreachable!(
+ "Cannot acquire afd, {:#?}, afd_group: {:#?}",
+ self, afd_group
+ ),
+ }
+ }
+
+ fn _alloc_afd_group(&self, afd_group: &mut Vec<Arc<Afd>>) -> io::Result<()> {
+ let afd = Afd::new(&self.cp)?;
+ let arc = Arc::new(afd);
+ afd_group.push(arc);
+ Ok(())
+ }
+ }
+}
+
+#[derive(Debug)]
+enum SockPollStatus {
+ Idle,
+ Pending,
+ Cancelled,
+}
+
+#[derive(Debug)]
+pub struct SockState {
+ iosb: IoStatusBlock,
+ poll_info: AfdPollInfo,
+ afd: Arc<Afd>,
+
+ base_socket: RawSocket,
+
+ user_evts: u32,
+ pending_evts: u32,
+
+ user_data: u64,
+
+ poll_status: SockPollStatus,
+ delete_pending: bool,
+
+ // last raw os error
+ error: Option<i32>,
+
+ _pinned: PhantomPinned,
+}
+
+impl SockState {
+ fn update(&mut self, self_arc: &Pin<Arc<Mutex<SockState>>>) -> io::Result<()> {
+ assert!(!self.delete_pending);
+
+ // make sure to reset previous error before a new update
+ self.error = None;
+
+ if let SockPollStatus::Pending = self.poll_status {
+ if (self.user_evts & afd::KNOWN_EVENTS & !self.pending_evts) == 0 {
+ /* All the events the user is interested in are already being monitored by
+ * the pending poll operation. It might spuriously complete because of an
+ * event that we're no longer interested in; when that happens we'll submit
+ * a new poll operation with the updated event mask. */
+ } else {
+ /* A poll operation is already pending, but it's not monitoring for all the
+ * events that the user is interested in. Therefore, cancel the pending
+                 * poll operation; when we receive its completion packet, a new poll
+ * operation will be submitted with the correct event mask. */
+ if let Err(e) = self.cancel() {
+ self.error = e.raw_os_error();
+ return Err(e);
+ }
+ return Ok(());
+ }
+ } else if let SockPollStatus::Cancelled = self.poll_status {
+ /* The poll operation has already been cancelled, we're still waiting for
+ * it to return. For now, there's nothing that needs to be done. */
+ } else if let SockPollStatus::Idle = self.poll_status {
+ /* No poll operation is pending; start one. */
+ self.poll_info.exclusive = 0;
+ self.poll_info.number_of_handles = 1;
+ *unsafe { self.poll_info.timeout.QuadPart_mut() } = std::i64::MAX;
+ self.poll_info.handles[0].handle = self.base_socket as HANDLE;
+ self.poll_info.handles[0].status = 0;
+ self.poll_info.handles[0].events = self.user_evts | afd::POLL_LOCAL_CLOSE;
+
+ // Increase the ref count as the memory will be used by the kernel.
+ let overlapped_ptr = into_overlapped(self_arc.clone());
+
+ let result = unsafe {
+ self.afd
+ .poll(&mut self.poll_info, &mut *self.iosb, overlapped_ptr)
+ };
+ if let Err(e) = result {
+ let code = e.raw_os_error().unwrap();
+ if code == ERROR_IO_PENDING as i32 {
+ /* Overlapped poll operation in progress; this is expected. */
+ } else {
+ // Since the operation failed it means the kernel won't be
+ // using the memory any more.
+ drop(from_overlapped(overlapped_ptr as *mut _));
+ if code == ERROR_INVALID_HANDLE as i32 {
+ /* Socket closed; it'll be dropped. */
+ self.mark_delete();
+ return Ok(());
+ } else {
+ self.error = e.raw_os_error();
+ return Err(e);
+ }
+ }
+ }
+
+ self.poll_status = SockPollStatus::Pending;
+ self.pending_evts = self.user_evts;
+ } else {
+ unreachable!("Invalid poll status during update, {:#?}", self)
+ }
+
+ Ok(())
+ }
+
+ fn cancel(&mut self) -> io::Result<()> {
+ match self.poll_status {
+ SockPollStatus::Pending => {}
+ _ => unreachable!("Invalid poll status during cancel, {:#?}", self),
+ };
+ unsafe {
+ self.afd.cancel(&mut *self.iosb)?;
+ }
+ self.poll_status = SockPollStatus::Cancelled;
+ self.pending_evts = 0;
+ Ok(())
+ }
+
+    // This function is called from the overlapped callback via the
+    // `Arc<Mutex<SockState>>`; watch out for the reference counting.
+ fn feed_event(&mut self) -> Option<Event> {
+ self.poll_status = SockPollStatus::Idle;
+ self.pending_evts = 0;
+
+ let mut afd_events = 0;
+        // We use the status info in the IO_STATUS_BLOCK to determine the socket
+        // poll status. Reading it through a raw pointer is what makes this block unsafe.
+ unsafe {
+ if self.delete_pending {
+ return None;
+ } else if self.iosb.u.Status == STATUS_CANCELLED {
+ /* The poll request was cancelled by CancelIoEx. */
+ } else if !NT_SUCCESS(self.iosb.u.Status) {
+ /* The overlapped request itself failed in an unexpected way. */
+ afd_events = afd::POLL_CONNECT_FAIL;
+ } else if self.poll_info.number_of_handles < 1 {
+ /* This poll operation succeeded but didn't report any socket events. */
+ } else if self.poll_info.handles[0].events & afd::POLL_LOCAL_CLOSE != 0 {
+ /* The poll operation reported that the socket was closed. */
+ self.mark_delete();
+ return None;
+ } else {
+ afd_events = self.poll_info.handles[0].events;
+ }
+ }
+
+ afd_events &= self.user_evts;
+
+ if afd_events == 0 {
+ return None;
+ }
+
+        // In mio we have to simulate edge-triggered behavior to match the API usage.
+        // The strategy is to intercept all user read/write calls that could return
+        // `WouldBlock`, then reregister the socket to reset the interests.
+ self.user_evts &= !afd_events;
+
+ Some(Event {
+ data: self.user_data,
+ flags: afd_events,
+ })
+ }
+
+ pub fn is_pending_deletion(&self) -> bool {
+ self.delete_pending
+ }
+
+ pub fn mark_delete(&mut self) {
+ if !self.delete_pending {
+ if let SockPollStatus::Pending = self.poll_status {
+ drop(self.cancel());
+ }
+
+ self.delete_pending = true;
+ }
+ }
+
+ fn has_error(&self) -> bool {
+ self.error.is_some()
+ }
+}
+
+cfg_io_source! {
+ impl SockState {
+ fn new(raw_socket: RawSocket, afd: Arc<Afd>) -> io::Result<SockState> {
+ Ok(SockState {
+ iosb: IoStatusBlock::zeroed(),
+ poll_info: AfdPollInfo::zeroed(),
+ afd,
+ base_socket: get_base_socket(raw_socket)?,
+ user_evts: 0,
+ pending_evts: 0,
+ user_data: 0,
+ poll_status: SockPollStatus::Idle,
+ delete_pending: false,
+ error: None,
+ _pinned: PhantomPinned,
+ })
+ }
+
+        /// Returns true if the socket needs to be added to the update queue, false otherwise.
+ fn set_event(&mut self, ev: Event) -> bool {
+ /* afd::POLL_CONNECT_FAIL and afd::POLL_ABORT are always reported, even when not requested by the caller. */
+ let events = ev.flags | afd::POLL_CONNECT_FAIL | afd::POLL_ABORT;
+
+ self.user_evts = events;
+ self.user_data = ev.data;
+
+ (events & !self.pending_evts) != 0
+ }
+ }
+}
+
+impl Drop for SockState {
+ fn drop(&mut self) {
+ self.mark_delete();
+ }
+}
+
+/// Converts the pointer to a `SockState` into a raw pointer.
+/// To revert see `from_overlapped`.
+fn into_overlapped(sock_state: Pin<Arc<Mutex<SockState>>>) -> PVOID {
+ let overlapped_ptr: *const Mutex<SockState> =
+ unsafe { Arc::into_raw(Pin::into_inner_unchecked(sock_state)) };
+ overlapped_ptr as *mut _
+}
+
+/// Convert a raw overlapped pointer into a reference to `SockState`.
+/// Reverts `into_overlapped`.
+fn from_overlapped(ptr: *mut OVERLAPPED) -> Pin<Arc<Mutex<SockState>>> {
+ let sock_ptr: *const Mutex<SockState> = ptr as *const _;
+ unsafe { Pin::new_unchecked(Arc::from_raw(sock_ptr)) }
+}
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that is previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return with an error. This matches Windows behavior.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
+
+/// Windows implementation of `sys::Selector`.
+///
+/// Edge-triggered event notification is simulated by resetting the internal event flags of each
+/// socket's `SockState` and setting the events back by intercepting all requests that could
+/// cause `io::ErrorKind::WouldBlock`.
+///
+/// This selector currently only supports sockets, because the `Afd` driver is winsock2-specific.
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ pub(super) inner: Arc<SelectorInner>,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ SelectorInner::new().map(|inner| {
+ #[cfg(debug_assertions)]
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+ Selector {
+ #[cfg(debug_assertions)]
+ id,
+ inner: Arc::new(inner),
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(false),
+ }
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ Ok(Selector {
+ #[cfg(debug_assertions)]
+ id: self.id,
+ inner: Arc::clone(&self.inner),
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)),
+ })
+ }
+
+ /// # Safety
+ ///
+ /// This requires a mutable reference to self because only a single thread
+ /// can poll IOCP at a time.
+ pub fn select(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ self.inner.select(events, timeout)
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn register_waker(&self) -> bool {
+ self.has_waker.swap(true, Ordering::AcqRel)
+ }
+
+ pub(super) fn clone_port(&self) -> Arc<CompletionPort> {
+ self.inner.cp.clone()
+ }
+
+ #[cfg(feature = "os-ext")]
+ pub(super) fn same_port(&self, other: &Arc<CompletionPort>) -> bool {
+ Arc::ptr_eq(&self.inner.cp, other)
+ }
+}
+
+cfg_io_source! {
+ use super::InternalState;
+ use crate::Token;
+
+ impl Selector {
+ pub(super) fn register(
+ &self,
+ socket: RawSocket,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<InternalState> {
+ SelectorInner::register(&self.inner, socket, token, interests)
+ }
+
+ pub(super) fn reregister(
+ &self,
+ state: Pin<Arc<Mutex<SockState>>>,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(state, token, interests)
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct SelectorInner {
+ pub(super) cp: Arc<CompletionPort>,
+ update_queue: Mutex<VecDeque<Pin<Arc<Mutex<SockState>>>>>,
+ afd_group: AfdGroup,
+ is_polling: AtomicBool,
+}
+
+// Thread safety is ensured by manual locking.
+unsafe impl Sync for SelectorInner {}
+
+impl SelectorInner {
+ pub fn new() -> io::Result<SelectorInner> {
+ CompletionPort::new(0).map(|cp| {
+ let cp = Arc::new(cp);
+ let cp_afd = Arc::clone(&cp);
+
+ SelectorInner {
+ cp,
+ update_queue: Mutex::new(VecDeque::new()),
+ afd_group: AfdGroup::new(cp_afd),
+ is_polling: AtomicBool::new(false),
+ }
+ })
+ }
+
+ /// # Safety
+ ///
+    /// May only be called via `Selector::select`.
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ events.clear();
+
+ if timeout.is_none() {
+ loop {
+ let len = self.select2(&mut events.statuses, &mut events.events, None)?;
+ if len == 0 {
+ continue;
+ }
+ break Ok(());
+ }
+ } else {
+ self.select2(&mut events.statuses, &mut events.events, timeout)?;
+ Ok(())
+ }
+ }
+
+ pub fn select2(
+ &self,
+ statuses: &mut [CompletionStatus],
+ events: &mut Vec<Event>,
+ timeout: Option<Duration>,
+ ) -> io::Result<usize> {
+ assert!(!self.is_polling.swap(true, Ordering::AcqRel));
+
+ unsafe { self.update_sockets_events() }?;
+
+ let result = self.cp.get_many(statuses, timeout);
+
+ self.is_polling.store(false, Ordering::Relaxed);
+
+ match result {
+ Ok(iocp_events) => Ok(unsafe { self.feed_events(events, iocp_events) }),
+ Err(ref e) if e.raw_os_error() == Some(WAIT_TIMEOUT as i32) => Ok(0),
+ Err(e) => Err(e),
+ }
+ }
+
+ unsafe fn update_sockets_events(&self) -> io::Result<()> {
+ let mut update_queue = self.update_queue.lock().unwrap();
+ for sock in update_queue.iter_mut() {
+ let mut sock_internal = sock.lock().unwrap();
+ if !sock_internal.is_pending_deletion() {
+ sock_internal.update(sock)?;
+ }
+ }
+
+        // Remove all sockets that have no error; they have an AFD poll operation pending.
+ update_queue.retain(|sock| sock.lock().unwrap().has_error());
+
+ self.afd_group.release_unused_afd();
+ Ok(())
+ }
+
+    // Returns the number of `iocp_events` processed rather than the events themselves.
+ unsafe fn feed_events(
+ &self,
+ events: &mut Vec<Event>,
+ iocp_events: &[CompletionStatus],
+ ) -> usize {
+ let mut n = 0;
+ let mut update_queue = self.update_queue.lock().unwrap();
+ for iocp_event in iocp_events.iter() {
+ if iocp_event.overlapped().is_null() {
+ events.push(Event::from_completion_status(iocp_event));
+ n += 1;
+ continue;
+ } else if iocp_event.token() % 2 == 1 {
+ // Handle is a named pipe. This could be extended to be any non-AFD event.
+ let callback = (*(iocp_event.overlapped() as *mut super::Overlapped)).callback;
+
+ let len = events.len();
+ callback(iocp_event.entry(), Some(events));
+ n += events.len() - len;
+ continue;
+ }
+
+ let sock_state = from_overlapped(iocp_event.overlapped());
+ let mut sock_guard = sock_state.lock().unwrap();
+ if let Some(e) = sock_guard.feed_event() {
+ events.push(e);
+ n += 1;
+ }
+
+ if !sock_guard.is_pending_deletion() {
+ update_queue.push_back(sock_state.clone());
+ }
+ }
+ self.afd_group.release_unused_afd();
+ n
+ }
+}
+
+cfg_io_source! {
+ use std::mem::size_of;
+ use std::ptr::null_mut;
+ use winapi::um::mswsock;
+ use winapi::um::winsock2::WSAGetLastError;
+ use winapi::um::winsock2::{WSAIoctl, SOCKET_ERROR};
+
+ impl SelectorInner {
+ fn register(
+ this: &Arc<Self>,
+ socket: RawSocket,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<InternalState> {
+ let flags = interests_to_afd_flags(interests);
+
+ let sock = {
+ let sock = this._alloc_sock_for_rawsocket(socket)?;
+ let event = Event {
+ flags,
+ data: token.0 as u64,
+ };
+ sock.lock().unwrap().set_event(event);
+ sock
+ };
+
+ let state = InternalState {
+ selector: this.clone(),
+ token,
+ interests,
+ sock_state: sock.clone(),
+ };
+
+ this.queue_state(sock);
+ unsafe { this.update_sockets_events_if_polling()? };
+
+ Ok(state)
+ }
+
+ // Directly accessed in `IoSourceState::do_io`.
+ pub(super) fn reregister(
+ &self,
+ state: Pin<Arc<Mutex<SockState>>>,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ {
+ let event = Event {
+ flags: interests_to_afd_flags(interests),
+ data: token.0 as u64,
+ };
+
+ state.lock().unwrap().set_event(event);
+ }
+
+            // FIXME: a sock whose `has_error` is true should not be re-added to
+            // the update queue because it's already there.
+ self.queue_state(state);
+ unsafe { self.update_sockets_events_if_polling() }
+ }
+
+ /// This function is called by register() and reregister() to start an
+ /// IOCTL_AFD_POLL operation corresponding to the registered events, but
+ /// only if necessary.
+ ///
+ /// Since it is not possible to modify or synchronously cancel an AFD_POLL
+ /// operation, and there can be only one active AFD_POLL operation per
+ /// (socket, completion port) pair at any time, it is expensive to change
+ /// a socket's event registration after it has been submitted to the kernel.
+ ///
+ /// Therefore, if no other threads are polling when interest in a socket
+ /// event is (re)registered, the socket is added to the 'update queue', but
+ /// the actual syscall to start the IOCTL_AFD_POLL operation is deferred
+ /// until just before the GetQueuedCompletionStatusEx() syscall is made.
+ ///
+ /// However, when another thread is already blocked on
+ /// GetQueuedCompletionStatusEx() we tell the kernel about the registered
+ /// socket event(s) immediately.
+ unsafe fn update_sockets_events_if_polling(&self) -> io::Result<()> {
+ if self.is_polling.load(Ordering::Acquire) {
+ self.update_sockets_events()
+ } else {
+ Ok(())
+ }
+ }
+
+ fn queue_state(&self, sock_state: Pin<Arc<Mutex<SockState>>>) {
+ let mut update_queue = self.update_queue.lock().unwrap();
+ update_queue.push_back(sock_state);
+ }
+
+ fn _alloc_sock_for_rawsocket(
+ &self,
+ raw_socket: RawSocket,
+ ) -> io::Result<Pin<Arc<Mutex<SockState>>>> {
+ let afd = self.afd_group.acquire()?;
+ Ok(Arc::pin(Mutex::new(SockState::new(raw_socket, afd)?)))
+ }
+ }
+
+ fn try_get_base_socket(raw_socket: RawSocket, ioctl: u32) -> Result<RawSocket, i32> {
+ let mut base_socket: RawSocket = 0;
+ let mut bytes: u32 = 0;
+ unsafe {
+ if WSAIoctl(
+ raw_socket as usize,
+ ioctl,
+ null_mut(),
+ 0,
+ &mut base_socket as *mut _ as PVOID,
+ size_of::<RawSocket>() as u32,
+ &mut bytes,
+ null_mut(),
+ None,
+ ) != SOCKET_ERROR
+ {
+ Ok(base_socket)
+ } else {
+ Err(WSAGetLastError())
+ }
+ }
+ }
+
+ fn get_base_socket(raw_socket: RawSocket) -> io::Result<RawSocket> {
+ let res = try_get_base_socket(raw_socket, mswsock::SIO_BASE_HANDLE);
+ if let Ok(base_socket) = res {
+ return Ok(base_socket);
+ }
+
+ // The `SIO_BASE_HANDLE` should not be intercepted by LSPs, therefore
+ // it should not fail as long as `raw_socket` is a valid socket. See
+ // https://docs.microsoft.com/en-us/windows/win32/winsock/winsock-ioctls.
+ // However, at least one known LSP deliberately breaks it, so we try
+ // some alternative IOCTLs, starting with the most appropriate one.
+ for &ioctl in &[
+ mswsock::SIO_BSP_HANDLE_SELECT,
+ mswsock::SIO_BSP_HANDLE_POLL,
+ mswsock::SIO_BSP_HANDLE,
+ ] {
+ if let Ok(base_socket) = try_get_base_socket(raw_socket, ioctl) {
+            // Since we now know that we're dealing with an LSP (otherwise
+            // SIO_BASE_HANDLE wouldn't have failed), only return a result
+            // when it is different from the original `raw_socket`.
+ if base_socket != raw_socket {
+ return Ok(base_socket);
+ }
+ }
+ }
+
+ // If the alternative IOCTLs also failed, return the original error.
+ let os_error = res.unwrap_err();
+ let err = io::Error::from_raw_os_error(os_error);
+ Err(err)
+ }
+}
+
+impl Drop for SelectorInner {
+ fn drop(&mut self) {
+ loop {
+ let events_num: usize;
+ let mut statuses: [CompletionStatus; 1024] = [CompletionStatus::zero(); 1024];
+
+ let result = self
+ .cp
+ .get_many(&mut statuses, Some(std::time::Duration::from_millis(0)));
+ match result {
+ Ok(iocp_events) => {
+ events_num = iocp_events.iter().len();
+ for iocp_event in iocp_events.iter() {
+ if iocp_event.overlapped().is_null() {
+ // Custom event
+ } else if iocp_event.token() % 2 == 1 {
+ // Named pipe, dispatch the event so it can release resources
+ let callback = unsafe {
+ (*(iocp_event.overlapped() as *mut super::Overlapped)).callback
+ };
+
+ callback(iocp_event.entry(), None);
+ } else {
+                        // Reconstruct and drop the sock state to release its Arc reference.
+ let _sock_state = from_overlapped(iocp_event.overlapped());
+ }
+ }
+ }
+
+ Err(_) => {
+ break;
+ }
+ }
+
+ if events_num == 0 {
+                // All completion statuses have been drained, so we can stop looping.
+ break;
+ }
+ }
+
+ self.afd_group.release_unused_afd();
+ }
+}
+
+cfg_net! {
+ fn interests_to_afd_flags(interests: Interest) -> u32 {
+ let mut flags = 0;
+
+ if interests.is_readable() {
+ flags |= READABLE_FLAGS | READ_CLOSED_FLAGS | ERROR_FLAGS;
+ }
+
+ if interests.is_writable() {
+ flags |= WRITABLE_FLAGS | WRITE_CLOSED_FLAGS | ERROR_FLAGS;
+ }
+
+ flags
+ }
+}
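
The edge-trigger simulation described in the comments above boils down to two pieces of mask bookkeeping: `feed_event` strips delivered events from `user_evts`, and `set_event` decides whether a fresh `IOCTL_AFD_POLL` is needed. A minimal model of that logic, with illustrative flag constants in place of the real `afd::POLL_*` values:

```rust
const READABLE: u32 = 0b01;
const WRITABLE: u32 = 0b10;

struct MaskState {
    user_evts: u32,    // what the user is currently interested in
    pending_evts: u32, // what the in-flight poll is watching
}

impl MaskState {
    // Mirrors `SockState::feed_event`: deliver only requested events and
    // strip them from the interest mask so they are not reported again
    // until the user re-arms via reregister (edge-triggered behavior).
    fn feed_event(&mut self, afd_events: u32) -> Option<u32> {
        self.pending_evts = 0;
        let delivered = afd_events & self.user_evts;
        if delivered == 0 {
            return None;
        }
        self.user_evts &= !delivered;
        Some(delivered)
    }

    // Mirrors `SockState::set_event`: true means the socket must be
    // queued for a fresh IOCTL_AFD_POLL on the next update pass.
    fn set_event(&mut self, interests: u32) -> bool {
        self.user_evts = interests;
        (interests & !self.pending_evts) != 0
    }
}

fn main() {
    let mut s = MaskState {
        user_evts: READABLE | WRITABLE,
        pending_evts: READABLE | WRITABLE,
    };
    assert_eq!(s.feed_event(READABLE), Some(READABLE)); // delivered once
    assert_eq!(s.feed_event(READABLE), None);           // edge: suppressed
    assert!(s.set_event(READABLE | WRITABLE));          // re-arm needs a new poll
}
```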
diff --git a/third_party/rust/mio/src/sys/windows/tcp.rs b/third_party/rust/mio/src/sys/windows/tcp.rs
new file mode 100644
index 0000000000..b3f05aec65
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/tcp.rs
@@ -0,0 +1,71 @@
+use std::io;
+use std::net::{self, SocketAddr};
+use std::os::windows::io::AsRawSocket;
+
+use winapi::um::winsock2::{self, PF_INET, PF_INET6, SOCKET, SOCKET_ERROR, SOCK_STREAM};
+
+use crate::sys::windows::net::{init, new_socket, socket_addr};
+
+pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<SOCKET> {
+ init();
+ let domain = match address {
+ SocketAddr::V4(_) => PF_INET,
+ SocketAddr::V6(_) => PF_INET6,
+ };
+ new_socket(domain, SOCK_STREAM)
+}
+
+pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> {
+ use winsock2::bind;
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(
+ bind(
+ socket.as_raw_socket() as _,
+ raw_addr.as_ptr(),
+ raw_addr_length
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+ Ok(())
+}
+
+pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> {
+ use winsock2::connect;
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ let res = syscall!(
+ connect(
+ socket.as_raw_socket() as _,
+ raw_addr.as_ptr(),
+ raw_addr_length
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ );
+
+ match res {
+ Err(err) if err.kind() != io::ErrorKind::WouldBlock => Err(err),
+ _ => Ok(()),
+ }
+}
+
+pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> {
+ use std::convert::TryInto;
+ use winsock2::listen;
+
+ let backlog = backlog.try_into().unwrap_or(i32::max_value());
+ syscall!(
+ listen(socket.as_raw_socket() as _, backlog),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+ Ok(())
+}
+
+pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ // The non-blocking state of `listener` is inherited. See
+ // https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-accept#remarks.
+ listener.accept()
+}
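
Since `connect` above swallows `WouldBlock`, callers learn the outcome of a connection attempt only later, via writability. A hedged sketch of that pattern using mio's public API (the address and token are illustrative; this assumes the "os-poll" and "net" features and something listening on the port):

```rust
use mio::net::TcpStream;
use mio::{Events, Interest, Poll, Token};
use std::error::Error;

fn main() -> Result<(), Box<dyn Error>> {
    let mut poll = Poll::new()?;
    let mut events = Events::with_capacity(8);

    // The in-progress connect is not an error; it is reported later.
    let mut stream = TcpStream::connect("127.0.0.1:9000".parse()?)?;
    poll.registry()
        .register(&mut stream, Token(0), Interest::WRITABLE)?;

    poll.poll(&mut events, None)?;
    for event in &events {
        if event.token() == Token(0) && event.is_writable() {
            // Check take_error() to distinguish a successful connect
            // from a failed one before using the stream.
            match stream.take_error()? {
                None => println!("connected to {}", stream.peer_addr()?),
                Some(err) => println!("connect failed: {}", err),
            }
        }
    }
    Ok(())
}
```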
diff --git a/third_party/rust/mio/src/sys/windows/udp.rs b/third_party/rust/mio/src/sys/windows/udp.rs
new file mode 100644
index 0000000000..825ecccff4
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/udp.rs
@@ -0,0 +1,53 @@
+use std::io;
+use std::mem::{self, MaybeUninit};
+use std::net::{self, SocketAddr};
+use std::os::windows::io::{AsRawSocket, FromRawSocket};
+use std::os::windows::raw::SOCKET as StdSocket; // winapi uses usize, stdlib uses u32/u64.
+
+use winapi::ctypes::c_int;
+use winapi::shared::ws2def::IPPROTO_IPV6;
+use winapi::shared::ws2ipdef::IPV6_V6ONLY;
+use winapi::um::winsock2::{bind as win_bind, closesocket, getsockopt, SOCKET_ERROR, SOCK_DGRAM};
+
+use crate::sys::windows::net::{init, new_ip_socket, socket_addr};
+
+pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
+ init();
+ new_ip_socket(addr, SOCK_DGRAM).and_then(|socket| {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(
+ win_bind(socket, raw_addr.as_ptr(), raw_addr_length,),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )
+ .map_err(|err| {
+ // Close the socket if we hit an error, ignoring the error
+ // from closing since we can't pass back two errors.
+ let _ = unsafe { closesocket(socket) };
+ err
+ })
+ .map(|_| unsafe { net::UdpSocket::from_raw_socket(socket as StdSocket) })
+ })
+}
+
+pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
+ let mut optval: MaybeUninit<c_int> = MaybeUninit::uninit();
+ let mut optlen = mem::size_of::<c_int>() as c_int;
+
+ syscall!(
+ getsockopt(
+ socket.as_raw_socket() as usize,
+ IPPROTO_IPV6 as c_int,
+ IPV6_V6ONLY as c_int,
+ optval.as_mut_ptr().cast(),
+ &mut optlen,
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+
+ debug_assert_eq!(optlen as usize, mem::size_of::<c_int>());
+ // Safety: `getsockopt` initialised `optval` for us.
+ let optval = unsafe { optval.assume_init() };
+ Ok(optval != 0)
+}
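
For completeness, the `getsockopt` call above is what backs mio's public `UdpSocket::only_v6` accessor; a short usage sketch (assuming the "os-poll" and "net" features):

```rust
use mio::net::UdpSocket;

fn main() -> std::io::Result<()> {
    let socket = UdpSocket::bind("[::1]:0".parse().unwrap())?;
    // On Windows this ends up in the `getsockopt` call above; the value
    // reflects whether the socket is restricted to IPv6 traffic.
    println!("v6-only: {}", socket.only_v6()?);
    Ok(())
}
```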
diff --git a/third_party/rust/mio/src/sys/windows/waker.rs b/third_party/rust/mio/src/sys/windows/waker.rs
new file mode 100644
index 0000000000..ab12c3c689
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/waker.rs
@@ -0,0 +1,29 @@
+use crate::sys::windows::Event;
+use crate::sys::windows::Selector;
+use crate::Token;
+
+use miow::iocp::CompletionPort;
+use std::io;
+use std::sync::Arc;
+
+#[derive(Debug)]
+pub struct Waker {
+ token: Token,
+ port: Arc<CompletionPort>,
+}
+
+impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ Ok(Waker {
+ token,
+ port: selector.clone_port(),
+ })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ let mut ev = Event::new(self.token);
+ ev.set_readable();
+
+ self.port.post(ev.to_completion_status())
+ }
+}
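
`Waker::wake` above posts a completion status whose overlapped pointer is null, which is exactly the case `SelectorInner::feed_events` forwards straight through as an event. A minimal model of that hand-off, with a plain queue standing in for the completion port (all names here are illustrative):

```rust
use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
struct Event {
    token: usize,
    readable: bool,
}

// `None` plays the role of a null OVERLAPPED pointer.
type Completion = (Option<&'static str>, Event);

// Mirrors `Waker::wake`: post a ready-made readable event.
fn wake(port: &mut VecDeque<Completion>, token: usize) {
    port.push_back((None, Event { token, readable: true }));
}

// Mirrors the null-overlapped branch of `feed_events`.
fn drain(port: &mut VecDeque<Completion>) -> Vec<Event> {
    let mut events = Vec::new();
    while let Some((overlapped, ev)) = port.pop_front() {
        match overlapped {
            // Null overlapped: a posted waker event, forward it as-is.
            None => events.push(ev),
            // Otherwise we'd dispatch to the I/O completion handler.
            Some(_) => unimplemented!("I/O completions not modeled here"),
        }
    }
    events
}

fn main() {
    let mut port = VecDeque::new();
    wake(&mut port, 10);
    let events = drain(&mut port);
    assert_eq!(events, vec![Event { token: 10, readable: true }]);
}
```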
diff --git a/third_party/rust/mio/src/token.rs b/third_party/rust/mio/src/token.rs
new file mode 100644
index 0000000000..91601cde0c
--- /dev/null
+++ b/third_party/rust/mio/src/token.rs
@@ -0,0 +1,138 @@
+/// Associates readiness events with [`event::Source`]s.
+///
+/// `Token` is a wrapper around `usize` and is used as an argument to
+/// [`Registry::register`] and [`Registry::reregister`].
+///
+/// See [`Poll`] for more documentation on polling.
+///
+/// [`event::Source`]: ./event/trait.Source.html
+/// [`Poll`]: struct.Poll.html
+/// [`Registry::register`]: struct.Registry.html#method.register
+/// [`Registry::reregister`]: struct.Registry.html#method.reregister
+///
+/// # Example
+///
+/// Using `Token` to track which socket generated the event. In this example,
+/// `HashMap` is used, but usually something like [`slab`] is better.
+///
+/// [`slab`]: https://crates.io/crates/slab
+///
+#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Interest, Poll, Token};
+/// use mio::net::TcpListener;
+///
+/// use std::thread;
+/// use std::io::{self, Read};
+/// use std::collections::HashMap;
+///
+/// // After this number of sockets is accepted, the server will shut down.
+/// const MAX_SOCKETS: usize = 32;
+///
+/// // Pick a token that will not be used by any other socket and use that one
+/// // for the listener.
+/// const LISTENER: Token = Token(1024);
+///
+/// // Used to store the sockets.
+/// let mut sockets = HashMap::new();
+///
+/// // This is used to generate a unique token for a socket
+/// let mut next_socket_index = 0;
+///
+/// // The `Poll` instance
+/// let mut poll = Poll::new()?;
+///
+/// // Tcp listener
+/// let mut listener = TcpListener::bind("127.0.0.1:0".parse()?)?;
+///
+/// // Register the listener
+/// poll.registry().register(&mut listener, LISTENER, Interest::READABLE)?;
+///
+/// // Spawn a thread that will connect a bunch of sockets then close them
+/// let addr = listener.local_addr()?;
+/// thread::spawn(move || {
+/// use std::net::TcpStream;
+///
+///     // The +1 connects an extra socket, which signals the server to shut down.
+/// for _ in 0..(MAX_SOCKETS+1) {
+/// // Connect then drop the socket
+/// let _ = TcpStream::connect(addr).unwrap();
+/// }
+/// });
+///
+/// // Event storage
+/// let mut events = Events::with_capacity(1024);
+///
+/// // Read buffer, this will never actually get filled
+/// let mut buf = [0; 256];
+///
+/// // The main event loop
+/// loop {
+/// // Wait for events
+/// poll.poll(&mut events, None)?;
+///
+/// for event in &events {
+/// match event.token() {
+/// LISTENER => {
+/// // Perform operations in a loop until `WouldBlock` is
+/// // encountered.
+/// loop {
+/// match listener.accept() {
+/// Ok((mut socket, _)) => {
+/// // Shutdown the server
+/// if next_socket_index == MAX_SOCKETS {
+/// return Ok(());
+/// }
+///
+/// // Get the token for the socket
+/// let token = Token(next_socket_index);
+/// next_socket_index += 1;
+///
+/// // Register the new socket w/ poll
+/// poll.registry().register(&mut socket, token, Interest::READABLE)?;
+///
+/// // Store the socket
+/// sockets.insert(token, socket);
+/// }
+/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+/// // Socket is not ready anymore, stop accepting
+/// break;
+/// }
+/// e => panic!("err={:?}", e), // Unexpected error
+/// }
+/// }
+/// }
+/// token => {
+/// // Always operate in a loop
+/// loop {
+/// match sockets.get_mut(&token).unwrap().read(&mut buf) {
+/// Ok(0) => {
+/// // Socket is closed, remove it from the map
+/// sockets.remove(&token);
+/// break;
+/// }
+/// // Data is not actually sent in this example
+/// Ok(_) => unreachable!(),
+/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+/// // Socket is not ready anymore, stop reading
+/// break;
+/// }
+/// e => panic!("err={:?}", e), // Unexpected error
+/// }
+/// }
+/// }
+/// }
+/// }
+/// }
+/// # }
+/// ```
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Token(pub usize);
+
+impl From<Token> for usize {
+ fn from(val: Token) -> usize {
+ val.0
+ }
+}
diff --git a/third_party/rust/mio/src/waker.rs b/third_party/rust/mio/src/waker.rs
new file mode 100644
index 0000000000..b0cfe36b65
--- /dev/null
+++ b/third_party/rust/mio/src/waker.rs
@@ -0,0 +1,96 @@
+use crate::{sys, Registry, Token};
+
+use std::io;
+
+/// Waker allows cross-thread waking of [`Poll`].
+///
+/// When created it will cause events with [`readable`] readiness and the
+/// provided `token` if [`wake`] is called, possibly from another thread.
+///
+/// [`Poll`]: struct.Poll.html
+/// [`readable`]: ./event/struct.Event.html#method.is_readable
+/// [`wake`]: struct.Waker.html#method.wake
+///
+/// # Notes
+///
+/// `Waker` events are only guaranteed to be delivered while the `Waker` value
+/// is alive.
+///
+/// Only a single `Waker` can be active per [`Poll`]; if multiple threads need
+/// access to the `Waker` it can be shared via, for example, an `Arc`. What
+/// happens if multiple `Waker`s are registered with the same `Poll` is
+/// unspecified.
+///
+/// # Implementation notes
+///
+/// On platforms that support kqueue this will use the `EVFILT_USER` event
+/// filter, see [implementation notes of `Poll`] to see what platforms support
+/// kqueue. On Linux it uses [eventfd].
+///
+/// [implementation notes of `Poll`]: struct.Poll.html#implementation-notes
+/// [eventfd]: http://man7.org/linux/man-pages/man2/eventfd.2.html
+///
+/// # Examples
+///
+/// Wake a [`Poll`] instance from another thread.
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// use std::thread;
+/// use std::time::Duration;
+/// use std::sync::Arc;
+///
+/// use mio::{Events, Token, Poll, Waker};
+///
+/// const WAKE_TOKEN: Token = Token(10);
+///
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(2);
+///
+/// let waker = Arc::new(Waker::new(poll.registry(), WAKE_TOKEN)?);
+///
+/// // We need to keep the Waker alive, so we'll create a clone for the
+/// // thread we create below.
+/// let waker1 = waker.clone();
+/// let handle = thread::spawn(move || {
+/// // Working hard, or hardly working?
+/// thread::sleep(Duration::from_millis(500));
+///
+/// // Now we'll wake the queue on the other thread.
+/// waker1.wake().expect("unable to wake");
+/// });
+///
+/// // On our current thread we'll poll for events, without a timeout.
+/// poll.poll(&mut events, None)?;
+///
+/// // After about 500 milliseconds we should be awoken by the other thread and
+/// // get a single event.
+/// assert!(!events.is_empty());
+/// let waker_event = events.iter().next().unwrap();
+/// assert!(waker_event.is_readable());
+/// assert_eq!(waker_event.token(), WAKE_TOKEN);
+/// # handle.join().unwrap();
+/// # Ok(())
+/// # }
+/// ```
+#[derive(Debug)]
+pub struct Waker {
+ inner: sys::Waker,
+}
+
+impl Waker {
+ /// Create a new `Waker`.
+ pub fn new(registry: &Registry, token: Token) -> io::Result<Waker> {
+ #[cfg(debug_assertions)]
+ registry.register_waker();
+ sys::Waker::new(registry.selector(), token).map(|inner| Waker { inner })
+ }
+
+ /// Wake up the [`Poll`] associated with this `Waker`.
+ ///
+ /// [`Poll`]: struct.Poll.html
+ pub fn wake(&self) -> io::Result<()> {
+ self.inner.wake()
+ }
+}
diff --git a/third_party/rust/miow/.cargo-checksum.json b/third_party/rust/miow/.cargo-checksum.json
new file mode 100644
index 0000000000..1b3575ee7e
--- /dev/null
+++ b/third_party/rust/miow/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"588aa2a1146d5f0a0fe15b633a07b047f0dd333c8beb6fd9eb959a7493f3fdbc","Cargo.toml":"4ef0d55cf903edac87248410e260dfb8fef8fe19322e2917053a1fd757448876","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"3ea75d353de268b9dbec44e2e9f56c12cd391579ec4ac5855a4eb68dea216ef2","appveyor.yml":"ffdfb9572a6362866bea6787a726b0d4e43f6bb6516f3a38ebdd561859531602","src/handle.rs":"683af650dcd2975e066891bcd570454e14958f9fb291e0d6ee1d563d2ac1e4ce","src/iocp.rs":"7d11b2269ec05288f0ad693142186f90da60a11dcda264aca3f4ff2f2f78fc57","src/lib.rs":"816e6b1806daa5bf6c354829b974f3ec8bf021fa82e1dd11ef8a4030d6868163","src/net.rs":"a3477bf3f4da11dd062d2f1a482588c9f85d7c9218554516a6a0d64aba66bc03","src/overlapped.rs":"90c65c36dbeb95fb1b402b06e97f957c01be2ebb9d43b612bd735ca6e60d3c14","src/pipe.rs":"ee1aecf7114919cec801465a9ce355b18a5cfddeb9cde3a7ad3153ebe717675d"},"package":"b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21"} \ No newline at end of file
diff --git a/third_party/rust/miow/CHANGELOG.md b/third_party/rust/miow/CHANGELOG.md
new file mode 100644
index 0000000000..e16937d5e1
--- /dev/null
+++ b/third_party/rust/miow/CHANGELOG.md
@@ -0,0 +1,5 @@
+
+## [v0.3.7] - 2021-03-22
+### Changed
+- Upgrade `rand` dev-dependency from 0.4 -> 0.8
+- Upgrade `socket2` dependency from 0.3 to 0.4 and make it a dev-dependency
diff --git a/third_party/rust/miow/Cargo.toml b/third_party/rust/miow/Cargo.toml
new file mode 100644
index 0000000000..c32cc3cc40
--- /dev/null
+++ b/third_party/rust/miow/Cargo.toml
@@ -0,0 +1,35 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "miow"
+version = "0.3.7"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+description = "A zero overhead I/O library for Windows, focusing on IOCP and Async I/O\nabstractions.\n"
+homepage = "https://github.com/yoshuawuyts/miow"
+documentation = "https://docs.rs/miow/0.3/x86_64-pc-windows-msvc/miow/"
+readme = "README.md"
+keywords = ["iocp", "windows", "io", "overlapped"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/yoshuawuyts/miow"
+[package.metadata.docs.rs]
+default-target = "x86_64-pc-windows-msvc"
+targets = ["aarch64-pc-windows-msvc", "i686-pc-windows-msvc", "x86_64-pc-windows-msvc"]
+[dependencies.winapi]
+version = "0.3.3"
+features = ["std", "fileapi", "handleapi", "ioapiset", "minwindef", "namedpipeapi", "ntdef", "synchapi", "winerror", "winsock2", "ws2def", "ws2ipdef"]
+[dev-dependencies.rand]
+version = "0.8.0"
+
+[dev-dependencies.socket2]
+version = "0.4.0"
diff --git a/third_party/rust/miow/LICENSE-APACHE b/third_party/rust/miow/LICENSE-APACHE
new file mode 100644
index 0000000000..16fe87b06e
--- /dev/null
+++ b/third_party/rust/miow/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/rust/miow/LICENSE-MIT b/third_party/rust/miow/LICENSE-MIT
new file mode 100644
index 0000000000..39e0ed6602
--- /dev/null
+++ b/third_party/rust/miow/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 Alex Crichton
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/miow/README.md b/third_party/rust/miow/README.md
new file mode 100644
index 0000000000..e6cdddb227
--- /dev/null
+++ b/third_party/rust/miow/README.md
@@ -0,0 +1,31 @@
+# miow
+
+[![Build status](https://ci.appveyor.com/api/projects/status/tc5lsxokjk86949l?svg=true)](https://ci.appveyor.com/project/alexcrichton/miow)
+
+[Documentation](https://docs.rs/miow/0.3/x86_64-pc-windows-msvc/miow/)
+
+A zero overhead Windows I/O library focusing on IOCP and other async I/O
+features.
+
+```toml
+# Cargo.toml
+[dependencies]
+miow = "0.3.6"
+```
+
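+A minimal sketch of the crate's core IOCP round trip, using the
+`CompletionPort` and `CompletionStatus` types from the `iocp` module (error
+handling and real I/O elided):
+
+```rust
+use miow::iocp::{CompletionPort, CompletionStatus};
+
+fn main() -> std::io::Result<()> {
+    // A completion port with a concurrency value of 1.
+    let port = CompletionPort::new(1)?;
+    // Post a custom status and dequeue it again.
+    port.post(CompletionStatus::new(0, 42, std::ptr::null_mut()))?;
+    let status = port.get(None)?;
+    assert_eq!(status.token(), 42);
+    Ok(())
+}
+```
+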
+# License
+
+This project is licensed under either of
+
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+ http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+ http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in miow by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/third_party/rust/miow/appveyor.yml b/third_party/rust/miow/appveyor.yml
new file mode 100644
index 0000000000..2700e425c7
--- /dev/null
+++ b/third_party/rust/miow/appveyor.yml
@@ -0,0 +1,20 @@
+environment:
+ matrix:
+ - TARGET: x86_64-pc-windows-msvc
+ - TARGET: i686-pc-windows-msvc
+ - TARGET: i686-pc-windows-gnu
+ GH_TOKEN:
+ secure: nHB4fVo+y/Aak+L0nYfrT8Rcs8OfUNm0F2xcIVFVYJ9ehf0CzvCmSMUvWguM0kKp
+
+install:
+ - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe"
+ - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust"
+ - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin
+ - SET PATH=%PATH%;C:\MinGW\bin
+ - rustc -V
+ - cargo -V
+
+build: false
+
+test_script:
+ - cargo test --target %TARGET%
diff --git a/third_party/rust/miow/src/handle.rs b/third_party/rust/miow/src/handle.rs
new file mode 100644
index 0000000000..a749fb3269
--- /dev/null
+++ b/third_party/rust/miow/src/handle.rs
@@ -0,0 +1,177 @@
+use std::cmp;
+use std::io;
+use std::ptr;
+
+use winapi::shared::minwindef::*;
+use winapi::shared::ntdef::{BOOLEAN, FALSE, HANDLE, TRUE};
+use winapi::shared::winerror::*;
+use winapi::um::fileapi::*;
+use winapi::um::handleapi::*;
+use winapi::um::ioapiset::*;
+use winapi::um::minwinbase::*;
+
+#[derive(Debug)]
+pub struct Handle(HANDLE);
+
+unsafe impl Send for Handle {}
+unsafe impl Sync for Handle {}
+
+impl Handle {
+ pub fn new(handle: HANDLE) -> Handle {
+ Handle(handle)
+ }
+
+ pub fn raw(&self) -> HANDLE {
+ self.0
+ }
+
+ pub fn into_raw(self) -> HANDLE {
+ use std::mem;
+
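+        // Ownership of the handle moves to the caller, so forget `self` to
+        // keep Drop from closing it.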
+ let ret = self.0;
+ mem::forget(self);
+ ret
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ let mut bytes = 0;
+ let len = cmp::min(buf.len(), <DWORD>::max_value() as usize) as DWORD;
+ crate::cvt(unsafe {
+ WriteFile(
+ self.0,
+ buf.as_ptr() as *const _,
+ len,
+ &mut bytes,
+ 0 as *mut _,
+ )
+ })?;
+ Ok(bytes as usize)
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut bytes = 0;
+ let len = cmp::min(buf.len(), <DWORD>::max_value() as usize) as DWORD;
+ crate::cvt(unsafe {
+ ReadFile(
+ self.0,
+ buf.as_mut_ptr() as *mut _,
+ len,
+ &mut bytes,
+ 0 as *mut _,
+ )
+ })?;
+ Ok(bytes as usize)
+ }
+
+ pub unsafe fn read_overlapped(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ self.read_overlapped_helper(buf, overlapped, FALSE)
+ }
+
+ pub unsafe fn read_overlapped_wait(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<usize> {
+ match self.read_overlapped_helper(buf, overlapped, TRUE) {
+ Ok(Some(bytes)) => Ok(bytes),
+ Ok(None) => panic!("logic error"),
+ Err(e) => Err(e),
+ }
+ }
+
+ pub unsafe fn read_overlapped_helper(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut OVERLAPPED,
+ wait: BOOLEAN,
+ ) -> io::Result<Option<usize>> {
+ let len = cmp::min(buf.len(), <DWORD>::max_value() as usize) as DWORD;
+ let res = crate::cvt({
+ ReadFile(
+ self.0,
+ buf.as_mut_ptr() as *mut _,
+ len,
+ ptr::null_mut(),
+ overlapped,
+ )
+ });
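+        // ERROR_IO_PENDING is not a failure: the read has been queued and will
+        // complete asynchronously, so fall through to GetOverlappedResult below.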
+ match res {
+ Ok(_) => (),
+ Err(ref e) if e.raw_os_error() == Some(ERROR_IO_PENDING as i32) => (),
+ Err(e) => return Err(e),
+ }
+
+ let mut bytes = 0;
+ let res = crate::cvt({ GetOverlappedResult(self.0, overlapped, &mut bytes, wait as BOOL) });
+ match res {
+ Ok(_) => Ok(Some(bytes as usize)),
+ Err(ref e) if e.raw_os_error() == Some(ERROR_IO_INCOMPLETE as i32) && wait == FALSE => {
+ Ok(None)
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ pub unsafe fn write_overlapped(
+ &self,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ self.write_overlapped_helper(buf, overlapped, FALSE)
+ }
+
+ pub unsafe fn write_overlapped_wait(
+ &self,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<usize> {
+ match self.write_overlapped_helper(buf, overlapped, TRUE) {
+ Ok(Some(bytes)) => Ok(bytes),
+ Ok(None) => panic!("logic error"),
+ Err(e) => Err(e),
+ }
+ }
+
+ unsafe fn write_overlapped_helper(
+ &self,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ wait: BOOLEAN,
+ ) -> io::Result<Option<usize>> {
+ let len = cmp::min(buf.len(), <DWORD>::max_value() as usize) as DWORD;
+ let res = crate::cvt({
+ WriteFile(
+ self.0,
+ buf.as_ptr() as *const _,
+ len,
+ ptr::null_mut(),
+ overlapped,
+ )
+ });
+ match res {
+ Ok(_) => (),
+ Err(ref e) if e.raw_os_error() == Some(ERROR_IO_PENDING as i32) => (),
+ Err(e) => return Err(e),
+ }
+
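+        // With `wait` set to TRUE this blocks until the I/O finishes; with
+        // FALSE a still-pending operation surfaces as ERROR_IO_INCOMPLETE and
+        // is reported as `Ok(None)`.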
+ let mut bytes = 0;
+ let res = crate::cvt({ GetOverlappedResult(self.0, overlapped, &mut bytes, wait as BOOL) });
+ match res {
+ Ok(_) => Ok(Some(bytes as usize)),
+ Err(ref e) if e.raw_os_error() == Some(ERROR_IO_INCOMPLETE as i32) && wait == FALSE => {
+ Ok(None)
+ }
+ Err(e) => Err(e),
+ }
+ }
+}
+
+impl Drop for Handle {
+ fn drop(&mut self) {
+ unsafe { CloseHandle(self.0) };
+ }
+}
diff --git a/third_party/rust/miow/src/iocp.rs b/third_party/rust/miow/src/iocp.rs
new file mode 100644
index 0000000000..d862d6bcfa
--- /dev/null
+++ b/third_party/rust/miow/src/iocp.rs
@@ -0,0 +1,328 @@
+//! Bindings to IOCP, I/O Completion Ports
+
+use std::cmp;
+use std::fmt;
+use std::io;
+use std::mem;
+use std::os::windows::io::*;
+use std::time::Duration;
+
+use crate::handle::Handle;
+use crate::Overlapped;
+use winapi::shared::basetsd::*;
+use winapi::shared::ntdef::*;
+use winapi::um::handleapi::*;
+use winapi::um::ioapiset::*;
+use winapi::um::minwinbase::*;
+
+/// A handle to a Windows I/O Completion Port.
+#[derive(Debug)]
+pub struct CompletionPort {
+ handle: Handle,
+}
+
+/// A status message received from an I/O completion port.
+///
+/// These statuses can be created via the `new` or `zero` constructors and then
+/// provided to a completion port, or they are read out of a completion port.
+/// The fields of each status are read through its accessor methods.
+#[derive(Clone, Copy)]
+pub struct CompletionStatus(OVERLAPPED_ENTRY);
+
+impl fmt::Debug for CompletionStatus {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "CompletionStatus(OVERLAPPED_ENTRY)")
+ }
+}
+
+unsafe impl Send for CompletionStatus {}
+unsafe impl Sync for CompletionStatus {}
+
+impl CompletionPort {
+ /// Creates a new I/O completion port with the specified concurrency value.
+ ///
+ /// The number of threads given corresponds to the level of concurrency
+ /// allowed for threads associated with this port. Consult the Windows
+ /// documentation for more information about this value.
+ pub fn new(threads: u32) -> io::Result<CompletionPort> {
+ let ret = unsafe { CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0 as *mut _, 0, threads) };
+ if ret.is_null() {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(CompletionPort {
+ handle: Handle::new(ret),
+ })
+ }
+ }
+
+    /// Associates a new `HANDLE` with this I/O completion port.
+    ///
+    /// This function will associate the given handle with this port, and the
+    /// given `token` will be returned in status messages whenever the port
+    /// receives a notification for it.
+ ///
+ /// Any object which is convertible to a `HANDLE` via the `AsRawHandle`
+ /// trait can be provided to this function, such as `std::fs::File` and
+ /// friends.
+ pub fn add_handle<T: AsRawHandle + ?Sized>(&self, token: usize, t: &T) -> io::Result<()> {
+ self._add(token, t.as_raw_handle())
+ }
+
+    /// Associates a new `SOCKET` with this I/O completion port.
+    ///
+    /// This function will associate the given socket with this port, and the
+    /// given `token` will be returned in status messages whenever the port
+    /// receives a notification for it.
+ ///
+ /// Any object which is convertible to a `SOCKET` via the `AsRawSocket`
+ /// trait can be provided to this function, such as `std::net::TcpStream`
+ /// and friends.
+ pub fn add_socket<T: AsRawSocket + ?Sized>(&self, token: usize, t: &T) -> io::Result<()> {
+ self._add(token, t.as_raw_socket() as HANDLE)
+ }
+
+ fn _add(&self, token: usize, handle: HANDLE) -> io::Result<()> {
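+        // Completion keys are ULONG_PTR-sized, so this assert guarantees a
+        // usize token round-trips through the port losslessly.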
+ assert_eq!(mem::size_of_val(&token), mem::size_of::<ULONG_PTR>());
+ let ret =
+ unsafe { CreateIoCompletionPort(handle, self.handle.raw(), token as ULONG_PTR, 0) };
+ if ret.is_null() {
+ Err(io::Error::last_os_error())
+ } else {
+ debug_assert_eq!(ret, self.handle.raw());
+ Ok(())
+ }
+ }
+
+ /// Dequeue a completion status from this I/O completion port.
+ ///
+ /// This function will associate the calling thread with this completion
+ /// port and then wait for a status message to become available. The precise
+ /// semantics on when this function returns depends on the concurrency value
+ /// specified when the port was created.
+ ///
+ /// A timeout can optionally be specified to this function. If `None` is
+    /// provided this function will not time out; otherwise it will time out
+ /// after the specified duration has passed.
+ ///
+ /// On success this will return the status message which was dequeued from
+ /// this completion port.
+ pub fn get(&self, timeout: Option<Duration>) -> io::Result<CompletionStatus> {
+ let mut bytes = 0;
+ let mut token = 0;
+ let mut overlapped = 0 as *mut _;
+ let timeout = crate::dur2ms(timeout);
+ let ret = unsafe {
+ GetQueuedCompletionStatus(
+ self.handle.raw(),
+ &mut bytes,
+ &mut token,
+ &mut overlapped,
+ timeout,
+ )
+ };
+ crate::cvt(ret).map(|_| {
+ CompletionStatus(OVERLAPPED_ENTRY {
+ dwNumberOfBytesTransferred: bytes,
+ lpCompletionKey: token,
+ lpOverlapped: overlapped,
+ Internal: 0,
+ })
+ })
+ }
+
+ /// Dequeues a number of completion statuses from this I/O completion port.
+ ///
+ /// This function is the same as `get` except that it may return more than
+ /// one status. A buffer of "zero" statuses is provided (the contents are
+ /// not read) and then on success this function will return a sub-slice of
+ /// statuses which represent those which were dequeued from this port. This
+ /// function does not wait to fill up the entire list of statuses provided.
+ ///
+ /// Like with `get`, a timeout may be specified for this operation.
+ pub fn get_many<'a>(
+ &self,
+ list: &'a mut [CompletionStatus],
+ timeout: Option<Duration>,
+ ) -> io::Result<&'a mut [CompletionStatus]> {
+ debug_assert_eq!(
+ mem::size_of::<CompletionStatus>(),
+ mem::size_of::<OVERLAPPED_ENTRY>()
+ );
+ let mut removed = 0;
+ let timeout = crate::dur2ms(timeout);
+ let len = cmp::min(list.len(), <ULONG>::max_value() as usize) as ULONG;
+ let ret = unsafe {
+ GetQueuedCompletionStatusEx(
+ self.handle.raw(),
+ list.as_ptr() as *mut _,
+ len,
+ &mut removed,
+ timeout,
+ FALSE as i32,
+ )
+ };
+ match crate::cvt(ret) {
+ Ok(_) => Ok(&mut list[..removed as usize]),
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Posts a new completion status onto this I/O completion port.
+ ///
+ /// This function will post the given status, with custom parameters, to the
+ /// port. Threads blocked in `get` or `get_many` will eventually receive
+ /// this status.
+ pub fn post(&self, status: CompletionStatus) -> io::Result<()> {
+ let ret = unsafe {
+ PostQueuedCompletionStatus(
+ self.handle.raw(),
+ status.0.dwNumberOfBytesTransferred,
+ status.0.lpCompletionKey,
+ status.0.lpOverlapped,
+ )
+ };
+ crate::cvt(ret).map(|_| ())
+ }
+}
+
+impl AsRawHandle for CompletionPort {
+ fn as_raw_handle(&self) -> HANDLE {
+ self.handle.raw()
+ }
+}
+
+impl FromRawHandle for CompletionPort {
+ unsafe fn from_raw_handle(handle: HANDLE) -> CompletionPort {
+ CompletionPort {
+ handle: Handle::new(handle),
+ }
+ }
+}
+
+impl IntoRawHandle for CompletionPort {
+ fn into_raw_handle(self) -> HANDLE {
+ self.handle.into_raw()
+ }
+}
+
+impl CompletionStatus {
+ /// Creates a new completion status with the provided parameters.
+ ///
+ /// This function is useful when creating a status to send to a port with
+ /// the `post` method. The parameters are opaquely passed through and not
+ /// interpreted by the system at all.
+ pub fn new(bytes: u32, token: usize, overlapped: *mut Overlapped) -> CompletionStatus {
+ assert_eq!(mem::size_of_val(&token), mem::size_of::<ULONG_PTR>());
+ CompletionStatus(OVERLAPPED_ENTRY {
+ dwNumberOfBytesTransferred: bytes,
+ lpCompletionKey: token as ULONG_PTR,
+ lpOverlapped: overlapped as *mut _,
+ Internal: 0,
+ })
+ }
+
+    /// Creates a borrowed completion status from the provided
+    /// `OVERLAPPED_ENTRY` reference.
+ ///
+ /// This method will wrap the `OVERLAPPED_ENTRY` in a `CompletionStatus`,
+ /// returning the wrapped structure.
+ pub fn from_entry(entry: &OVERLAPPED_ENTRY) -> &CompletionStatus {
+ unsafe { &*(entry as *const _ as *const _) }
+ }
+
+ /// Creates a new "zero" completion status.
+ ///
+ /// This function is useful when creating a stack buffer or vector of
+ /// completion statuses to be passed to the `get_many` function.
+ pub fn zero() -> CompletionStatus {
+ CompletionStatus::new(0, 0, 0 as *mut _)
+ }
+
+ /// Returns the number of bytes that were transferred for the I/O operation
+ /// associated with this completion status.
+ pub fn bytes_transferred(&self) -> u32 {
+ self.0.dwNumberOfBytesTransferred
+ }
+
+ /// Returns the completion key value associated with the file handle whose
+ /// I/O operation has completed.
+ ///
+ /// A completion key is a per-handle key that is specified when it is added
+ /// to an I/O completion port via `add_handle` or `add_socket`.
+ pub fn token(&self) -> usize {
+ self.0.lpCompletionKey as usize
+ }
+
+ /// Returns a pointer to the `Overlapped` structure that was specified when
+ /// the I/O operation was started.
+ pub fn overlapped(&self) -> *mut OVERLAPPED {
+ self.0.lpOverlapped
+ }
+
+ /// Returns a pointer to the internal `OVERLAPPED_ENTRY` object.
+ pub fn entry(&self) -> &OVERLAPPED_ENTRY {
+ &self.0
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::mem;
+ use std::time::Duration;
+
+ use winapi::shared::basetsd::*;
+ use winapi::shared::winerror::*;
+
+ use crate::iocp::{CompletionPort, CompletionStatus};
+
+ #[test]
+ fn is_send_sync() {
+ fn is_send_sync<T: Send + Sync>() {}
+ is_send_sync::<CompletionPort>();
+ }
+
+ #[test]
+ fn token_right_size() {
+ assert_eq!(mem::size_of::<usize>(), mem::size_of::<ULONG_PTR>());
+ }
+
+ #[test]
+ fn timeout() {
+ let c = CompletionPort::new(1).unwrap();
+ let err = c.get(Some(Duration::from_millis(1))).unwrap_err();
+ assert_eq!(err.raw_os_error(), Some(WAIT_TIMEOUT as i32));
+ }
+
+ #[test]
+ fn get() {
+ let c = CompletionPort::new(1).unwrap();
+ c.post(CompletionStatus::new(1, 2, 3 as *mut _)).unwrap();
+ let s = c.get(None).unwrap();
+ assert_eq!(s.bytes_transferred(), 1);
+ assert_eq!(s.token(), 2);
+ assert_eq!(s.overlapped(), 3 as *mut _);
+ }
+
+ #[test]
+ fn get_many() {
+ let c = CompletionPort::new(1).unwrap();
+
+ c.post(CompletionStatus::new(1, 2, 3 as *mut _)).unwrap();
+ c.post(CompletionStatus::new(4, 5, 6 as *mut _)).unwrap();
+
+ let mut s = vec![CompletionStatus::zero(); 4];
+ {
+ let s = c.get_many(&mut s, None).unwrap();
+ assert_eq!(s.len(), 2);
+ assert_eq!(s[0].bytes_transferred(), 1);
+ assert_eq!(s[0].token(), 2);
+ assert_eq!(s[0].overlapped(), 3 as *mut _);
+ assert_eq!(s[1].bytes_transferred(), 4);
+ assert_eq!(s[1].token(), 5);
+ assert_eq!(s[1].overlapped(), 6 as *mut _);
+ }
+ assert_eq!(s[2].bytes_transferred(), 0);
+ assert_eq!(s[2].token(), 0);
+ assert_eq!(s[2].overlapped(), 0 as *mut _);
+ }
+}
diff --git a/third_party/rust/miow/src/lib.rs b/third_party/rust/miow/src/lib.rs
new file mode 100644
index 0000000000..53c01aeeae
--- /dev/null
+++ b/third_party/rust/miow/src/lib.rs
@@ -0,0 +1,52 @@
+//! A zero overhead Windows I/O library
+
+#![cfg(windows)]
+#![deny(missing_docs)]
+#![allow(bad_style)]
+#![doc(html_root_url = "https://docs.rs/miow/0.3/x86_64-pc-windows-msvc/")]
+
+use std::cmp;
+use std::io;
+use std::time::Duration;
+
+use winapi::shared::minwindef::*;
+use winapi::um::winbase::*;
+
+#[cfg(test)]
+macro_rules! t {
+ ($e:expr) => {
+ match $e {
+ Ok(e) => e,
+ Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+ }
+ };
+}
+
+mod handle;
+mod overlapped;
+
+pub mod iocp;
+pub mod net;
+pub mod pipe;
+
+pub use crate::overlapped::Overlapped;
+
+fn cvt(i: BOOL) -> io::Result<BOOL> {
+ if i == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(i)
+ }
+}
+
+fn dur2ms(dur: Option<Duration>) -> u32 {
+ let dur = match dur {
+ Some(dur) => dur,
+ None => return INFINITE,
+ };
+ let ms = dur.as_secs().checked_mul(1_000);
+ let ms_extra = dur.subsec_nanos() / 1_000_000;
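+    // Clamp the result to u32 and saturate any overflow to INFINITE - 1 so a
+    // huge Duration never silently becomes INFINITE (an infinite wait).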
+ ms.and_then(|ms| ms.checked_add(ms_extra as u64))
+ .map(|ms| cmp::min(u32::max_value() as u64, ms) as u32)
+ .unwrap_or(INFINITE - 1)
+}
diff --git a/third_party/rust/miow/src/net.rs b/third_party/rust/miow/src/net.rs
new file mode 100644
index 0000000000..e30bc2d2bf
--- /dev/null
+++ b/third_party/rust/miow/src/net.rs
@@ -0,0 +1,1332 @@
+//! Extensions and types for the standard networking primitives.
+//!
+//! This module contains a number of extension traits for the types in
+//! `std::net` for Windows-specific functionality.
+
+use std::cmp;
+use std::io;
+use std::mem;
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6};
+use std::net::{SocketAddr, TcpListener, TcpStream, UdpSocket};
+use std::os::windows::prelude::*;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use winapi::ctypes::*;
+use winapi::shared::guiddef::*;
+use winapi::shared::in6addr::{in6_addr_u, IN6_ADDR};
+use winapi::shared::inaddr::{in_addr_S_un, IN_ADDR};
+use winapi::shared::minwindef::*;
+use winapi::shared::minwindef::{FALSE, TRUE};
+use winapi::shared::ntdef::*;
+use winapi::shared::ws2def::SOL_SOCKET;
+use winapi::shared::ws2def::*;
+use winapi::shared::ws2ipdef::*;
+use winapi::um::minwinbase::*;
+use winapi::um::winsock2::*;
+
+/// A type to represent a buffer in which a socket address will be stored.
+///
+/// This type is used with the `recv_from_overlapped` function on the
+/// `UdpSocketExt` trait to provide space for the overlapped I/O operation to
+/// fill in the address upon completion.
+#[derive(Clone, Copy)]
+pub struct SocketAddrBuf {
+ buf: SOCKADDR_STORAGE,
+ len: c_int,
+}
+
+/// A type to represent a buffer in which an accepted socket's address will be
+/// stored.
+///
+/// This type is used with the `accept_overlapped` method on the
+/// `TcpListenerExt` trait to provide space for the overlapped I/O operation to
+/// fill in the socket addresses upon completion.
+#[repr(C)]
+pub struct AcceptAddrsBuf {
+ // For AcceptEx we've got the restriction that the addresses passed in that
+ // buffer need to be at least 16 bytes more than the maximum address length
+ // for the protocol in question, so add some extra here and there
+ local: SOCKADDR_STORAGE,
+ _pad1: [u8; 16],
+ remote: SOCKADDR_STORAGE,
+ _pad2: [u8; 16],
+}
+
+/// The parsed return value of `AcceptAddrsBuf`.
+pub struct AcceptAddrs<'a> {
+ local: LPSOCKADDR,
+ local_len: c_int,
+ remote: LPSOCKADDR,
+ remote_len: c_int,
+ _data: &'a AcceptAddrsBuf,
+}
+
+struct WsaExtension {
+ guid: GUID,
+ val: AtomicUsize,
+}
+
+/// Additional methods for the `TcpStream` type in the standard library.
+pub trait TcpStreamExt {
+ /// Execute an overlapped read I/O operation on this TCP stream.
+ ///
+ /// This function will issue an overlapped I/O read (via `WSARecv`) on this
+ /// socket. The provided buffer will be filled in when the operation
+ /// completes and the given `OVERLAPPED` instance is used to track the
+ /// overlapped operation.
+ ///
+ /// If the operation succeeds, `Ok(Some(n))` is returned indicating how
+ /// many bytes were read. If the operation returns an error indicating that
+ /// the I/O is currently pending, `Ok(None)` is returned. Otherwise, the
+ /// error associated with the operation is returned and no overlapped
+ /// operation is enqueued.
+ ///
+ /// The number of bytes read will be returned as part of the completion
+ /// notification when the I/O finishes.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the `buf` and
+ /// `overlapped` pointers are valid until the end of the I/O operation. The
+ /// kernel also requires that `overlapped` is unique for this I/O operation
+ /// and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that these two input
+ /// pointers are valid until the I/O operation is completed, typically via
+ /// completion ports and waiting to receive the completion notification on
+ /// the port.
+ unsafe fn read_overlapped(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>>;
+
+ /// Execute an overlapped write I/O operation on this TCP stream.
+ ///
+ /// This function will issue an overlapped I/O write (via `WSASend`) on this
+ /// socket. The provided buffer will be written when the operation completes
+ /// and the given `OVERLAPPED` instance is used to track the overlapped
+ /// operation.
+ ///
+ /// If the operation succeeds, `Ok(Some(n))` is returned where `n` is the
+ /// number of bytes that were written. If the operation returns an error
+ /// indicating that the I/O is currently pending, `Ok(None)` is returned.
+ /// Otherwise, the error associated with the operation is returned and no
+ /// overlapped operation is enqueued.
+ ///
+ /// The number of bytes written will be returned as part of the completion
+ /// notification when the I/O finishes.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the `buf` and
+ /// `overlapped` pointers are valid until the end of the I/O operation. The
+ /// kernel also requires that `overlapped` is unique for this I/O operation
+ /// and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that these two input
+ /// pointers are valid until the I/O operation is completed, typically via
+ /// completion ports and waiting to receive the completion notification on
+ /// the port.
+ unsafe fn write_overlapped(
+ &self,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>>;
+
+    /// Execute an overlapped connect operation on this TCP stream.
+    ///
+    /// This function will issue a connect operation (via `ConnectEx`) to the
+    /// specified address, flagging it as an overlapped operation which will
+    /// complete asynchronously.
+ ///
+ /// The `buf` argument provided is an initial buffer of data that should be
+ /// sent after the connection is initiated. It's acceptable to
+ /// pass an empty slice here.
+ ///
+ /// This function will also return whether the connect immediately
+ /// succeeded or not. If `None` is returned then the I/O operation is still
+ /// pending and will complete at a later date, and if `Some(bytes)` is
+ /// returned then that many bytes were transferred.
+ ///
+ /// Note that to succeed this requires that the underlying socket has
+ /// previously been bound via a call to `bind` to a local address.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the
+    /// `overlapped` and `buf` pointers be valid until the end of the I/O
+ /// operation. The kernel also requires that `overlapped` is unique for
+ /// this I/O operation and is not in use for any other I/O.
+ ///
+    /// To safely use this function callers must ensure that these pointers are
+    /// valid until the I/O operation is completed, typically via completion
+ /// ports and waiting to receive the completion notification on the port.
+ unsafe fn connect_overlapped(
+ &self,
+ addr: &SocketAddr,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>>;
+
+ /// Once a `connect_overlapped` has finished, this function needs to be
+ /// called to finish the connect operation.
+ ///
+ /// Currently this just calls `setsockopt` with `SO_UPDATE_CONNECT_CONTEXT`
+ /// to ensure that further functions like `getpeername` and `getsockname`
+ /// work correctly.
+ fn connect_complete(&self) -> io::Result<()>;
+
+ /// Calls the `GetOverlappedResult` function to get the result of an
+ /// overlapped operation for this handle.
+ ///
+ /// This function takes the `OVERLAPPED` argument which must have been used
+ /// to initiate an overlapped I/O operation, and returns either the
+ /// successful number of bytes transferred during the operation or an error
+ /// if one occurred, along with the results of the `lpFlags` parameter of
+ /// the relevant operation, if applicable.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe as `overlapped` must have previously been used
+ /// to execute an operation for this handle, and it must also be a valid
+ /// pointer to an `OVERLAPPED` instance.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic
+ unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result<(usize, u32)>;
+}
+
+/// Additional methods for the `UdpSocket` type in the standard library.
+pub trait UdpSocketExt {
+ /// Execute an overlapped receive I/O operation on this UDP socket.
+ ///
+ /// This function will issue an overlapped I/O read (via `WSARecvFrom`) on
+ /// this socket. The provided buffer will be filled in when the operation
+    /// completes, the address the data came from will be written to
+ /// `addr`, and the given `OVERLAPPED` instance is used to track the
+ /// overlapped operation.
+ ///
+ /// If the operation succeeds, `Ok(Some(n))` is returned where `n` is the
+ /// number of bytes that were read. If the operation returns an error
+ /// indicating that the I/O is currently pending, `Ok(None)` is returned.
+ /// Otherwise, the error associated with the operation is returned and no
+ /// overlapped operation is enqueued.
+ ///
+ /// The number of bytes read will be returned as part of the completion
+ /// notification when the I/O finishes.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the `buf`,
+ /// `addr`, and `overlapped` pointers are valid until the end of the I/O
+ /// operation. The kernel also requires that `overlapped` is unique for this
+ /// I/O operation and is not in use for any other I/O.
+ ///
+    /// To safely use this function callers must ensure that these three input
+ /// pointers are valid until the I/O operation is completed, typically via
+ /// completion ports and waiting to receive the completion notification on
+ /// the port.
+ unsafe fn recv_from_overlapped(
+ &self,
+ buf: &mut [u8],
+ addr: *mut SocketAddrBuf,
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>>;
+
+ /// Execute an overlapped receive I/O operation on this UDP socket.
+ ///
+ /// This function will issue an overlapped I/O read (via `WSARecv`) on
+    /// this socket. The provided buffer will be filled in when the operation
+    /// completes and the given `OVERLAPPED` instance is used to track the
+    /// overlapped operation.
+ ///
+ /// If the operation succeeds, `Ok(Some(n))` is returned where `n` is the
+ /// number of bytes that were read. If the operation returns an error
+ /// indicating that the I/O is currently pending, `Ok(None)` is returned.
+ /// Otherwise, the error associated with the operation is returned and no
+ /// overlapped operation is enqueued.
+ ///
+ /// The number of bytes read will be returned as part of the completion
+ /// notification when the I/O finishes.
+ ///
+ /// # Unsafety
+ ///
+    /// This function is unsafe because the kernel requires that the `buf`
+    /// and `overlapped` pointers are valid until the end of the I/O
+ /// operation. The kernel also requires that `overlapped` is unique for this
+ /// I/O operation and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that these two input
+ /// pointers are valid until the I/O operation is completed, typically via
+ /// completion ports and waiting to receive the completion notification on
+ /// the port.
+ unsafe fn recv_overlapped(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>>;
+
+ /// Execute an overlapped send I/O operation on this UDP socket.
+ ///
+ /// This function will issue an overlapped I/O write (via `WSASendTo`) on
+ /// this socket to the address specified by `addr`. The provided buffer will
+ /// be written when the operation completes and the given `OVERLAPPED`
+ /// instance is used to track the overlapped operation.
+ ///
+    /// If the operation succeeds, `Ok(Some(n))` is returned where `n` bytes
+    /// were written. If the operation returns an error indicating that the I/O
+ /// is currently pending, `Ok(None)` is returned. Otherwise, the error
+ /// associated with the operation is returned and no overlapped operation
+ /// is enqueued.
+ ///
+ /// The number of bytes written will be returned as part of the completion
+ /// notification when the I/O finishes.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the `buf` and
+ /// `overlapped` pointers are valid until the end of the I/O operation. The
+ /// kernel also requires that `overlapped` is unique for this I/O operation
+ /// and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that these two input
+ /// pointers are valid until the I/O operation is completed, typically via
+ /// completion ports and waiting to receive the completion notification on
+ /// the port.
+ unsafe fn send_to_overlapped(
+ &self,
+ buf: &[u8],
+ addr: &SocketAddr,
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>>;
+
+ /// Execute an overlapped send I/O operation on this UDP socket.
+ ///
+ /// This function will issue an overlapped I/O write (via `WSASend`) on
+ /// this socket to the address it was previously connected to. The provided
+ /// buffer will be written when the operation completes and the given `OVERLAPPED`
+ /// instance is used to track the overlapped operation.
+ ///
+    /// If the operation succeeds, `Ok(Some(n))` is returned where `n` bytes
+    /// were written. If the operation returns an error indicating that the I/O
+ /// is currently pending, `Ok(None)` is returned. Otherwise, the error
+ /// associated with the operation is returned and no overlapped operation
+ /// is enqueued.
+ ///
+ /// The number of bytes written will be returned as part of the completion
+ /// notification when the I/O finishes.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the `buf` and
+ /// `overlapped` pointers are valid until the end of the I/O operation. The
+ /// kernel also requires that `overlapped` is unique for this I/O operation
+ /// and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that these two input
+ /// pointers are valid until the I/O operation is completed, typically via
+ /// completion ports and waiting to receive the completion notification on
+ /// the port.
+ unsafe fn send_overlapped(
+ &self,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>>;
+
+ /// Calls the `GetOverlappedResult` function to get the result of an
+ /// overlapped operation for this handle.
+ ///
+ /// This function takes the `OVERLAPPED` argument which must have been used
+ /// to initiate an overlapped I/O operation, and returns either the
+ /// successful number of bytes transferred during the operation or an error
+ /// if one occurred, along with the results of the `lpFlags` parameter of
+ /// the relevant operation, if applicable.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe as `overlapped` must have previously been used
+ /// to execute an operation for this handle, and it must also be a valid
+ /// pointer to an `OVERLAPPED` instance.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic
+ unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result<(usize, u32)>;
+}
+
+/// Additional methods for the `TcpListener` type in the standard library.
+pub trait TcpListenerExt {
+ /// Perform an accept operation on this listener, accepting a connection in
+ /// an overlapped fashion.
+ ///
+ /// This function will issue an I/O request to accept an incoming connection
+ /// with the specified overlapped instance. The `socket` provided must be a
+    /// configured but not bound or connected socket; once the accept
+    /// completes, that socket becomes the connected stream.
+ ///
+ /// The `addrs` buffer provided will be filled in with the local and remote
+ /// addresses of the connection upon completion.
+ ///
+ /// If the accept succeeds immediately, `Ok(true)` is returned. If
+    /// the accept indicates that the I/O is currently pending, `Ok(false)` is
+ /// returned. Otherwise, the error associated with the operation is
+ /// returned and no overlapped operation is enqueued.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the
+ /// `addrs` and `overlapped` pointers are valid until the end of the I/O
+ /// operation. The kernel also requires that `overlapped` is unique for this
+ /// I/O operation and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that the pointers are
+ /// valid until the I/O operation is completed, typically via completion
+ /// ports and waiting to receive the completion notification on the port.
+ unsafe fn accept_overlapped(
+ &self,
+ socket: &TcpStream,
+ addrs: &mut AcceptAddrsBuf,
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<bool>;
+
+ /// Once an `accept_overlapped` has finished, this function needs to be
+ /// called to finish the accept operation.
+ ///
+ /// Currently this just calls `setsockopt` with `SO_UPDATE_ACCEPT_CONTEXT`
+ /// to ensure that further functions like `getpeername` and `getsockname`
+ /// work correctly.
+ fn accept_complete(&self, socket: &TcpStream) -> io::Result<()>;
+
+ /// Calls the `GetOverlappedResult` function to get the result of an
+ /// overlapped operation for this handle.
+ ///
+ /// This function takes the `OVERLAPPED` argument which must have been used
+ /// to initiate an overlapped I/O operation, and returns either the
+ /// successful number of bytes transferred during the operation or an error
+ /// if one occurred, along with the results of the `lpFlags` parameter of
+ /// the relevant operation, if applicable.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe as `overlapped` must have previously been used
+ /// to execute an operation for this handle, and it must also be a valid
+ /// pointer to an `OVERLAPPED` instance.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic
+ unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result<(usize, u32)>;
+}
+
+#[doc(hidden)]
+trait NetInt {
+ fn from_be(i: Self) -> Self;
+ fn to_be(&self) -> Self;
+}
+macro_rules! doit {
+ ($($t:ident)*) => ($(impl NetInt for $t {
+ fn from_be(i: Self) -> Self { <$t>::from_be(i) }
+ fn to_be(&self) -> Self { <$t>::to_be(*self) }
+ })*)
+}
+doit! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
+
+// fn hton<I: NetInt>(i: I) -> I { i.to_be() }
+fn ntoh<I: NetInt>(i: I) -> I {
+ I::from_be(i)
+}
+
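+// Map the thread's last WSA error into this module's return convention: a
+// pending overlapped operation (WSA_IO_PENDING) is reported as `Ok(None)`.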
+fn last_err() -> io::Result<Option<usize>> {
+ let err = unsafe { WSAGetLastError() };
+ if err == WSA_IO_PENDING as i32 {
+ Ok(None)
+ } else {
+ Err(io::Error::from_raw_os_error(err))
+ }
+}
+
+fn cvt(i: c_int, size: DWORD) -> io::Result<Option<usize>> {
+ if i == SOCKET_ERROR {
+ last_err()
+ } else {
+ Ok(Some(size as usize))
+ }
+}
+
+/// A type with the same memory layout as `SOCKADDR`. Used in converting Rust level
+/// SocketAddr* types into their system representation. The benefit of this specific
+/// type over using `SOCKADDR_STORAGE` is that this type is exactly as large as it
+/// needs to be and not a lot larger, and it can be initialized more cleanly from Rust.
+#[repr(C)]
+pub(crate) union SocketAddrCRepr {
+ v4: SOCKADDR_IN,
+ v6: SOCKADDR_IN6_LH,
+}
+
+impl SocketAddrCRepr {
+ pub(crate) fn as_ptr(&self) -> *const SOCKADDR {
+ self as *const _ as *const SOCKADDR
+ }
+}
+
+fn socket_addr_to_ptrs(addr: &SocketAddr) -> (SocketAddrCRepr, c_int) {
+ match *addr {
+ SocketAddr::V4(ref a) => {
+ let sin_addr = unsafe {
+ let mut s_un = mem::zeroed::<in_addr_S_un>();
+ *s_un.S_addr_mut() = u32::from_ne_bytes(a.ip().octets());
+ IN_ADDR { S_un: s_un }
+ };
+
+ let sockaddr_in = SOCKADDR_IN {
+ sin_family: AF_INET as ADDRESS_FAMILY,
+ sin_port: a.port().to_be(),
+ sin_addr,
+ sin_zero: [0; 8],
+ };
+
+ let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
+ (sockaddr, mem::size_of::<SOCKADDR_IN>() as c_int)
+ }
+ SocketAddr::V6(ref a) => {
+ let sin6_addr = unsafe {
+ let mut u = mem::zeroed::<in6_addr_u>();
+ *u.Byte_mut() = a.ip().octets();
+ IN6_ADDR { u }
+ };
+ let u = unsafe {
+ let mut u = mem::zeroed::<SOCKADDR_IN6_LH_u>();
+ *u.sin6_scope_id_mut() = a.scope_id();
+ u
+ };
+
+ let sockaddr_in6 = SOCKADDR_IN6_LH {
+ sin6_family: AF_INET6 as ADDRESS_FAMILY,
+ sin6_port: a.port().to_be(),
+ sin6_addr,
+ sin6_flowinfo: a.flowinfo(),
+ u,
+ };
+
+ let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
+ (sockaddr, mem::size_of::<SOCKADDR_IN6_LH>() as c_int)
+ }
+ }
+}
+
+unsafe fn ptrs_to_socket_addr(ptr: *const SOCKADDR, len: c_int) -> Option<SocketAddr> {
+ if (len as usize) < mem::size_of::<c_int>() {
+ return None;
+ }
+ match (*ptr).sa_family as i32 {
+ AF_INET if len as usize >= mem::size_of::<SOCKADDR_IN>() => {
+ let b = &*(ptr as *const SOCKADDR_IN);
+ let ip = ntoh(*b.sin_addr.S_un.S_addr());
+ let ip = Ipv4Addr::new(
+ (ip >> 24) as u8,
+ (ip >> 16) as u8,
+ (ip >> 8) as u8,
+ (ip >> 0) as u8,
+ );
+ Some(SocketAddr::V4(SocketAddrV4::new(ip, ntoh(b.sin_port))))
+ }
+ AF_INET6 if len as usize >= mem::size_of::<SOCKADDR_IN6_LH>() => {
+ let b = &*(ptr as *const SOCKADDR_IN6_LH);
+ let arr = b.sin6_addr.u.Byte();
+ let ip = Ipv6Addr::new(
+ ((arr[0] as u16) << 8) | (arr[1] as u16),
+ ((arr[2] as u16) << 8) | (arr[3] as u16),
+ ((arr[4] as u16) << 8) | (arr[5] as u16),
+ ((arr[6] as u16) << 8) | (arr[7] as u16),
+ ((arr[8] as u16) << 8) | (arr[9] as u16),
+ ((arr[10] as u16) << 8) | (arr[11] as u16),
+ ((arr[12] as u16) << 8) | (arr[13] as u16),
+ ((arr[14] as u16) << 8) | (arr[15] as u16),
+ );
+ let addr = SocketAddrV6::new(
+ ip,
+ ntoh(b.sin6_port),
+ ntoh(b.sin6_flowinfo),
+ ntoh(*b.u.sin6_scope_id()),
+ );
+ Some(SocketAddr::V6(addr))
+ }
+ _ => None,
+ }
+}
+
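+// WSABUF's `len` field is a u_long, so clamp oversized slices; only the first
+// u_long::MAX bytes of a larger buffer take part in the I/O.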
+unsafe fn slice2buf(slice: &[u8]) -> WSABUF {
+ WSABUF {
+ len: cmp::min(slice.len(), <u_long>::max_value() as usize) as u_long,
+ buf: slice.as_ptr() as *mut _,
+ }
+}
+
+unsafe fn result(socket: SOCKET, overlapped: *mut OVERLAPPED) -> io::Result<(usize, u32)> {
+ let mut transferred = 0;
+ let mut flags = 0;
+ let r = WSAGetOverlappedResult(socket, overlapped, &mut transferred, FALSE, &mut flags);
+ if r == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok((transferred as usize, flags))
+ }
+}
+
+impl TcpStreamExt for TcpStream {
+ unsafe fn read_overlapped(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ let mut buf = slice2buf(buf);
+ let mut flags = 0;
+ let mut bytes_read: DWORD = 0;
+ let r = WSARecv(
+ self.as_raw_socket() as SOCKET,
+ &mut buf,
+ 1,
+ &mut bytes_read,
+ &mut flags,
+ overlapped,
+ None,
+ );
+ cvt(r, bytes_read)
+ }
+
+ unsafe fn write_overlapped(
+ &self,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ let mut buf = slice2buf(buf);
+ let mut bytes_written = 0;
+
+ // Note here that we capture the number of bytes written. The
+ // documentation on MSDN, however, states:
+ //
+ // > Use NULL for this parameter if the lpOverlapped parameter is not
+ // > NULL to avoid potentially erroneous results. This parameter can be
+ // > NULL only if the lpOverlapped parameter is not NULL.
+ //
+        // If we're not passing a null overlapped pointer here, then why are we
+        // capturing the number of bytes? It turns out that it's faster to learn
+        // the byte count here than to call `WSAGetOverlappedResult` later, and
+        // in practice almost all implementations rely on this anyway [1].
+        //
+        // As a result we capture the count here and report it back.
+ //
+ // [1]: https://github.com/carllerche/mio/pull/520#issuecomment-273983823
+ let r = WSASend(
+ self.as_raw_socket() as SOCKET,
+ &mut buf,
+ 1,
+ &mut bytes_written,
+ 0,
+ overlapped,
+ None,
+ );
+ cvt(r, bytes_written)
+ }
+
+ unsafe fn connect_overlapped(
+ &self,
+ addr: &SocketAddr,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ connect_overlapped(self.as_raw_socket() as SOCKET, addr, buf, overlapped)
+ }
+
+ fn connect_complete(&self) -> io::Result<()> {
+ const SO_UPDATE_CONNECT_CONTEXT: c_int = 0x7010;
+ let result = unsafe {
+ setsockopt(
+ self.as_raw_socket() as SOCKET,
+ SOL_SOCKET,
+ SO_UPDATE_CONNECT_CONTEXT,
+ 0 as *const _,
+ 0,
+ )
+ };
+ if result == 0 {
+ Ok(())
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+
+ unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result<(usize, u32)> {
+ result(self.as_raw_socket() as SOCKET, overlapped)
+ }
+}
+
+unsafe fn connect_overlapped(
+ socket: SOCKET,
+ addr: &SocketAddr,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+) -> io::Result<Option<usize>> {
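+    // `ConnectEx` is not a regular ws2_32 export; its function pointer has to
+    // be fetched at runtime by GUID (see `WsaExtension::get` below).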
+ static CONNECTEX: WsaExtension = WsaExtension {
+ guid: GUID {
+ Data1: 0x25a207b9,
+ Data2: 0xddf3,
+ Data3: 0x4660,
+ Data4: [0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e],
+ },
+ val: AtomicUsize::new(0),
+ };
+ type ConnectEx = unsafe extern "system" fn(
+ SOCKET,
+ *const SOCKADDR,
+ c_int,
+ PVOID,
+ DWORD,
+ LPDWORD,
+ LPOVERLAPPED,
+ ) -> BOOL;
+
+ let ptr = CONNECTEX.get(socket)?;
+ assert!(ptr != 0);
+ let connect_ex = mem::transmute::<_, ConnectEx>(ptr);
+
+ let (addr_buf, addr_len) = socket_addr_to_ptrs(addr);
+ let mut bytes_sent: DWORD = 0;
+ let r = connect_ex(
+ socket,
+ addr_buf.as_ptr(),
+ addr_len,
+ buf.as_ptr() as *mut _,
+ buf.len() as u32,
+ &mut bytes_sent,
+ overlapped,
+ );
+ if r == TRUE {
+ Ok(Some(bytes_sent as usize))
+ } else {
+ last_err()
+ }
+}
+
+impl UdpSocketExt for UdpSocket {
+ unsafe fn recv_from_overlapped(
+ &self,
+ buf: &mut [u8],
+ addr: *mut SocketAddrBuf,
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ let mut buf = slice2buf(buf);
+ let mut flags = 0;
+ let mut received_bytes: DWORD = 0;
+ let r = WSARecvFrom(
+ self.as_raw_socket() as SOCKET,
+ &mut buf,
+ 1,
+ &mut received_bytes,
+ &mut flags,
+ &mut (*addr).buf as *mut _ as *mut _,
+ &mut (*addr).len,
+ overlapped,
+ None,
+ );
+ cvt(r, received_bytes)
+ }
+
+ unsafe fn recv_overlapped(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ let mut buf = slice2buf(buf);
+ let mut flags = 0;
+ let mut received_bytes: DWORD = 0;
+ let r = WSARecv(
+ self.as_raw_socket() as SOCKET,
+ &mut buf,
+ 1,
+ &mut received_bytes,
+ &mut flags,
+ overlapped,
+ None,
+ );
+ cvt(r, received_bytes)
+ }
+
+ unsafe fn send_to_overlapped(
+ &self,
+ buf: &[u8],
+ addr: &SocketAddr,
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ let (addr_buf, addr_len) = socket_addr_to_ptrs(addr);
+ let mut buf = slice2buf(buf);
+ let mut sent_bytes = 0;
+ let r = WSASendTo(
+ self.as_raw_socket() as SOCKET,
+ &mut buf,
+ 1,
+ &mut sent_bytes,
+ 0,
+ addr_buf.as_ptr() as *const _,
+ addr_len,
+ overlapped,
+ None,
+ );
+ cvt(r, sent_bytes)
+ }
+
+ unsafe fn send_overlapped(
+ &self,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ let mut buf = slice2buf(buf);
+ let mut sent_bytes = 0;
+ let r = WSASend(
+ self.as_raw_socket() as SOCKET,
+ &mut buf,
+ 1,
+ &mut sent_bytes,
+ 0,
+ overlapped,
+ None,
+ );
+ cvt(r, sent_bytes)
+ }
+
+ unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result<(usize, u32)> {
+ result(self.as_raw_socket() as SOCKET, overlapped)
+ }
+}
+
+impl TcpListenerExt for TcpListener {
+ unsafe fn accept_overlapped(
+ &self,
+ socket: &TcpStream,
+ addrs: &mut AcceptAddrsBuf,
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<bool> {
+ static ACCEPTEX: WsaExtension = WsaExtension {
+ guid: GUID {
+ Data1: 0xb5367df1,
+ Data2: 0xcbac,
+ Data3: 0x11cf,
+ Data4: [0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92],
+ },
+ val: AtomicUsize::new(0),
+ };
+ type AcceptEx = unsafe extern "system" fn(
+ SOCKET,
+ SOCKET,
+ PVOID,
+ DWORD,
+ DWORD,
+ DWORD,
+ LPDWORD,
+ LPOVERLAPPED,
+ ) -> BOOL;
+
+ let ptr = ACCEPTEX.get(self.as_raw_socket() as SOCKET)?;
+ assert!(ptr != 0);
+ let accept_ex = mem::transmute::<_, AcceptEx>(ptr);
+
+ let mut bytes = 0;
+ let (a, b, c, d) = (*addrs).args();
+ let r = accept_ex(
+ self.as_raw_socket() as SOCKET,
+ socket.as_raw_socket() as SOCKET,
+ a,
+ b,
+ c,
+ d,
+ &mut bytes,
+ overlapped,
+ );
+ let succeeded = if r == TRUE {
+ true
+ } else {
+ last_err()?;
+ false
+ };
+ Ok(succeeded)
+ }
+
+ fn accept_complete(&self, socket: &TcpStream) -> io::Result<()> {
+ const SO_UPDATE_ACCEPT_CONTEXT: c_int = 0x700B;
+ let me = self.as_raw_socket();
+ let result = unsafe {
+ setsockopt(
+ socket.as_raw_socket() as SOCKET,
+ SOL_SOCKET,
+ SO_UPDATE_ACCEPT_CONTEXT,
+ &me as *const _ as *const _,
+ mem::size_of_val(&me) as c_int,
+ )
+ };
+ if result == 0 {
+ Ok(())
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+
+ unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result<(usize, u32)> {
+ result(self.as_raw_socket() as SOCKET, overlapped)
+ }
+}
+
+impl SocketAddrBuf {
+ /// Creates a new blank socket address buffer.
+ ///
+    /// This should be used before a call to `recv_from_overlapped` to create
+    /// an instance to pass down.
+ pub fn new() -> SocketAddrBuf {
+ SocketAddrBuf {
+ buf: unsafe { mem::zeroed() },
+ len: mem::size_of::<SOCKADDR_STORAGE>() as c_int,
+ }
+ }
+
+ /// Parses this buffer to return a standard socket address.
+ ///
+    /// This function should be called after a completed call to
+    /// `recv_from_overlapped` has filled in the buffer. It will interpret the
+    /// address filled in and return the standard socket address type.
+ ///
+ /// If an error is encountered then `None` is returned.
+ pub fn to_socket_addr(&self) -> Option<SocketAddr> {
+ unsafe { ptrs_to_socket_addr(&self.buf as *const _ as *const _, self.len) }
+ }
+}
+
+static GETACCEPTEXSOCKADDRS: WsaExtension = WsaExtension {
+ guid: GUID {
+ Data1: 0xb5367df2,
+ Data2: 0xcbac,
+ Data3: 0x11cf,
+ Data4: [0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92],
+ },
+ val: AtomicUsize::new(0),
+};
+type GetAcceptExSockaddrs = unsafe extern "system" fn(
+ PVOID,
+ DWORD,
+ DWORD,
+ DWORD,
+ *mut LPSOCKADDR,
+ LPINT,
+ *mut LPSOCKADDR,
+ LPINT,
+);
+
+impl AcceptAddrsBuf {
+ /// Creates a new blank buffer ready to be passed to a call to
+ /// `accept_overlapped`.
+ pub fn new() -> AcceptAddrsBuf {
+ unsafe { mem::zeroed() }
+ }
+
+ /// Parses the data contained in this address buffer, returning the parsed
+ /// result if successful.
+ ///
+ /// This function can be called after a call to `accept_overlapped` has
+ /// succeeded to parse out the data that was written in.
+ pub fn parse(&self, socket: &TcpListener) -> io::Result<AcceptAddrs> {
+ let mut ret = AcceptAddrs {
+ local: 0 as *mut _,
+ local_len: 0,
+ remote: 0 as *mut _,
+ remote_len: 0,
+ _data: self,
+ };
+ let ptr = GETACCEPTEXSOCKADDRS.get(socket.as_raw_socket() as SOCKET)?;
+ assert!(ptr != 0);
+ unsafe {
+ let get_sockaddrs = mem::transmute::<_, GetAcceptExSockaddrs>(ptr);
+ let (a, b, c, d) = self.args();
+ get_sockaddrs(
+ a,
+ b,
+ c,
+ d,
+ &mut ret.local,
+ &mut ret.local_len,
+ &mut ret.remote,
+ &mut ret.remote_len,
+ );
+ Ok(ret)
+ }
+ }
+
+ fn args(&self) -> (PVOID, DWORD, DWORD, DWORD) {
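+        // Hand-rolled offset_of!(AcceptAddrsBuf, remote): AcceptEx receives one
+        // buffer split into a local-address half and a remote-address half, and
+        // needs the byte offset at which the remote half begins.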
+ let remote_offset = unsafe { &(*(0 as *const AcceptAddrsBuf)).remote as *const _ as usize };
+ (
+ self as *const _ as *mut _,
+ 0,
+ remote_offset as DWORD,
+ (mem::size_of_val(self) - remote_offset) as DWORD,
+ )
+ }
+}
+
+impl<'a> AcceptAddrs<'a> {
+ /// Returns the local socket address contained in this buffer.
+ pub fn local(&self) -> Option<SocketAddr> {
+ unsafe { ptrs_to_socket_addr(self.local, self.local_len) }
+ }
+
+ /// Returns the remote socket address contained in this buffer.
+ pub fn remote(&self) -> Option<SocketAddr> {
+ unsafe { ptrs_to_socket_addr(self.remote, self.remote_len) }
+ }
+}
+
+impl WsaExtension {
+ fn get(&self, socket: SOCKET) -> io::Result<usize> {
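+        // Extension function pointers are process-global, so cache the result
+        // of the first lookup in `val`. Debug builds re-run the WSAIoctl every
+        // time and assert below that the pointer never changes.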
+ let prev = self.val.load(Ordering::SeqCst);
+ if prev != 0 && !cfg!(debug_assertions) {
+ return Ok(prev);
+ }
+ let mut ret = 0 as usize;
+ let mut bytes = 0;
+ let r = unsafe {
+ WSAIoctl(
+ socket,
+ SIO_GET_EXTENSION_FUNCTION_POINTER,
+ &self.guid as *const _ as *mut _,
+ mem::size_of_val(&self.guid) as DWORD,
+ &mut ret as *mut _ as *mut _,
+ mem::size_of_val(&ret) as DWORD,
+ &mut bytes,
+ 0 as *mut _,
+ None,
+ )
+ };
+ cvt(r, 0).map(|_| {
+ debug_assert_eq!(bytes as usize, mem::size_of_val(&ret));
+ debug_assert!(prev == 0 || prev == ret);
+ self.val.store(ret, Ordering::SeqCst);
+ ret
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::io::prelude::*;
+ use std::net::{
+ IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV6, TcpListener, TcpStream, UdpSocket,
+ };
+ use std::slice;
+ use std::thread;
+
+ use socket2::{Domain, Socket, Type};
+
+ use crate::iocp::CompletionPort;
+ use crate::net::{AcceptAddrsBuf, TcpListenerExt};
+ use crate::net::{SocketAddrBuf, TcpStreamExt, UdpSocketExt};
+ use crate::Overlapped;
+
+ fn each_ip(f: &mut dyn FnMut(SocketAddr)) {
+ f(t!("127.0.0.1:0".parse()));
+ f(t!("[::1]:0".parse()));
+ }
+
+ #[test]
+ fn tcp_read() {
+ each_ip(&mut |addr| {
+ let l = t!(TcpListener::bind(addr));
+ let addr = t!(l.local_addr());
+ let t = thread::spawn(move || {
+ let mut a = t!(l.accept()).0;
+ t!(a.write_all(&[1, 2, 3]));
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ let s = t!(TcpStream::connect(addr));
+ t!(cp.add_socket(1, &s));
+
+ let mut b = [0; 10];
+ let a = Overlapped::zero();
+ unsafe {
+ t!(s.read_overlapped(&mut b, a.raw()));
+ }
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 3);
+ assert_eq!(status.token(), 1);
+ assert_eq!(status.overlapped(), a.raw());
+ assert_eq!(&b[0..3], &[1, 2, 3]);
+
+ t!(t.join());
+ })
+ }
+
+ #[test]
+ fn tcp_write() {
+ each_ip(&mut |addr| {
+ let l = t!(TcpListener::bind(addr));
+ let addr = t!(l.local_addr());
+ let t = thread::spawn(move || {
+ let mut a = t!(l.accept()).0;
+ let mut b = [0; 10];
+ let n = t!(a.read(&mut b));
+ assert_eq!(n, 3);
+ assert_eq!(&b[0..3], &[1, 2, 3]);
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ let s = t!(TcpStream::connect(addr));
+ t!(cp.add_socket(1, &s));
+
+ let b = [1, 2, 3];
+ let a = Overlapped::zero();
+ unsafe {
+ t!(s.write_overlapped(&b, a.raw()));
+ }
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 3);
+ assert_eq!(status.token(), 1);
+ assert_eq!(status.overlapped(), a.raw());
+
+ t!(t.join());
+ })
+ }
+
+ #[test]
+ fn tcp_connect() {
+ each_ip(&mut |addr_template| {
+ let l = t!(TcpListener::bind(addr_template));
+ let addr = t!(l.local_addr());
+ let t = thread::spawn(move || {
+ t!(l.accept());
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ let domain = Domain::for_address(addr);
+ let socket = t!(Socket::new(domain, Type::STREAM, None));
+ t!(socket.bind(&addr_template.into()));
+ let socket = TcpStream::from(socket);
+ t!(cp.add_socket(1, &socket));
+
+ let a = Overlapped::zero();
+ unsafe {
+ t!(socket.connect_overlapped(&addr, &[], a.raw()));
+ }
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 0);
+ assert_eq!(status.token(), 1);
+ assert_eq!(status.overlapped(), a.raw());
+ t!(socket.connect_complete());
+
+ t!(t.join());
+ })
+ }
+
+ #[test]
+ fn udp_recv_from() {
+ each_ip(&mut |addr| {
+ let a = t!(UdpSocket::bind(addr));
+ let b = t!(UdpSocket::bind(addr));
+ let a_addr = t!(a.local_addr());
+ let b_addr = t!(b.local_addr());
+ let t = thread::spawn(move || {
+ t!(a.send_to(&[1, 2, 3], b_addr));
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ t!(cp.add_socket(1, &b));
+
+ let mut buf = [0; 10];
+ let a = Overlapped::zero();
+ let mut addr = SocketAddrBuf::new();
+ unsafe {
+ t!(b.recv_from_overlapped(&mut buf, &mut addr, a.raw()));
+ }
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 3);
+ assert_eq!(status.token(), 1);
+ assert_eq!(status.overlapped(), a.raw());
+ assert_eq!(&buf[..3], &[1, 2, 3]);
+ assert_eq!(addr.to_socket_addr(), Some(a_addr));
+
+ t!(t.join());
+ })
+ }
+
+ #[test]
+ fn udp_recv() {
+ each_ip(&mut |addr| {
+ let a = t!(UdpSocket::bind(addr));
+ let b = t!(UdpSocket::bind(addr));
+ let a_addr = t!(a.local_addr());
+ let b_addr = t!(b.local_addr());
+ assert!(b.connect(a_addr).is_ok());
+ assert!(a.connect(b_addr).is_ok());
+ let t = thread::spawn(move || {
+ t!(a.send_to(&[1, 2, 3], b_addr));
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ t!(cp.add_socket(1, &b));
+
+ let mut buf = [0; 10];
+ let a = Overlapped::zero();
+ unsafe {
+ t!(b.recv_overlapped(&mut buf, a.raw()));
+ }
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 3);
+ assert_eq!(status.token(), 1);
+ assert_eq!(status.overlapped(), a.raw());
+ assert_eq!(&buf[..3], &[1, 2, 3]);
+
+ t!(t.join());
+ })
+ }
+
+ #[test]
+ fn udp_send_to() {
+ each_ip(&mut |addr| {
+ let a = t!(UdpSocket::bind(addr));
+ let b = t!(UdpSocket::bind(addr));
+ let a_addr = t!(a.local_addr());
+ let b_addr = t!(b.local_addr());
+ let t = thread::spawn(move || {
+ let mut b = [0; 100];
+ let (n, addr) = t!(a.recv_from(&mut b));
+ assert_eq!(n, 3);
+ assert_eq!(addr, b_addr);
+ assert_eq!(&b[..3], &[1, 2, 3]);
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ t!(cp.add_socket(1, &b));
+
+ let a = Overlapped::zero();
+ unsafe {
+ t!(b.send_to_overlapped(&[1, 2, 3], &a_addr, a.raw()));
+ }
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 3);
+ assert_eq!(status.token(), 1);
+ assert_eq!(status.overlapped(), a.raw());
+
+ t!(t.join());
+ })
+ }
+
+ #[test]
+ fn udp_send() {
+ each_ip(&mut |addr| {
+ let a = t!(UdpSocket::bind(addr));
+ let b = t!(UdpSocket::bind(addr));
+ let a_addr = t!(a.local_addr());
+ let b_addr = t!(b.local_addr());
+ assert!(b.connect(a_addr).is_ok());
+ assert!(a.connect(b_addr).is_ok());
+ let t = thread::spawn(move || {
+ let mut b = [0; 100];
+ let (n, addr) = t!(a.recv_from(&mut b));
+ assert_eq!(n, 3);
+ assert_eq!(addr, b_addr);
+ assert_eq!(&b[..3], &[1, 2, 3]);
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ t!(cp.add_socket(1, &b));
+
+ let a = Overlapped::zero();
+ unsafe {
+ t!(b.send_overlapped(&[1, 2, 3], a.raw()));
+ }
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 3);
+ assert_eq!(status.token(), 1);
+ assert_eq!(status.overlapped(), a.raw());
+
+ t!(t.join());
+ })
+ }
+
+ #[test]
+ fn tcp_accept() {
+ each_ip(&mut |addr_template| {
+ let l = t!(TcpListener::bind(addr_template));
+ let addr = t!(l.local_addr());
+ let t = thread::spawn(move || {
+ let socket = t!(TcpStream::connect(addr));
+ (socket.local_addr().unwrap(), socket.peer_addr().unwrap())
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ let domain = Domain::for_address(addr);
+ let socket = TcpStream::from(t!(Socket::new(domain, Type::STREAM, None)));
+ t!(cp.add_socket(1, &l));
+
+ let a = Overlapped::zero();
+ let mut addrs = AcceptAddrsBuf::new();
+ unsafe {
+ t!(l.accept_overlapped(&socket, &mut addrs, a.raw()));
+ }
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 0);
+ assert_eq!(status.token(), 1);
+ assert_eq!(status.overlapped(), a.raw());
+ t!(l.accept_complete(&socket));
+
+ let (remote, local) = t!(t.join());
+ let addrs = addrs.parse(&l).unwrap();
+ assert_eq!(addrs.local(), Some(local));
+ assert_eq!(addrs.remote(), Some(remote));
+ })
+ }
+
+ #[test]
+ fn sockaddr_convert_v4() {
+ let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(3, 4, 5, 6)), 0xabcd);
+ let (raw_addr, addr_len) = super::socket_addr_to_ptrs(&addr);
+ assert_eq!(addr_len, 16);
+ let addr_bytes =
+ unsafe { slice::from_raw_parts(raw_addr.as_ptr() as *const u8, addr_len as usize) };
+ assert_eq!(
+ addr_bytes,
+ &[2, 0, 0xab, 0xcd, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0]
+ );
+ }
+
+ #[test]
+ fn sockaddr_convert_v6() {
+ let port = 0xabcd;
+ let flowinfo = 0x12345678;
+ let scope_id = 0x87654321;
+ let addr = SocketAddr::V6(SocketAddrV6::new(
+ Ipv6Addr::new(
+ 0x0102, 0x0304, 0x0506, 0x0708, 0x090a, 0x0b0c, 0x0d0e, 0x0f10,
+ ),
+ port,
+ flowinfo,
+ scope_id,
+ ));
+ let (raw_addr, addr_len) = super::socket_addr_to_ptrs(&addr);
+ assert_eq!(addr_len, 28);
+ let addr_bytes =
+ unsafe { slice::from_raw_parts(raw_addr.as_ptr() as *const u8, addr_len as usize) };
+ assert_eq!(
+ addr_bytes,
+ &[
+ 23, 0, // AF_INET6
+ 0xab, 0xcd, // Port
+ 0x78, 0x56, 0x34, 0x12, // flowinfo
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
+ 0x0f, 0x10, // IP
+ 0x21, 0x43, 0x65, 0x87, // scope_id
+ ]
+ );
+ }
+}
diff --git a/third_party/rust/miow/src/overlapped.rs b/third_party/rust/miow/src/overlapped.rs
new file mode 100644
index 0000000000..abe2d37cbb
--- /dev/null
+++ b/third_party/rust/miow/src/overlapped.rs
@@ -0,0 +1,92 @@
+use std::fmt;
+use std::io;
+use std::mem;
+use std::ptr;
+
+use winapi::shared::ntdef::{HANDLE, NULL};
+use winapi::um::minwinbase::*;
+use winapi::um::synchapi::*;
+
+/// A wrapper around `OVERLAPPED` to provide "rustic" accessors and
+/// initializers.
+pub struct Overlapped(OVERLAPPED);
+
+impl fmt::Debug for Overlapped {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "OVERLAPPED")
+ }
+}
+
+unsafe impl Send for Overlapped {}
+unsafe impl Sync for Overlapped {}
+
+impl Overlapped {
+ /// Creates a new zeroed out instance of an overlapped I/O tracking state.
+ ///
+ /// This is suitable for passing to methods which will then later get
+ /// notified via an I/O Completion Port.
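+ ///
+ /// A minimal sketch of constructing one (every field starts zeroed, so the
+ /// event handle is null):
+ ///
+ /// ```no_run
+ /// use miow::Overlapped;
+ ///
+ /// let over = Overlapped::zero();
+ /// assert!(over.event().is_null());
+ /// assert_eq!(over.offset(), 0);
+ /// ```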
+ pub fn zero() -> Overlapped {
+ Overlapped(unsafe { mem::zeroed() })
+ }
+
+ /// Creates a new `Overlapped` with an initialized non-null `hEvent`. The caller is
+ /// responsible for calling `CloseHandle` on the `hEvent` field of the returned
+ /// `Overlapped`. The event is created with `bManualReset` set to `FALSE`, meaning after a
+ /// single thread waits on the event, it will be reset.
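+ ///
+ /// A sketch of the cleanup obligation (uses `winapi`'s `CloseHandle`):
+ ///
+ /// ```no_run
+ /// use miow::Overlapped;
+ /// use winapi::um::handleapi::CloseHandle;
+ ///
+ /// let over = Overlapped::initialize_with_autoreset_event().unwrap();
+ /// // ... issue I/O with `over.raw()` and wait on `over.event()` ...
+ /// unsafe { CloseHandle(over.event()); }
+ /// ```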
+ pub fn initialize_with_autoreset_event() -> io::Result<Overlapped> {
+ let event = unsafe { CreateEventW(ptr::null_mut(), 0i32, 0i32, ptr::null()) };
+ if event == NULL {
+ return Err(io::Error::last_os_error());
+ }
+ let mut overlapped = Self::zero();
+ overlapped.set_event(event);
+ Ok(overlapped)
+ }
+
+ /// Creates a new `Overlapped` reference from a raw pointer to the
+ /// underlying `OVERLAPPED`, wrapping it in the "rusty" wrapper for
+ /// working with accessors.
+ ///
+ /// # Unsafety
+ ///
+ /// This function validates neither `ptr` nor the lifetime of the returned
+ /// reference; use this method with extreme caution.
+ pub unsafe fn from_raw<'a>(ptr: *mut OVERLAPPED) -> &'a mut Overlapped {
+ &mut *(ptr as *mut Overlapped)
+ }
+
+ /// Gain access to the raw underlying data
+ pub fn raw(&self) -> *mut OVERLAPPED {
+ &self.0 as *const _ as *mut _
+ }
+
+ /// Sets the offset inside this overlapped structure.
+ ///
+ /// Note that for I/O operations in general this only has meaning for I/O
+ /// handles that are on a seeking device that supports the concept of an
+ /// offset.
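+ ///
+ /// A round-trip sketch (the 64-bit offset is split across the 32-bit
+ /// `Offset`/`OffsetHigh` fields):
+ ///
+ /// ```no_run
+ /// use miow::Overlapped;
+ ///
+ /// let mut over = Overlapped::zero();
+ /// over.set_offset(0x1_0000_0004);
+ /// assert_eq!(over.offset(), 0x1_0000_0004);
+ /// ```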
+ pub fn set_offset(&mut self, offset: u64) {
+ let s = unsafe { self.0.u.s_mut() };
+ s.Offset = offset as u32;
+ s.OffsetHigh = (offset >> 32) as u32;
+ }
+
+ /// Reads the offset inside this overlapped structure.
+ pub fn offset(&self) -> u64 {
+ let s = unsafe { self.0.u.s() };
+ (s.Offset as u64) | ((s.OffsetHigh as u64) << 32)
+ }
+
+ /// Sets the `hEvent` field of this structure.
+ ///
+ /// The event specified can be null.
+ pub fn set_event(&mut self, event: HANDLE) {
+ self.0.hEvent = event;
+ }
+
+ /// Reads the `hEvent` field of this structure, may return null.
+ pub fn event(&self) -> HANDLE {
+ self.0.hEvent
+ }
+}
diff --git a/third_party/rust/miow/src/pipe.rs b/third_party/rust/miow/src/pipe.rs
new file mode 100644
index 0000000000..5088021a86
--- /dev/null
+++ b/third_party/rust/miow/src/pipe.rs
@@ -0,0 +1,788 @@
+//! Interprocess Communication pipes
+//!
+//! A pipe is a section of shared memory that processes use for communication.
+//! The process that creates a pipe is the _pipe server_. A process that connects
+//! to a pipe is a _pipe client_. One process writes information to the pipe, then
+//! the other process reads the information from the pipe. This overview
+//! describes how to create, manage, and use pipes.
+//!
+ //! There are two types of pipes: [anonymous pipes](fn.anonymous.html) and
+ //! [named pipes](struct.NamedPipe.html). Anonymous pipes require less overhead
+ //! than named pipes, but offer limited services.
+//!
+//! # Anonymous pipes
+//!
+//! An anonymous pipe is an unnamed, one-way pipe that typically transfers data
+//! between a parent process and a child process. Anonymous pipes are always
+//! local; they cannot be used for communication over a network.
+//!
+//! # Named pipes
+//!
+//! A *named pipe* is a named, one-way or duplex pipe for communication between
+//! the pipe server and one or more pipe clients. All instances of a named pipe
+//! share the same pipe name, but each instance has its own buffers and handles,
+//! and provides a separate conduit for client/server communication. The use of
+//! instances enables multiple pipe clients to use the same named pipe
+//! simultaneously.
+//!
+//! Any process can access named pipes, subject to security checks, making named
+//! pipes an easy form of communication between related or unrelated processes.
+//!
+//! Any process can act as both a server and a client, making peer-to-peer
+//! communication possible. As used here, the term pipe server refers to a
+//! process that creates a named pipe, and the term pipe client refers to a
+//! process that connects to an instance of a named pipe.
+//!
+//! Named pipes can be used to provide communication between processes on the
+//! same computer or between processes on different computers across a network.
+//! If the server service is running, all named pipes are accessible remotely. If
+//! you intend to use a named pipe locally only, deny access to NT
+//! AUTHORITY\\NETWORK or switch to local RPC.
+//!
+//! # References
+//!
+//! - [win32 pipe docs](https://github.com/MicrosoftDocs/win32/blob/docs/desktop-src/ipc/pipes.md)
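+ //!
+ //! # Examples
+ //!
+ //! A minimal anonymous-pipe round trip, mirroring this module's own tests
+ //! (error handling elided for brevity):
+ //!
+ //! ```no_run
+ //! use std::io::{Read, Write};
+ //!
+ //! let (mut read, mut write) = miow::pipe::anonymous(256).unwrap();
+ //! write.write_all(&[1, 2, 3]).unwrap();
+ //! let mut buf = [0; 10];
+ //! let n = read.read(&mut buf).unwrap();
+ //! assert_eq!(&buf[..n], &[1, 2, 3]);
+ //! ```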
+
+use std::cell::RefCell;
+use std::ffi::OsStr;
+use std::fs::{File, OpenOptions};
+use std::io;
+use std::io::prelude::*;
+use std::os::windows::ffi::*;
+use std::os::windows::io::*;
+use std::time::Duration;
+
+use crate::handle::Handle;
+use crate::overlapped::Overlapped;
+use winapi::shared::minwindef::*;
+use winapi::shared::ntdef::HANDLE;
+use winapi::shared::winerror::*;
+use winapi::um::fileapi::*;
+use winapi::um::handleapi::*;
+use winapi::um::ioapiset::*;
+use winapi::um::minwinbase::*;
+use winapi::um::namedpipeapi::*;
+use winapi::um::winbase::*;
+
+/// Readable half of an anonymous pipe.
+#[derive(Debug)]
+pub struct AnonRead(Handle);
+
+/// Writable half of an anonymous pipe.
+#[derive(Debug)]
+pub struct AnonWrite(Handle);
+
+/// A named pipe that can accept connections.
+#[derive(Debug)]
+pub struct NamedPipe(Handle);
+
+/// A builder structure for creating a new named pipe.
+#[derive(Debug)]
+pub struct NamedPipeBuilder {
+ name: Vec<u16>,
+ dwOpenMode: DWORD,
+ dwPipeMode: DWORD,
+ nMaxInstances: DWORD,
+ nOutBufferSize: DWORD,
+ nInBufferSize: DWORD,
+ nDefaultTimeOut: DWORD,
+}
+
+/// Creates a new anonymous in-memory pipe, returning the read/write ends of the
+/// pipe.
+///
+ /// The buffer size for the pipe may also be specified, but the system treats
+ /// it as a suggestion: the buffer is not guaranteed to be precisely this
+ /// size.
+pub fn anonymous(buffer_size: u32) -> io::Result<(AnonRead, AnonWrite)> {
+ let mut read = 0 as HANDLE;
+ let mut write = 0 as HANDLE;
+ crate::cvt(unsafe { CreatePipe(&mut read, &mut write, 0 as *mut _, buffer_size) })?;
+ Ok((AnonRead(Handle::new(read)), AnonWrite(Handle::new(write))))
+}
+
+impl Read for AnonRead {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+}
+impl<'a> Read for &'a AnonRead {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+}
+
+impl AsRawHandle for AnonRead {
+ fn as_raw_handle(&self) -> HANDLE {
+ self.0.raw()
+ }
+}
+impl FromRawHandle for AnonRead {
+ unsafe fn from_raw_handle(handle: HANDLE) -> AnonRead {
+ AnonRead(Handle::new(handle))
+ }
+}
+impl IntoRawHandle for AnonRead {
+ fn into_raw_handle(self) -> HANDLE {
+ self.0.into_raw()
+ }
+}
+
+impl Write for AnonWrite {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+impl<'a> Write for &'a AnonWrite {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl AsRawHandle for AnonWrite {
+ fn as_raw_handle(&self) -> HANDLE {
+ self.0.raw()
+ }
+}
+impl FromRawHandle for AnonWrite {
+ unsafe fn from_raw_handle(handle: HANDLE) -> AnonWrite {
+ AnonWrite(Handle::new(handle))
+ }
+}
+impl IntoRawHandle for AnonWrite {
+ fn into_raw_handle(self) -> HANDLE {
+ self.0.into_raw()
+ }
+}
+
+/// A convenience function to connect to a named pipe.
+///
+/// This function will block the calling process until it can connect to the
+/// pipe server specified by `addr`. This will use `NamedPipe::wait` internally
+/// to block until it can connect.
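+ ///
+ /// A client-side sketch (the pipe name is illustrative):
+ ///
+ /// ```no_run
+ /// let file = miow::pipe::connect(r"\\.\pipe\example").unwrap();
+ /// ```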
+pub fn connect<A: AsRef<OsStr>>(addr: A) -> io::Result<File> {
+ _connect(addr.as_ref())
+}
+
+fn _connect(addr: &OsStr) -> io::Result<File> {
+ let mut r = OpenOptions::new();
+ let mut w = OpenOptions::new();
+ let mut rw = OpenOptions::new();
+ r.read(true);
+ w.write(true);
+ rw.read(true).write(true);
+ loop {
+ let res = rw
+ .open(addr)
+ .or_else(|_| r.open(addr))
+ .or_else(|_| w.open(addr));
+ match res {
+ Ok(f) => return Ok(f),
+ Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => {}
+ Err(e) => return Err(e),
+ }
+
+ NamedPipe::wait(addr, Some(Duration::new(20, 0)))?;
+ }
+}
+
+impl NamedPipe {
+ /// Creates a new initial named pipe.
+ ///
+ /// This function is equivalent to:
+ ///
+ /// ```
+ /// use miow::pipe::NamedPipeBuilder;
+ ///
+ /// # let addr = "foo";
+ /// NamedPipeBuilder::new(addr)
+ /// .first(true)
+ /// .inbound(true)
+ /// .outbound(true)
+ /// .out_buffer_size(65536)
+ /// .in_buffer_size(65536)
+ /// .create();
+ /// ```
+ pub fn new<A: AsRef<OsStr>>(addr: A) -> io::Result<NamedPipe> {
+ NamedPipeBuilder::new(addr).create()
+ }
+
+ /// Waits until either a time-out interval elapses or an instance of the
+ /// specified named pipe is available for connection.
+ ///
+ /// If this function succeeds the process can create a `File` to connect to
+ /// the named pipe.
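+ ///
+ /// A sketch that bounds the wait (the pipe name is illustrative):
+ ///
+ /// ```no_run
+ /// use std::time::Duration;
+ /// use miow::pipe::NamedPipe;
+ ///
+ /// NamedPipe::wait(r"\\.\pipe\example", Some(Duration::from_secs(5))).unwrap();
+ /// let client = std::fs::File::open(r"\\.\pipe\example").unwrap();
+ /// ```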
+ pub fn wait<A: AsRef<OsStr>>(addr: A, timeout: Option<Duration>) -> io::Result<()> {
+ NamedPipe::_wait(addr.as_ref(), timeout)
+ }
+
+ fn _wait(addr: &OsStr, timeout: Option<Duration>) -> io::Result<()> {
+ let addr = addr.encode_wide().chain(Some(0)).collect::<Vec<_>>();
+ let timeout = crate::dur2ms(timeout);
+ crate::cvt(unsafe { WaitNamedPipeW(addr.as_ptr(), timeout) }).map(|_| ())
+ }
+
+ /// Connects this named pipe to a client, blocking until one becomes
+ /// available.
+ ///
+ /// This function will call the `ConnectNamedPipe` function to wait for a
+ /// client to connect. This can be called immediately after the pipe is
+ /// created, or after it has been disconnected from a previous client.
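+ ///
+ /// A server-side sketch (blocks until some client opens the pipe):
+ ///
+ /// ```no_run
+ /// use miow::pipe::NamedPipe;
+ ///
+ /// let server = NamedPipe::new(r"\\.\pipe\example").unwrap();
+ /// server.connect().unwrap();
+ /// server.disconnect().unwrap();
+ /// ```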
+ pub fn connect(&self) -> io::Result<()> {
+ match crate::cvt(unsafe { ConnectNamedPipe(self.0.raw(), 0 as *mut _) }) {
+ Ok(_) => Ok(()),
+ Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_CONNECTED as i32) => Ok(()),
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Issue a connection request with the specified overlapped operation.
+ ///
+ /// This function will issue a request to connect a client to this server,
+ /// returning immediately after starting the overlapped operation.
+ ///
+ /// If this function immediately succeeds then `Ok(true)` is returned. If
+ /// the overlapped operation is enqueued and pending, then `Ok(false)` is
+ /// returned. Otherwise an error is returned indicating what went wrong.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the
+ /// `overlapped` pointer is valid until the end of the I/O operation. The
+ /// kernel also requires that `overlapped` is unique for this I/O operation
+ /// and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that this pointer is
+ /// valid until the I/O operation is completed, typically via completion
+ /// ports and waiting to receive the completion notification on the port.
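+ ///
+ /// A sketch modeled on this crate's own tests, driving the connect through
+ /// a completion port (the pipe name and token are arbitrary):
+ ///
+ /// ```no_run
+ /// use miow::iocp::CompletionPort;
+ /// use miow::pipe::NamedPipe;
+ /// use miow::Overlapped;
+ ///
+ /// let pipe = NamedPipe::new(r"\\.\pipe\example").unwrap();
+ /// let cp = CompletionPort::new(1).unwrap();
+ /// cp.add_handle(2, &pipe).unwrap();
+ ///
+ /// let over = Overlapped::zero();
+ /// let done = unsafe { pipe.connect_overlapped(over.raw()).unwrap() };
+ /// if !done {
+ ///     // Pending: `over` must stay valid until this completion arrives.
+ ///     let status = cp.get(None).unwrap();
+ ///     assert_eq!(status.token(), 2);
+ /// }
+ /// ```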
+ pub unsafe fn connect_overlapped(&self, overlapped: *mut OVERLAPPED) -> io::Result<bool> {
+ match crate::cvt(ConnectNamedPipe(self.0.raw(), overlapped)) {
+ Ok(_) => Ok(true),
+ Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_CONNECTED as i32) => Ok(true),
+ Err(ref e) if e.raw_os_error() == Some(ERROR_IO_PENDING as i32) => Ok(false),
+ Err(ref e) if e.raw_os_error() == Some(ERROR_NO_DATA as i32) => Ok(true),
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Disconnects this named pipe from any connected client.
+ pub fn disconnect(&self) -> io::Result<()> {
+ crate::cvt(unsafe { DisconnectNamedPipe(self.0.raw()) }).map(|_| ())
+ }
+
+ /// Issues an overlapped read operation to occur on this pipe.
+ ///
+ /// This function will issue an asynchronous read to occur in an overlapped
+ /// fashion, returning immediately. The `buf` provided will be filled in
+ /// with data and the request is tracked by the `overlapped` structure
+ /// provided.
+ ///
+ /// If the operation succeeds immediately, `Ok(Some(n))` is returned where
+ /// `n` is the number of bytes read. If an asynchronous operation is
+ /// enqueued, then `Ok(None)` is returned. Otherwise if an error occurred
+ /// it is returned.
+ ///
+ /// When this operation completes (or if it completes immediately), another
+ /// mechanism must be used to learn how many bytes were transferred (such as
+ /// looking at the bytes-transferred field in the IOCP status message).
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the `buf` and
+ /// `overlapped` pointers be valid until the end of the I/O operation.
+ /// The kernel also requires that `overlapped` is unique for this I/O
+ /// operation and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that the pointers are
+ /// valid until the I/O operation is completed, typically via completion
+ /// ports and waiting to receive the completion notification on the port.
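+ ///
+ /// A sketch mirroring this crate's own tests; the buffer and overlapped
+ /// state stay alive until the completion is dequeued (writes follow the
+ /// same pattern via `write_overlapped`):
+ ///
+ /// ```no_run
+ /// use miow::iocp::CompletionPort;
+ /// use miow::pipe::NamedPipe;
+ /// use miow::Overlapped;
+ ///
+ /// let pipe = NamedPipe::new(r"\\.\pipe\example").unwrap();
+ /// let cp = CompletionPort::new(1).unwrap();
+ /// cp.add_handle(3, &pipe).unwrap();
+ /// pipe.connect().unwrap();
+ ///
+ /// let mut buf = [0; 10];
+ /// let over = Overlapped::zero();
+ /// unsafe { pipe.read_overlapped(&mut buf, over.raw()).unwrap(); }
+ /// let status = cp.get(None).unwrap();
+ /// let n = status.bytes_transferred() as usize;
+ /// assert!(n <= buf.len());
+ /// ```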
+ pub unsafe fn read_overlapped(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ self.0.read_overlapped(buf, overlapped)
+ }
+
+ /// Issues an overlapped write operation to occur on this pipe.
+ ///
+ /// This function will issue an asynchronous write to occur in an overlapped
+ /// fashion, returning immediately. The data in `buf` will be written to the
+ /// pipe and the request is tracked by the `overlapped` structure provided.
+ ///
+ /// If the operation succeeds immediately, `Ok(Some(n))` is returned where
+ /// `n` is the number of bytes written. If an asynchronous operation is
+ /// enqueued, then `Ok(None)` is returned. Otherwise if an error occurred
+ /// it is returned.
+ ///
+ /// When this operation completes (or if it completes immediately), another
+ /// mechanism must be used to learn how many bytes were transferred (such as
+ /// looking at the bytes-transferred field in the IOCP status message).
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the kernel requires that the `buf` and
+ /// `overlapped` pointers be valid until the end of the I/O operation.
+ /// The kernel also requires that `overlapped` is unique for this I/O
+ /// operation and is not in use for any other I/O.
+ ///
+ /// To safely use this function callers must ensure that the pointers are
+ /// valid until the I/O operation is completed, typically via completion
+ /// ports and waiting to receive the completion notification on the port.
+ pub unsafe fn write_overlapped(
+ &self,
+ buf: &[u8],
+ overlapped: *mut OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ self.0.write_overlapped(buf, overlapped)
+ }
+
+ /// Calls the `GetOverlappedResult` function to get the result of an
+ /// overlapped operation for this handle.
+ ///
+ /// This function takes the `OVERLAPPED` argument which must have been used
+ /// to initiate an overlapped I/O operation, and returns either the
+ /// successful number of bytes transferred during the operation or an error
+ /// if one occurred.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe as `overlapped` must have previously been used
+ /// to execute an operation for this handle, and it must also be a valid
+ /// pointer to an `Overlapped` instance.
+ pub unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result<usize> {
+ let mut transferred = 0;
+ let r = GetOverlappedResult(self.0.raw(), overlapped, &mut transferred, FALSE);
+ if r == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(transferred as usize)
+ }
+ }
+}
+
+thread_local! {
+ static NAMED_PIPE_OVERLAPPED: RefCell<Option<Overlapped>> = RefCell::new(None);
+}
+
+ /// Call a function with a thread-local `Overlapped`. The function `f` must
+ /// ensure that the event is reset, either manually or by a waiting thread
+ /// being released.
+fn with_threadlocal_overlapped<F>(f: F) -> io::Result<usize>
+where
+ F: FnOnce(&Overlapped) -> io::Result<usize>,
+{
+ NAMED_PIPE_OVERLAPPED.with(|overlapped| {
+ let mut mborrow = overlapped.borrow_mut();
+ if mborrow.is_none() {
+ let op = Overlapped::initialize_with_autoreset_event()?;
+ *mborrow = Some(op);
+ }
+ f(mborrow.as_ref().unwrap())
+ })
+}
+
+impl Read for NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ // This is necessary because the pipe is opened with `FILE_FLAG_OVERLAPPED`.
+ with_threadlocal_overlapped(|overlapped| unsafe {
+ self.0
+ .read_overlapped_wait(buf, overlapped.raw() as *mut OVERLAPPED)
+ })
+ }
+}
+impl<'a> Read for &'a NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ // This is necessary because the pipe is opened with `FILE_FLAG_OVERLAPPED`.
+ with_threadlocal_overlapped(|overlapped| unsafe {
+ self.0
+ .read_overlapped_wait(buf, overlapped.raw() as *mut OVERLAPPED)
+ })
+ }
+}
+
+impl Write for NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ // This is necessary because the pipe is opened with `FILE_FLAG_OVERLAPPED`.
+ with_threadlocal_overlapped(|overlapped| unsafe {
+ self.0
+ .write_overlapped_wait(buf, overlapped.raw() as *mut OVERLAPPED)
+ })
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ <&NamedPipe as Write>::flush(&mut &*self)
+ }
+}
+impl<'a> Write for &'a NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ // This is necessary because the pipe is opened with `FILE_FLAG_OVERLAPPED`.
+ with_threadlocal_overlapped(|overlapped| unsafe {
+ self.0
+ .write_overlapped_wait(buf, overlapped.raw() as *mut OVERLAPPED)
+ })
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ crate::cvt(unsafe { FlushFileBuffers(self.0.raw()) }).map(|_| ())
+ }
+}
+
+impl AsRawHandle for NamedPipe {
+ fn as_raw_handle(&self) -> HANDLE {
+ self.0.raw()
+ }
+}
+impl FromRawHandle for NamedPipe {
+ unsafe fn from_raw_handle(handle: HANDLE) -> NamedPipe {
+ NamedPipe(Handle::new(handle))
+ }
+}
+impl IntoRawHandle for NamedPipe {
+ fn into_raw_handle(self) -> HANDLE {
+ self.0.into_raw()
+ }
+}
+
+fn flag(slot: &mut DWORD, on: bool, val: DWORD) {
+ if on {
+ *slot |= val;
+ } else {
+ *slot &= !val;
+ }
+}
+
+impl NamedPipeBuilder {
+ /// Creates a new named pipe builder with the default settings.
+ pub fn new<A: AsRef<OsStr>>(addr: A) -> NamedPipeBuilder {
+ NamedPipeBuilder {
+ name: addr.as_ref().encode_wide().chain(Some(0)).collect(),
+ dwOpenMode: PIPE_ACCESS_DUPLEX | FILE_FLAG_FIRST_PIPE_INSTANCE | FILE_FLAG_OVERLAPPED,
+ dwPipeMode: PIPE_TYPE_BYTE,
+ nMaxInstances: PIPE_UNLIMITED_INSTANCES,
+ nOutBufferSize: 65536,
+ nInBufferSize: 65536,
+ nDefaultTimeOut: 0,
+ }
+ }
+
+ /// Indicates whether data is allowed to flow from the client to the server.
+ pub fn inbound(&mut self, allowed: bool) -> &mut Self {
+ flag(&mut self.dwOpenMode, allowed, PIPE_ACCESS_INBOUND);
+ self
+ }
+
+ /// Indicates whether data is allowed to flow from the server to the client.
+ pub fn outbound(&mut self, allowed: bool) -> &mut Self {
+ flag(&mut self.dwOpenMode, allowed, PIPE_ACCESS_OUTBOUND);
+ self
+ }
+
+ /// Indicates that this pipe must be the first instance.
+ ///
+ /// If set to true, then creation will fail if there's already an instance
+ /// elsewhere.
+ pub fn first(&mut self, first: bool) -> &mut Self {
+ flag(&mut self.dwOpenMode, first, FILE_FLAG_FIRST_PIPE_INSTANCE);
+ self
+ }
+
+ /// Indicates whether this server can accept remote clients or not.
+ pub fn accept_remote(&mut self, accept: bool) -> &mut Self {
+ flag(&mut self.dwPipeMode, !accept, PIPE_REJECT_REMOTE_CLIENTS);
+ self
+ }
+
+ /// Specifies the maximum number of instances of the server pipe that are
+ /// allowed.
+ ///
+ /// The first instance of a pipe can specify this value. A value of 255
+ /// indicates that there is no limit to the number of instances.
+ pub fn max_instances(&mut self, instances: u8) -> &mut Self {
+ self.nMaxInstances = instances as DWORD;
+ self
+ }
+
+ /// Specifies the number of bytes to reserve for the output buffer.
+ pub fn out_buffer_size(&mut self, buffer: u32) -> &mut Self {
+ self.nOutBufferSize = buffer as DWORD;
+ self
+ }
+
+ /// Specifies the number of bytes to reserve for the input buffer.
+ pub fn in_buffer_size(&mut self, buffer: u32) -> &mut Self {
+ self.nInBufferSize = buffer as DWORD;
+ self
+ }
+
+ /// Using the options in this builder, attempt to create a new named pipe.
+ ///
+ /// This function will call the `CreateNamedPipe` function and return the
+ /// result.
+ pub fn create(&mut self) -> io::Result<NamedPipe> {
+ unsafe { self.with_security_attributes(::std::ptr::null_mut()) }
+ }
+
+ /// Using the options in the builder and the provided security attributes, attempt to create a
+ /// new named pipe. This function must be called with either a null pointer or a valid pointer
+ /// to a `SECURITY_ATTRIBUTES` struct that stays valid for the duration of this call.
+ ///
+ /// This function will call the `CreateNamedPipe` function and return the
+ /// result.
+ pub unsafe fn with_security_attributes(
+ &mut self,
+ attrs: *mut SECURITY_ATTRIBUTES,
+ ) -> io::Result<NamedPipe> {
+ let h = CreateNamedPipeW(
+ self.name.as_ptr(),
+ self.dwOpenMode,
+ self.dwPipeMode,
+ self.nMaxInstances,
+ self.nOutBufferSize,
+ self.nInBufferSize,
+ self.nDefaultTimeOut,
+ attrs,
+ );
+
+ if h == INVALID_HANDLE_VALUE {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(NamedPipe(Handle::new(h)))
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::fs::{File, OpenOptions};
+ use std::io::prelude::*;
+ use std::sync::mpsc::channel;
+ use std::thread;
+ use std::time::Duration;
+
+ use rand::{distributions::Alphanumeric, thread_rng, Rng};
+
+ use super::{anonymous, NamedPipe, NamedPipeBuilder};
+ use crate::iocp::CompletionPort;
+ use crate::Overlapped;
+
+ fn name() -> String {
+ let name = thread_rng()
+ .sample_iter(Alphanumeric)
+ .take(30)
+ .map(char::from)
+ .collect::<String>();
+ format!(r"\\.\pipe\{}", name)
+ }
+
+ #[test]
+ fn anon() {
+ let (mut read, mut write) = t!(anonymous(256));
+ assert_eq!(t!(write.write(&[1, 2, 3])), 3);
+ let mut b = [0; 10];
+ assert_eq!(t!(read.read(&mut b)), 3);
+ assert_eq!(&b[..3], &[1, 2, 3]);
+ }
+
+ #[test]
+ fn named_not_first() {
+ let name = name();
+ let _a = t!(NamedPipe::new(&name));
+ assert!(NamedPipe::new(&name).is_err());
+
+ t!(NamedPipeBuilder::new(&name).first(false).create());
+ }
+
+ #[test]
+ fn named_connect() {
+ let name = name();
+ let a = t!(NamedPipe::new(&name));
+
+ let t = thread::spawn(move || {
+ t!(File::open(name));
+ });
+
+ t!(a.connect());
+ t!(a.disconnect());
+ t!(t.join());
+ }
+
+ #[test]
+ fn named_wait() {
+ let name = name();
+ let a = t!(NamedPipe::new(&name));
+
+ let (tx, rx) = channel();
+ let t = thread::spawn(move || {
+ t!(NamedPipe::wait(&name, None));
+ t!(File::open(&name));
+ assert!(NamedPipe::wait(&name, Some(Duration::from_millis(1))).is_err());
+ t!(tx.send(()));
+ });
+
+ t!(a.connect());
+ t!(rx.recv());
+ t!(a.disconnect());
+ t!(t.join());
+ }
+
+ #[test]
+ fn named_connect_overlapped() {
+ let name = name();
+ let a = t!(NamedPipe::new(&name));
+
+ let t = thread::spawn(move || {
+ t!(File::open(name));
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ t!(cp.add_handle(2, &a));
+
+ let over = Overlapped::zero();
+ unsafe {
+ t!(a.connect_overlapped(over.raw()));
+ }
+
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 0);
+ assert_eq!(status.token(), 2);
+ assert_eq!(status.overlapped(), over.raw());
+ t!(t.join());
+ }
+
+ #[test]
+ fn named_read_write() {
+ let name = name();
+ let mut a = t!(NamedPipe::new(&name));
+
+ let t = thread::spawn(move || {
+ let mut f = t!(OpenOptions::new().read(true).write(true).open(name));
+ t!(f.write_all(&[1, 2, 3]));
+ let mut b = [0; 10];
+ assert_eq!(t!(f.read(&mut b)), 3);
+ assert_eq!(&b[..3], &[1, 2, 3]);
+ });
+
+ t!(a.connect());
+ let mut b = [0; 10];
+ assert_eq!(t!(a.read(&mut b)), 3);
+ assert_eq!(&b[..3], &[1, 2, 3]);
+ t!(a.write_all(&[1, 2, 3]));
+ t!(a.flush());
+ t!(a.disconnect());
+ t!(t.join());
+ }
+
+ #[test]
+ fn named_read_write_multi() {
+ for _ in 0..5 {
+ named_read_write()
+ }
+ }
+
+ #[test]
+ fn named_read_write_multi_same_thread() {
+ let name1 = name();
+ let mut a1 = t!(NamedPipe::new(&name1));
+ let name2 = name();
+ let mut a2 = t!(NamedPipe::new(&name2));
+
+ let t = thread::spawn(move || {
+ let mut f = t!(OpenOptions::new().read(true).write(true).open(name1));
+ t!(f.write_all(&[1, 2, 3]));
+ let mut b = [0; 10];
+ assert_eq!(t!(f.read(&mut b)), 3);
+ assert_eq!(&b[..3], &[1, 2, 3]);
+
+ let mut f = t!(OpenOptions::new().read(true).write(true).open(name2));
+ t!(f.write_all(&[1, 2, 3]));
+ let mut b = [0; 10];
+ assert_eq!(t!(f.read(&mut b)), 3);
+ assert_eq!(&b[..3], &[1, 2, 3]);
+ });
+
+ t!(a1.connect());
+ let mut b = [0; 10];
+ assert_eq!(t!(a1.read(&mut b)), 3);
+ assert_eq!(&b[..3], &[1, 2, 3]);
+ t!(a1.write_all(&[1, 2, 3]));
+ t!(a1.flush());
+ t!(a1.disconnect());
+
+ t!(a2.connect());
+ let mut b = [0; 10];
+ assert_eq!(t!(a2.read(&mut b)), 3);
+ assert_eq!(&b[..3], &[1, 2, 3]);
+ t!(a2.write_all(&[1, 2, 3]));
+ t!(a2.flush());
+ t!(a2.disconnect());
+
+ t!(t.join());
+ }
+
+ #[test]
+ fn named_read_overlapped() {
+ let name = name();
+ let a = t!(NamedPipe::new(&name));
+
+ let t = thread::spawn(move || {
+ let mut f = t!(File::create(name));
+ t!(f.write_all(&[1, 2, 3]));
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ t!(cp.add_handle(3, &a));
+ t!(a.connect());
+
+ let mut b = [0; 10];
+ let over = Overlapped::zero();
+ unsafe {
+ t!(a.read_overlapped(&mut b, over.raw()));
+ }
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 3);
+ assert_eq!(status.token(), 3);
+ assert_eq!(status.overlapped(), over.raw());
+ assert_eq!(&b[..3], &[1, 2, 3]);
+
+ t!(t.join());
+ }
+
+ #[test]
+ fn named_write_overlapped() {
+ let name = name();
+ let a = t!(NamedPipe::new(&name));
+
+ let t = thread::spawn(move || {
+ let mut f = t!(super::connect(name));
+ let mut b = [0; 10];
+ assert_eq!(t!(f.read(&mut b)), 3);
+ assert_eq!(&b[..3], &[1, 2, 3])
+ });
+
+ let cp = t!(CompletionPort::new(1));
+ t!(cp.add_handle(3, &a));
+ t!(a.connect());
+
+ let over = Overlapped::zero();
+ unsafe {
+ t!(a.write_overlapped(&[1, 2, 3], over.raw()));
+ }
+
+ let status = t!(cp.get(None));
+ assert_eq!(status.bytes_transferred(), 3);
+ assert_eq!(status.token(), 3);
+ assert_eq!(status.overlapped(), over.raw());
+
+ t!(t.join());
+ }
+}