author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/mio-0.6.23
parent     Initial commit. (diff)
Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/mio-0.6.23')
-rw-r--r--  third_party/rust/mio-0.6.23/.cargo-checksum.json  1
-rw-r--r--  third_party/rust/mio-0.6.23/CHANGELOG.md  227
-rw-r--r--  third_party/rust/mio-0.6.23/Cargo.toml  70
-rw-r--r--  third_party/rust/mio-0.6.23/LICENSE  19
-rw-r--r--  third_party/rust/mio-0.6.23/README.md  90
-rw-r--r--  third_party/rust/mio-0.6.23/src/channel.rs  390
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/event_loop.rs  346
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/handler.rs  37
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/io.rs  28
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/mod.rs  36
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/notify.rs  63
-rw-r--r--  third_party/rust/mio-0.6.23/src/deprecated/unix.rs  420
-rw-r--r--  third_party/rust/mio-0.6.23/src/event_imp.rs  1162
-rw-r--r--  third_party/rust/mio-0.6.23/src/io.rs  35
-rw-r--r--  third_party/rust/mio-0.6.23/src/lazycell.rs  554
-rw-r--r--  third_party/rust/mio-0.6.23/src/lib.rs  308
-rw-r--r--  third_party/rust/mio-0.6.23/src/net/mod.rs  14
-rw-r--r--  third_party/rust/mio-0.6.23/src/net/tcp.rs  737
-rw-r--r--  third_party/rust/mio-0.6.23/src/net/udp.rs  645
-rw-r--r--  third_party/rust/mio-0.6.23/src/poll.rs  2783
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/awakener.rs  73
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/eventedfd.rs  263
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/handles.rs  78
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/mod.rs  177
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/net.rs  444
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/ready.rs  181
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/fuchsia/selector.rs  353
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/mod.rs  56
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/awakener.rs  74
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/dlsym.rs  47
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/epoll.rs  268
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/eventedfd.rs  107
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/io.rs  107
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/kqueue.rs  360
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/mod.rs  105
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/ready.rs  525
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/tcp.rs  286
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/udp.rs  181
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/uds.rs  265
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/unix/uio.rs  44
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/awakener.rs  66
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/buffer_pool.rs  20
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/from_raw_arc.rs  116
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/mod.rs  193
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/selector.rs  538
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/tcp.rs  853
-rw-r--r--  third_party/rust/mio-0.6.23/src/sys/windows/udp.rs  414
-rw-r--r--  third_party/rust/mio-0.6.23/src/timer.rs  516
-rw-r--r--  third_party/rust/mio-0.6.23/src/token.rs  153
-rw-r--r--  third_party/rust/mio-0.6.23/src/udp.rs  326
50 files changed, 15154 insertions, 0 deletions
diff --git a/third_party/rust/mio-0.6.23/.cargo-checksum.json b/third_party/rust/mio-0.6.23/.cargo-checksum.json
new file mode 100644
index 0000000000..87772bc2e9
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"4ba3d031a78895b4251cc75585215ded07c2d4ca84b79dea5c55a68fd973a29d","Cargo.toml":"1cebd8a3a1509948b24b4de3ed6eedd2441f1a36e4831a2b2b3c38539b7ead70","LICENSE":"07919255c7e04793d8ea760d6c2ce32d19f9ff02bdbdde3ce90b1e1880929a9b","README.md":"eedc84973c97348ea27f93ac7d3232098438d4455c7eaedf6fcc7f105ac9f321","src/channel.rs":"b16493a2b74334156e153b1f4143b0e98d43cd4d7bff0275066dfa575dde2402","src/deprecated/event_loop.rs":"ba931d256e6e57d5635c6cfc6e3a4add4551c87f16457d901b334a129f9cf41d","src/deprecated/handler.rs":"13cbc0c193f43a331e125e05d5eddf3712fe86e41a8721186d3672518ef8a9cc","src/deprecated/io.rs":"4948217ffeeba4f508cc89744da5d6af858b4ad7b4be23f927a00df93bdf2984","src/deprecated/mod.rs":"4310471b5a1313dbf53b492769a3031b15353eb269333b7c1a890bc2709def7c","src/deprecated/notify.rs":"8cb108387ebcfb75764e4dd2868d80eb00d793c4b7c867c08cd86ef10b91b023","src/deprecated/unix.rs":"76c832e7db8263395b576930186fe1a3c472589bed41810d445d89f0eed684eb","src/event_imp.rs":"f8cff47dedc52dab9c7cc2d707f2c2d86d7185f942f02ace4c60353cc6094035","src/io.rs":"9207ffc93ea744b09bc6872fa4d378d7c75640f9ac38b1fa67b940c7cb5d0ade","src/lazycell.rs":"871dbd89f6918a354c2ec2d2a8b89e4aa30754e7e3e8dfcf2f5a6071156e39cf","src/lib.rs":"b875273d1852b6ef11a112fb27147587f5ed699e2c3ce99da3175358a8ff6fdd","src/net/mod.rs":"340c63a8efe9ee774b7bf8ed8c0f72fc7563e5c4b35f6a8b243f92d366e145a2","src/net/tcp.rs":"8b06dc8d2dd9fb7cd52db582fd7fe608b6a50cdf7ce18cf0abb9992956e95f6d","src/net/udp.rs":"8b5728924a07917d2845bbfb060cadb842b36a74d5372ac7707eb7f169a67d4d","src/poll.rs":"e76bb316deedbd9306f91ca8ab394d02b5676fa767746bd9770c5c9dff490426","src/sys/fuchsia/awakener.rs":"71a4a0083242457b0a96326c69c0f98b23dfdb97be96deb26ee02fa9d1dfb212","src/sys/fuchsia/eventedfd.rs":"bd8f43d2b61cdd6a5d0df9c0dc1cb43e1708140d01a05611055277ed55a33b63","src/sys/fuchsia/handles.rs":"161a69e8a8d7f71326a9c53bcb7685d0a81d184aba8e20da27c64aa27dfd56b2","src/sys/fuchsia/mod.rs":"9d80f1214abc93f48b6b6c12ce5b6cfaddbda592c8f3410564d0237228cae2d0","src/sys/fuchsia/net.rs":"50340191dd9cbe317bd6e6ae0170c03daa9141f15c96782b96484e3d8b8301b1","src/sys/fuchsia/ready.rs":"7e6bb7235c52ab4a2134d36cf982f6a4fd6e18050e737b40ee84c89a10a9faac","src/sys/fuchsia/selector.rs":"f3be7f8d683d43e4a8313246e4cacb9444549bf66ecb2234f0d0f53172043bf5","src/sys/mod.rs":"64bea046e4a9feb1f2e2fb8326452c1be8b9d56cf8794df0af4fbdf565204255","src/sys/unix/awakener.rs":"20a61d8f39b2f2abf4f166a3ed46fa0d79907ddf92758eaddb880c67321db56c","src/sys/unix/dlsym.rs":"559337d1f6c10e6c1172bd3908a9dcfa5a0828b53b04f7ca3a0d926afa85cd63","src/sys/unix/epoll.rs":"26b34910c87883f1b8170b95aed1bf3d9ecd9442c7afd23ff1b87d54391e2c88","src/sys/unix/eventedfd.rs":"a0bd2096ab5acf42c48110f024bc8ea052ba62c707345c7db46fea7a494388df","src/sys/unix/io.rs":"a518f09020f821e87bcf9c2ecb4bf501d428619ddfd7b35a26629b614919b14c","src/sys/unix/kqueue.rs":"3bf9f9635a8607569e3176998b61d1801e5bb35a94588c827a0a954656eee3ea","src/sys/unix/mod.rs":"15ddcfab101e7dfb926f82fd2d6eebb30c66f43fc2af00e4bb2f687c7059e0d0","src/sys/unix/ready.rs":"8494e27731d6842a90e01ec783d37001802f472f81358177e047d43b4bc68c43","src/sys/unix/tcp.rs":"19d483762fc8c8a1cb182b2f2ead85534f99394cf605a14d5ed46db7f3354667","src/sys/unix/udp.rs":"bc2e8ad142b17797a7d038e720ff57ac9840eb5b2b26696c854180085ccd1873","src/sys/unix/uds.rs":"5223d4d35048019d175679686cc65a08baf027df0b2127428e2f322bbb533309","src/sys/unix/uio.rs":"3942a49548dd3a37e5fd6044a103d92e2635965ace1ab370be10c82245b41f66","src/sys/windows/awakener.rs":"2d3cdaf8b523675e2f64c5fd81e1197d5255384517b9753b718
c5c47acf0cabd","src/sys/windows/buffer_pool.rs":"636f4b7510a507f9985987699ce8ec140b2ed852abb231443ee1486c80759eed","src/sys/windows/from_raw_arc.rs":"659dabdf5037546e3783aacc422992b4248b0ba2ddcce52b149d35bc723550e5","src/sys/windows/mod.rs":"afeec8cd4e3adeaf6ffe68b134ad1b4ba07faa3abae96f6f9a00bbc20ff5f2c5","src/sys/windows/selector.rs":"0137276cff457f84511e007bb9527f5e082ec04e898b8f8e0acd39fe65c00148","src/sys/windows/tcp.rs":"9942db351f91229d01a0b9f52dd6c9680050d3abcee9fbb6b4f2f14896dc2c58","src/sys/windows/udp.rs":"1ef869b660bcf89ea6498552474abf8f540946631e14d5b610ca31014cd9045f","src/timer.rs":"540d521c5b4a79f3b1c01296ef2e14e2e3743192f25180ee6e71e367692ce762","src/token.rs":"4a56f851709470df2eed803c57c68b0a4b12ea86fa1b8d2c999bec7a85d58ec0","src/udp.rs":"442e620f3ea0cf010497d3ad775debd585f28e79a025993d40471c8e6839dc98"},"package":"4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4"} \ No newline at end of file
diff --git a/third_party/rust/mio-0.6.23/CHANGELOG.md b/third_party/rust/mio-0.6.23/CHANGELOG.md
new file mode 100644
index 0000000000..c17ebd0151
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/CHANGELOG.md
@@ -0,0 +1,227 @@
+# 0.6.23 (Dec 01, 2020)
+
+### Changed
+- **MSRV**: Increased the MSRV from 1.18.0 (Jun 8, 2017) to 1.31.0 (Dec 6,
+ 2018)
+ (https://github.com/tokio-rs/mio/commit/4879e0d32ddfd98e762fc87240e594a3ad8fca30).
+
+### Fixed
+- Work around a Linux kernel < 2.6.37 bug on 32-bit systems that makes timeouts
+  longer than ~30 minutes effectively infinite
+ (https://github.com/tokio-rs/mio/commit/e7cba59950e9c9fa6194e29b5b1e72029e3df455).
+- Update the miow and net2 dependencies to get rid of an invalid memory layout assumption
+ (https://github.com/tokio-rs/mio/commit/13f02ac0a86d7c0c0001e5ff8960a0b4340d075c).
+
+# 0.6.22 (May 01, 2020)
+
+### Added
+- Add support for illumos target (#1294)
+
+# 0.6.21 (November 27, 2019)
+
+### Fixed
+- Remove the exact (`=`) version requirement on `cfg-if`.
+
+# 0.6.20 (November 21, 2019)
+
+### Fixed
+- Use default IOCP concurrency value (#1161).
+- Set FD_CLOEXEC when creating pipes (#1095).
+
+# 0.6.19 (May 28, 2019)
+
+### Fixed
+- Do not trigger HUP events on kqueue platforms (#958).
+
+# 0.6.18 (May 24, 2019)
+
+### Fixed
+- Fix compilation on kqueue platforms with 32bit C long (#948).
+
+# 0.6.17 (May 15, 2019)
+
+### Fixed
+- Don't report `RDHUP` as `HUP` (#939)
+- Fix lazycell related compilation issues.
+- Fix EPOLLPRI conflicting with READABLE
+- Abort process on ref count overflows
+
+### Added
+- Define PRI on all targets
+
+# 0.6.16 (September 5, 2018)
+
+* Add EPOLLPRI readiness to UnixReady on supported platforms (#867)
+* Reduce spurious awaken calls (#875)
+
+# 0.6.15 (July 3, 2018)
+
+* Implement `Evented` for containers (#840).
+* Fix android-aarch64 build (#850).
+
+# 0.6.14 (March 8, 2018)
+
+* Add `Poll::poll_interruptible` (#811)
+* Add `Ready::all` and `usize` conversions (#825)
+
+# 0.6.13 (February 5, 2018)
+
+* Fix build on DragonFlyBSD.
+* Add `TcpListener::from_std` that does not require the socket addr.
+* Deprecate `TcpListener::from_listener` in favor of from_std.
+
+# 0.6.12 (January 5, 2018)
+
+* Add `TcpStream::peek` function (#773).
+* Raise minimum Rust version to 1.18.0.
+* `Poll`: retry select() when interrupted by a signal (#742).
+* Deprecate `Events` index access (#713).
+* Add `Events::clear` (#782).
+* Add support for `lio_listio` (#780).
+
+# 0.6.11 (October 25, 2017)
+
+* Allow register to take empty interest (#640).
+* Fix bug with TCP errors on windows (#725).
+* Add TcpListener::accept_std (#733).
+* Update IoVec to fix soundness bug -- includes behavior change. (#747).
+* Minimum Rust version is now 1.14.0.
+* Fix Android x86_64 build.
+* Misc API & doc polish.
+
+# 0.6.10 (July 27, 2017)
+
+* Experimental support for Fuchsia
+* Add `only_v6` option for UDP sockets
+* Fix build on NetBSD
+* Minimum Rust version is now 1.13.0
+* Assignment operators (e.g. `|=`) are now implemented for `Ready`
+
+# 0.6.9 (June 7, 2017)
+
+* More socket options are exposed through the TCP types, brought in through the
+ `net2` crate.
+
+# 0.6.8 (May 26, 2017)
+
+* Support Fuchsia
+* POSIX AIO support
+* Fix memory leak caused by Register::new2
+* Windows: fix handling failed TCP connections
+* Fix build on aarch64-linux-android
+* Fix usage of `O_CLOEXEC` with `SETFL`
+
+# 0.6.7 (April 27, 2017)
+
+* Ignore EPIPE coming out of `kevent`
+* Timer thread should exit when timer is dropped.
+
+# 0.6.6 (March 22, 2017)
+
+* Add `send()`, `recv()` and `connect()` to `UdpSocket`.
+* Fix bug in custom readiness queue
+* Move net types into `net` module
+
+# 0.6.5 (March 14, 2017)
+
+* Misc improvements to kqueue bindings
+* Add official support for iOS, Android, BSD
+* Reimplement custom readiness queue
+* `Poll` is now `Sync`
+* Officially deprecate non-core functionality (timers, channel, etc...)
+* `Registration` now implements `Evented`
+* Fix bug around error conditions with `connect` on windows.
+* Use iovec crate for scatter / gather operations
+* Only support readable and writable readiness on all platforms
+* Expose additional readiness in a platform specific capacity
+
+# 0.6.4 (January 24, 2017)
+
+* Fix compilation on musl
+* Add `TcpStream::from_stream` which converts a std TCP stream to Mio.
+
+# 0.6.3 (January 22, 2017)
+
+* Implement readv/writev for `TcpStream`, allowing vectored reads/writes to
+ work across platforms
+* Remove `nix` dependency
+* Implement `Display` and `Error` for some channel error types.
+* Optimize TCP on Windows through `SetFileCompletionNotificationModes`
+
+# 0.6.2 (December 18, 2016)
+
+* Allow registration of custom handles on Windows (like `EventedFd` on Unix)
+* Send only one byte for the awakener on Unix instead of four
+* Fix a bug in the timer implementation which caused an infinite loop
+
+# 0.6.1 (October 30, 2016)
+
+* Update dependency of `libc` to 0.2.16
+* Fix channel `dec` logic
+* Fix a timer bug around timeout cancellation
+* Don't allocate buffers for TCP reads on Windows
+* Touched up documentation in a few places
+* Fix an infinite looping timer thread on OSX
+* Fix compile on 32-bit OSX
+* Fix compile on FreeBSD
+
+# 0.6.0 (September 2, 2016)
+
+* Shift primary API towards `Poll`
+* Move `EventLoop` and related types to the `deprecated` mod. All contents of
+  the `deprecated` mod will be removed by Mio 1.0.
+* Increase minimum supported Rust version to 1.9.0
+* Deprecate unix domain socket implementation in favor of using a
+ version external to Mio. For example: https://github.com/alexcrichton/mio-uds.
+* Remove various types now included in `std`
+* Updated TCP & UDP APIs to match the versions in `std`
+* Enable implementing `Evented` for any type via `Registration`
+* Rename `IoEvent` -> `Event`
+* Access `Event` data via functions vs. public fields.
+* Expose `Events` as a public type that is passed into `Poll`
+* Use `std::time::Duration` for all APIs that require a time duration.
+* Polled events are now retrieved via `Events` type.
+* Implement `std::error::Error` for `TimerError`
+* Relax `Send` bound on notify messages.
+* Remove `Clone` impl for `Timeout` (future proof)
+* Remove `mio::prelude`
+* Remove `mio::util`
+* Remove dependency on bytes
+
+# 0.5.0 (December 3, 2015)
+
+* Windows support (#239)
+* NetBSD support (#306)
+* Android support (#295)
+* Don't re-export bytes types
+* Renamed `EventLoop::register_opt` to `EventLoop::register` (#257)
+* `EventLoopConfig` is now a builder instead of having public struct fields. It
+ is also no longer `Copy`. (#259)
+* `TcpSocket` is no longer exported in the public API (#262)
+* Integrate with net2. (#262)
+* `TcpListener` now returns the remote peer address from `accept` as well (#275)
+* The `UdpSocket::{send_to, recv_from}` methods are no longer generic over `Buf`
+ or `MutBuf` but instead take slices directly. The return types have also been
+ updated to return the number of bytes transferred. (#260)
+* Fix bug with kqueue where an error on registration prevented the
+ changelist from getting flushed (#276)
+* Support sending/receiving FDs over UNIX sockets (#291)
+* Mio's socket types are permanently associated with an EventLoop (#308)
+* Reduce unnecessary poll wakeups (#314)
+
+
+# 0.4.1 (July 21, 2015)
+
+* [BUGFIX] Fix notify channel concurrency bug (#216)
+
+# 0.4.0 (July 16, 2015)
+
+* [BUGFIX] EventLoop::register requests all events, not just readable.
+* [BUGFIX] Attempting to send a message to a shutdown event loop fails correctly.
+* [FEATURE] Expose TCP shutdown
+* [IMPROVEMENT] Coalesce readable & writable into `ready` event (#184)
+* [IMPROVEMENT] Rename TryRead & TryWrite function names to avoid conflict with std.
+* [IMPROVEMENT] Provide TCP and UDP types in Mio (path to windows #155)
+* [IMPROVEMENT] Use clock_ticks crate instead of time (path to windows #155)
+* [IMPROVEMENT] Move unix specific features into mio::unix module
+* [IMPROVEMENT] TcpListener sets SO_REUSEADDR by default
diff --git a/third_party/rust/mio-0.6.23/Cargo.toml b/third_party/rust/mio-0.6.23/Cargo.toml
new file mode 100644
index 0000000000..08c5ac150f
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/Cargo.toml
@@ -0,0 +1,70 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "mio"
+version = "0.6.23"
+authors = ["Carl Lerche <me@carllerche.com>"]
+include = ["Cargo.toml", "LICENSE", "README.md", "CHANGELOG.md", "src/**/*.rs"]
+description = "Lightweight non-blocking IO"
+homepage = "https://github.com/tokio-rs/mio"
+documentation = "https://docs.rs/mio/0.6.23/mio/"
+readme = "README.md"
+keywords = ["io", "async", "non-blocking"]
+categories = ["asynchronous"]
+license = "MIT"
+repository = "https://github.com/tokio-rs/mio"
+
+[[test]]
+name = "test"
+path = "test/mod.rs"
+[dependencies.cfg-if]
+version = "0.1.9"
+
+[dependencies.iovec]
+version = "0.1.1"
+
+[dependencies.log]
+version = "0.4"
+
+[dependencies.net2]
+version = "0.2.36"
+
+[dependencies.slab]
+version = "0.4.0"
+[dev-dependencies.bytes]
+version = "0.3.0"
+
+[dev-dependencies.env_logger]
+version = "0.4.0"
+default-features = false
+
+[dev-dependencies.tempdir]
+version = "0.3.4"
+
+[features]
+default = ["with-deprecated"]
+with-deprecated = []
+[target."cfg(target_os = \"fuchsia\")".dependencies.fuchsia-zircon]
+version = "0.3.2"
+
+[target."cfg(target_os = \"fuchsia\")".dependencies.fuchsia-zircon-sys]
+version = "0.3.2"
+[target."cfg(unix)".dependencies.libc]
+version = "0.2.54"
+
+[target."cfg(windows)".dependencies.miow]
+version = "0.3"
+
+[target."cfg(windows)".dependencies.winapi]
+version = "0.3"
+features = ["ioapiset", "minwinbase", "minwindef", "winbase", "winerror", "winnt"]
diff --git a/third_party/rust/mio-0.6.23/LICENSE b/third_party/rust/mio-0.6.23/LICENSE
new file mode 100644
index 0000000000..3516413824
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014 Carl Lerche and other MIO contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/third_party/rust/mio-0.6.23/README.md b/third_party/rust/mio-0.6.23/README.md
new file mode 100644
index 0000000000..2a472bba46
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/README.md
@@ -0,0 +1,90 @@
+# Mio – Metal IO
+
+Mio is a lightweight I/O library for Rust with a focus on adding as little
+overhead as possible over the OS abstractions.
+
+[![Crates.io][crates-badge]][crates-url]
+[![MIT licensed][mit-badge]][mit-url]
+[![Build Status][azure-badge]][azure-url]
+[![Build Status][cirrus-badge]][cirrus-url]
+
+[crates-badge]: https://img.shields.io/crates/v/mio.svg
+[crates-url]: https://crates.io/crates/mio
+[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg
+[mit-url]: LICENSE
+[azure-badge]: https://dev.azure.com/tokio-rs/Tokio/_apis/build/status/tokio-rs.mio?branchName=master
+[azure-url]: https://dev.azure.com/tokio-rs/Tokio/_build/latest?definitionId=2&branchName=master
+[cirrus-badge]: https://api.cirrus-ci.com/github/carllerche/mio.svg
+[cirrus-url]: https://cirrus-ci.com/github/carllerche/mio
+
+**API documentation**
+
+* [master](https://tokio-rs.github.io/mio/doc/mio/)
+* [v0.6](https://docs.rs/mio/^0.6)
+
+This is a low-level library; if you are looking for something easier to get
+started with, see [Tokio](https://tokio.rs).
+
+## Usage
+
+To use `mio`, first add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+mio = "0.6"
+```
+
+Then, add this to your crate root:
+
+```rust
+extern crate mio;
+```
+
+## Features
+
+* Non-blocking TCP, UDP.
+* I/O event notification queue backed by epoll, kqueue, and IOCP.
+* Zero allocations at runtime
+* Platform specific extensions.
+
+## Non-goals
+
+The following are specifically omitted from Mio and are left to the user
+or higher-level libraries.
+
+* File operations
+* Thread pools / multi-threaded event loop
+* Timers
+
+## Platforms
+
+Currently supported platforms:
+
+* Linux
+* OS X
+* Windows
+* FreeBSD
+* NetBSD
+* Solaris
+* Android
+* iOS
+
+There are potentially others. If you find that Mio works on another
+platform, submit a PR to update the list!
+
+## Community
+
+A group of Mio users hang out in the #mio channel on the Mozilla IRC
+server (irc.mozilla.org). This can be a good place to go for questions.
+
+## Contributing
+
+Interested in getting involved? We would love to help you! For simple
+bug fixes, just submit a PR with the fix and we can discuss the fix
+directly in the PR. If the fix is more complex, start with an issue.
+
+If you want to propose an API change, create an issue to start a
+discussion with the community. Also, feel free to talk with us in the
+IRC channel.
+
+Finally, be kind. We support the [Rust Code of Conduct](https://www.rust-lang.org/conduct.html).
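
The README's Usage section stops at adding the dependency, so here is a minimal, editor-supplied sketch of the polling model described under Features. The address, `SERVER` token, and event capacity are arbitrary illustrative choices, not part of the upstream README.

```rust
extern crate mio;

use mio::net::TcpListener;
use mio::{Events, Poll, PollOpt, Ready, Token};

const SERVER: Token = Token(0);

fn main() -> std::io::Result<()> {
    // Bind a non-blocking listener and register it for readable readiness.
    let addr = "127.0.0.1:0".parse().unwrap();
    let listener = TcpListener::bind(&addr)?;

    let poll = Poll::new()?;
    poll.register(&listener, SERVER, Ready::readable(), PollOpt::edge())?;

    let mut events = Events::with_capacity(1024);
    loop {
        // Block until at least one registered handle becomes ready.
        poll.poll(&mut events, None)?;

        for event in events.iter() {
            if event.token() == SERVER {
                // Edge-triggered: accept until the listener reports WouldBlock.
                while let Ok((_stream, peer)) = listener.accept() {
                    println!("accepted connection from {}", peer);
                }
            }
        }
    }
}
```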
diff --git a/third_party/rust/mio-0.6.23/src/channel.rs b/third_party/rust/mio-0.6.23/src/channel.rs
new file mode 100644
index 0000000000..7077c51f86
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/channel.rs
@@ -0,0 +1,390 @@
+//! Thread safe communication channel implementing `Evented`
+
+#![allow(unused_imports, deprecated, missing_debug_implementations)]
+
+use {io, Ready, Poll, PollOpt, Registration, SetReadiness, Token};
+use event::Evented;
+use lazycell::{LazyCell, AtomicLazyCell};
+use std::any::Any;
+use std::fmt;
+use std::error;
+use std::sync::{mpsc, Arc};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// Creates a new asynchronous channel, where the `Receiver` can be registered
+/// with `Poll`.
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+ let (tx_ctl, rx_ctl) = ctl_pair();
+ let (tx, rx) = mpsc::channel();
+
+ let tx = Sender {
+ tx,
+ ctl: tx_ctl,
+ };
+
+ let rx = Receiver {
+ rx,
+ ctl: rx_ctl,
+ };
+
+ (tx, rx)
+}
+
+/// Creates a new synchronous, bounded channel where the `Receiver` can be
+/// registered with `Poll`.
+pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>) {
+ let (tx_ctl, rx_ctl) = ctl_pair();
+ let (tx, rx) = mpsc::sync_channel(bound);
+
+ let tx = SyncSender {
+ tx,
+ ctl: tx_ctl,
+ };
+
+ let rx = Receiver {
+ rx,
+ ctl: rx_ctl,
+ };
+
+ (tx, rx)
+}
+
+pub fn ctl_pair() -> (SenderCtl, ReceiverCtl) {
+ let inner = Arc::new(Inner {
+ pending: AtomicUsize::new(0),
+ senders: AtomicUsize::new(1),
+ set_readiness: AtomicLazyCell::new(),
+ });
+
+ let tx = SenderCtl {
+ inner: inner.clone(),
+ };
+
+ let rx = ReceiverCtl {
+ registration: LazyCell::new(),
+ inner,
+ };
+
+ (tx, rx)
+}
+
+/// Tracks messages sent on a channel in order to update readiness.
+pub struct SenderCtl {
+ inner: Arc<Inner>,
+}
+
+/// Tracks messages received on a channel in order to track readiness.
+pub struct ReceiverCtl {
+ registration: LazyCell<Registration>,
+ inner: Arc<Inner>,
+}
+
+pub struct Sender<T> {
+ tx: mpsc::Sender<T>,
+ ctl: SenderCtl,
+}
+
+pub struct SyncSender<T> {
+ tx: mpsc::SyncSender<T>,
+ ctl: SenderCtl,
+}
+
+pub struct Receiver<T> {
+ rx: mpsc::Receiver<T>,
+ ctl: ReceiverCtl,
+}
+
+pub enum SendError<T> {
+ Io(io::Error),
+ Disconnected(T),
+}
+
+pub enum TrySendError<T> {
+ Io(io::Error),
+ Full(T),
+ Disconnected(T),
+}
+
+struct Inner {
+ // The number of outstanding messages for the receiver to read
+ pending: AtomicUsize,
+ // The number of sender handles
+ senders: AtomicUsize,
+ // The set readiness handle
+ set_readiness: AtomicLazyCell<SetReadiness>,
+}
+
+impl<T> Sender<T> {
+ pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+ self.tx.send(t)
+ .map_err(SendError::from)
+ .and_then(|_| {
+ self.ctl.inc()?;
+ Ok(())
+ })
+ }
+}
+
+impl<T> Clone for Sender<T> {
+ fn clone(&self) -> Sender<T> {
+ Sender {
+ tx: self.tx.clone(),
+ ctl: self.ctl.clone(),
+ }
+ }
+}
+
+impl<T> SyncSender<T> {
+ pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+ self.tx.send(t)
+ .map_err(From::from)
+ .and_then(|_| {
+ self.ctl.inc()?;
+ Ok(())
+ })
+ }
+
+ pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
+ self.tx.try_send(t)
+ .map_err(From::from)
+ .and_then(|_| {
+ self.ctl.inc()?;
+ Ok(())
+ })
+ }
+}
+
+impl<T> Clone for SyncSender<T> {
+ fn clone(&self) -> SyncSender<T> {
+ SyncSender {
+ tx: self.tx.clone(),
+ ctl: self.ctl.clone(),
+ }
+ }
+}
+
+impl<T> Receiver<T> {
+ pub fn try_recv(&self) -> Result<T, mpsc::TryRecvError> {
+ self.rx.try_recv().and_then(|res| {
+ let _ = self.ctl.dec();
+ Ok(res)
+ })
+ }
+}
+
+impl<T> Evented for Receiver<T> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.ctl.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.ctl.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.ctl.deregister(poll)
+ }
+}
+
+/*
+ *
+ * ===== SenderCtl / ReceiverCtl =====
+ *
+ */
+
+impl SenderCtl {
+ /// Call to track that a message has been sent
+ pub fn inc(&self) -> io::Result<()> {
+ let cnt = self.inner.pending.fetch_add(1, Ordering::Acquire);
+
+ if 0 == cnt {
+ // Toggle readiness to readable
+ if let Some(set_readiness) = self.inner.set_readiness.borrow() {
+ set_readiness.set_readiness(Ready::readable())?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Clone for SenderCtl {
+ fn clone(&self) -> SenderCtl {
+ self.inner.senders.fetch_add(1, Ordering::Relaxed);
+ SenderCtl { inner: self.inner.clone() }
+ }
+}
+
+impl Drop for SenderCtl {
+ fn drop(&mut self) {
+ if self.inner.senders.fetch_sub(1, Ordering::Release) == 1 {
+ let _ = self.inc();
+ }
+ }
+}
+
+impl ReceiverCtl {
+ pub fn dec(&self) -> io::Result<()> {
+ let first = self.inner.pending.load(Ordering::Acquire);
+
+ if first == 1 {
+ // Unset readiness
+ if let Some(set_readiness) = self.inner.set_readiness.borrow() {
+ set_readiness.set_readiness(Ready::empty())?;
+ }
+ }
+
+ // Decrement
+ let second = self.inner.pending.fetch_sub(1, Ordering::AcqRel);
+
+ if first == 1 && second > 1 {
+ // There are still pending messages. Since readiness was
+ // previously unset, it must be reset here
+ if let Some(set_readiness) = self.inner.set_readiness.borrow() {
+ set_readiness.set_readiness(Ready::readable())?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Evented for ReceiverCtl {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ if self.registration.borrow().is_some() {
+ return Err(io::Error::new(io::ErrorKind::Other, "receiver already registered"));
+ }
+
+ let (registration, set_readiness) = Registration::new(poll, token, interest, opts);
+
+
+ if self.inner.pending.load(Ordering::Relaxed) > 0 {
+ // TODO: Don't drop readiness
+ let _ = set_readiness.set_readiness(Ready::readable());
+ }
+
+ self.registration.fill(registration).expect("unexpected state encountered");
+ self.inner.set_readiness.fill(set_readiness).expect("unexpected state encountered");
+
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ match self.registration.borrow() {
+ Some(registration) => registration.update(poll, token, interest, opts),
+ None => Err(io::Error::new(io::ErrorKind::Other, "receiver not registered")),
+ }
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ match self.registration.borrow() {
+ Some(registration) => registration.deregister(poll),
+ None => Err(io::Error::new(io::ErrorKind::Other, "receiver not registered")),
+ }
+ }
+}
+
+/*
+ *
+ * ===== Error conversions =====
+ *
+ */
+
+impl<T> From<mpsc::SendError<T>> for SendError<T> {
+ fn from(src: mpsc::SendError<T>) -> SendError<T> {
+ SendError::Disconnected(src.0)
+ }
+}
+
+impl<T> From<io::Error> for SendError<T> {
+ fn from(src: io::Error) -> SendError<T> {
+ SendError::Io(src)
+ }
+}
+
+impl<T> From<mpsc::TrySendError<T>> for TrySendError<T> {
+ fn from(src: mpsc::TrySendError<T>) -> TrySendError<T> {
+ match src {
+ mpsc::TrySendError::Full(v) => TrySendError::Full(v),
+ mpsc::TrySendError::Disconnected(v) => TrySendError::Disconnected(v),
+ }
+ }
+}
+
+impl<T> From<mpsc::SendError<T>> for TrySendError<T> {
+ fn from(src: mpsc::SendError<T>) -> TrySendError<T> {
+ TrySendError::Disconnected(src.0)
+ }
+}
+
+impl<T> From<io::Error> for TrySendError<T> {
+ fn from(src: io::Error) -> TrySendError<T> {
+ TrySendError::Io(src)
+ }
+}
+
+/*
+ *
+ * ===== Implement Error, Debug and Display for Errors =====
+ *
+ */
+
+impl<T: Any> error::Error for SendError<T> {
+ fn description(&self) -> &str {
+ match *self {
+ SendError::Io(ref io_err) => io_err.description(),
+ SendError::Disconnected(..) => "Disconnected",
+ }
+ }
+}
+
+impl<T: Any> error::Error for TrySendError<T> {
+ fn description(&self) -> &str {
+ match *self {
+ TrySendError::Io(ref io_err) => io_err.description(),
+ TrySendError::Full(..) => "Full",
+ TrySendError::Disconnected(..) => "Disconnected",
+ }
+ }
+}
+
+impl<T> fmt::Debug for SendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_send_error(self, f)
+ }
+}
+
+impl<T> fmt::Display for SendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_send_error(self, f)
+ }
+}
+
+impl<T> fmt::Debug for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_try_send_error(self, f)
+ }
+}
+
+impl<T> fmt::Display for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ format_try_send_error(self, f)
+ }
+}
+
+#[inline]
+fn format_send_error<T>(e: &SendError<T>, f: &mut fmt::Formatter) -> fmt::Result {
+ match *e {
+ SendError::Io(ref io_err) => write!(f, "{}", io_err),
+ SendError::Disconnected(..) => write!(f, "Disconnected"),
+ }
+}
+
+#[inline]
+fn format_try_send_error<T>(e: &TrySendError<T>, f: &mut fmt::Formatter) -> fmt::Result {
+ match *e {
+ TrySendError::Io(ref io_err) => write!(f, "{}", io_err),
+ TrySendError::Full(..) => write!(f, "Full"),
+ TrySendError::Disconnected(..) => write!(f, "Disconnected"),
+ }
+}
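
Since `Receiver` implements `Evented` (above), it registers with `Poll` like a socket, and a send from any thread wakes the poll loop. A small editor-supplied sketch follows; the `CHANNEL` token and the `u32` message type are arbitrary, and the module is deprecated in 0.6 but still exported under the default `with-deprecated` feature.

```rust
extern crate mio;

use mio::channel; // deprecated in 0.6.x, but exported under the default feature
use mio::{Events, Poll, PollOpt, Ready, Token};
use std::thread;

const CHANNEL: Token = Token(0);

fn main() -> std::io::Result<()> {
    let (tx, rx) = channel::channel::<u32>();

    // The receiving half registers with `Poll`; sends wake the poll loop.
    let poll = Poll::new()?;
    poll.register(&rx, CHANNEL, Ready::readable(), PollOpt::edge())?;

    thread::spawn(move || {
        tx.send(42).expect("receiver was dropped");
    });

    let mut events = Events::with_capacity(8);
    poll.poll(&mut events, None)?;

    for event in events.iter() {
        if event.token() == CHANNEL {
            // Drain everything that is currently queued.
            while let Ok(value) = rx.try_recv() {
                println!("received {}", value);
            }
        }
    }
    Ok(())
}
```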
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/event_loop.rs b/third_party/rust/mio-0.6.23/src/deprecated/event_loop.rs
new file mode 100644
index 0000000000..a4c4580b3a
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/event_loop.rs
@@ -0,0 +1,346 @@
+use {channel, Poll, Events, Token};
+use event::Evented;
+use deprecated::{Handler, NotifyError};
+use event_imp::{Event, Ready, PollOpt};
+use timer::{self, Timer, Timeout};
+use std::{io, fmt, usize};
+use std::default::Default;
+use std::time::Duration;
+
+#[derive(Debug, Default, Clone)]
+pub struct EventLoopBuilder {
+ config: Config,
+}
+
+/// `EventLoop` configuration details
+#[derive(Clone, Debug)]
+struct Config {
+ // == Notifications ==
+ notify_capacity: usize,
+ messages_per_tick: usize,
+
+ // == Timer ==
+ timer_tick: Duration,
+ timer_wheel_size: usize,
+ timer_capacity: usize,
+}
+
+impl Default for Config {
+ fn default() -> Config {
+ // Default EventLoop configuration values
+ Config {
+ notify_capacity: 4_096,
+ messages_per_tick: 256,
+ timer_tick: Duration::from_millis(100),
+ timer_wheel_size: 1_024,
+ timer_capacity: 65_536,
+ }
+ }
+}
+
+impl EventLoopBuilder {
+ /// Construct a new `EventLoopBuilder` with the default configuration
+ /// values.
+ pub fn new() -> EventLoopBuilder {
+ EventLoopBuilder::default()
+ }
+
+ /// Sets the maximum number of messages that can be buffered on the event
+ /// loop's notification channel before a send will fail.
+ ///
+ /// The default value for this is 4096.
+ pub fn notify_capacity(&mut self, capacity: usize) -> &mut Self {
+ self.config.notify_capacity = capacity;
+ self
+ }
+
+ /// Sets the maximum number of messages that can be processed on any tick of
+ /// the event loop.
+ ///
+ /// The default value for this is 256.
+ pub fn messages_per_tick(&mut self, messages: usize) -> &mut Self {
+ self.config.messages_per_tick = messages;
+ self
+ }
+
+ pub fn timer_tick(&mut self, val: Duration) -> &mut Self {
+ self.config.timer_tick = val;
+ self
+ }
+
+ pub fn timer_wheel_size(&mut self, size: usize) -> &mut Self {
+ self.config.timer_wheel_size = size;
+ self
+ }
+
+ pub fn timer_capacity(&mut self, cap: usize) -> &mut Self {
+ self.config.timer_capacity = cap;
+ self
+ }
+
+ /// Constructs a new `EventLoop` using the configured values. The
+ /// `EventLoop` will not be running.
+ pub fn build<H: Handler>(self) -> io::Result<EventLoop<H>> {
+ EventLoop::configured(self.config)
+ }
+}
+
+/// Single threaded IO event loop.
+pub struct EventLoop<H: Handler> {
+ run: bool,
+ poll: Poll,
+ events: Events,
+ timer: Timer<H::Timeout>,
+ notify_tx: channel::SyncSender<H::Message>,
+ notify_rx: channel::Receiver<H::Message>,
+ config: Config,
+}
+
+// Tokens used to represent the notification channel and the timer
+const NOTIFY: Token = Token(usize::MAX - 1);
+const TIMER: Token = Token(usize::MAX - 2);
+
+impl<H: Handler> EventLoop<H> {
+
+ /// Constructs a new `EventLoop` using the default configuration values.
+ /// The `EventLoop` will not be running.
+ pub fn new() -> io::Result<EventLoop<H>> {
+ EventLoop::configured(Config::default())
+ }
+
+ fn configured(config: Config) -> io::Result<EventLoop<H>> {
+ // Create the IO poller
+ let poll = Poll::new()?;
+
+ let timer = timer::Builder::default()
+ .tick_duration(config.timer_tick)
+ .num_slots(config.timer_wheel_size)
+ .capacity(config.timer_capacity)
+ .build();
+
+ // Create cross thread notification queue
+ let (tx, rx) = channel::sync_channel(config.notify_capacity);
+
+ // Register the notification wakeup FD with the IO poller
+ poll.register(&rx, NOTIFY, Ready::readable(), PollOpt::edge() | PollOpt::oneshot())?;
+ poll.register(&timer, TIMER, Ready::readable(), PollOpt::edge())?;
+
+ Ok(EventLoop {
+ run: true,
+ poll,
+ timer,
+ notify_tx: tx,
+ notify_rx: rx,
+ config,
+ events: Events::with_capacity(1024),
+ })
+ }
+
+ /// Returns a sender that allows sending messages to the event loop in a
+ /// thread-safe way, waking up the event loop if needed.
+ ///
+ /// # Implementation Details
+ ///
+ /// Each [EventLoop](#) contains a lock-free queue with a pre-allocated
+ /// buffer size. The size can be changed by modifying
+ /// [EventLoopConfig.notify_capacity](struct.EventLoopConfig.html#method.notify_capacity).
+ /// When a message is sent to the EventLoop, it is first pushed on to the
+ /// queue. Then, if the EventLoop is currently running, an atomic flag is
+ /// set to indicate that the next loop iteration should be started without
+ /// waiting.
+ ///
+ /// If the loop is blocked waiting for IO events, then it is woken up. The
+ /// strategy for waking up the event loop is platform dependent. For
+ /// example, on a modern Linux OS, eventfd is used. On older OSes, a pipe
+ /// is used.
+ ///
+ /// The strategy of setting an atomic flag if the event loop is not already
+ /// sleeping allows avoiding an expensive wakeup operation if at all possible.
+ pub fn channel(&self) -> Sender<H::Message> {
+ Sender::new(self.notify_tx.clone())
+ }
+
+ /// Schedules a timeout after the requested time interval. When the
+ /// duration has been reached,
+ /// [Handler::timeout](trait.Handler.html#method.timeout) will be invoked
+ /// passing in the supplied token.
+ ///
+ /// Returns a handle to the timeout that can be used to cancel the timeout
+ /// using [#clear_timeout](#method.clear_timeout).
+ pub fn timeout(&mut self, token: H::Timeout, delay: Duration) -> timer::Result<Timeout> {
+ self.timer.set_timeout(delay, token)
+ }
+
+ /// If the supplied timeout has not been triggered, cancel it such that it
+ /// will not be triggered in the future.
+ pub fn clear_timeout(&mut self, timeout: &Timeout) -> bool {
+ self.timer.cancel_timeout(&timeout).is_some()
+ }
+
+ /// Tells the event loop to exit after it is done handling all events in the
+ /// current iteration.
+ pub fn shutdown(&mut self) {
+ self.run = false;
+ }
+
+    /// Indicates whether the event loop is currently running. If it's not, it has
+    /// either stopped or is scheduled to stop on the next tick.
+ pub fn is_running(&self) -> bool {
+ self.run
+ }
+
+ /// Registers an IO handle with the event loop.
+ pub fn register<E: ?Sized>(&mut self, io: &E, token: Token, interest: Ready, opt: PollOpt) -> io::Result<()>
+ where E: Evented
+ {
+ self.poll.register(io, token, interest, opt)
+ }
+
+    /// Re-registers an IO handle with the event loop.
+ pub fn reregister<E: ?Sized>(&mut self, io: &E, token: Token, interest: Ready, opt: PollOpt) -> io::Result<()>
+ where E: Evented
+ {
+ self.poll.reregister(io, token, interest, opt)
+ }
+
+ /// Keep spinning the event loop indefinitely, and notify the handler whenever
+ /// any of the registered handles are ready.
+ pub fn run(&mut self, handler: &mut H) -> io::Result<()> {
+ self.run = true;
+
+ while self.run {
+ // Execute ticks as long as the event loop is running
+ self.run_once(handler, None)?;
+ }
+
+ Ok(())
+ }
+
+ /// Deregisters an IO handle with the event loop.
+ ///
+ /// Both kqueue and epoll will automatically clear any pending events when closing a
+ /// file descriptor (socket). In that case, this method does not need to be called
+ /// prior to dropping a connection from the slab.
+ ///
+ /// Warning: kqueue effectively builds in deregister when using edge-triggered mode with
+ /// oneshot. Calling `deregister()` on the socket will cause a TcpStream error.
+ pub fn deregister<E: ?Sized>(&mut self, io: &E) -> io::Result<()> where E: Evented {
+ self.poll.deregister(io)
+ }
+
+ /// Spin the event loop once, with a given timeout (forever if `None`),
+ /// and notify the handler if any of the registered handles become ready
+ /// during that time.
+ pub fn run_once(&mut self, handler: &mut H, timeout: Option<Duration>) -> io::Result<()> {
+ trace!("event loop tick");
+
+        // Check the registered IO handles for any new events. The poll blocks
+        // for at most the supplied timeout (forever if `None`), so a shutdown
+        // request only takes effect once the current poll call returns.
+ let events = match self.io_poll(timeout) {
+ Ok(e) => e,
+ Err(err) => {
+ if err.kind() == io::ErrorKind::Interrupted {
+ handler.interrupted(self);
+ 0
+ } else {
+ return Err(err);
+ }
+ }
+ };
+
+ self.io_process(handler, events);
+ handler.tick(self);
+ Ok(())
+ }
+
+ #[inline]
+ fn io_poll(&mut self, timeout: Option<Duration>) -> io::Result<usize> {
+ self.poll.poll(&mut self.events, timeout)
+ }
+
+ // Process IO events that have been previously polled
+ fn io_process(&mut self, handler: &mut H, cnt: usize) {
+ let mut i = 0;
+
+ trace!("io_process(..); cnt={}; len={}", cnt, self.events.len());
+
+ // Iterate over the notifications. Each event provides the token
+ // it was registered with (which usually represents, at least, the
+ // handle that the event is about) as well as information about
+ // what kind of event occurred (readable, writable, signal, etc.)
+ while i < cnt {
+ let evt = self.events.get(i).unwrap();
+
+ trace!("event={:?}; idx={:?}", evt, i);
+
+ match evt.token() {
+ NOTIFY => self.notify(handler),
+ TIMER => self.timer_process(handler),
+ _ => self.io_event(handler, evt)
+ }
+
+ i += 1;
+ }
+ }
+
+ fn io_event(&mut self, handler: &mut H, evt: Event) {
+ handler.ready(self, evt.token(), evt.readiness());
+ }
+
+ fn notify(&mut self, handler: &mut H) {
+ for _ in 0..self.config.messages_per_tick {
+ match self.notify_rx.try_recv() {
+ Ok(msg) => handler.notify(self, msg),
+ _ => break,
+ }
+ }
+
+ // Re-register
+ let _ = self.poll.reregister(&self.notify_rx, NOTIFY, Ready::readable(), PollOpt::edge() | PollOpt::oneshot());
+ }
+
+ fn timer_process(&mut self, handler: &mut H) {
+ while let Some(t) = self.timer.poll() {
+ handler.timeout(self, t);
+ }
+ }
+}
+
+impl<H: Handler> fmt::Debug for EventLoop<H> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("EventLoop")
+ .field("run", &self.run)
+ .field("poll", &self.poll)
+ .field("config", &self.config)
+ .finish()
+ }
+}
+
+/// Sends messages to the EventLoop from other threads.
+pub struct Sender<M> {
+ tx: channel::SyncSender<M>
+}
+
+impl<M> fmt::Debug for Sender<M> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "Sender<?> {{ ... }}")
+ }
+}
+
+impl<M> Clone for Sender <M> {
+ fn clone(&self) -> Sender<M> {
+ Sender { tx: self.tx.clone() }
+ }
+}
+
+impl<M> Sender<M> {
+ fn new(tx: channel::SyncSender<M>) -> Sender<M> {
+ Sender { tx }
+ }
+
+ pub fn send(&self, msg: M) -> Result<(), NotifyError<M>> {
+ self.tx.try_send(msg)?;
+ Ok(())
+ }
+}
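
A minimal, editor-supplied sketch of the builder knobs and the notification channel documented above. The `Noop` handler and the capacity values are arbitrary placeholders, not upstream code.

```rust
extern crate mio;

use mio::deprecated::{EventLoop, EventLoopBuilder, Handler};
use std::time::Duration;

// A handler that keeps every callback's default (empty) implementation.
struct Noop;

impl Handler for Noop {
    type Timeout = ();
    type Message = ();
}

fn main() -> std::io::Result<()> {
    let mut builder = EventLoopBuilder::new();
    builder
        .notify_capacity(8_192)       // messages buffered before `send` fails
        .messages_per_tick(512)       // notifications drained per tick
        .timer_tick(Duration::from_millis(50));

    let mut event_loop: EventLoop<Noop> = builder.build()?;

    // `channel()` returns a thread-safe `Sender`; a send wakes the loop.
    let sender = event_loop.channel();
    sender.send(()).expect("event loop closed");

    // Spin a single tick; the pending notification is delivered to
    // `Handler::notify` (a no-op here).
    event_loop.run_once(&mut Noop, None)?;
    Ok(())
}
```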
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/handler.rs b/third_party/rust/mio-0.6.23/src/deprecated/handler.rs
new file mode 100644
index 0000000000..db1bc314a7
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/handler.rs
@@ -0,0 +1,37 @@
+use {Ready, Token};
+use deprecated::{EventLoop};
+
+#[allow(unused_variables)]
+pub trait Handler: Sized {
+ type Timeout;
+ type Message;
+
+ /// Invoked when the socket represented by `token` is ready to be operated
+ /// on. `events` indicates the specific operations that are
+ /// ready to be performed.
+ ///
+ /// For example, when a TCP socket is ready to be read from, `events` will
+ /// have `readable` set. When the socket is ready to be written to,
+ /// `events` will have `writable` set.
+ ///
+ /// This function will only be invoked a single time per socket per event
+ /// loop tick.
+ fn ready(&mut self, event_loop: &mut EventLoop<Self>, token: Token, events: Ready) {
+ }
+
+ /// Invoked when a message has been received via the event loop's channel.
+ fn notify(&mut self, event_loop: &mut EventLoop<Self>, msg: Self::Message) {
+ }
+
+ /// Invoked when a timeout has completed.
+ fn timeout(&mut self, event_loop: &mut EventLoop<Self>, timeout: Self::Timeout) {
+ }
+
+    /// Invoked when the `EventLoop` has been interrupted by a signal.
+ fn interrupted(&mut self, event_loop: &mut EventLoop<Self>) {
+ }
+
+ /// Invoked at the end of an event loop tick.
+ fn tick(&mut self, event_loop: &mut EventLoop<Self>) {
+ }
+}
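
A small editor-supplied sketch of the callback flow: `notify` receives messages sent through `EventLoop::channel` and stops the loop, and `timeout` fires for timeouts scheduled with `EventLoop::timeout`. `MyHandler` and the chosen message and timeout types are arbitrary assumptions.

```rust
extern crate mio;

use mio::deprecated::{EventLoop, Handler};
use mio::{Ready, Token};
use std::time::Duration;

struct MyHandler;

impl Handler for MyHandler {
    type Timeout = &'static str;
    type Message = u32;

    fn ready(&mut self, _event_loop: &mut EventLoop<Self>, token: Token, events: Ready) {
        // A registered handle became ready; `token` identifies which one.
        println!("{:?} is ready: {:?}", token, events);
    }

    fn notify(&mut self, event_loop: &mut EventLoop<Self>, msg: u32) {
        println!("got message {}", msg);
        event_loop.shutdown(); // stop after the current tick
    }

    fn timeout(&mut self, _event_loop: &mut EventLoop<Self>, name: &'static str) {
        println!("timeout {:?} fired", name);
    }
}

fn main() -> std::io::Result<()> {
    let mut event_loop: EventLoop<MyHandler> = EventLoop::new()?;

    // Schedule a timeout and queue one notification before running.
    let _ = event_loop.timeout("tick", Duration::from_millis(10));
    event_loop.channel().send(7).expect("event loop closed");

    // Runs until `notify` calls `shutdown()`.
    event_loop.run(&mut MyHandler)
}
```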
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/io.rs b/third_party/rust/mio-0.6.23/src/deprecated/io.rs
new file mode 100644
index 0000000000..16ff27993b
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/io.rs
@@ -0,0 +1,28 @@
+use ::io::MapNonBlock;
+use std::io::{self, Read, Write};
+
+pub trait TryRead {
+ fn try_read(&mut self, buf: &mut [u8]) -> io::Result<Option<usize>>;
+}
+
+pub trait TryWrite {
+ fn try_write(&mut self, buf: &[u8]) -> io::Result<Option<usize>>;
+}
+
+impl<T: Read> TryRead for T {
+ fn try_read(&mut self, dst: &mut [u8]) -> io::Result<Option<usize>> {
+ self.read(dst).map_non_block()
+ }
+}
+
+impl<T: Write> TryWrite for T {
+ fn try_write(&mut self, src: &[u8]) -> io::Result<Option<usize>> {
+ self.write(src).map_non_block()
+ }
+}
+
+pub trait TryAccept {
+ type Output;
+
+ fn accept(&self) -> io::Result<Option<Self::Output>>;
+}
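
An editor-supplied sketch of how the `Option` return value is typically consumed. The helper name `read_ready_bytes` and the use of `mio::net::TcpStream` are illustrative assumptions; the sketch relies only on the blanket `impl<T: Read> TryRead for T` above, which maps `WouldBlock` to `Ok(None)`.

```rust
extern crate mio;

use mio::deprecated::TryRead;
use mio::net::TcpStream;
use std::io;

/// Read whatever is currently available without blocking.
fn read_ready_bytes(stream: &mut TcpStream, buf: &mut [u8]) -> io::Result<usize> {
    let mut total = 0;
    while total < buf.len() {
        match stream.try_read(&mut buf[total..])? {
            Some(0) => break,      // EOF: the peer closed the connection
            Some(n) => total += n, // got `n` bytes, keep draining
            None => break,         // WouldBlock: nothing more right now
        }
    }
    Ok(total)
}
```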
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/mod.rs b/third_party/rust/mio-0.6.23/src/deprecated/mod.rs
new file mode 100644
index 0000000000..124a2eee3d
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/mod.rs
@@ -0,0 +1,36 @@
+#![allow(deprecated)]
+
+mod event_loop;
+mod io;
+mod handler;
+mod notify;
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub mod unix;
+
+pub use self::event_loop::{
+ EventLoop,
+ EventLoopBuilder,
+ Sender,
+};
+pub use self::io::{
+ TryAccept,
+ TryRead,
+ TryWrite,
+};
+pub use self::handler::{
+ Handler,
+};
+pub use self::notify::{
+ NotifyError,
+};
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub use self::unix::{
+ pipe,
+ PipeReader,
+ PipeWriter,
+ UnixListener,
+ UnixSocket,
+ UnixStream,
+ Shutdown,
+};
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/notify.rs b/third_party/rust/mio-0.6.23/src/deprecated/notify.rs
new file mode 100644
index 0000000000..c8432d6b0e
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/notify.rs
@@ -0,0 +1,63 @@
+use {channel};
+use std::{fmt, io, error, any};
+
+pub enum NotifyError<T> {
+ Io(io::Error),
+ Full(T),
+ Closed(Option<T>),
+}
+
+impl<M> fmt::Debug for NotifyError<M> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ NotifyError::Io(ref e) => {
+ write!(fmt, "NotifyError::Io({:?})", e)
+ }
+ NotifyError::Full(..) => {
+ write!(fmt, "NotifyError::Full(..)")
+ }
+ NotifyError::Closed(..) => {
+ write!(fmt, "NotifyError::Closed(..)")
+ }
+ }
+ }
+}
+
+impl<M> fmt::Display for NotifyError<M> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ NotifyError::Io(ref e) => {
+ write!(fmt, "IO error: {}", e)
+ }
+ NotifyError::Full(..) => write!(fmt, "Full"),
+ NotifyError::Closed(..) => write!(fmt, "Closed")
+ }
+ }
+}
+
+impl<M: any::Any> error::Error for NotifyError<M> {
+ fn description(&self) -> &str {
+ match *self {
+ NotifyError::Io(ref err) => err.description(),
+ NotifyError::Closed(..) => "The receiving end has hung up",
+ NotifyError::Full(..) => "Queue is full"
+ }
+ }
+
+ fn cause(&self) -> Option<&error::Error> {
+ match *self {
+ NotifyError::Io(ref err) => Some(err),
+ _ => None
+ }
+ }
+}
+
+impl<M> From<channel::TrySendError<M>> for NotifyError<M> {
+ fn from(src: channel::TrySendError<M>) -> NotifyError<M> {
+ match src {
+ channel::TrySendError::Io(e) => NotifyError::Io(e),
+ channel::TrySendError::Full(v) => NotifyError::Full(v),
+ channel::TrySendError::Disconnected(v) => NotifyError::Closed(Some(v)),
+ }
+ }
+}
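
An editor-supplied sketch of handling the `Full` case: `Sender::send` (in `deprecated::event_loop`) hands the message back inside `NotifyError::Full`, so a caller can retry rather than drop it. The helper name `send_with_retry` is an assumption, not an upstream API.

```rust
extern crate mio;

use mio::deprecated::{NotifyError, Sender};
use std::thread;

fn send_with_retry<M>(sender: &Sender<M>, mut msg: M) -> Result<(), NotifyError<M>> {
    loop {
        match sender.send(msg) {
            Ok(()) => return Ok(()),
            // The notification queue is full: take the message back and retry.
            Err(NotifyError::Full(returned)) => {
                msg = returned;
                thread::yield_now();
            }
            // I/O errors and a closed event loop are not retried.
            Err(other) => return Err(other),
        }
    }
}
```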
diff --git a/third_party/rust/mio-0.6.23/src/deprecated/unix.rs b/third_party/rust/mio-0.6.23/src/deprecated/unix.rs
new file mode 100644
index 0000000000..97c6a60ba4
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/deprecated/unix.rs
@@ -0,0 +1,420 @@
+use {io, sys, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use deprecated::TryAccept;
+use io::MapNonBlock;
+use std::io::{Read, Write};
+use std::path::Path;
+pub use std::net::Shutdown;
+use std::process;
+
+pub use sys::Io;
+
+#[derive(Debug)]
+pub struct UnixSocket {
+ sys: sys::UnixSocket,
+}
+
+impl UnixSocket {
+ /// Returns a new, unbound, non-blocking Unix domain socket
+ pub fn stream() -> io::Result<UnixSocket> {
+ sys::UnixSocket::stream()
+ .map(From::from)
+ }
+
+ /// Connect the socket to the specified address
+ pub fn connect<P: AsRef<Path> + ?Sized>(self, addr: &P) -> io::Result<(UnixStream, bool)> {
+ let complete = match self.sys.connect(addr) {
+ Ok(()) => true,
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => false,
+ Err(e) => return Err(e),
+ };
+ Ok((From::from(self.sys), complete))
+ }
+
+ /// Bind the socket to the specified address
+ pub fn bind<P: AsRef<Path> + ?Sized>(&self, addr: &P) -> io::Result<()> {
+ self.sys.bind(addr)
+ }
+
+ /// Listen for incoming requests
+ pub fn listen(self, backlog: usize) -> io::Result<UnixListener> {
+ self.sys.listen(backlog)?;
+ Ok(From::from(self.sys))
+ }
+
+ pub fn try_clone(&self) -> io::Result<UnixSocket> {
+ self.sys.try_clone()
+ .map(From::from)
+ }
+}
+
+impl Evented for UnixSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl From<sys::UnixSocket> for UnixSocket {
+ fn from(sys: sys::UnixSocket) -> UnixSocket {
+ UnixSocket { sys }
+ }
+}
+
+/*
+ *
+ * ===== UnixStream =====
+ *
+ */
+
+#[derive(Debug)]
+pub struct UnixStream {
+ sys: sys::UnixSocket,
+}
+
+impl UnixStream {
+ pub fn connect<P: AsRef<Path> + ?Sized>(path: &P) -> io::Result<UnixStream> {
+ UnixSocket::stream()
+ .and_then(|sock| sock.connect(path))
+ .map(|(sock, _)| sock)
+ }
+
+ pub fn try_clone(&self) -> io::Result<UnixStream> {
+ self.sys.try_clone()
+ .map(From::from)
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<usize> {
+ self.sys.shutdown(how).map(|_| 0)
+ }
+
+ pub fn read_recv_fd(&mut self, buf: &mut [u8]) -> io::Result<(usize, Option<RawFd>)> {
+ self.sys.read_recv_fd(buf)
+ }
+
+ pub fn try_read_recv_fd(&mut self, buf: &mut [u8]) -> io::Result<Option<(usize, Option<RawFd>)>> {
+ self.read_recv_fd(buf).map_non_block()
+ }
+
+ pub fn write_send_fd(&mut self, buf: &[u8], fd: RawFd) -> io::Result<usize> {
+ self.sys.write_send_fd(buf, fd)
+ }
+
+ pub fn try_write_send_fd(&mut self, buf: &[u8], fd: RawFd) -> io::Result<Option<usize>> {
+ self.write_send_fd(buf, fd).map_non_block()
+ }
+}
+
+impl Read for UnixStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.sys.read(buf)
+ }
+}
+
+impl Write for UnixStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.sys.write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.sys.flush()
+ }
+}
+
+impl Evented for UnixStream {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl From<sys::UnixSocket> for UnixStream {
+ fn from(sys: sys::UnixSocket) -> UnixStream {
+ UnixStream { sys }
+ }
+}
+
+/*
+ *
+ * ===== UnixListener =====
+ *
+ */
+
+#[derive(Debug)]
+pub struct UnixListener {
+ sys: sys::UnixSocket,
+}
+
+impl UnixListener {
+ pub fn bind<P: AsRef<Path> + ?Sized>(addr: &P) -> io::Result<UnixListener> {
+ UnixSocket::stream().and_then(|sock| {
+ sock.bind(addr)?;
+ sock.listen(256)
+ })
+ }
+
+ pub fn accept(&self) -> io::Result<UnixStream> {
+ self.sys.accept().map(From::from)
+ }
+
+ pub fn try_clone(&self) -> io::Result<UnixListener> {
+ self.sys.try_clone().map(From::from)
+ }
+}
+
+impl Evented for UnixListener {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl TryAccept for UnixListener {
+ type Output = UnixStream;
+
+ fn accept(&self) -> io::Result<Option<UnixStream>> {
+ UnixListener::accept(self).map_non_block()
+ }
+}
+
+impl From<sys::UnixSocket> for UnixListener {
+ fn from(sys: sys::UnixSocket) -> UnixListener {
+ UnixListener { sys }
+ }
+}
+
+/*
+ *
+ * ===== Pipe =====
+ *
+ */
+
+pub fn pipe() -> io::Result<(PipeReader, PipeWriter)> {
+ let (rd, wr) = sys::pipe()?;
+ Ok((From::from(rd), From::from(wr)))
+}
+
+#[derive(Debug)]
+pub struct PipeReader {
+ io: Io,
+}
+
+impl PipeReader {
+ pub fn from_stdout(stdout: process::ChildStdout) -> io::Result<Self> {
+ if let Err(e) = sys::set_nonblock(stdout.as_raw_fd()) {
+ return Err(e);
+ }
+ Ok(PipeReader::from(unsafe { Io::from_raw_fd(stdout.into_raw_fd()) }))
+ }
+ pub fn from_stderr(stderr: process::ChildStderr) -> io::Result<Self> {
+ if let Err(e) = sys::set_nonblock(stderr.as_raw_fd()) {
+ return Err(e);
+ }
+ Ok(PipeReader::from(unsafe { Io::from_raw_fd(stderr.into_raw_fd()) }))
+ }
+}
+
+impl Read for PipeReader {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.read(buf)
+ }
+}
+
+impl<'a> Read for &'a PipeReader {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (&self.io).read(buf)
+ }
+}
+
+impl Evented for PipeReader {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.io.deregister(poll)
+ }
+}
+
+impl From<Io> for PipeReader {
+ fn from(io: Io) -> PipeReader {
+ PipeReader { io }
+ }
+}
+
+#[derive(Debug)]
+pub struct PipeWriter {
+ io: Io,
+}
+
+impl PipeWriter {
+ pub fn from_stdin(stdin: process::ChildStdin) -> io::Result<Self> {
+ if let Err(e) = sys::set_nonblock(stdin.as_raw_fd()) {
+ return Err(e);
+ }
+ Ok(PipeWriter::from(unsafe { Io::from_raw_fd(stdin.into_raw_fd()) }))
+ }
+}
+
+impl Write for PipeWriter {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.io.write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.io.flush()
+ }
+}
+
+impl<'a> Write for &'a PipeWriter {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&self.io).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.io).flush()
+ }
+}
+
+impl Evented for PipeWriter {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.io.deregister(poll)
+ }
+}
+
+impl From<Io> for PipeWriter {
+ fn from(io: Io) -> PipeWriter {
+ PipeWriter { io }
+ }
+}
+
+/*
+ *
+ * ===== Conversions =====
+ *
+ */
+
+use std::os::unix::io::{RawFd, IntoRawFd, AsRawFd, FromRawFd};
+
+impl IntoRawFd for UnixSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixSocket {
+ UnixSocket { sys: FromRawFd::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for UnixStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixStream {
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixStream {
+ UnixStream { sys: FromRawFd::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for UnixListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixListener {
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixListener {
+ UnixListener { sys: FromRawFd::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for PipeReader {
+ fn into_raw_fd(self) -> RawFd {
+ self.io.into_raw_fd()
+ }
+}
+
+impl AsRawFd for PipeReader {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.as_raw_fd()
+ }
+}
+
+impl FromRawFd for PipeReader {
+ unsafe fn from_raw_fd(fd: RawFd) -> PipeReader {
+ PipeReader { io: FromRawFd::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for PipeWriter {
+ fn into_raw_fd(self) -> RawFd {
+ self.io.into_raw_fd()
+ }
+}
+
+impl AsRawFd for PipeWriter {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.as_raw_fd()
+ }
+}
+
+impl FromRawFd for PipeWriter {
+ unsafe fn from_raw_fd(fd: RawFd) -> PipeWriter {
+ PipeWriter { io: FromRawFd::from_raw_fd(fd) }
+ }
+}
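+
+// Illustrative sketch (not part of upstream mio): driving a child process's
+// stdin through `Poll` using the `PipeWriter::from_stdin` constructor above.
+// This assumes the deprecated `unix` module is built (the `with-deprecated`
+// feature); the spawned command ("cat") is an arbitrary example.
+//
+//     use std::io::Write;
+//     use std::process::{Command, Stdio};
+//     use mio::{Events, Poll, PollOpt, Ready, Token};
+//     use mio::deprecated::unix::PipeWriter;
+//
+//     let mut child = Command::new("cat").stdin(Stdio::piped()).spawn().unwrap();
+//     let writer = PipeWriter::from_stdin(child.stdin.take().unwrap()).unwrap();
+//
+//     let poll = Poll::new().unwrap();
+//     poll.register(&writer, Token(0), Ready::writable(), PollOpt::edge()).unwrap();
+//
+//     let mut events = Events::with_capacity(8);
+//     poll.poll(&mut events, None).unwrap();
+//     // The pipe is now writable; write to `&writer` to send bytes to the child.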
diff --git a/third_party/rust/mio-0.6.23/src/event_imp.rs b/third_party/rust/mio-0.6.23/src/event_imp.rs
new file mode 100644
index 0000000000..7573ebca83
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/event_imp.rs
@@ -0,0 +1,1162 @@
+use {Poll, Token};
+use std::{fmt, io, ops};
+
+/// A value that may be registered with `Poll`
+///
+/// Values that implement `Evented` can be registered with `Poll`. Users of Mio
+/// should not use the `Evented` trait functions directly. Instead, the
+/// equivalent functions on `Poll` should be used.
+///
+/// See [`Poll`] for more details.
+///
+/// # Implementing `Evented`
+///
+/// There are two types of `Evented` values.
+///
+/// * **System** handles, which are backed by sockets or other system handles.
+/// These `Evented` handles will be monitored by the system selector. In this
+/// case, an implementation of `Evented` delegates to a lower level handle.
+///
+/// * **User** handles, which are driven entirely in user space using
+/// [`Registration`] and [`SetReadiness`]. In this case, the implementer takes
+/// responsibility for driving the readiness state changes.
+///
+/// [`Poll`]: ../struct.Poll.html
+/// [`Registration`]: ../struct.Registration.html
+/// [`SetReadiness`]: ../struct.SetReadiness.html
+///
+/// # Examples
+///
+/// Implementing `Evented` on a struct containing a socket:
+///
+/// ```
+/// use mio::{Ready, Poll, PollOpt, Token};
+/// use mio::event::Evented;
+/// use mio::net::TcpStream;
+///
+/// use std::io;
+///
+/// pub struct MyEvented {
+/// socket: TcpStream,
+/// }
+///
+/// impl Evented for MyEvented {
+/// fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// // Delegate the `register` call to `socket`
+/// self.socket.register(poll, token, interest, opts)
+/// }
+///
+/// fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// // Delegate the `reregister` call to `socket`
+/// self.socket.reregister(poll, token, interest, opts)
+/// }
+///
+/// fn deregister(&self, poll: &Poll) -> io::Result<()> {
+/// // Delegate the `deregister` call to `socket`
+/// self.socket.deregister(poll)
+/// }
+/// }
+/// ```
+///
+/// Implement `Evented` using [`Registration`] and [`SetReadiness`].
+///
+/// ```
+/// use mio::{Ready, Registration, Poll, PollOpt, Token};
+/// use mio::event::Evented;
+///
+/// use std::io;
+/// use std::time::Instant;
+/// use std::thread;
+///
+/// pub struct Deadline {
+/// when: Instant,
+/// registration: Registration,
+/// }
+///
+/// impl Deadline {
+/// pub fn new(when: Instant) -> Deadline {
+/// let (registration, set_readiness) = Registration::new2();
+///
+/// thread::spawn(move || {
+/// let now = Instant::now();
+///
+/// if now < when {
+/// thread::sleep(when - now);
+/// }
+///
+/// set_readiness.set_readiness(Ready::readable());
+/// });
+///
+/// Deadline {
+/// when: when,
+/// registration: registration,
+/// }
+/// }
+///
+/// pub fn is_elapsed(&self) -> bool {
+/// Instant::now() >= self.when
+/// }
+/// }
+///
+/// impl Evented for Deadline {
+/// fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// self.registration.register(poll, token, interest, opts)
+/// }
+///
+/// fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// self.registration.reregister(poll, token, interest, opts)
+/// }
+///
+/// fn deregister(&self, poll: &Poll) -> io::Result<()> {
+/// self.registration.deregister(poll)
+/// }
+/// }
+/// ```
+pub trait Evented {
+ /// Register `self` with the given `Poll` instance.
+ ///
+ /// This function should not be called directly. Use [`Poll::register`]
+ /// instead. Implementors should handle registration by either delegating
+ /// the call to another `Evented` type or creating a [`Registration`].
+ ///
+ /// [`Poll::register`]: ../struct.Poll.html#method.register
+ /// [`Registration`]: ../struct.Registration.html
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>;
+
+ /// Re-register `self` with the given `Poll` instance.
+ ///
+ /// This function should not be called directly. Use [`Poll::reregister`]
+ /// instead. Implementors should handle re-registration by either delegating
+ /// the call to another `Evented` type or calling
+ /// [`SetReadiness::set_readiness`].
+ ///
+ /// [`Poll::reregister`]: ../struct.Poll.html#method.reregister
+ /// [`SetReadiness::set_readiness`]: ../struct.SetReadiness.html#method.set_readiness
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>;
+
+ /// Deregister `self` from the given `Poll` instance
+ ///
+ /// This function should not be called directly. Use [`Poll::deregister`]
+ /// instead. Implementors should handle deregistration by either delegating
+ /// the call to another `Evented` type or by dropping the [`Registration`]
+ /// associated with `self`.
+ ///
+ /// [`Poll::deregister`]: ../struct.Poll.html#method.deregister
+ /// [`Registration`]: ../struct.Registration.html
+ fn deregister(&self, poll: &Poll) -> io::Result<()>;
+}
+
+impl Evented for Box<Evented> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.as_ref().deregister(poll)
+ }
+}
+
+impl<T: Evented> Evented for Box<T> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.as_ref().deregister(poll)
+ }
+}
+
+impl<T: Evented> Evented for ::std::sync::Arc<T> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.as_ref().reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.as_ref().deregister(poll)
+ }
+}
+
+/// Options supplied when registering an `Evented` handle with `Poll`
+///
+/// `PollOpt` values can be combined together using the various bitwise
+/// operators.
+///
+/// For high level documentation on polling and poll options, see [`Poll`].
+///
+/// # Examples
+///
+/// ```
+/// use mio::PollOpt;
+///
+/// let opts = PollOpt::edge() | PollOpt::oneshot();
+///
+/// assert!(opts.is_edge());
+/// assert!(opts.is_oneshot());
+/// assert!(!opts.is_level());
+/// ```
+///
+/// [`Poll`]: struct.Poll.html
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct PollOpt(usize);
+
+impl PollOpt {
+ /// Return a `PollOpt` representing no set options.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::empty();
+ ///
+ /// assert!(!opt.is_level());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn empty() -> PollOpt {
+ PollOpt(0)
+ }
+
+ /// Return a `PollOpt` representing edge-triggered notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::edge();
+ ///
+ /// assert!(opt.is_edge());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn edge() -> PollOpt {
+ PollOpt(0b0001)
+ }
+
+ /// Return a `PollOpt` representing level-triggered notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::level();
+ ///
+ /// assert!(opt.is_level());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn level() -> PollOpt {
+ PollOpt(0b0010)
+ }
+
+ /// Return a `PollOpt` representing oneshot notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot();
+ ///
+ /// assert!(opt.is_oneshot());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn oneshot() -> PollOpt {
+ PollOpt(0b0100)
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn urgent() -> PollOpt {
+ PollOpt(0b1000)
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn all() -> PollOpt {
+ PollOpt::edge() | PollOpt::level() | PollOpt::oneshot()
+ }
+
+ /// Returns true if the options include edge-triggered notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::edge();
+ ///
+ /// assert!(opt.is_edge());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_edge(&self) -> bool {
+ self.contains(PollOpt::edge())
+ }
+
+ /// Returns true if the options include level-triggered notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::level();
+ ///
+ /// assert!(opt.is_level());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_level(&self) -> bool {
+ self.contains(PollOpt::level())
+ }
+
+ /// Returns true if the options include oneshot notifications.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot();
+ ///
+ /// assert!(opt.is_oneshot());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_oneshot(&self) -> bool {
+ self.contains(PollOpt::oneshot())
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[allow(deprecated)]
+ #[inline]
+ pub fn is_urgent(&self) -> bool {
+ self.contains(PollOpt::urgent())
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn bits(&self) -> usize {
+ self.0
+ }
+
+ /// Returns true if `self` is a superset of `other`.
+ ///
+ /// `other` may represent more than one option, in which case the function
+ /// only returns true if `self` contains all of the options specified in
+ /// `other`.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot();
+ ///
+ /// assert!(opt.contains(PollOpt::oneshot()));
+ /// assert!(!opt.contains(PollOpt::edge()));
+ /// ```
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot() | PollOpt::edge();
+ ///
+ /// assert!(opt.contains(PollOpt::oneshot()));
+ /// assert!(opt.contains(PollOpt::edge()));
+ /// ```
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let opt = PollOpt::oneshot() | PollOpt::edge();
+ ///
+ /// assert!(!PollOpt::oneshot().contains(opt));
+ /// assert!(opt.contains(opt));
+ /// assert!((opt | PollOpt::level()).contains(opt));
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn contains(&self, other: PollOpt) -> bool {
+ (*self & other) == other
+ }
+
+ /// Adds all options represented by `other` into `self`.
+ ///
+ /// This is equivalent to `*self = *self | other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let mut opt = PollOpt::empty();
+ /// opt.insert(PollOpt::oneshot());
+ ///
+ /// assert!(opt.is_oneshot());
+ /// ```
+ #[inline]
+ pub fn insert(&mut self, other: PollOpt) {
+ self.0 |= other.0;
+ }
+
+ /// Removes all options represented by `other` from `self`.
+ ///
+ /// This is equivalent to `*self = *self & !other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::PollOpt;
+ ///
+ /// let mut opt = PollOpt::oneshot();
+ /// opt.remove(PollOpt::oneshot());
+ ///
+ /// assert!(!opt.is_oneshot());
+ /// ```
+ #[inline]
+ pub fn remove(&mut self, other: PollOpt) {
+ self.0 &= !other.0;
+ }
+}
+
+impl ops::BitOr for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn bitor(self, other: PollOpt) -> PollOpt {
+ PollOpt(self.0 | other.0)
+ }
+}
+
+impl ops::BitXor for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn bitxor(self, other: PollOpt) -> PollOpt {
+ PollOpt(self.0 ^ other.0)
+ }
+}
+
+impl ops::BitAnd for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn bitand(self, other: PollOpt) -> PollOpt {
+ PollOpt(self.0 & other.0)
+ }
+}
+
+impl ops::Sub for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn sub(self, other: PollOpt) -> PollOpt {
+ PollOpt(self.0 & !other.0)
+ }
+}
+
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+impl ops::Not for PollOpt {
+ type Output = PollOpt;
+
+ #[inline]
+ fn not(self) -> PollOpt {
+ PollOpt(!self.0)
+ }
+}
+
+impl fmt::Debug for PollOpt {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let mut one = false;
+ let flags = [
+ (PollOpt::edge(), "Edge-Triggered"),
+ (PollOpt::level(), "Level-Triggered"),
+ (PollOpt::oneshot(), "OneShot")];
+
+ for &(flag, msg) in &flags {
+ if self.contains(flag) {
+ if one { write!(fmt, " | ")? }
+ write!(fmt, "{}", msg)?;
+
+ one = true
+ }
+ }
+
+ if !one {
+ fmt.write_str("(empty)")?;
+ }
+
+ Ok(())
+ }
+}
+
+#[test]
+fn test_debug_pollopt() {
+ assert_eq!("(empty)", format!("{:?}", PollOpt::empty()));
+ assert_eq!("Edge-Triggered", format!("{:?}", PollOpt::edge()));
+ assert_eq!("Level-Triggered", format!("{:?}", PollOpt::level()));
+ assert_eq!("OneShot", format!("{:?}", PollOpt::oneshot()));
+}
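+
+// Illustrative test (not part of upstream mio): combined options are rendered
+// with the same " | " separator used by the `Debug` impl above.
+#[test]
+fn test_debug_pollopt_combined() {
+ assert_eq!("Edge-Triggered | OneShot",
+ format!("{:?}", PollOpt::edge() | PollOpt::oneshot()));
+}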
+
+/// A set of readiness event kinds
+///
+/// `Ready` is a set of operation descriptors indicating which kind of an
+/// operation is ready to be performed. For example, `Ready::readable()`
+/// indicates that the associated `Evented` handle is ready to perform a
+/// `read` operation.
+///
+/// This struct only represents portable event kinds. Since only readable and
+/// writable events are guaranteed to be raised on all systems, those are the
+/// only ones available via the `Ready` struct. There are also platform specific
+/// extensions to `Ready`, such as `UnixReady`, which provide additional readiness
+/// event kinds only available on unix platforms.
+///
+/// `Ready` values can be combined together using the various bitwise operators.
+///
+/// For high level documentation on polling and readiness, see [`Poll`].
+///
+/// # Examples
+///
+/// ```
+/// use mio::Ready;
+///
+/// let ready = Ready::readable() | Ready::writable();
+///
+/// assert!(ready.is_readable());
+/// assert!(ready.is_writable());
+/// ```
+///
+/// [`Poll`]: struct.Poll.html
+/// [`readable`]: #method.readable
+/// [`writable`]: #method.writable
+/// [readiness]: struct.Poll.html#readiness-operations
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct Ready(usize);
+
+const READABLE: usize = 0b00001;
+const WRITABLE: usize = 0b00010;
+
+// These are deprecated and have been moved into platform-specific implementations.
+const ERROR: usize = 0b00100;
+const HUP: usize = 0b01000;
+
+impl Ready {
+ /// Returns the empty `Ready` set.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::empty();
+ ///
+ /// assert!(!ready.is_readable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ pub fn empty() -> Ready {
+ Ready(0)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use Ready::empty instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn none() -> Ready {
+ Ready::empty()
+ }
+
+ /// Returns a `Ready` representing readable readiness.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::readable();
+ ///
+ /// assert!(ready.is_readable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn readable() -> Ready {
+ Ready(READABLE)
+ }
+
+ /// Returns a `Ready` representing writable readiness.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::writable();
+ ///
+ /// assert!(ready.is_writable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn writable() -> Ready {
+ Ready(WRITABLE)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use UnixReady instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn error() -> Ready {
+ Ready(ERROR)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use UnixReady instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn hup() -> Ready {
+ Ready(HUP)
+ }
+
+ /// Returns a `Ready` representing readiness for all operations.
+ ///
+ /// This includes platform specific operations as well (`hup`, `aio`,
+ /// `error`, `lio`, `pri`).
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::all();
+ ///
+ /// assert!(ready.is_readable());
+ /// assert!(ready.is_writable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn all() -> Ready {
+ Ready(READABLE | WRITABLE | ::sys::READY_ALL)
+ }
+
+ /// Returns true if `Ready` is the empty set
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::empty();
+ /// assert!(ready.is_empty());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ *self == Ready::empty()
+ }
+
+ #[deprecated(since = "0.6.5", note = "use Ready::is_empty instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn is_none(&self) -> bool {
+ self.is_empty()
+ }
+
+ /// Returns true if the value includes readable readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::readable();
+ ///
+ /// assert!(ready.is_readable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_readable(&self) -> bool {
+ self.contains(Ready::readable())
+ }
+
+ /// Returns true if the value includes writable readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::writable();
+ ///
+ /// assert!(ready.is_writable());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn is_writable(&self) -> bool {
+ self.contains(Ready::writable())
+ }
+
+ #[deprecated(since = "0.6.5", note = "use UnixReady instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn is_error(&self) -> bool {
+ self.contains(Ready(ERROR))
+ }
+
+ #[deprecated(since = "0.6.5", note = "use UnixReady instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn is_hup(&self) -> bool {
+ self.contains(Ready(HUP))
+ }
+
+ /// Adds all readiness represented by `other` into `self`.
+ ///
+ /// This is equivalent to `*self = *self | other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let mut readiness = Ready::empty();
+ /// readiness.insert(Ready::readable());
+ ///
+ /// assert!(readiness.is_readable());
+ /// ```
+ #[inline]
+ pub fn insert<T: Into<Self>>(&mut self, other: T) {
+ let other = other.into();
+ self.0 |= other.0;
+ }
+
+ /// Removes all options represented by `other` from `self`.
+ ///
+ /// This is equivalent to `*self = *self & !other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let mut readiness = Ready::readable();
+ /// readiness.remove(Ready::readable());
+ ///
+ /// assert!(!readiness.is_readable());
+ /// ```
+ #[inline]
+ pub fn remove<T: Into<Self>>(&mut self, other: T) {
+ let other = other.into();
+ self.0 &= !other.0;
+ }
+
+ #[deprecated(since = "0.6.5", note = "removed")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn bits(&self) -> usize {
+ self.0
+ }
+
+ /// Returns true if `self` is a superset of `other`.
+ ///
+ /// `other` may represent more than one readiness operation, in which case
+ /// the function only returns true if `self` contains all of the readiness
+ /// specified in `other`.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let readiness = Ready::readable();
+ ///
+ /// assert!(readiness.contains(Ready::readable()));
+ /// assert!(!readiness.contains(Ready::writable()));
+ /// ```
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let readiness = Ready::readable() | Ready::writable();
+ ///
+ /// assert!(readiness.contains(Ready::readable()));
+ /// assert!(readiness.contains(Ready::writable()));
+ /// ```
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let readiness = Ready::readable() | Ready::writable();
+ ///
+ /// assert!(!Ready::readable().contains(readiness));
+ /// assert!(readiness.contains(readiness));
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ pub fn contains<T: Into<Self>>(&self, other: T) -> bool {
+ let other = other.into();
+ (*self & other) == other
+ }
+
+ /// Create a `Ready` instance using the given `usize` representation.
+ ///
+ /// The `usize` representation must have been obtained from a call to
+ /// `Ready::as_usize`.
+ ///
+ /// The `usize` representation must be treated as opaque. There is no
+ /// guaranteed correlation between the returned value and platform defined
+ /// constants. Also, there is no guarantee that the `usize` representation
+ /// will remain constant across patch releases of Mio.
+ ///
+ /// This function is mainly provided to allow the caller to load a
+ /// readiness value from an `AtomicUsize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::readable();
+ /// let ready_usize = ready.as_usize();
+ /// let ready2 = Ready::from_usize(ready_usize);
+ ///
+ /// assert_eq!(ready, ready2);
+ /// ```
+ pub fn from_usize(val: usize) -> Ready {
+ Ready(val)
+ }
+
+ /// Returns a `usize` representation of the `Ready` value.
+ ///
+ /// This `usize` representation must be treated as opaque. There is no
+ /// guaranteed correlation between the returned value and platform defined
+ /// constants. Also, there is no guarantee that the `usize` representation
+ /// will remain constant across patch releases of Mio.
+ ///
+ /// This function is mainly provided to allow the caller to store a
+ /// readiness value in an `AtomicUsize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Ready;
+ ///
+ /// let ready = Ready::readable();
+ /// let ready_usize = ready.as_usize();
+ /// let ready2 = Ready::from_usize(ready_usize);
+ ///
+ /// assert_eq!(ready, ready2);
+ /// ```
+ pub fn as_usize(&self) -> usize {
+ self.0
+ }
+}
+
+impl<T: Into<Ready>> ops::BitOr<T> for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn bitor(self, other: T) -> Ready {
+ Ready(self.0 | other.into().0)
+ }
+}
+
+impl<T: Into<Ready>> ops::BitOrAssign<T> for Ready {
+ #[inline]
+ fn bitor_assign(&mut self, other: T) {
+ self.0 |= other.into().0;
+ }
+}
+
+impl<T: Into<Ready>> ops::BitXor<T> for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn bitxor(self, other: T) -> Ready {
+ Ready(self.0 ^ other.into().0)
+ }
+}
+
+impl<T: Into<Ready>> ops::BitXorAssign<T> for Ready {
+ #[inline]
+ fn bitxor_assign(&mut self, other: T) {
+ self.0 ^= other.into().0;
+ }
+}
+
+impl<T: Into<Ready>> ops::BitAnd<T> for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn bitand(self, other: T) -> Ready {
+ Ready(self.0 & other.into().0)
+ }
+}
+
+impl<T: Into<Ready>> ops::BitAndAssign<T> for Ready {
+ #[inline]
+ fn bitand_assign(&mut self, other: T) {
+ self.0 &= other.into().0
+ }
+}
+
+impl<T: Into<Ready>> ops::Sub<T> for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn sub(self, other: T) -> Ready {
+ Ready(self.0 & !other.into().0)
+ }
+}
+
+impl<T: Into<Ready>> ops::SubAssign<T> for Ready {
+ #[inline]
+ fn sub_assign(&mut self, other: T) {
+ self.0 &= !other.into().0;
+ }
+}
+
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+impl ops::Not for Ready {
+ type Output = Ready;
+
+ #[inline]
+ fn not(self) -> Ready {
+ Ready(!self.0)
+ }
+}
+
+impl fmt::Debug for Ready {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let mut one = false;
+ let flags = [
+ (Ready::readable(), "Readable"),
+ (Ready::writable(), "Writable"),
+ (Ready(ERROR), "Error"),
+ (Ready(HUP), "Hup")];
+
+ for &(flag, msg) in &flags {
+ if self.contains(flag) {
+ if one { write!(fmt, " | ")? }
+ write!(fmt, "{}", msg)?;
+
+ one = true
+ }
+ }
+
+ if !one {
+ fmt.write_str("(empty)")?;
+ }
+
+ Ok(())
+ }
+}
+
+#[test]
+fn test_debug_ready() {
+ assert_eq!("(empty)", format!("{:?}", Ready::empty()));
+ assert_eq!("Readable", format!("{:?}", Ready::readable()));
+ assert_eq!("Writable", format!("{:?}", Ready::writable()));
+}
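+
+// Illustrative test (not part of upstream mio): the doc comments on
+// `Ready::as_usize` and `Ready::from_usize` note that they exist mainly so a
+// readiness value can be stashed in an `AtomicUsize`; this shows the round trip.
+#[test]
+fn test_ready_atomic_usize_round_trip() {
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ let shared = AtomicUsize::new(Ready::empty().as_usize());
+
+ // Store a readiness value in the shared atomic...
+ shared.store((Ready::readable() | Ready::writable()).as_usize(), Ordering::SeqCst);
+
+ // ...and load it back as a `Ready` elsewhere.
+ let ready = Ready::from_usize(shared.load(Ordering::SeqCst));
+ assert!(ready.is_readable());
+ assert!(ready.is_writable());
+}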
+
+/// A readiness event returned by [`Poll::poll`].
+///
+/// `Event` is a [readiness state] paired with a [`Token`]. It is returned by
+/// [`Poll::poll`].
+///
+/// For more documentation on polling and events, see [`Poll`].
+///
+/// # Examples
+///
+/// ```
+/// use mio::{Ready, Token};
+/// use mio::event::Event;
+///
+/// let event = Event::new(Ready::readable() | Ready::writable(), Token(0));
+///
+/// assert_eq!(event.readiness(), Ready::readable() | Ready::writable());
+/// assert_eq!(event.token(), Token(0));
+/// ```
+///
+/// [`Poll::poll`]: ../struct.Poll.html#method.poll
+/// [`Poll`]: ../struct.Poll.html
+/// [readiness state]: ../struct.Ready.html
+/// [`Token`]: ../struct.Token.html
+#[derive(Copy, Clone, Eq, PartialEq, Debug)]
+pub struct Event {
+ kind: Ready,
+ token: Token
+}
+
+impl Event {
+ /// Creates a new `Event` containing `readiness` and `token`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::{Ready, Token};
+ /// use mio::event::Event;
+ ///
+ /// let event = Event::new(Ready::readable() | Ready::writable(), Token(0));
+ ///
+ /// assert_eq!(event.readiness(), Ready::readable() | Ready::writable());
+ /// assert_eq!(event.token(), Token(0));
+ /// ```
+ pub fn new(readiness: Ready, token: Token) -> Event {
+ Event {
+ kind: readiness,
+ token,
+ }
+ }
+
+ /// Returns the event's readiness.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::{Ready, Token};
+ /// use mio::event::Event;
+ ///
+ /// let event = Event::new(Ready::readable() | Ready::writable(), Token(0));
+ ///
+ /// assert_eq!(event.readiness(), Ready::readable() | Ready::writable());
+ /// ```
+ pub fn readiness(&self) -> Ready {
+ self.kind
+ }
+
+ #[deprecated(since = "0.6.5", note = "use Event::readiness()")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn kind(&self) -> Ready {
+ self.kind
+ }
+
+ /// Returns the event's token.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::{Ready, Token};
+ /// use mio::event::Event;
+ ///
+ /// let event = Event::new(Ready::readable() | Ready::writable(), Token(0));
+ ///
+ /// assert_eq!(event.token(), Token(0));
+ /// ```
+ pub fn token(&self) -> Token {
+ self.token
+ }
+}
+
+/*
+ *
+ * ===== Mio internal helpers =====
+ *
+ */
+
+pub fn ready_as_usize(events: Ready) -> usize {
+ events.0
+}
+
+pub fn opt_as_usize(opt: PollOpt) -> usize {
+ opt.0
+}
+
+pub fn ready_from_usize(events: usize) -> Ready {
+ Ready(events)
+}
+
+pub fn opt_from_usize(opt: usize) -> PollOpt {
+ PollOpt(opt)
+}
+
+// Used internally to mutate an `Event` in place
+// Not used on all platforms
+#[allow(dead_code)]
+pub fn kind_mut(event: &mut Event) -> &mut Ready {
+ &mut event.kind
+}
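+
+// Illustrative test (not part of upstream mio): `kind_mut` lets platform code
+// rewrite the readiness of an `Event` in place, folding extra readiness bits
+// into an already constructed event.
+#[test]
+fn test_kind_mut_updates_event_in_place() {
+ let mut event = Event::new(Ready::readable(), Token(1));
+ kind_mut(&mut event).insert(Ready::writable());
+ assert_eq!(event.readiness(), Ready::readable() | Ready::writable());
+ assert_eq!(event.token(), Token(1));
+}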
diff --git a/third_party/rust/mio-0.6.23/src/io.rs b/third_party/rust/mio-0.6.23/src/io.rs
new file mode 100644
index 0000000000..275001387d
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/io.rs
@@ -0,0 +1,35 @@
+// Re-export the io::Result / Error types for convenience
+pub use std::io::{Read, Write, Result, Error, ErrorKind};
+
+// TODO: Delete this
+/// A helper trait to provide the map_non_block function on Results.
+pub trait MapNonBlock<T> {
+ /// Maps a `Result<T>` to a `Result<Option<T>>` by converting
+ /// operation-would-block errors into `Ok(None)`.
+ fn map_non_block(self) -> Result<Option<T>>;
+}
+
+impl<T> MapNonBlock<T> for Result<T> {
+ fn map_non_block(self) -> Result<Option<T>> {
+ use std::io::ErrorKind::WouldBlock;
+
+ match self {
+ Ok(value) => Ok(Some(value)),
+ Err(err) => {
+ if let WouldBlock = err.kind() {
+ Ok(None)
+ } else {
+ Err(err)
+ }
+ }
+ }
+ }
+}
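+
+// Illustrative test (not part of upstream mio): `map_non_block` turns
+// would-block errors into `Ok(None)` and passes other results through.
+#[test]
+fn test_map_non_block() {
+ let would_block: Result<u8> = Err(Error::from(ErrorKind::WouldBlock));
+ assert!(would_block.map_non_block().unwrap().is_none());
+
+ let ok: Result<u8> = Ok(7);
+ assert_eq!(ok.map_non_block().unwrap(), Some(7));
+
+ let other: Result<u8> = Err(Error::from(ErrorKind::Other));
+ assert!(other.map_non_block().is_err());
+}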
+
+#[cfg(feature = "with-deprecated")]
+pub mod deprecated {
+ /// Returns a std `WouldBlock` error without allocating
+ pub fn would_block() -> ::std::io::Error {
+ ::std::io::ErrorKind::WouldBlock.into()
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/lazycell.rs b/third_party/rust/mio-0.6.23/src/lazycell.rs
new file mode 100644
index 0000000000..681fb2f529
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/lazycell.rs
@@ -0,0 +1,554 @@
+// Original work Copyright (c) 2014 The Rust Project Developers
+// Modified work Copyright (c) 2016-2018 Nikita Pekin and the lazycell contributors
+// See the README.md file at the top-level directory of this distribution.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(missing_docs)]
+#![allow(unused)]
+
+//! This crate provides a `LazyCell` struct which acts as a lazily filled
+//! `Cell`.
+//!
+//! With a `RefCell`, the inner contents cannot be borrowed for the lifetime of
+//! the entire object, but only for the lifetime of the returned borrows. A `LazyCell` is a
+//! variation on `RefCell` which allows borrows to be tied to the lifetime of
+//! the outer object.
+//!
+//! `AtomicLazyCell` is a variant that uses an atomic variable to manage
+//! coordination in a thread-safe fashion. The limitation of an `AtomicLazyCell`
+//! is that after it is initialized, it can't be modified.
+
+use std::cell::UnsafeCell;
+use std::mem;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// A lazily filled `Cell`, with mutable contents.
+///
+/// A `LazyCell` is completely frozen once filled, **unless** you have `&mut`
+/// access to it, in which case `LazyCell::borrow_mut` may be used to mutate the
+/// contents.
+#[derive(Debug, Default)]
+pub struct LazyCell<T> {
+ inner: UnsafeCell<Option<T>>,
+}
+
+impl<T> LazyCell<T> {
+ /// Creates a new, empty, `LazyCell`.
+ pub fn new() -> LazyCell<T> {
+ LazyCell { inner: UnsafeCell::new(None) }
+ }
+
+ /// Put a value into this cell.
+ ///
+ /// This function will return `Err(value)` if the cell is already full.
+ pub fn fill(&self, value: T) -> Result<(), T> {
+ let slot = unsafe { &mut *self.inner.get() };
+ if slot.is_some() {
+ return Err(value);
+ }
+ *slot = Some(value);
+
+ Ok(())
+ }
+
+ /// Put a value into this cell.
+ ///
+ /// Note that this function is infallible but requires `&mut self`. By
+ /// requiring `&mut self` we're guaranteed that no active borrows to this
+ /// cell can exist so we can always fill in the value. This may not always
+ /// be usable, however, as `&mut self` may not be possible to borrow.
+ ///
+ /// # Return value
+ ///
+ /// This function returns the previous value, if any.
+ pub fn replace(&mut self, value: T) -> Option<T> {
+ mem::replace(unsafe { &mut *self.inner.get() }, Some(value))
+ }
+
+ /// Test whether this cell has been previously filled.
+ pub fn filled(&self) -> bool {
+ self.borrow().is_some()
+ }
+
+ /// Borrows the contents of this lazy cell for the duration of the cell
+ /// itself.
+ ///
+ /// This function will return `Some` if the cell has been previously
+ /// initialized, and `None` if it has not yet been initialized.
+ pub fn borrow(&self) -> Option<&T> {
+ unsafe { &*self.inner.get() }.as_ref()
+ }
+
+ /// Borrows the contents of this lazy cell mutably for the duration of the cell
+ /// itself.
+ ///
+ /// This function will return `Some` if the cell has been previously
+ /// initialized, and `None` if it has not yet been initialized.
+ pub fn borrow_mut(&mut self) -> Option<&mut T> {
+ unsafe { &mut *self.inner.get() }.as_mut()
+ }
+
+ /// Borrows the contents of this lazy cell for the duration of the cell
+ /// itself.
+ ///
+ /// If the cell has not yet been filled, the cell is first filled using the
+ /// function provided.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the cell becomes filled as a side effect of `f`.
+ pub fn borrow_with<F: FnOnce() -> T>(&self, f: F) -> &T {
+ if let Some(value) = self.borrow() {
+ return value;
+ }
+ let value = f();
+ if self.fill(value).is_err() {
+ panic!("borrow_with: cell was filled by closure")
+ }
+ self.borrow().unwrap()
+ }
+
+ /// Borrows the contents of this `LazyCell` mutably for the duration of the
+ /// cell itself.
+ ///
+ /// If the cell has not yet been filled, the cell is first filled using the
+ /// function provided.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the cell becomes filled as a side effect of `f`.
+ pub fn borrow_mut_with<F: FnOnce() -> T>(&mut self, f: F) -> &mut T {
+ if !self.filled() {
+ let value = f();
+ if self.fill(value).is_err() {
+ panic!("borrow_mut_with: cell was filled by closure")
+ }
+ }
+
+ self.borrow_mut().unwrap()
+ }
+
+ /// Same as `borrow_with`, but allows the initializing function to fail.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the cell becomes filled as a side effect of `f`.
+ pub fn try_borrow_with<E, F>(&self, f: F) -> Result<&T, E>
+ where F: FnOnce() -> Result<T, E>
+ {
+ if let Some(value) = self.borrow() {
+ return Ok(value);
+ }
+ let value = f()?;
+ if self.fill(value).is_err() {
+ panic!("try_borrow_with: cell was filled by closure")
+ }
+ Ok(self.borrow().unwrap())
+ }
+
+ /// Same as `borrow_mut_with`, but allows the initializing function to fail.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the cell becomes filled as a side effect of `f`.
+ pub fn try_borrow_mut_with<E, F>(&mut self, f: F) -> Result<&mut T, E>
+ where F: FnOnce() -> Result<T, E>
+ {
+ if self.filled() {
+ return Ok(self.borrow_mut().unwrap());
+ }
+ let value = f()?;
+ if self.fill(value).is_err() {
+ panic!("try_borrow_mut_with: cell was filled by closure")
+ }
+ Ok(self.borrow_mut().unwrap())
+ }
+
+ /// Consumes this `LazyCell`, returning the underlying value.
+ pub fn into_inner(self) -> Option<T> {
+ // Rust 1.25 changed UnsafeCell::into_inner() from unsafe to safe
+ // function. This unsafe can be removed when supporting Rust older than
+ // 1.25 is not needed.
+ #[allow(unused_unsafe)]
+ unsafe { self.inner.into_inner() }
+ }
+}
+
+impl<T: Copy> LazyCell<T> {
+ /// Returns a copy of the contents of the lazy cell.
+ ///
+ /// This function will return `Some` if the cell has been previously initialized,
+ /// and `None` if it has not yet been initialized.
+ pub fn get(&self) -> Option<T> {
+ unsafe { *self.inner.get() }
+ }
+}
+
+// Tracks the AtomicLazyCell inner state
+const NONE: usize = 0;
+const LOCK: usize = 1;
+const SOME: usize = 2;
+
+/// A lazily filled and thread-safe `Cell`, with frozen contents.
+#[derive(Debug, Default)]
+pub struct AtomicLazyCell<T> {
+ inner: UnsafeCell<Option<T>>,
+ state: AtomicUsize,
+}
+
+impl<T> AtomicLazyCell<T> {
+ /// Creates a new, empty, `AtomicLazyCell`.
+ pub fn new() -> AtomicLazyCell<T> {
+ Self {
+ inner: UnsafeCell::new(None),
+ state: AtomicUsize::new(NONE),
+ }
+ }
+
+ /// Put a value into this cell.
+ ///
+ /// This function will return `Err(value)` if the cell is already full.
+ pub fn fill(&self, t: T) -> Result<(), T> {
+ if NONE != self.state.compare_and_swap(NONE, LOCK, Ordering::Acquire) {
+ return Err(t);
+ }
+
+ unsafe { *self.inner.get() = Some(t) };
+
+ if LOCK != self.state.compare_and_swap(LOCK, SOME, Ordering::Release) {
+ panic!("unable to release lock");
+ }
+
+ Ok(())
+ }
+
+ /// Put a value into this cell.
+ ///
+ /// Note that this function is infallible but requires `&mut self`. By
+ /// requiring `&mut self` we're guaranteed that no active borrows to this
+ /// cell can exist so we can always fill in the value. This may not always
+ /// be usable, however, as `&mut self` may not be possible to borrow.
+ ///
+ /// # Return value
+ ///
+ /// This function returns the previous value, if any.
+ pub fn replace(&mut self, value: T) -> Option<T> {
+ match mem::replace(self.state.get_mut(), SOME) {
+ NONE | SOME => {}
+ _ => panic!("cell in inconsistent state"),
+ }
+ mem::replace(unsafe { &mut *self.inner.get() }, Some(value))
+ }
+
+ /// Test whether this cell has been previously filled.
+ pub fn filled(&self) -> bool {
+ self.state.load(Ordering::Acquire) == SOME
+ }
+
+ /// Borrows the contents of this lazy cell for the duration of the cell
+ /// itself.
+ ///
+ /// This function will return `Some` if the cell has been previously
+ /// initialized, and `None` if it has not yet been initialized.
+ pub fn borrow(&self) -> Option<&T> {
+ match self.state.load(Ordering::Acquire) {
+ SOME => unsafe { &*self.inner.get() }.as_ref(),
+ _ => None,
+ }
+ }
+
+ /// Consumes this `AtomicLazyCell`, returning the underlying value.
+ pub fn into_inner(self) -> Option<T> {
+ // Rust 1.25 changed UnsafeCell::into_inner() from unsafe to safe
+ // function. This unsafe can be removed when supporting Rust older than
+ // 1.25 is not needed.
+ #[allow(unused_unsafe)]
+ unsafe { self.inner.into_inner() }
+ }
+}
+
+impl<T: Copy> AtomicLazyCell<T> {
+ /// Returns a copy of the contents of the lazy cell.
+ ///
+ /// This function will return `Some` if the cell has been previously initialized,
+ /// and `None` if it has not yet been initialized.
+ pub fn get(&self) -> Option<T> {
+ match self.state.load(Ordering::Acquire) {
+ SOME => unsafe { *self.inner.get() },
+ _ => None,
+ }
+ }
+}
+
+unsafe impl<T: Sync + Send> Sync for AtomicLazyCell<T> {}
+
+unsafe impl<T: Send> Send for AtomicLazyCell<T> {}
+
+#[cfg(test)]
+mod tests {
+ use super::{AtomicLazyCell, LazyCell};
+
+ #[test]
+ fn test_borrow_from_empty() {
+ let lazycell: LazyCell<usize> = LazyCell::new();
+
+ let value = lazycell.borrow();
+ assert_eq!(value, None);
+
+ let value = lazycell.get();
+ assert_eq!(value, None);
+ }
+
+ #[test]
+ fn test_fill_and_borrow() {
+ let lazycell = LazyCell::new();
+
+ assert!(!lazycell.filled());
+ lazycell.fill(1).unwrap();
+ assert!(lazycell.filled());
+
+ let value = lazycell.borrow();
+ assert_eq!(value, Some(&1));
+
+ let value = lazycell.get();
+ assert_eq!(value, Some(1));
+ }
+
+ #[test]
+ fn test_borrow_mut() {
+ let mut lazycell = LazyCell::new();
+ assert!(lazycell.borrow_mut().is_none());
+
+ lazycell.fill(1).unwrap();
+ assert_eq!(lazycell.borrow_mut(), Some(&mut 1));
+
+ *lazycell.borrow_mut().unwrap() = 2;
+ assert_eq!(lazycell.borrow_mut(), Some(&mut 2));
+
+ // official way to reset the cell
+ lazycell = LazyCell::new();
+ assert!(lazycell.borrow_mut().is_none());
+ }
+
+ #[test]
+ fn test_already_filled_error() {
+ let lazycell = LazyCell::new();
+
+ lazycell.fill(1).unwrap();
+ assert_eq!(lazycell.fill(1), Err(1));
+ }
+
+ #[test]
+ fn test_borrow_with() {
+ let lazycell = LazyCell::new();
+
+ let value = lazycell.borrow_with(|| 1);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ fn test_borrow_with_already_filled() {
+ let lazycell = LazyCell::new();
+ lazycell.fill(1).unwrap();
+
+ let value = lazycell.borrow_with(|| 1);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ fn test_borrow_with_not_called_when_filled() {
+ let lazycell = LazyCell::new();
+
+ lazycell.fill(1).unwrap();
+
+ let value = lazycell.borrow_with(|| 2);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_borrow_with_sound_with_reentrancy() {
+ // Kudos to dbaupp for discovering this issue
+ // https://www.reddit.com/r/rust/comments/5vs9rt/lazycell_a_rust_library_providing_a_lazilyfilled/de527xm/
+ let lazycell: LazyCell<Box<i32>> = LazyCell::new();
+
+ let mut reference: Option<&i32> = None;
+
+ lazycell.borrow_with(|| {
+ let _ = lazycell.fill(Box::new(1));
+ reference = lazycell.borrow().map(|r| &**r);
+ Box::new(2)
+ });
+ }
+
+ #[test]
+ fn test_borrow_mut_with() {
+ let mut lazycell = LazyCell::new();
+
+ {
+ let value = lazycell.borrow_mut_with(|| 1);
+ assert_eq!(&mut 1, value);
+ *value = 2;
+ }
+ assert_eq!(&2, lazycell.borrow().unwrap());
+ }
+
+ #[test]
+ fn test_borrow_mut_with_already_filled() {
+ let mut lazycell = LazyCell::new();
+ lazycell.fill(1).unwrap();
+
+ let value = lazycell.borrow_mut_with(|| 1);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ fn test_borrow_mut_with_not_called_when_filled() {
+ let mut lazycell = LazyCell::new();
+
+ lazycell.fill(1).unwrap();
+
+ let value = lazycell.borrow_mut_with(|| 2);
+ assert_eq!(&1, value);
+ }
+
+ #[test]
+ fn test_try_borrow_with_ok() {
+ let lazycell = LazyCell::new();
+ let result = lazycell.try_borrow_with::<(), _>(|| Ok(1));
+ assert_eq!(result, Ok(&1));
+ }
+
+ #[test]
+ fn test_try_borrow_with_err() {
+ let lazycell = LazyCell::<()>::new();
+ let result = lazycell.try_borrow_with(|| Err(1));
+ assert_eq!(result, Err(1));
+ }
+
+ #[test]
+ fn test_try_borrow_with_already_filled() {
+ let lazycell = LazyCell::new();
+ lazycell.fill(1).unwrap();
+ let result = lazycell.try_borrow_with::<(), _>(|| unreachable!());
+ assert_eq!(result, Ok(&1));
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_try_borrow_with_sound_with_reentrancy() {
+ let lazycell: LazyCell<Box<i32>> = LazyCell::new();
+
+ let mut reference: Option<&i32> = None;
+
+ let _ = lazycell.try_borrow_with::<(), _>(|| {
+ let _ = lazycell.fill(Box::new(1));
+ reference = lazycell.borrow().map(|r| &**r);
+ Ok(Box::new(2))
+ });
+ }
+
+ #[test]
+ fn test_try_borrow_mut_with_ok() {
+ let mut lazycell = LazyCell::new();
+ {
+ let result = lazycell.try_borrow_mut_with::<(), _>(|| Ok(1));
+ assert_eq!(result, Ok(&mut 1));
+ *result.unwrap() = 2;
+ }
+ assert_eq!(&mut 2, lazycell.borrow().unwrap());
+ }
+
+ #[test]
+ fn test_try_borrow_mut_with_err() {
+ let mut lazycell = LazyCell::<()>::new();
+ let result = lazycell.try_borrow_mut_with(|| Err(1));
+ assert_eq!(result, Err(1));
+ }
+
+ #[test]
+ fn test_try_borrow_mut_with_already_filled() {
+ let mut lazycell = LazyCell::new();
+ lazycell.fill(1).unwrap();
+ let result = lazycell.try_borrow_mut_with::<(), _>(|| unreachable!());
+ assert_eq!(result, Ok(&mut 1));
+ }
+
+ #[test]
+ fn test_into_inner() {
+ let lazycell = LazyCell::new();
+
+ lazycell.fill(1).unwrap();
+ let value = lazycell.into_inner();
+ assert_eq!(value, Some(1));
+ }
+
+ #[test]
+ fn test_atomic_borrow_from_empty() {
+ let lazycell: AtomicLazyCell<usize> = AtomicLazyCell::new();
+
+ let value = lazycell.borrow();
+ assert_eq!(value, None);
+
+ let value = lazycell.get();
+ assert_eq!(value, None);
+ }
+
+ #[test]
+ fn test_atomic_fill_and_borrow() {
+ let lazycell = AtomicLazyCell::new();
+
+ assert!(!lazycell.filled());
+ lazycell.fill(1).unwrap();
+ assert!(lazycell.filled());
+
+ let value = lazycell.borrow();
+ assert_eq!(value, Some(&1));
+
+ let value = lazycell.get();
+ assert_eq!(value, Some(1));
+ }
+
+ #[test]
+ fn test_atomic_already_filled_panic() {
+ let lazycell = AtomicLazyCell::new();
+
+ lazycell.fill(1).unwrap();
+ assert_eq!(1, lazycell.fill(1).unwrap_err());
+ }
+
+ #[test]
+ fn test_atomic_into_inner() {
+ let lazycell = AtomicLazyCell::new();
+
+ lazycell.fill(1).unwrap();
+ let value = lazycell.into_inner();
+ assert_eq!(value, Some(1));
+ }
+
+ #[test]
+ fn normal_replace() {
+ let mut cell = LazyCell::new();
+ assert_eq!(cell.fill(1), Ok(()));
+ assert_eq!(cell.replace(2), Some(1));
+ assert_eq!(cell.replace(3), Some(2));
+ assert_eq!(cell.borrow(), Some(&3));
+
+ let mut cell = LazyCell::new();
+ assert_eq!(cell.replace(2), None);
+ }
+
+ #[test]
+ fn atomic_replace() {
+ let mut cell = AtomicLazyCell::new();
+ assert_eq!(cell.fill(1), Ok(()));
+ assert_eq!(cell.replace(2), Some(1));
+ assert_eq!(cell.replace(3), Some(2));
+ assert_eq!(cell.borrow(), Some(&3));
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/lib.rs b/third_party/rust/mio-0.6.23/src/lib.rs
new file mode 100644
index 0000000000..96f704603e
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/lib.rs
@@ -0,0 +1,308 @@
+#![doc(html_root_url = "https://docs.rs/mio/0.6.23")]
+// Mio targets old versions of the Rust compiler. In order to do this, it uses
+// deprecated APIs.
+#![allow(bare_trait_objects, deprecated, unknown_lints)]
+#![deny(missing_docs, missing_debug_implementations)]
+#![cfg_attr(test, deny(warnings))]
+
+// Many of mio's public methods violate this lint, but they can't be fixed
+// without a breaking change.
+#![cfg_attr(feature = "cargo-clippy", allow(clippy::trivially_copy_pass_by_ref))]
+
+//! A fast, low-level IO library for Rust focusing on non-blocking APIs, event
+//! notification, and other useful utilities for building high performance IO
+//! apps.
+//!
+//! # Features
+//!
+//! * Non-blocking TCP, UDP
+//! * I/O event notification queue backed by epoll, kqueue, and IOCP
+//! * Zero allocations at runtime
+//! * Platform specific extensions
+//!
+//! # Non-goals
+//!
+//! The following are specifically omitted from Mio and are left to the user or higher-level libraries.
+//!
+//! * File operations
+//! * Thread pools / multi-threaded event loop
+//! * Timers
+//!
+//! # Platforms
+//!
+//! Currently supported platforms:
+//!
+//! * Linux
+//! * OS X
+//! * Windows
+//! * FreeBSD
+//! * NetBSD
+//! * Android
+//! * iOS
+//!
+//! mio can handle interfacing with each of the event notification systems of the aforementioned platforms. The details of
+//! their implementation are further discussed in [`Poll`].
+//!
+//! # Usage
+//!
+//! Using mio starts by creating a [`Poll`], which reads events from the OS and
+//! puts them into [`Events`]. You can handle IO events from the OS with it.
+//!
+//! For more detail, see [`Poll`].
+//!
+//! [`Poll`]: struct.Poll.html
+//! [`Events`]: struct.Events.html
+//!
+//! # Example
+//!
+//! ```
+//! use mio::*;
+//! use mio::net::{TcpListener, TcpStream};
+//!
+//! // Setup some tokens to allow us to identify which event is
+//! // for which socket.
+//! const SERVER: Token = Token(0);
+//! const CLIENT: Token = Token(1);
+//!
+//! let addr = "127.0.0.1:13265".parse().unwrap();
+//!
+//! // Setup the server socket
+//! let server = TcpListener::bind(&addr).unwrap();
+//!
+//! // Create a poll instance
+//! let poll = Poll::new().unwrap();
+//!
+//! // Start listening for incoming connections
+//! poll.register(&server, SERVER, Ready::readable(),
+//! PollOpt::edge()).unwrap();
+//!
+//! // Setup the client socket
+//! let sock = TcpStream::connect(&addr).unwrap();
+//!
+//! // Register the socket
+//! poll.register(&sock, CLIENT, Ready::readable(),
+//! PollOpt::edge()).unwrap();
+//!
+//! // Create storage for events
+//! let mut events = Events::with_capacity(1024);
+//!
+//! loop {
+//! poll.poll(&mut events, None).unwrap();
+//!
+//! for event in events.iter() {
+//! match event.token() {
+//! SERVER => {
+//! // Accept and drop the socket immediately, this will close
+//! // the socket and notify the client of the EOF.
+//! let _ = server.accept();
+//! }
+//! CLIENT => {
+//! // The server just shuts down the socket, let's just exit
+//! // from our event loop.
+//! return;
+//! }
+//! _ => unreachable!(),
+//! }
+//! }
+//! }
+//!
+//! ```
+
+extern crate net2;
+extern crate iovec;
+extern crate slab;
+
+#[cfg(target_os = "fuchsia")]
+extern crate fuchsia_zircon as zircon;
+#[cfg(target_os = "fuchsia")]
+extern crate fuchsia_zircon_sys as zircon_sys;
+
+#[cfg(unix)]
+extern crate libc;
+
+#[cfg(windows)]
+extern crate miow;
+
+#[cfg(windows)]
+extern crate winapi;
+
+#[macro_use]
+extern crate log;
+
+mod event_imp;
+mod io;
+mod poll;
+mod sys;
+mod token;
+mod lazycell;
+
+pub mod net;
+
+#[deprecated(since = "0.6.5", note = "use mio-extras instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod channel;
+
+#[deprecated(since = "0.6.5", note = "use mio-extras instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod timer;
+
+#[deprecated(since = "0.6.5", note = "update to use `Poll`")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod deprecated;
+
+#[deprecated(since = "0.6.5", note = "use iovec crate directly")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use iovec::IoVec;
+
+#[deprecated(since = "0.6.6", note = "use net module instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod tcp {
+ pub use net::{TcpListener, TcpStream};
+ pub use std::net::Shutdown;
+}
+
+#[deprecated(since = "0.6.6", note = "use net module instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub mod udp;
+
+pub use poll::{
+ Poll,
+ Registration,
+ SetReadiness,
+};
+pub use event_imp::{
+ PollOpt,
+ Ready,
+};
+pub use token::Token;
+
+pub mod event {
+ //! Readiness event types and utilities.
+
+ pub use super::poll::{Events, Iter};
+ pub use super::event_imp::{Event, Evented};
+}
+
+pub use event::{
+ Events,
+};
+
+#[deprecated(since = "0.6.5", note = "use event:: instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use event::{Event, Evented};
+
+#[deprecated(since = "0.6.5", note = "use event::Iter instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use poll::Iter as EventsIter;
+
+#[deprecated(since = "0.6.5", note = "std::io::Error can avoid the allocation now")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use io::deprecated::would_block;
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub mod unix {
+ //! Unix only extensions
+ pub use sys::{
+ EventedFd,
+ };
+ pub use sys::unix::UnixReady;
+}
+
+#[cfg(target_os = "fuchsia")]
+pub mod fuchsia {
+ //! Fuchsia-only extensions
+ //!
+ //! # Stability
+ //!
+ //! This module depends on the [magenta-sys crate](https://crates.io/crates/magenta-sys)
+ //! and so might introduce breaking changes, even on minor releases,
+ //! so long as that crate remains unstable.
+ pub use sys::{
+ EventedHandle,
+ };
+ pub use sys::fuchsia::{FuchsiaReady, zx_signals_t};
+}
+
+/// Windows-only extensions to the mio crate.
+///
+/// Mio on windows is currently implemented with IOCP for a high-performance
+/// implementation of asynchronous I/O. Mio then provides TCP and UDP as sample
+/// bindings for the system to connect networking types to asynchronous I/O. On
+/// Unix this scheme is then also extensible to all other file descriptors with
+/// the `EventedFd` type, but on Windows no such analog is available. The
+/// purpose of this module, however, is to similarly provide a mechanism for
+/// foreign I/O types to get hooked up into the IOCP event loop.
+///
+/// This module provides two types for interfacing with a custom IOCP handle:
+///
+/// * `Binding` - this type is intended to govern binding with mio's `Poll`
+/// type. Each I/O object should contain an instance of `Binding` that's
+/// interfaced with for the implementation of the `Evented` trait. The
+/// `register`, `reregister`, and `deregister` methods for the `Evented` trait
+/// all have rough analogs with `Binding`.
+///
+/// Note that this type **does not handle readiness**. That is, this type does
+/// not handle whether sockets are readable/writable/etc. It's intended that
+/// IOCP types will internally manage this state with a `SetReadiness` type
+/// from the `poll` module. The `SetReadiness` is typically lazily created on
+/// the first time that `Evented::register` is called and then stored in the
+/// I/O object.
+///
+/// Also note that for types which represent streams of bytes the mio
+/// interface of *readiness* doesn't map directly to the Windows model of
+/// *completion*. This means that types will have to perform internal
+/// buffering to ensure that a readiness interface can be provided. For a
+/// sample implementation see the TCP/UDP modules in mio itself.
+///
+/// * `Overlapped` - this type is intended to be used as the concrete instances
+/// of the `OVERLAPPED` type that most win32 methods expect. It's crucial, for
+/// safety, that all asynchronous operations are initiated with an instance of
+/// `Overlapped` and not another instantiation of `OVERLAPPED`.
+///
+/// Mio's `Overlapped` type is created with a function pointer that receives
+/// a `OVERLAPPED_ENTRY` type when called. This `OVERLAPPED_ENTRY` type is
+/// defined in the `winapi` crate. Whenever a completion is posted to an IOCP
+/// object the `OVERLAPPED` that was signaled will be interpreted as
+/// `Overlapped` in the mio crate and this function pointer will be invoked.
+/// Through this function pointer, and through the `OVERLAPPED` pointer,
+/// implementations can handle management of I/O events.
+///
+/// When put together these two types enable custom Windows handles to be
+/// registered with mio's event loops. The `Binding` type is used to associate
+/// handles and the `Overlapped` type is used to execute I/O operations. When
+/// the I/O operations are completed a custom function pointer is called which
+/// typically modifies a `SetReadiness` set by `Evented` methods which will get
+/// later hooked into the mio event loop.
+#[cfg(windows)]
+pub mod windows {
+
+ pub use sys::{Overlapped, Binding};
+}
+
+#[cfg(feature = "with-deprecated")]
+mod convert {
+ use std::time::Duration;
+
+ const NANOS_PER_MILLI: u32 = 1_000_000;
+ const MILLIS_PER_SEC: u64 = 1_000;
+
+ /// Convert a `Duration` to milliseconds, rounding up and saturating at
+ /// `u64::MAX`.
+ ///
+ /// The saturating is fine because `u64::MAX` milliseconds are still many
+ /// million years.
+ pub fn millis(duration: Duration) -> u64 {
+ // Round up.
+ let millis = (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
+ duration.as_secs().saturating_mul(MILLIS_PER_SEC).saturating_add(u64::from(millis))
+ }
+}
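+
+// Illustrative test (not part of upstream mio): `convert::millis` rounds
+// sub-millisecond remainders up and saturates instead of overflowing.
+#[cfg(all(test, feature = "with-deprecated"))]
+mod convert_tests {
+ use super::convert;
+ use std::time::Duration;
+
+ #[test]
+ fn millis_rounds_up_and_saturates() {
+ assert_eq!(convert::millis(Duration::new(0, 0)), 0);
+ // A single nanosecond is rounded up to a full millisecond.
+ assert_eq!(convert::millis(Duration::new(0, 1)), 1);
+ assert_eq!(convert::millis(Duration::new(1, 1_000_000)), 1_001);
+ // Huge durations saturate at u64::MAX instead of overflowing.
+ assert_eq!(convert::millis(Duration::new(::std::u64::MAX, 0)), ::std::u64::MAX);
+ }
+}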
diff --git a/third_party/rust/mio-0.6.23/src/net/mod.rs b/third_party/rust/mio-0.6.23/src/net/mod.rs
new file mode 100644
index 0000000000..53025c6869
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/net/mod.rs
@@ -0,0 +1,14 @@
+//! Networking primitives
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+mod tcp;
+mod udp;
+
+pub use self::tcp::{TcpListener, TcpStream};
+pub use self::udp::UdpSocket;
diff --git a/third_party/rust/mio-0.6.23/src/net/tcp.rs b/third_party/rust/mio-0.6.23/src/net/tcp.rs
new file mode 100644
index 0000000000..cc74ab9451
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/net/tcp.rs
@@ -0,0 +1,737 @@
+//! Primitives for working with TCP
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+use std::fmt;
+use std::io::{Read, Write};
+use std::net::{self, SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr};
+use std::time::Duration;
+
+use net2::TcpBuilder;
+use iovec::IoVec;
+
+use {io, sys, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use poll::SelectorId;
+
+/*
+ *
+ * ===== TcpStream =====
+ *
+ */
+
+/// A non-blocking TCP stream between a local socket and a remote socket.
+///
+/// The socket will be closed when the value is dropped.
+///
+/// # Examples
+///
+/// ```
+/// # use std::net::TcpListener;
+/// # use std::error::Error;
+/// #
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// # let _listener = TcpListener::bind("127.0.0.1:34254")?;
+/// use mio::{Events, Ready, Poll, PollOpt, Token};
+/// use mio::net::TcpStream;
+/// use std::time::Duration;
+///
+/// let stream = TcpStream::connect(&"127.0.0.1:34254".parse()?)?;
+///
+/// let poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register the socket with `Poll`
+/// poll.register(&stream, Token(0), Ready::writable(),
+/// PollOpt::edge())?;
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// // The socket might be ready at this point
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+pub struct TcpStream {
+ sys: sys::TcpStream,
+ selector_id: SelectorId,
+}
+
+use std::net::Shutdown;
+
+// TODO: remove when fuchsia's set_nonblocking is fixed in libstd
+#[cfg(target_os = "fuchsia")]
+fn set_nonblocking(stream: &net::TcpStream) -> io::Result<()> {
+ sys::set_nonblock(
+ ::std::os::unix::io::AsRawFd::as_raw_fd(stream))
+}
+#[cfg(not(target_os = "fuchsia"))]
+fn set_nonblocking(stream: &net::TcpStream) -> io::Result<()> {
+ stream.set_nonblocking(true)
+}
+
+
+impl TcpStream {
+ /// Create a new TCP stream and issue a non-blocking connect to the
+ /// specified address.
+ ///
+ /// This convenience method is available and uses the system's default
+ /// options when creating a socket which is then connected. If fine-grained
+ /// control over the creation of the socket is desired, you can use
+ /// `net2::TcpBuilder` to configure a socket and then pass its socket to
+ /// `TcpStream::connect_stream` to transfer ownership into mio and schedule
+ /// the connect operation.
+ pub fn connect(addr: &SocketAddr) -> io::Result<TcpStream> {
+ let sock = match *addr {
+ SocketAddr::V4(..) => TcpBuilder::new_v4(),
+ SocketAddr::V6(..) => TcpBuilder::new_v6(),
+ }?;
+ // Required on Windows for a future `connect_overlapped` operation to be
+ // executed successfully.
+ if cfg!(windows) {
+ sock.bind(&inaddr_any(addr))?;
+ }
+ TcpStream::connect_stream(sock.to_tcp_stream()?, addr)
+ }
+
+    /// Creates a new `TcpStream` from the pending socket inside the given
+    /// `std::net::TcpStream` (typically built with `net2::TcpBuilder`),
+    /// connecting it to the address specified.
+ ///
+ /// This constructor allows configuring the socket before it's actually
+ /// connected, and this function will transfer ownership to the returned
+ /// `TcpStream` if successful. An unconnected `TcpStream` can be created
+ /// with the `net2::TcpBuilder` type (and also configured via that route).
+ ///
+ /// The platform specific behavior of this function looks like:
+ ///
+ /// * On Unix, the socket is placed into nonblocking mode and then a
+ /// `connect` call is issued.
+ ///
+ /// * On Windows, the address is stored internally and the connect operation
+ /// is issued when the returned `TcpStream` is registered with an event
+ /// loop. Note that on Windows you must `bind` a socket before it can be
+ /// connected, so if a custom `TcpBuilder` is used it should be bound
+ /// (perhaps to `INADDR_ANY`) before this method is called.
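+    ///
+    /// A rough sketch of that flow (the address and options are illustrative
+    /// only, and the example is `no_run` because it opens a real socket):
+    ///
+    /// ```no_run
+    /// # extern crate net2;
+    /// # use std::error::Error;
+    /// # fn try_main() -> Result<(), Box<Error>> {
+    /// use mio::net::TcpStream;
+    /// use net2::TcpBuilder;
+    ///
+    /// // Configure the socket before handing it off to mio.
+    /// let builder = TcpBuilder::new_v4()?;
+    /// builder.reuse_address(true)?;
+    /// // On Windows the builder must also be bound first (see above).
+    /// let stream = TcpStream::connect_stream(builder.to_tcp_stream()?,
+    ///                                        &"127.0.0.1:34254".parse()?)?;
+    /// # let _ = stream;
+    /// # Ok(())
+    /// # }
+    /// # fn main() { try_main().unwrap(); }
+    /// ```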
+ pub fn connect_stream(stream: net::TcpStream,
+ addr: &SocketAddr) -> io::Result<TcpStream> {
+ Ok(TcpStream {
+ sys: sys::TcpStream::connect(stream, addr)?,
+ selector_id: SelectorId::new(),
+ })
+ }
+
+ /// Creates a new `TcpStream` from a standard `net::TcpStream`.
+ ///
+ /// This function is intended to be used to wrap a TCP stream from the
+ /// standard library in the mio equivalent. The conversion here will
+ /// automatically set `stream` to nonblocking and the returned object should
+ /// be ready to get associated with an event loop.
+ ///
+ /// Note that the TCP stream here will not have `connect` called on it, so
+ /// it should already be connected via some other means (be it manually, the
+ /// net2 crate, or the standard library).
+ pub fn from_stream(stream: net::TcpStream) -> io::Result<TcpStream> {
+ set_nonblocking(&stream)?;
+
+ Ok(TcpStream {
+ sys: sys::TcpStream::from_stream(stream),
+ selector_id: SelectorId::new(),
+ })
+ }
+
+ /// Returns the socket address of the remote peer of this TCP connection.
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.peer_addr()
+ }
+
+ /// Returns the socket address of the local half of this TCP connection.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.local_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `TcpStream` is a reference to the same stream that this
+ /// object references. Both handles will read and write the same stream of
+ /// data, and options set on one stream will be propagated to the other
+ /// stream.
+ pub fn try_clone(&self) -> io::Result<TcpStream> {
+ self.sys.try_clone().map(|s| {
+ TcpStream {
+ sys: s,
+ selector_id: self.selector_id.clone(),
+ }
+ })
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O on the specified
+ /// portions to return immediately with an appropriate value (see the
+ /// documentation of `Shutdown`).
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.sys.shutdown(how)
+ }
+
+ /// Sets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// If set, this option disables the Nagle algorithm. This means that
+ /// segments are always sent as soon as possible, even if there is only a
+ /// small amount of data. When not set, data is buffered until there is a
+ /// sufficient amount to send out, thereby avoiding the frequent sending of
+ /// small packets.
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.sys.set_nodelay(nodelay)
+ }
+
+ /// Gets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// For more information about this option, see [`set_nodelay`][link].
+ ///
+ /// [link]: #method.set_nodelay
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.sys.nodelay()
+ }
+
+ /// Sets the value of the `SO_RCVBUF` option on this socket.
+ ///
+ /// Changes the size of the operating system's receive buffer associated
+ /// with the socket.
+ pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.sys.set_recv_buffer_size(size)
+ }
+
+ /// Gets the value of the `SO_RCVBUF` option on this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_recv_buffer_size`][link].
+ ///
+ /// [link]: #method.set_recv_buffer_size
+ pub fn recv_buffer_size(&self) -> io::Result<usize> {
+ self.sys.recv_buffer_size()
+ }
+
+ /// Sets the value of the `SO_SNDBUF` option on this socket.
+ ///
+ /// Changes the size of the operating system's send buffer associated with
+ /// the socket.
+ pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.sys.set_send_buffer_size(size)
+ }
+
+ /// Gets the value of the `SO_SNDBUF` option on this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_send_buffer_size`][link].
+ ///
+ /// [link]: #method.set_send_buffer_size
+ pub fn send_buffer_size(&self) -> io::Result<usize> {
+ self.sys.send_buffer_size()
+ }
+
+ /// Sets whether keepalive messages are enabled to be sent on this socket.
+ ///
+ /// On Unix, this option will set the `SO_KEEPALIVE` as well as the
+ /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform).
+ /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option.
+ ///
+ /// If `None` is specified then keepalive messages are disabled, otherwise
+ /// the duration specified will be the time to remain idle before sending a
+ /// TCP keepalive probe.
+ ///
+    /// Some platforms only support this value at second granularity, so any
+    /// sub-second portion of the duration may be dropped.
+ pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+ self.sys.set_keepalive(keepalive)
+ }
+
+ /// Returns whether keepalive messages are enabled on this socket, and if so
+ /// the duration of time between them.
+ ///
+ /// For more information about this option, see [`set_keepalive`][link].
+ ///
+ /// [link]: #method.set_keepalive
+ pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+ self.sys.keepalive()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.sys.ttl()
+ }
+
+ /// Sets the value for the `IPV6_V6ONLY` option on this socket.
+ ///
+ /// If this is set to `true` then the socket is restricted to sending and
+    /// receiving IPv6 packets only. In this case an IPv4 application and an
+    /// IPv6 application can bind to the same port at the same time.
+ ///
+ /// If this is set to `false` then the socket can be used to send and
+ /// receive packets from an IPv4-mapped IPv6 address.
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.sys.set_only_v6(only_v6)
+ }
+
+ /// Gets the value of the `IPV6_V6ONLY` option for this socket.
+ ///
+ /// For more information about this option, see [`set_only_v6`][link].
+ ///
+ /// [link]: #method.set_only_v6
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.sys.only_v6()
+ }
+
+ /// Sets the value for the `SO_LINGER` option on this socket.
+ pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.sys.set_linger(dur)
+ }
+
+ /// Gets the value of the `SO_LINGER` option on this socket.
+ ///
+ /// For more information about this option, see [`set_linger`][link].
+ ///
+ /// [link]: #method.set_linger
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ self.sys.linger()
+ }
+
+ #[deprecated(since = "0.6.9", note = "use set_keepalive")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn set_keepalive_ms(&self, keepalive: Option<u32>) -> io::Result<()> {
+ self.set_keepalive(keepalive.map(|v| {
+ Duration::from_millis(u64::from(v))
+ }))
+ }
+
+ #[deprecated(since = "0.6.9", note = "use keepalive")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn keepalive_ms(&self) -> io::Result<Option<u32>> {
+ self.keepalive().map(|v| {
+ v.map(|v| {
+ ::convert::millis(v) as u32
+ })
+ })
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.sys.take_error()
+ }
+
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying recv system call.
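+    ///
+    /// A short sketch (`no_run`, since it needs a connected peer in practice):
+    ///
+    /// ```no_run
+    /// # use std::error::Error;
+    /// # fn try_main() -> Result<(), Box<Error>> {
+    /// use mio::net::TcpStream;
+    ///
+    /// let stream = TcpStream::connect(&"127.0.0.1:34254".parse()?)?;
+    /// let mut buf = [0; 16];
+    /// // Peeking leaves the bytes in the kernel buffer, so a later `read`
+    /// // (or another `peek`) will see the same data; may return `WouldBlock`.
+    /// let n = stream.peek(&mut buf)?;
+    /// # let _ = n;
+    /// # Ok(())
+    /// # }
+    /// # fn main() { try_main().unwrap(); }
+    /// ```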
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.sys.peek(buf)
+ }
+
+    /// Read into a list of buffers all at once.
+ ///
+ /// This operation will attempt to read bytes from this socket and place
+ /// them into the list of buffers provided. Note that each buffer is an
+ /// `IoVec` which can be created from a byte slice.
+ ///
+ /// The buffers provided will be filled in sequentially. A buffer will be
+ /// entirely filled up before the next is written to.
+ ///
+ /// The number of bytes read is returned, if successful, or an error is
+ /// returned otherwise. If no bytes are available to be read yet then
+ /// a "would block" error is returned. This operation does not block.
+ ///
+ /// On Unix this corresponds to the `readv` syscall.
+ pub fn read_bufs(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ self.sys.readv(bufs)
+ }
+
+ /// Write a list of buffers all at once.
+ ///
+ /// This operation will attempt to write a list of byte buffers to this
+ /// socket. Note that each buffer is an `IoVec` which can be created from a
+ /// byte slice.
+ ///
+ /// The buffers provided will be written sequentially. A buffer will be
+ /// entirely written before the next is written.
+ ///
+ /// The number of bytes written is returned, if successful, or an error is
+ /// returned otherwise. If the socket is not currently writable then a
+ /// "would block" error is returned. This operation does not block.
+ ///
+ /// On Unix this corresponds to the `writev` syscall.
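+    ///
+    /// A hedged sketch of vectored writes (assuming the `iovec` crate's
+    /// `From<&[u8]>` conversion; `no_run` because the socket may not be
+    /// writable yet):
+    ///
+    /// ```no_run
+    /// # extern crate iovec;
+    /// # use std::error::Error;
+    /// # fn try_main() -> Result<(), Box<Error>> {
+    /// use mio::net::TcpStream;
+    /// use iovec::IoVec;
+    ///
+    /// let stream = TcpStream::connect(&"127.0.0.1:34254".parse()?)?;
+    /// // Each `IoVec` borrows an existing byte slice.
+    /// let header: &IoVec = (&b"HEAD"[..]).into();
+    /// let body: &IoVec = (&b"BODY"[..]).into();
+    /// // Both buffers are submitted in a single `writev`-style call.
+    /// let n = stream.write_bufs(&[header, body])?;
+    /// # let _ = n;
+    /// # Ok(())
+    /// # }
+    /// # fn main() { try_main().unwrap(); }
+    /// ```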
+ pub fn write_bufs(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ self.sys.writev(bufs)
+ }
+}
+
+fn inaddr_any(other: &SocketAddr) -> SocketAddr {
+ match *other {
+ SocketAddr::V4(..) => {
+ let any = Ipv4Addr::new(0, 0, 0, 0);
+ let addr = SocketAddrV4::new(any, 0);
+ SocketAddr::V4(addr)
+ }
+ SocketAddr::V6(..) => {
+ let any = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
+ let addr = SocketAddrV6::new(any, 0, 0, 0);
+ SocketAddr::V6(addr)
+ }
+ }
+}
+
+impl Read for TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (&self.sys).read(buf)
+ }
+}
+
+impl<'a> Read for &'a TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (&self.sys).read(buf)
+ }
+}
+
+impl Write for TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&self.sys).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.sys).flush()
+ }
+}
+
+impl<'a> Write for &'a TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&self.sys).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.sys).flush()
+ }
+}
+
+impl Evented for TcpStream {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.selector_id.associate_selector(poll)?;
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.sys, f)
+ }
+}
+
+/*
+ *
+ * ===== TcpListener =====
+ *
+ */
+
+/// A structure representing a socket server
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Ready, Poll, PollOpt, Token};
+/// use mio::net::TcpListener;
+/// use std::time::Duration;
+///
+/// let listener = TcpListener::bind(&"127.0.0.1:34255".parse()?)?;
+///
+/// let poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register the socket with `Poll`
+/// poll.register(&listener, Token(0), Ready::readable(),
+/// PollOpt::edge())?;
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// // There may be a socket ready to be accepted
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+pub struct TcpListener {
+ sys: sys::TcpListener,
+ selector_id: SelectorId,
+}
+
+impl TcpListener {
+ /// Convenience method to bind a new TCP listener to the specified address
+ /// to receive new connections.
+ ///
+ /// This function will take the following steps:
+ ///
+ /// 1. Create a new TCP socket.
+ /// 2. Set the `SO_REUSEADDR` option on the socket.
+ /// 3. Bind the socket to the specified address.
+ /// 4. Call `listen` on the socket to prepare it to receive new connections.
+ ///
+ /// If fine-grained control over the binding and listening process for a
+ /// socket is desired then the `net2::TcpBuilder` methods can be used in
+ /// combination with the `TcpListener::from_listener` method to transfer
+ /// ownership into mio.
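+    ///
+    /// A rough sketch of that fine-grained path (options and backlog here are
+    /// illustrative only; `no_run`):
+    ///
+    /// ```no_run
+    /// # extern crate net2;
+    /// # use std::error::Error;
+    /// # fn try_main() -> Result<(), Box<Error>> {
+    /// use mio::net::TcpListener;
+    /// use net2::TcpBuilder;
+    ///
+    /// let builder = TcpBuilder::new_v4()?;
+    /// builder.reuse_address(true)?;
+    /// builder.bind("127.0.0.1:0")?;
+    /// // Hand the listening socket over to mio.
+    /// let listener = TcpListener::from_std(builder.listen(1024)?)?;
+    /// # let _ = listener;
+    /// # Ok(())
+    /// # }
+    /// # fn main() { try_main().unwrap(); }
+    /// ```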
+ pub fn bind(addr: &SocketAddr) -> io::Result<TcpListener> {
+ // Create the socket
+ let sock = match *addr {
+ SocketAddr::V4(..) => TcpBuilder::new_v4(),
+ SocketAddr::V6(..) => TcpBuilder::new_v6(),
+ }?;
+
+ // Set SO_REUSEADDR, but only on Unix (mirrors what libstd does)
+ if cfg!(unix) {
+ sock.reuse_address(true)?;
+ }
+
+ // Bind the socket
+ sock.bind(addr)?;
+
+ // listen
+ let listener = sock.listen(1024)?;
+ Ok(TcpListener {
+ sys: sys::TcpListener::new(listener)?,
+ selector_id: SelectorId::new(),
+ })
+ }
+
+ #[deprecated(since = "0.6.13", note = "use from_std instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn from_listener(listener: net::TcpListener, _: &SocketAddr)
+ -> io::Result<TcpListener> {
+ TcpListener::from_std(listener)
+ }
+
+ /// Creates a new `TcpListener` from an instance of a
+ /// `std::net::TcpListener` type.
+ ///
+    /// This function will put the provided `listener` into nonblocking mode on
+    /// Unix; on other platforms the listener is simply wrapped up in a mio
+    /// listener ready to accept new connections and become associated with an
+    /// event loop.
+    ///
+    /// The provided listener must already be bound to an address.
+ pub fn from_std(listener: net::TcpListener) -> io::Result<TcpListener> {
+ sys::TcpListener::new(listener).map(|s| {
+ TcpListener {
+ sys: s,
+ selector_id: SelectorId::new(),
+ }
+ })
+ }
+
+ /// Accepts a new `TcpStream`.
+ ///
+ /// This may return an `Err(e)` where `e.kind()` is
+ /// `io::ErrorKind::WouldBlock`. This means a stream may be ready at a later
+ /// point and one should wait for a notification before calling `accept`
+ /// again.
+ ///
+ /// If an accepted stream is returned, the remote address of the peer is
+ /// returned along with it.
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+        let (s, a) = self.accept_std()?;
+ Ok((TcpStream::from_stream(s)?, a))
+ }
+
+ /// Accepts a new `std::net::TcpStream`.
+ ///
+ /// This method is the same as `accept`, except that it returns a TCP socket
+    /// *in blocking mode* which isn't bound to `mio`. It can then be converted
+    /// to a `mio` type later, if necessary.
+ pub fn accept_std(&self) -> io::Result<(net::TcpStream, SocketAddr)> {
+ self.sys.accept()
+ }
+
+ /// Returns the local socket address of this listener.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.local_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `TcpListener` is a reference to the same socket that this
+ /// object references. Both handles can be used to accept incoming
+ /// connections and options set on one listener will affect the other.
+ pub fn try_clone(&self) -> io::Result<TcpListener> {
+ self.sys.try_clone().map(|s| {
+ TcpListener {
+ sys: s,
+ selector_id: self.selector_id.clone(),
+ }
+ })
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.sys.ttl()
+ }
+
+ /// Sets the value for the `IPV6_V6ONLY` option on this socket.
+ ///
+ /// If this is set to `true` then the socket is restricted to sending and
+    /// receiving IPv6 packets only. In this case an IPv4 application and an
+    /// IPv6 application can bind to the same port at the same time.
+ ///
+ /// If this is set to `false` then the socket can be used to send and
+ /// receive packets from an IPv4-mapped IPv6 address.
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.sys.set_only_v6(only_v6)
+ }
+
+ /// Gets the value of the `IPV6_V6ONLY` option for this socket.
+ ///
+ /// For more information about this option, see [`set_only_v6`][link].
+ ///
+ /// [link]: #method.set_only_v6
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.sys.only_v6()
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.sys.take_error()
+ }
+}
+
+impl Evented for TcpListener {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.selector_id.associate_selector(poll)?;
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.sys, f)
+ }
+}
+
+/*
+ *
+ * ===== UNIX ext =====
+ *
+ */
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl IntoRawFd for TcpStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for TcpStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl FromRawFd for TcpStream {
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpStream {
+ TcpStream {
+ sys: FromRawFd::from_raw_fd(fd),
+ selector_id: SelectorId::new(),
+ }
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl IntoRawFd for TcpListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for TcpListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl FromRawFd for TcpListener {
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpListener {
+ TcpListener {
+ sys: FromRawFd::from_raw_fd(fd),
+ selector_id: SelectorId::new(),
+ }
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/net/udp.rs b/third_party/rust/mio-0.6.23/src/net/udp.rs
new file mode 100644
index 0000000000..0d89511ac7
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/net/udp.rs
@@ -0,0 +1,645 @@
+//! Primitives for working with UDP
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+use {io, sys, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use poll::SelectorId;
+use std::fmt;
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use iovec::IoVec;
+
+/// A User Datagram Protocol socket.
+///
+/// This is an implementation of a bound UDP socket. This supports both IPv4 and
+/// IPv6 addresses, and there is no corresponding notion of a server because UDP
+/// is a datagram protocol.
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// #
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// // An Echo program:
+/// // SENDER -> sends a message.
+/// // ECHOER -> listens and prints the message received.
+///
+/// use mio::net::UdpSocket;
+/// use mio::{Events, Ready, Poll, PollOpt, Token};
+/// use std::time::Duration;
+///
+/// const SENDER: Token = Token(0);
+/// const ECHOER: Token = Token(1);
+///
+/// // This operation will fail if the address is in use, so we select different ports for each
+/// // socket.
+/// let sender_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+/// let echoer_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+///
+/// // If we do not use connect here, SENDER and ECHOER would need to call send_to and recv_from
+/// // respectively.
+/// sender_socket.connect(echoer_socket.local_addr().unwrap())?;
+///
+/// // We need a Poll to check if SENDER is ready to be written into, and if ECHOER is ready to be
+/// // read from.
+/// let poll = Poll::new()?;
+///
+/// // We register our sockets here so that we can check if they are ready to be written/read.
+/// poll.register(&sender_socket, SENDER, Ready::writable(), PollOpt::edge())?;
+/// poll.register(&echoer_socket, ECHOER, Ready::readable(), PollOpt::edge())?;
+///
+/// let msg_to_send = [9; 9];
+/// let mut buffer = [0; 9];
+///
+/// let mut events = Events::with_capacity(128);
+/// loop {
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+/// for event in events.iter() {
+/// match event.token() {
+/// // Our SENDER is ready to be written into.
+/// SENDER => {
+/// let bytes_sent = sender_socket.send(&msg_to_send)?;
+/// assert_eq!(bytes_sent, 9);
+/// println!("sent {:?} -> {:?} bytes", msg_to_send, bytes_sent);
+/// },
+/// // Our ECHOER is ready to be read from.
+/// ECHOER => {
+/// let num_recv = echoer_socket.recv(&mut buffer)?;
+/// println!("echo {:?} -> {:?}", buffer, num_recv);
+/// buffer = [0; 9];
+/// # return Ok(());
+/// }
+/// _ => unreachable!()
+/// }
+/// }
+/// }
+/// #
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+pub struct UdpSocket {
+ sys: sys::UdpSocket,
+ selector_id: SelectorId,
+}
+
+impl UdpSocket {
+ /// Creates a UDP socket from the given address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// // We must bind it to an open address.
+ /// let socket = match UdpSocket::bind(&"127.0.0.1:0".parse()?) {
+ /// Ok(new_socket) => new_socket,
+ /// Err(fail) => {
+ /// // We panic! here, but you could try to bind it again on another address.
+ /// panic!("Failed to bind socket. {:?}", fail);
+ /// }
+ /// };
+ ///
+    /// // Our socket was created, but we should not use it before checking its readiness.
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn bind(addr: &SocketAddr) -> io::Result<UdpSocket> {
+ let socket = net::UdpSocket::bind(addr)?;
+ UdpSocket::from_socket(socket)
+ }
+
+ /// Creates a new mio-wrapped socket from an underlying and bound std
+ /// socket.
+ ///
+ /// This function requires that `socket` has previously been bound to an
+ /// address to work correctly, and returns an I/O object which can be used
+ /// with mio to send/receive UDP messages.
+ ///
+ /// This can be used in conjunction with net2's `UdpBuilder` interface to
+ /// configure a socket before it's handed off to mio, such as setting
+ /// options like `reuse_address` or binding to multiple addresses.
+ pub fn from_socket(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ Ok(UdpSocket {
+ sys: sys::UdpSocket::new(socket)?,
+ selector_id: SelectorId::new(),
+ })
+ }
+
+ /// Returns the socket address that this socket was created from.
+ ///
+ /// # Examples
+ ///
+ // This assertion is almost, but not quite, universal. It fails on
+ // shared-IP FreeBSD jails. It's hard for mio to know whether we're jailed,
+ // so simply disable the test on FreeBSD.
+ #[cfg_attr(not(target_os = "freebsd"), doc = " ```")]
+ #[cfg_attr(target_os = "freebsd", doc = " ```no_run")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let addr = "127.0.0.1:0".parse()?;
+    /// let socket = UdpSocket::bind(&addr)?;
+    /// assert_eq!(socket.local_addr()?.ip(), addr.ip());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.local_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `UdpSocket` is a reference to the same socket that this
+ /// object references. Both handles will read and write the same port, and
+ /// options set on one socket will be propagated to the other.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// // We must bind it to an open address.
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ /// let cloned_socket = socket.try_clone()?;
+ ///
+ /// assert_eq!(socket.local_addr()?, cloned_socket.local_addr()?);
+ ///
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.sys.try_clone()
+ .map(|s| {
+ UdpSocket {
+ sys: s,
+ selector_id: self.selector_id.clone(),
+ }
+ })
+ }
+
+ /// Sends data on the socket to the given address. On success, returns the
+ /// number of bytes written.
+ ///
+    /// Unlike the standard library, the `target` address must already be a
+    /// resolved `SocketAddr` rather than any `ToSocketAddrs` implementor.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is writable before calling send_to,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let bytes_sent = socket.send_to(&[9; 9], &"127.0.0.1:11100".parse()?)?;
+ /// assert_eq!(bytes_sent, 9);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
+ self.sys.send_to(buf, target)
+ }
+
+ /// Receives data from the socket. On success, returns the number of bytes
+ /// read and the address from whence the data came.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is readable before calling recv_from,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let mut buf = [0; 9];
+ /// let (num_recv, from_addr) = socket.recv_from(&mut buf)?;
+ /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.sys.recv_from(buf)
+ }
+
+    /// Sends data on the socket to the address previously set via `connect()`. On success,
+ /// returns the number of bytes written.
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.sys.send(buf)
+ }
+
+    /// Receives data on the socket from the address previously set via `connect()`. On success, returns
+ /// the number of bytes read.
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.sys.recv(buf)
+ }
+
+    /// Connects the UDP socket by setting the default destination for `send()`
+    /// and limiting packets read via `recv()` to those arriving from the
+    /// address specified in `addr`.
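+    ///
+    /// A connected-socket sketch (the port is illustrative; `no_run`):
+    ///
+    /// ```no_run
+    /// # use std::error::Error;
+    /// # fn try_main() -> Result<(), Box<Error>> {
+    /// use mio::net::UdpSocket;
+    ///
+    /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+    /// socket.connect("127.0.0.1:11100".parse()?)?;
+    /// // After `connect`, plain `send`/`recv` can be used instead of
+    /// // `send_to`/`recv_from`; both may return `WouldBlock`.
+    /// socket.send(&[1, 2, 3])?;
+    /// # Ok(())
+    /// # }
+    /// # fn main() { try_main().unwrap(); }
+    /// ```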
+ pub fn connect(&self, addr: SocketAddr) -> io::Result<()> {
+ self.sys.connect(addr)
+ }
+
+ /// Sets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// When enabled, this socket is allowed to send packets to a broadcast
+ /// address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let broadcast_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ /// if broadcast_socket.broadcast()? == false {
+ /// broadcast_socket.set_broadcast(true)?;
+ /// }
+ ///
+ /// assert_eq!(broadcast_socket.broadcast()?, true);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.sys.set_broadcast(on)
+ }
+
+ /// Gets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_broadcast`][link].
+ ///
+ /// [link]: #method.set_broadcast
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let broadcast_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ /// assert_eq!(broadcast_socket.broadcast()?, false);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.sys.broadcast()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// If enabled, multicast packets will be looped back to the local socket.
+    /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.sys.set_multicast_loop_v4(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v4
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.sys.multicast_loop_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// Indicates the time-to-live value of outgoing multicast packets for
+ /// this socket. The default value is 1 which means that multicast packets
+ /// don't leave the local network unless explicitly requested.
+ ///
+    /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_multicast_ttl_v4(ttl)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_ttl_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_ttl_v4
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.sys.multicast_ttl_v4()
+ }
+
+ /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// Controls whether this socket sees the multicast packets it sends itself.
+    /// Note that this may not have any effect on IPv4 sockets.
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.sys.set_multicast_loop_v6(on)
+ }
+
+ /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v6`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v6
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.sys.multicast_loop_v6()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ /// if socket.ttl()? < 255 {
+ /// socket.set_ttl(255)?;
+ /// }
+ ///
+ /// assert_eq!(socket.ttl()?, 255);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// #
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?;
+ /// socket.set_ttl(255)?;
+ ///
+ /// assert_eq!(socket.ttl()?, 255);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.sys.ttl()
+ }
+
+ /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// address of the local interface with which the system should join the
+ /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+ /// interface is chosen by the system.
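+    ///
+    /// An illustrative sketch (the group address is only an example; `no_run`
+    /// since multicast may be unavailable in some environments):
+    ///
+    /// ```no_run
+    /// # use std::error::Error;
+    /// # fn try_main() -> Result<(), Box<Error>> {
+    /// use mio::net::UdpSocket;
+    /// use std::net::Ipv4Addr;
+    ///
+    /// let socket = UdpSocket::bind(&"0.0.0.0:0".parse()?)?;
+    /// // Join 224.0.0.251 and let the system pick the interface.
+    /// socket.join_multicast_v4(&Ipv4Addr::new(224, 0, 0, 251),
+    ///                          &Ipv4Addr::new(0, 0, 0, 0))?;
+    /// # Ok(())
+    /// # }
+    /// # fn main() { try_main().unwrap(); }
+    /// ```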
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.sys.join_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// index of the interface to join/leave (or 0 to indicate any interface).
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.sys.join_multicast_v6(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v4`][link].
+ ///
+ /// [link]: #method.join_multicast_v4
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.sys.leave_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v6`][link].
+ ///
+ /// [link]: #method.join_multicast_v6
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.sys.leave_multicast_v6(multiaddr, interface)
+ }
+
+ /// Sets the value for the `IPV6_V6ONLY` option on this socket.
+ ///
+ /// If this is set to `true` then the socket is restricted to sending and
+    /// receiving IPv6 packets only. In this case an IPv4 application and an
+    /// IPv6 application can bind to the same port at the same time.
+ ///
+ /// If this is set to `false` then the socket can be used to send and
+ /// receive packets from an IPv4-mapped IPv6 address.
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.sys.set_only_v6(only_v6)
+ }
+
+ /// Gets the value of the `IPV6_V6ONLY` option for this socket.
+ ///
+ /// For more information about this option, see [`set_only_v6`][link].
+ ///
+ /// [link]: #method.set_only_v6
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.sys.only_v6()
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.sys.take_error()
+ }
+
+    /// Receives a single datagram message on the socket previously connected
+    /// via `connect()`.
+ ///
+ /// This operation will attempt to read bytes from this socket and place
+ /// them into the list of buffers provided. Note that each buffer is an
+ /// `IoVec` which can be created from a byte slice.
+ ///
+ /// The buffers provided will be filled sequentially. A buffer will be
+ /// entirely filled up before the next is written to.
+ ///
+ /// The number of bytes read is returned, if successful, or an error is
+ /// returned otherwise. If no bytes are available to be read yet then
+ /// a [`WouldBlock`][link] error is returned. This operation does not block.
+ ///
+ /// On Unix this corresponds to the `readv` syscall.
+ ///
+ /// [link]: https://doc.rust-lang.org/nightly/std/io/enum.ErrorKind.html#variant.WouldBlock
+ #[cfg(all(unix, not(target_os = "fuchsia")))]
+ pub fn recv_bufs(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ self.sys.readv(bufs)
+ }
+
+    /// Sends data on the socket to the address previously set via `connect()`.
+ ///
+ /// This operation will attempt to send a list of byte buffers to this
+ /// socket in a single datagram. Note that each buffer is an `IoVec`
+ /// which can be created from a byte slice.
+ ///
+ /// The buffers provided will be written sequentially. A buffer will be
+ /// entirely written before the next is written.
+ ///
+ /// The number of bytes written is returned, if successful, or an error is
+ /// returned otherwise. If the socket is not currently writable then a
+ /// [`WouldBlock`][link] error is returned. This operation does not block.
+ ///
+ /// On Unix this corresponds to the `writev` syscall.
+ ///
+ /// [link]: https://doc.rust-lang.org/nightly/std/io/enum.ErrorKind.html#variant.WouldBlock
+ #[cfg(all(unix, not(target_os = "fuchsia")))]
+ pub fn send_bufs(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ self.sys.writev(bufs)
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.selector_id.associate_selector(poll)?;
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.sys, f)
+ }
+}
+
+/*
+ *
+ * ===== UNIX ext =====
+ *
+ */
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl IntoRawFd for UdpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for UdpSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl FromRawFd for UdpSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
+ UdpSocket {
+ sys: FromRawFd::from_raw_fd(fd),
+ selector_id: SelectorId::new(),
+ }
+ }
+}
+
diff --git a/third_party/rust/mio-0.6.23/src/poll.rs b/third_party/rust/mio-0.6.23/src/poll.rs
new file mode 100644
index 0000000000..7985d456cd
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/poll.rs
@@ -0,0 +1,2783 @@
+use {sys, Token};
+use event_imp::{self as event, Ready, Event, Evented, PollOpt};
+use std::{fmt, io, ptr, usize};
+use std::cell::UnsafeCell;
+use std::{mem, ops, isize};
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::AsRawFd;
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::RawFd;
+use std::process;
+use std::sync::{Arc, Mutex, Condvar};
+use std::sync::atomic::{AtomicUsize, AtomicPtr, AtomicBool};
+use std::sync::atomic::Ordering::{self, Acquire, Release, AcqRel, Relaxed, SeqCst};
+use std::time::{Duration, Instant};
+
+// Poll is backed by two readiness queues. The first is a system readiness queue
+// represented by `sys::Selector`. The system readiness queue handles events
+// provided by the system, such as TCP and UDP. The second readiness queue is
+// implemented in user space by `ReadinessQueue`. It provides a way to implement
+// purely user space `Evented` types.
+//
+// `ReadinessQueue` is backed by a MPSC queue that supports reuse of linked
+// list nodes. This significantly reduces the number of required allocations.
+// Each `Registration` / `SetReadiness` pair allocates a single readiness node
+// that is used for the lifetime of the registration.
+//
+// The readiness node also includes a single atomic variable, `state` that
+// tracks most of the state associated with the registration. This includes the
+// current readiness, interest, poll options, and internal state. When the node
+// state is mutated, it is queued in the MPSC channel. A call to
+// `ReadinessQueue::poll` will dequeue and process nodes. The node state can
+// still be mutated while it is queued in the channel for processing.
+// Intermediate state values do not matter as long as the final state is
+// included in the call to `poll`. This is the eventually consistent nature of
+// the readiness queue.
+//
+// The readiness node is ref counted using the `ref_count` field. On creation,
+// the ref_count is initialized to 3: one `Registration` handle, one
+// `SetReadiness` handle, and one for the readiness queue. Since the readiness queue
+// doesn't *always* hold a handle to the node, we don't use the Arc type for
+// managing ref counts (this is to avoid constantly incrementing and
+// decrementing the ref count when pushing & popping from the queue). When the
+// `Registration` handle is dropped, the `dropped` flag is set on the node, then
+// the node is pushed into the registration queue. When Poll::poll pops the
+// node, it sees the drop flag is set, and decrements its ref count.
+//
+// The MPSC queue is a modified version of the intrusive MPSC node based queue
+// described by 1024cores [1].
+//
+// The first modification is that two markers are used instead of a single
+// `stub`. The second marker is a `sleep_marker` which is used to signal to
+// producers that the consumer is going to sleep. This sleep_marker is only used
+// when the queue is empty, implying that the only node in the queue is
+// `end_marker`.
+//
+// The second modification is an `until` argument passed to the dequeue
+// function. When `poll` encounters a level-triggered node, the node will be
+// immediately pushed back into the queue. In order to avoid an infinite loop,
+// before `poll` pushes the node back it saves off the node's pointer and then
+// passes it as the `until` argument to the next dequeue. If the next node to
+// pop is `until`, then `Dequeue::Empty` is returned.
+//
+// [1] http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue
+
+
+/// Polls for readiness events on all registered values.
+///
+/// `Poll` allows a program to monitor a large number of `Evented` types,
+/// waiting until one or more become "ready" for some class of operations; e.g.
+/// reading and writing. An `Evented` type is considered ready if it is possible
+/// to immediately perform a corresponding operation; e.g. [`read`] or
+/// [`write`].
+///
+/// To use `Poll`, an `Evented` type must first be registered with the `Poll`
+/// instance using the [`register`] method, supplying readiness interest. The
+/// readiness interest tells `Poll` which specific operations on the handle to
+/// monitor for readiness. A `Token` is also passed to the [`register`]
+/// function. When `Poll` returns a readiness event, it will include this token.
+/// This associates the event with the `Evented` handle that generated the
+/// event.
+///
+/// [`read`]: tcp/struct.TcpStream.html#method.read
+/// [`write`]: tcp/struct.TcpStream.html#method.write
+/// [`register`]: #method.register
+///
+/// # Examples
+///
+/// A basic example -- establishing a `TcpStream` connection.
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Poll, Ready, PollOpt, Token};
+/// use mio::net::TcpStream;
+///
+/// use std::net::{TcpListener, SocketAddr};
+///
+/// // Bind a server socket to connect to.
+/// let addr: SocketAddr = "127.0.0.1:0".parse()?;
+/// let server = TcpListener::bind(&addr)?;
+///
+/// // Construct a new `Poll` handle as well as the `Events` we'll store into
+/// let poll = Poll::new()?;
+/// let mut events = Events::with_capacity(1024);
+///
+/// // Connect the stream
+/// let stream = TcpStream::connect(&server.local_addr()?)?;
+///
+/// // Register the stream with `Poll`
+/// poll.register(&stream, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+///
+/// // Wait for the socket to become ready. This has to happen in a loop to
+/// // handle spurious wakeups.
+/// loop {
+/// poll.poll(&mut events, None)?;
+///
+/// for event in &events {
+/// if event.token() == Token(0) && event.readiness().is_writable() {
+/// // The socket connected (probably, it could still be a spurious
+/// // wakeup)
+/// return Ok(());
+/// }
+/// }
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// # Edge-triggered and level-triggered
+///
+/// An [`Evented`] registration may request edge-triggered events or
+/// level-triggered events. This is done by setting `register`'s
+/// [`PollOpt`] argument to either [`edge`] or [`level`].
+///
+/// The difference between the two can be described as follows. Suppose that
+/// this scenario happens:
+///
+/// 1. A [`TcpStream`] is registered with `Poll`.
+/// 2. The socket receives 2kb of data.
+/// 3. A call to [`Poll::poll`] returns the token associated with the socket
+/// indicating readable readiness.
+/// 4. 1kb is read from the socket.
+/// 5. Another call to [`Poll::poll`] is made.
+///
+/// If when the socket was registered with `Poll`, edge triggered events were
+/// requested, then the call to [`Poll::poll`] done in step **5** will
+/// (probably) hang despite there being another 1kb still present in the socket
+/// read buffer. The reason for this is that edge-triggered mode delivers events
+/// only when changes occur on the monitored [`Evented`]. So, in step *5* the
+/// caller might end up waiting for some data that is already present inside the
+/// socket buffer.
+///
+/// With edge-triggered events, operations **must** be performed on the
+/// `Evented` type until [`WouldBlock`] is returned. In other words, after
+/// receiving an event indicating readiness for a certain operation, one should
+/// assume that [`Poll::poll`] may never return another event for the same token
+/// and readiness until the operation returns [`WouldBlock`].
+///
+/// By contrast, when level-triggered notifications were requested, each call to
+/// [`Poll::poll`] will return an event for the socket as long as data remains
+/// in the socket buffer. Generally, level-triggered events should be avoided if
+/// high performance is a concern.
+///
+/// Since even with edge-triggered events, multiple events can be generated upon
+/// receipt of multiple chunks of data, the caller has the option to set the
+/// [`oneshot`] flag. This tells `Poll` to disable the associated [`Evented`]
+/// after the event is returned from [`Poll::poll`]. Subsequent calls to
+/// [`Poll::poll`] will no longer include events for [`Evented`] handles that
+/// are disabled even if the readiness state changes. The handle can be
+/// re-enabled by calling [`reregister`]. When handles are disabled, internal
+/// resources used to monitor the handle are maintained until the handle is
+/// dropped or deregistered. This makes re-registering the handle a fast
+/// operation.
+///
+/// For example, in the following scenario:
+///
+/// 1. A [`TcpStream`] is registered with `Poll`.
+/// 2. The socket receives 2kb of data.
+/// 3. A call to [`Poll::poll`] returns the token associated with the socket
+/// indicating readable readiness.
+/// 4. 2kb is read from the socket.
+/// 5. Another call to read is issued and [`WouldBlock`] is returned
+/// 6. The socket receives another 2kb of data.
+/// 7. Another call to [`Poll::poll`] is made.
+///
+/// Assuming the socket was registered with `Poll` with the [`edge`] and
+/// [`oneshot`] options, then the call to [`Poll::poll`] in step 7 would block. This
+/// is because [`oneshot`] tells `Poll` to disable events for the socket after
+/// returning an event.
+///
+/// In order to receive the event for the data received in step 6, the socket
+/// would need to be reregistered using [`reregister`].
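+///
+/// A hedged sketch of re-arming a oneshot registration (the token and address
+/// are illustrative; `no_run`):
+///
+/// ```no_run
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Poll, PollOpt, Ready, Token};
+/// use mio::net::TcpStream;
+///
+/// let poll = Poll::new()?;
+/// let stream = TcpStream::connect(&"127.0.0.1:34254".parse()?)?;
+/// poll.register(&stream, Token(0), Ready::readable(),
+///               PollOpt::edge() | PollOpt::oneshot())?;
+///
+/// let mut events = Events::with_capacity(8);
+/// poll.poll(&mut events, None)?;
+/// // ... read until `WouldBlock`, then re-arm to receive further events.
+/// poll.reregister(&stream, Token(0), Ready::readable(),
+///                 PollOpt::edge() | PollOpt::oneshot())?;
+/// # Ok(())
+/// # }
+/// # fn main() { try_main().unwrap(); }
+/// ```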
+///
+/// [`PollOpt`]: struct.PollOpt.html
+/// [`edge`]: struct.PollOpt.html#method.edge
+/// [`level`]: struct.PollOpt.html#method.level
+/// [`Poll::poll`]: struct.Poll.html#method.poll
+/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock
+/// [`Evented`]: event/trait.Evented.html
+/// [`TcpStream`]: tcp/struct.TcpStream.html
+/// [`reregister`]: #method.reregister
+/// [`oneshot`]: struct.PollOpt.html#method.oneshot
+///
+/// # Portability
+///
+/// Using `Poll` provides a portable interface across supported platforms as
+/// long as the caller takes the following into consideration:
+///
+/// ### Spurious events
+///
+/// [`Poll::poll`] may return readiness events even if the associated
+/// [`Evented`] handle is not actually ready. Given the same code, this may
+/// happen more on some platforms than others. It is important to never assume
+/// that, just because a readiness notification was received, that the
+/// associated operation will succeed as well.
+///
+/// If an operation fails with [`WouldBlock`], then the caller should not treat
+/// this as an error, but instead should wait until another readiness event is
+/// received.
+///
+/// ### Draining readiness
+///
+/// When using edge-triggered mode, once a readiness event is received, the
+/// corresponding operation must be performed repeatedly until it returns
+/// [`WouldBlock`]. Unless this is done, there is no guarantee that another
+/// readiness event will be delivered, even if further data is received for the
+/// [`Evented`] handle.
+///
+/// For example, in the first scenario described above, after step 5, even if
+/// the socket receives more data there is no guarantee that another readiness
+/// event will be delivered.
+///
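+/// A hedged sketch of such a drain loop on any nonblocking `Read` handle
+/// (error handling is reduced to the essentials):
+///
+/// ```
+/// use std::io::{self, Read};
+///
+/// fn drain<T: Read>(socket: &mut T) -> io::Result<()> {
+///     let mut buf = [0; 4096];
+///     loop {
+///         match socket.read(&mut buf) {
+///             // The peer closed the connection.
+///             Ok(0) => return Ok(()),
+///             // Keep reading until the kernel reports no more data.
+///             Ok(_) => continue,
+///             Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(()),
+///             Err(e) => return Err(e),
+///         }
+///     }
+/// }
+/// # fn main() { }
+/// ```
+///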
+/// ### Readiness operations
+///
+/// The only readiness operations that are guaranteed to be present on all
+/// supported platforms are [`readable`] and [`writable`]. All other readiness
+/// operations may have false negatives and as such should be considered
+/// **hints**. This means that if a socket is registered with [`readable`],
+/// [`error`], and [`hup`] interest, and either an error or hup is received, a
+/// readiness event will be generated for the socket, but it **may** only
+/// include `readable` readiness. Also note that, given the potential for
+/// spurious events, receiving a readiness event with `hup` or `error` doesn't
+/// actually mean that a `read` on the socket will return a result matching the
+/// readiness event.
+///
+/// In other words, portable programs that explicitly check for [`hup`] or
+/// [`error`] readiness should be doing so as an **optimization** and always be
+/// able to handle an error or HUP situation when performing the actual read
+/// operation.
+///
+/// [`readable`]: struct.Ready.html#method.readable
+/// [`writable`]: struct.Ready.html#method.writable
+/// [`error`]: unix/struct.UnixReady.html#method.error
+/// [`hup`]: unix/struct.UnixReady.html#method.hup
+///
+/// ### Registering handles
+///
+/// Unless otherwise noted, it should be assumed that types implementing
+/// [`Evented`] will never become ready unless they are registered with `Poll`.
+///
+/// For example:
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Poll, Ready, PollOpt, Token};
+/// use mio::net::TcpStream;
+/// use std::time::Duration;
+/// use std::thread;
+///
+/// let sock = TcpStream::connect(&"216.58.193.100:80".parse()?)?;
+///
+/// thread::sleep(Duration::from_secs(1));
+///
+/// let poll = Poll::new()?;
+///
+/// // The connect is not guaranteed to have started until it is registered at
+/// // this point
+/// poll.register(&sock, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// # Implementation notes
+///
+/// `Poll` is backed by the selector provided by the operating system.
+///
+/// | OS | Selector |
+/// |------------|-----------|
+/// | Linux | [epoll] |
+/// | OS X, iOS | [kqueue] |
+/// | Windows | [IOCP] |
+/// | FreeBSD | [kqueue] |
+/// | Android | [epoll] |
+///
+/// On all supported platforms, socket operations are handled by using the
+/// system selector. Platform specific extensions (e.g. [`EventedFd`]) allow
+/// accessing other features provided by individual system selectors. For
+/// example, Linux's [`signalfd`] feature can be used by registering the FD with
+/// `Poll` via [`EventedFd`].
+///
+/// On all platforms except windows, a call to [`Poll::poll`] is mostly just a
+/// direct call to the system selector. However, [IOCP] uses a completion model
+/// instead of a readiness model. In this case, `Poll` must adapt the completion
+/// model to Mio's API. While non-trivial, the bridge layer is still quite
+/// efficient. The most expensive part is that calls to `read` and `write`
+/// require data to be copied into an intermediate buffer before it is passed
+/// to the kernel.
+///
+/// Notifications generated by [`SetReadiness`] are handled by an internal
+/// readiness queue. A single call to [`Poll::poll`] will collect events from
+/// both the system selector and the internal readiness queue.
+///
+/// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+/// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+/// [IOCP]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198(v=vs.85).aspx
+/// [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
+/// [`EventedFd`]: unix/struct.EventedFd.html
+/// [`SetReadiness`]: struct.SetReadiness.html
+/// [`Poll::poll`]: struct.Poll.html#method.poll
+pub struct Poll {
+ // Platform specific IO selector
+ selector: sys::Selector,
+
+ // Custom readiness queue
+ readiness_queue: ReadinessQueue,
+
+ // Use an atomic to first check if a full lock will be required. This is a
+ // fast-path check for single threaded cases avoiding the extra syscall
+ lock_state: AtomicUsize,
+
+ // Sequences concurrent calls to `Poll::poll`
+ lock: Mutex<()>,
+
+ // Wake up the next waiter
+ condvar: Condvar,
+}
+
+/// Handle to a user space `Poll` registration.
+///
+/// `Registration` allows implementing [`Evented`] for types that cannot work
+/// with the [system selector]. A `Registration` is always paired with a
+/// `SetReadiness`, which allows updating the registration's readiness state.
+/// When [`set_readiness`] is called and the `Registration` is associated with a
+/// [`Poll`] instance, a readiness event will be created and eventually returned
+/// by [`poll`].
+///
+/// A `Registration` / `SetReadiness` pair is created by calling
+/// [`Registration::new2`]. At this point, the registration is not being
+/// monitored by a [`Poll`] instance, so calls to `set_readiness` will not
+/// result in any readiness notifications.
+///
+/// `Registration` implements [`Evented`], so it can be used with [`Poll`] using
+/// the same [`register`], [`reregister`], and [`deregister`] functions used
+/// with TCP, UDP, etc... types. Once registered with [`Poll`], readiness state
+/// changes result in readiness events being dispatched to the [`Poll`] instance
+/// with which `Registration` is registered.
+///
+/// **Note**, before using `Registration` be sure to read the
+/// [`set_readiness`] documentation and the [portability] notes. The
+/// guarantees offered by `Registration` may be weaker than expected.
+///
+/// For high level documentation, see [`Poll`].
+///
+/// # Examples
+///
+/// ```
+/// use mio::{Ready, Registration, Poll, PollOpt, Token};
+/// use mio::event::Evented;
+///
+/// use std::io;
+/// use std::time::Instant;
+/// use std::thread;
+///
+/// pub struct Deadline {
+/// when: Instant,
+/// registration: Registration,
+/// }
+///
+/// impl Deadline {
+/// pub fn new(when: Instant) -> Deadline {
+/// let (registration, set_readiness) = Registration::new2();
+///
+/// thread::spawn(move || {
+/// let now = Instant::now();
+///
+/// if now < when {
+/// thread::sleep(when - now);
+/// }
+///
+/// set_readiness.set_readiness(Ready::readable());
+/// });
+///
+/// Deadline {
+/// when: when,
+/// registration: registration,
+/// }
+/// }
+///
+/// pub fn is_elapsed(&self) -> bool {
+/// Instant::now() >= self.when
+/// }
+/// }
+///
+/// impl Evented for Deadline {
+/// fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// self.registration.register(poll, token, interest, opts)
+/// }
+///
+/// fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// self.registration.reregister(poll, token, interest, opts)
+/// }
+///
+/// fn deregister(&self, poll: &Poll) -> io::Result<()> {
+/// poll.deregister(&self.registration)
+/// }
+/// }
+/// ```
+///
+/// [system selector]: struct.Poll.html#implementation-notes
+/// [`Poll`]: struct.Poll.html
+/// [`Registration::new2`]: struct.Registration.html#method.new2
+/// [`Evented`]: event/trait.Evented.html
+/// [`set_readiness`]: struct.SetReadiness.html#method.set_readiness
+/// [`register`]: struct.Poll.html#method.register
+/// [`reregister`]: struct.Poll.html#method.reregister
+/// [`deregister`]: struct.Poll.html#method.deregister
+/// [portability]: struct.Poll.html#portability
+pub struct Registration {
+ inner: RegistrationInner,
+}
+
+unsafe impl Send for Registration {}
+unsafe impl Sync for Registration {}
+
+/// Updates the readiness state of the associated `Registration`.
+///
+/// See [`Registration`] for more documentation on using `SetReadiness` and
+/// [`Poll`] for high level polling documentation.
+///
+/// [`Poll`]: struct.Poll.html
+/// [`Registration`]: struct.Registration.html
+#[derive(Clone)]
+pub struct SetReadiness {
+ inner: RegistrationInner,
+}
+
+unsafe impl Send for SetReadiness {}
+unsafe impl Sync for SetReadiness {}
+
+/// Used to associate an IO type with a Selector
+#[derive(Debug)]
+pub struct SelectorId {
+ id: AtomicUsize,
+}
+
+struct RegistrationInner {
+ // Unsafe pointer to the registration's node. The node is ref counted. This
+ // cannot "simply" be tracked by an Arc because `Poll::poll` has an implicit
+ // handle even though it isn't stored anywhere. In other words, `Poll::poll`
+ // needs to decrement the ref count before the node is freed.
+ node: *mut ReadinessNode,
+}
+
+#[derive(Clone)]
+struct ReadinessQueue {
+ inner: Arc<ReadinessQueueInner>,
+}
+
+unsafe impl Send for ReadinessQueue {}
+unsafe impl Sync for ReadinessQueue {}
+
+struct ReadinessQueueInner {
+ // Used to wake up `Poll` when readiness is set in another thread.
+ awakener: sys::Awakener,
+
+ // Head of the MPSC queue used to signal readiness to `Poll::poll`.
+ head_readiness: AtomicPtr<ReadinessNode>,
+
+ // Tail of the readiness queue.
+ //
+ // Only accessed by Poll::poll. Coordination will be handled by the poll fn
+ tail_readiness: UnsafeCell<*mut ReadinessNode>,
+
+ // Fake readiness node used to punctuate the end of the readiness queue.
+ // Before attempting to read from the queue, this node is inserted in order
+ // to partition the queue between nodes that are "owned" by the dequeue end
+ // and nodes that will be pushed on by producers.
+ end_marker: Box<ReadinessNode>,
+
+ // Similar to `end_marker`, but this node signals to producers that `Poll`
+ // has gone to sleep and must be woken up.
+ sleep_marker: Box<ReadinessNode>,
+
+ // Similar to `end_marker`, but the node signals that the queue is closed.
+ // This happens when `ReadinessQueue` is dropped and signals to producers that
+ // the nodes should no longer be pushed into the queue.
+ closed_marker: Box<ReadinessNode>,
+}
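+
+// Illustrative note (comment only): with producers pushing at `head_readiness`
+// and `Poll::poll` consuming from `tail_readiness`, an empty, awake queue has
+// head == tail == `end_marker`, while a queue prepared for sleep has
+// head == tail == `sleep_marker`; a producer that displaces `sleep_marker`
+// knows it must also fire the awakener, as described in the field docs above.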
+
+/// Node shared by a `Registration` / `SetReadiness` pair as well as the node
+/// queued into the MPSC channel.
+struct ReadinessNode {
+ // Node state, see struct docs for `ReadinessState`
+ //
+ // This variable is the primary point of coordination between all the
+ // various threads concurrently accessing the node.
+ state: AtomicState,
+
+ // The registration token cannot fit into the `state` variable, so it is
+ // broken out here. In order to atomically update both the state and token
+ // we have to jump through a few hoops.
+ //
+ // First, `state` includes `token_read_pos` and `token_write_pos`. These can
+ // either be 0, 1, or 2 which represent a token slot. `token_write_pos` is
+ // the token slot that contains the most up to date registration token.
+ // `token_read_pos` is the token slot that `poll` is currently reading from.
+ //
+ // When a call to `update` includes a different token than the one currently
+ // associated with the registration (token_write_pos), first an unused token
+ // slot is found. The unused slot is the one not represented by
+ // `token_read_pos` OR `token_write_pos`. The new token is written to this
+ // slot, then `state` is updated with the new `token_write_pos` value. This
+ // requires that there is only a *single* concurrent call to `update`.
+ //
+ // When `poll` reads a node state, it checks that `token_read_pos` matches
+ // `token_write_pos`. If they do not match, then it atomically updates
+ // `state` such that `token_read_pos` is set to `token_write_pos`. It will
+ // then read the token at the newly updated `token_read_pos`.
+ token_0: UnsafeCell<Token>,
+ token_1: UnsafeCell<Token>,
+ token_2: UnsafeCell<Token>,
+
+ // Used when the node is queued in the readiness linked list. Accessing
+ // this field requires winning the "queue" lock
+ next_readiness: AtomicPtr<ReadinessNode>,
+
+ // Ensures that there is only one concurrent call to `update`.
+ //
+ // Each call to `update` will attempt to swap `update_lock` from `false` to
+ // `true`. If the CAS succeeds, the thread has obtained the update lock. If
+ // the CAS fails, then the `update` call returns immediately and the update
+ // is discarded.
+ update_lock: AtomicBool,
+
+ // Pointer to Arc<ReadinessQueueInner>
+ readiness_queue: AtomicPtr<()>,
+
+ // Tracks the number of outstanding references to this node
+ // (`Registration`, `SetReadiness`, and the `Poll` instance).
+ ref_count: AtomicUsize,
+}
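+
+// Worked example of the token-slot scheme documented on the struct above
+// (comment only): if `token_read_pos == 0` and `token_write_pos == 1`, the
+// free slot is 2, so `update` writes the new token into `token_2` and then
+// publishes it by setting `token_write_pos = 2`; `poll` later advances
+// `token_read_pos` from 0 to 2 before reading the token.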
+
+/// Stores the ReadinessNode state in an AtomicUsize. This wrapper around the
+/// atomic variable handles encoding / decoding `ReadinessState` values.
+struct AtomicState {
+ inner: AtomicUsize,
+}
+
+const MASK_2: usize = 4 - 1;
+const MASK_4: usize = 16 - 1;
+const QUEUED_MASK: usize = 1 << QUEUED_SHIFT;
+const DROPPED_MASK: usize = 1 << DROPPED_SHIFT;
+
+const READINESS_SHIFT: usize = 0;
+const INTEREST_SHIFT: usize = 4;
+const POLL_OPT_SHIFT: usize = 8;
+const TOKEN_RD_SHIFT: usize = 12;
+const TOKEN_WR_SHIFT: usize = 14;
+const QUEUED_SHIFT: usize = 16;
+const DROPPED_SHIFT: usize = 17;
+
+/// Tracks all state for a single `ReadinessNode`. The state is packed into a
+/// `usize` variable from low to high bit as follows:
+///
+/// 4 bits: Registration current readiness
+/// 4 bits: Registration interest
+/// 4 bits: Poll options
+/// 2 bits: Token position currently being read from by `poll`
+/// 2 bits: Token position last written to by `update`
+/// 1 bit: Queued flag, set when node is being pushed into MPSC queue.
+/// 1 bit: Dropped flag, set when all `Registration` handles have been dropped.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+struct ReadinessState(usize);
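+
+// A sketch of how the shifts and masks above combine (illustrative comment
+// only; the real accessors are methods on `ReadinessState` / `AtomicState`):
+//
+//     readiness  = (state >> READINESS_SHIFT) & MASK_4;  // 4-bit readiness
+//     interest   = (state >> INTEREST_SHIFT)  & MASK_4;  // 4-bit interest
+//     poll_opt   = (state >> POLL_OPT_SHIFT)  & MASK_4;  // 4-bit poll options
+//     token_read = (state >> TOKEN_RD_SHIFT)  & MASK_2;  // 2-bit token slot
+//     queued     = state & QUEUED_MASK != 0;             // queued flag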
+
+/// Returned by `dequeue_node`. Represents the different states as described by
+/// the queue documentation on 1024cores.net.
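+/// (Interpretation: `Data` carries a successfully dequeued node, `Empty` means
+/// there is nothing to process, and `Inconsistent` means a producer has swapped
+/// the head but not yet linked its node, so the consumer should back off and
+/// retry.)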
+enum Dequeue {
+ Data(*mut ReadinessNode),
+ Empty,
+ Inconsistent,
+}
+
+const AWAKEN: Token = Token(usize::MAX);
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
+/*
+ *
+ * ===== Poll =====
+ *
+ */
+
+impl Poll {
+ /// Return a new `Poll` handle.
+ ///
+ /// This function will make a syscall to the operating system to create the
+ /// system selector. If this syscall fails, `Poll::new` will return with the
+ /// error.
+ ///
+ /// See [struct] level docs for more details.
+ ///
+ /// [struct]: struct.Poll.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Poll, Events};
+ /// use std::time::Duration;
+ ///
+ /// let poll = match Poll::new() {
+ /// Ok(poll) => poll,
+ /// Err(e) => panic!("failed to create Poll instance; err={:?}", e),
+ /// };
+ ///
+ /// // Create a structure to receive polled events
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Wait for events, but none will be received because no `Evented`
+ /// // handles have been registered with this `Poll` instance.
+ /// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?;
+ /// assert_eq!(n, 0);
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn new() -> io::Result<Poll> {
+ is_send::<Poll>();
+ is_sync::<Poll>();
+
+ let poll = Poll {
+ selector: sys::Selector::new()?,
+ readiness_queue: ReadinessQueue::new()?,
+ lock_state: AtomicUsize::new(0),
+ lock: Mutex::new(()),
+ condvar: Condvar::new(),
+ };
+
+ // Register the notification wakeup FD with the IO poller
+ poll.readiness_queue.inner.awakener.register(&poll, AWAKEN, Ready::readable(), PollOpt::edge())?;
+
+ Ok(poll)
+ }
+
+ /// Register an `Evented` handle with the `Poll` instance.
+ ///
+ /// Once registered, the `Poll` instance will monitor the `Evented` handle
+ /// for readiness state changes. When it notices a state change, it will
+ /// return a readiness event for the handle the next time [`poll`] is
+ /// called.
+ ///
+ /// See the [`struct`] docs for a high level overview.
+ ///
+ /// # Arguments
+ ///
+ /// `handle: &E: Evented`: This is the handle that the `Poll` instance
+ /// should monitor for readiness state changes.
+ ///
+ /// `token: Token`: The caller picks a token to associate with the socket.
+ /// When [`poll`] returns an event for the handle, this token is included.
+ /// This allows the caller to map the event to its handle. The token
+ /// associated with the `Evented` handle can be changed at any time by
+ /// calling [`reregister`].
+ ///
+ /// `token` cannot be `Token(usize::MAX)` as it is reserved for internal
+ /// usage.
+ ///
+ /// See documentation on [`Token`] for an example showing how to pick
+ /// [`Token`] values.
+ ///
+ /// `interest: Ready`: Specifies which operations `Poll` should monitor for
+ /// readiness. `Poll` will only return readiness events for operations
+ /// specified by this argument.
+ ///
+ /// If a socket is registered with readable interest and the socket becomes
+ /// writable, no event will be returned from [`poll`].
+ ///
+ /// The readiness interest for an `Evented` handle can be changed at any
+ /// time by calling [`reregister`].
+ ///
+ /// `opts: PollOpt`: Specifies the registration options. The most common
+ /// options being [`level`] for level-triggered events, [`edge`] for
+ /// edge-triggered events, and [`oneshot`].
+ ///
+ /// The registration options for an `Evented` handle can be changed at any
+ /// time by calling [`reregister`].
+ ///
+ /// # Notes
+ ///
+ /// Unless otherwise specified, the caller should assume that once an
+ /// `Evented` handle is registered with a `Poll` instance, it is bound to
+ /// that `Poll` instance for the lifetime of the `Evented` handle. This
+ /// remains true even if the `Evented` handle is deregistered from the poll
+ /// instance using [`deregister`].
+ ///
+ /// This function is **thread safe**. It can be called concurrently from
+ /// multiple threads.
+ ///
+ /// [`struct`]: #
+ /// [`reregister`]: #method.reregister
+ /// [`deregister`]: #method.deregister
+ /// [`poll`]: #method.poll
+ /// [`level`]: struct.PollOpt.html#method.level
+ /// [`edge`]: struct.PollOpt.html#method.edge
+ /// [`oneshot`]: struct.PollOpt.html#method.oneshot
+ /// [`Token`]: struct.Token.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll, Ready, PollOpt, Token};
+ /// use mio::net::TcpStream;
+ /// use std::time::{Duration, Instant};
+ ///
+ /// let poll = Poll::new()?;
+ /// let socket = TcpStream::connect(&"216.58.193.100:80".parse()?)?;
+ ///
+ /// // Register the socket with `poll`
+ /// poll.register(&socket, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let start = Instant::now();
+ /// let timeout = Duration::from_millis(500);
+ ///
+ /// loop {
+ /// let elapsed = start.elapsed();
+ ///
+ /// if elapsed >= timeout {
+ /// // Connection timed out
+ /// return Ok(());
+ /// }
+ ///
+ /// let remaining = timeout - elapsed;
+ /// poll.poll(&mut events, Some(remaining))?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) {
+ /// // Something (probably) happened on the socket.
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn register<E: ?Sized>(&self, handle: &E, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>
+ where E: Evented
+ {
+ validate_args(token)?;
+
+ /*
+ * Undefined behavior:
+ * - Reusing a token with a different `Evented` without deregistering
+ * (or closing) the original `Evented`.
+ */
+ trace!("registering with poller");
+
+ // Register interests for this socket
+ handle.register(self, token, interest, opts)?;
+
+ Ok(())
+ }
+
+ /// Re-register an `Evented` handle with the `Poll` instance.
+ ///
+ /// Re-registering an `Evented` handle allows changing the details of the
+ /// registration. Specifically, it allows updating the associated `token`,
+ /// `interest`, and `opts` specified in previous `register` and `reregister`
+ /// calls.
+ ///
+ /// The `reregister` arguments fully override the previous values. In other
+ /// words, if a socket is registered with [`readable`] interest and the call
+ /// to `reregister` specifies [`writable`], then read interest is no longer
+ /// requested for the handle.
+ ///
+ /// The `Evented` handle must have previously been registered with this
+ /// instance of `Poll` otherwise the call to `reregister` will return with
+ /// an error.
+ ///
+ /// `token` cannot be `Token(usize::MAX)` as it is reserved for internal
+ /// usage.
+ ///
+ /// See the [`register`] documentation for details about the function
+ /// arguments and see the [`struct`] docs for a high level overview of
+ /// polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Poll, Ready, PollOpt, Token};
+ /// use mio::net::TcpStream;
+ ///
+ /// let poll = Poll::new()?;
+ /// let socket = TcpStream::connect(&"216.58.193.100:80".parse()?)?;
+ ///
+ /// // Register the socket with `poll`, requesting readable
+ /// poll.register(&socket, Token(0), Ready::readable(), PollOpt::edge())?;
+ ///
+ /// // Reregister the socket specifying a different token and write interest
+ /// // instead. `PollOpt::edge()` must be specified even though that value
+ /// // is not being changed.
+ /// poll.reregister(&socket, Token(2), Ready::writable(), PollOpt::edge())?;
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ ///
+ /// [`struct`]: #
+ /// [`register`]: #method.register
+ /// [`readable`]: struct.Ready.html#method.readable
+ /// [`writable`]: struct.Ready.html#method.writable
+ pub fn reregister<E: ?Sized>(&self, handle: &E, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>
+ where E: Evented
+ {
+ validate_args(token)?;
+
+ trace!("registering with poller");
+
+ // Register interests for this socket
+ handle.reregister(self, token, interest, opts)?;
+
+ Ok(())
+ }
+
+ /// Deregister an `Evented` handle with the `Poll` instance.
+ ///
+ /// When an `Evented` handle is deregistered, the `Poll` instance will
+ /// no longer monitor it for readiness state changes. Unlike disabling
+ /// handles with oneshot, deregistering clears up any internal resources
+ /// needed to track the handle.
+ ///
+ /// A handle can be passed back to `register` after it has been
+ /// deregistered; however, it must be passed back to the **same** `Poll`
+ /// instance.
+ ///
+ /// `Evented` handles are automatically deregistered when they are dropped.
+ /// It is common to never need to explicitly call `deregister`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll, Ready, PollOpt, Token};
+ /// use mio::net::TcpStream;
+ /// use std::time::Duration;
+ ///
+ /// let poll = Poll::new()?;
+ /// let socket = TcpStream::connect(&"216.58.193.100:80".parse()?)?;
+ ///
+ /// // Register the socket with `poll`
+ /// poll.register(&socket, Token(0), Ready::readable(), PollOpt::edge())?;
+ ///
+ /// poll.deregister(&socket)?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Set a timeout because this poll should never receive any events.
+ /// let n = poll.poll(&mut events, Some(Duration::from_secs(1)))?;
+ /// assert_eq!(0, n);
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn deregister<E: ?Sized>(&self, handle: &E) -> io::Result<()>
+ where E: Evented
+ {
+ trace!("deregistering handle with poller");
+
+ // Deregister interests for this socket
+ handle.deregister(self)?;
+
+ Ok(())
+ }
+
+ /// Wait for readiness events
+ ///
+ /// Blocks the current thread and waits for readiness events for any of the
+ /// `Evented` handles that have been registered with this `Poll` instance.
+ /// The function will block until either at least one readiness event has
+ /// been received or `timeout` has elapsed. A `timeout` of `None` means that
+ /// `poll` will block until a readiness event has been received.
+ ///
+ /// The supplied `events` will be cleared and newly received readiness events
+ /// will be pushed onto the end. At most `events.capacity()` events will be
+ /// returned. If there are further pending readiness events, they will be
+ /// returned on the next call to `poll`.
+ ///
+ /// A single call to `poll` may result in multiple readiness events being
+ /// returned for a single `Evented` handle. For example, if a TCP socket
+ /// becomes both readable and writable, it may be possible for a single
+ /// readiness event to be returned with both [`readable`] and [`writable`]
+ /// readiness **OR** two separate events may be returned, one with
+ /// [`readable`] set and one with [`writable`] set.
+ ///
+ /// Note that the `timeout` will be rounded up to the system clock
+ /// granularity (usually 1ms), and kernel scheduling delays mean that
+ /// the blocking interval may be overrun by a small amount.
+ ///
+ /// `poll` returns the number of readiness events that have been pushed into
+ /// `events`, or `Err` when an error has been encountered with the system
+ /// selector. The return value is deprecated and will be removed in 0.7.0, as
+ /// is accessing events by index: handling one event can cause additional
+ /// events to be inserted, which makes sequential index-based access
+ /// unreliable. Use the iterator API instead. See [`iter`].
+ ///
+ /// See the [struct] level documentation for a higher level discussion of
+ /// polling.
+ ///
+ /// [`readable`]: struct.Ready.html#method.readable
+ /// [`writable`]: struct.Ready.html#method.writable
+ /// [struct]: #
+ /// [`iter`]: struct.Events.html#method.iter
+ ///
+ /// # Examples
+ ///
+ /// A basic example -- establishing a `TcpStream` connection.
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll, Ready, PollOpt, Token};
+ /// use mio::net::TcpStream;
+ ///
+ /// use std::net::{TcpListener, SocketAddr};
+ /// use std::thread;
+ ///
+ /// // Bind a server socket to connect to.
+ /// let addr: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let server = TcpListener::bind(&addr)?;
+ /// let addr = server.local_addr()?.clone();
+ ///
+ /// // Spawn a thread to accept the socket
+ /// thread::spawn(move || {
+ /// let _ = server.accept();
+ /// });
+ ///
+ /// // Construct a new `Poll` handle as well as the `Events` we'll store into
+ /// let poll = Poll::new()?;
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Connect the stream
+ /// let stream = TcpStream::connect(&addr)?;
+ ///
+ /// // Register the stream with `Poll`
+ /// poll.register(&stream, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+ ///
+ /// // Wait for the socket to become ready. This has to happen in a loop to
+ /// // handle spurious wakeups.
+ /// loop {
+ /// poll.poll(&mut events, None)?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) && event.readiness().is_writable() {
+ /// // The socket connected (probably, it could still be a spurious
+ /// // wakeup)
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ ///
+ /// [struct]: #
+ pub fn poll(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<usize> {
+ self.poll1(events, timeout, false)
+ }
+
+ /// Like `poll`, but may be interrupted by a signal
+ ///
+ /// If `poll` is interrupted while blocking, it will transparently retry the syscall. If you
+ /// want to handle signals yourself, however, use `poll_interruptible`.
+ pub fn poll_interruptible(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<usize> {
+ self.poll1(events, timeout, true)
+ }
+
+ fn poll1(&self, events: &mut Events, mut timeout: Option<Duration>, interruptible: bool) -> io::Result<usize> {
+ let zero = Some(Duration::from_millis(0));
+
+ // At a high level, the synchronization strategy is to acquire access to
+ // the critical section by transitioning the atomic from unlocked ->
+ // locked. If the attempt fails, the thread will wait on the condition
+ // variable.
+ //
+ // # Some more detail
+ //
+ // The `lock_state` atomic usize combines:
+ //
+ // - locked flag, stored in the least significant bit
+ // - number of waiting threads, stored in the rest of the bits.
+ //
+ // When a thread transitions the locked flag from 0 -> 1, it has
+ // obtained access to the critical section.
+ //
+ // When entering `poll`, a compare-and-swap from 0 -> 1 is attempted.
+ // This is a fast path for the case when there are no concurrent calls
+ // to poll, which is very common.
+ //
+ // On failure, the mutex is locked, and the thread attempts to increment
+ // the number of waiting threads component of `lock_state`. If this is
+ // successfully done while the locked flag is set, then the thread can
+ // wait on the condition variable.
+ //
+ // When a thread exits the critical section, it unsets the locked flag.
+ // If there are any waiters, which is atomically determined while
+ // unsetting the locked flag, then the condvar is notified.
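+ //
+ // For example (illustrative): `lock_state == 0` is unlocked with no
+ // waiters, `1` is locked with no waiters, and `5` (binary 101) is
+ // locked with two threads waiting on the condvar.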
+
+ let mut curr = self.lock_state.compare_and_swap(0, 1, SeqCst);
+
+ if 0 != curr {
+ // Enter slower path
+ let mut lock = self.lock.lock().unwrap();
+ let mut inc = false;
+
+ loop {
+ if curr & 1 == 0 {
+ // The lock is currently free, attempt to grab it
+ let mut next = curr | 1;
+
+ if inc {
+ // The waiter count has previously been incremented, so
+ // decrement it here
+ next -= 2;
+ }
+
+ let actual = self.lock_state.compare_and_swap(curr, next, SeqCst);
+
+ if actual != curr {
+ curr = actual;
+ continue;
+ }
+
+ // Lock acquired, break from the loop
+ break;
+ }
+
+ if timeout == zero {
+ if inc {
+ self.lock_state.fetch_sub(2, SeqCst);
+ }
+
+ return Ok(0);
+ }
+
+ // The lock is currently held, so wait for it to become
+ // free. If the waiter count hasn't been incremented yet, do
+ // so now
+ if !inc {
+ let next = curr.checked_add(2).expect("overflow");
+ let actual = self.lock_state.compare_and_swap(curr, next, SeqCst);
+
+ if actual != curr {
+ curr = actual;
+ continue;
+ }
+
+ // Track that the waiter count has been incremented for
+ // this thread and fall through to the condvar waiting
+ inc = true;
+ }
+
+ lock = match timeout {
+ Some(to) => {
+ let now = Instant::now();
+
+ // Wait to be notified
+ let (l, _) = self.condvar.wait_timeout(lock, to).unwrap();
+
+ // See how much time was elapsed in the wait
+ let elapsed = now.elapsed();
+
+ // Update `timeout` to reflect how much time is left to
+ // wait.
+ if elapsed >= to {
+ timeout = zero;
+ } else {
+ // Update the timeout
+ timeout = Some(to - elapsed);
+ }
+
+ l
+ }
+ None => {
+ self.condvar.wait(lock).unwrap()
+ }
+ };
+
+ // Reload the state
+ curr = self.lock_state.load(SeqCst);
+
+ // Try to lock again...
+ }
+ }
+
+ let ret = self.poll2(events, timeout, interruptible);
+
+ // Release the lock
+ if 1 != self.lock_state.fetch_and(!1, Release) {
+ // Acquire the mutex
+ let _lock = self.lock.lock().unwrap();
+
+ // There is at least one waiting thread, so notify one
+ self.condvar.notify_one();
+ }
+
+ ret
+ }
+
+ #[inline]
+ #[cfg_attr(feature = "cargo-clippy", allow(clippy::if_same_then_else))]
+ fn poll2(&self, events: &mut Events, mut timeout: Option<Duration>, interruptible: bool) -> io::Result<usize> {
+ // Compute the timeout value passed to the system selector. If the
+ // readiness queue has pending nodes, we still want to poll the system
+ // selector for new events, but we don't want to block the thread to
+ // wait for new events.
+ if timeout == Some(Duration::from_millis(0)) {
+ // If blocking is not requested, then there is no need to prepare
+ // the queue for sleep
+ //
+ // The sleep_marker should be removed by readiness_queue.poll().
+ } else if self.readiness_queue.prepare_for_sleep() {
+ // The readiness queue is empty. The call to `prepare_for_sleep`
+ // inserts `sleep_marker` into the queue. This signals to any
+ // threads setting readiness that the `Poll::poll` is going to
+ // sleep, so the awakener should be used.
+ } else {
+ // The readiness queue is not empty, so do not block the thread.
+ timeout = Some(Duration::from_millis(0));
+ }
+
+ loop {
+ let now = Instant::now();
+ // First get selector events
+ let res = self.selector.select(&mut events.inner, AWAKEN, timeout);
+ match res {
+ Ok(true) => {
+ // Some awakeners require reading from a FD.
+ self.readiness_queue.inner.awakener.cleanup();
+ break;
+ }
+ Ok(false) => break,
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted && !interruptible => {
+ // Interrupted by a signal; update timeout if necessary and retry
+ if let Some(to) = timeout {
+ let elapsed = now.elapsed();
+ if elapsed >= to {
+ break;
+ } else {
+ timeout = Some(to - elapsed);
+ }
+ }
+ }
+ Err(e) => return Err(e),
+ }
+ }
+
+ // Poll custom event queue
+ self.readiness_queue.poll(&mut events.inner);
+
+ // Return number of polled events
+ Ok(events.inner.len())
+ }
+}
+
+fn validate_args(token: Token) -> io::Result<()> {
+ if token == AWAKEN {
+ return Err(io::Error::new(io::ErrorKind::Other, "invalid token"));
+ }
+
+ Ok(())
+}
+
+impl fmt::Debug for Poll {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Poll")
+ .finish()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for Poll {
+ fn as_raw_fd(&self) -> RawFd {
+ self.selector.as_raw_fd()
+ }
+}
+
+/// A collection of readiness events.
+///
+/// `Events` is passed as an argument to [`Poll::poll`] and will be used to
+/// receive any new readiness events received since the last poll. Usually, a
+/// single `Events` instance is created at the same time as a [`Poll`] and
+/// reused on each call to [`Poll::poll`].
+///
+/// See [`Poll`] for more documentation on polling.
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let poll = Poll::new()?;
+///
+/// assert_eq!(0, events.len());
+///
+/// // Register `Evented` handles with `poll`
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in &events {
+/// println!("event={:?}", event);
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// [`Poll::poll`]: struct.Poll.html#method.poll
+/// [`Poll`]: struct.Poll.html
+pub struct Events {
+ inner: sys::Events,
+}
+
+/// [`Events`] iterator.
+///
+/// This struct is created by the [`iter`] method on [`Events`].
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let poll = Poll::new()?;
+///
+/// // Register handles with `poll`
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in events.iter() {
+/// println!("event={:?}", event);
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// [`Events`]: struct.Events.html
+/// [`iter`]: struct.Events.html#method.iter
+#[derive(Debug, Clone)]
+pub struct Iter<'a> {
+ inner: &'a Events,
+ pos: usize,
+}
+
+/// Owned [`Events`] iterator.
+///
+/// This struct is created by the `into_iter` method on [`Events`].
+///
+/// # Examples
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let poll = Poll::new()?;
+///
+/// // Register handles with `poll`
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in events {
+/// println!("event={:?}", event);
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+/// [`Events`]: struct.Events.html
+#[derive(Debug)]
+pub struct IntoIter {
+ inner: Events,
+ pos: usize,
+}
+
+impl Events {
+ /// Return a new `Events` capable of holding up to `capacity` events.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ ///
+ /// assert_eq!(1024, events.capacity());
+ /// ```
+ pub fn with_capacity(capacity: usize) -> Events {
+ Events {
+ inner: sys::Events::with_capacity(capacity),
+ }
+ }
+
+ #[deprecated(since="0.6.10", note="Index access removed in favor of iterator only API.")]
+ #[doc(hidden)]
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.inner.get(idx)
+ }
+
+ #[doc(hidden)]
+ #[deprecated(since="0.6.10", note="Index access removed in favor of iterator only API.")]
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ /// Returns the number of `Event` values that `self` can hold.
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ ///
+ /// assert_eq!(1024, events.capacity());
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.inner.capacity()
+ }
+
+ /// Returns `true` if `self` contains no `Event` values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ ///
+ /// assert!(events.is_empty());
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+
+ /// Returns an iterator over the `Event` values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll};
+ /// use std::time::Duration;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let poll = Poll::new()?;
+ ///
+ /// // Register handles with `poll`
+ ///
+ /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ ///
+ /// for event in events.iter() {
+ /// println!("event={:?}", event);
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn iter(&self) -> Iter {
+ Iter {
+ inner: self,
+ pos: 0
+ }
+ }
+
+ /// Clears all `Event` values from the container.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Poll};
+ /// use std::time::Duration;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let poll = Poll::new()?;
+ ///
+ /// // Register handles with `poll`
+ /// for _ in 0..2 {
+ /// events.clear();
+ /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ ///
+ /// for event in events.iter() {
+ /// println!("event={:?}", event);
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn clear(&mut self) {
+ self.inner.clear();
+ }
+}
+
+impl<'a> IntoIterator for &'a Events {
+ type Item = Event;
+ type IntoIter = Iter<'a>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a> Iterator for Iter<'a> {
+ type Item = Event;
+
+ fn next(&mut self) -> Option<Event> {
+ let ret = self.inner.inner.get(self.pos);
+ self.pos += 1;
+ ret
+ }
+}
+
+impl IntoIterator for Events {
+ type Item = Event;
+ type IntoIter = IntoIter;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter {
+ inner: self,
+ pos: 0,
+ }
+ }
+}
+
+impl Iterator for IntoIter {
+ type Item = Event;
+
+ fn next(&mut self) -> Option<Event> {
+ let ret = self.inner.inner.get(self.pos);
+ self.pos += 1;
+ ret
+ }
+}
+
+impl fmt::Debug for Events {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Events")
+ .field("capacity", &self.capacity())
+ .finish()
+ }
+}
+
+// ===== Accessors for internal usage =====
+
+pub fn selector(poll: &Poll) -> &sys::Selector {
+ &poll.selector
+}
+
+/*
+ *
+ * ===== Registration =====
+ *
+ */
+
+// TODO: get rid of this, windows depends on it for now
+#[allow(dead_code)]
+pub fn new_registration(poll: &Poll, token: Token, ready: Ready, opt: PollOpt)
+ -> (Registration, SetReadiness)
+{
+ Registration::new_priv(poll, token, ready, opt)
+}
+
+impl Registration {
+ /// Create and return a new `Registration` and the associated
+ /// `SetReadiness`.
+ ///
+ /// See [struct] documentation for more detail and [`Poll`]
+ /// for high level documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Ready, Registration, Poll, PollOpt, Token};
+ /// use std::thread;
+ ///
+ /// let (registration, set_readiness) = Registration::new2();
+ ///
+ /// thread::spawn(move || {
+ /// use std::time::Duration;
+ /// thread::sleep(Duration::from_millis(500));
+ ///
+ /// set_readiness.set_readiness(Ready::readable());
+ /// });
+ ///
+ /// let poll = Poll::new()?;
+ /// poll.register(&registration, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?;
+ ///
+ /// let mut events = Events::with_capacity(256);
+ ///
+ /// loop {
+ /// poll.poll(&mut events, None)?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) && event.readiness().is_readable() {
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ /// [struct]: #
+ /// [`Poll`]: struct.Poll.html
+ pub fn new2() -> (Registration, SetReadiness) {
+ // Allocate the registration node. The new node will have `ref_count`
+ // set to 2: one SetReadiness, one Registration.
+ let node = Box::into_raw(Box::new(ReadinessNode::new(
+ ptr::null_mut(), Token(0), Ready::empty(), PollOpt::empty(), 2)));
+
+ let registration = Registration {
+ inner: RegistrationInner {
+ node,
+ },
+ };
+
+ let set_readiness = SetReadiness {
+ inner: RegistrationInner {
+ node,
+ },
+ };
+
+ (registration, set_readiness)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use `new2` instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn new(poll: &Poll, token: Token, interest: Ready, opt: PollOpt)
+ -> (Registration, SetReadiness)
+ {
+ Registration::new_priv(poll, token, interest, opt)
+ }
+
+ // TODO: Get rid of this (windows depends on it for now)
+ fn new_priv(poll: &Poll, token: Token, interest: Ready, opt: PollOpt)
+ -> (Registration, SetReadiness)
+ {
+ is_send::<Registration>();
+ is_sync::<Registration>();
+ is_send::<SetReadiness>();
+ is_sync::<SetReadiness>();
+
+ // Clone handle to the readiness queue, this bumps the ref count
+ let queue = poll.readiness_queue.inner.clone();
+
+ // Convert to a *mut () pointer
+ let queue: *mut () = unsafe { mem::transmute(queue) };
+
+ // Allocate the registration node. The new node will have `ref_count`
+ // set to 3: one SetReadiness, one Registration, and one Poll handle.
+ let node = Box::into_raw(Box::new(ReadinessNode::new(
+ queue, token, interest, opt, 3)));
+
+ let registration = Registration {
+ inner: RegistrationInner {
+ node,
+ },
+ };
+
+ let set_readiness = SetReadiness {
+ inner: RegistrationInner {
+ node,
+ },
+ };
+
+ (registration, set_readiness)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use `Evented` impl")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn update(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.inner.update(poll, token, interest, opts)
+ }
+
+ #[deprecated(since = "0.6.5", note = "use `Poll::deregister` instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[doc(hidden)]
+ pub fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner.update(poll, Token(0), Ready::empty(), PollOpt::empty())
+ }
+}
+
+impl Evented for Registration {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.inner.update(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.inner.update(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner.update(poll, Token(0), Ready::empty(), PollOpt::empty())
+ }
+}
+
+impl Drop for Registration {
+ fn drop(&mut self) {
+ // `flag_as_dropped` toggles the `dropped` flag and notifies
+ // `Poll::poll` to release its handle (which is just decrementing
+ // the ref count).
+ if self.inner.state.flag_as_dropped() {
+ // Can't do anything if the queuing fails
+ let _ = self.inner.enqueue_with_wakeup();
+ }
+ }
+}
+
+impl fmt::Debug for Registration {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Registration")
+ .finish()
+ }
+}
+
+impl SetReadiness {
+ /// Returns the registration's current readiness.
+ ///
+ /// # Note
+ ///
+ /// There is no guarantee that `readiness` establishes any sort of memory
+ /// ordering. Any concurrent data access must be synchronized using another
+ /// strategy.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Registration, Ready};
+ ///
+ /// let (registration, set_readiness) = Registration::new2();
+ ///
+ /// assert!(set_readiness.readiness().is_empty());
+ ///
+ /// set_readiness.set_readiness(Ready::readable())?;
+ /// assert!(set_readiness.readiness().is_readable());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ pub fn readiness(&self) -> Ready {
+ self.inner.readiness()
+ }
+
+ /// Set the registration's readiness
+ ///
+ /// If the associated `Registration` is registered with a [`Poll`] instance
+ /// and has requested readiness events that include `ready`, then a future
+ /// call to [`Poll::poll`] will receive a readiness event representing the
+ /// readiness state change.
+ ///
+ /// # Note
+ ///
+ /// There is no guarantee that `readiness` establishes any sort of memory
+ /// ordering. Any concurrent data access must be synchronized using another
+ /// strategy.
+ ///
+ /// There is also no guarantee as to when the readiness event will be
+ /// delivered to poll. A best-effort attempt will be made to deliver it in a
+ /// "timely" fashion. For example, the following is **not** guaranteed to
+ /// work:
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Events, Registration, Ready, Poll, PollOpt, Token};
+ ///
+ /// let poll = Poll::new()?;
+ /// let (registration, set_readiness) = Registration::new2();
+ ///
+ /// poll.register(&registration,
+ /// Token(0),
+ /// Ready::readable(),
+ /// PollOpt::edge())?;
+ ///
+ /// // Set the readiness, then immediately poll to try to get the readiness
+ /// // event
+ /// set_readiness.set_readiness(Ready::readable())?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// poll.poll(&mut events, None)?;
+ ///
+ /// // There is NO guarantee that the following will work. It is possible
+ /// // that the readiness event will be delivered at a later time.
+ /// let event = events.get(0).unwrap();
+ /// assert_eq!(event.token(), Token(0));
+ /// assert!(event.readiness().is_readable());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ ///
+ /// # Examples
+ ///
+ /// A simple example, for a more elaborate example, see the [`Evented`]
+ /// documentation.
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn try_main() -> Result<(), Box<Error>> {
+ /// use mio::{Registration, Ready};
+ ///
+ /// let (registration, set_readiness) = Registration::new2();
+ ///
+ /// assert!(set_readiness.readiness().is_empty());
+ ///
+ /// set_readiness.set_readiness(Ready::readable())?;
+ /// assert!(set_readiness.readiness().is_readable());
+ /// # Ok(())
+ /// # }
+ /// #
+ /// # fn main() {
+ /// # try_main().unwrap();
+ /// # }
+ /// ```
+ ///
+ /// [`Registration`]: struct.Registration.html
+ /// [`Evented`]: event/trait.Evented.html#examples
+ /// [`Poll`]: struct.Poll.html
+ /// [`Poll::poll`]: struct.Poll.html#method.poll
+ pub fn set_readiness(&self, ready: Ready) -> io::Result<()> {
+ self.inner.set_readiness(ready)
+ }
+}
+
+impl fmt::Debug for SetReadiness {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("SetReadiness")
+ .finish()
+ }
+}
+
+impl RegistrationInner {
+ /// Get the registration's readiness.
+ fn readiness(&self) -> Ready {
+ self.state.load(Relaxed).readiness()
+ }
+
+ /// Set the registration's readiness.
+ ///
+ /// This function can be called concurrently by an arbitrary number of
+ /// SetReadiness handles.
+ fn set_readiness(&self, ready: Ready) -> io::Result<()> {
+ // Load the current atomic state.
+ let mut state = self.state.load(Acquire);
+ let mut next;
+
+ loop {
+ next = state;
+
+ if state.is_dropped() {
+ // Node is dropped, no more notifications
+ return Ok(());
+ }
+
+ // Update the readiness
+ next.set_readiness(ready);
+
+ // If the readiness is not blank, try to obtain permission to
+ // push the node into the readiness queue.
+ if !next.effective_readiness().is_empty() {
+ next.set_queued();
+ }
+
+ let actual = self.state.compare_and_swap(state, next, AcqRel);
+
+ if state == actual {
+ break;
+ }
+
+ state = actual;
+ }
+
+ if !state.is_queued() && next.is_queued() {
+ // We toggled the queued flag, making us responsible for queuing the
+ // node in the MPSC readiness queue.
+ self.enqueue_with_wakeup()?;
+ }
+
+ Ok(())
+ }
+
+ /// Update the registration details associated with the node
+ fn update(&self, poll: &Poll, token: Token, interest: Ready, opt: PollOpt) -> io::Result<()> {
+ // First, ensure poll instances match
+ //
+ // Load the queue pointer, `Relaxed` is sufficient here as only the
+ // pointer is being operated on. The actual memory is guaranteed to be
+ // visible via the `poll: &Poll` ref passed as an argument to the function.
+ let mut queue = self.readiness_queue.load(Relaxed);
+ let other: &*mut () = unsafe {
+ &*(&poll.readiness_queue.inner as *const _ as *const *mut ())
+ };
+ let other = *other;
+
+ debug_assert!(mem::size_of::<Arc<ReadinessQueueInner>>() == mem::size_of::<*mut ()>());
+
+ if queue.is_null() {
+ // Attempt to set the queue pointer. `Release` ordering synchronizes
+ // with `Acquire` in `ensure_with_wakeup`.
+ let actual = self.readiness_queue.compare_and_swap(
+ queue, other, Release);
+
+ if actual.is_null() {
+ // The CAS succeeded, this means that the node's ref count
+ // should be incremented to reflect that the `poll` function
+ // effectively owns the node as well.
+ //
+ // `Relaxed` ordering used for the same reason as in
+ // RegistrationInner::clone
+ self.ref_count.fetch_add(1, Relaxed);
+
+ // Note that the `queue` reference stored in our
+ // `readiness_queue` field is intended to be a strong reference,
+ // so now that we've successfully claimed the reference we bump
+ // the refcount here.
+ //
+ // Down below in `release_node` when we deallocate this
+ // `RegistrationInner` is where we'll transmute this back to an
+ // arc and decrement the reference count.
+ mem::forget(poll.readiness_queue.clone());
+ } else {
+ // The CAS failed, another thread set the queue pointer, so ensure
+ // that the pointer and `other` match
+ if actual != other {
+ return Err(io::Error::new(io::ErrorKind::Other, "registration handle associated with another `Poll` instance"));
+ }
+ }
+
+ queue = other;
+ } else if queue != other {
+ return Err(io::Error::new(io::ErrorKind::Other, "registration handle associated with another `Poll` instance"));
+ }
+
+ unsafe {
+ let actual = &poll.readiness_queue.inner as *const _ as *const usize;
+ debug_assert_eq!(queue as usize, *actual);
+ }
+
+ // The `update_lock` atomic is used as a flag ensuring only a single
+ // thread concurrently enters the `update` critical section. Any
+ // concurrent calls to update are discarded. If coordinated updates are
+ // required, the Mio user is responsible for handling that.
+ //
+ // Acquire / Release ordering is used on `update_lock` to ensure that
+ // data access to the `token_*` variables are scoped to the critical
+ // section.
+
+ // Acquire the update lock.
+ if self.update_lock.compare_and_swap(false, true, Acquire) {
+ // The lock is already held. Discard the update
+ return Ok(());
+ }
+
+ // Relaxed ordering is acceptable here as the only memory that needs to
+ // be visible as part of the update are the `token_*` variables, and
+ // ordering has already been handled by the `update_lock` access.
+ let mut state = self.state.load(Relaxed);
+ let mut next;
+
+ // Read the current token, again this memory has been ordered by the
+ // acquire on `update_lock`.
+ let curr_token_pos = state.token_write_pos();
+ let curr_token = unsafe { self::token(self, curr_token_pos) };
+
+ let mut next_token_pos = curr_token_pos;
+
+ // If the `update` call is changing the token, then compute the next
+ // available token slot and write the token there.
+ //
+ // Note that this computation is happening *outside* of the
+ // compare-and-swap loop. The update lock ensures that only a single
+ // thread could be mutating the write_token_position, so the
+ // `next_token_pos` will never need to be recomputed even if
+ // `token_read_pos` concurrently changes. This is because
+ // `token_read_pos` can ONLY concurrently change to the current value of
+ // `token_write_pos`, so `next_token_pos` will always remain valid.
+ if token != curr_token {
+ next_token_pos = state.next_token_pos();
+
+ // Update the token
+ match next_token_pos {
+ 0 => unsafe { *self.token_0.get() = token },
+ 1 => unsafe { *self.token_1.get() = token },
+ 2 => unsafe { *self.token_2.get() = token },
+ _ => unreachable!(),
+ }
+ }
+
+ // Now enter the compare-and-swap loop
+ loop {
+ next = state;
+
+ // The node is only dropped once all `Registration` handles are
+ // dropped. Only `Registration` can call `update`.
+ debug_assert!(!state.is_dropped());
+
+ // Update the write token position, this will also release the token
+ // to Poll::poll.
+ next.set_token_write_pos(next_token_pos);
+
+ // Update readiness and poll opts
+ next.set_interest(interest);
+ next.set_poll_opt(opt);
+
+ // If there is effective readiness, the node will need to be queued
+ // for processing. This exact behavior is still TBD, so we are
+ // conservative for now and always fire.
+ //
+ // See https://github.com/carllerche/mio/issues/535.
+ if !next.effective_readiness().is_empty() {
+ next.set_queued();
+ }
+
+ // compare-and-swap the state values. Only `Release` is needed here.
+ // The `Release` ensures that `Poll::poll` will see the token
+ // update and the update function doesn't care about any other
+ // memory visibility.
+ let actual = self.state.compare_and_swap(state, next, Release);
+
+ if actual == state {
+ break;
+ }
+
+ // CAS failed, but `curr_token_pos` should not have changed given
+ // that we still hold the update lock.
+ debug_assert_eq!(curr_token_pos, actual.token_write_pos());
+
+ state = actual;
+ }
+
+ // Release the lock
+ self.update_lock.store(false, Release);
+
+ if !state.is_queued() && next.is_queued() {
+ // We are responsible for enqueuing the node.
+ enqueue_with_wakeup(queue, self)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl ops::Deref for RegistrationInner {
+ type Target = ReadinessNode;
+
+ fn deref(&self) -> &ReadinessNode {
+ unsafe { &*self.node }
+ }
+}
+
+impl Clone for RegistrationInner {
+ fn clone(&self) -> RegistrationInner {
+ // Using a relaxed ordering is alright here, as knowledge of the
+ // original reference prevents other threads from erroneously deleting
+ // the object.
+ //
+ // As explained in the [Boost documentation][1], Increasing the
+ // reference counter can always be done with memory_order_relaxed: New
+ // references to an object can only be formed from an existing
+ // reference, and passing an existing reference from one thread to
+ // another must already provide any required synchronization.
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ let old_size = self.ref_count.fetch_add(1, Relaxed);
+
+ // However we need to guard against massive refcounts in case someone
+ // is `mem::forget`ing Arcs. If we don't do this the count can overflow
+ // and users will use-after-free. We racily saturate to `isize::MAX` on
+ // the assumption that there aren't ~2 billion threads incrementing
+ // the reference count at once. This branch will never be taken in
+ // any realistic program.
+ //
+ // We abort because such a program is incredibly degenerate, and we
+ // don't care to support it.
+ if old_size & !MAX_REFCOUNT != 0 {
+ process::abort();
+ }
+
+ RegistrationInner {
+ node: self.node,
+ }
+ }
+}
+
+impl Drop for RegistrationInner {
+ fn drop(&mut self) {
+ // Only handles releasing from `Registration` and `SetReadiness`
+ // handles. Poll has to call this itself.
+ release_node(self.node);
+ }
+}
+
+/*
+ *
+ * ===== ReadinessQueue =====
+ *
+ */
+
+impl ReadinessQueue {
+ /// Create a new `ReadinessQueue`.
+ fn new() -> io::Result<ReadinessQueue> {
+ is_send::<Self>();
+ is_sync::<Self>();
+
+ let end_marker = Box::new(ReadinessNode::marker());
+ let sleep_marker = Box::new(ReadinessNode::marker());
+ let closed_marker = Box::new(ReadinessNode::marker());
+
+ let ptr = &*end_marker as *const _ as *mut _;
+
+ Ok(ReadinessQueue {
+ inner: Arc::new(ReadinessQueueInner {
+ awakener: sys::Awakener::new()?,
+ head_readiness: AtomicPtr::new(ptr),
+ tail_readiness: UnsafeCell::new(ptr),
+ end_marker,
+ sleep_marker,
+ closed_marker,
+ })
+ })
+ }
+
+ /// Poll the queue for new events
+ fn poll(&self, dst: &mut sys::Events) {
+ // `until` is set to the first node that gets re-enqueued because it uses
+ // level-triggered notifications. This prevents an infinite loop where
+ // `Poll::poll` keeps dequeuing nodes that it just re-enqueued.
+ let mut until = ptr::null_mut();
+
+ if dst.len() == dst.capacity() {
+ // If `dst` is already full, the readiness queue won't be drained.
+ // This might result in `sleep_marker` staying in the queue and
+ // unnecessary pipe writes occurring.
+ self.inner.clear_sleep_marker();
+ }
+
+ 'outer:
+ while dst.len() < dst.capacity() {
+ // Dequeue a node. If the queue is in an inconsistent state, then
+ // stop polling. `Poll::poll` will be called again shortly and enter
+ // a syscall, which should be enough to enable the other thread to
+ // finish the queuing process.
+ let ptr = match unsafe { self.inner.dequeue_node(until) } {
+ Dequeue::Empty | Dequeue::Inconsistent => break,
+ Dequeue::Data(ptr) => ptr,
+ };
+
+ let node = unsafe { &*ptr };
+
+ // Read the node state with Acquire ordering. This allows reading
+ // the token variables.
+ let mut state = node.state.load(Acquire);
+ let mut next;
+ let mut readiness;
+ let mut opt;
+
+ loop {
+ // Build up any changes to the readiness node's state and
+ // attempt the CAS at the end
+ next = state;
+
+ // Given that the node was just read from the queue, the
+ // `queued` flag should still be set.
+ debug_assert!(state.is_queued());
+
+ // The dropped flag means we need to release the node and
+ // perform no further processing on it.
+ if state.is_dropped() {
+ // Release the node and continue
+ release_node(ptr);
+ continue 'outer;
+ }
+
+ // Process the node
+ readiness = state.effective_readiness();
+ opt = state.poll_opt();
+
+ if opt.is_edge() {
+ // Mark the node as dequeued
+ next.set_dequeued();
+
+ if opt.is_oneshot() && !readiness.is_empty() {
+ next.disarm();
+ }
+ } else if readiness.is_empty() {
+ next.set_dequeued();
+ }
+
+ // Ensure `token_read_pos` is set to `token_write_pos` so that
+ // we read the most up to date token value.
+ next.update_token_read_pos();
+
+ if state == next {
+ break;
+ }
+
+ let actual = node.state.compare_and_swap(state, next, AcqRel);
+
+ if actual == state {
+ break;
+ }
+
+ state = actual;
+ }
+
+ // If the queued flag is still set, then the node must be requeued.
+ // This typically happens when using level-triggered notifications.
+ if next.is_queued() {
+ if until.is_null() {
+ // We never want to see the node again
+ until = ptr;
+ }
+
+ // Requeue the node
+ self.inner.enqueue_node(node);
+ }
+
+ if !readiness.is_empty() {
+ // Get the token
+ let token = unsafe { token(node, next.token_read_pos()) };
+
+ // Push the event
+ dst.push_event(Event::new(readiness, token));
+ }
+ }
+ }
+
+ /// Prepare the queue for the `Poll::poll` thread to block in the system
+ /// selector. This involves changing `head_readiness` to `sleep_marker`.
+ /// Returns true if successful and `poll` can block.
+ fn prepare_for_sleep(&self) -> bool {
+ let end_marker = self.inner.end_marker();
+ let sleep_marker = self.inner.sleep_marker();
+
+ let tail = unsafe { *self.inner.tail_readiness.get() };
+
+ // If the tail is currently set to the sleep_marker, then check if the
+ // head is as well. If it is, then the queue is currently ready to
+ // sleep. If it is not, then the queue is not empty and there should be
+ // no sleeping.
+ if tail == sleep_marker {
+ return self.inner.head_readiness.load(Acquire) == sleep_marker;
+ }
+
+ // If the tail is not currently set to `end_marker`, then the queue is
+ // not empty.
+ if tail != end_marker {
+ return false;
+ }
+
+ // The sleep marker is *not* currently in the readiness queue.
+ //
+ // The sleep marker is only inserted in this function. It is also only
+ // inserted in the tail position. This is guaranteed by first checking
+ // that the end marker is in the tail position, pushing the sleep marker
+ // after the end marker, then removing the end marker.
+ //
+ // Before inserting a node into the queue, the next pointer has to be
+ // set to null. Again, this is only safe to do when the node is not
+ // currently in the queue, but we already have ensured this.
+ self.inner.sleep_marker.next_readiness.store(ptr::null_mut(), Relaxed);
+
+ let actual = self.inner.head_readiness.compare_and_swap(
+ end_marker, sleep_marker, AcqRel);
+
+ debug_assert!(actual != sleep_marker);
+
+ if actual != end_marker {
+ // The readiness queue is not empty
+ return false;
+ }
+
+ // The current tail should be pointing to `end_marker`
+ debug_assert!(unsafe { *self.inner.tail_readiness.get() == end_marker });
+ // The `end_marker` next pointer should be null
+ debug_assert!(self.inner.end_marker.next_readiness.load(Relaxed).is_null());
+
+ // Update tail pointer.
+ unsafe { *self.inner.tail_readiness.get() = sleep_marker; }
+ true
+ }
+}
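+
+// Rough sketch of how the pieces above are intended to fit together from
+// `Poll::poll` (names simplified; this is an illustration, not code compiled
+// here):
+//
+//     // Block in the system selector only if the queue agreed to sleep;
+//     // otherwise poll the selector without blocking.
+//     let timeout = if readiness_queue.prepare_for_sleep() {
+//         timeout
+//     } else {
+//         Some(Duration::from_millis(0))
+//     };
+//     selector.select(&mut events, AWAKEN, timeout)?;
+//
+//     // Then drain user-space readiness into the same event buffer.
+//     readiness_queue.poll(&mut events);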
+
+impl Drop for ReadinessQueue {
+ fn drop(&mut self) {
+ // Close the queue by enqueuing the closed node
+ self.inner.enqueue_node(&*self.inner.closed_marker);
+
+ loop {
+ // Free any nodes that happen to be left in the readiness queue
+ let ptr = match unsafe { self.inner.dequeue_node(ptr::null_mut()) } {
+ Dequeue::Empty => break,
+ Dequeue::Inconsistent => {
+ // This shouldn't be possible once all other handles to
+ // `ReadinessQueueInner` have been dropped; if it does happen,
+ // spin until the queue becomes consistent again.
+ continue;
+ }
+ Dequeue::Data(ptr) => ptr,
+ };
+
+ let node = unsafe { &*ptr };
+
+ let state = node.state.load(Acquire);
+
+ debug_assert!(state.is_queued());
+
+ release_node(ptr);
+ }
+ }
+}
+
+impl ReadinessQueueInner {
+ fn wakeup(&self) -> io::Result<()> {
+ self.awakener.wakeup()
+ }
+
+ /// Enqueue the given node onto the readiness queue and, if the queue was
+ /// previously marked as sleeping, wake up the thread blocked in `Poll::poll`.
+ fn enqueue_node_with_wakeup(&self, node: &ReadinessNode) -> io::Result<()> {
+ if self.enqueue_node(node) {
+ self.wakeup()?;
+ }
+
+ Ok(())
+ }
+
+ /// Push the node into the readiness queue
+ fn enqueue_node(&self, node: &ReadinessNode) -> bool {
+ // This is the 1024cores.net intrusive MPSC queue [1] "push" function.
+ let node_ptr = node as *const _ as *mut _;
+
+ // Relaxed is sufficient here; the write is published by the
+ // acquire/release swap of `head_readiness` below.
+ node.next_readiness.store(ptr::null_mut(), Relaxed);
+
+ unsafe {
+ let mut prev = self.head_readiness.load(Acquire);
+
+ loop {
+ if prev == self.closed_marker() {
+ debug_assert!(node_ptr != self.closed_marker());
+ // debug_assert!(node_ptr != self.end_marker());
+ debug_assert!(node_ptr != self.sleep_marker());
+
+ if node_ptr != self.end_marker() {
+ // The readiness queue is shut down, but the enqueue flag was
+ // set. This means that we are responsible for decrementing
+ // the readiness queue's ref count.
+ debug_assert!(node.ref_count.load(Relaxed) >= 2);
+ release_node(node_ptr);
+ }
+
+ return false;
+ }
+
+ let act = self.head_readiness.compare_and_swap(prev, node_ptr, AcqRel);
+
+ if prev == act {
+ break;
+ }
+
+ prev = act;
+ }
+
+ debug_assert!((*prev).next_readiness.load(Relaxed).is_null());
+
+ (*prev).next_readiness.store(node_ptr, Release);
+
+ prev == self.sleep_marker()
+ }
+ }
+
+ fn clear_sleep_marker(&self) {
+ let end_marker = self.end_marker();
+ let sleep_marker = self.sleep_marker();
+
+ unsafe {
+ let tail = *self.tail_readiness.get();
+
+ if tail != self.sleep_marker() {
+ return;
+ }
+
+ // The end marker is *not* currently in the readiness queue
+ // (since the sleep marker is).
+ self.end_marker.next_readiness.store(ptr::null_mut(), Relaxed);
+
+ let actual = self.head_readiness.compare_and_swap(
+ sleep_marker, end_marker, AcqRel);
+
+ debug_assert!(actual != end_marker);
+
+ if actual != sleep_marker {
+ // The readiness queue is not empty; we cannot remove the sleep
+ // marker.
+ return;
+ }
+
+ // Update the tail pointer.
+ *self.tail_readiness.get() = end_marker;
+ }
+ }
+
+ /// Must only be called in `poll` or `drop`
+ unsafe fn dequeue_node(&self, until: *mut ReadinessNode) -> Dequeue {
+ // This is the 1024cores.net intrusive MPSC queue [1] "pop" function
+ // with the modifications mentioned at the top of the file.
+ let mut tail = *self.tail_readiness.get();
+ let mut next = (*tail).next_readiness.load(Acquire);
+
+ if tail == self.end_marker() || tail == self.sleep_marker() || tail == self.closed_marker() {
+ if next.is_null() {
+ // Make sure the sleep marker is removed (as we are no longer
+ // sleeping).
+ self.clear_sleep_marker();
+
+ return Dequeue::Empty;
+ }
+
+ *self.tail_readiness.get() = next;
+ tail = next;
+ next = (*next).next_readiness.load(Acquire);
+ }
+
+ // `until` only needs to be checked at this point. It is either null,
+ // which will never match `tail`, or it is a node that was pushed by
+ // the current thread. This means that either:
+ //
+ // 1) the queue is inconsistent, which is handled explicitly,
+ // 2) we encounter `until` here and stop dequeuing, or
+ // 3) we pop a different node.
+ if tail == until {
+ return Dequeue::Empty;
+ }
+
+ if !next.is_null() {
+ *self.tail_readiness.get() = next;
+ return Dequeue::Data(tail);
+ }
+
+ if self.head_readiness.load(Acquire) != tail {
+ return Dequeue::Inconsistent;
+ }
+
+ // Push the stub node
+ self.enqueue_node(&*self.end_marker);
+
+ next = (*tail).next_readiness.load(Acquire);
+
+ if !next.is_null() {
+ *self.tail_readiness.get() = next;
+ return Dequeue::Data(tail);
+ }
+
+ Dequeue::Inconsistent
+ }
+
+ fn end_marker(&self) -> *mut ReadinessNode {
+ &*self.end_marker as *const ReadinessNode as *mut ReadinessNode
+ }
+
+ fn sleep_marker(&self) -> *mut ReadinessNode {
+ &*self.sleep_marker as *const ReadinessNode as *mut ReadinessNode
+ }
+
+ fn closed_marker(&self) -> *mut ReadinessNode {
+ &*self.closed_marker as *const ReadinessNode as *mut ReadinessNode
+ }
+}
+
+impl ReadinessNode {
+ /// Return a new `ReadinessNode` initialized with the given `ref_count`.
+ fn new(queue: *mut (),
+ token: Token,
+ interest: Ready,
+ opt: PollOpt,
+ ref_count: usize) -> ReadinessNode
+ {
+ ReadinessNode {
+ state: AtomicState::new(interest, opt),
+ // Only the first token is set, the others are initialized to 0
+ token_0: UnsafeCell::new(token),
+ token_1: UnsafeCell::new(Token(0)),
+ token_2: UnsafeCell::new(Token(0)),
+ next_readiness: AtomicPtr::new(ptr::null_mut()),
+ update_lock: AtomicBool::new(false),
+ readiness_queue: AtomicPtr::new(queue),
+ ref_count: AtomicUsize::new(ref_count),
+ }
+ }
+
+ fn marker() -> ReadinessNode {
+ ReadinessNode {
+ state: AtomicState::new(Ready::empty(), PollOpt::empty()),
+ token_0: UnsafeCell::new(Token(0)),
+ token_1: UnsafeCell::new(Token(0)),
+ token_2: UnsafeCell::new(Token(0)),
+ next_readiness: AtomicPtr::new(ptr::null_mut()),
+ update_lock: AtomicBool::new(false),
+ readiness_queue: AtomicPtr::new(ptr::null_mut()),
+ ref_count: AtomicUsize::new(0),
+ }
+ }
+
+ fn enqueue_with_wakeup(&self) -> io::Result<()> {
+ let queue = self.readiness_queue.load(Acquire);
+
+ if queue.is_null() {
+ // Not associated with a queue, nothing to do
+ return Ok(());
+ }
+
+ enqueue_with_wakeup(queue, self)
+ }
+}
+
+fn enqueue_with_wakeup(queue: *mut (), node: &ReadinessNode) -> io::Result<()> {
+ debug_assert!(!queue.is_null());
+ // This is ugly, but it avoids bumping the ref count: the `*mut ()` is
+ // really a raw `Arc<ReadinessQueueInner>`, so reborrowing it through a
+ // pointer-to-pointer cast lets us use it without cloning the `Arc`.
+ let queue: &Arc<ReadinessQueueInner> = unsafe {
+ &*(&queue as *const *mut () as *const Arc<ReadinessQueueInner>)
+ };
+ queue.enqueue_node_with_wakeup(node)
+}
+
+unsafe fn token(node: &ReadinessNode, pos: usize) -> Token {
+ match pos {
+ 0 => *node.token_0.get(),
+ 1 => *node.token_1.get(),
+ 2 => *node.token_2.get(),
+ _ => unreachable!(),
+ }
+}
+
+fn release_node(ptr: *mut ReadinessNode) {
+ unsafe {
+ // `AcqRel` synchronizes with other `release_node` functions and ensures
+ // that the drop happens after any reads / writes on other threads.
+ if (*ptr).ref_count.fetch_sub(1, AcqRel) != 1 {
+ return;
+ }
+
+ let node = Box::from_raw(ptr);
+
+ // Decrement the readiness_queue Arc
+ let queue = node.readiness_queue.load(Acquire);
+
+ if queue.is_null() {
+ return;
+ }
+
+ let _: Arc<ReadinessQueueInner> = mem::transmute(queue);
+ }
+}
+
+impl AtomicState {
+ fn new(interest: Ready, opt: PollOpt) -> AtomicState {
+ let state = ReadinessState::new(interest, opt);
+
+ AtomicState {
+ inner: AtomicUsize::new(state.into()),
+ }
+ }
+
+ /// Loads the current `ReadinessState`
+ fn load(&self, order: Ordering) -> ReadinessState {
+ self.inner.load(order).into()
+ }
+
+ /// Stores a state if the current state is the same as `current`.
+ fn compare_and_swap(&self, current: ReadinessState, new: ReadinessState, order: Ordering) -> ReadinessState {
+ self.inner.compare_and_swap(current.into(), new.into(), order).into()
+ }
+
+ // Returns `true` if the node should be queued
+ fn flag_as_dropped(&self) -> bool {
+ let prev: ReadinessState = self.inner.fetch_or(DROPPED_MASK | QUEUED_MASK, Release).into();
+ // The flag should not have been previously set
+ debug_assert!(!prev.is_dropped());
+
+ !prev.is_queued()
+ }
+}
+
+impl ReadinessState {
+ // Create a `ReadinessState` initialized with the provided arguments
+ #[inline]
+ fn new(interest: Ready, opt: PollOpt) -> ReadinessState {
+ let interest = event::ready_as_usize(interest);
+ let opt = event::opt_as_usize(opt);
+
+ debug_assert!(interest <= MASK_4);
+ debug_assert!(opt <= MASK_4);
+
+ let mut val = interest << INTEREST_SHIFT;
+ val |= opt << POLL_OPT_SHIFT;
+
+ ReadinessState(val)
+ }
+
+ #[inline]
+ fn get(self, mask: usize, shift: usize) -> usize{
+ (self.0 >> shift) & mask
+ }
+
+ #[inline]
+ fn set(&mut self, val: usize, mask: usize, shift: usize) {
+ self.0 = (self.0 & !(mask << shift)) | (val << shift)
+ }
+
+ /// Get the readiness
+ #[inline]
+ fn readiness(self) -> Ready {
+ let v = self.get(MASK_4, READINESS_SHIFT);
+ event::ready_from_usize(v)
+ }
+
+ #[inline]
+ fn effective_readiness(self) -> Ready {
+ self.readiness() & self.interest()
+ }
+
+ /// Set the readiness
+ #[inline]
+ fn set_readiness(&mut self, v: Ready) {
+ self.set(event::ready_as_usize(v), MASK_4, READINESS_SHIFT);
+ }
+
+ /// Get the interest
+ #[inline]
+ fn interest(self) -> Ready {
+ let v = self.get(MASK_4, INTEREST_SHIFT);
+ event::ready_from_usize(v)
+ }
+
+ /// Set the interest
+ #[inline]
+ fn set_interest(&mut self, v: Ready) {
+ self.set(event::ready_as_usize(v), MASK_4, INTEREST_SHIFT);
+ }
+
+ #[inline]
+ fn disarm(&mut self) {
+ self.set_interest(Ready::empty());
+ }
+
+ /// Get the poll options
+ #[inline]
+ fn poll_opt(self) -> PollOpt {
+ let v = self.get(MASK_4, POLL_OPT_SHIFT);
+ event::opt_from_usize(v)
+ }
+
+ /// Set the poll options
+ #[inline]
+ fn set_poll_opt(&mut self, v: PollOpt) {
+ self.set(event::opt_as_usize(v), MASK_4, POLL_OPT_SHIFT);
+ }
+
+ #[inline]
+ fn is_queued(self) -> bool {
+ self.0 & QUEUED_MASK == QUEUED_MASK
+ }
+
+ /// Set the queued flag
+ #[inline]
+ fn set_queued(&mut self) {
+ // Dropped nodes should never be queued
+ debug_assert!(!self.is_dropped());
+ self.0 |= QUEUED_MASK;
+ }
+
+ #[inline]
+ fn set_dequeued(&mut self) {
+ debug_assert!(self.is_queued());
+ self.0 &= !QUEUED_MASK
+ }
+
+ #[inline]
+ fn is_dropped(self) -> bool {
+ self.0 & DROPPED_MASK == DROPPED_MASK
+ }
+
+ #[inline]
+ fn token_read_pos(self) -> usize {
+ self.get(MASK_2, TOKEN_RD_SHIFT)
+ }
+
+ #[inline]
+ fn token_write_pos(self) -> usize {
+ self.get(MASK_2, TOKEN_WR_SHIFT)
+ }
+
+ #[inline]
+ fn next_token_pos(self) -> usize {
+ let rd = self.token_read_pos();
+ let wr = self.token_write_pos();
+
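+ // The three token slots behave like a small triple buffer: the next write
+ // goes to the slot that is neither currently readable (`rd`) nor the one
+ // most recently written (`wr`), so a concurrent reader never observes a
+ // half-written token. For example, with `wr == 0` and `rd == 1` the next
+ // write position is slot 2.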
+ match wr {
+ 0 => {
+ match rd {
+ 1 => 2,
+ 2 => 1,
+ 0 => 1,
+ _ => unreachable!(),
+ }
+ }
+ 1 => {
+ match rd {
+ 0 => 2,
+ 2 => 0,
+ 1 => 2,
+ _ => unreachable!(),
+ }
+ }
+ 2 => {
+ match rd {
+ 0 => 1,
+ 1 => 0,
+ 2 => 0,
+ _ => unreachable!(),
+ }
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ #[inline]
+ fn set_token_write_pos(&mut self, val: usize) {
+ self.set(val, MASK_2, TOKEN_WR_SHIFT);
+ }
+
+ #[inline]
+ fn update_token_read_pos(&mut self) {
+ let val = self.token_write_pos();
+ self.set(val, MASK_2, TOKEN_RD_SHIFT);
+ }
+}
+
+impl From<ReadinessState> for usize {
+ fn from(src: ReadinessState) -> usize {
+ src.0
+ }
+}
+
+impl From<usize> for ReadinessState {
+ fn from(src: usize) -> ReadinessState {
+ ReadinessState(src)
+ }
+}
+
+fn is_send<T: Send>() {}
+fn is_sync<T: Sync>() {}
+
+impl SelectorId {
+ pub fn new() -> SelectorId {
+ SelectorId {
+ id: AtomicUsize::new(0),
+ }
+ }
+
+ pub fn associate_selector(&self, poll: &Poll) -> io::Result<()> {
+ let selector_id = self.id.load(Ordering::SeqCst);
+
+ if selector_id != 0 && selector_id != poll.selector.id() {
+ Err(io::Error::new(io::ErrorKind::Other, "socket already registered"))
+ } else {
+ self.id.store(poll.selector.id(), Ordering::SeqCst);
+ Ok(())
+ }
+ }
+}
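+
+// For example, registering a `TcpStream` with one `Poll` and then attempting
+// to register the same stream with a second `Poll` makes `associate_selector`
+// return an `io::ErrorKind::Other` error ("socket already registered").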
+
+impl Clone for SelectorId {
+ fn clone(&self) -> SelectorId {
+ SelectorId {
+ id: AtomicUsize::new(self.id.load(Ordering::SeqCst)),
+ }
+ }
+}
+
+#[test]
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub fn as_raw_fd() {
+ let poll = Poll::new().unwrap();
+ assert!(poll.as_raw_fd() > 0);
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/awakener.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/awakener.rs
new file mode 100644
index 0000000000..19bc762429
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/awakener.rs
@@ -0,0 +1,73 @@
+use {io, poll, Evented, Ready, Poll, PollOpt, Token};
+use zircon;
+use std::sync::{Arc, Mutex, Weak};
+
+pub struct Awakener {
+ /// Token and weak reference to the port on which Awakener was registered.
+ ///
+ /// When `Awakener::wakeup` is called, these are used to send a wakeup message to the port.
+ inner: Mutex<Option<(Token, Weak<zircon::Port>)>>,
+}
+
+impl Awakener {
+ /// Create a new `Awakener`.
+ pub fn new() -> io::Result<Awakener> {
+ Ok(Awakener {
+ inner: Mutex::new(None)
+ })
+ }
+
+ /// Send a wakeup signal to the `Selector` on which the `Awakener` was registered.
+ pub fn wakeup(&self) -> io::Result<()> {
+ let inner_locked = self.inner.lock().unwrap();
+ let &(token, ref weak_port) =
+ inner_locked.as_ref().expect("Called wakeup on unregistered awakener.");
+
+ let port = weak_port.upgrade().expect("Tried to wakeup a closed port.");
+
+ let status = 0; // arbitrary
+ let packet = zircon::Packet::from_user_packet(
+ token.0 as u64, status, zircon::UserPacket::from_u8_array([0; 32]));
+
+ Ok(port.queue(&packet)?)
+ }
+
+ pub fn cleanup(&self) {}
+}
+
+impl Evented for Awakener {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ _events: Ready,
+ _opts: PollOpt) -> io::Result<()>
+ {
+ let mut inner_locked = self.inner.lock().unwrap();
+ if inner_locked.is_some() {
+ panic!("Called register on already-registered Awakener.");
+ }
+ *inner_locked = Some((token, Arc::downgrade(poll::selector(poll).port())));
+
+ Ok(())
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ _events: Ready,
+ _opts: PollOpt) -> io::Result<()>
+ {
+ let mut inner_locked = self.inner.lock().unwrap();
+ *inner_locked = Some((token, Arc::downgrade(poll::selector(poll).port())));
+
+ Ok(())
+ }
+
+ fn deregister(&self, _poll: &Poll) -> io::Result<()>
+ {
+ let mut inner_locked = self.inner.lock().unwrap();
+ *inner_locked = None;
+
+ Ok(())
+ }
+} \ No newline at end of file
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/eventedfd.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/eventedfd.rs
new file mode 100644
index 0000000000..e23d0c4a1e
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/eventedfd.rs
@@ -0,0 +1,263 @@
+use {io, poll, Evented, Ready, Poll, PollOpt, Token};
+use libc;
+use zircon;
+use zircon::AsHandleRef;
+use sys::fuchsia::{DontDrop, poll_opts_to_wait_async, sys};
+use std::mem;
+use std::os::unix::io::RawFd;
+use std::sync::{Arc, Mutex};
+
+/// Properties of an `EventedFd`'s current registration
+#[derive(Debug)]
+pub struct EventedFdRegistration {
+ token: Token,
+ handle: DontDrop<zircon::Handle>,
+ rereg_signals: Option<(zircon::Signals, zircon::WaitAsyncOpts)>,
+}
+
+impl EventedFdRegistration {
+ unsafe fn new(token: Token,
+ raw_handle: sys::zx_handle_t,
+ rereg_signals: Option<(zircon::Signals, zircon::WaitAsyncOpts)>,
+ ) -> Self
+ {
+ EventedFdRegistration {
+ token: token,
+ handle: DontDrop::new(zircon::Handle::from_raw(raw_handle)),
+ rereg_signals: rereg_signals
+ }
+ }
+
+ pub fn rereg_signals(&self) -> Option<(zircon::Signals, zircon::WaitAsyncOpts)> {
+ self.rereg_signals
+ }
+}
+
+/// An `Evented` file descriptor. The file descriptor is owned by this structure.
+#[derive(Debug)]
+pub struct EventedFdInner {
+ /// Properties of the current registration.
+ registration: Mutex<Option<EventedFdRegistration>>,
+
+ /// Owned file descriptor.
+ ///
+ /// `fd` is closed on `Drop`, so modifying `fd` is a memory-unsafe operation.
+ fd: RawFd,
+
+ /// Owned `fdio_t` pointer.
+ fdio: *const sys::fdio_t,
+}
+
+impl EventedFdInner {
+ pub fn rereg_for_level(&self, port: &zircon::Port) {
+ let registration_opt = self.registration.lock().unwrap();
+ if let Some(ref registration) = *registration_opt {
+ if let Some((rereg_signals, rereg_opts)) = registration.rereg_signals {
+ let _res =
+ registration
+ .handle.inner_ref()
+ .wait_async_handle(
+ port,
+ registration.token.0 as u64,
+ rereg_signals,
+ rereg_opts);
+ }
+ }
+ }
+
+ pub fn registration(&self) -> &Mutex<Option<EventedFdRegistration>> {
+ &self.registration
+ }
+
+ pub fn fdio(&self) -> &sys::fdio_t {
+ unsafe { &*self.fdio }
+ }
+}
+
+impl Drop for EventedFdInner {
+ fn drop(&mut self) {
+ unsafe {
+ sys::__fdio_release(self.fdio);
+ let _ = libc::close(self.fd);
+ }
+ }
+}
+
+// `EventedFdInner` must be manually declared `Send + Sync` because it contains a `RawFd` and a
+// `*const sys::fdio_t`. These are only used to make thread-safe system calls, so accessing
+// them is entirely thread-safe.
+//
+// Note: one minor exception to this is the pair of calls to `libc::close` and `__fdio_release`,
+// which happen on `Drop`. These accesses are safe because `drop` is called at most once, from a
+// single thread, and after it runs no other functions can be called on the `EventedFdInner`.
+unsafe impl Sync for EventedFdInner {}
+unsafe impl Send for EventedFdInner {}
+
+#[derive(Clone, Debug)]
+pub struct EventedFd {
+ pub inner: Arc<EventedFdInner>
+}
+
+impl EventedFd {
+ pub unsafe fn new(fd: RawFd) -> Self {
+ let fdio = sys::__fdio_fd_to_io(fd);
+ assert!(fdio != ::std::ptr::null(), "File descriptor given to EventedFd must be valid.");
+
+ EventedFd {
+ inner: Arc::new(EventedFdInner {
+ registration: Mutex::new(None),
+ fd: fd,
+ fdio: fdio,
+ })
+ }
+ }
+
+ fn handle_and_signals_for_events(&self, interest: Ready, opts: PollOpt)
+ -> (sys::zx_handle_t, zircon::Signals)
+ {
+ let epoll_events = ioevent_to_epoll(interest, opts);
+
+ unsafe {
+ let mut raw_handle: sys::zx_handle_t = mem::uninitialized();
+ let mut signals: sys::zx_signals_t = mem::uninitialized();
+ sys::__fdio_wait_begin(self.inner.fdio, epoll_events, &mut raw_handle, &mut signals);
+
+ (raw_handle, signals)
+ }
+ }
+
+ fn register_with_lock(
+ &self,
+ registration: &mut Option<EventedFdRegistration>,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ if registration.is_some() {
+ return Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "Called register on an already registered file descriptor."));
+ }
+
+ let (raw_handle, signals) = self.handle_and_signals_for_events(interest, opts);
+
+ let needs_rereg = opts.is_level() && !opts.is_oneshot();
+
+ // If we need to reregister, then each registration should be `oneshot`
+ let opts = opts | if needs_rereg { PollOpt::oneshot() } else { PollOpt::empty() };
+
+ let rereg_signals = if needs_rereg {
+ Some((signals, poll_opts_to_wait_async(opts)))
+ } else {
+ None
+ };
+
+ *registration = Some(
+ unsafe { EventedFdRegistration::new(token, raw_handle, rereg_signals) }
+ );
+
+ // We don't have ownership of the handle, so we can't drop it
+ let handle = DontDrop::new(unsafe { zircon::Handle::from_raw(raw_handle) });
+
+ let registered = poll::selector(poll)
+ .register_fd(handle.inner_ref(), self, token, signals, opts);
+
+ if registered.is_err() {
+ *registration = None;
+ }
+
+ registered
+ }
+
+ fn deregister_with_lock(
+ &self,
+ registration: &mut Option<EventedFdRegistration>,
+ poll: &Poll) -> io::Result<()>
+ {
+ let old_registration = if let Some(old_reg) = registration.take() {
+ old_reg
+ } else {
+ return Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "Called rereregister on an unregistered file descriptor."))
+ };
+
+ poll::selector(poll)
+ .deregister_fd(old_registration.handle.inner_ref(), old_registration.token)
+ }
+}
+
+impl Evented for EventedFd {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.register_with_lock(
+ &mut *self.inner.registration.lock().unwrap(),
+ poll,
+ token,
+ interest,
+ opts)
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ // Take out the registration lock
+ let mut registration_lock = self.inner.registration.lock().unwrap();
+
+ // Deregister
+ self.deregister_with_lock(&mut *registration_lock, poll)?;
+
+ self.register_with_lock(
+ &mut *registration_lock,
+ poll,
+ token,
+ interest,
+ opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ let mut registration_lock = self.inner.registration.lock().unwrap();
+ self.deregister_with_lock(&mut *registration_lock, poll)
+ }
+}
+
+fn ioevent_to_epoll(interest: Ready, opts: PollOpt) -> u32 {
+ use event_imp::ready_from_usize;
+ const HUP: usize = 0b01000;
+
+ let mut kind = 0;
+
+ if interest.is_readable() {
+ kind |= libc::EPOLLIN;
+ }
+
+ if interest.is_writable() {
+ kind |= libc::EPOLLOUT;
+ }
+
+ if interest.contains(ready_from_usize(HUP)) {
+ kind |= libc::EPOLLRDHUP;
+ }
+
+ if opts.is_edge() {
+ kind |= libc::EPOLLET;
+ }
+
+ if opts.is_oneshot() {
+ kind |= libc::EPOLLONESHOT;
+ }
+
+ if opts.is_level() {
+ kind &= !libc::EPOLLET;
+ }
+
+ kind as u32
+}
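+
+// Example mapping: `Ready::readable() | Ready::writable()` registered with
+// `PollOpt::edge()` becomes `EPOLLIN | EPOLLOUT | EPOLLET`; adding
+// `PollOpt::oneshot()` also sets `EPOLLONESHOT`, while `PollOpt::level()`
+// clears `EPOLLET` again so fdio performs a level-triggered wait.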
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/handles.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/handles.rs
new file mode 100644
index 0000000000..ae6f07f6d9
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/handles.rs
@@ -0,0 +1,78 @@
+use {io, poll, Evented, Ready, Poll, PollOpt, Token};
+use zircon_sys::zx_handle_t;
+use std::sync::Mutex;
+
+/// Wrapper for registering a `HandleBase` type with mio.
+#[derive(Debug)]
+pub struct EventedHandle {
+ /// The handle to be registered.
+ handle: zx_handle_t,
+
+ /// The current `Token` with which the handle is registered with mio.
+ token: Mutex<Option<Token>>,
+}
+
+impl EventedHandle {
+ /// Create a new `EventedHandle` which can be registered with mio
+ /// in order to receive event notifications.
+ ///
+ /// The underlying handle must not be dropped while the
+ /// `EventedHandle` still exists.
+ pub unsafe fn new(handle: zx_handle_t) -> Self {
+ EventedHandle {
+ handle: handle,
+ token: Mutex::new(None),
+ }
+ }
+
+ /// Get the underlying handle being registered.
+ pub fn get_handle(&self) -> zx_handle_t {
+ self.handle
+ }
+}
+
+impl Evented for EventedHandle {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ let mut this_token = self.token.lock().unwrap();
+ {
+ poll::selector(poll).register_handle(self.handle, token, interest, opts)?;
+ *this_token = Some(token);
+ }
+ Ok(())
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ let mut this_token = self.token.lock().unwrap();
+ {
+ poll::selector(poll).deregister_handle(self.handle, token)?;
+ *this_token = None;
+ poll::selector(poll).register_handle(self.handle, token, interest, opts)?;
+ *this_token = Some(token);
+ }
+ Ok(())
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ let mut this_token = self.token.lock().unwrap();
+ let token = if let Some(token) = *this_token { token } else {
+ return Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "Attempted to deregister an unregistered handle."))
+ };
+ {
+ poll::selector(poll).deregister_handle(self.handle, token)?;
+ *this_token = None;
+ }
+ Ok(())
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/mod.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/mod.rs
new file mode 100644
index 0000000000..10728fc8dc
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/mod.rs
@@ -0,0 +1,177 @@
+use {io, Ready, PollOpt};
+use libc;
+use zircon;
+use std::mem;
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+use std::ops::{Deref, DerefMut};
+use std::os::unix::io::RawFd;
+
+mod awakener;
+mod handles;
+mod eventedfd;
+mod net;
+mod ready;
+mod selector;
+
+use self::eventedfd::{EventedFd, EventedFdInner};
+use self::ready::assert_fuchsia_ready_repr;
+
+pub use self::awakener::Awakener;
+pub use self::handles::EventedHandle;
+pub use self::net::{TcpListener, TcpStream, UdpSocket};
+pub use self::selector::{Events, Selector};
+pub use self::ready::{FuchsiaReady, zx_signals_t};
+
+// Set non-blocking (workaround since the std version doesn't work in fuchsia)
+// TODO: fix the std version and replace this
+pub fn set_nonblock(fd: RawFd) -> io::Result<()> {
+ cvt(unsafe { libc::fcntl(fd, libc::F_SETFL, libc::O_NONBLOCK) }).map(|_| ())
+}
+
+/// Workaround until fuchsia's recv_from is fixed
+unsafe fn recv_from(fd: RawFd, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ let flags = 0;
+
+ let n = cvt(
+ libc::recv(fd,
+ buf.as_mut_ptr() as *mut libc::c_void,
+ buf.len(),
+ flags)
+ )?;
+
+ // Placeholder address -- the real peer address is unavailable here and callers don't use it.
+ let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ Ok((n as usize, addr))
+}
+
+mod sys {
+ #![allow(non_camel_case_types)]
+ use std::os::unix::io::RawFd;
+ pub use zircon_sys::{zx_handle_t, zx_signals_t};
+
+ // 17 fn pointers we don't need for mio :)
+ pub type fdio_ops_t = [usize; 17];
+
+ pub type atomic_int_fast32_t = usize; // TODO: https://github.com/rust-lang/libc/issues/631
+
+ #[repr(C)]
+ pub struct fdio_t {
+ pub ops: *const fdio_ops_t,
+ pub magic: u32,
+ pub refcount: atomic_int_fast32_t,
+ pub dupcount: u32,
+ pub flags: u32,
+ }
+
+ #[link(name="fdio")]
+ extern {
+ pub fn __fdio_fd_to_io(fd: RawFd) -> *const fdio_t;
+ pub fn __fdio_release(io: *const fdio_t);
+
+ pub fn __fdio_wait_begin(
+ io: *const fdio_t,
+ events: u32,
+ handle_out: &mut zx_handle_t,
+ signals_out: &mut zx_signals_t,
+ );
+ pub fn __fdio_wait_end(
+ io: *const fdio_t,
+ signals: zx_signals_t,
+ events_out: &mut u32,
+ );
+ }
+}
+
+fn epoll_event_to_ready(epoll: u32) -> Ready {
+ let epoll = epoll as i32; // casts the bits directly
+ let mut kind = Ready::empty();
+
+ if (epoll & libc::EPOLLIN) != 0 || (epoll & libc::EPOLLPRI) != 0 {
+ kind = kind | Ready::readable();
+ }
+
+ if (epoll & libc::EPOLLOUT) != 0 {
+ kind = kind | Ready::writable();
+ }
+
+ kind
+
+ /* TODO: support?
+ // EPOLLHUP - Usually means a socket error happened
+ if (epoll & libc::EPOLLERR) != 0 {
+ kind = kind | UnixReady::error();
+ }
+
+ if (epoll & libc::EPOLLRDHUP) != 0 || (epoll & libc::EPOLLHUP) != 0 {
+ kind = kind | UnixReady::hup();
+ }
+ */
+}
+
+fn poll_opts_to_wait_async(poll_opts: PollOpt) -> zircon::WaitAsyncOpts {
+ if poll_opts.is_oneshot() {
+ zircon::WaitAsyncOpts::Once
+ } else {
+ zircon::WaitAsyncOpts::Repeating
+ }
+}
+
+trait IsMinusOne {
+ fn is_minus_one(&self) -> bool;
+}
+
+impl IsMinusOne for i32 {
+ fn is_minus_one(&self) -> bool { *self == -1 }
+}
+
+impl IsMinusOne for isize {
+ fn is_minus_one(&self) -> bool { *self == -1 }
+}
+
+fn cvt<T: IsMinusOne>(t: T) -> ::io::Result<T> {
+ use std::io;
+
+ if t.is_minus_one() {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(t)
+ }
+}
+
+/// Utility type to prevent the type inside of it from being dropped.
+#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+struct DontDrop<T>(Option<T>);
+
+impl<T> DontDrop<T> {
+ fn new(t: T) -> DontDrop<T> {
+ DontDrop(Some(t))
+ }
+
+ fn inner_ref(&self) -> &T {
+ self.0.as_ref().unwrap()
+ }
+
+ fn inner_mut(&mut self) -> &mut T {
+ self.0.as_mut().unwrap()
+ }
+}
+
+impl<T> Deref for DontDrop<T> {
+ type Target = T;
+ fn deref(&self) -> &Self::Target {
+ self.inner_ref()
+ }
+}
+
+impl<T> DerefMut for DontDrop<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.inner_mut()
+ }
+}
+
+impl<T> Drop for DontDrop<T> {
+ fn drop(&mut self) {
+ let inner = self.0.take();
+ mem::forget(inner);
+ }
+}
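+
+// In this module `DontDrop` wraps the std socket types (see net.rs): the raw
+// fd is owned and closed by `EventedFdInner`, so forgetting the inner std
+// value on drop prevents the same fd from being closed twice. A minimal
+// hypothetical use looks like:
+//
+//     let sock = DontDrop::new(udp_socket);   // `udp_socket: std::net::UdpSocket`
+//     let _ = sock.local_addr();              // `Deref` exposes the inner type
+//     drop(sock);                             // inner socket is forgotten, fd stays open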
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/net.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/net.rs
new file mode 100644
index 0000000000..d43ad27bb5
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/net.rs
@@ -0,0 +1,444 @@
+use {io, Evented, Ready, Poll, PollOpt, Token};
+use iovec::IoVec;
+use iovec::unix as iovec;
+use libc;
+use net2::TcpStreamExt;
+#[allow(unused_imports)] // only here for Rust 1.8
+use net2::UdpSocketExt;
+use sys::fuchsia::{recv_from, set_nonblock, EventedFd, DontDrop};
+use std::cmp;
+use std::io::{Read, Write};
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+use std::os::unix::io::AsRawFd;
+use std::time::Duration;
+
+#[derive(Debug)]
+pub struct TcpStream {
+ io: DontDrop<net::TcpStream>,
+ evented_fd: EventedFd,
+}
+
+impl TcpStream {
+ pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
+ try!(set_nonblock(stream.as_raw_fd()));
+
+ let connected = stream.connect(addr);
+ match connected {
+ Ok(..) => {}
+ Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
+ Err(e) => return Err(e),
+ }
+
+ let evented_fd = unsafe { EventedFd::new(stream.as_raw_fd()) };
+
+ return Ok(TcpStream {
+ io: DontDrop::new(stream),
+ evented_fd: evented_fd,
+ })
+ }
+
+ pub fn from_stream(stream: net::TcpStream) -> TcpStream {
+ let evented_fd = unsafe { EventedFd::new(stream.as_raw_fd()) };
+
+ TcpStream {
+ io: DontDrop::new(stream),
+ evented_fd: evented_fd,
+ }
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.io.peer_addr()
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpStream> {
+ self.io.try_clone().map(|s| {
+ let evented_fd = unsafe { EventedFd::new(s.as_raw_fd()) };
+ TcpStream {
+ io: DontDrop::new(s),
+ evented_fd: evented_fd,
+ }
+ })
+ }
+
+ pub fn shutdown(&self, how: net::Shutdown) -> io::Result<()> {
+ self.io.shutdown(how)
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.io.set_nodelay(nodelay)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.io.nodelay()
+ }
+
+ pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.io.set_recv_buffer_size(size)
+ }
+
+ pub fn recv_buffer_size(&self) -> io::Result<usize> {
+ self.io.recv_buffer_size()
+ }
+
+ pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.io.set_send_buffer_size(size)
+ }
+
+ pub fn send_buffer_size(&self) -> io::Result<usize> {
+ self.io.send_buffer_size()
+ }
+
+ pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+ self.io.set_keepalive(keepalive)
+ }
+
+ pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+ self.io.keepalive()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.ttl()
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.io.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.io.only_v6()
+ }
+
+ pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.io.set_linger(dur)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ self.io.linger()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.take_error()
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.peek(buf)
+ }
+
+ pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ unsafe {
+ let slice = iovec::as_os_slice_mut(bufs);
+ let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
+ let rc = libc::readv(self.io.as_raw_fd(),
+ slice.as_ptr(),
+ len as libc::c_int);
+ if rc < 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(rc as usize)
+ }
+ }
+ }
+
+ pub fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ unsafe {
+ let slice = iovec::as_os_slice(bufs);
+ let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
+ let rc = libc::writev(self.io.as_raw_fd(),
+ slice.as_ptr(),
+ len as libc::c_int);
+ if rc < 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(rc as usize)
+ }
+ }
+ }
+}
+
+impl<'a> Read for &'a TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.inner_ref().read(buf)
+ }
+}
+
+impl<'a> Write for &'a TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.io.inner_ref().write(buf)
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ self.io.inner_ref().flush()
+ }
+}
+
+impl Evented for TcpStream {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.evented_fd.deregister(poll)
+ }
+}
+
+#[derive(Debug)]
+pub struct TcpListener {
+ io: DontDrop<net::TcpListener>,
+ evented_fd: EventedFd,
+}
+
+impl TcpListener {
+ pub fn new(inner: net::TcpListener) -> io::Result<TcpListener> {
+ set_nonblock(inner.as_raw_fd())?;
+
+ let evented_fd = unsafe { EventedFd::new(inner.as_raw_fd()) };
+
+ Ok(TcpListener {
+ io: DontDrop::new(inner),
+ evented_fd: evented_fd,
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpListener> {
+ self.io.try_clone().map(|io| {
+ let evented_fd = unsafe { EventedFd::new(io.as_raw_fd()) };
+ TcpListener {
+ io: DontDrop::new(io),
+ evented_fd: evented_fd,
+ }
+ })
+ }
+
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ self.io.accept().and_then(|(s, a)| {
+ set_nonblock(s.as_raw_fd())?;
+ let evented_fd = unsafe { EventedFd::new(s.as_raw_fd()) };
+ return Ok((TcpStream {
+ io: DontDrop::new(s),
+ evented_fd: evented_fd,
+ }, a))
+ })
+ }
+
+ #[allow(deprecated)]
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.io.set_only_v6(only_v6)
+ }
+
+ #[allow(deprecated)]
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.io.only_v6()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.ttl()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.take_error()
+ }
+}
+
+impl Evented for TcpListener {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.evented_fd.deregister(poll)
+ }
+}
+
+#[derive(Debug)]
+pub struct UdpSocket {
+ io: DontDrop<net::UdpSocket>,
+ evented_fd: EventedFd,
+}
+
+impl UdpSocket {
+ pub fn new(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ set_nonblock(socket.as_raw_fd())?;
+
+ let evented_fd = unsafe { EventedFd::new(socket.as_raw_fd()) };
+
+ Ok(UdpSocket {
+ io: DontDrop::new(socket),
+ evented_fd: evented_fd,
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.io.try_clone().and_then(|io| {
+ UdpSocket::new(io)
+ })
+ }
+
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
+ self.io.send_to(buf, target)
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unsafe { recv_from(self.io.as_raw_fd(), buf) }
+ }
+
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.io.send(buf)
+ }
+
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.recv(buf)
+ }
+
+ pub fn connect(&self, addr: SocketAddr)
+ -> io::Result<()> {
+ self.io.connect(addr)
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.io.broadcast()
+ }
+
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.io.set_broadcast(on)
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.io.multicast_loop_v4()
+ }
+
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.io.set_multicast_loop_v4(on)
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.io.multicast_ttl_v4()
+ }
+
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_multicast_ttl_v4(ttl)
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.io.multicast_loop_v6()
+ }
+
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.io.set_multicast_loop_v6(on)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.ttl()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_ttl(ttl)
+ }
+
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.io.join_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.io.join_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.io.leave_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.io.leave_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.io.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.io.only_v6()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.take_error()
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self,
+ poll: &Poll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt) -> io::Result<()>
+ {
+ self.evented_fd.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.evented_fd.deregister(poll)
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/ready.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/ready.rs
new file mode 100644
index 0000000000..97854f8c07
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/ready.rs
@@ -0,0 +1,181 @@
+use event_imp::{Ready, ready_as_usize, ready_from_usize};
+pub use zircon_sys::{
+ zx_signals_t,
+ ZX_OBJECT_READABLE,
+ ZX_OBJECT_WRITABLE,
+};
+use std::ops;
+
+// The following impls are valid because Fuchsia and mio both represent
+// "readable" as `1 << 0` and "writable" as `1 << 2`.
+// We define this assertion here and call it from `Selector::new`,
+// since `Selector::new` is guaranteed to be called during a standard mio runtime,
+// unlike the functions in this file.
+#[inline]
+pub fn assert_fuchsia_ready_repr() {
+ debug_assert!(
+ ZX_OBJECT_READABLE.bits() as usize == ready_as_usize(Ready::readable()),
+ "Zircon ZX_OBJECT_READABLE should have the same repr as Ready::readable()"
+ );
+ debug_assert!(
+ ZX_OBJECT_WRITABLE.bits() as usize == ready_as_usize(Ready::writable()),
+ "Zircon ZX_OBJECT_WRITABLE should have the same repr as Ready::writable()"
+ );
+}
+
+/// Fuchsia specific extensions to `Ready`
+///
+/// Provides additional readiness event kinds that are available on Fuchsia.
+///
+/// Conversion traits are implemented between `Ready` and `FuchsiaReady`.
+///
+/// For high level documentation on polling and readiness, see [`Poll`].
+///
+/// [`Poll`]: struct.Poll.html
+#[derive(Debug, Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct FuchsiaReady(Ready);
+
+impl FuchsiaReady {
+ /// Returns the `FuchsiaReady` as raw zircon signals.
+ /// This function is just a more explicit, non-generic version of
+ /// `FuchsiaReady::into`.
+ #[inline]
+ pub fn into_zx_signals(self) -> zx_signals_t {
+ zx_signals_t::from_bits_truncate(ready_as_usize(self.0) as u32)
+ }
+}
+
+impl Into<zx_signals_t> for FuchsiaReady {
+ #[inline]
+ fn into(self) -> zx_signals_t {
+ self.into_zx_signals()
+ }
+}
+
+impl From<zx_signals_t> for FuchsiaReady {
+ #[inline]
+ fn from(src: zx_signals_t) -> Self {
+ FuchsiaReady(src.into())
+ }
+}
+
+impl From<zx_signals_t> for Ready {
+ #[inline]
+ fn from(src: zx_signals_t) -> Self {
+ ready_from_usize(src.bits() as usize)
+ }
+}
+
+impl From<Ready> for FuchsiaReady {
+ #[inline]
+ fn from(src: Ready) -> FuchsiaReady {
+ FuchsiaReady(src)
+ }
+}
+
+impl From<FuchsiaReady> for Ready {
+ #[inline]
+ fn from(src: FuchsiaReady) -> Ready {
+ src.0
+ }
+}
+
+impl ops::Deref for FuchsiaReady {
+ type Target = Ready;
+
+ #[inline]
+ fn deref(&self) -> &Ready {
+ &self.0
+ }
+}
+
+impl ops::DerefMut for FuchsiaReady {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Ready {
+ &mut self.0
+ }
+}
+
+impl ops::BitOr for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitor(self, other: FuchsiaReady) -> FuchsiaReady {
+ (self.0 | other.0).into()
+ }
+}
+
+impl ops::BitXor for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitxor(self, other: FuchsiaReady) -> FuchsiaReady {
+ (self.0 ^ other.0).into()
+ }
+}
+
+impl ops::BitAnd for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitand(self, other: FuchsiaReady) -> FuchsiaReady {
+ (self.0 & other.0).into()
+ }
+}
+
+impl ops::Sub for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn sub(self, other: FuchsiaReady) -> FuchsiaReady {
+ (self.0 & !other.0).into()
+ }
+}
+
+#[deprecated(since = "0.6.10", note = "removed")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+impl ops::Not for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn not(self) -> FuchsiaReady {
+ (!self.0).into()
+ }
+}
+
+impl ops::BitOr<zx_signals_t> for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitor(self, other: zx_signals_t) -> FuchsiaReady {
+ self | FuchsiaReady::from(other)
+ }
+}
+
+impl ops::BitXor<zx_signals_t> for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitxor(self, other: zx_signals_t) -> FuchsiaReady {
+ self ^ FuchsiaReady::from(other)
+ }
+}
+
+impl ops::BitAnd<zx_signals_t> for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn bitand(self, other: zx_signals_t) -> FuchsiaReady {
+ self & FuchsiaReady::from(other)
+ }
+}
+
+impl ops::Sub<zx_signals_t> for FuchsiaReady {
+ type Output = FuchsiaReady;
+
+ #[inline]
+ fn sub(self, other: zx_signals_t) -> FuchsiaReady {
+ self - FuchsiaReady::from(other)
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/fuchsia/selector.rs b/third_party/rust/mio-0.6.23/src/sys/fuchsia/selector.rs
new file mode 100644
index 0000000000..27226ac5ff
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/fuchsia/selector.rs
@@ -0,0 +1,353 @@
+use {io, Event, PollOpt, Ready, Token};
+use sys::fuchsia::{
+ assert_fuchsia_ready_repr,
+ epoll_event_to_ready,
+ poll_opts_to_wait_async,
+ EventedFd,
+ EventedFdInner,
+ FuchsiaReady,
+};
+use zircon;
+use zircon::AsHandleRef;
+use zircon_sys::zx_handle_t;
+use std::collections::hash_map;
+use std::fmt;
+use std::mem;
+use std::sync::atomic::{AtomicBool, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::{Arc, Mutex, Weak};
+use std::time::Duration;
+use sys;
+
+/// The kind of registration-- file descriptor or handle.
+///
+/// The last bit of a token is set to indicate the type of the registration.
+#[derive(Copy, Clone, Eq, PartialEq)]
+enum RegType {
+ Fd,
+ Handle,
+}
+
+fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
+ let key = token.0 as u64;
+ let msb = 1u64 << 63;
+ if (key & msb) != 0 {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "Most-significant bit of token must remain unset."));
+ }
+
+ Ok(match reg_type {
+ RegType::Fd => key,
+ RegType::Handle => key | msb,
+ })
+}
+
+fn token_and_type_from_key(key: u64) -> (Token, RegType) {
+ let msb = 1u64 << 63;
+ (
+ Token((key & !msb) as usize),
+ if (key & msb) == 0 {
+ RegType::Fd
+ } else {
+ RegType::Handle
+ }
+ )
+}
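+
+// For example, `key_from_token_and_type(Token(7), RegType::Handle)` yields
+// `7 | (1 << 63)`, and `token_and_type_from_key` maps that value back to
+// `(Token(7), RegType::Handle)`. Only tokens that already have the
+// most-significant bit set are rejected.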
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that is previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return with an error. This matches windows behavior.
+static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+pub struct Selector {
+ id: usize,
+
+ /// Zircon object on which the handles have been registered, and on which events occur
+ port: Arc<zircon::Port>,
+
+ /// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
+ /// used to prevent having to lock `tokens_to_rereg` when it is empty.
+ has_tokens_to_rereg: AtomicBool,
+
+ /// List of `Token`s corresponding to registrations that need to be reregistered before the
+ /// next `port::wait`. This is necessary to provide level-triggered behavior for
+ /// `Async::repeating` registrations.
+ ///
+ /// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
+ /// that it will be reregistered before the next `port::wait` call, making `port::wait` return
+ /// immediately if the signal was high during the reregistration.
+ ///
+ /// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
+ /// `token_to_fd`.
+ tokens_to_rereg: Mutex<Vec<Token>>,
+
+ /// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
+ /// file handle, its associated `fdio` object, and its current registration.
+ token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
+ // compatible with Ready.
+ assert_fuchsia_ready_repr();
+
+ let port = Arc::new(
+ zircon::Port::create(zircon::PortOpts::Default)?
+ );
+
+ // offset by 1 to avoid choosing 0 as the id of a selector
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+
+ let has_tokens_to_rereg = AtomicBool::new(false);
+ let tokens_to_rereg = Mutex::new(Vec::new());
+ let token_to_fd = Mutex::new(hash_map::HashMap::new());
+
+ Ok(Selector {
+ id: id,
+ port: port,
+ has_tokens_to_rereg: has_tokens_to_rereg,
+ tokens_to_rereg: tokens_to_rereg,
+ token_to_fd: token_to_fd,
+ })
+ }
+
+ pub fn id(&self) -> usize {
+ self.id
+ }
+
+ /// Returns a reference to the underlying port `Arc`.
+ pub fn port(&self) -> &Arc<zircon::Port> { &self.port }
+
+ /// Reregisters all registrations pointed to by the `tokens_to_rereg` list
+ /// if `has_tokens_to_rereg`.
+ fn reregister_handles(&self) -> io::Result<()> {
+ // We use `Ordering::Acquire` so that we see every token pushed to
+ // `tokens_to_rereg` before the matching `Ordering::Release` store below.
+ if self.has_tokens_to_rereg.load(Ordering::Acquire) {
+ let mut tokens = self.tokens_to_rereg.lock().unwrap();
+ let token_to_fd = self.token_to_fd.lock().unwrap();
+ for token in tokens.drain(0..) {
+ if let Some(eventedfd) = token_to_fd.get(&token)
+ .and_then(|h| h.upgrade()) {
+ eventedfd.rereg_for_level(&self.port);
+ }
+ }
+ self.has_tokens_to_rereg.store(false, Ordering::Release);
+ }
+ Ok(())
+ }
+
+ pub fn select(&self,
+ evts: &mut Events,
+ _awakener: Token,
+ timeout: Option<Duration>) -> io::Result<bool>
+ {
+ evts.clear();
+
+ self.reregister_handles()?;
+
+ let deadline = match timeout {
+ Some(duration) => {
+ let nanos = duration.as_secs().saturating_mul(1_000_000_000)
+ .saturating_add(duration.subsec_nanos() as u64);
+
+ zircon::deadline_after(nanos)
+ }
+ None => zircon::ZX_TIME_INFINITE,
+ };
+
+ let packet = match self.port.wait(deadline) {
+ Ok(packet) => packet,
+ Err(zircon::Status::ErrTimedOut) => return Ok(false),
+ Err(e) => Err(e)?,
+ };
+
+ let observed_signals = match packet.contents() {
+ zircon::PacketContents::SignalOne(signal_packet) => {
+ signal_packet.observed()
+ }
+ zircon::PacketContents::SignalRep(signal_packet) => {
+ signal_packet.observed()
+ }
+ zircon::PacketContents::User(_user_packet) => {
+ // User packets are only ever sent by an Awakener
+ return Ok(true);
+ }
+ };
+
+ let key = packet.key();
+ let (token, reg_type) = token_and_type_from_key(key);
+
+ match reg_type {
+ RegType::Handle => {
+ // We can return immediately-- no lookup or registration necessary.
+ evts.events.push(Event::new(Ready::from(observed_signals), token));
+ Ok(false)
+ },
+ RegType::Fd => {
+ // Convert the signals to epoll events using __fdio_wait_end,
+ // and add to reregistration list if necessary.
+ let events: u32;
+ {
+ let handle = if let Some(handle) =
+ self.token_to_fd.lock().unwrap()
+ .get(&token)
+ .and_then(|h| h.upgrade()) {
+ handle
+ } else {
+ // This handle is apparently in the process of removal.
+ // It has been removed from the list, but port_cancel has not been called.
+ return Ok(false);
+ };
+
+ events = unsafe {
+ let mut events: u32 = mem::uninitialized();
+ sys::fuchsia::sys::__fdio_wait_end(handle.fdio(), observed_signals, &mut events);
+ events
+ };
+
+ // If necessary, queue to be reregistered before next port_await
+ let needs_to_rereg = {
+ let registration_lock = handle.registration().lock().unwrap();
+
+ registration_lock
+ .as_ref()
+ .and_then(|r| r.rereg_signals())
+ .is_some()
+ };
+
+ if needs_to_rereg {
+ let mut tokens_to_rereg_lock = self.tokens_to_rereg.lock().unwrap();
+ tokens_to_rereg_lock.push(token);
+ // We use `Ordering::Release` so that the token pushed above is visible
+ // to the `Ordering::Acquire` load in `reregister_handles`.
+ self.has_tokens_to_rereg.store(true, Ordering::Release);
+ }
+ }
+
+ evts.events.push(Event::new(epoll_event_to_ready(events), token));
+ Ok(false)
+ },
+ }
+ }
+
+ /// Register event interests for the given IO handle with the OS
+ pub fn register_fd(&self,
+ handle: &zircon::Handle,
+ fd: &EventedFd,
+ token: Token,
+ signals: zircon::Signals,
+ poll_opts: PollOpt) -> io::Result<()>
+ {
+ {
+ let mut token_to_fd = self.token_to_fd.lock().unwrap();
+ match token_to_fd.entry(token) {
+ hash_map::Entry::Occupied(_) =>
+ return Err(io::Error::new(io::ErrorKind::AlreadyExists,
+ "Attempted to register a filedescriptor on an existing token.")),
+ hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
+ };
+ }
+
+ let wait_async_opts = poll_opts_to_wait_async(poll_opts);
+
+ let wait_res = handle.wait_async_handle(&self.port, token.0 as u64, signals, wait_async_opts);
+
+ if wait_res.is_err() {
+ self.token_to_fd.lock().unwrap().remove(&token);
+ }
+
+ Ok(wait_res?)
+ }
+
+ /// Deregister event interests for the given IO handle with the OS
+ pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> {
+ self.token_to_fd.lock().unwrap().remove(&token);
+
+ // We ignore NotFound errors since oneshots are automatically deregistered,
+ // but mio will attempt to deregister them manually.
+ self.port.cancel(&*handle, token.0 as u64)
+ .map_err(io::Error::from)
+ .or_else(|e| if e.kind() == io::ErrorKind::NotFound {
+ Ok(())
+ } else {
+ Err(e)
+ })
+ }
+
+ pub fn register_handle(&self,
+ handle: zx_handle_t,
+ token: Token,
+ interests: Ready,
+ poll_opts: PollOpt) -> io::Result<()>
+ {
+ if poll_opts.is_level() && !poll_opts.is_oneshot() {
+ return Err(io::Error::new(io::ErrorKind::InvalidInput,
+ "Repeated level-triggered events are not supported on Fuchsia handles."));
+ }
+
+ let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
+
+ let res = temp_handle.wait_async_handle(
+ &self.port,
+ key_from_token_and_type(token, RegType::Handle)?,
+ FuchsiaReady::from(interests).into_zx_signals(),
+ poll_opts_to_wait_async(poll_opts));
+
+ mem::forget(temp_handle);
+
+ Ok(res?)
+ }
+
+ pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()>
+ {
+ let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
+ let res = self.port.cancel(&temp_handle, key_from_token_and_type(token, RegType::Handle)?);
+
+ mem::forget(temp_handle);
+
+ Ok(res?)
+ }
+}
+
+pub struct Events {
+ events: Vec<Event>
+}
+
+impl Events {
+ pub fn with_capacity(_u: usize) -> Events {
+ // The Fuchsia selector only handles one event at a time,
+ // so we ignore the default capacity and set it to one.
+ Events { events: Vec::with_capacity(1) }
+ }
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.events.get(idx).map(|e| *e)
+ }
+ pub fn push_event(&mut self, event: Event) {
+ self.events.push(event)
+ }
+ pub fn clear(&mut self) {
+ self.events.drain(0..);
+ }
+}
+
+impl fmt::Debug for Events {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Events")
+ .field("len", &self.len())
+ .finish()
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/mod.rs b/third_party/rust/mio-0.6.23/src/sys/mod.rs
new file mode 100644
index 0000000000..8a1705db6c
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/mod.rs
@@ -0,0 +1,56 @@
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub use self::unix::{
+ Awakener,
+ EventedFd,
+ Events,
+ Io,
+ Selector,
+ TcpStream,
+ TcpListener,
+ UdpSocket,
+ pipe,
+ set_nonblock,
+};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub use self::unix::READY_ALL;
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+#[cfg(feature = "with-deprecated")]
+pub use self::unix::UnixSocket;
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+pub mod unix;
+
+#[cfg(windows)]
+pub use self::windows::{
+ Awakener,
+ Events,
+ Selector,
+ TcpStream,
+ TcpListener,
+ UdpSocket,
+ Overlapped,
+ Binding,
+};
+
+#[cfg(windows)]
+mod windows;
+
+#[cfg(target_os = "fuchsia")]
+pub use self::fuchsia::{
+ Awakener,
+ Events,
+ EventedHandle,
+ Selector,
+ TcpStream,
+ TcpListener,
+ UdpSocket,
+ set_nonblock,
+};
+
+#[cfg(target_os = "fuchsia")]
+pub mod fuchsia;
+
+#[cfg(not(all(unix, not(target_os = "fuchsia"))))]
+pub const READY_ALL: usize = 0;
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/awakener.rs b/third_party/rust/mio-0.6.23/src/sys/unix/awakener.rs
new file mode 100644
index 0000000000..9cc367a78c
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/awakener.rs
@@ -0,0 +1,74 @@
+pub use self::pipe::Awakener;
+
+/// Default awakener backed by a pipe
+mod pipe {
+ use sys::unix;
+ use {io, Ready, Poll, PollOpt, Token};
+ use event::Evented;
+ use std::io::{Read, Write};
+
+ /*
+ *
+ * ===== Awakener =====
+ *
+ */
+
+ pub struct Awakener {
+ reader: unix::Io,
+ writer: unix::Io,
+ }
+
+ impl Awakener {
+ pub fn new() -> io::Result<Awakener> {
+ let (rd, wr) = unix::pipe()?;
+
+ Ok(Awakener {
+ reader: rd,
+ writer: wr,
+ })
+ }
+
+ pub fn wakeup(&self) -> io::Result<()> {
+ match (&self.writer).write(&[1]) {
+ Ok(_) => Ok(()),
+ Err(e) => {
+ if e.kind() == io::ErrorKind::WouldBlock {
+ Ok(())
+ } else {
+ Err(e)
+ }
+ }
+ }
+ }
+
+ pub fn cleanup(&self) {
+ let mut buf = [0; 128];
+
+ loop {
+ // Consume data until all bytes are purged
+ match (&self.reader).read(&mut buf) {
+ Ok(i) if i > 0 => {},
+ _ => return,
+ }
+ }
+ }
+
+ fn reader(&self) -> &unix::Io {
+ &self.reader
+ }
+ }
+
+ impl Evented for Awakener {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.reader().register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.reader().reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.reader().deregister(poll)
+ }
+ }
+}
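
The awakener above is the classic self-pipe trick: `wakeup` writes a byte into the write end so a blocked `epoll_wait`/`kevent` call returns, and `cleanup` drains the read end so the pipe does not fill up. A minimal sketch of the same wake-a-blocked-poller idea, expressed through mio 0.6's public `Registration`/`SetReadiness` pair rather than this internal type (the token choice and timing are illustrative):

use std::thread;
use std::time::Duration;

use mio::{Events, Poll, PollOpt, Ready, Registration, Token};

fn main() -> std::io::Result<()> {
    let poll = Poll::new()?;

    // A Registration behaves like any other Evented handle; its paired
    // SetReadiness can be moved to another thread to wake the poller.
    let (registration, set_readiness) = Registration::new2();
    poll.register(&registration, Token(0), Ready::readable(), PollOpt::edge())?;

    thread::spawn(move || {
        thread::sleep(Duration::from_millis(100));
        set_readiness.set_readiness(Ready::readable()).unwrap();
    });

    // Blocks until the other thread flips the readiness.
    let mut events = Events::with_capacity(8);
    poll.poll(&mut events, None)?;
    assert!(events.iter().any(|e| e.token() == Token(0)));
    Ok(())
}
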
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/dlsym.rs b/third_party/rust/mio-0.6.23/src/sys/unix/dlsym.rs
new file mode 100644
index 0000000000..e88c595fc9
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/dlsym.rs
@@ -0,0 +1,47 @@
+use std::marker;
+use std::mem;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use libc;
+
+macro_rules! dlsym {
+ (fn $name:ident($($t:ty),*) -> $ret:ty) => (
+ #[allow(bad_style)]
+ static $name: ::sys::unix::dlsym::DlSym<unsafe extern fn($($t),*) -> $ret> =
+ ::sys::unix::dlsym::DlSym {
+ name: concat!(stringify!($name), "\0"),
+ addr: ::std::sync::atomic::ATOMIC_USIZE_INIT,
+ _marker: ::std::marker::PhantomData,
+ };
+ )
+}
+
+pub struct DlSym<F> {
+ pub name: &'static str,
+ pub addr: AtomicUsize,
+ pub _marker: marker::PhantomData<F>,
+}
+
+impl<F> DlSym<F> {
+ pub fn get(&self) -> Option<&F> {
+ assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
+ unsafe {
+ if self.addr.load(Ordering::SeqCst) == 0 {
+ self.addr.store(fetch(self.name), Ordering::SeqCst);
+ }
+ if self.addr.load(Ordering::SeqCst) == 1 {
+ None
+ } else {
+ mem::transmute::<&AtomicUsize, Option<&F>>(&self.addr)
+ }
+ }
+ }
+}
+
+unsafe fn fetch(name: &str) -> usize {
+ assert_eq!(name.as_bytes()[name.len() - 1], 0);
+ match libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr() as *const _) as usize {
+ 0 => 1,
+ n => n,
+ }
+}
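
`DlSym` caches the looked-up address in an `AtomicUsize`: `0` means "not probed yet" and `1` means "symbol absent", which is why `fetch` maps a null `dlsym` result to `1`. A stripped-down sketch of the same runtime probe done directly with `libc::dlsym`; the `pipe2` signature is merely an example of the kind of optional symbol the `dlsym!` macro wraps:

use std::mem;

use libc;

// Probe the running libc for an optional symbol; None means the symbol is
// not exported and the caller should take a fallback path.
fn optional_pipe2() -> Option<unsafe extern "C" fn(*mut libc::c_int, libc::c_int) -> libc::c_int> {
    unsafe {
        let addr = libc::dlsym(libc::RTLD_DEFAULT, b"pipe2\0".as_ptr() as *const _);
        if addr.is_null() {
            None
        } else {
            // A non-null address is reinterpreted as the function pointer type.
            Some(mem::transmute(addr))
        }
    }
}
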
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/epoll.rs b/third_party/rust/mio-0.6.23/src/sys/unix/epoll.rs
new file mode 100644
index 0000000000..0da787bc95
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/epoll.rs
@@ -0,0 +1,268 @@
+#![allow(deprecated)]
+use std::os::unix::io::AsRawFd;
+use std::os::unix::io::RawFd;
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use std::time::Duration;
+use std::{cmp, i32};
+
+use libc::{self, c_int};
+use libc::{EPOLLERR, EPOLLHUP, EPOLLONESHOT};
+use libc::{EPOLLET, EPOLLOUT, EPOLLIN, EPOLLPRI};
+
+use {io, Ready, PollOpt, Token};
+use event_imp::Event;
+use sys::unix::{cvt, UnixReady};
+use sys::unix::io::set_cloexec;
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that was previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return an error. This matches Windows behavior.
+static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+#[derive(Debug)]
+pub struct Selector {
+ id: usize,
+ epfd: RawFd,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ let epfd = unsafe {
+            // Prefer `epoll_create1`, which sets the CLOEXEC flag atomically,
+            // when it's available, and otherwise fall back to `epoll_create`
+            // followed by a separate call to set the CLOEXEC flag.
+ dlsym!(fn epoll_create1(c_int) -> c_int);
+
+ match epoll_create1.get() {
+ Some(epoll_create1_fn) => {
+ cvt(epoll_create1_fn(libc::EPOLL_CLOEXEC))?
+ }
+ None => {
+ let fd = cvt(libc::epoll_create(1024))?;
+ drop(set_cloexec(fd));
+ fd
+ }
+ }
+ };
+
+ // offset by 1 to avoid choosing 0 as the id of a selector
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+
+ Ok(Selector {
+ id: id,
+ epfd: epfd,
+ })
+ }
+
+ pub fn id(&self) -> usize {
+ self.id
+ }
+
+ /// Wait for events from the OS
+ pub fn select(&self, evts: &mut Events, awakener: Token, timeout: Option<Duration>) -> io::Result<bool> {
+ // A bug in kernels < 2.6.37 makes timeouts larger than LONG_MAX / CONFIG_HZ
+        // (approx. 30 minutes with CONFIG_HZ=1200) effectively infinite on 32-bit
+ // architectures. The magic number is the same constant used by libuv.
+ #[cfg(target_pointer_width = "32")]
+ const MAX_SAFE_TIMEOUT: u64 = 1789569;
+ #[cfg(not(target_pointer_width = "32"))]
+ const MAX_SAFE_TIMEOUT: u64 = c_int::max_value() as u64;
+
+ let timeout_ms = timeout
+ .map(|to| cmp::min(millis(to), MAX_SAFE_TIMEOUT) as c_int)
+ .unwrap_or(-1);
+
+ // Wait for epoll events for at most timeout_ms milliseconds
+ evts.clear();
+ unsafe {
+ let cnt = cvt(libc::epoll_wait(self.epfd,
+ evts.events.as_mut_ptr(),
+ evts.events.capacity() as i32,
+ timeout_ms))?;
+ let cnt = cnt as usize;
+ evts.events.set_len(cnt);
+
+ for i in 0..cnt {
+ if evts.events[i].u64 as usize == awakener.into() {
+ evts.events.remove(i);
+ return Ok(true);
+ }
+ }
+ }
+
+ Ok(false)
+ }
+
+ /// Register event interests for the given IO handle with the OS
+ pub fn register(&self, fd: RawFd, token: Token, interests: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut info = libc::epoll_event {
+ events: ioevent_to_epoll(interests, opts),
+ u64: usize::from(token) as u64
+ };
+
+ unsafe {
+ cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_ADD, fd, &mut info))?;
+ Ok(())
+ }
+ }
+
+ /// Register event interests for the given IO handle with the OS
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut info = libc::epoll_event {
+ events: ioevent_to_epoll(interests, opts),
+ u64: usize::from(token) as u64
+ };
+
+ unsafe {
+ cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_MOD, fd, &mut info))?;
+ Ok(())
+ }
+ }
+
+ /// Deregister event interests for the given IO handle with the OS
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+        // The &info argument should be ignored by the system,
+        // but Linux < 2.6.9 required it to be non-null.
+        // For compatibility, we provide a dummy epoll_event.
+ let mut info = libc::epoll_event {
+ events: 0,
+ u64: 0,
+ };
+
+ unsafe {
+ cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_DEL, fd, &mut info))?;
+ Ok(())
+ }
+ }
+}
+
+fn ioevent_to_epoll(interest: Ready, opts: PollOpt) -> u32 {
+ let mut kind = 0;
+
+ if interest.is_readable() {
+ kind |= EPOLLIN;
+ }
+
+ if interest.is_writable() {
+ kind |= EPOLLOUT;
+ }
+
+ if UnixReady::from(interest).is_priority() {
+ kind |= EPOLLPRI;
+ }
+
+ if opts.is_edge() {
+ kind |= EPOLLET;
+ }
+
+ if opts.is_oneshot() {
+ kind |= EPOLLONESHOT;
+ }
+
+ if opts.is_level() {
+ kind &= !EPOLLET;
+ }
+
+ kind as u32
+}
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.epfd
+ }
+}
+
+impl Drop for Selector {
+ fn drop(&mut self) {
+ unsafe {
+ let _ = libc::close(self.epfd);
+ }
+ }
+}
+
+pub struct Events {
+ events: Vec<libc::epoll_event>,
+}
+
+impl Events {
+ pub fn with_capacity(u: usize) -> Events {
+ Events {
+ events: Vec::with_capacity(u)
+ }
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ #[inline]
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.events.get(idx).map(|event| {
+ let epoll = event.events as c_int;
+ let mut kind = Ready::empty();
+
+ if (epoll & EPOLLIN) != 0 {
+ kind = kind | Ready::readable();
+ }
+
+ if (epoll & EPOLLPRI) != 0 {
+ kind = kind | Ready::readable() | UnixReady::priority();
+ }
+
+ if (epoll & EPOLLOUT) != 0 {
+ kind = kind | Ready::writable();
+ }
+
+ // EPOLLHUP - Usually means a socket error happened
+ if (epoll & EPOLLERR) != 0 {
+ kind = kind | UnixReady::error();
+ }
+
+ if (epoll & EPOLLHUP) != 0 {
+ kind = kind | UnixReady::hup();
+ }
+
+ let token = self.events[idx].u64;
+
+ Event::new(kind, Token(token as usize))
+ })
+ }
+
+ pub fn push_event(&mut self, event: Event) {
+ self.events.push(libc::epoll_event {
+ events: ioevent_to_epoll(event.readiness(), PollOpt::empty()),
+ u64: usize::from(event.token()) as u64
+ });
+ }
+
+ pub fn clear(&mut self) {
+ unsafe { self.events.set_len(0); }
+ }
+}
+
+const NANOS_PER_MILLI: u32 = 1_000_000;
+const MILLIS_PER_SEC: u64 = 1_000;
+
+/// Convert a `Duration` to milliseconds, rounding up and saturating at
+/// `u64::MAX`.
+///
+/// The saturating is fine because `u64::MAX` milliseconds are still many
+/// million years.
+pub fn millis(duration: Duration) -> u64 {
+ // Round up.
+ let millis = (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
+ duration.as_secs().saturating_mul(MILLIS_PER_SEC).saturating_add(millis as u64)
+}
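
A brief, hypothetical test of the rounding and saturation behaviour documented on `millis`:

#[test]
fn millis_rounds_up_and_saturates() {
    use std::time::Duration;

    // 1 ms + 1 ns rounds up to 2 ms.
    assert_eq!(millis(Duration::new(0, 1_000_001)), 2);
    // Whole seconds convert exactly.
    assert_eq!(millis(Duration::from_secs(3)), 3_000);
    // Absurdly long durations saturate instead of overflowing.
    assert_eq!(millis(Duration::new(u64::max_value(), 0)), u64::max_value());
}
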
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/eventedfd.rs b/third_party/rust/mio-0.6.23/src/sys/unix/eventedfd.rs
new file mode 100644
index 0000000000..72586f6652
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/eventedfd.rs
@@ -0,0 +1,107 @@
+use {io, poll, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use std::os::unix::io::RawFd;
+
+/*
+ *
+ * ===== EventedFd =====
+ *
+ */
+
+#[derive(Debug)]
+
+/// Adapter for [`RawFd`] providing an [`Evented`] implementation.
+///
+/// `EventedFd` enables registering any type with an FD with [`Poll`].
+///
+/// While only implementations for TCP and UDP are provided, Mio supports
+/// registering any FD that can be registered with the underlying OS selector.
+/// `EventedFd` provides the necessary bridge.
+///
+/// Note that `EventedFd` takes a `&RawFd`. This is because `EventedFd` **does
+/// not** take ownership of the FD. Specifically, it will not manage any
+/// lifecycle related operations, such as closing the FD on drop. It is expected
+/// that the `EventedFd` is constructed right before a call to
+/// [`Poll::register`]. See the examples for more detail.
+///
+/// # Examples
+///
+/// Basic usage
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Ready, Poll, PollOpt, Token};
+/// use mio::unix::EventedFd;
+///
+/// use std::os::unix::io::AsRawFd;
+/// use std::net::TcpListener;
+///
+/// // Bind a std listener
+/// let listener = TcpListener::bind("127.0.0.1:0")?;
+///
+/// let poll = Poll::new()?;
+///
+/// // Register the listener
+/// poll.register(&EventedFd(&listener.as_raw_fd()),
+/// Token(0), Ready::readable(), PollOpt::edge())?;
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// Implementing [`Evented`] for a custom type backed by a [`RawFd`].
+///
+/// ```
+/// use mio::{Ready, Poll, PollOpt, Token};
+/// use mio::event::Evented;
+/// use mio::unix::EventedFd;
+///
+/// use std::os::unix::io::RawFd;
+/// use std::io;
+///
+/// pub struct MyIo {
+/// fd: RawFd,
+/// }
+///
+/// impl Evented for MyIo {
+/// fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// EventedFd(&self.fd).register(poll, token, interest, opts)
+/// }
+///
+/// fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
+/// -> io::Result<()>
+/// {
+/// EventedFd(&self.fd).reregister(poll, token, interest, opts)
+/// }
+///
+/// fn deregister(&self, poll: &Poll) -> io::Result<()> {
+/// EventedFd(&self.fd).deregister(poll)
+/// }
+/// }
+/// ```
+///
+/// [`RawFd`]: https://doc.rust-lang.org/std/os/unix/io/type.RawFd.html
+/// [`Evented`]: ../event/trait.Evented.html
+/// [`Poll`]: ../struct.Poll.html
+/// [`Poll::register`]: ../struct.Poll.html#method.register
+pub struct EventedFd<'a>(pub &'a RawFd);
+
+impl<'a> Evented for EventedFd<'a> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ poll::selector(poll).register(*self.0, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ poll::selector(poll).reregister(*self.0, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ poll::selector(poll).deregister(*self.0)
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/io.rs b/third_party/rust/mio-0.6.23/src/sys/unix/io.rs
new file mode 100644
index 0000000000..47a3a70d1f
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/io.rs
@@ -0,0 +1,107 @@
+use std::fs::File;
+use std::io::{Read, Write};
+use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd};
+
+use libc;
+
+use {io, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use unix::EventedFd;
+use sys::unix::cvt;
+
+pub fn set_nonblock(fd: libc::c_int) -> io::Result<()> {
+ unsafe {
+ let flags = libc::fcntl(fd, libc::F_GETFL);
+ cvt(libc::fcntl(fd, libc::F_SETFL, flags | libc::O_NONBLOCK)).map(|_|())
+ }
+}
+
+pub fn set_cloexec(fd: libc::c_int) -> io::Result<()> {
+ unsafe {
+ let flags = libc::fcntl(fd, libc::F_GETFD);
+ cvt(libc::fcntl(fd, libc::F_SETFD, flags | libc::FD_CLOEXEC)).map(|_| ())
+ }
+}
+
+/*
+ *
+ * ===== Basic IO type =====
+ *
+ */
+
+/// Manages a FD
+#[derive(Debug)]
+pub struct Io {
+ fd: File,
+}
+
+impl Io {
+ /// Try to clone the FD
+ pub fn try_clone(&self) -> io::Result<Io> {
+ Ok(Io { fd: self.fd.try_clone()? })
+ }
+}
+
+impl FromRawFd for Io {
+ unsafe fn from_raw_fd(fd: RawFd) -> Io {
+ Io { fd: File::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for Io {
+ fn into_raw_fd(self) -> RawFd {
+ self.fd.into_raw_fd()
+ }
+}
+
+impl AsRawFd for Io {
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd.as_raw_fd()
+ }
+}
+
+impl Evented for Io {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).deregister(poll)
+ }
+}
+
+impl Read for Io {
+ fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
+ (&self.fd).read(dst)
+ }
+}
+
+impl<'a> Read for &'a Io {
+ fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
+ (&self.fd).read(dst)
+ }
+}
+
+impl Write for Io {
+ fn write(&mut self, src: &[u8]) -> io::Result<usize> {
+ (&self.fd).write(src)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.fd).flush()
+ }
+}
+
+impl<'a> Write for &'a Io {
+ fn write(&mut self, src: &[u8]) -> io::Result<usize> {
+ (&self.fd).write(src)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.fd).flush()
+ }
+}
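
`set_nonblock` and `set_cloexec` are thin `fcntl` wrappers used throughout this backend. A hedged sketch of how a std socket might be flipped to non-blocking through its raw fd before being handed to mio (the helper name is illustrative):

use std::net::TcpListener;
use std::os::unix::io::AsRawFd;

// Hypothetical helper: make an already-bound std listener non-blocking so
// that accept() returns WouldBlock instead of parking the thread.
fn make_nonblocking(listener: &TcpListener) -> std::io::Result<()> {
    set_nonblock(listener.as_raw_fd())
}
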
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/kqueue.rs b/third_party/rust/mio-0.6.23/src/sys/unix/kqueue.rs
new file mode 100644
index 0000000000..59c70e1e18
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/kqueue.rs
@@ -0,0 +1,360 @@
+use std::{cmp, fmt, ptr};
+#[cfg(not(target_os = "netbsd"))]
+use std::os::raw::{c_int, c_short};
+use std::os::unix::io::AsRawFd;
+use std::os::unix::io::RawFd;
+use std::collections::HashMap;
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use std::time::Duration;
+
+use libc::{self, time_t};
+
+use {io, Ready, PollOpt, Token};
+use event_imp::{self as event, Event};
+use sys::unix::{cvt, UnixReady};
+use sys::unix::io::set_cloexec;
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that was previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return an error. This matches Windows behavior.
+static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+#[cfg(not(target_os = "netbsd"))]
+type Filter = c_short;
+#[cfg(not(target_os = "netbsd"))]
+type UData = *mut ::libc::c_void;
+#[cfg(not(target_os = "netbsd"))]
+type Count = c_int;
+
+#[cfg(target_os = "netbsd")]
+type Filter = u32;
+#[cfg(target_os = "netbsd")]
+type UData = ::libc::intptr_t;
+#[cfg(target_os = "netbsd")]
+type Count = usize;
+
+macro_rules! kevent {
+ ($id: expr, $filter: expr, $flags: expr, $data: expr) => {
+ libc::kevent {
+ ident: $id as ::libc::uintptr_t,
+ filter: $filter as Filter,
+ flags: $flags,
+ fflags: 0,
+ data: 0,
+ udata: $data as UData,
+ }
+ }
+}
+
+pub struct Selector {
+ id: usize,
+ kq: RawFd,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // offset by 1 to avoid choosing 0 as the id of a selector
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+ let kq = unsafe { cvt(libc::kqueue())? };
+ drop(set_cloexec(kq));
+
+ Ok(Selector {
+ id,
+ kq,
+ })
+ }
+
+ pub fn id(&self) -> usize {
+ self.id
+ }
+
+ pub fn select(&self, evts: &mut Events, awakener: Token, timeout: Option<Duration>) -> io::Result<bool> {
+ let timeout = timeout.map(|to| {
+ libc::timespec {
+ tv_sec: cmp::min(to.as_secs(), time_t::max_value() as u64) as time_t,
+ // `Duration::subsec_nanos` is guaranteed to be less than one
+ // billion (the number of nanoseconds in a second), making the
+ // cast to i32 safe. The cast itself is needed for platforms
+ // where C's long is only 32 bits.
+ tv_nsec: libc::c_long::from(to.subsec_nanos() as i32),
+ }
+ });
+ let timeout = timeout.as_ref().map(|s| s as *const _).unwrap_or(ptr::null_mut());
+
+ evts.clear();
+ unsafe {
+ let cnt = cvt(libc::kevent(self.kq,
+ ptr::null(),
+ 0,
+ evts.sys_events.0.as_mut_ptr(),
+ evts.sys_events.0.capacity() as Count,
+ timeout))?;
+ evts.sys_events.0.set_len(cnt as usize);
+ Ok(evts.coalesce(awakener))
+ }
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Ready, opts: PollOpt) -> io::Result<()> {
+ trace!("registering; token={:?}; interests={:?}", token, interests);
+
+ let flags = if opts.contains(PollOpt::edge()) { libc::EV_CLEAR } else { 0 } |
+ if opts.contains(PollOpt::oneshot()) { libc::EV_ONESHOT } else { 0 } |
+ libc::EV_RECEIPT;
+
+ unsafe {
+ let r = if interests.contains(Ready::readable()) { libc::EV_ADD } else { libc::EV_DELETE };
+ let w = if interests.contains(Ready::writable()) { libc::EV_ADD } else { libc::EV_DELETE };
+ let mut changes = [
+ kevent!(fd, libc::EVFILT_READ, flags | r, usize::from(token)),
+ kevent!(fd, libc::EVFILT_WRITE, flags | w, usize::from(token)),
+ ];
+
+ cvt(libc::kevent(self.kq,
+ changes.as_ptr(),
+ changes.len() as Count,
+ changes.as_mut_ptr(),
+ changes.len() as Count,
+ ::std::ptr::null()))?;
+
+ for change in changes.iter() {
+ debug_assert_eq!(change.flags & libc::EV_ERROR, libc::EV_ERROR);
+
+ // Test to see if an error happened
+ if change.data == 0 {
+ continue
+ }
+
+ // Older versions of OSX (10.11 and 10.10 have been witnessed)
+ // can return EPIPE when registering a pipe file descriptor
+ // where the other end has already disappeared. For example code
+ // that creates a pipe, closes a file descriptor, and then
+ // registers the other end will see an EPIPE returned from
+ // `register`.
+ //
+ // It also turns out that kevent will still report events on the
+ // file descriptor, telling us that it's readable/hup at least
+ // after we've done this registration. As a result we just
+ // ignore `EPIPE` here instead of propagating it.
+ //
+ // More info can be found at carllerche/mio#582
+ if change.data as i32 == libc::EPIPE &&
+ change.filter == libc::EVFILT_WRITE as Filter {
+ continue
+ }
+
+ // ignore ENOENT error for EV_DELETE
+ let orig_flags = if change.filter == libc::EVFILT_READ as Filter { r } else { w };
+ if change.data as i32 == libc::ENOENT && orig_flags & libc::EV_DELETE != 0 {
+ continue
+ }
+
+ return Err(::std::io::Error::from_raw_os_error(change.data as i32));
+ }
+ Ok(())
+ }
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Ready, opts: PollOpt) -> io::Result<()> {
+ // Just need to call register here since EV_ADD is a mod if already
+ // registered
+ self.register(fd, token, interests, opts)
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ unsafe {
+ // EV_RECEIPT is a nice way to apply changes and get back per-event results while not
+ // draining the actual changes.
+ let filter = libc::EV_DELETE | libc::EV_RECEIPT;
+#[cfg(not(target_os = "netbsd"))]
+ let mut changes = [
+ kevent!(fd, libc::EVFILT_READ, filter, ptr::null_mut()),
+ kevent!(fd, libc::EVFILT_WRITE, filter, ptr::null_mut()),
+ ];
+
+#[cfg(target_os = "netbsd")]
+ let mut changes = [
+ kevent!(fd, libc::EVFILT_READ, filter, 0),
+ kevent!(fd, libc::EVFILT_WRITE, filter, 0),
+ ];
+
+ cvt(libc::kevent(self.kq,
+ changes.as_ptr(),
+ changes.len() as Count,
+ changes.as_mut_ptr(),
+ changes.len() as Count,
+ ::std::ptr::null())).map(|_| ())?;
+
+ if changes[0].data as i32 == libc::ENOENT && changes[1].data as i32 == libc::ENOENT {
+ return Err(::std::io::Error::from_raw_os_error(changes[0].data as i32));
+ }
+ for change in changes.iter() {
+ debug_assert_eq!(libc::EV_ERROR & change.flags, libc::EV_ERROR);
+ if change.data != 0 && change.data as i32 != libc::ENOENT {
+ return Err(::std::io::Error::from_raw_os_error(changes[0].data as i32));
+ }
+ }
+ Ok(())
+ }
+ }
+}
+
+impl fmt::Debug for Selector {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Selector")
+ .field("id", &self.id)
+ .field("kq", &self.kq)
+ .finish()
+ }
+}
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.kq
+ }
+}
+
+impl Drop for Selector {
+ fn drop(&mut self) {
+ unsafe {
+ let _ = libc::close(self.kq);
+ }
+ }
+}
+
+pub struct Events {
+ sys_events: KeventList,
+ events: Vec<Event>,
+ event_map: HashMap<Token, usize>,
+}
+
+struct KeventList(Vec<libc::kevent>);
+
+unsafe impl Send for KeventList {}
+unsafe impl Sync for KeventList {}
+
+impl Events {
+ pub fn with_capacity(cap: usize) -> Events {
+ Events {
+ sys_events: KeventList(Vec::with_capacity(cap)),
+ events: Vec::with_capacity(cap),
+ event_map: HashMap::with_capacity(cap)
+ }
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.events.get(idx).cloned()
+ }
+
+ fn coalesce(&mut self, awakener: Token) -> bool {
+ let mut ret = false;
+ self.events.clear();
+ self.event_map.clear();
+
+ for e in self.sys_events.0.iter() {
+ let token = Token(e.udata as usize);
+ let len = self.events.len();
+
+ if token == awakener {
+                // TODO: Should this return an error if the event is an error?
+                // It is not critical, as spurious wakeups are permitted.
+ ret = true;
+ continue;
+ }
+
+ let idx = *self.event_map.entry(token)
+ .or_insert(len);
+
+ if idx == len {
+ // New entry, insert the default
+ self.events.push(Event::new(Ready::empty(), token));
+
+ }
+
+ if e.flags & libc::EV_ERROR != 0 {
+ event::kind_mut(&mut self.events[idx]).insert(*UnixReady::error());
+ }
+
+ if e.filter == libc::EVFILT_READ as Filter {
+ event::kind_mut(&mut self.events[idx]).insert(Ready::readable());
+ } else if e.filter == libc::EVFILT_WRITE as Filter {
+ event::kind_mut(&mut self.events[idx]).insert(Ready::writable());
+ }
+#[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ {
+ if e.filter == libc::EVFILT_AIO {
+ event::kind_mut(&mut self.events[idx]).insert(UnixReady::aio());
+ }
+ }
+#[cfg(any(target_os = "freebsd"))]
+ {
+ if e.filter == libc::EVFILT_LIO {
+ event::kind_mut(&mut self.events[idx]).insert(UnixReady::lio());
+ }
+ }
+ }
+
+ ret
+ }
+
+ pub fn push_event(&mut self, event: Event) {
+ self.events.push(event);
+ }
+
+ pub fn clear(&mut self) {
+ self.sys_events.0.truncate(0);
+ self.events.truncate(0);
+ self.event_map.clear();
+ }
+}
+
+impl fmt::Debug for Events {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Events")
+ .field("len", &self.sys_events.0.len())
+ .finish()
+ }
+}
+
+#[test]
+fn does_not_register_rw() {
+ use {Poll, Ready, PollOpt, Token};
+ use unix::EventedFd;
+
+ let kq = unsafe { libc::kqueue() };
+ let kqf = EventedFd(&kq);
+ let poll = Poll::new().unwrap();
+
+    // Registering the kqueue fd will fail if write interest is requested
+    // (on anything but some versions of OS X).
+ poll.register(&kqf, Token(1234), Ready::readable(),
+ PollOpt::edge() | PollOpt::oneshot()).unwrap();
+}
+
+#[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+#[test]
+fn test_coalesce_aio() {
+ let mut events = Events::with_capacity(1);
+ events.sys_events.0.push(kevent!(0x1234, libc::EVFILT_AIO, 0, 42));
+ events.coalesce(Token(0));
+ assert!(events.events[0].readiness() == UnixReady::aio().into());
+ assert!(events.events[0].token() == Token(42));
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/mod.rs b/third_party/rust/mio-0.6.23/src/sys/unix/mod.rs
new file mode 100644
index 0000000000..c5726c07ce
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/mod.rs
@@ -0,0 +1,105 @@
+use libc::{self, c_int};
+
+#[macro_use]
+pub mod dlsym;
+
+#[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+))]
+mod epoll;
+
+#[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+))]
+pub use self::epoll::{Events, Selector};
+
+#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos",
+ target_os = "netbsd", target_os = "openbsd"))]
+mod kqueue;
+
+#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos",
+ target_os = "netbsd", target_os = "openbsd"))]
+pub use self::kqueue::{Events, Selector};
+
+mod awakener;
+mod eventedfd;
+mod io;
+mod ready;
+mod tcp;
+mod udp;
+mod uio;
+
+#[cfg(feature = "with-deprecated")]
+mod uds;
+
+pub use self::awakener::Awakener;
+pub use self::eventedfd::EventedFd;
+pub use self::io::{Io, set_nonblock};
+pub use self::ready::{UnixReady, READY_ALL};
+pub use self::tcp::{TcpStream, TcpListener};
+pub use self::udp::UdpSocket;
+
+#[cfg(feature = "with-deprecated")]
+pub use self::uds::UnixSocket;
+
+pub use iovec::IoVec;
+
+use std::os::unix::io::FromRawFd;
+
+pub fn pipe() -> ::io::Result<(Io, Io)> {
+ // Use pipe2 for atomically setting O_CLOEXEC if we can, but otherwise
+ // just fall back to using `pipe`.
+ dlsym!(fn pipe2(*mut c_int, c_int) -> c_int);
+
+ let mut pipes = [0; 2];
+ unsafe {
+ match pipe2.get() {
+ Some(pipe2_fn) => {
+ let flags = libc::O_NONBLOCK | libc::O_CLOEXEC;
+ cvt(pipe2_fn(pipes.as_mut_ptr(), flags))?;
+ Ok((Io::from_raw_fd(pipes[0]), Io::from_raw_fd(pipes[1])))
+ }
+ None => {
+ cvt(libc::pipe(pipes.as_mut_ptr()))?;
+                // Ensure the pipe fds are closed if any of the system calls
+                // below fail.
+ let r = Io::from_raw_fd(pipes[0]);
+ let w = Io::from_raw_fd(pipes[1]);
+ cvt(libc::fcntl(pipes[0], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ cvt(libc::fcntl(pipes[1], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ cvt(libc::fcntl(pipes[0], libc::F_SETFL, libc::O_NONBLOCK))?;
+ cvt(libc::fcntl(pipes[1], libc::F_SETFL, libc::O_NONBLOCK))?;
+ Ok((r, w))
+ }
+ }
+ }
+}
+
+trait IsMinusOne {
+ fn is_minus_one(&self) -> bool;
+}
+
+impl IsMinusOne for i32 {
+ fn is_minus_one(&self) -> bool { *self == -1 }
+}
+impl IsMinusOne for isize {
+ fn is_minus_one(&self) -> bool { *self == -1 }
+}
+
+fn cvt<T: IsMinusOne>(t: T) -> ::io::Result<T> {
+ use std::io;
+
+ if t.is_minus_one() {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(t)
+ }
+}
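
`cvt` is the usual "-1 means error" adapter for raw libc return values; every syscall result in this backend is funnelled through it. A small hypothetical helper showing the idiom:

use std::io;

// Close a raw fd and surface any OS error (EBADF, EINTR, ...) as io::Error.
fn close_fd(fd: libc::c_int) -> io::Result<()> {
    unsafe { cvt(libc::close(fd)).map(|_| ()) }
}
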
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/ready.rs b/third_party/rust/mio-0.6.23/src/sys/unix/ready.rs
new file mode 100644
index 0000000000..88f56252dd
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/ready.rs
@@ -0,0 +1,525 @@
+use event_imp::{Ready, ready_as_usize, ready_from_usize};
+
+use std::ops;
+use std::fmt;
+
+/// Unix specific extensions to `Ready`
+///
+/// Provides additional readiness event kinds that are available on unix
+/// platforms. Unix platforms are able to provide readiness events for
+/// additional socket events, such as HUP and error.
+///
+/// HUP events occur when the remote end of a socket hangs up. In the TCP case,
+/// this occurs when the remote end of a TCP socket shuts down writes.
+///
+/// Error events occur when the socket enters an error state. In this case, the
+/// socket will also receive a readable or writable event. Reading or writing to
+/// the socket will result in an error.
+///
+/// Conversion traits are implemented between `Ready` and `UnixReady`. See the
+/// examples.
+///
+/// For high level documentation on polling and readiness, see [`Poll`].
+///
+/// # Examples
+///
+/// Most of the time, all that is needed is using bit operations
+///
+/// ```
+/// use mio::Ready;
+/// use mio::unix::UnixReady;
+///
+/// let ready = Ready::readable() | UnixReady::hup();
+///
+/// assert!(ready.is_readable());
+/// assert!(UnixReady::from(ready).is_hup());
+/// ```
+///
+/// Basic conversion between ready types.
+///
+/// ```
+/// use mio::Ready;
+/// use mio::unix::UnixReady;
+///
+/// // Start with a portable ready
+/// let ready = Ready::readable();
+///
+/// // Convert to a unix ready, adding HUP
+/// let mut unix_ready = UnixReady::from(ready) | UnixReady::hup();
+///
+/// unix_ready.insert(UnixReady::error());
+///
+/// // `unix_ready` maintains readable interest
+/// assert!(unix_ready.is_readable());
+/// assert!(unix_ready.is_hup());
+/// assert!(unix_ready.is_error());
+///
+/// // Convert back to `Ready`
+/// let ready = Ready::from(unix_ready);
+///
+/// // Readable is maintained
+/// assert!(ready.is_readable());
+/// ```
+///
+/// Registering readable and error interest on a socket
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Ready, Poll, PollOpt, Token};
+/// use mio::net::TcpStream;
+/// use mio::unix::UnixReady;
+///
+/// let addr = "216.58.193.68:80".parse()?;
+/// let socket = TcpStream::connect(&addr)?;
+///
+/// let poll = Poll::new()?;
+///
+/// poll.register(&socket,
+/// Token(0),
+/// Ready::readable() | UnixReady::error(),
+/// PollOpt::edge())?;
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// [`Poll`]: ../struct.Poll.html
+/// [readiness]: struct.Poll.html#readiness-operations
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct UnixReady(Ready);
+
+const ERROR: usize = 0b00_0100;
+const HUP: usize = 0b00_1000;
+
+#[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+const AIO: usize = 0b01_0000;
+
+#[cfg(not(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos")))]
+const AIO: usize = 0b00_0000;
+
+#[cfg(any(target_os = "freebsd"))]
+const LIO: usize = 0b10_0000;
+
+#[cfg(not(any(target_os = "freebsd")))]
+const LIO: usize = 0b00_0000;
+
+#[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+))]
+const PRI: usize = 0b100_0000;
+
+#[cfg(not(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+)))]
+const PRI: usize = 0;
+
+// Export to support `Ready::all`
+pub const READY_ALL: usize = ERROR | HUP | AIO | LIO | PRI;
+
+#[test]
+fn test_ready_all() {
+ let readable = Ready::readable().as_usize();
+ let writable = Ready::writable().as_usize();
+
+ assert_eq!(
+ READY_ALL | readable | writable,
+ ERROR + HUP + AIO + LIO + PRI + readable + writable
+ );
+
+ // Issue #896.
+ #[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+ ))]
+ assert!(!Ready::from(UnixReady::priority()).is_writable());
+}
+
+impl UnixReady {
+ /// Returns a `Ready` representing AIO completion readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::aio();
+ ///
+ /// assert!(ready.is_aio());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ #[inline]
+ #[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ pub fn aio() -> UnixReady {
+ UnixReady(ready_from_usize(AIO))
+ }
+
+ #[cfg(not(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos")))]
+ #[deprecated(since = "0.6.12", note = "this function is now platform specific")]
+ #[doc(hidden)]
+ pub fn aio() -> UnixReady {
+ UnixReady(Ready::empty())
+ }
+
+ /// Returns a `Ready` representing error readiness.
+ ///
+ /// **Note that only readable and writable readiness is guaranteed to be
+ /// supported on all platforms**. This means that `error` readiness
+ /// should be treated as a hint. For more details, see [readiness] in the
+ /// poll documentation.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::error();
+ ///
+ /// assert!(ready.is_error());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ /// [readiness]: ../struct.Poll.html#readiness-operations
+ #[inline]
+ pub fn error() -> UnixReady {
+ UnixReady(ready_from_usize(ERROR))
+ }
+
+ /// Returns a `Ready` representing HUP readiness.
+ ///
+ /// A HUP (or hang-up) signifies that a stream socket **peer** closed the
+ /// connection, or shut down the writing half of the connection.
+ ///
+ /// **Note that only readable and writable readiness is guaranteed to be
+ /// supported on all platforms**. This means that `hup` readiness
+ /// should be treated as a hint. For more details, see [readiness] in the
+ /// poll documentation. It is also unclear if HUP readiness will remain in 0.7. See
+ /// [here][issue-941].
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::hup();
+ ///
+ /// assert!(ready.is_hup());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ /// [readiness]: ../struct.Poll.html#readiness-operations
+ /// [issue-941]: https://github.com/tokio-rs/mio/issues/941
+ #[inline]
+ pub fn hup() -> UnixReady {
+ UnixReady(ready_from_usize(HUP))
+ }
+
+ /// Returns a `Ready` representing LIO completion readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::lio();
+ ///
+ /// assert!(ready.is_lio());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ #[cfg(any(target_os = "freebsd"))]
+ pub fn lio() -> UnixReady {
+ UnixReady(ready_from_usize(LIO))
+ }
+
+ /// Returns a `Ready` representing priority (`EPOLLPRI`) readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::priority();
+ ///
+ /// assert!(ready.is_priority());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ #[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+ ))]
+ pub fn priority() -> UnixReady {
+ UnixReady(ready_from_usize(PRI))
+ }
+
+ /// Returns true if `Ready` contains AIO readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::aio();
+ ///
+ /// assert!(ready.is_aio());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ #[inline]
+ #[cfg(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ pub fn is_aio(&self) -> bool {
+ self.contains(ready_from_usize(AIO))
+ }
+
+ #[deprecated(since = "0.6.12", note = "this function is now platform specific")]
+ #[cfg(feature = "with-deprecated")]
+ #[cfg(not(any(target_os = "dragonfly",
+ target_os = "freebsd", target_os = "ios", target_os = "macos")))]
+ #[doc(hidden)]
+ pub fn is_aio(&self) -> bool {
+ false
+ }
+
+ /// Returns true if the value includes error readiness
+ ///
+ /// **Note that only readable and writable readiness is guaranteed to be
+ /// supported on all platforms**. This means that `error` readiness should
+ /// be treated as a hint. For more details, see [readiness] in the poll
+ /// documentation.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::error();
+ ///
+ /// assert!(ready.is_error());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ /// [readiness]: ../struct.Poll.html#readiness-operations
+ #[inline]
+ pub fn is_error(&self) -> bool {
+ self.contains(ready_from_usize(ERROR))
+ }
+
+ /// Returns true if the value includes HUP readiness
+ ///
+ /// A HUP (or hang-up) signifies that a stream socket **peer** closed the
+ /// connection, or shut down the writing half of the connection.
+ ///
+ /// **Note that only readable and writable readiness is guaranteed to be
+ /// supported on all platforms**. This means that `hup` readiness
+ /// should be treated as a hint. For more details, see [readiness] in the
+ /// poll documentation.
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::hup();
+ ///
+ /// assert!(ready.is_hup());
+ /// ```
+ ///
+ /// [`Poll`]: ../struct.Poll.html
+ /// [readiness]: ../struct.Poll.html#readiness-operations
+ #[inline]
+ pub fn is_hup(&self) -> bool {
+ self.contains(ready_from_usize(HUP))
+ }
+
+ /// Returns true if `Ready` contains LIO readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::lio();
+ ///
+ /// assert!(ready.is_lio());
+ /// ```
+ #[inline]
+ #[cfg(any(target_os = "freebsd"))]
+ pub fn is_lio(&self) -> bool {
+ self.contains(ready_from_usize(LIO))
+ }
+
+ /// Returns true if `Ready` contains priority (`EPOLLPRI`) readiness
+ ///
+ /// See [`Poll`] for more documentation on polling.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::unix::UnixReady;
+ ///
+ /// let ready = UnixReady::priority();
+ ///
+ /// assert!(ready.is_priority());
+ /// ```
+ ///
+ /// [`Poll`]: struct.Poll.html
+ #[inline]
+ #[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+ ))]
+ pub fn is_priority(&self) -> bool {
+ self.contains(ready_from_usize(PRI))
+ }
+}
+
+impl From<Ready> for UnixReady {
+ fn from(src: Ready) -> UnixReady {
+ UnixReady(src)
+ }
+}
+
+impl From<UnixReady> for Ready {
+ fn from(src: UnixReady) -> Ready {
+ src.0
+ }
+}
+
+impl ops::Deref for UnixReady {
+ type Target = Ready;
+
+ fn deref(&self) -> &Ready {
+ &self.0
+ }
+}
+
+impl ops::DerefMut for UnixReady {
+ fn deref_mut(&mut self) -> &mut Ready {
+ &mut self.0
+ }
+}
+
+impl ops::BitOr for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn bitor(self, other: UnixReady) -> UnixReady {
+ (self.0 | other.0).into()
+ }
+}
+
+impl ops::BitXor for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn bitxor(self, other: UnixReady) -> UnixReady {
+ (self.0 ^ other.0).into()
+ }
+}
+
+impl ops::BitAnd for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn bitand(self, other: UnixReady) -> UnixReady {
+ (self.0 & other.0).into()
+ }
+}
+
+impl ops::Sub for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn sub(self, other: UnixReady) -> UnixReady {
+ ready_from_usize(ready_as_usize(self.0) & !ready_as_usize(other.0)).into()
+ }
+}
+
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+impl ops::Not for UnixReady {
+ type Output = UnixReady;
+
+ #[inline]
+ fn not(self) -> UnixReady {
+ (!self.0).into()
+ }
+}
+
+impl fmt::Debug for UnixReady {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let mut one = false;
+ let flags = [
+ (UnixReady(Ready::readable()), "Readable"),
+ (UnixReady(Ready::writable()), "Writable"),
+ (UnixReady::error(), "Error"),
+ (UnixReady::hup(), "Hup"),
+ #[allow(deprecated)]
+ (UnixReady::aio(), "Aio"),
+ #[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "solaris"
+ ))]
+ (UnixReady::priority(), "Priority"),
+ ];
+
+ for &(flag, msg) in &flags {
+ if self.contains(flag) {
+ if one { write!(fmt, " | ")? }
+ write!(fmt, "{}", msg)?;
+
+ one = true
+ }
+ }
+
+ if !one {
+ fmt.write_str("(empty)")?;
+ }
+
+ Ok(())
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/tcp.rs b/third_party/rust/mio-0.6.23/src/sys/unix/tcp.rs
new file mode 100644
index 0000000000..7962fcecb3
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/tcp.rs
@@ -0,0 +1,286 @@
+use std::fmt;
+use std::io::{Read, Write};
+use std::net::{self, SocketAddr};
+use std::os::unix::io::{RawFd, FromRawFd, IntoRawFd, AsRawFd};
+use std::time::Duration;
+
+use libc;
+use net2::TcpStreamExt;
+use iovec::IoVec;
+
+use {io, Ready, Poll, PollOpt, Token};
+use event::Evented;
+
+use sys::unix::eventedfd::EventedFd;
+use sys::unix::io::set_nonblock;
+use sys::unix::uio::VecIo;
+
+pub struct TcpStream {
+ inner: net::TcpStream,
+}
+
+pub struct TcpListener {
+ inner: net::TcpListener,
+}
+
+impl TcpStream {
+ pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
+ set_nonblock(stream.as_raw_fd())?;
+
+ match stream.connect(addr) {
+ Ok(..) => {}
+ Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
+ Err(e) => return Err(e),
+ }
+
+ Ok(TcpStream {
+ inner: stream,
+ })
+ }
+
+ pub fn from_stream(stream: net::TcpStream) -> TcpStream {
+ TcpStream {
+ inner: stream,
+ }
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.peer_addr()
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpStream> {
+ self.inner.try_clone().map(|s| {
+ TcpStream {
+ inner: s,
+ }
+ })
+ }
+
+ pub fn shutdown(&self, how: net::Shutdown) -> io::Result<()> {
+ self.inner.shutdown(how)
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.inner.set_nodelay(nodelay)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.inner.nodelay()
+ }
+
+ pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.inner.set_recv_buffer_size(size)
+ }
+
+ pub fn recv_buffer_size(&self) -> io::Result<usize> {
+ self.inner.recv_buffer_size()
+ }
+
+ pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.inner.set_send_buffer_size(size)
+ }
+
+ pub fn send_buffer_size(&self) -> io::Result<usize> {
+ self.inner.send_buffer_size()
+ }
+
+ pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+ self.inner.set_keepalive(keepalive)
+ }
+
+ pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+ self.inner.keepalive()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.inner.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.inner.only_v6()
+ }
+
+ pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+ TcpStreamExt::set_linger(&self.inner, dur)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ TcpStreamExt::linger(&self.inner)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.peek(buf)
+ }
+
+ pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ self.inner.readv(bufs)
+ }
+
+ pub fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ self.inner.writev(bufs)
+ }
+}
+
+impl<'a> Read for &'a TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (&self.inner).read(buf)
+ }
+}
+
+impl<'a> Write for &'a TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&self.inner).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&self.inner).flush()
+ }
+}
+
+impl Evented for TcpStream {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).deregister(poll)
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.inner, f)
+ }
+}
+
+impl FromRawFd for TcpStream {
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpStream {
+ TcpStream {
+ inner: net::TcpStream::from_raw_fd(fd),
+ }
+ }
+}
+
+impl IntoRawFd for TcpStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_raw_fd()
+ }
+}
+
+impl AsRawFd for TcpStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl TcpListener {
+ pub fn new(inner: net::TcpListener) -> io::Result<TcpListener> {
+ set_nonblock(inner.as_raw_fd())?;
+ Ok(TcpListener {
+ inner,
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpListener> {
+ self.inner.try_clone().map(|s| {
+ TcpListener {
+ inner: s,
+ }
+ })
+ }
+
+ pub fn accept(&self) -> io::Result<(net::TcpStream, SocketAddr)> {
+ self.inner.accept()
+ }
+
+ #[allow(deprecated)]
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.inner.set_only_v6(only_v6)
+ }
+
+ #[allow(deprecated)]
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.inner.only_v6()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+}
+
+impl Evented for TcpListener {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).deregister(poll)
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.inner, f)
+ }
+}
+
+impl FromRawFd for TcpListener {
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpListener {
+ TcpListener {
+ inner: net::TcpListener::from_raw_fd(fd),
+ }
+ }
+}
+
+impl IntoRawFd for TcpListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_raw_fd()
+ }
+}
+
+impl AsRawFd for TcpListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
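
`TcpStream::connect` above starts a non-blocking connect and deliberately swallows `EINPROGRESS`; completion (or failure) shows up later as writable readiness. A hedged sketch of that flow through the public `mio::net::TcpStream` API of this version (the address and token are illustrative):

use mio::net::TcpStream;
use mio::{Events, Poll, PollOpt, Ready, Token};

fn main() -> std::io::Result<()> {
    let addr = "127.0.0.1:80".parse().unwrap();
    // Returns immediately; the connect proceeds in the background.
    let stream = TcpStream::connect(&addr)?;

    let poll = Poll::new()?;
    poll.register(&stream, Token(0), Ready::writable(), PollOpt::edge())?;

    let mut events = Events::with_capacity(8);
    poll.poll(&mut events, None)?;

    // Writable readiness means the connect attempt finished; check whether
    // it actually succeeded.
    match stream.take_error()? {
        None => println!("connected to {}", addr),
        Some(e) => println!("connect failed: {}", e),
    }
    Ok(())
}
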
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/udp.rs b/third_party/rust/mio-0.6.23/src/sys/unix/udp.rs
new file mode 100644
index 0000000000..c77a9d6380
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/udp.rs
@@ -0,0 +1,181 @@
+use {io, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use unix::EventedFd;
+use sys::unix::uio::VecIo;
+use std::fmt;
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+use std::os::unix::io::{RawFd, IntoRawFd, AsRawFd, FromRawFd};
+
+#[allow(unused_imports)] // only here for Rust 1.8
+use net2::UdpSocketExt;
+use iovec::IoVec;
+
+pub struct UdpSocket {
+ io: net::UdpSocket,
+}
+
+impl UdpSocket {
+ pub fn new(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ socket.set_nonblocking(true)?;
+ Ok(UdpSocket {
+ io: socket,
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.io.try_clone().map(|io| {
+ UdpSocket {
+ io,
+ }
+ })
+ }
+
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
+ self.io.send_to(buf, target)
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.io.recv_from(buf)
+ }
+
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.io.send(buf)
+ }
+
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.recv(buf)
+ }
+
+ pub fn connect(&self, addr: SocketAddr)
+ -> io::Result<()> {
+ self.io.connect(addr)
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.io.broadcast()
+ }
+
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.io.set_broadcast(on)
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.io.multicast_loop_v4()
+ }
+
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.io.set_multicast_loop_v4(on)
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.io.multicast_ttl_v4()
+ }
+
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_multicast_ttl_v4(ttl)
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.io.multicast_loop_v6()
+ }
+
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.io.set_multicast_loop_v6(on)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.ttl()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.set_ttl(ttl)
+ }
+
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.io.join_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.io.join_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.io.leave_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.io.leave_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.io.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.io.only_v6()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.take_error()
+ }
+
+ pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ self.io.readv(bufs)
+ }
+
+ pub fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ self.io.writev(bufs)
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).deregister(poll)
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.io, f)
+ }
+}
+
+impl FromRawFd for UdpSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
+ UdpSocket {
+ io: net::UdpSocket::from_raw_fd(fd),
+ }
+ }
+}
+
+impl IntoRawFd for UdpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.io.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UdpSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.as_raw_fd()
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/uds.rs b/third_party/rust/mio-0.6.23/src/sys/unix/uds.rs
new file mode 100644
index 0000000000..f6706784f8
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/uds.rs
@@ -0,0 +1,265 @@
+use std::io::{Read, Write};
+use std::mem;
+use std::net::Shutdown;
+use std::os::unix::prelude::*;
+use std::path::Path;
+use std::ptr;
+
+use libc;
+
+use {io, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use sys::unix::{cvt, Io};
+use sys::unix::io::{set_nonblock, set_cloexec};
+
+trait MyInto<T> {
+ fn my_into(self) -> T;
+}
+
+impl MyInto<u32> for usize {
+ fn my_into(self) -> u32 { self as u32 }
+}
+
+impl MyInto<usize> for usize {
+ fn my_into(self) -> usize { self }
+}
+
+unsafe fn sockaddr_un(path: &Path)
+ -> io::Result<(libc::sockaddr_un, libc::socklen_t)> {
+ let mut addr: libc::sockaddr_un = mem::zeroed();
+ addr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+
+ let bytes = path.as_os_str().as_bytes();
+
+ if bytes.len() >= addr.sun_path.len() {
+ return Err(io::Error::new(io::ErrorKind::InvalidInput,
+ "path must be shorter than SUN_LEN"))
+ }
+ for (dst, src) in addr.sun_path.iter_mut().zip(bytes.iter()) {
+ *dst = *src as libc::c_char;
+ }
+ // null byte for pathname addresses is already there because we zeroed the
+ // struct
+
+ let mut len = sun_path_offset() + bytes.len();
+ match bytes.get(0) {
+ Some(&0) | None => {}
+ Some(_) => len += 1,
+ }
+ Ok((addr, len as libc::socklen_t))
+}
+
+fn sun_path_offset() -> usize {
+ // Silence rustc 1.65 warning about mem::uninitialized.
+ #[allow(invalid_value)]
+ unsafe {
+ // Work with an actual instance of the type since using a null pointer is UB
+ let addr: libc::sockaddr_un = mem::uninitialized();
+ let base = &addr as *const _ as usize;
+ let path = &addr.sun_path as *const _ as usize;
+ path - base
+ }
+}
+
+#[derive(Debug)]
+pub struct UnixSocket {
+ io: Io,
+}
+
+impl UnixSocket {
+ /// Returns a new, unbound, non-blocking Unix domain socket
+ pub fn stream() -> io::Result<UnixSocket> {
+ #[cfg(target_os = "linux")]
+ use libc::{SOCK_CLOEXEC, SOCK_NONBLOCK};
+ #[cfg(not(target_os = "linux"))]
+ const SOCK_CLOEXEC: libc::c_int = 0;
+ #[cfg(not(target_os = "linux"))]
+ const SOCK_NONBLOCK: libc::c_int = 0;
+
+ unsafe {
+ if cfg!(target_os = "linux") {
+ let flags = libc::SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK;
+ match cvt(libc::socket(libc::AF_UNIX, flags, 0)) {
+ Ok(fd) => return Ok(UnixSocket::from_raw_fd(fd)),
+ Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {}
+ Err(e) => return Err(e),
+ }
+ }
+
+ let fd = cvt(libc::socket(libc::AF_UNIX, libc::SOCK_STREAM, 0))?;
+ let fd = UnixSocket::from_raw_fd(fd);
+ set_cloexec(fd.as_raw_fd())?;
+ set_nonblock(fd.as_raw_fd())?;
+ Ok(fd)
+ }
+ }
+
+ /// Connect the socket to the specified address
+ pub fn connect<P: AsRef<Path> + ?Sized>(&self, addr: &P) -> io::Result<()> {
+ unsafe {
+ let (addr, len) = sockaddr_un(addr.as_ref())?;
+ cvt(libc::connect(self.as_raw_fd(),
+ &addr as *const _ as *const _,
+ len))?;
+ Ok(())
+ }
+ }
+
+ /// Listen for incoming requests
+ pub fn listen(&self, backlog: usize) -> io::Result<()> {
+ unsafe {
+ cvt(libc::listen(self.as_raw_fd(), backlog as i32))?;
+ Ok(())
+ }
+ }
+
+ pub fn accept(&self) -> io::Result<UnixSocket> {
+ unsafe {
+ let fd = cvt(libc::accept(self.as_raw_fd(),
+ ptr::null_mut(),
+ ptr::null_mut()))?;
+ let fd = Io::from_raw_fd(fd);
+ set_cloexec(fd.as_raw_fd())?;
+ set_nonblock(fd.as_raw_fd())?;
+ Ok(UnixSocket { io: fd })
+ }
+ }
+
+ /// Bind the socket to the specified address
+ pub fn bind<P: AsRef<Path> + ?Sized>(&self, addr: &P) -> io::Result<()> {
+ unsafe {
+ let (addr, len) = sockaddr_un(addr.as_ref())?;
+ cvt(libc::bind(self.as_raw_fd(),
+ &addr as *const _ as *const _,
+ len))?;
+ Ok(())
+ }
+ }
+
+ pub fn try_clone(&self) -> io::Result<UnixSocket> {
+ Ok(UnixSocket { io: self.io.try_clone()? })
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ let how = match how {
+ Shutdown::Read => libc::SHUT_RD,
+ Shutdown::Write => libc::SHUT_WR,
+ Shutdown::Both => libc::SHUT_RDWR,
+ };
+ unsafe {
+ cvt(libc::shutdown(self.as_raw_fd(), how))?;
+ Ok(())
+ }
+ }
+
+ pub fn read_recv_fd(&mut self, buf: &mut [u8]) -> io::Result<(usize, Option<RawFd>)> {
+ unsafe {
+ let mut iov = libc::iovec {
+ iov_base: buf.as_mut_ptr() as *mut _,
+ iov_len: buf.len(),
+ };
+ struct Cmsg {
+ hdr: libc::cmsghdr,
+ data: [libc::c_int; 1],
+ }
+ let mut cmsg: Cmsg = mem::zeroed();
+ let mut msg: libc::msghdr = mem::zeroed();
+ msg.msg_iov = &mut iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = &mut cmsg as *mut _ as *mut _;
+ msg.msg_controllen = mem::size_of_val(&cmsg).my_into();
+ let bytes = cvt(libc::recvmsg(self.as_raw_fd(), &mut msg, 0))?;
+
+ const SCM_RIGHTS: libc::c_int = 1;
+
+ let fd = if cmsg.hdr.cmsg_level == libc::SOL_SOCKET &&
+ cmsg.hdr.cmsg_type == SCM_RIGHTS {
+ Some(cmsg.data[0])
+ } else {
+ None
+ };
+ Ok((bytes as usize, fd))
+ }
+ }
+
+ pub fn write_send_fd(&mut self, buf: &[u8], fd: RawFd) -> io::Result<usize> {
+ unsafe {
+ let mut iov = libc::iovec {
+ iov_base: buf.as_ptr() as *mut _,
+ iov_len: buf.len(),
+ };
+ struct Cmsg {
+ #[allow(dead_code)]
+ hdr: libc::cmsghdr,
+ data: [libc::c_int; 1],
+ }
+ let mut cmsg: Cmsg = mem::zeroed();
+ cmsg.hdr.cmsg_len = mem::size_of_val(&cmsg).my_into();
+ cmsg.hdr.cmsg_level = libc::SOL_SOCKET;
+ cmsg.hdr.cmsg_type = 1; // SCM_RIGHTS
+ cmsg.data[0] = fd;
+ let mut msg: libc::msghdr = mem::zeroed();
+ msg.msg_iov = &mut iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = &mut cmsg as *mut _ as *mut _;
+ msg.msg_controllen = mem::size_of_val(&cmsg).my_into();
+ let bytes = cvt(libc::sendmsg(self.as_raw_fd(), &msg, 0))?;
+ Ok(bytes as usize)
+ }
+ }
+}
+
+impl Read for UnixSocket {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.io.read(buf)
+ }
+}
+
+impl Write for UnixSocket {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.io.write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.io.flush()
+ }
+}
+
+impl Evented for UnixSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.io.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.io.deregister(poll)
+ }
+}
+
+
+impl From<Io> for UnixSocket {
+ fn from(io: Io) -> UnixSocket {
+ UnixSocket { io }
+ }
+}
+
+impl FromRawFd for UnixSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixSocket {
+ UnixSocket { io: Io::from_raw_fd(fd) }
+ }
+}
+
+impl IntoRawFd for UnixSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.io.into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.as_raw_fd()
+ }
+}
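As an aside, `sun_path_offset` above leans on the long-deprecated `mem::uninitialized()`. Below is a minimal sketch of an equivalent computation using `MaybeUninit`; it is not part of the vendored sources, the function name is illustrative, and it assumes the `libc` crate is in scope as in this file.

    use std::mem::MaybeUninit;

    // Sketch only: compute the offset of `sun_path` without ever creating an
    // uninitialized `sockaddr_un` value.
    fn sun_path_offset_sketch() -> usize {
        let addr = MaybeUninit::<libc::sockaddr_un>::uninit();
        let base = addr.as_ptr() as usize;
        // Safety: we only take the address of the field; it is never read.
        let path = unsafe { std::ptr::addr_of!((*addr.as_ptr()).sun_path) } as usize;
        path - base
    }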
diff --git a/third_party/rust/mio-0.6.23/src/sys/unix/uio.rs b/third_party/rust/mio-0.6.23/src/sys/unix/uio.rs
new file mode 100644
index 0000000000..e38cd4983b
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/unix/uio.rs
@@ -0,0 +1,44 @@
+use std::cmp;
+use std::io;
+use std::os::unix::io::AsRawFd;
+use libc;
+use iovec::IoVec;
+use iovec::unix as iovec;
+
+pub trait VecIo {
+ fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize>;
+
+ fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize>;
+}
+
+impl<T: AsRawFd> VecIo for T {
+ fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ unsafe {
+ let slice = iovec::as_os_slice_mut(bufs);
+ let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
+ let rc = libc::readv(self.as_raw_fd(),
+ slice.as_ptr(),
+ len as libc::c_int);
+ if rc < 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(rc as usize)
+ }
+ }
+ }
+
+ fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ unsafe {
+ let slice = iovec::as_os_slice(bufs);
+ let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
+ let rc = libc::writev(self.as_raw_fd(),
+ slice.as_ptr(),
+ len as libc::c_int);
+ if rc < 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(rc as usize)
+ }
+ }
+ }
+}
\ No newline at end of file
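The `VecIo` blanket impl above simply forwards to `readv(2)`/`writev(2)`. A minimal usage sketch follows, assuming the trait and the `iovec` crate are in scope and `socket` is any `AsRawFd` type such as the `UnixSocket` above; the function name and buffer contents are illustrative.

    use iovec::IoVec;

    // Sketch only: submit a header and a body to the kernel in one writev call.
    fn write_two_parts<T: VecIo>(socket: &T) -> std::io::Result<usize> {
        let bufs = [
            IoVec::from_bytes(b"HDR").unwrap(),     // from_bytes is None only for empty slices
            IoVec::from_bytes(b"payload").unwrap(),
        ];
        socket.writev(&bufs)
    }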
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/awakener.rs b/third_party/rust/mio-0.6.23/src/sys/windows/awakener.rs
new file mode 100644
index 0000000000..c913bc93f8
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/awakener.rs
@@ -0,0 +1,66 @@
+use std::sync::Mutex;
+
+use miow::iocp::CompletionStatus;
+use {io, poll, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use sys::windows::Selector;
+
+pub struct Awakener {
+ inner: Mutex<Option<AwakenerInner>>,
+}
+
+struct AwakenerInner {
+ token: Token,
+ selector: Selector,
+}
+
+impl Awakener {
+ pub fn new() -> io::Result<Awakener> {
+ Ok(Awakener {
+ inner: Mutex::new(None),
+ })
+ }
+
+ pub fn wakeup(&self) -> io::Result<()> {
+ // Each wakeup notification has NULL as its `OVERLAPPED` pointer to
+ // indicate that it's from this awakener and not part of an I/O
+ // operation. This is specially recognized by the selector.
+ //
+ // If we haven't been registered with an event loop yet just silently
+ // succeed.
+ if let Some(inner) = self.inner.lock().unwrap().as_ref() {
+ let status = CompletionStatus::new(0,
+ usize::from(inner.token),
+ 0 as *mut _);
+ inner.selector.port().post(status)?;
+ }
+ Ok(())
+ }
+
+ pub fn cleanup(&self) {
+ // noop
+ }
+}
+
+impl Evented for Awakener {
+ fn register(&self, poll: &Poll, token: Token, events: Ready,
+ opts: PollOpt) -> io::Result<()> {
+ assert_eq!(opts, PollOpt::edge());
+ assert_eq!(events, Ready::readable());
+ *self.inner.lock().unwrap() = Some(AwakenerInner {
+ selector: poll::selector(poll).clone_ref(),
+ token: token,
+ });
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, events: Ready,
+ opts: PollOpt) -> io::Result<()> {
+ self.register(poll, token, events, opts)
+ }
+
+ fn deregister(&self, _poll: &Poll) -> io::Result<()> {
+ *self.inner.lock().unwrap() = None;
+ Ok(())
+ }
+}
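A small sketch of the convention described in `wakeup` above: a wakeup is just a zero-byte completion whose `OVERLAPPED` pointer is null, which is how `Selector::select` (in selector.rs below) tells it apart from real I/O. The function names and token value are illustrative.

    use miow::iocp::{CompletionPort, CompletionStatus};

    // Sketch only: post a wakeup notification to a completion port.
    fn post_wakeup(port: &CompletionPort, token: usize) -> std::io::Result<()> {
        port.post(CompletionStatus::new(0, token, 0 as *mut _))
    }

    // Sketch only: the consuming side keys off the null OVERLAPPED pointer.
    fn is_wakeup(status: &CompletionStatus) -> bool {
        status.overlapped() as usize == 0
    }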
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/buffer_pool.rs b/third_party/rust/mio-0.6.23/src/sys/windows/buffer_pool.rs
new file mode 100644
index 0000000000..86754593fd
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/buffer_pool.rs
@@ -0,0 +1,20 @@
+pub struct BufferPool {
+ pool: Vec<Vec<u8>>,
+}
+
+impl BufferPool {
+ pub fn new(cap: usize) -> BufferPool {
+ BufferPool { pool: Vec::with_capacity(cap) }
+ }
+
+ pub fn get(&mut self, default_cap: usize) -> Vec<u8> {
+ self.pool.pop().unwrap_or_else(|| Vec::with_capacity(default_cap))
+ }
+
+ pub fn put(&mut self, mut buf: Vec<u8>) {
+ if self.pool.len() < self.pool.capacity(){
+ unsafe { buf.set_len(0); }
+ self.pool.push(buf);
+ }
+ }
+}
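A minimal round-trip sketch of how this pool is meant to be used (compare `ReadyBinding::get_buffer`/`put_buffer` in selector.rs below); the sizes are illustrative.

    // Sketch only: buffers are handed out, used as scratch space, and returned
    // with their length reset to 0 but their capacity kept for reuse.
    fn buffer_round_trip() {
        let mut pool = BufferPool::new(8);   // cache at most 8 buffers
        let mut buf = pool.get(64 * 1024);   // reuse a cached buffer or allocate 64 KiB
        buf.extend_from_slice(b"scratch data");
        pool.put(buf);                       // cleared and stored for next time
    }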
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/from_raw_arc.rs b/third_party/rust/mio-0.6.23/src/sys/windows/from_raw_arc.rs
new file mode 100644
index 0000000000..b6d38b2408
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/from_raw_arc.rs
@@ -0,0 +1,116 @@
+//! A "Manual Arc" which allows manually frobbing the reference count
+//!
+//! This module contains a copy of the `Arc` found in the standard library,
+//! stripped down to the bare bones of what we actually need. The reason this is
+//! done is for the ability to concretely know the memory layout of the `Inner`
+//! structure of the arc pointer itself (e.g. `ArcInner` in the standard
+//! library).
+//!
+//! We do some unsafe casting from `*mut OVERLAPPED` to a `FromRawArc<T>` to
+//! ensure that data lives for the length of an I/O operation, but this means
+//! that we have to know the layouts of the structures involved. This
+//! representation primarily guarantees that the data `T` is always at the front
+//! of the allocation behind the inner pointer.
+//!
+//! Note that we're missing out on various optimizations implemented in the
+//! standard library:
+//!
+//! * The size of `FromRawArc` is actually two words because of the drop flag
+//! * The compiler doesn't understand that the pointer in `FromRawArc` is never
+//! null, so `Option<FromRawArc<T>>` is not a nullable pointer.
+
+use std::ops::Deref;
+use std::mem;
+use std::sync::atomic::{self, AtomicUsize, Ordering};
+
+pub struct FromRawArc<T> {
+ _inner: *mut Inner<T>,
+}
+
+unsafe impl<T: Sync + Send> Send for FromRawArc<T> { }
+unsafe impl<T: Sync + Send> Sync for FromRawArc<T> { }
+
+#[repr(C)]
+struct Inner<T> {
+ data: T,
+ cnt: AtomicUsize,
+}
+
+impl<T> FromRawArc<T> {
+ pub fn new(data: T) -> FromRawArc<T> {
+ let x = Box::new(Inner {
+ data: data,
+ cnt: AtomicUsize::new(1),
+ });
+ FromRawArc { _inner: unsafe { mem::transmute(x) } }
+ }
+
+ pub unsafe fn from_raw(ptr: *mut T) -> FromRawArc<T> {
+ // Note that if we could use `mem::transmute` here to get a libstd Arc
+ // (guaranteed) then we could just use std::sync::Arc, but this is the
+ // crucial reason this currently exists.
+ FromRawArc { _inner: ptr as *mut Inner<T> }
+ }
+}
+
+impl<T> Clone for FromRawArc<T> {
+ fn clone(&self) -> FromRawArc<T> {
+ // Atomic ordering of Relaxed lifted from libstd, but the general idea
+ // is that you need synchronization to communicate this increment to
+ // another thread, so this itself doesn't need to be synchronized.
+ unsafe {
+ (*self._inner).cnt.fetch_add(1, Ordering::Relaxed);
+ }
+ FromRawArc { _inner: self._inner }
+ }
+}
+
+impl<T> Deref for FromRawArc<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &(*self._inner).data }
+ }
+}
+
+impl<T> Drop for FromRawArc<T> {
+ fn drop(&mut self) {
+ unsafe {
+ // Atomic orderings lifted from the standard library
+ if (*self._inner).cnt.fetch_sub(1, Ordering::Release) != 1 {
+ return
+ }
+ atomic::fence(Ordering::Acquire);
+ drop(mem::transmute::<_, Box<T>>(self._inner));
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::FromRawArc;
+
+ #[test]
+ fn smoke() {
+ let a = FromRawArc::new(1);
+ assert_eq!(*a, 1);
+ assert_eq!(*a.clone(), 1);
+ }
+
+ #[test]
+ fn drops() {
+ struct A<'a>(&'a mut bool);
+ impl<'a> Drop for A<'a> {
+ fn drop(&mut self) {
+ *self.0 = true;
+ }
+ }
+ let mut a = false;
+ {
+ let a = FromRawArc::new(A(&mut a));
+ let _ = a.clone();
+ assert!(!*a.0);
+ }
+ assert!(a);
+ }
+}
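A minimal sketch of the "loan a reference count" pattern the module docs describe, which is what the `mem::forget(self.clone())` / `overlapped2arc!` pairing in the TCP and UDP code relies on. Because `Inner<T>` is `repr(C)` with `data` first, a pointer to the data doubles as a pointer to the allocation; the function below is illustrative, not part of the vendored sources.

    // Sketch only: loan one reference count out as a raw pointer, then recover it.
    fn loan_and_recover() {
        let arc = FromRawArc::new(5u32);
        let raw = &*arc as *const u32 as *mut u32; // data sits at offset 0 of Inner<T>
        std::mem::forget(arc.clone());             // loan a count out, keeping the data alive
        let recovered = unsafe { FromRawArc::from_raw(raw) }; // take the loan back
        assert_eq!(*recovered, 5);
        // `arc` and `recovered` each own one count; both drop normally here.
    }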
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/mod.rs b/third_party/rust/mio-0.6.23/src/sys/windows/mod.rs
new file mode 100644
index 0000000000..9b9f054495
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/mod.rs
@@ -0,0 +1,193 @@
+//! Implementation of mio for Windows using IOCP
+//!
+//! This module uses I/O Completion Ports (IOCP) on Windows to implement mio's
+//! Unix epoll-like interface. Unfortunately these two I/O models are
+//! fundamentally incompatible:
+//!
+//! * IOCP is a completion-based model where work is submitted to the kernel and
+//! a program is notified later when the work finished.
+//! * epoll is a readiness-based model where the kernel is queried as to what
+//! work can be done, and afterwards the work is done.
+//!
+//! As a result, this implementation for Windows is much less "low level" than
+//! the Unix implementation of mio. This design decision was intentional,
+//! however.
+//!
+//! ## What is IOCP?
+//!
+//! The [official docs][docs] have a comprehensive explanation of what IOCP is,
+//! but at a high level it requires the following operations to be executed to
+//! perform some I/O:
+//!
+//! 1. A completion port is created
+//! 2. An I/O handle and a token is registered with this completion port
+//! 3. Some I/O is issued on the handle. This generally means that an API was
+//! invoked with a zeroed `OVERLAPPED` structure. The API will immediately
+//! return.
+//! 4. After some time, the application queries the I/O port for completed
+//! events. The port will return a pointer to the `OVERLAPPED` along with
+//! the token presented at registration time.
+//!
+//! Many I/O operations can be fired off before waiting on a port, and the port
+//! will block execution of the calling thread until an I/O event has completed
+//! (or a timeout has elapsed).
+//!
+//! Currently all of these low-level operations are housed in a separate `miow`
+//! crate to provide a 0-cost abstraction over IOCP. This crate uses that to
+//! implement all the fiddly bits so there are very few actual Windows API calls or
+//! `unsafe` blocks as a result.
+//!
+//! [docs]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198%28v=vs.85%29.aspx
+//!
+//! ## Safety of IOCP
+//!
+//! Unfortunately for us, IOCP is pretty unsafe in terms of Rust lifetimes and
+//! such. When an I/O operation is submitted to the kernel, it involves handing
+//! the kernel a few pointers like a buffer to read/write, an `OVERLAPPED`
+//! structure pointer, and perhaps some other buffers such as for socket
+//! addresses. These pointers all have to remain valid **for the entire I/O
+//! operation's duration**.
+//!
+//! There's no way to define a safe lifetime for these pointers/buffers over
+//! the span of an I/O operation, so we're forced to add a layer of abstraction
+//! (not 0-cost) to make these APIs safe. Currently this implementation
+//! basically just boxes everything up on the heap to give it a stable address
+//! and then keys off that most of the time.
+//!
+//! ## From completion to readiness
+//!
+//! Translating a completion-based model to a readiness-based model is also no
+//! easy task, and a significant portion of this implementation is managing this
+//! translation. The basic idea behind this implementation is to issue I/O
+//! operations preemptively and then translate their completions to an "I'm
+//! ready" event.
+//!
+//! For example, in the case of reading a `TcpSocket`, as soon as a socket is
+//! connected (or registered after an accept) a read operation is executed.
+//! While the read is in progress calls to `read` will return `WouldBlock`, and
+//! once the read is completed we translate the completion notification into a
+//! `readable` event. Once the internal buffer is drained (e.g. all data from it
+//! has been read) a read operation is re-issued.
+//!
+//! Write operations are a little different from reads, and the current
+//! implementation is to just schedule a write as soon as `write` is first
+//! called. While that write operation is in progress all future calls to
+//! `write` will return `WouldBlock`. Completion of the write then translates to
+//! a `writable` event. Note that this will probably want to add some layer of
+//! internal buffering in the future.
+//!
+//! ## Buffer Management
+//!
+//! As there's lots of I/O operations in flight at any one point in time,
+//! there's lots of live buffers that need to be juggled around (e.g. this
+//! implementation's own internal buffers).
+//!
+//! Currently all buffers are created for the I/O operation at hand and are then
+//! discarded when it completes (this is listed as future work below).
+//!
+//! ## Callback Management
+//!
+//! When the main event loop receives a notification that an I/O operation has
+//! completed, some work needs to be done to translate that to a set of events
+//! or perhaps some more I/O needs to be scheduled. For example after a
+//! `TcpStream` is connected it generates a writable event and also schedules a
+//! read.
+//!
+//! To manage all this the `Selector` uses the `OVERLAPPED` pointer from the
+//! completion status. The selector assumes that all `OVERLAPPED` pointers are
+//! actually pointers to the interior of a `selector::Overlapped` which means
+//! that right after the `OVERLAPPED` itself there's a function pointer. This
+//! function pointer is given the completion status as well as another callback
+//! to push events onto the selector.
+//!
+//! The callback for each I/O operation doesn't have any environment, so it
+//! relies on memory layout and unsafe casting to translate an `OVERLAPPED`
+//! pointer (or in this case a `selector::Overlapped` pointer) to a type of
+//! `FromRawArc<T>` (see module docs for why this type exists).
+//!
+//! ## Thread Safety
+//!
+//! Currently all of the I/O primitives make liberal use of `Arc` and `Mutex`
+//! as an implementation detail. The main reason for this is to ensure that the
+//! types are `Send` and `Sync`, but the implementations have not been stressed
+//! in multithreaded situations yet. As a result, there are bound to be
+//! functional surprises in using these concurrently.
+//!
+//! ## Future Work
+//!
+//! First up, let's take a look at unimplemented portions of this module:
+//!
+//! * The `PollOpt::level()` option is currently entirely unimplemented.
+//! * Each `EventLoop` currently owns its completion port, but this prevents an
+//! I/O handle from being added to multiple event loops (something that can be
+//! done on Unix). Additionally, it hinders event loops moving across threads.
+//! This should be solved by likely having a global `Selector` which all
+//! others then communicate with.
+//! * Although Unix sockets don't exist on Windows, there are named pipes and
+//! those should likely be bound here in a similar fashion to `TcpStream`.
+//!
+//! Next up, there are a few performance improvements and optimizations that can
+//! still be implemented
+//!
+//! * Buffer management right now is pretty bad: buffers are all just allocated
+//! right before an I/O operation and discarded right after. There should at
+//! least be some form of pooling or reuse of buffers.
+//! * No calls to `write` are internally buffered before being scheduled, which
+//! means that writing performance is abysmal compared to Unix. There should
+//! be some level of buffering of writes probably.
+
+use std::io;
+use std::os::windows::prelude::*;
+
+mod kernel32 {
+ pub use ::winapi::um::ioapiset::CancelIoEx;
+ pub use ::winapi::um::winbase::SetFileCompletionNotificationModes;
+}
+mod winapi {
+ pub use ::winapi::shared::minwindef::{TRUE, UCHAR};
+ pub use ::winapi::um::winnt::HANDLE;
+}
+
+mod awakener;
+#[macro_use]
+mod selector;
+mod tcp;
+mod udp;
+mod from_raw_arc;
+mod buffer_pool;
+
+pub use self::awakener::Awakener;
+pub use self::selector::{Events, Selector, Overlapped, Binding};
+pub use self::tcp::{TcpStream, TcpListener};
+pub use self::udp::UdpSocket;
+
+#[derive(Copy, Clone)]
+enum Family {
+ V4, V6,
+}
+
+unsafe fn cancel(socket: &AsRawSocket,
+ overlapped: &Overlapped) -> io::Result<()> {
+ let handle = socket.as_raw_socket() as winapi::HANDLE;
+ let ret = kernel32::CancelIoEx(handle, overlapped.as_mut_ptr());
+ if ret == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+}
+
+unsafe fn no_notify_on_instant_completion(handle: winapi::HANDLE) -> io::Result<()> {
+ // TODO: move those to winapi
+ const FILE_SKIP_COMPLETION_PORT_ON_SUCCESS: winapi::UCHAR = 1;
+ const FILE_SKIP_SET_EVENT_ON_HANDLE: winapi::UCHAR = 2;
+
+ let flags = FILE_SKIP_COMPLETION_PORT_ON_SUCCESS | FILE_SKIP_SET_EVENT_ON_HANDLE;
+
+ let r = kernel32::SetFileCompletionNotificationModes(handle, flags);
+ if r == winapi::TRUE {
+ Ok(())
+ } else {
+ Err(io::Error::last_os_error())
+ }
+}
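The four numbered steps in the module documentation above map onto the `miow` API roughly as follows. This is only an orientation sketch, not code from this module; the token value is illustrative and error handling is minimal.

    use std::io;
    use std::net::TcpStream;
    use miow::iocp::{CompletionPort, CompletionStatus};

    // Sketch only: the IOCP lifecycle described above.
    fn iocp_lifecycle(socket: &TcpStream) -> io::Result<()> {
        let port = CompletionPort::new(1)?;              // 1. create a completion port
        port.add_socket(42usize, socket)?;               // 2. register a handle with a token
        // 3. an overlapped operation (e.g. a read) would be issued here with a
        //    zeroed OVERLAPPED; the call returns immediately.
        let mut statuses = vec![CompletionStatus::zero(); 8];
        let done = port.get_many(&mut statuses, None)?;  // 4. block until completions arrive
        for status in done {
            let _ = (status.token(), status.bytes_transferred(), status.overlapped());
        }
        Ok(())
    }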
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/selector.rs b/third_party/rust/mio-0.6.23/src/sys/windows/selector.rs
new file mode 100644
index 0000000000..23b145acdd
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/selector.rs
@@ -0,0 +1,538 @@
+#![allow(deprecated)]
+
+use std::{fmt, io};
+use std::cell::UnsafeCell;
+use std::os::windows::prelude::*;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use std::time::Duration;
+
+use lazycell::AtomicLazyCell;
+
+use winapi::shared::winerror::WAIT_TIMEOUT;
+use winapi::um::minwinbase::{OVERLAPPED, OVERLAPPED_ENTRY};
+use miow;
+use miow::iocp::{CompletionPort, CompletionStatus};
+
+use event_imp::{Event, Evented, Ready};
+use poll::{self, Poll};
+use sys::windows::buffer_pool::BufferPool;
+use {Token, PollOpt};
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that was previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return with an error. This matches Windows behavior.
+static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+/// The guts of the Windows event loop, this is the struct which actually owns
+/// a completion port.
+///
+/// Internally this is just an `Arc`, and this allows handing out references to
+/// the internals to I/O handles registered on this selector. This is
+/// required to schedule I/O operations independently of being inside the event
+/// loop (e.g. when a call to `write` is seen we're not "in the event loop").
+pub struct Selector {
+ inner: Arc<SelectorInner>,
+}
+
+struct SelectorInner {
+ /// Unique identifier of the `Selector`
+ id: usize,
+
+ /// The actual completion port that's used to manage all I/O
+ port: CompletionPort,
+
+ /// A pool of buffers usable by this selector.
+ ///
+ /// Primitives will take buffers from this pool to perform I/O operations,
+ /// and once complete they'll be put back in.
+ buffers: Mutex<BufferPool>,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // offset by 1 to avoid choosing 0 as the id of a selector
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+
+ CompletionPort::new(0).map(|cp| {
+ Selector {
+ inner: Arc::new(SelectorInner {
+ id: id,
+ port: cp,
+ buffers: Mutex::new(BufferPool::new(256)),
+ }),
+ }
+ })
+ }
+
+ pub fn select(&self,
+ events: &mut Events,
+ awakener: Token,
+ timeout: Option<Duration>) -> io::Result<bool> {
+ trace!("select; timeout={:?}", timeout);
+
+ // Clear out the previous list of I/O events and get some more!
+ events.clear();
+
+ trace!("polling IOCP");
+ let n = match self.inner.port.get_many(&mut events.statuses, timeout) {
+ Ok(statuses) => statuses.len(),
+ Err(ref e) if e.raw_os_error() == Some(WAIT_TIMEOUT as i32) => 0,
+ Err(e) => return Err(e),
+ };
+
+ let mut ret = false;
+ for status in events.statuses[..n].iter() {
+ // This should only ever happen from the awakener, and we should
+ // only ever have one awakener right now, so assert as such.
+ if status.overlapped() as usize == 0 {
+ assert_eq!(status.token(), usize::from(awakener));
+ ret = true;
+ continue;
+ }
+
+ let callback = unsafe {
+ (*(status.overlapped() as *mut Overlapped)).callback
+ };
+
+ trace!("select; -> got overlapped");
+ callback(status.entry());
+ }
+
+ trace!("returning");
+ Ok(ret)
+ }
+
+ /// Gets a reference to the underlying `CompletionPort` structure.
+ pub fn port(&self) -> &CompletionPort {
+ &self.inner.port
+ }
+
+ /// Gets a new reference to this selector, although all underlying data
+ /// structures will refer to the same completion port.
+ pub fn clone_ref(&self) -> Selector {
+ Selector { inner: self.inner.clone() }
+ }
+
+ /// Return the `Selector`'s identifier
+ pub fn id(&self) -> usize {
+ self.inner.id
+ }
+}
+
+impl SelectorInner {
+ fn identical(&self, other: &SelectorInner) -> bool {
+ (self as *const SelectorInner) == (other as *const SelectorInner)
+ }
+}
+
+// A registration is stored in each I/O object which keeps track of how it is
+// associated with a `Selector` above.
+//
+// Once associated with a `Selector`, a registration can never be un-associated
+// (due to IOCP requirements). This is actually implemented through the
+// `poll::Registration` and `poll::SetReadiness` APIs to keep track of all the
+// level/edge/filtering business.
+/// A `Binding` is embedded in all I/O objects associated with a `Poll`
+/// object.
+///
+/// Each registration keeps track of which selector the I/O object is
+/// associated with, ensuring that implementations of `Evented` can be
+/// conformant for the various methods on Windows.
+///
+/// If you're working with custom IOCP-enabled objects then you'll want to
+/// ensure that one of these instances is stored in your object and used in the
+/// implementation of `Evented`.
+///
+/// For more information about how to use this see the `windows` module
+/// documentation in this crate.
+pub struct Binding {
+ selector: AtomicLazyCell<Arc<SelectorInner>>,
+}
+
+impl Binding {
+ /// Creates a new blank binding ready to be inserted into an I/O
+ /// object.
+ ///
+ /// Won't actually do anything until associated with a `Poll` loop.
+ pub fn new() -> Binding {
+ Binding { selector: AtomicLazyCell::new() }
+ }
+
+ /// Registers a new handle with the `Poll` specified, also assigning the
+ /// `token` specified.
+ ///
+ /// This function is intended to be used as part of `Evented::register` for
+ /// custom IOCP objects. It will add the specified handle to the internal
+ /// IOCP object with the provided `token`. All future events generated by
+ /// the handle provided will be received by the `Poll`'s internal IOCP
+ /// object.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe as the `Poll` instance has assumptions about
+ /// what the `OVERLAPPED` pointer used for each I/O operation looks like.
+ /// Specifically they must all be instances of the `Overlapped` type in
+ /// this crate. More information about this can be found on the
+ /// `windows` module in this crate.
+ pub unsafe fn register_handle(&self,
+ handle: &AsRawHandle,
+ token: Token,
+ poll: &Poll) -> io::Result<()> {
+ let selector = poll::selector(poll);
+
+ // Ignore errors, we'll see them on the next line.
+ drop(self.selector.fill(selector.inner.clone()));
+ self.check_same_selector(poll)?;
+
+ selector.inner.port.add_handle(usize::from(token), handle)
+ }
+
+ /// Same as `register_handle` but for sockets.
+ pub unsafe fn register_socket(&self,
+ handle: &AsRawSocket,
+ token: Token,
+ poll: &Poll) -> io::Result<()> {
+ let selector = poll::selector(poll);
+ drop(self.selector.fill(selector.inner.clone()));
+ self.check_same_selector(poll)?;
+ selector.inner.port.add_socket(usize::from(token), handle)
+ }
+
+ /// Reregisters the handle provided from the `Poll` provided.
+ ///
+ /// This is intended to be used as part of `Evented::reregister` but note
+ /// that this function does not currently reregister the provided handle
+ /// with the `poll` specified. IOCP has a special binding for changing the
+ /// token which has not yet been implemented. Instead this function should
+ /// be used to assert that the call to `reregister` happened on the same
+ /// `Poll` that was passed in to `register`.
+ ///
+ /// Eventually, though, the provided `handle` will be re-assigned to have
+ /// the token `token` on the given `poll` assuming that it's been
+ /// previously registered with it.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe for similar reasons to `register`. That is,
+ /// there may be pending I/O events and such which aren't handled correctly.
+ pub unsafe fn reregister_handle(&self,
+ _handle: &AsRawHandle,
+ _token: Token,
+ poll: &Poll) -> io::Result<()> {
+ self.check_same_selector(poll)
+ }
+
+ /// Same as `reregister_handle`, but for sockets.
+ pub unsafe fn reregister_socket(&self,
+ _socket: &AsRawSocket,
+ _token: Token,
+ poll: &Poll) -> io::Result<()> {
+ self.check_same_selector(poll)
+ }
+
+ /// Deregisters the handle provided from the `Poll` provided.
+ ///
+ /// This is intended to be used as part of `Evented::deregister` but note
+ /// that this function does not currently deregister the provided handle
+ /// from the `poll` specified. IOCP has a special binding for that which has
+ /// not yet been implemented. Instead this function should be used to assert
+ /// that the call to `deregister` happened on the same `Poll` that was
+ /// passed in to `register`.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe for similar reasons to `register`. That is,
+ /// there may be pending I/O events and such which aren't handled correctly.
+ pub unsafe fn deregister_handle(&self,
+ _handle: &AsRawHandle,
+ poll: &Poll) -> io::Result<()> {
+ self.check_same_selector(poll)
+ }
+
+ /// Same as `deregister_handle`, but for sockets.
+ pub unsafe fn deregister_socket(&self,
+ _socket: &AsRawSocket,
+ poll: &Poll) -> io::Result<()> {
+ self.check_same_selector(poll)
+ }
+
+ fn check_same_selector(&self, poll: &Poll) -> io::Result<()> {
+ let selector = poll::selector(poll);
+ match self.selector.borrow() {
+ Some(prev) if prev.identical(&selector.inner) => Ok(()),
+ Some(_) |
+ None => Err(other("socket already registered")),
+ }
+ }
+}
+
+impl fmt::Debug for Binding {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Binding")
+ .finish()
+ }
+}
+
+/// Helper struct used for TCP and UDP which bundles a `binding` with a
+/// `SetReadiness` handle.
+pub struct ReadyBinding {
+ binding: Binding,
+ readiness: Option<poll::SetReadiness>,
+}
+
+impl ReadyBinding {
+ /// Creates a new blank binding ready to be inserted into an I/O object.
+ ///
+ /// Won't actually do anything until associated with a `Selector` loop.
+ pub fn new() -> ReadyBinding {
+ ReadyBinding {
+ binding: Binding::new(),
+ readiness: None,
+ }
+ }
+
+ /// Returns whether this binding has been associated with a selector
+ /// yet.
+ pub fn registered(&self) -> bool {
+ self.readiness.is_some()
+ }
+
+ /// Acquires a buffer with at least `size` capacity.
+ ///
+ /// If associated with a selector, this will attempt to pull a buffer from
+ /// that buffer pool. If not associated with a selector, this will allocate
+ /// a fresh buffer.
+ pub fn get_buffer(&self, size: usize) -> Vec<u8> {
+ match self.binding.selector.borrow() {
+ Some(i) => i.buffers.lock().unwrap().get(size),
+ None => Vec::with_capacity(size),
+ }
+ }
+
+ /// Returns a buffer to this binding.
+ ///
+ /// If associated with a selector, this will push the buffer back into the
+ /// selector's pool of buffers. Otherwise this will just drop the buffer.
+ pub fn put_buffer(&self, buf: Vec<u8>) {
+ if let Some(i) = self.binding.selector.borrow() {
+ i.buffers.lock().unwrap().put(buf);
+ }
+ }
+
+ /// Sets the readiness of this I/O object to a particular `set`.
+ ///
+ /// This is later used to fill out and respond to requests to `poll`. Note
+ /// that this is all implemented through the `SetReadiness` structure in the
+ /// `poll` module.
+ pub fn set_readiness(&self, set: Ready) {
+ if let Some(ref i) = self.readiness {
+ trace!("set readiness to {:?}", set);
+ i.set_readiness(set).expect("event loop disappeared?");
+ }
+ }
+
+ /// Queries what the current readiness of this I/O object is.
+ ///
+ /// This is what's being used to generate events returned by `poll`.
+ pub fn readiness(&self) -> Ready {
+ match self.readiness {
+ Some(ref i) => i.readiness(),
+ None => Ready::empty(),
+ }
+ }
+
+ /// Implementation of the `Evented::register` function essentially.
+ ///
+ /// Returns an error if we're already registered with another event loop,
+ /// and otherwise just reassociates ourselves with the event loop to
+ /// possibly change tokens.
+ pub fn register_socket(&mut self,
+ socket: &AsRawSocket,
+ poll: &Poll,
+ token: Token,
+ events: Ready,
+ opts: PollOpt,
+ registration: &Mutex<Option<poll::Registration>>)
+ -> io::Result<()> {
+ trace!("register {:?} {:?}", token, events);
+ unsafe {
+ self.binding.register_socket(socket, token, poll)?;
+ }
+
+ let (r, s) = poll::new_registration(poll, token, events, opts);
+ self.readiness = Some(s);
+ *registration.lock().unwrap() = Some(r);
+ Ok(())
+ }
+
+ /// Implementation of `Evented::reregister` function.
+ pub fn reregister_socket(&mut self,
+ socket: &AsRawSocket,
+ poll: &Poll,
+ token: Token,
+ events: Ready,
+ opts: PollOpt,
+ registration: &Mutex<Option<poll::Registration>>)
+ -> io::Result<()> {
+ trace!("reregister {:?} {:?}", token, events);
+ unsafe {
+ self.binding.reregister_socket(socket, token, poll)?;
+ }
+
+ registration.lock().unwrap()
+ .as_mut().unwrap()
+ .reregister(poll, token, events, opts)
+ }
+
+ /// Implementation of the `Evented::deregister` function.
+ ///
+ /// Doesn't allow registration with another event loop, just shuts down
+ /// readiness notifications and such.
+ pub fn deregister(&mut self,
+ socket: &AsRawSocket,
+ poll: &Poll,
+ registration: &Mutex<Option<poll::Registration>>)
+ -> io::Result<()> {
+ trace!("deregistering");
+ unsafe {
+ self.binding.deregister_socket(socket, poll)?;
+ }
+
+ registration.lock().unwrap()
+ .as_ref().unwrap()
+ .deregister(poll)
+ }
+}
+
+fn other(s: &str) -> io::Error {
+ io::Error::new(io::ErrorKind::Other, s)
+}
+
+#[derive(Debug)]
+pub struct Events {
+ /// Raw I/O event completions are filled in here by the call to `get_many`
+ /// on the completion port above. These are then processed to run callbacks
+ /// which figure out what to do after the event is done.
+ statuses: Box<[CompletionStatus]>,
+
+ /// Literal events returned by `get` to the upwards `EventLoop`. This file
+ /// doesn't really modify this (except for the awakener), instead almost all
+ /// events are filled in by the `ReadinessQueue` from the `poll` module.
+ events: Vec<Event>,
+}
+
+impl Events {
+ pub fn with_capacity(cap: usize) -> Events {
+ // Note that it's possible for the output `events` to grow beyond the
+ // capacity as it can also include deferred events, but that's certainly
+ // not the end of the world!
+ Events {
+ statuses: vec![CompletionStatus::zero(); cap].into_boxed_slice(),
+ events: Vec::with_capacity(cap),
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ pub fn get(&self, idx: usize) -> Option<Event> {
+ self.events.get(idx).map(|e| *e)
+ }
+
+ pub fn push_event(&mut self, event: Event) {
+ self.events.push(event);
+ }
+
+ pub fn clear(&mut self) {
+ self.events.truncate(0);
+ }
+}
+
+macro_rules! overlapped2arc {
+ ($e:expr, $t:ty, $($field:ident).+) => (
+ #[allow(deref_nullptr)]
+ {
+ let offset = offset_of!($t, $($field).+);
+ debug_assert!(offset < mem::size_of::<$t>());
+ FromRawArc::from_raw(($e as usize - offset) as *mut $t)
+ }
+ )
+}
+
+macro_rules! offset_of {
+ ($t:ty, $($field:ident).+) => (
+ &(*(0 as *const $t)).$($field).+ as *const _ as usize
+ )
+}
+
+// See sys::windows module docs for why this exists.
+//
+// The gist of it is that `Selector` assumes that all `OVERLAPPED` pointers are
+// actually inside one of these structures so it can use the `Callback` stored
+// right after it.
+//
+// We use repr(C) here to ensure that we can assume the overlapped pointer is
+// at the start of the structure so we can just do a cast.
+/// A wrapper around an internal instance over `miow::Overlapped` which is in
+/// turn a wrapper around the Windows type `OVERLAPPED`.
+///
+/// This type is required to be used for all IOCP operations on handles that are
+/// registered with an event loop. The event loop will receive notifications
+/// over `OVERLAPPED` pointers that have completed, and it will cast that
+/// pointer to a pointer to this structure and invoke the associated callback.
+#[repr(C)]
+pub struct Overlapped {
+ inner: UnsafeCell<miow::Overlapped>,
+ callback: fn(&OVERLAPPED_ENTRY),
+}
+
+impl Overlapped {
+ /// Creates a new `Overlapped` which will invoke the provided `cb` callback
+ /// whenever it's triggered.
+ ///
+ /// The returned `Overlapped` must be used as the `OVERLAPPED` passed to all
+ /// I/O operations that are registered with mio's event loop. When the I/O
+ /// operation associated with an `OVERLAPPED` pointer completes the event
+ /// loop will invoke the function pointer provided by `cb`.
+ pub fn new(cb: fn(&OVERLAPPED_ENTRY)) -> Overlapped {
+ Overlapped {
+ inner: UnsafeCell::new(miow::Overlapped::zero()),
+ callback: cb,
+ }
+ }
+
+ /// Get the underlying `Overlapped` instance as a raw pointer.
+ ///
+ /// This can be useful when only a shared borrow is held and the overlapped
+ /// pointer needs to be passed down to winapi.
+ pub fn as_mut_ptr(&self) -> *mut OVERLAPPED {
+ unsafe {
+ (*self.inner.get()).raw()
+ }
+ }
+}
+
+impl fmt::Debug for Overlapped {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Overlapped")
+ .finish()
+ }
+}
+
+// Overlapped's APIs are marked as unsafe as they must be used with caution to
+// ensure thread safety. The structure itself is safe to send across threads.
+unsafe impl Send for Overlapped {}
+unsafe impl Sync for Overlapped {}
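As the `Binding` documentation above suggests, a custom IOCP-backed object embeds one `Binding` plus one `Overlapped` per kind of in-flight operation. A minimal structural sketch follows; the type, field, and callback names are illustrative and the callback body is omitted.

    use winapi::um::minwinbase::OVERLAPPED_ENTRY;

    // Sketch only: the general shape of an object that could implement Evented.
    struct MyHandle {
        binding: Binding,
        read: Overlapped, // passed as the OVERLAPPED for every read on this handle
    }

    fn read_finished(_entry: &OVERLAPPED_ENTRY) {
        // Recover the owning object from the OVERLAPPED pointer (see the
        // overlapped2arc! macro above) and translate the completion into readiness.
    }

    impl MyHandle {
        fn new() -> MyHandle {
            MyHandle {
                binding: Binding::new(),
                read: Overlapped::new(read_finished),
            }
        }
    }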
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/tcp.rs b/third_party/rust/mio-0.6.23/src/sys/windows/tcp.rs
new file mode 100644
index 0000000000..236e7866a6
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/tcp.rs
@@ -0,0 +1,853 @@
+use std::fmt;
+use std::io::{self, Read, ErrorKind};
+use std::mem;
+use std::net::{self, SocketAddr, Shutdown};
+use std::os::windows::prelude::*;
+use std::sync::{Mutex, MutexGuard};
+use std::time::Duration;
+
+use miow::iocp::CompletionStatus;
+use miow::net::*;
+use net2::{TcpBuilder, TcpStreamExt as Net2TcpExt};
+use winapi::um::minwinbase::OVERLAPPED_ENTRY;
+use winapi::um::winnt::HANDLE;
+use iovec::IoVec;
+
+use {poll, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use sys::windows::from_raw_arc::FromRawArc;
+use sys::windows::selector::{Overlapped, ReadyBinding};
+use sys::windows::Family;
+
+pub struct TcpStream {
+ /// Separately stored implementation to ensure that the `Drop`
+ /// implementation on this type is only executed when it's actually dropped
+ /// (many clones of this `imp` are made).
+ imp: StreamImp,
+ registration: Mutex<Option<poll::Registration>>,
+}
+
+pub struct TcpListener {
+ imp: ListenerImp,
+ registration: Mutex<Option<poll::Registration>>,
+}
+
+#[derive(Clone)]
+struct StreamImp {
+ /// A stable address and synchronized access for all internals. This serves
+ /// to ensure that all `Overlapped` pointers are valid for a long period of
+ /// time as well as allowing completion callbacks to have access to the
+ /// internals without having ownership.
+ ///
+ /// Note that the reference count also allows us "loan out" copies to
+ /// completion ports while I/O is running to guarantee that this stays alive
+ /// until the I/O completes. You'll notice a number of calls to
+ /// `mem::forget` below, and these only happen on successful scheduling of
+ /// I/O and are paired with `overlapped2arc!` macro invocations in the
+ /// completion callbacks (to have a decrement match the increment).
+ inner: FromRawArc<StreamIo>,
+}
+
+#[derive(Clone)]
+struct ListenerImp {
+ inner: FromRawArc<ListenerIo>,
+}
+
+struct StreamIo {
+ inner: Mutex<StreamInner>,
+ read: Overlapped, // also used for connect
+ write: Overlapped,
+ socket: net::TcpStream,
+}
+
+struct ListenerIo {
+ inner: Mutex<ListenerInner>,
+ accept: Overlapped,
+ family: Family,
+ socket: net::TcpListener,
+}
+
+struct StreamInner {
+ iocp: ReadyBinding,
+ deferred_connect: Option<SocketAddr>,
+ read: State<(), ()>,
+ write: State<(Vec<u8>, usize), (Vec<u8>, usize)>,
+ /// whether we are instantly notified of success
+ /// (FILE_SKIP_COMPLETION_PORT_ON_SUCCESS,
+ /// without a roundtrip through the event loop)
+ instant_notify: bool,
+}
+
+struct ListenerInner {
+ iocp: ReadyBinding,
+ accept: State<net::TcpStream, (net::TcpStream, SocketAddr)>,
+ accept_buf: AcceptAddrsBuf,
+}
+
+enum State<T, U> {
+ Empty, // no I/O operation in progress
+ Pending(T), // an I/O operation is in progress
+ Ready(U), // I/O has finished with this value
+ Error(io::Error), // there was an I/O error
+}
+
+impl TcpStream {
+ fn new(socket: net::TcpStream,
+ deferred_connect: Option<SocketAddr>) -> TcpStream {
+ TcpStream {
+ registration: Mutex::new(None),
+ imp: StreamImp {
+ inner: FromRawArc::new(StreamIo {
+ read: Overlapped::new(read_done),
+ write: Overlapped::new(write_done),
+ socket: socket,
+ inner: Mutex::new(StreamInner {
+ iocp: ReadyBinding::new(),
+ deferred_connect: deferred_connect,
+ read: State::Empty,
+ write: State::Empty,
+ instant_notify: false,
+ }),
+ }),
+ },
+ }
+ }
+
+ pub fn connect(socket: net::TcpStream, addr: &SocketAddr)
+ -> io::Result<TcpStream> {
+ socket.set_nonblocking(true)?;
+ Ok(TcpStream::new(socket, Some(*addr)))
+ }
+
+ pub fn from_stream(stream: net::TcpStream) -> TcpStream {
+ TcpStream::new(stream, None)
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.imp.inner.socket.peer_addr()
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.imp.inner.socket.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpStream> {
+ self.imp.inner.socket.try_clone().map(|s| TcpStream::new(s, None))
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.imp.inner.socket.shutdown(how)
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_nodelay(nodelay)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.imp.inner.socket.nodelay()
+ }
+
+ pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.imp.inner.socket.set_recv_buffer_size(size)
+ }
+
+ pub fn recv_buffer_size(&self) -> io::Result<usize> {
+ self.imp.inner.socket.recv_buffer_size()
+ }
+
+ pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.imp.inner.socket.set_send_buffer_size(size)
+ }
+
+ pub fn send_buffer_size(&self) -> io::Result<usize> {
+ self.imp.inner.socket.send_buffer_size()
+ }
+
+ pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+ self.imp.inner.socket.set_keepalive(keepalive)
+ }
+
+ pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+ self.imp.inner.socket.keepalive()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.imp.inner.socket.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.imp.inner.socket.ttl()
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.imp.inner.socket.only_v6()
+ }
+
+ pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+ Net2TcpExt::set_linger(&self.imp.inner.socket, dur)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ Net2TcpExt::linger(&self.imp.inner.socket)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ if let Some(e) = self.imp.inner.socket.take_error()? {
+ return Ok(Some(e))
+ }
+
+ // If the syscall didn't return anything then also check to see if we've
+ // squirreled away an error elsewhere for example as part of a connect
+ // operation.
+ //
+ // Typically this is used like so:
+ //
+ // 1. A `connect` is issued
+ // 2. Wait for the socket to be writable
+ // 3. Call `take_error` to see if the connect succeeded.
+ //
+ // Right now the `connect` operation finishes in `read_done` below and
+ // will fill in `State::Error` in the `read` slot if it fails, so we
+ // extract that here.
+ let mut me = self.inner();
+ match mem::replace(&mut me.read, State::Empty) {
+ State::Error(e) => {
+ self.imp.schedule_read(&mut me);
+ Ok(Some(e))
+ }
+ other => {
+ me.read = other;
+ Ok(None)
+ }
+ }
+ }
+
+ fn inner(&self) -> MutexGuard<StreamInner> {
+ self.imp.inner()
+ }
+
+ fn before_read(&self) -> io::Result<MutexGuard<StreamInner>> {
+ let mut me = self.inner();
+
+ match me.read {
+ // Empty == we're not associated yet, and if we're pending then
+ // these are both cases where we return "would block"
+ State::Empty |
+ State::Pending(()) => return Err(io::ErrorKind::WouldBlock.into()),
+
+ // If we got a delayed error as part of a `read_overlapped` below,
+ // return that here. Also schedule another read in case it was
+ // transient.
+ State::Error(_) => {
+ let e = match mem::replace(&mut me.read, State::Empty) {
+ State::Error(e) => e,
+ _ => panic!(),
+ };
+ self.imp.schedule_read(&mut me);
+ return Err(e)
+ }
+
+ // If we're ready for a read then some previous 0-byte read has
+ // completed. In that case the OS's socket buffer has something for
+ // us, so we just keep pulling out bytes while we can in the loop
+ // below.
+ State::Ready(()) => {}
+ }
+
+ Ok(me)
+ }
+
+ fn post_register(&self, interest: Ready, me: &mut StreamInner) {
+ if interest.is_readable() {
+ self.imp.schedule_read(me);
+ }
+
+ // At least with epoll, if a socket is registered with an interest in
+ // writing and it's immediately writable then a writable event is
+ // generated immediately, so do so here.
+ if interest.is_writable() {
+ if let State::Empty = me.write {
+ self.imp.add_readiness(me, Ready::writable());
+ }
+ }
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ match IoVec::from_bytes_mut(buf) {
+ Some(vec) => self.readv(&mut [vec]),
+ None => Ok(0),
+ }
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut me = self.before_read()?;
+
+ match (&self.imp.inner.socket).peek(buf) {
+ Ok(n) => Ok(n),
+ Err(e) => {
+ me.read = State::Empty;
+ self.imp.schedule_read(&mut me);
+ Err(e)
+ }
+ }
+ }
+
+ pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+ let mut me = self.before_read()?;
+
+ // TODO: Does WSARecv work on nonblocking sockets? We ideally want to
+ // call that instead of looping over all the buffers and calling
+ // `recv` on each buffer. I'm not sure though if an overlapped
+ // socket in nonblocking mode would work with that use case,
+ // however, so for now we just call `recv`.
+
+ let mut amt = 0;
+ for buf in bufs {
+ match (&self.imp.inner.socket).read(buf) {
+ // If we did a partial read, then return what we've read so far
+ Ok(n) if n < buf.len() => return Ok(amt + n),
+
+ // Otherwise filled this buffer entirely, so try to fill the
+ // next one as well.
+ Ok(n) => amt += n,
+
+ // If we hit an error then things get tricky if we've already
+ // read some data. If the error is "would block" then we just
+ // return the data we've read so far while scheduling another
+ // 0-byte read.
+ //
+ // If we've read data and the error kind is not "would block",
+ // then we stash away the error to get returned later and return
+ // the data that we've read.
+ //
+ // Finally if we haven't actually read any data we just
+ // reschedule a 0-byte read to happen again and then return the
+ // error upwards.
+ Err(e) => {
+ if amt > 0 && e.kind() == io::ErrorKind::WouldBlock {
+ me.read = State::Empty;
+ self.imp.schedule_read(&mut me);
+ return Ok(amt)
+ } else if amt > 0 {
+ me.read = State::Error(e);
+ return Ok(amt)
+ } else {
+ me.read = State::Empty;
+ self.imp.schedule_read(&mut me);
+ return Err(e)
+ }
+ }
+ }
+ }
+
+ Ok(amt)
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ match IoVec::from_bytes(buf) {
+ Some(vec) => self.writev(&[vec]),
+ None => Ok(0),
+ }
+ }
+
+ pub fn writev(&self, bufs: &[&IoVec]) -> io::Result<usize> {
+ let mut me = self.inner();
+ let me = &mut *me;
+
+ match mem::replace(&mut me.write, State::Empty) {
+ State::Empty => {}
+ State::Error(e) => return Err(e),
+ other => {
+ me.write = other;
+ return Err(io::ErrorKind::WouldBlock.into())
+ }
+ }
+
+ if !me.iocp.registered() {
+ return Err(io::ErrorKind::WouldBlock.into())
+ }
+
+ if bufs.is_empty() {
+ return Ok(0)
+ }
+
+ let len = bufs.iter().map(|b| b.len()).fold(0, |a, b| a + b);
+ let mut intermediate = me.iocp.get_buffer(len);
+ for buf in bufs {
+ intermediate.extend_from_slice(buf);
+ }
+ self.imp.schedule_write(intermediate, 0, me);
+ Ok(len)
+ }
+
+ pub fn flush(&self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl StreamImp {
+ fn inner(&self) -> MutexGuard<StreamInner> {
+ self.inner.inner.lock().unwrap()
+ }
+
+ fn schedule_connect(&self, addr: &SocketAddr) -> io::Result<()> {
+ unsafe {
+ trace!("scheduling a connect");
+ self.inner.socket.connect_overlapped(addr, &[], self.inner.read.as_mut_ptr())?;
+ }
+ // see docs above on StreamImp.inner for rationale on forget
+ mem::forget(self.clone());
+ Ok(())
+ }
+
+ /// Schedule a read to happen on this socket, enqueuing us to receive a
+ /// notification when a read is ready.
+ ///
+ /// Note that this does *not* work with a buffer. When reading a TCP stream
+ /// we actually read into a 0-byte buffer so Windows will send us a
+ /// notification when the socket is otherwise ready for reading. This allows
+ /// us to avoid buffer allocations for in-flight reads.
+ fn schedule_read(&self, me: &mut StreamInner) {
+ match me.read {
+ State::Empty => {}
+ State::Ready(_) | State::Error(_) => {
+ self.add_readiness(me, Ready::readable());
+ return;
+ }
+ _ => return,
+ }
+
+ me.iocp.set_readiness(me.iocp.readiness() - Ready::readable());
+
+ trace!("scheduling a read");
+ let res = unsafe {
+ self.inner.socket.read_overlapped(&mut [], self.inner.read.as_mut_ptr())
+ };
+ match res {
+ // Note that `Ok(true)` means that this completed immediately and
+ // our socket is readable. This typically means that the caller of
+ // this function (likely `read` above) can try again as an
+ // optimization and return bytes quickly.
+ //
+ // Normally, though, although the read completed immediately
+ // there's still an IOCP completion packet enqueued that we're going
+ // to receive.
+ //
+ // You can configure this behavior (miow) with
+ // SetFileCompletionNotificationModes to indicate that `Ok(true)`
+ // does **not** enqueue a completion packet. (This is the case
+ // for me.instant_notify)
+ //
+ // Note that libuv apparently has scary code to work around bugs in
+ // `WSARecv` for UDP sockets on handles which have had the
+ // `SetFileCompletionNotificationModes` function called on them;
+ // worth looking into!
+ Ok(Some(_)) if me.instant_notify => {
+ me.read = State::Ready(());
+ self.add_readiness(me, Ready::readable());
+ }
+ Ok(_) => {
+ // see docs above on StreamImp.inner for rationale on forget
+ me.read = State::Pending(());
+ mem::forget(self.clone());
+ }
+ Err(e) => {
+ me.read = State::Error(e);
+ self.add_readiness(me, Ready::readable());
+ }
+ }
+ }
+
+ /// Similar to `schedule_read`, except that this issues, well, writes.
+ ///
+ /// This function will continually attempt to write the entire contents of
+ /// the buffer `buf` until they have all been written. The `pos` argument is
+ /// the current offset within the buffer up to which the contents have
+ /// already been written.
+ ///
+ /// A new writable event (e.g. allowing another write) will only happen once
+ /// the buffer has been written completely (or hit an error).
+ fn schedule_write(&self,
+ buf: Vec<u8>,
+ mut pos: usize,
+ me: &mut StreamInner) {
+
+ // About to write, clear any pending level triggered events
+ me.iocp.set_readiness(me.iocp.readiness() - Ready::writable());
+
+ loop {
+ trace!("scheduling a write of {} bytes", buf[pos..].len());
+ let ret = unsafe {
+ self.inner.socket.write_overlapped(&buf[pos..], self.inner.write.as_mut_ptr())
+ };
+ match ret {
+ Ok(Some(transferred_bytes)) if me.instant_notify => {
+ trace!("done immediately with {} bytes", transferred_bytes);
+ if transferred_bytes == buf.len() - pos {
+ self.add_readiness(me, Ready::writable());
+ me.write = State::Empty;
+ break;
+ }
+ pos += transferred_bytes;
+ }
+ Ok(_) => {
+ trace!("scheduled for later");
+ // see docs above on StreamImp.inner for rationale on forget
+ me.write = State::Pending((buf, pos));
+ mem::forget(self.clone());
+ break;
+ }
+ Err(e) => {
+ trace!("write error: {}", e);
+ me.write = State::Error(e);
+ self.add_readiness(me, Ready::writable());
+ me.iocp.put_buffer(buf);
+ break;
+ }
+ }
+ }
+ }
+
+ /// Pushes an event for this socket onto the selector it's registered for.
+ ///
+ /// When an event is generated on this socket, if it happened after the
+ /// socket was closed then we don't want to actually push the event onto our
+ /// selector as otherwise it's just a spurious notification.
+ fn add_readiness(&self, me: &mut StreamInner, set: Ready) {
+ me.iocp.set_readiness(set | me.iocp.readiness());
+ }
+}
+
+fn read_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ let me2 = StreamImp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), StreamIo, read) },
+ };
+
+ let mut me = me2.inner();
+ match mem::replace(&mut me.read, State::Empty) {
+ State::Pending(()) => {
+ trace!("finished a read: {}", status.bytes_transferred());
+ assert_eq!(status.bytes_transferred(), 0);
+ me.read = State::Ready(());
+ return me2.add_readiness(&mut me, Ready::readable())
+ }
+ s => me.read = s,
+ }
+
+ // If a read didn't complete, then the connect must have just finished.
+ trace!("finished a connect");
+
+ // By guarding with socket.result(), we ensure that a connection
+ // was successfully made before performing operations requiring a
+ // connected socket.
+ match unsafe { me2.inner.socket.result(status.overlapped()) }
+ .and_then(|_| me2.inner.socket.connect_complete())
+ {
+ Ok(()) => {
+ me2.add_readiness(&mut me, Ready::writable());
+ me2.schedule_read(&mut me);
+ }
+ Err(e) => {
+ me2.add_readiness(&mut me, Ready::readable() | Ready::writable());
+ me.read = State::Error(e);
+ }
+ }
+}
+
+fn write_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ trace!("finished a write {}", status.bytes_transferred());
+ let me2 = StreamImp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), StreamIo, write) },
+ };
+ let mut me = me2.inner();
+ let (buf, pos) = match mem::replace(&mut me.write, State::Empty) {
+ State::Pending(pair) => pair,
+ _ => unreachable!(),
+ };
+ let new_pos = pos + (status.bytes_transferred() as usize);
+ if new_pos == buf.len() {
+ me2.add_readiness(&mut me, Ready::writable());
+ } else {
+ me2.schedule_write(buf, new_pos, &mut me);
+ }
+}
+
+impl Evented for TcpStream {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.register_socket(&self.imp.inner.socket, poll, token,
+ interest, opts, &self.registration)?;
+
+ unsafe {
+ super::no_notify_on_instant_completion(self.imp.inner.socket.as_raw_socket() as HANDLE)?;
+ me.instant_notify = true;
+ }
+
+ // If we were connected before being registered, process that request
+ // here and go along our merry way. Note that the callback for a
+ // successful connect will worry about generating writable/readable
+ // events and scheduling a new read.
+ if let Some(addr) = me.deferred_connect.take() {
+ return self.imp.schedule_connect(&addr).map(|_| ())
+ }
+ self.post_register(interest, &mut me);
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.reregister_socket(&self.imp.inner.socket, poll, token,
+ interest, opts, &self.registration)?;
+ self.post_register(interest, &mut me);
+ Ok(())
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner().iocp.deregister(&self.imp.inner.socket,
+ poll, &self.registration)
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("TcpStream")
+ .finish()
+ }
+}
+
+impl Drop for TcpStream {
+ fn drop(&mut self) {
+ // If we're still internally reading, we're no longer interested. Note
+ // though that we don't cancel any writes which may have been issued to
+ // preserve the same semantics as Unix.
+ //
+ // Note that "Empty" here may mean that a connect is pending, so we
+ // cancel even if that happens as well.
+ unsafe {
+ match self.inner().read {
+ State::Pending(_) | State::Empty => {
+ trace!("cancelling active TCP read");
+ drop(super::cancel(&self.imp.inner.socket,
+ &self.imp.inner.read));
+ }
+ State::Ready(_) | State::Error(_) => {}
+ }
+ }
+ }
+}
+
+impl TcpListener {
+ pub fn new(socket: net::TcpListener)
+ -> io::Result<TcpListener> {
+ let addr = socket.local_addr()?;
+ Ok(TcpListener::new_family(socket, match addr {
+ SocketAddr::V4(..) => Family::V4,
+ SocketAddr::V6(..) => Family::V6,
+ }))
+ }
+
+ fn new_family(socket: net::TcpListener, family: Family) -> TcpListener {
+ TcpListener {
+ registration: Mutex::new(None),
+ imp: ListenerImp {
+ inner: FromRawArc::new(ListenerIo {
+ accept: Overlapped::new(accept_done),
+ family: family,
+ socket: socket,
+ inner: Mutex::new(ListenerInner {
+ iocp: ReadyBinding::new(),
+ accept: State::Empty,
+ accept_buf: AcceptAddrsBuf::new(),
+ }),
+ }),
+ },
+ }
+ }
+
+ pub fn accept(&self) -> io::Result<(net::TcpStream, SocketAddr)> {
+ let mut me = self.inner();
+
+ let ret = match mem::replace(&mut me.accept, State::Empty) {
+ State::Empty => return Err(io::ErrorKind::WouldBlock.into()),
+ State::Pending(t) => {
+ me.accept = State::Pending(t);
+ return Err(io::ErrorKind::WouldBlock.into());
+ }
+ State::Ready((s, a)) => Ok((s, a)),
+ State::Error(e) => Err(e),
+ };
+
+ self.imp.schedule_accept(&mut me);
+
+ return ret
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.imp.inner.socket.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<TcpListener> {
+ self.imp.inner.socket.try_clone().map(|s| {
+ TcpListener::new_family(s, self.imp.inner.family)
+ })
+ }
+
+ #[allow(deprecated)]
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_only_v6(only_v6)
+ }
+
+ #[allow(deprecated)]
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.imp.inner.socket.only_v6()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.imp.inner.socket.set_ttl(ttl)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.imp.inner.socket.ttl()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.imp.inner.socket.take_error()
+ }
+
+ fn inner(&self) -> MutexGuard<ListenerInner> {
+ self.imp.inner()
+ }
+}
+
+impl ListenerImp {
+ fn inner(&self) -> MutexGuard<ListenerInner> {
+ self.inner.inner.lock().unwrap()
+ }
+
+ fn schedule_accept(&self, me: &mut ListenerInner) {
+ match me.accept {
+ State::Empty => {}
+ _ => return
+ }
+
+ me.iocp.set_readiness(me.iocp.readiness() - Ready::readable());
+
+ let res = match self.inner.family {
+ Family::V4 => TcpBuilder::new_v4(),
+ Family::V6 => TcpBuilder::new_v6(),
+ }
+ .and_then(|builder| builder.to_tcp_stream())
+ .and_then(|stream| unsafe {
+ trace!("scheduling an accept");
+ self.inner
+ .socket
+ .accept_overlapped(&stream, &mut me.accept_buf, self.inner.accept.as_mut_ptr())
+ .map(|x| (stream, x))
+ });
+ match res {
+ Ok((socket, _)) => {
+ // see docs above on StreamImp.inner for rationale on forget
+ me.accept = State::Pending(socket);
+ mem::forget(self.clone());
+ }
+ Err(e) => {
+ me.accept = State::Error(e);
+ self.add_readiness(me, Ready::readable());
+ }
+ }
+ }
+
+ // See comments in StreamImp::push
+ fn add_readiness(&self, me: &mut ListenerInner, set: Ready) {
+ me.iocp.set_readiness(set | me.iocp.readiness());
+ }
+}
+
+fn accept_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ let me2 = ListenerImp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), ListenerIo, accept) },
+ };
+
+ let mut me = me2.inner();
+ let socket = match mem::replace(&mut me.accept, State::Empty) {
+ State::Pending(s) => s,
+ _ => unreachable!(),
+ };
+ trace!("finished an accept");
+ let result = me2.inner.socket.accept_complete(&socket).and_then(|()| {
+ me.accept_buf.parse(&me2.inner.socket)
+ }).and_then(|buf| {
+ buf.remote().ok_or_else(|| {
+ io::Error::new(ErrorKind::Other, "could not obtain remote address")
+ })
+ });
+ me.accept = match result {
+ Ok(remote_addr) => State::Ready((socket, remote_addr)),
+ Err(e) => State::Error(e),
+ };
+ me2.add_readiness(&mut me, Ready::readable());
+}
+
+impl Evented for TcpListener {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.register_socket(&self.imp.inner.socket, poll, token,
+ interest, opts, &self.registration)?;
+
+ unsafe {
+ super::no_notify_on_instant_completion(self.imp.inner.socket.as_raw_socket() as HANDLE)?;
+ }
+
+ self.imp.schedule_accept(&mut me);
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.reregister_socket(&self.imp.inner.socket, poll, token,
+ interest, opts, &self.registration)?;
+ self.imp.schedule_accept(&mut me);
+ Ok(())
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner().iocp.deregister(&self.imp.inner.socket,
+ poll, &self.registration)
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("TcpListener")
+ .finish()
+ }
+}
+
+impl Drop for TcpListener {
+ fn drop(&mut self) {
+ // If we're still internally reading, we're no longer interested.
+ unsafe {
+ match self.inner().accept {
+ State::Pending(_) => {
+ trace!("cancelling active TCP accept");
+ drop(super::cancel(&self.imp.inner.socket,
+ &self.imp.inner.accept));
+ }
+ State::Empty |
+ State::Ready(_) |
+ State::Error(_) => {}
+ }
+ }
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/sys/windows/udp.rs b/third_party/rust/mio-0.6.23/src/sys/windows/udp.rs
new file mode 100644
index 0000000000..f5ea96c324
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/sys/windows/udp.rs
@@ -0,0 +1,414 @@
+//! UDP for IOCP
+//!
+//! Note that most of this module is quite similar to the TCP module, so if
+//! something seems odd you may also want to try the docs over there.
+
+use std::fmt;
+use std::io::prelude::*;
+use std::io;
+use std::mem;
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+use std::sync::{Mutex, MutexGuard};
+
+#[allow(unused_imports)]
+use net2::{UdpBuilder, UdpSocketExt};
+use winapi::shared::winerror::WSAEMSGSIZE;
+use winapi::um::minwinbase::OVERLAPPED_ENTRY;
+use miow::iocp::CompletionStatus;
+use miow::net::SocketAddrBuf;
+use miow::net::UdpSocketExt as MiowUdpSocketExt;
+
+use {poll, Ready, Poll, PollOpt, Token};
+use event::Evented;
+use sys::windows::from_raw_arc::FromRawArc;
+use sys::windows::selector::{Overlapped, ReadyBinding};
+
+pub struct UdpSocket {
+ imp: Imp,
+ registration: Mutex<Option<poll::Registration>>,
+}
+
+#[derive(Clone)]
+struct Imp {
+ inner: FromRawArc<Io>,
+}
+
+struct Io {
+ read: Overlapped,
+ write: Overlapped,
+ socket: net::UdpSocket,
+ inner: Mutex<Inner>,
+}
+
+struct Inner {
+ iocp: ReadyBinding,
+ read: State<Vec<u8>, Vec<u8>>,
+ write: State<Vec<u8>, (Vec<u8>, usize)>,
+ read_buf: SocketAddrBuf,
+}
+
+enum State<T, U> {
+ Empty,
+ Pending(T),
+ Ready(U),
+ Error(io::Error),
+}
+
+impl UdpSocket {
+ pub fn new(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ Ok(UdpSocket {
+ registration: Mutex::new(None),
+ imp: Imp {
+ inner: FromRawArc::new(Io {
+ read: Overlapped::new(recv_done),
+ write: Overlapped::new(send_done),
+ socket: socket,
+ inner: Mutex::new(Inner {
+ iocp: ReadyBinding::new(),
+ read: State::Empty,
+ write: State::Empty,
+ read_buf: SocketAddrBuf::new(),
+ }),
+ }),
+ },
+ })
+ }
+
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.imp.inner.socket.local_addr()
+ }
+
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.imp.inner.socket.try_clone().and_then(UdpSocket::new)
+ }
+
+    /// Note that unlike `TcpStream::write`, this function will not attempt to
+    /// continue writing `buf` until it's entirely written.
+ ///
+ /// TODO: This... may be wrong in the long run. We're reporting that we
+ /// successfully wrote all of the bytes in `buf` but it's possible
+ /// that we don't actually end up writing all of them!
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr)
+ -> io::Result<usize> {
+ let mut me = self.inner();
+ let me = &mut *me;
+
+ match me.write {
+ State::Empty => {}
+ _ => return Err(io::ErrorKind::WouldBlock.into()),
+ }
+
+ if !me.iocp.registered() {
+ return Err(io::ErrorKind::WouldBlock.into())
+ }
+
+ let interest = me.iocp.readiness();
+ me.iocp.set_readiness(interest - Ready::writable());
+
+ let mut owned_buf = me.iocp.get_buffer(64 * 1024);
+ let amt = owned_buf.write(buf)?;
+ unsafe {
+ trace!("scheduling a send");
+ self.imp.inner.socket.send_to_overlapped(&owned_buf, target,
+ self.imp.inner.write.as_mut_ptr())
+ }?;
+ me.write = State::Pending(owned_buf);
+ mem::forget(self.imp.clone());
+ Ok(amt)
+ }
+
+    /// Note that unlike `TcpStream::write`, this function will not attempt to
+    /// continue writing `buf` until it's entirely written.
+ ///
+ /// TODO: This... may be wrong in the long run. We're reporting that we
+ /// successfully wrote all of the bytes in `buf` but it's possible
+ /// that we don't actually end up writing all of them!
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ let mut me = self.inner();
+ let me = &mut *me;
+
+ match me.write {
+ State::Empty => {}
+ _ => return Err(io::ErrorKind::WouldBlock.into()),
+ }
+
+ if !me.iocp.registered() {
+ return Err(io::ErrorKind::WouldBlock.into())
+ }
+
+ let interest = me.iocp.readiness();
+ me.iocp.set_readiness(interest - Ready::writable());
+
+ let mut owned_buf = me.iocp.get_buffer(64 * 1024);
+ let amt = owned_buf.write(buf)?;
+ unsafe {
+ trace!("scheduling a send");
+            self.imp.inner.socket.send_overlapped(&owned_buf, self.imp.inner.write.as_mut_ptr())
+        }?;
+ me.write = State::Pending(owned_buf);
+ mem::forget(self.imp.clone());
+ Ok(amt)
+ }
+
+ pub fn recv_from(&self, mut buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ let mut me = self.inner();
+ match mem::replace(&mut me.read, State::Empty) {
+ State::Empty => Err(io::ErrorKind::WouldBlock.into()),
+ State::Pending(b) => { me.read = State::Pending(b); Err(io::ErrorKind::WouldBlock.into()) }
+ State::Ready(data) => {
+ // If we weren't provided enough space to receive the message
+ // then don't actually read any data, just return an error.
+ if buf.len() < data.len() {
+ me.read = State::Ready(data);
+ Err(io::Error::from_raw_os_error(WSAEMSGSIZE as i32))
+ } else {
+ let r = if let Some(addr) = me.read_buf.to_socket_addr() {
+ buf.write(&data).unwrap();
+ Ok((data.len(), addr))
+ } else {
+ Err(io::Error::new(io::ErrorKind::Other,
+ "failed to parse socket address"))
+ };
+ me.iocp.put_buffer(data);
+ self.imp.schedule_read_from(&mut me);
+ r
+ }
+ }
+ State::Error(e) => {
+ self.imp.schedule_read_from(&mut me);
+ Err(e)
+ }
+ }
+ }
+
+ pub fn recv(&self, buf: &mut [u8])
+ -> io::Result<usize> {
+        // Since recv_from can be used on connected sockets, just call it and drop the address.
+ self.recv_from(buf).map(|(size,_)| size)
+ }
+
+ pub fn connect(&self, addr: SocketAddr) -> io::Result<()> {
+ self.imp.inner.socket.connect(addr)
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.imp.inner.socket.broadcast()
+ }
+
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_broadcast(on)
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.imp.inner.socket.multicast_loop_v4()
+ }
+
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_multicast_loop_v4(on)
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.imp.inner.socket.multicast_ttl_v4()
+ }
+
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.imp.inner.socket.set_multicast_ttl_v4(ttl)
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.imp.inner.socket.multicast_loop_v6()
+ }
+
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_multicast_loop_v6(on)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.imp.inner.socket.ttl()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.imp.inner.socket.set_ttl(ttl)
+ }
+
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.imp.inner.socket.join_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.imp.inner.socket.join_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.imp.inner.socket.leave_multicast_v4(multiaddr, interface)
+ }
+
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.imp.inner.socket.leave_multicast_v6(multiaddr, interface)
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.imp.inner.socket.set_only_v6(only_v6)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.imp.inner.socket.only_v6()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.imp.inner.socket.take_error()
+ }
+
+ fn inner(&self) -> MutexGuard<Inner> {
+ self.imp.inner()
+ }
+
+ fn post_register(&self, interest: Ready, me: &mut Inner) {
+ if interest.is_readable() {
+            // We use recv_from here since it is well specified for both
+            // connected and non-connected sockets, and we can discard the address
+            // when calling recv().
+ self.imp.schedule_read_from(me);
+ }
+        // See comments in TcpStream::post_register for what's going on here
+ if interest.is_writable() {
+ if let State::Empty = me.write {
+ self.imp.add_readiness(me, Ready::writable());
+ }
+ }
+ }
+}
+
+impl Imp {
+ fn inner(&self) -> MutexGuard<Inner> {
+ self.inner.inner.lock().unwrap()
+ }
+
+ fn schedule_read_from(&self, me: &mut Inner) {
+ match me.read {
+ State::Empty => {}
+ _ => return,
+ }
+
+ let interest = me.iocp.readiness();
+ me.iocp.set_readiness(interest - Ready::readable());
+
+ let mut buf = me.iocp.get_buffer(64 * 1024);
+ let res = unsafe {
+ trace!("scheduling a read");
+ let cap = buf.capacity();
+ buf.set_len(cap);
+ self.inner.socket.recv_from_overlapped(&mut buf, &mut me.read_buf,
+ self.inner.read.as_mut_ptr())
+ };
+ match res {
+ Ok(_) => {
+ me.read = State::Pending(buf);
+ mem::forget(self.clone());
+ }
+ Err(e) => {
+ me.read = State::Error(e);
+ self.add_readiness(me, Ready::readable());
+ me.iocp.put_buffer(buf);
+ }
+ }
+ }
+
+ // See comments in tcp::StreamImp::push
+ fn add_readiness(&self, me: &Inner, set: Ready) {
+ me.iocp.set_readiness(set | me.iocp.readiness());
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.register_socket(&self.imp.inner.socket,
+ poll, token, interest, opts,
+ &self.registration)?;
+ self.post_register(interest, &mut me);
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token,
+ interest: Ready, opts: PollOpt) -> io::Result<()> {
+ let mut me = self.inner();
+ me.iocp.reregister_socket(&self.imp.inner.socket,
+ poll, token, interest,
+ opts, &self.registration)?;
+ self.post_register(interest, &mut me);
+ Ok(())
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.inner().iocp.deregister(&self.imp.inner.socket,
+ poll, &self.registration)
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("UdpSocket")
+ .finish()
+ }
+}
+
+impl Drop for UdpSocket {
+ fn drop(&mut self) {
+ let inner = self.inner();
+
+ // If we're still internally reading, we're no longer interested. Note
+ // though that we don't cancel any writes which may have been issued to
+ // preserve the same semantics as Unix.
+ unsafe {
+ match inner.read {
+ State::Pending(_) => {
+ drop(super::cancel(&self.imp.inner.socket,
+ &self.imp.inner.read));
+ }
+ State::Empty |
+ State::Ready(_) |
+ State::Error(_) => {}
+ }
+ }
+ }
+}
+
+fn send_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ trace!("finished a send {}", status.bytes_transferred());
+ let me2 = Imp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), Io, write) },
+ };
+ let mut me = me2.inner();
+ me.write = State::Empty;
+ me2.add_readiness(&mut me, Ready::writable());
+}
+
+fn recv_done(status: &OVERLAPPED_ENTRY) {
+ let status = CompletionStatus::from_entry(status);
+ trace!("finished a recv {}", status.bytes_transferred());
+ let me2 = Imp {
+ inner: unsafe { overlapped2arc!(status.overlapped(), Io, read) },
+ };
+ let mut me = me2.inner();
+ let mut buf = match mem::replace(&mut me.read, State::Empty) {
+ State::Pending(buf) => buf,
+ _ => unreachable!(),
+ };
+ unsafe {
+ buf.set_len(status.bytes_transferred() as usize);
+ }
+ me.read = State::Ready(buf);
+ me2.add_readiness(&mut me, Ready::readable());
+}
diff --git a/third_party/rust/mio-0.6.23/src/timer.rs b/third_party/rust/mio-0.6.23/src/timer.rs
new file mode 100644
index 0000000000..c591be5e27
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/timer.rs
@@ -0,0 +1,516 @@
+//! Timer optimized for I/O related operations
+
+#![allow(deprecated, missing_debug_implementations)]
+
+use {convert, io, Ready, Poll, PollOpt, Registration, SetReadiness, Token};
+use event::Evented;
+use lazycell::LazyCell;
+use slab::Slab;
+use std::{cmp, error, fmt, u64, usize, iter, thread};
+use std::sync::Arc;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::time::{Duration, Instant};
+
+use self::TimerErrorKind::TimerOverflow;
+
+pub struct Timer<T> {
+ // Size of each tick in milliseconds
+ tick_ms: u64,
+ // Slab of timeout entries
+ entries: Slab<Entry<T>>,
+ // Timeout wheel. Each tick, the timer will look at the next slot for
+ // timeouts that match the current tick.
+ wheel: Vec<WheelEntry>,
+ // Tick 0's time instant
+ start: Instant,
+ // The current tick
+ tick: Tick,
+ // The next entry to possibly timeout
+ next: Token,
+ // Masks the target tick to get the slot
+ mask: u64,
+ // Set on registration with Poll
+ inner: LazyCell<Inner>,
+}
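+// For illustration (with the 256-slot default below): `mask` is 255, so a
+// timeout targeting tick 300 lands in slot 300 & 255 = 44 and fires once the
+// wheel has advanced past tick 300.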
+
+pub struct Builder {
+ // Approximate duration of each tick
+ tick: Duration,
+ // Number of slots in the timer wheel
+ num_slots: usize,
+ // Max number of timeouts that can be in flight at a given time.
+ capacity: usize,
+}
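+// A minimal usage sketch (the values shown are the defaults from `Default` below):
+//
+//     let mut timer = Builder::default()
+//         .tick_duration(Duration::from_millis(100))
+//         .num_slots(256)
+//         .capacity(65_536)
+//         .build::<&str>();
+//     let timeout = timer.set_timeout(Duration::from_millis(200), "hello").unwrap();
+//     // ...roughly 200ms later, `timer.poll()` returns Some("hello").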
+
+#[derive(Clone, Debug)]
+pub struct Timeout {
+ // Reference into the timer entry slab
+ token: Token,
+ // Tick that it should match up with
+ tick: u64,
+}
+
+struct Inner {
+ registration: Registration,
+ set_readiness: SetReadiness,
+ wakeup_state: WakeupState,
+ wakeup_thread: thread::JoinHandle<()>,
+}
+
+impl Drop for Inner {
+ fn drop(&mut self) {
+ // 1. Set wakeup state to TERMINATE_THREAD (https://github.com/carllerche/mio/blob/master/src/timer.rs#L451)
+ self.wakeup_state.store(TERMINATE_THREAD, Ordering::Release);
+        // 2. Wake the thread up so it can observe the new state and exit
+ self.wakeup_thread.thread().unpark();
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+struct WheelEntry {
+ next_tick: Tick,
+ head: Token,
+}
+
+// Doubly linked list of timer entries. Allows for efficient insertion /
+// removal of timeouts.
+struct Entry<T> {
+ state: T,
+ links: EntryLinks,
+}
+
+#[derive(Copy, Clone)]
+struct EntryLinks {
+ tick: Tick,
+ prev: Token,
+ next: Token
+}
+
+type Tick = u64;
+
+const TICK_MAX: Tick = u64::MAX;
+
+// Manages communication with wakeup thread
+type WakeupState = Arc<AtomicUsize>;
+
+pub type Result<T> = ::std::result::Result<T, TimerError>;
+// TODO: remove
+pub type TimerResult<T> = Result<T>;
+
+
+/// Deprecated and unused.
+#[derive(Debug)]
+pub struct TimerError;
+
+/// Deprecated and unused.
+#[derive(Debug)]
+pub enum TimerErrorKind {
+ TimerOverflow,
+}
+
+// TODO: Remove
+pub type OldTimerResult<T> = Result<T>;
+
+const TERMINATE_THREAD: usize = 0;
+const EMPTY: Token = Token(usize::MAX);
+
+impl Builder {
+ pub fn tick_duration(mut self, duration: Duration) -> Builder {
+ self.tick = duration;
+ self
+ }
+
+ pub fn num_slots(mut self, num_slots: usize) -> Builder {
+ self.num_slots = num_slots;
+ self
+ }
+
+ pub fn capacity(mut self, capacity: usize) -> Builder {
+ self.capacity = capacity;
+ self
+ }
+
+ pub fn build<T>(self) -> Timer<T> {
+ Timer::new(convert::millis(self.tick), self.num_slots, self.capacity, Instant::now())
+ }
+}
+
+impl Default for Builder {
+ fn default() -> Builder {
+ Builder {
+ tick: Duration::from_millis(100),
+ num_slots: 256,
+ capacity: 65_536,
+ }
+ }
+}
+
+impl<T> Timer<T> {
+ fn new(tick_ms: u64, num_slots: usize, capacity: usize, start: Instant) -> Timer<T> {
+ let num_slots = num_slots.next_power_of_two();
+ let capacity = capacity.next_power_of_two();
+ let mask = (num_slots as u64) - 1;
+ let wheel = iter::repeat(WheelEntry { next_tick: TICK_MAX, head: EMPTY })
+ .take(num_slots).collect();
+
+ Timer {
+ tick_ms,
+ entries: Slab::with_capacity(capacity),
+ wheel,
+ start,
+ tick: 0,
+ next: EMPTY,
+ mask,
+ inner: LazyCell::new(),
+ }
+ }
+
+ pub fn set_timeout(&mut self, delay_from_now: Duration, state: T) -> Result<Timeout> {
+ let delay_from_start = self.start.elapsed() + delay_from_now;
+ self.set_timeout_at(delay_from_start, state)
+ }
+
+ fn set_timeout_at(&mut self, delay_from_start: Duration, state: T) -> Result<Timeout> {
+ let mut tick = duration_to_tick(delay_from_start, self.tick_ms);
+ trace!("setting timeout; delay={:?}; tick={:?}; current-tick={:?}", delay_from_start, tick, self.tick);
+
+ // Always target at least 1 tick in the future
+ if tick <= self.tick {
+ tick = self.tick + 1;
+ }
+
+ self.insert(tick, state)
+ }
+
+ fn insert(&mut self, tick: Tick, state: T) -> Result<Timeout> {
+ // Get the slot for the requested tick
+ let slot = (tick & self.mask) as usize;
+ let curr = self.wheel[slot];
+
+ // Insert the new entry
+ let entry = Entry::new(state, tick, curr.head);
+ let token = Token(self.entries.insert(entry));
+
+ if curr.head != EMPTY {
+ // If there was a previous entry, set its prev pointer to the new
+ // entry
+ self.entries[curr.head.into()].links.prev = token;
+ }
+
+ // Update the head slot
+ self.wheel[slot] = WheelEntry {
+ next_tick: cmp::min(tick, curr.next_tick),
+ head: token,
+ };
+
+ self.schedule_readiness(tick);
+
+ trace!("inserted timeout; slot={}; token={:?}", slot, token);
+
+ // Return the new timeout
+ Ok(Timeout {
+ token,
+ tick
+ })
+ }
+
+ pub fn cancel_timeout(&mut self, timeout: &Timeout) -> Option<T> {
+ let links = match self.entries.get(timeout.token.into()) {
+ Some(e) => e.links,
+ None => return None
+ };
+
+ // Sanity check
+ if links.tick != timeout.tick {
+ return None;
+ }
+
+ self.unlink(&links, timeout.token);
+ Some(self.entries.remove(timeout.token.into()).state)
+ }
+
+ pub fn poll(&mut self) -> Option<T> {
+ let target_tick = current_tick(self.start, self.tick_ms);
+ self.poll_to(target_tick)
+ }
+
+ fn poll_to(&mut self, mut target_tick: Tick) -> Option<T> {
+ trace!("tick_to; target_tick={}; current_tick={}", target_tick, self.tick);
+
+ if target_tick < self.tick {
+ target_tick = self.tick;
+ }
+
+ while self.tick <= target_tick {
+ let curr = self.next;
+
+ trace!("ticking; curr={:?}", curr);
+
+ if curr == EMPTY {
+ self.tick += 1;
+
+ let slot = self.slot_for(self.tick);
+ self.next = self.wheel[slot].head;
+
+ // Handle the case when a slot has a single timeout which gets
+ // canceled before the timeout expires. In this case, the
+ // slot's head is EMPTY but there is a value for next_tick. Not
+ // resetting next_tick here causes the timer to get stuck in a
+ // loop.
+ if self.next == EMPTY {
+ self.wheel[slot].next_tick = TICK_MAX;
+ }
+ } else {
+ let slot = self.slot_for(self.tick);
+
+ if curr == self.wheel[slot].head {
+ self.wheel[slot].next_tick = TICK_MAX;
+ }
+
+ let links = self.entries[curr.into()].links;
+
+ if links.tick <= self.tick {
+ trace!("triggering; token={:?}", curr);
+
+ // Unlink will also advance self.next
+ self.unlink(&links, curr);
+
+ // Remove and return the token
+ return Some(self.entries.remove(curr.into()).state);
+ } else {
+ let next_tick = self.wheel[slot].next_tick;
+ self.wheel[slot].next_tick = cmp::min(next_tick, links.tick);
+ self.next = links.next;
+ }
+ }
+ }
+
+ // No more timeouts to poll
+ if let Some(inner) = self.inner.borrow() {
+ trace!("unsetting readiness");
+ let _ = inner.set_readiness.set_readiness(Ready::empty());
+
+ if let Some(tick) = self.next_tick() {
+ self.schedule_readiness(tick);
+ }
+ }
+
+ None
+ }
+
+ fn unlink(&mut self, links: &EntryLinks, token: Token) {
+ trace!("unlinking timeout; slot={}; token={:?}",
+ self.slot_for(links.tick), token);
+
+ if links.prev == EMPTY {
+ let slot = self.slot_for(links.tick);
+ self.wheel[slot].head = links.next;
+ } else {
+ self.entries[links.prev.into()].links.next = links.next;
+ }
+
+ if links.next != EMPTY {
+ self.entries[links.next.into()].links.prev = links.prev;
+
+ if token == self.next {
+ self.next = links.next;
+ }
+ } else if token == self.next {
+ self.next = EMPTY;
+ }
+ }
+
+ fn schedule_readiness(&self, tick: Tick) {
+ if let Some(inner) = self.inner.borrow() {
+ // Coordinate setting readiness w/ the wakeup thread
+ let mut curr = inner.wakeup_state.load(Ordering::Acquire);
+
+ loop {
+ if curr as Tick <= tick {
+ // Nothing to do, wakeup is already scheduled
+ return;
+ }
+
+ // Attempt to move the wakeup time forward
+ trace!("advancing the wakeup time; target={}; curr={}", tick, curr);
+ let actual = inner.wakeup_state.compare_and_swap(curr, tick as usize, Ordering::Release);
+
+ if actual == curr {
+ // Signal to the wakeup thread that the wakeup time has
+ // been changed.
+ trace!("unparking wakeup thread");
+ inner.wakeup_thread.thread().unpark();
+ return;
+ }
+
+ curr = actual;
+ }
+ }
+ }
+
+ // Next tick containing a timeout
+ fn next_tick(&self) -> Option<Tick> {
+ if self.next != EMPTY {
+ let slot = self.slot_for(self.entries[self.next.into()].links.tick);
+
+ if self.wheel[slot].next_tick == self.tick {
+ // There is data ready right now
+ return Some(self.tick);
+ }
+ }
+
+ self.wheel.iter().map(|e| e.next_tick).min()
+ }
+
+ fn slot_for(&self, tick: Tick) -> usize {
+ (self.mask & tick) as usize
+ }
+}
+
+impl<T> Default for Timer<T> {
+ fn default() -> Timer<T> {
+ Builder::default().build()
+ }
+}
+
+impl<T> Evented for Timer<T> {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ if self.inner.borrow().is_some() {
+ return Err(io::Error::new(io::ErrorKind::Other, "timer already registered"));
+ }
+
+ let (registration, set_readiness) = Registration::new(poll, token, interest, opts);
+ let wakeup_state = Arc::new(AtomicUsize::new(usize::MAX));
+ let thread_handle = spawn_wakeup_thread(
+ wakeup_state.clone(),
+ set_readiness.clone(),
+ self.start, self.tick_ms);
+
+ self.inner.fill(Inner {
+ registration,
+ set_readiness,
+ wakeup_state,
+ wakeup_thread: thread_handle,
+ }).expect("timer already registered");
+
+ if let Some(next_tick) = self.next_tick() {
+ self.schedule_readiness(next_tick);
+ }
+
+ Ok(())
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ match self.inner.borrow() {
+ Some(inner) => inner.registration.update(poll, token, interest, opts),
+ None => Err(io::Error::new(io::ErrorKind::Other, "receiver not registered")),
+ }
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ match self.inner.borrow() {
+ Some(inner) => inner.registration.deregister(poll),
+ None => Err(io::Error::new(io::ErrorKind::Other, "receiver not registered")),
+ }
+ }
+}
+
+impl fmt::Debug for Inner {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Inner")
+ .field("registration", &self.registration)
+ .field("wakeup_state", &self.wakeup_state.load(Ordering::Relaxed))
+ .finish()
+ }
+}
+
+fn spawn_wakeup_thread(state: WakeupState, set_readiness: SetReadiness, start: Instant, tick_ms: u64) -> thread::JoinHandle<()> {
+ thread::spawn(move || {
+ let mut sleep_until_tick = state.load(Ordering::Acquire) as Tick;
+
+ loop {
+ if sleep_until_tick == TERMINATE_THREAD as Tick {
+ return;
+ }
+
+ let now_tick = current_tick(start, tick_ms);
+
+ trace!("wakeup thread: sleep_until_tick={:?}; now_tick={:?}", sleep_until_tick, now_tick);
+
+ if now_tick < sleep_until_tick {
+                // Calling park_timeout with u64::MAX leads to undefined
+                // behavior in pthread, causing the park to return immediately
+                // and the thread to spin tightly. So for very large values
+                // (where the multiplication overflows), simply use a blocking park.
+ match tick_ms.checked_mul(sleep_until_tick - now_tick) {
+ Some(sleep_duration) => {
+ trace!("sleeping; tick_ms={}; now_tick={}; sleep_until_tick={}; duration={:?}",
+ tick_ms, now_tick, sleep_until_tick, sleep_duration);
+ thread::park_timeout(Duration::from_millis(sleep_duration));
+ }
+ None => {
+ trace!("sleeping; tick_ms={}; now_tick={}; blocking sleep",
+ tick_ms, now_tick);
+ thread::park();
+ }
+ }
+ sleep_until_tick = state.load(Ordering::Acquire) as Tick;
+ } else {
+ let actual = state.compare_and_swap(sleep_until_tick as usize, usize::MAX, Ordering::AcqRel) as Tick;
+
+ if actual == sleep_until_tick {
+ trace!("setting readiness from wakeup thread");
+ let _ = set_readiness.set_readiness(Ready::readable());
+ sleep_until_tick = usize::MAX as Tick;
+ } else {
+ sleep_until_tick = actual as Tick;
+ }
+ }
+ }
+ })
+}
+
+fn duration_to_tick(elapsed: Duration, tick_ms: u64) -> Tick {
+    // Calculate the tick, rounding to the nearest one
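+    // For example, with tick_ms = 100 an elapsed time of 149ms becomes
+    // (149 + 50) / 100 = tick 1, while 150ms becomes (150 + 50) / 100 = tick 2.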
+ let elapsed_ms = convert::millis(elapsed);
+ elapsed_ms.saturating_add(tick_ms / 2) / tick_ms
+}
+
+fn current_tick(start: Instant, tick_ms: u64) -> Tick {
+ duration_to_tick(start.elapsed(), tick_ms)
+}
+
+impl<T> Entry<T> {
+ fn new(state: T, tick: u64, next: Token) -> Entry<T> {
+ Entry {
+ state,
+ links: EntryLinks {
+ tick,
+ prev: EMPTY,
+ next,
+ },
+ }
+ }
+}
+
+impl fmt::Display for TimerError {
+ fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
+ // `TimerError` will never be constructed.
+ unreachable!();
+ }
+}
+
+impl error::Error for TimerError {
+ fn description(&self) -> &str {
+ // `TimerError` will never be constructed.
+ unreachable!();
+ }
+}
+
+impl fmt::Display for TimerErrorKind {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ TimerOverflow => write!(fmt, "TimerOverflow"),
+ }
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/token.rs b/third_party/rust/mio-0.6.23/src/token.rs
new file mode 100644
index 0000000000..09e42450bc
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/token.rs
@@ -0,0 +1,153 @@
+/// Associates readiness notifications with [`Evented`] handles.
+///
+/// `Token` is a wrapper around `usize` and is used as an argument to
+/// [`Poll::register`] and [`Poll::reregister`].
+///
+/// See [`Poll`] for more documentation on polling.
+///
+/// # Example
+///
+/// Using `Token` to track which socket generated the notification. In this
+/// example, `HashMap` is used, but usually something like [`slab`] is better.
+///
+/// ```
+/// # use std::error::Error;
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// use mio::{Events, Ready, Poll, PollOpt, Token};
+/// use mio::net::TcpListener;
+///
+/// use std::thread;
+/// use std::io::{self, Read};
+/// use std::collections::HashMap;
+///
+/// // After this number of sockets is accepted, the server will shut down.
+/// const MAX_SOCKETS: usize = 32;
+///
+/// // Pick a token that will not be used by any other socket and use that one
+/// // for the listener.
+/// const LISTENER: Token = Token(1024);
+///
+/// // Used to store the sockets.
+/// let mut sockets = HashMap::new();
+///
+/// // This is used to generate a unique token for a socket
+/// let mut next_socket_index = 0;
+///
+/// // The `Poll` instance
+/// let poll = Poll::new()?;
+///
+/// // Tcp listener
+/// let listener = TcpListener::bind(&"127.0.0.1:0".parse()?)?;
+///
+/// // Register the listener
+/// poll.register(&listener,
+/// LISTENER,
+/// Ready::readable(),
+/// PollOpt::edge())?;
+///
+/// // Spawn a thread that will connect a bunch of sockets then close them
+/// let addr = listener.local_addr()?;
+/// thread::spawn(move || {
+/// use std::net::TcpStream;
+///
+///     // +1 here connects an extra socket, which signals the server to shut down
+/// for _ in 0..(MAX_SOCKETS+1) {
+/// // Connect then drop the socket
+/// let _ = TcpStream::connect(&addr).unwrap();
+/// }
+/// });
+///
+/// // Event storage
+/// let mut events = Events::with_capacity(1024);
+///
+/// // Read buffer, this will never actually get filled
+/// let mut buf = [0; 256];
+///
+/// // The main event loop
+/// loop {
+/// // Wait for events
+/// poll.poll(&mut events, None)?;
+///
+/// for event in &events {
+/// match event.token() {
+/// LISTENER => {
+/// // Perform operations in a loop until `WouldBlock` is
+/// // encountered.
+/// loop {
+/// match listener.accept() {
+/// Ok((socket, _)) => {
+/// // Shutdown the server
+/// if next_socket_index == MAX_SOCKETS {
+/// return Ok(());
+/// }
+///
+/// // Get the token for the socket
+/// let token = Token(next_socket_index);
+/// next_socket_index += 1;
+///
+/// // Register the new socket w/ poll
+/// poll.register(&socket,
+/// token,
+/// Ready::readable(),
+/// PollOpt::edge())?;
+///
+/// // Store the socket
+/// sockets.insert(token, socket);
+/// }
+/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+/// // Socket is not ready anymore, stop accepting
+/// break;
+/// }
+/// e => panic!("err={:?}", e), // Unexpected error
+/// }
+/// }
+/// }
+/// token => {
+/// // Always operate in a loop
+/// loop {
+/// match sockets.get_mut(&token).unwrap().read(&mut buf) {
+/// Ok(0) => {
+/// // Socket is closed, remove it from the map
+/// sockets.remove(&token);
+/// break;
+/// }
+/// // Data is not actually sent in this example
+/// Ok(_) => unreachable!(),
+/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+/// // Socket is not ready anymore, stop reading
+/// break;
+/// }
+/// e => panic!("err={:?}", e), // Unexpected error
+/// }
+/// }
+/// }
+/// }
+/// }
+/// }
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// [`Evented`]: event/trait.Evented.html
+/// [`Poll`]: struct.Poll.html
+/// [`Poll::register`]: struct.Poll.html#method.register
+/// [`Poll::reregister`]: struct.Poll.html#method.reregister
+/// [`slab`]: https://crates.io/crates/slab
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Token(pub usize);
+
+impl From<usize> for Token {
+ fn from(val: usize) -> Token {
+ Token(val)
+ }
+}
+
+impl From<Token> for usize {
+ fn from(val: Token) -> usize {
+ val.0
+ }
+}
diff --git a/third_party/rust/mio-0.6.23/src/udp.rs b/third_party/rust/mio-0.6.23/src/udp.rs
new file mode 100644
index 0000000000..a71bd21914
--- /dev/null
+++ b/third_party/rust/mio-0.6.23/src/udp.rs
@@ -0,0 +1,326 @@
+//! Primitives for working with UDP
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+#![allow(deprecated)]
+
+use {sys, Ready, Poll, PollOpt, Token};
+use io::{self, MapNonBlock};
+use event::Evented;
+use poll::SelectorId;
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+
+/// A User Datagram Protocol socket.
+///
+/// This is an implementation of a bound UDP socket. This supports both IPv4 and
+/// IPv6 addresses, and there is no corresponding notion of a server because UDP
+/// is a datagram protocol.
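+///
+/// A minimal sketch of the non-blocking API (the target port below is
+/// arbitrary); `Ok(None)` means the call would currently block:
+///
+/// ```ignore
+/// use mio::udp::UdpSocket;
+///
+/// let socket = UdpSocket::bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+/// let target = "127.0.0.1:9000".parse().unwrap();
+/// match socket.send_to(b"ping", &target).unwrap() {
+///     Some(n) => println!("sent {} bytes", n),
+///     None => println!("not ready yet; retry once the socket is writable"),
+/// }
+/// ```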
+#[derive(Debug)]
+pub struct UdpSocket {
+ sys: sys::UdpSocket,
+ selector_id: SelectorId,
+}
+
+impl UdpSocket {
+ /// Creates a UDP socket from the given address.
+ pub fn bind(addr: &SocketAddr) -> io::Result<UdpSocket> {
+ let socket = net::UdpSocket::bind(addr)?;
+ UdpSocket::from_socket(socket)
+ }
+
+ /// Creates a new mio-wrapped socket from an underlying and bound std
+ /// socket.
+ ///
+ /// This function requires that `socket` has previously been bound to an
+ /// address to work correctly, and returns an I/O object which can be used
+ /// with mio to send/receive UDP messages.
+ ///
+ /// This can be used in conjunction with net2's `UdpBuilder` interface to
+ /// configure a socket before it's handed off to mio, such as setting
+ /// options like `reuse_address` or binding to multiple addresses.
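+    ///
+    /// A sketch of that pattern (assuming net2's `UdpBuilder::new_v4`,
+    /// `reuse_address`, and `bind` methods):
+    ///
+    /// ```ignore
+    /// use net2::UdpBuilder;
+    /// use mio::udp::UdpSocket;
+    ///
+    /// let std_socket = UdpBuilder::new_v4()?
+    ///     .reuse_address(true)?
+    ///     .bind("127.0.0.1:0")?;
+    /// let socket = UdpSocket::from_socket(std_socket)?;
+    /// ```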
+ pub fn from_socket(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ Ok(UdpSocket {
+ sys: sys::UdpSocket::new(socket)?,
+ selector_id: SelectorId::new(),
+ })
+ }
+
+ /// Returns the socket address that this socket was created from.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.sys.local_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `UdpSocket` is a reference to the same socket that this
+ /// object references. Both handles will read and write the same port, and
+ /// options set on one socket will be propagated to the other.
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.sys.try_clone()
+ .map(|s| {
+ UdpSocket {
+ sys: s,
+ selector_id: self.selector_id.clone(),
+ }
+ })
+ }
+
+ /// Sends data on the socket to the given address. On success, returns the
+ /// number of bytes written.
+ ///
+    /// The destination is given by `target` as a `SocketAddr`.
+ pub fn send_to(&self, buf: &[u8], target: &SocketAddr)
+ -> io::Result<Option<usize>> {
+ self.sys.send_to(buf, target).map_non_block()
+ }
+
+    /// Receives data from the socket and stores it in the supplied buffer `buf`. On success,
+    /// returns the number of bytes read and the address the data came from.
+    ///
+    /// The function must be called with a valid byte array `buf` of sufficient size to
+    /// hold the message bytes. If a message is too long to fit in the supplied buffer,
+    /// excess bytes may be discarded.
+    ///
+    /// The function does not read from `buf`; it overwrites the previous contents of `buf`.
+ ///
+ /// Assuming the function has read `n` bytes, slicing `&buf[..n]` provides
+ /// efficient access with iterators and boundary checks.
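+    ///
+    /// A sketch (assuming `socket` is a registered `UdpSocket` that has become
+    /// readable):
+    ///
+    /// ```ignore
+    /// let mut buf = [0u8; 1500];
+    /// if let Some((n, from)) = socket.recv_from(&mut buf)? {
+    ///     println!("received {} bytes from {}: {:?}", n, from, &buf[..n]);
+    /// }
+    /// ```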
+ pub fn recv_from(&self, buf: &mut [u8])
+ -> io::Result<Option<(usize, SocketAddr)>> {
+ self.sys.recv_from(buf).map_non_block()
+ }
+
+    /// Sends data on the socket to the address previously set via connect(). On success,
+    /// returns the number of bytes written.
+ pub fn send(&self, buf: &[u8])
+ -> io::Result<Option<usize>> {
+ self.sys.send(buf).map_non_block()
+ }
+
+    /// Receives data from the socket previously connected via connect() and stores it in
+    /// the supplied buffer `buf`. On success, returns the number of bytes read.
+    ///
+    /// The function must be called with a valid byte array `buf` of sufficient size to
+    /// hold the message bytes. If a message is too long to fit in the supplied buffer,
+    /// excess bytes may be discarded.
+    ///
+    /// The function does not read from `buf`; it overwrites the previous contents of `buf`.
+ ///
+ /// Assuming the function has read `n` bytes, slicing `&buf[..n]` provides
+ /// efficient access with iterators and boundary checks.
+ pub fn recv(&self, buf: &mut [u8])
+ -> io::Result<Option<usize>> {
+ self.sys.recv(buf).map_non_block()
+ }
+
+    /// Connects the UDP socket, setting the default destination for `send()`
+    /// and limiting packets read via `recv()` to those coming from the address
+    /// specified in `addr`.
+ pub fn connect(&self, addr: SocketAddr)
+ -> io::Result<()> {
+ self.sys.connect(addr)
+ }
+
+ /// Gets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_broadcast`][link].
+ ///
+ /// [link]: #method.set_broadcast
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.sys.broadcast()
+ }
+
+ /// Sets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// When enabled, this socket is allowed to send packets to a broadcast
+ /// address.
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.sys.set_broadcast(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v4
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.sys.multicast_loop_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// If enabled, multicast packets will be looped back to the local socket.
+    /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.sys.set_multicast_loop_v4(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_ttl_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_ttl_v4
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.sys.multicast_ttl_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// Indicates the time-to-live value of outgoing multicast packets for
+ /// this socket. The default value is 1 which means that multicast packets
+ /// don't leave the local network unless explicitly requested.
+ ///
+    /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_multicast_ttl_v4(ttl)
+ }
+
+ /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v6`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v6
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.sys.multicast_loop_v6()
+ }
+
+ /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// Controls whether this socket sees the multicast packets it sends itself.
+    /// Note that this may not have any effect on IPv4 sockets.
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.sys.set_multicast_loop_v6(on)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.sys.ttl()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.sys.set_ttl(ttl)
+ }
+
+ /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// address of the local interface with which the system should join the
+ /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+ /// interface is chosen by the system.
+ pub fn join_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.sys.join_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// index of the interface to join/leave (or 0 to indicate any interface).
+ pub fn join_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.sys.join_multicast_v6(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v4`][link].
+ ///
+ /// [link]: #method.join_multicast_v4
+ pub fn leave_multicast_v4(&self,
+ multiaddr: &Ipv4Addr,
+ interface: &Ipv4Addr) -> io::Result<()> {
+ self.sys.leave_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v6`][link].
+ ///
+ /// [link]: #method.join_multicast_v6
+ pub fn leave_multicast_v6(&self,
+ multiaddr: &Ipv6Addr,
+ interface: u32) -> io::Result<()> {
+ self.sys.leave_multicast_v6(multiaddr, interface)
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.sys.take_error()
+ }
+}
+
+impl Evented for UdpSocket {
+ fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.selector_id.associate_selector(poll)?;
+ self.sys.register(poll, token, interest, opts)
+ }
+
+ fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
+ self.sys.reregister(poll, token, interest, opts)
+ }
+
+ fn deregister(&self, poll: &Poll) -> io::Result<()> {
+ self.sys.deregister(poll)
+ }
+}
+
+/*
+ *
+ * ===== UNIX ext =====
+ *
+ */
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd};
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl IntoRawFd for UdpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.sys.into_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl AsRawFd for UdpSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sys.as_raw_fd()
+ }
+}
+
+#[cfg(all(unix, not(target_os = "fuchsia")))]
+impl FromRawFd for UdpSocket {
+ unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
+ UdpSocket {
+ sys: FromRawFd::from_raw_fd(fd),
+ selector_id: SelectorId::new(),
+ }
+ }
+}