author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483 /third_party/rust/mio
parent     Initial commit. (diff)
Adding upstream version 124.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/mio')
64 files changed, 12579 insertions, 0 deletions
diff --git a/third_party/rust/mio/.cargo-checksum.json b/third_party/rust/mio/.cargo-checksum.json new file mode 100644 index 0000000000..f005e1c804 --- /dev/null +++ b/third_party/rust/mio/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"8314f990c14695d982bfd16740a1fd7330628e98563003abc3baf14e0bb1646c","Cargo.toml":"d6b33a1d471893ab56b5a02d36339dbc6e3ec205be411c9124ebebf53d74c488","LICENSE":"07919255c7e04793d8ea760d6c2ce32d19f9ff02bdbdde3ce90b1e1880929a9b","README.md":"1e5b9f5dcb0bdcea387fa03dff6183faa3694fc4be2e83704c4c92744b416ae9","examples/tcp_listenfd_server.rs":"f05bbee8eb91a2e42fec29cbacbdd6dc5f9c95097b3ee12dfe944143e45f5996","examples/tcp_server.rs":"fb256c761cc564de6f2e2550205aec41b9a9d59bc4185f2fd42f9b4b3bc8f17c","examples/udp_server.rs":"e2654b88f5f0d6eba9af32f62e99111078dc8bb1ad2ab8e65a2ce89670318daa","src/event/event.rs":"00ff34dd2e391784d5f7c4613029fb9aa8cc31a019306936960c27092486e5a9","src/event/events.rs":"5cad85e0d0c9e619b6017f868c681ed0dc023e7aae8c0c5c0c7337b2fa96a8a6","src/event/mod.rs":"c129c42114867e3e7337a6605952db8cef8a7ef0938f67a28fa68e73fc420c6a","src/event/source.rs":"d2d1aaff10fb31e7d2dfe8a4500d100a9b2d0a2ab1663503c0f275b35b0072b6","src/interest.rs":"c399965f8bb1c5a801573b5c69ad9df7ffb9ea66dd140edd1aea0fcd4a6c3852","src/io_source.rs":"371be50244362b8769f95b6f152f8f4905f04c95decc5f51c285a2ffdc2deec4","src/lib.rs":"230807008c9956195cd74d63a00879aa2ac67cede03723d336bb177c130f237f","src/macros.rs":"954a9c57fcdabc8625664880a41fedf2457ff87d5d5d6fee5e3c8c6112ac765b","src/net/mod.rs":"22b7f18555a06c1d5b074b67ed28ace5c59b1a3842232a9cd50f612cf0214cdc","src/net/tcp/listener.rs":"94e0277c84fc09041ef3f4c757ed0a935afa03edbdeadcb19a6a89938392a691","src/net/tcp/mod.rs":"f2c50c1d338a69d704e18727142d8495be2927b390d7bbb77cc0cf4e945ed83a","src/net/tcp/stream.rs":"aae550afe7dc458ad2b8860b7fc29c1feda345fe63a1e66e4de17610bead970e","src/net/udp.rs":"0d6245c99b1d4bedf6967ec138d0f4f9073a29e0d4404e3f96ff6830edb8250e","src/net/uds/datagram.rs":"a8d781620846997e5a89800d80e09a7b962c3f9ab14622a054b0b0d1b1b5c84e","src/net/uds/listener.rs":"714b1638a522cc95bb70b137441bc1282684fbcde4432b6872ad052f66670164","src/net/uds/mod.rs":"70d4368adae74652d46771b29e633396e5f1b70e9b7d1370cf5fec9a78605c04","src/net/uds/stream.rs":"ef37baaee8fc3158710aa15ec7f647030afcf86424ccc0dd84971270a3ad33c0","src/poll.rs":"0f7354c46099436d320f3a06adb9f58406668f3189ab08f6f3a7344db069a0d9","src/sys/mod.rs":"ad8eaa44a560caa8d01788b4e798705ef26ccdfc29b5de9eae2f23172a064aba","src/sys/shell/mod.rs":"7dfd5e62ba14639cff17f37f7d41366f765f461c9ad11230862b7ea18249d18b","src/sys/shell/selector.rs":"84b9f666e7aea8dc535c7f8d5d97bb016e3f558096f9fc6da3e496f30e7dc30d","src/sys/shell/tcp.rs":"81a5219507eea81895f4f984ceb157a9c0d3c046d50190b1c562f52d77f2b607","src/sys/shell/udp.rs":"0db637a4ce1ec3a5ccb410d1e94bcc525df8ad005c1d4235d7d50111d335dbde","src/sys/shell/uds.rs":"10c2933423e251fca678322ff9fc7501fcaacdfa769307399cb5dfc7bef63ec3","src/sys/shell/waker.rs":"81bc18cf52345a8d17e8142291e43dd70a36dfd9f5eb486f3e787998ee4451dc","src/sys/unix/mod.rs":"d2b03e3d166d174f1ad283e6a2aa1c3287b4d6e0affb3ed7ea7e033fbbfc5f1d","src/sys/unix/net.rs":"ce7e5e45a8e969eddaec77c23b5c6813b3675effa9018fddd16b46e73c2753f8","src/sys/unix/pipe.rs":"c849fddec5afd6ffec682fe27346c55d14945e7980fb41a132a1846bd9cb4eef","src/sys/unix/selector/epoll.rs":"6f4cf6e5bb540f4d549fd07c8110a73f3e82cfe1c699b4f8afae7a5274131585","src/sys/unix/selector/kqueue.rs":"9a79c425f239160d818bc746c72b0310e7a36af565a9e02eb2caf37670d6249b","src/sys/unix/selector/mod.rs":"81cf2b9d8dcb
113e5a89f6248a662730876bb843c73ddf437e326a274b900d1b","src/sys/unix/sourcefd.rs":"18292d551d78a4ae6900ee31c969909a784d42b49184b7e0581c08f170dabf04","src/sys/unix/tcp.rs":"b2b1f6485403910be3124a0005190c7f2e25acfbd238e66dca1a8adcb8a46f47","src/sys/unix/udp.rs":"ff8f15655e2cbb2ebec5153bda9bad021a418c2a42976af899392f63f5af9633","src/sys/unix/uds/datagram.rs":"edbe7429112f802d73d9002f46efcbace28042a9fbbab3d8e34c6d1fdc6ea2b2","src/sys/unix/uds/listener.rs":"9c87a104ae0140bef910043cca5053814dadaf1dfaa5411eb036738c074ecbef","src/sys/unix/uds/mod.rs":"36bf5855c030cd777f3ee144a7e0e4a10f9f9f3e263599dabfa7a142001a8bea","src/sys/unix/uds/socketaddr.rs":"c6784b61c6705c222e41db8abddc9458c05c70de3bc53e9a7b4aec0ea371058e","src/sys/unix/uds/stream.rs":"1de3da2c1efeda107617548281bcd50e8e29fdd3da9540f8e245323048ca68f5","src/sys/unix/waker.rs":"5563aeeb5e3c9bd9452b01e1fdca9a002c6d52fecda92dc4d13e6e7671ca782e","src/sys/wasi/mod.rs":"dd4945e34b2169c4d3737fe7566269e2f7d1867664657b9b49bd097c88759699","src/sys/windows/afd.rs":"65a1287b385338e88efb895eb573a3bdb16a432ef6e95152be4f363efb2c1e73","src/sys/windows/event.rs":"0b3f9c4ba4f10e8ede7c7c7163b8b4207c0d8155285dca7b4737cd055ec2c459","src/sys/windows/handle.rs":"f75276733a6f930f755a1bc5651cba8be52320a45709eec0b4ce5efe8779c5b6","src/sys/windows/io_status_block.rs":"052095e6b312244f1247c21cfac238dc66f593d19de3ceb1c430825c60fe01b2","src/sys/windows/iocp.rs":"f82ee941190849e345174d6a20771984685411cc48716e3c79dede5ba925f33d","src/sys/windows/mod.rs":"2359bea017df508a5c47d5d8896d7b70b077e08647ce4f526e1d666072210621","src/sys/windows/named_pipe.rs":"10b3c46a9939093df780b01a683573155ec50920a802e69ae6ef2f4c9730d33e","src/sys/windows/net.rs":"67093e8c2c672bc7e7c809c83f400061abac0b9e01a748811e3b91e410df008b","src/sys/windows/overlapped.rs":"94fa619a7655228485c714ef5b5ed4b703a360e51130cdfc07d20c1aa38cb3c5","src/sys/windows/selector.rs":"d12743aba682b064d9913e8d755aae53350c0de436d7025b1af94113a6804e8f","src/sys/windows/tcp.rs":"1911960fd32657d31a53ed3ba4a374d1592bae2b2c7fca80213cdfbfa773a831","src/sys/windows/udp.rs":"45cbba2941287d89e089f168e29f65cea03b8b11aa8f5b7fee173d53a5c0e6b0","src/sys/windows/waker.rs":"cf27f75061b86dabe362d936c5945fd0e644aea833993de45d40b4be5fbbe56c","src/token.rs":"4e64c8e337fbee4e3a2f5e8661241b5261c9c5487e20fa73425d89e2e152e8de","src/waker.rs":"cd521418aede70369775607232d1162ff70f3993f4951ed113cc18fcd7af6539"},"package":null}
\ No newline at end of file diff --git a/third_party/rust/mio/CHANGELOG.md b/third_party/rust/mio/CHANGELOG.md new file mode 100644 index 0000000000..690d489c64 --- /dev/null +++ b/third_party/rust/mio/CHANGELOG.md @@ -0,0 +1,650 @@ +# 0.8.8 + +## Fixed + +* Fix compilation on WASI (https://github.com/tokio-rs/mio/pull/1676). + +# 0.8.7 + +## Added + +* Add/fix support for tvOS and watchOS, Mio should now build for tvOS and + watchOS, but we don't have a CI setup yet + (https://github.com/tokio-rs/mio/pull/1658). + +## Changed + +* Made the `log` dependency optional behind the `log` feature flag (enabled by + default). Users that disabled Mio's default features will now not see any + logging from Mio, enabling the `log` feature will fix that. This was done in + response to the `log` crate increasing it's MSRV to v1.60, see + https://github.com/rust-lang/log/pull/552 + (https://github.com/tokio-rs/mio/pull/1673). +* Update windows-sys dependency to v0.48 + (https://github.com/tokio-rs/mio/pull/1663). + +## Fixed + +* Fix overflow in `Poll::poll` when using `Duration::MAX` as timeout + (https://github.com/tokio-rs/mio/pull/1657). + +# 0.8.6 + +## Added + +* `Interest::PRIORITY` on Linux and Android, to trigger `Event::is_priority` + (https://github.com/tokio-rs/mio/pull/1647). + +## Changed + +* Updated windows-sys to 0.45 + (https://github.com/tokio-rs/mio/pull/1644). +* We started testing with sanitizers on the CI + (https://github.com/tokio-rs/mio/pull/1648). + +## Fixed + +* A number of potential fd leaks when setup resulted in an error right after + creation (https://github.com/tokio-rs/mio/pull/1636). +* Less truncating for timeout values in `Poll::poll` + (https://github.com/tokio-rs/mio/pull/1642). + +# 0.8.5 + +## Changed + +* Updated `windows-sys` to 0.42.0 + (https://github.com/tokio-rs/mio/pull/1624). +* Officially document Wine as not supported, some people claimed it worked, + other claims it doesn't, but nobody stepped up to fix the problem + (https://github.com/tokio-rs/mio/pull/1596). +* Switch to GitHub Actions + (https://github.com/tokio-rs/mio/pull/1598, https://github.com/tokio-rs/mio/pull/1601). +* Documented the current Poll::poll time behaviour + (https://github.com/tokio-rs/mio/pull/1603). + +## Fixed + +* Timeout less than one millisecond becoming zero millsiconds + (https://github.com/tokio-rs/mio/pull/1615, https://github.com/tokio-rs/mio/pull/1616) +* Undefined reference to `epoll\_create1` on Android API level < 21. + (https://github.com/tokio-rs/mio/pull/1590). + +# 0.8.4 + +## Added + +* Support `Registery::try_clone` on `wasm32-wasi` + (https://github.com/tokio-rs/mio/pull/1576). +* Add docs about polling without registering event sources + (https://github.com/tokio-rs/mio/pull/1585). + +# 0.8.3 + +## Changed + +* Replace `winapi` dependency with `windows-sys`. + (https://github.com/tokio-rs/mio/pull/1556). +* Future proofed the kevent ABI for FreeBSD + (https://github.com/tokio-rs/mio/pull/1572). + +## Fixed + +* Improved support for Redox, making it possible to run on stable Rust + (https://github.com/tokio-rs/mio/pull/1555). +* Don't ignore EAGAIN in UDS connect call + (https://github.com/tokio-rs/mio/pull/1564). +* Documentation of `TcpStream::connect` + (https://github.com/tokio-rs/mio/pull/1565). + +# 0.8.2 + +## Added + +* Experimental support for Redox. + +# 0.8.1 + +## Added + +* Add `try_io` method to all I/O types (#1551). 
This execute a user defined I/O + closure while updating Mio's internal state ensuring that the I/O type + receives more events if it hits a WouldBlock error. This is added to the + following types: + * `TcpStream` + * `UdpSocket` + * `UnixDatagram` + * `UnixStream` + * `unix::pipe::Sender` + * `unix::pipe::Receiver` +* Basic, experimental support for `wasm32-wasi` target (#1549). Note that a lot + of time type are still missing, e.g. the `Waker`, and may never be possible to + implement. + +# 0.8.0 + +## Removed + +* Deprecated features (https://github.com/tokio-rs/mio/commit/105f8f2afb57b01ddea716a0aa9720f226c520e3): + * extra-docs (always enabled) + * tcp (replaced with "net" feature). + * udp (replaced with "net" feature). + * uds (replaced with "net" feature). + * pipe (replaced with "os-ext" feature). + * os-util (replaced with "os-ext" feature). +* `TcpSocket` type + (https://github.com/tokio-rs/mio/commit/02e9be41f27daf822575444fdd2b3067433a5996). + The socket2 crate provides all the functionality and more. +* Support for Solaris, it never really worked anyway + (https://github.com/tokio-rs/mio/pull/1528). + +## Changes + +* Update minimum Rustc version (MSVR) to 1.46.0 + (https://github.com/tokio-rs/mio/commit/5c577efecd23750a9a3e0f6ad080ab98f14a255d). + +## Added + +* `UdpSocket::peer_addr` + (https://github.com/tokio-rs/mio/commit/5fc104d08e0e74c8a19247f7cba0f058699fc438). + +# 0.7.14 + +## Fixes + +* Remove use unsound internal macro (#1519). + +## Added + +* `sys::unix::SocketAddr::as_abstract_namespace()` (#1520). + +# 0.7.13 + +## Fixes + +* Fix `Registry::try_clone` invalid usage of `F_DUPFD_CLOEXEC` (#1497, + https://github.com/tokio-rs/mio/commit/2883f5c1f35bf1a59682c5ffc4afe6b97d7d6e68). + +# 0.7.12 (yanked) + +## Fixes + +* Set `FD_CLOEXEC` when calling `Registry::try_clone` + (https://github.com/tokio-rs/mio/commit/d1617b567ff6bc669d71e367d22e0e93ff7e2e24 for epoll and + (https://github.com/tokio-rs/mio/commit/b367a05e408ca90a26383c3aa16d8a16f019dc59 for kqueue). + +# 0.7.11 + +## Fixes + +* Fix missing feature of winapi. + (https://github.com/tokio-rs/mio/commit/a7e61db9e3c2b929ef1a33532bfcc22045d163ce). + +# 0.7.10 + +## Fixes + +* Fix an instance of not doc(cfg(.*)) + (https://github.com/tokio-rs/mio/commit/25e8f911357c740034f10a170dfa4ea1b28234ce). + +# 0.7.9 + +## Fixes + +* Fix error handling in `NamedPipe::write` + (https://github.com/tokio-rs/mio/commit/aec872be9732e5c6685100674278be27f54a271b). +* Use `accept(2)` on x86 Android instead of `accept4(2)` + (https://github.com/tokio-rs/mio/commit/6f86b925d3e48f30905d5cfa54348acf3f1fa036, + https://github.com/tokio-rs/mio/commit/8d5414880ab82178305ac1d2c16d715e58633d3e). +* Improve error message when opening AFD device + (https://github.com/tokio-rs/mio/commit/139f7c4422321eb4a17b14ae2c296fddd19a8804). + +# 0.7.8 + +## Fixes + +* Fix `TcpStream::set_linger` on macOS + (https://github.com/tokio-rs/mio/commit/175773ce02e85977db81224c782c8d140aba8543). +* Fix compilation on DragonFlyBSD + (https://github.com/tokio-rs/mio/commit/b51af46b28871f8dd3233b490ee62237ffed6a26). + +# 0.7.7 + +## Added + +* `UdpSocket::only_v6` + (https://github.com/tokio-rs/mio/commit/0101e05a800f17fb88f4315d9b9fe0f08cca6e57). +* `Clone` implementation for `Event` + (https://github.com/tokio-rs/mio/commit/26540ebbae89df6d4d08465c56f715d8f2addfc3). +* `AsRawFd` implementation for `Registry` + (https://github.com/tokio-rs/mio/commit/f70daa72da0042b1880256164774c3286d315a02). 
+* `Read` and `Write` implementation for `&unix::pipe::Sender` and `Receiver`, + that is on the reference to them, an implementation existed on the types + themselves already + (https://github.com/tokio-rs/mio/commit/1be481dcbbcb6906364008b5d61e7f53cddc3eb3). + +## Fixes + +* Underflow in `SocketAddr::address` + (https://github.com/tokio-rs/mio/commit/6d3fa69240cd4bb95e9d34605c660c30245a18bd). +* Android build with the net feature enabled, but with os-poll disabled + (https://github.com/tokio-rs/mio/commit/49d8fd33e026ad6e2c055d05d6667180ba2af7be). +* Solaris build with the net feature enabled, but with os-poll disabled + (https://github.com/tokio-rs/mio/commit/a6e025e9d9511639ec106ebedc0dd312bdc9be12). +* Ensure that `Waker::wake` works on illumos systems with poor `pipe(2)` and + `epoll(2)` interaction using `EPOLLET` + (https://github.com/tokio-rs/mio/commit/943d4249dcc17cd8b4d2250c4fa19116097248fa). +* Fix `unix::pipe` on illumos + (https://github.com/tokio-rs/mio/commit/0db49f6d5caf54b12176821363d154384357e70a). + +# 0.7.6 + +## Added + +* `net` feature, replaces `tcp`, `udp` and `uds` features + (https://github.com/tokio-rs/mio/commit/a301ba520a8479b459c4acdcefa4a7c5eea818c7). +* `os-ext` feature, replaces `os-util` and `pipe` features + (https://github.com/tokio-rs/mio/commit/f5017fae8a3d3bb4b4cada25b01a2d76a406badc). +* Added keepalive support to `TcpSocket` + (https://github.com/tokio-rs/mio/commit/290c43a96662d54ab7c4b8814e5a9f9a9e523fda). +* `TcpSocket::set_{send, recv}_buffer_size` + (https://github.com/tokio-rs/mio/commit/40c4af79bf5b32b8fbdbf6f2e5c16290e1d3d406). +* `TcpSocket::get_linger` + (https://github.com/tokio-rs/mio/commit/13e82ced655bbb6e2729226e485a7de9f2c2ccd9). +* Implement `IntoRawFd` for `TcpSocket` + (https://github.com/tokio-rs/mio/commit/50548ed45d0b2c98f1f2e003e210d14195284ef4). + +## Deprecated + +* The `tcp`, `udp` and `uds` features, replaced by a new `net` feature. + (https://github.com/tokio-rs/mio/commit/a301ba520a8479b459c4acdcefa4a7c5eea818c7). +* The `extra-docs` feature, now enabled by default. + (https://github.com/tokio-rs/mio/commit/25731e8688a2d91c5c700674a2c2d3841240ece1). +* The `os-util` and `pipe` features, replaced by a new `os-ext` feature. + (https://github.com/tokio-rs/mio/commit/f5017fae8a3d3bb4b4cada25b01a2d76a406badc). + +## Fixes + +* Incorrect assumption of the layout of `std::net::SocketAddr`. Previously Mio + would assume that `SocketAddrV{4,6}` had the same layout as + `libc::sockaddr_in(6)`, however this is not guaranteed by the standard + library. + (https://github.com/tokio-rs/mio/commit/152e0751f0be1c9b0cbd6778645b76bcb0eba93c). +* Also bumped the miow dependency to version 0.3.6 to solve the same problem as + above. + +# 0.7.5 + +## Added + +* `TcpSocket::get_localaddr()` retrieves local address + (https://github.com/tokio-rs/mio/commit/b41a022b2242eef1969c70c8ba93e04c528dba47). +* `TcpSocket::set_reuseport()` & `TcpSocket::get_reuseport()` configures and reads `SO_REUSEPORT` + (https://github.com/tokio-rs/mio/commit/183bbe409ab69cbf9db41d0263b41ec86202d9a0). +* `unix:pipe()` a wrapper around pipe(2) sys call + (https://github.com/tokio-rs/mio/commit/2b7c0967a7362303946deb3d4ca2ae507af6c72d). +* Add a check that a single Waker is active per Poll instance (only in debug mode) + (https://github.com/tokio-rs/mio/commit/f4874f28b32efcf4841691884c65a89734d96a56). +* Added `Interest:remove()` + (https://github.com/tokio-rs/mio/commit/b8639c3d9ac07bb7e2e27685680c8a6510fa1357). 
+ +# 0.7.4 + +## Fixes + +* lost "socket closed" events on windows + (https://github.com/tokio-rs/mio/commit/50c299aca56c4a26e5ed20c283007239fbe6a7a7). + +## Added + +* `TcpSocket::set_linger()` configures SO_LINGER + (https://github.com/tokio-rs/mio/commit/3b4096565c1a879f651b8f8282ecdcbdbd5c92d3). + +# 0.7.3 + +## Added + +* `TcpSocket` for configuring a TCP socket before connecting or listening + (https://github.com/tokio-rs/mio/commit/5b09e60d0f64419b989bda88c86a3147334a03b3). + +# 0.7.2 + +## Added + +* Windows named pipe support. + (https://github.com/tokio-rs/mio/commit/52e8c2220e87696d20f13561402bcaabba4136ed). + +# 0.7.1 + +## Reduced support for 32-bit Apple targets + +In January 2020 Rust reduced its support for 32-bit Apple targets +(https://blog.rust-lang.org/2020/01/03/reducing-support-for-32-bit-apple-targets.html). +Starting with v0.7.1 Mio will do the same as we're no longer checking 32 bit +iOS/macOS on our CI. + +## Added + +* Support for illumos + (https://github.com/tokio-rs/mio/commit/976f2354d0e8fbbb64fba3bf017d7131f9c369a0). +* Report `epoll(2)`'s `EPOLLERR` event as `Event::is_write_closed` if it's the + only event + (https://github.com/tokio-rs/mio/commit/0c77b5712d675eeb9bd43928b5dd7d22b2c7ac0c). +* Optimised event::Iter::{size_hint, count} + (https://github.com/tokio-rs/mio/commit/40df934a11b05233a7796c4de19a4ee06bc4e03e). + +## Fixed + +* Work around Linux kernel < 2.6.37 bug on 32-bits making timeouts longer then + ~30 minutes effectively infinite + (https://github.com/tokio-rs/mio/commit/d555991f5ee81f6c1eec0fe481557d3d5b8d5ff4). +* Set `SO_NOSIGPIPE` on all sockets (not just UDP) on for Apple targets + (https://github.com/tokio-rs/mio/commit/b8bbdcb0d3236f4c4acb257996d42a88dc9987d9). +* Properly handle `POLL_ABORT` on Windows + (https://github.com/tokio-rs/mio/commit/a98da62b3ed1eeed1770aaca12f46d647e4fa749). +* Improved error handling around failing `SIO_BASE_HANDLE` calls on Windows + (https://github.com/tokio-rs/mio/commit/b15fc18458a79ef8a51f73effa92548650f4e5dc). + +## Changed + +* On NetBSD we now use `accept4(2)` + (https://github.com/tokio-rs/mio/commit/4e306addc7144f2e02a7e8397c220b179a006a19). +* The package uploaded to crates.io should be slightly smaller + (https://github.com/tokio-rs/mio/commit/eef8d3b9500bc0db957cd1ac68ee128ebc68351f). + +## Removed + +* Dependency on `lazy_static` on Windows + (https://github.com/tokio-rs/mio/commit/57e4c2a8ac153bc7bb87829e22cf0a21e3927e8a). + +# 0.7.0 + +Version 0.7 of Mio contains various major changes compared to version 0.6. +Overall a large number of API changes have been made to reduce the complexity of +the implementation and remove overhead where possible. + +Please refer to the [blog post about +0.7-alpha.1](https://tokio.rs/blog/2019-12-mio-v0.7-alpha.1/) for additional +information. + +## Added + +* `Interest` structure that replaces `Ready` in registering event sources. +* `Registry` structure that separates the registering and polling functionality. +* `Waker` structure that allows another thread to wake a thread polling `Poll`. +* Unix Domain Socket (UDS) types: `UnixDatagram`, `UnixListener` and + `UnixStream`. + +## Removed + +* All code deprecated in 0.6 was removed in 0.7. +* Support for Fuchsia was removed as the code was unmaintained. +* Support for Bitrig was removed, rustc dropped support for it also. +* `UnixReady` was merged into `Ready`. +* Custom user-space readiness queue was removed, this includes the public + `Registration` and `SetReadiness` types. 
+* `PollOpt` was removed and all registrations use edge-triggers. See the upgrade + guide on how to process event using edge-triggers. +* The network types (types in the `net` module) now support only the same API as + found in the standard library, various methods on the types were removed. +* `TcpStream` now supports vectored I/O. +* `Poll::poll_interruptible` was removed. Instead `Poll::poll` will now return + an error if one occurs. +* `From<usize>` is removed from `Token`, the internal field is still public, so + `Token(my_token)` can still be used. + +## Changed + +* Various documentation improvements were made around correct usage of `Poll` + and registered event sources. It is recommended to reread the documentation of + at least `event::Source` and `Poll`. +* Mio now uses Rust 2018 and rustfmt for all code. +* `Event` was changed to be a wrapper around the OS event. This means it can be + significantly larger on some OSes. +* `Ready` was removed and replaced with various `is_*` methods on `Event`. For + example instead checking for readable readiness using + `Event::ready().is_readable()`, you would call `Event::is_readable()`. +* `Ready::is_hup` was removed in favour of `Event::is_read_closed` and + `Event::is_write_closed`. +* The Iterator implementation of `Events` was changed to return `&Event`. +* `Evented` was renamed to `event::Source` and now takes mutable reference to + the source. +* Minimum supported Rust version was increased to 1.39. +* By default Mio now uses a shim implementation. To enable the full + implementation, that uses the OS, enable the `os-oll` feature. To enable the + network types use `tcp`, `udp` and/or `uds`. For more documentation on the + features see the `feature` module in the API documentation (requires the + `extra-docs` feature). +* The entire Windows implementation was rewritten. +* Various optimisation were made to reduce the number of system calls in + creating and using sockets, e.g. making use of `accept4(2)`. +* The `fmt::Debug` implementation of `Events` is now actually useful as it + prints all `Event`s. + +# 0.6.23 (Dec 01, 2020) + +### Changed +- **MSRV**: Increased the MSRV from 1.18.0 (Jun 8, 2017) to 1.31.0 (Dec 6, + 2018) + (https://github.com/tokio-rs/mio/commit/4879e0d32ddfd98e762fc87240e594a3ad8fca30). + +### Fixed +- Work around Linux kernel < 2.6.37 bug on 32-bits making timeouts longer then + ~30 minutes effectively infinite + (https://github.com/tokio-rs/mio/commit/e7cba59950e9c9fa6194e29b5b1e72029e3df455). +- Update miow and net2 depedencies to get rid of invalid memory layout assumption + (https://github.com/tokio-rs/mio/commit/13f02ac0a86d7c0c0001e5ff8960a0b4340d075c). + +# 0.6.22 (May 01, 2020) + +### Added +- Add support for illumos target (#1294) + +# 0.6.21 (November 27, 2019) + +### Fixed +- remove `=` dependency on `cfg-if`. + +# 0.6.20 (November 21, 2019) + +### Fixed +- Use default IOCP concurrency value (#1161). +- setting FD_CLOEXEC in pipe (#1095). + +# 0.6.19 (May 28, 2018) + +### Fixed +- Do not trigger HUP events on kqueue platforms (#958). + +# 0.6.18 (May 24, 2018) + +### Fixed +- Fix compilation on kqueue platforms with 32bit C long (#948). + +# 0.6.17 (May 15, 2018) + +### Fixed +- Don't report `RDHUP` as `HUP` (#939) +- Fix lazycell related compilation issues. 
+- Fix EPOLLPRI conflicting with READABLE +- Abort process on ref count overflows + +### Added +- Define PRI on all targets + +# 0.6.16 (September 5, 2018) + +* Add EPOLLPRI readiness to UnixReady on supported platforms (#867) +* Reduce spurious awaken calls (#875) + +# 0.6.15 (July 3, 2018) + +* Implement `Evented` for containers (#840). +* Fix android-aarch64 build (#850). + +# 0.6.14 (March 8, 2018) + +* Add `Poll::poll_interruptible` (#811) +* Add `Ready::all` and `usize` conversions (#825) + +# 0.6.13 (February 5, 2018) + +* Fix build on DragonFlyBSD. +* Add `TcpListener::from_std` that does not require the socket addr. +* Deprecate `TcpListener::from_listener` in favor of from_std. + +# 0.6.12 (January 5, 2018) + +* Add `TcpStream::peek` function (#773). +* Raise minimum Rust version to 1.18.0. +* `Poll`: retry select() when interrupted by a signal (#742). +* Deprecate `Events` index access (#713). +* Add `Events::clear` (#782). +* Add support for `lio_listio` (#780). + +# 0.6.11 (October 25, 2017) + +* Allow register to take empty interest (#640). +* Fix bug with TCP errors on windows (#725). +* Add TcpListener::accept_std (#733). +* Update IoVec to fix soundness bug -- includes behavior change. (#747). +* Minimum Rust version is now 1.14.0. +* Fix Android x86_64 build. +* Misc API & doc polish. + +# 0.6.10 (July 27, 2017) + +* Experimental support for Fuchsia +* Add `only_v6` option for UDP sockets +* Fix build on NetBSD +* Minimum Rust version is now 1.13.0 +* Assignment operators (e.g. `|=`) are now implemented for `Ready` + +# 0.6.9 (June 7, 2017) + +* More socket options are exposed through the TCP types, brought in through the + `net2` crate. + +# 0.6.8 (May 26, 2017) + +* Support Fuchia +* POSIX AIO support +* Fix memory leak caused by Register::new2 +* Windows: fix handling failed TCP connections +* Fix build on aarch64-linux-android +* Fix usage of `O_CLOEXEC` with `SETFL` + +# 0.6.7 (April 27, 2017) + +* Ignore EPIPE coming out of `kevent` +* Timer thread should exit when timer is dropped. + +# 0.6.6 (March 22, 2017) + +* Add send(), recv() and connect() to UDPSocket. +* Fix bug in custom readiness queue +* Move net types into `net` module + +# 0.6.5 (March 14, 2017) + +* Misc improvements to kqueue bindings +* Add official support for iOS, Android, BSD +* Reimplement custom readiness queue +* `Poll` is now `Sync` +* Officially deprecate non-core functionality (timers, channel, etc...) +* `Registration` now implements `Evented` +* Fix bug around error conditions with `connect` on windows. +* Use iovec crate for scatter / gather operations +* Only support readable and writable readiness on all platforms +* Expose additional readiness in a platform specific capacity + +# 0.6.4 (January 24, 2017) + +* Fix compilation on musl +* Add `TcpStream::from_stream` which converts a std TCP stream to Mio. + +# 0.6.3 (January 22, 2017) + +* Implement readv/writev for `TcpStream`, allowing vectored reads/writes to + work across platforms +* Remove `nix` dependency +* Implement `Display` and `Error` for some channel error types. 
+* Optimize TCP on Windows through `SetFileCompletionNotificationModes` + +# 0.6.2 (December 18, 2016) + +* Allow registration of custom handles on Windows (like `EventedFd` on Unix) +* Send only one byte for the awakener on Unix instead of four +* Fix a bug in the timer implementation which caused an infinite loop + +# 0.6.1 (October 30, 2016) + +* Update dependency of `libc` to 0.2.16 +* Fix channel `dec` logic +* Fix a timer bug around timeout cancellation +* Don't allocate buffers for TCP reads on Windows +* Touched up documentation in a few places +* Fix an infinite looping timer thread on OSX +* Fix compile on 32-bit OSX +* Fix compile on FreeBSD + +# 0.6.0 (September 2, 2016) + +* Shift primary API towards `Poll` +* `EventLoop` and types to `deprecated` mod. All contents of the + `deprecated` mod will be removed by Mio 1.0. +* Increase minimum supported Rust version to 1.9.0 +* Deprecate unix domain socket implementation in favor of using a + version external to Mio. For example: https://github.com/alexcrichton/mio-uds. +* Remove various types now included in `std` +* Updated TCP & UDP APIs to match the versions in `std` +* Enable implementing `Evented` for any type via `Registration` +* Rename `IoEvent` -> `Event` +* Access `Event` data via functions vs. public fields. +* Expose `Events` as a public type that is passed into `Poll` +* Use `std::time::Duration` for all APIs that require a time duration. +* Polled events are now retrieved via `Events` type. +* Implement `std::error::Error` for `TimerError` +* Relax `Send` bound on notify messages. +* Remove `Clone` impl for `Timeout` (future proof) +* Remove `mio::prelude` +* Remove `mio::util` +* Remove dependency on bytes + +# 0.5.0 (December 3, 2015) + +* Windows support (#239) +* NetBSD support (#306) +* Android support (#295) +* Don't re-export bytes types +* Renamed `EventLoop::register_opt` to `EventLoop::register` (#257) +* `EventLoopConfig` is now a builder instead of having public struct fields. It + is also no longer `Copy`. (#259) +* `TcpSocket` is no longer exported in the public API (#262) +* Integrate with net2. (#262) +* `TcpListener` now returns the remote peer address from `accept` as well (#275) +* The `UdpSocket::{send_to, recv_from}` methods are no longer generic over `Buf` + or `MutBuf` but instead take slices directly. The return types have also been + updated to return the number of bytes transferred. (#260) +* Fix bug with kqueue where an error on registration prevented the + changelist from getting flushed (#276) +* Support sending/receiving FDs over UNIX sockets (#291) +* Mio's socket types are permanently associated with an EventLoop (#308) +* Reduce unnecessary poll wakeups (#314) + + +# 0.4.1 (July 21, 2015) + +* [BUGFIX] Fix notify channel concurrency bug (#216) + +# 0.4.0 (July 16, 2015) + +* [BUGFIX] EventLoop::register requests all events, not just readable. +* [BUGFIX] Attempting to send a message to a shutdown event loop fails correctly. +* [FEATURE] Expose TCP shutdown +* [IMPROVEMENT] Coalesce readable & writable into `ready` event (#184) +* [IMPROVEMENT] Rename TryRead & TryWrite function names to avoid conflict with std. 
+* [IMPROVEMENT] Provide TCP and UDP types in Mio (path to windows #155) +* [IMPROVEMENT] Use clock_ticks crate instead of time (path to windows #155) +* [IMPROVEMENT] Move unix specific features into mio::unix module +* [IMPROVEMENT] TcpListener sets SO_REUSEADDR by default diff --git a/third_party/rust/mio/Cargo.toml b/third_party/rust/mio/Cargo.toml new file mode 100644 index 0000000000..44fe316713 --- /dev/null +++ b/third_party/rust/mio/Cargo.toml @@ -0,0 +1,128 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "mio" +version = "0.8.8" +authors = [ + "Carl Lerche <me@carllerche.com>", + "Thomas de Zeeuw <thomasdezeeuw@gmail.com>", + "Tokio Contributors <team@tokio.rs>", +] +include = [ + "Cargo.toml", + "LICENSE", + "README.md", + "CHANGELOG.md", + "src/**/*.rs", + "examples/**/*.rs", +] +description = "Lightweight non-blocking I/O." +homepage = "https://github.com/tokio-rs/mio" +readme = "README.md" +keywords = [ + "io", + "async", + "non-blocking", +] +categories = ["asynchronous"] +license = "MIT" +repository = "https://github.com/tokio-rs/mio" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] +targets = [ + "aarch64-apple-ios", + "aarch64-linux-android", + "wasm32-wasi", + "x86_64-apple-darwin", + "x86_64-pc-windows-msvc", + "x86_64-unknown-dragonfly", + "x86_64-unknown-freebsd", + "x86_64-unknown-illumos", + "x86_64-unknown-linux-gnu", + "x86_64-unknown-netbsd", + "x86_64-unknown-openbsd", +] + +[package.metadata.playground] +features = [ + "os-poll", + "os-ext", + "net", +] + +[[example]] +name = "tcp_server" +required-features = [ + "os-poll", + "net", +] + +[[example]] +name = "tcp_listenfd_server" +required-features = [ + "os-poll", + "net", +] + +[[example]] +name = "udp_server" +required-features = [ + "os-poll", + "net", +] + +[dependencies.log] +version = "0.4.8" +optional = true + +[dev-dependencies] +rand = "0.8" + +[dev-dependencies.env_logger] +version = "0.9.3" +default-features = false + +[features] +default = ["log"] +net = [] +os-ext = [ + "os-poll", + "windows-sys/Win32_System_Pipes", + "windows-sys/Win32_Security", +] +os-poll = [] + +[target."cfg(target_os = \"wasi\")".dependencies] +libc = "0.2.121" +wasi = "0.11.0" + +[target."cfg(unix)".dependencies] +libc = "0.2.121" + +[target."cfg(windows)".dependencies.windows-sys] +version = "0.52" +features = [ + "Wdk_Foundation", + "Wdk_Storage_FileSystem", + "Wdk_System_IO", + "Win32_Foundation", + "Win32_Networking_WinSock", + "Win32_Storage_FileSystem", + "Win32_System_IO", + "Win32_System_WindowsProgramming", +] diff --git a/third_party/rust/mio/LICENSE b/third_party/rust/mio/LICENSE new file mode 100644 index 0000000000..3516413824 --- /dev/null +++ b/third_party/rust/mio/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Carl Lerche and other MIO contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/third_party/rust/mio/README.md b/third_party/rust/mio/README.md new file mode 100644 index 0000000000..bebd2af711 --- /dev/null +++ b/third_party/rust/mio/README.md @@ -0,0 +1,179 @@ +# Mio – Metal I/O + +Mio is a fast, low-level I/O library for Rust focusing on non-blocking APIs and +event notification for building high performance I/O apps with as little +overhead as possible over the OS abstractions. + +[![Crates.io][crates-badge]][crates-url] +[![MIT licensed][mit-badge]][mit-url] +[![Build Status][actions-badge]][actions-url] +[![Build Status][cirrus-badge]][cirrus-url] + +[crates-badge]: https://img.shields.io/crates/v/mio.svg +[crates-url]: https://crates.io/crates/mio +[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg +[mit-url]: LICENSE +[actions-badge]: https://github.com/tokio-rs/mio/workflows/CI/badge.svg +[actions-url]: https://github.com/tokio-rs/mio/actions?query=workflow%3ACI+branch%3Amaster +[cirrus-badge]: https://api.cirrus-ci.com/github/tokio-rs/mio.svg +[cirrus-url]: https://cirrus-ci.com/github/tokio-rs/mio + +**API documentation** + +* [v0.8](https://docs.rs/mio/^0.8) +* [v0.7](https://docs.rs/mio/^0.7) + +This is a low level library, if you are looking for something easier to get +started with, see [Tokio](https://tokio.rs). + +## Usage + +To use `mio`, first add this to your `Cargo.toml`: + +```toml +[dependencies] +mio = "0.8" +``` + +Next we can start using Mio. The following is quick introduction using +`TcpListener` and `TcpStream`. Note that `features = ["os-poll", "net"]` must be +specified for this example. + +```rust +use std::error::Error; + +use mio::net::{TcpListener, TcpStream}; +use mio::{Events, Interest, Poll, Token}; + +// Some tokens to allow us to identify which event is for which socket. +const SERVER: Token = Token(0); +const CLIENT: Token = Token(1); + +fn main() -> Result<(), Box<dyn Error>> { + // Create a poll instance. + let mut poll = Poll::new()?; + // Create storage for events. + let mut events = Events::with_capacity(128); + + // Setup the server socket. + let addr = "127.0.0.1:13265".parse()?; + let mut server = TcpListener::bind(addr)?; + // Start listening for incoming connections. + poll.registry() + .register(&mut server, SERVER, Interest::READABLE)?; + + // Setup the client socket. + let mut client = TcpStream::connect(addr)?; + // Register the socket. + poll.registry() + .register(&mut client, CLIENT, Interest::READABLE | Interest::WRITABLE)?; + + // Start an event loop. + loop { + // Poll Mio for events, blocking until we get an event. + poll.poll(&mut events, None)?; + + // Process each event. + for event in events.iter() { + // We can use the token we previously provided to `register` to + // determine for which socket the event is. 
+ match event.token() { + SERVER => { + // If this is an event for the server, it means a connection + // is ready to be accepted. + // + // Accept the connection and drop it immediately. This will + // close the socket and notify the client of the EOF. + let connection = server.accept(); + drop(connection); + } + CLIENT => { + if event.is_writable() { + // We can (likely) write to the socket without blocking. + } + + if event.is_readable() { + // We can (likely) read from the socket without blocking. + } + + // Since the server just shuts down the connection, let's + // just exit from our event loop. + return Ok(()); + } + // We don't expect any events with tokens other than those we provided. + _ => unreachable!(), + } + } + } +} +``` + +## Features + +* Non-blocking TCP, UDP +* I/O event queue backed by epoll, kqueue, and IOCP +* Zero allocations at runtime +* Platform specific extensions + +## Non-goals + +The following are specifically omitted from Mio and are left to the user +or higher-level libraries. + +* File operations +* Thread pools / multi-threaded event loop +* Timers + +## Platforms + +Currently supported platforms: + +* Android (API level 21) +* DragonFly BSD +* FreeBSD +* Linux +* NetBSD +* OpenBSD +* Windows +* iOS +* macOS + +There are potentially others. If you find that Mio works on another +platform, submit a PR to update the list! + +Mio can handle interfacing with each of the event systems of the aforementioned +platforms. The details of their implementation are further discussed in the +`Poll` type of the API documentation (see above). + +The Windows implementation for polling sockets is using the [wepoll] strategy. +This uses the Windows AFD system to access socket readiness events. + +[wepoll]: https://github.com/piscisaureus/wepoll + +### Unsupported + +* Haiku, see [issue #1472] +* Solaris, see [issue #1152] +* Wine, see [issue #1444] + +[issue #1472]: https://github.com/tokio-rs/mio/issues/1472 +[issue #1152]: https://github.com/tokio-rs/mio/issues/1152 +[issue #1444]: https://github.com/tokio-rs/mio/issues/1444 + +## Community + +A group of Mio users hang out on [Discord], this can be a good place to go for +questions. + +[Discord]: https://discord.gg/tokio + +## Contributing + +Interested in getting involved? We would love to help you! For simple +bug fixes, just submit a PR with the fix and we can discuss the fix +directly in the PR. If the fix is more complex, start with an issue. + +If you want to propose an API change, create an issue to start a +discussion with the community. Also, feel free to talk with us in Discord. + +Finally, be kind. We support the [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct). 
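The README's usage section notes that the example requires `features = ["os-poll", "net"]`, but its `[dependencies]` snippet only shows `mio = "0.8"`. A minimal sketch of the dependency declaration with those features enabled (feature names as declared in the crate's `Cargo.toml` above) might look like this:

```toml
[dependencies]
# "os-poll" enables the OS-backed Poll implementation (epoll/kqueue/IOCP);
# "net" enables the non-blocking TcpListener/TcpStream/UdpSocket types.
mio = { version = "0.8", features = ["os-poll", "net"] }
```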
diff --git a/third_party/rust/mio/examples/tcp_listenfd_server.rs b/third_party/rust/mio/examples/tcp_listenfd_server.rs new file mode 100644 index 0000000000..941d7f0483 --- /dev/null +++ b/third_party/rust/mio/examples/tcp_listenfd_server.rs @@ -0,0 +1,209 @@ +// You can run this example from the root of the mio repo: +// cargo run --example tcp_listenfd_server --features="os-poll net" +// or with wasi: +// cargo +nightly build --target wasm32-wasi --example tcp_listenfd_server --features="os-poll net" +// wasmtime run --tcplisten 127.0.0.1:9000 --env 'LISTEN_FDS=1' target/wasm32-wasi/debug/examples/tcp_listenfd_server.wasm + +use mio::event::Event; +use mio::net::{TcpListener, TcpStream}; +use mio::{Events, Interest, Poll, Registry, Token}; +use std::collections::HashMap; +use std::io::{self, Read, Write}; +use std::str::from_utf8; + +// Setup some tokens to allow us to identify which event is for which socket. +const SERVER: Token = Token(0); + +// Some data we'll send over the connection. +const DATA: &[u8] = b"Hello world!\n"; + +#[cfg(not(windows))] +fn get_first_listen_fd_listener() -> Option<std::net::TcpListener> { + #[cfg(unix)] + use std::os::unix::io::FromRawFd; + #[cfg(target_os = "wasi")] + use std::os::wasi::io::FromRawFd; + + let stdlistener = unsafe { std::net::TcpListener::from_raw_fd(3) }; + stdlistener.set_nonblocking(true).unwrap(); + Some(stdlistener) +} + +#[cfg(windows)] +fn get_first_listen_fd_listener() -> Option<std::net::TcpListener> { + // Windows does not support `LISTEN_FDS` + None +} + +fn main() -> io::Result<()> { + env_logger::init(); + + std::env::var("LISTEN_FDS").expect("LISTEN_FDS environment variable unset"); + + // Create a poll instance. + let mut poll = Poll::new()?; + // Create storage for events. + let mut events = Events::with_capacity(128); + + // Setup the TCP server socket. + let mut server = { + let stdlistener = get_first_listen_fd_listener().unwrap(); + println!("Using preopened socket FD 3"); + println!("You can connect to the server using `nc`:"); + match stdlistener.local_addr() { + Ok(a) => println!(" $ nc {} {}", a.ip(), a.port()), + Err(_) => println!(" $ nc <IP> <PORT>"), + } + println!("You'll see our welcome message and anything you type will be printed here."); + TcpListener::from_std(stdlistener) + }; + + // Register the server with poll we can receive events for it. + poll.registry() + .register(&mut server, SERVER, Interest::READABLE)?; + + // Map of `Token` -> `TcpStream`. + let mut connections = HashMap::new(); + // Unique token for each incoming connection. + let mut unique_token = Token(SERVER.0 + 1); + + loop { + poll.poll(&mut events, None)?; + + for event in events.iter() { + match event.token() { + SERVER => loop { + // Received an event for the TCP server socket, which + // indicates we can accept an connection. + let (mut connection, address) = match server.accept() { + Ok((connection, address)) => (connection, address), + Err(ref e) if would_block(e) => { + // If we get a `WouldBlock` error we know our + // listener has no more incoming connections queued, + // so we can return to polling and wait for some + // more. + break; + } + Err(e) => { + // If it was any other kind of error, something went + // wrong and we terminate with an error. 
+ return Err(e); + } + }; + + println!("Accepted connection from: {}", address); + + let token = next(&mut unique_token); + poll.registry() + .register(&mut connection, token, Interest::WRITABLE)?; + + connections.insert(token, connection); + }, + token => { + // Maybe received an event for a TCP connection. + let done = if let Some(connection) = connections.get_mut(&token) { + handle_connection_event(poll.registry(), connection, event)? + } else { + // Sporadic events happen, we can safely ignore them. + false + }; + if done { + if let Some(mut connection) = connections.remove(&token) { + poll.registry().deregister(&mut connection)?; + } + } + } + } + } + } +} + +fn next(current: &mut Token) -> Token { + let next = current.0; + current.0 += 1; + Token(next) +} + +/// Returns `true` if the connection is done. +fn handle_connection_event( + registry: &Registry, + connection: &mut TcpStream, + event: &Event, +) -> io::Result<bool> { + if event.is_writable() { + // We can (maybe) write to the connection. + match connection.write(DATA) { + // We want to write the entire `DATA` buffer in a single go. If we + // write less we'll return a short write error (same as + // `io::Write::write_all` does). + Ok(n) if n < DATA.len() => return Err(io::ErrorKind::WriteZero.into()), + Ok(_) => { + // After we've written something we'll reregister the connection + // to only respond to readable events. + registry.reregister(connection, event.token(), Interest::READABLE)? + } + // Would block "errors" are the OS's way of saying that the + // connection is not actually ready to perform this I/O operation. + Err(ref err) if would_block(err) => {} + // Got interrupted (how rude!), we'll try again. + Err(ref err) if interrupted(err) => { + return handle_connection_event(registry, connection, event) + } + // Other errors we'll consider fatal. + Err(err) => return Err(err), + } + } + + if event.is_readable() { + let mut connection_closed = false; + let mut received_data = vec![0; 4096]; + let mut bytes_read = 0; + // We can (maybe) read from the connection. + loop { + match connection.read(&mut received_data[bytes_read..]) { + Ok(0) => { + // Reading 0 bytes means the other side has closed the + // connection or is done writing, then so are we. + connection_closed = true; + break; + } + Ok(n) => { + bytes_read += n; + if bytes_read == received_data.len() { + received_data.resize(received_data.len() + 1024, 0); + } + } + // Would block "errors" are the OS's way of saying that the + // connection is not actually ready to perform this I/O operation. + Err(ref err) if would_block(err) => break, + Err(ref err) if interrupted(err) => continue, + // Other errors we'll consider fatal. 
+ Err(err) => return Err(err), + } + } + + if bytes_read != 0 { + let received_data = &received_data[..bytes_read]; + if let Ok(str_buf) = from_utf8(received_data) { + println!("Received data: {}", str_buf.trim_end()); + } else { + println!("Received (none UTF-8) data: {:?}", received_data); + } + } + + if connection_closed { + println!("Connection closed"); + return Ok(true); + } + } + + Ok(false) +} + +fn would_block(err: &io::Error) -> bool { + err.kind() == io::ErrorKind::WouldBlock +} + +fn interrupted(err: &io::Error) -> bool { + err.kind() == io::ErrorKind::Interrupted +} diff --git a/third_party/rust/mio/examples/tcp_server.rs b/third_party/rust/mio/examples/tcp_server.rs new file mode 100644 index 0000000000..cc611ca564 --- /dev/null +++ b/third_party/rust/mio/examples/tcp_server.rs @@ -0,0 +1,189 @@ +// You can run this example from the root of the mio repo: +// cargo run --example tcp_server --features="os-poll net" +use mio::event::Event; +use mio::net::{TcpListener, TcpStream}; +use mio::{Events, Interest, Poll, Registry, Token}; +use std::collections::HashMap; +use std::io::{self, Read, Write}; +use std::str::from_utf8; + +// Setup some tokens to allow us to identify which event is for which socket. +const SERVER: Token = Token(0); + +// Some data we'll send over the connection. +const DATA: &[u8] = b"Hello world!\n"; + +#[cfg(not(target_os = "wasi"))] +fn main() -> io::Result<()> { + env_logger::init(); + + // Create a poll instance. + let mut poll = Poll::new()?; + // Create storage for events. + let mut events = Events::with_capacity(128); + + // Setup the TCP server socket. + let addr = "127.0.0.1:9000".parse().unwrap(); + let mut server = TcpListener::bind(addr)?; + + // Register the server with poll we can receive events for it. + poll.registry() + .register(&mut server, SERVER, Interest::READABLE)?; + + // Map of `Token` -> `TcpStream`. + let mut connections = HashMap::new(); + // Unique token for each incoming connection. + let mut unique_token = Token(SERVER.0 + 1); + + println!("You can connect to the server using `nc`:"); + println!(" $ nc 127.0.0.1 9000"); + println!("You'll see our welcome message and anything you type will be printed here."); + + loop { + poll.poll(&mut events, None)?; + + for event in events.iter() { + match event.token() { + SERVER => loop { + // Received an event for the TCP server socket, which + // indicates we can accept an connection. + let (mut connection, address) = match server.accept() { + Ok((connection, address)) => (connection, address), + Err(e) if e.kind() == io::ErrorKind::WouldBlock => { + // If we get a `WouldBlock` error we know our + // listener has no more incoming connections queued, + // so we can return to polling and wait for some + // more. + break; + } + Err(e) => { + // If it was any other kind of error, something went + // wrong and we terminate with an error. + return Err(e); + } + }; + + println!("Accepted connection from: {}", address); + + let token = next(&mut unique_token); + poll.registry().register( + &mut connection, + token, + Interest::READABLE.add(Interest::WRITABLE), + )?; + + connections.insert(token, connection); + }, + token => { + // Maybe received an event for a TCP connection. + let done = if let Some(connection) = connections.get_mut(&token) { + handle_connection_event(poll.registry(), connection, event)? + } else { + // Sporadic events happen, we can safely ignore them. 
+ false + }; + if done { + if let Some(mut connection) = connections.remove(&token) { + poll.registry().deregister(&mut connection)?; + } + } + } + } + } + } +} + +fn next(current: &mut Token) -> Token { + let next = current.0; + current.0 += 1; + Token(next) +} + +/// Returns `true` if the connection is done. +fn handle_connection_event( + registry: &Registry, + connection: &mut TcpStream, + event: &Event, +) -> io::Result<bool> { + if event.is_writable() { + // We can (maybe) write to the connection. + match connection.write(DATA) { + // We want to write the entire `DATA` buffer in a single go. If we + // write less we'll return a short write error (same as + // `io::Write::write_all` does). + Ok(n) if n < DATA.len() => return Err(io::ErrorKind::WriteZero.into()), + Ok(_) => { + // After we've written something we'll reregister the connection + // to only respond to readable events. + registry.reregister(connection, event.token(), Interest::READABLE)? + } + // Would block "errors" are the OS's way of saying that the + // connection is not actually ready to perform this I/O operation. + Err(ref err) if would_block(err) => {} + // Got interrupted (how rude!), we'll try again. + Err(ref err) if interrupted(err) => { + return handle_connection_event(registry, connection, event) + } + // Other errors we'll consider fatal. + Err(err) => return Err(err), + } + } + + if event.is_readable() { + let mut connection_closed = false; + let mut received_data = vec![0; 4096]; + let mut bytes_read = 0; + // We can (maybe) read from the connection. + loop { + match connection.read(&mut received_data[bytes_read..]) { + Ok(0) => { + // Reading 0 bytes means the other side has closed the + // connection or is done writing, then so are we. + connection_closed = true; + break; + } + Ok(n) => { + bytes_read += n; + if bytes_read == received_data.len() { + received_data.resize(received_data.len() + 1024, 0); + } + } + // Would block "errors" are the OS's way of saying that the + // connection is not actually ready to perform this I/O operation. + Err(ref err) if would_block(err) => break, + Err(ref err) if interrupted(err) => continue, + // Other errors we'll consider fatal. + Err(err) => return Err(err), + } + } + + if bytes_read != 0 { + let received_data = &received_data[..bytes_read]; + if let Ok(str_buf) = from_utf8(received_data) { + println!("Received data: {}", str_buf.trim_end()); + } else { + println!("Received (none UTF-8) data: {:?}", received_data); + } + } + + if connection_closed { + println!("Connection closed"); + return Ok(true); + } + } + + Ok(false) +} + +fn would_block(err: &io::Error) -> bool { + err.kind() == io::ErrorKind::WouldBlock +} + +fn interrupted(err: &io::Error) -> bool { + err.kind() == io::ErrorKind::Interrupted +} + +#[cfg(target_os = "wasi")] +fn main() { + panic!("can't bind to an address with wasi") +} diff --git a/third_party/rust/mio/examples/udp_server.rs b/third_party/rust/mio/examples/udp_server.rs new file mode 100644 index 0000000000..95f8a836a3 --- /dev/null +++ b/third_party/rust/mio/examples/udp_server.rs @@ -0,0 +1,85 @@ +// You can run this example from the root of the mio repo: +// cargo run --example udp_server --features="os-poll net" +use log::warn; +use mio::{Events, Interest, Poll, Token}; +use std::io; + +// A token to allow us to identify which event is for the `UdpSocket`. 
+const UDP_SOCKET: Token = Token(0); + +#[cfg(not(target_os = "wasi"))] +fn main() -> io::Result<()> { + use mio::net::UdpSocket; + + env_logger::init(); + + // Create a poll instance. + let mut poll = Poll::new()?; + // Create storage for events. Since we will only register a single socket, a + // capacity of 1 will do. + let mut events = Events::with_capacity(1); + + // Setup the UDP socket. + let addr = "127.0.0.1:9000".parse().unwrap(); + + let mut socket = UdpSocket::bind(addr)?; + + // Register our socket with the token defined above and an interest in being + // `READABLE`. + poll.registry() + .register(&mut socket, UDP_SOCKET, Interest::READABLE)?; + + println!("You can connect to the server using `nc`:"); + println!(" $ nc -u 127.0.0.1 9000"); + println!("Anything you type will be echoed back to you."); + + // Initialize a buffer for the UDP packet. We use the maximum size of a UDP + // packet, which is the maximum value of 16 a bit integer. + let mut buf = [0; 1 << 16]; + + // Our event loop. + loop { + // Poll to check if we have events waiting for us. + poll.poll(&mut events, None)?; + + // Process each event. + for event in events.iter() { + // Validate the token we registered our socket with, + // in this example it will only ever be one but we + // make sure it's valid none the less. + match event.token() { + UDP_SOCKET => loop { + // In this loop we receive all packets queued for the socket. + match socket.recv_from(&mut buf) { + Ok((packet_size, source_address)) => { + // Echo the data. + socket.send_to(&buf[..packet_size], source_address)?; + } + Err(e) if e.kind() == io::ErrorKind::WouldBlock => { + // If we get a `WouldBlock` error we know our socket + // has no more packets queued, so we can return to + // polling and wait for some more. + break; + } + Err(e) => { + // If it was any other kind of error, something went + // wrong and we terminate with an error. + return Err(e); + } + } + }, + _ => { + // This should never happen as we only registered our + // `UdpSocket` using the `UDP_SOCKET` token, but if it ever + // does we'll log it. + warn!("Got event for unexpected token: {:?}", event); + } + } + } + } +} + +#[cfg(target_os = "wasi")] +fn main() { + panic!("can't bind to an address with wasi") +} diff --git a/third_party/rust/mio/src/event/event.rs b/third_party/rust/mio/src/event/event.rs new file mode 100644 index 0000000000..2d85742b96 --- /dev/null +++ b/third_party/rust/mio/src/event/event.rs @@ -0,0 +1,230 @@ +use crate::{sys, Token}; + +use std::fmt; + +/// A readiness event. +/// +/// `Event` is a readiness state paired with a [`Token`]. It is returned by +/// [`Poll::poll`]. +/// +/// For more documentation on polling and events, see [`Poll`]. +/// +/// [`Poll::poll`]: ../struct.Poll.html#method.poll +/// [`Poll`]: ../struct.Poll.html +/// [`Token`]: ../struct.Token.html +#[derive(Clone)] +#[repr(transparent)] +pub struct Event { + inner: sys::Event, +} + +impl Event { + /// Returns the event's token. + pub fn token(&self) -> Token { + sys::event::token(&self.inner) + } + + /// Returns true if the event contains readable readiness. + /// + /// # Notes + /// + /// Out-of-band (OOB) data also triggers readable events. But must + /// application don't actually read OOB data, this could leave an + /// application open to a Denial-of-Service (Dos) attack, see + /// <https://github.com/sandstorm-io/sandstorm-website/blob/58f93346028c0576e8147627667328eaaf4be9fa/_posts/2015-04-08-osx-security-bug.md>. 
+ /// However because Mio uses edge-triggers it will not result in an infinite + /// loop as described in the article above. + pub fn is_readable(&self) -> bool { + sys::event::is_readable(&self.inner) + } + + /// Returns true if the event contains writable readiness. + pub fn is_writable(&self) -> bool { + sys::event::is_writable(&self.inner) + } + + /// Returns true if the event contains error readiness. + /// + /// Error events occur when the socket enters an error state. In this case, + /// the socket will also receive a readable or writable event. Reading or + /// writing to the socket will result in an error. + /// + /// # Notes + /// + /// Method is available on all platforms, but not all platforms trigger the + /// error event. + /// + /// The table below shows what flags are checked on what OS. + /// + /// | [OS selector] | Flag(s) checked | + /// |---------------|-----------------| + /// | [epoll] | `EPOLLERR` | + /// | [kqueue] | `EV_ERROR` and `EV_EOF` with `fflags` set to `0`. | + /// + /// [OS selector]: ../struct.Poll.html#implementation-notes + /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html + /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 + pub fn is_error(&self) -> bool { + sys::event::is_error(&self.inner) + } + + /// Returns true if the event contains read closed readiness. + /// + /// # Notes + /// + /// Read closed readiness can be expected after any of the following have + /// occurred: + /// * The local stream has shutdown the read half of its socket + /// * The local stream has shutdown both the read half and the write half + /// of its socket + /// * The peer stream has shutdown the write half its socket; this sends a + /// `FIN` packet that has been received by the local stream + /// + /// Method is a best effort implementation. While some platforms may not + /// return readiness when read half is closed, it is guaranteed that + /// false-positives will not occur. + /// + /// The table below shows what flags are checked on what OS. + /// + /// | [OS selector] | Flag(s) checked | + /// |---------------|-----------------| + /// | [epoll] | `EPOLLHUP`, or | + /// | | `EPOLLIN` and `EPOLLRDHUP` | + /// | [kqueue] | `EV_EOF` | + /// + /// [OS selector]: ../struct.Poll.html#implementation-notes + /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html + /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 + pub fn is_read_closed(&self) -> bool { + sys::event::is_read_closed(&self.inner) + } + + /// Returns true if the event contains write closed readiness. + /// + /// # Notes + /// + /// On [epoll] this is essentially a check for `EPOLLHUP` flag as the + /// local stream shutting down its write half does not trigger this event. + /// + /// On [kqueue] the local stream shutting down the write half of its + /// socket will trigger this event. + /// + /// Method is a best effort implementation. While some platforms may not + /// return readiness when write half is closed, it is guaranteed that + /// false-positives will not occur. + /// + /// The table below shows what flags are checked on what OS. 
+ /// + /// | [OS selector] | Flag(s) checked | + /// |---------------|-----------------| + /// | [epoll] | `EPOLLHUP`, or | + /// | | only `EPOLLERR`, or | + /// | | `EPOLLOUT` and `EPOLLERR` | + /// | [kqueue] | `EV_EOF` | + /// + /// [OS selector]: ../struct.Poll.html#implementation-notes + /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html + /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 + pub fn is_write_closed(&self) -> bool { + sys::event::is_write_closed(&self.inner) + } + + /// Returns true if the event contains priority readiness. + /// + /// # Notes + /// + /// Method is available on all platforms, but not all platforms trigger the + /// priority event. + /// + /// The table below shows what flags are checked on what OS. + /// + /// | [OS selector] | Flag(s) checked | + /// |---------------|-----------------| + /// | [epoll] | `EPOLLPRI` | + /// | [kqueue] | *Not supported* | + /// + /// [OS selector]: ../struct.Poll.html#implementation-notes + /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html + /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 + #[inline] + pub fn is_priority(&self) -> bool { + sys::event::is_priority(&self.inner) + } + + /// Returns true if the event contains AIO readiness. + /// + /// # Notes + /// + /// Method is available on all platforms, but not all platforms support AIO. + /// + /// The table below shows what flags are checked on what OS. + /// + /// | [OS selector] | Flag(s) checked | + /// |---------------|-----------------| + /// | [epoll] | *Not supported* | + /// | [kqueue]<sup>1</sup> | `EVFILT_AIO` | + /// + /// 1: Only supported on DragonFly BSD, FreeBSD, iOS and macOS. + /// + /// [OS selector]: ../struct.Poll.html#implementation-notes + /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html + /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 + pub fn is_aio(&self) -> bool { + sys::event::is_aio(&self.inner) + } + + /// Returns true if the event contains LIO readiness. + /// + /// # Notes + /// + /// Method is available on all platforms, but only FreeBSD supports LIO. On + /// FreeBSD this method checks the `EVFILT_LIO` flag. + pub fn is_lio(&self) -> bool { + sys::event::is_lio(&self.inner) + } + + /// Create a reference to an `Event` from a platform specific event. + pub(crate) fn from_sys_event_ref(sys_event: &sys::Event) -> &Event { + unsafe { + // This is safe because the memory layout of `Event` is + // the same as `sys::Event` due to the `repr(transparent)` attribute. + &*(sys_event as *const sys::Event as *const Event) + } + } +} + +/// When the [alternate] flag is enabled this will print platform specific +/// details, for example the fields of the `kevent` structure on platforms that +/// use `kqueue(2)`. Note however that the output of this implementation is +/// **not** consider a part of the stable API. 
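+///
+/// The snippet below is an illustrative sketch, not part of the upstream
+/// documentation; the `event` value is assumed to be an `&Event` yielded by
+/// `Events::iter`:
+///
+/// ```ignore
+/// // Compact form: portable, cross-platform fields only.
+/// println!("{:?}", event);
+/// // Alternate form: additionally prints a platform specific `details` field.
+/// println!("{:#?}", event);
+/// ```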
+/// +/// [alternate]: fmt::Formatter::alternate +impl fmt::Debug for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let alternate = f.alternate(); + let mut d = f.debug_struct("Event"); + d.field("token", &self.token()) + .field("readable", &self.is_readable()) + .field("writable", &self.is_writable()) + .field("error", &self.is_error()) + .field("read_closed", &self.is_read_closed()) + .field("write_closed", &self.is_write_closed()) + .field("priority", &self.is_priority()) + .field("aio", &self.is_aio()) + .field("lio", &self.is_lio()); + + if alternate { + struct EventDetails<'a>(&'a sys::Event); + + impl<'a> fmt::Debug for EventDetails<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + sys::event::debug_details(f, self.0) + } + } + + d.field("details", &EventDetails(&self.inner)).finish() + } else { + d.finish() + } + } +} diff --git a/third_party/rust/mio/src/event/events.rs b/third_party/rust/mio/src/event/events.rs new file mode 100644 index 0000000000..f3c5a2f02f --- /dev/null +++ b/third_party/rust/mio/src/event/events.rs @@ -0,0 +1,230 @@ +use crate::event::Event; +use crate::sys; + +use std::fmt; + +/// A collection of readiness events. +/// +/// `Events` is passed as an argument to [`Poll::poll`] and will be used to +/// receive any new readiness events received since the last poll. Usually, a +/// single `Events` instance is created at the same time as a [`Poll`] and +/// reused on each call to [`Poll::poll`]. +/// +/// See [`Poll`] for more documentation on polling. +/// +/// [`Poll::poll`]: ../struct.Poll.html#method.poll +/// [`Poll`]: ../struct.Poll.html +/// +/// # Examples +/// +#[cfg_attr(feature = "os-poll", doc = "```")] +#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] +/// # use std::error::Error; +/// # fn main() -> Result<(), Box<dyn Error>> { +/// use mio::{Events, Poll}; +/// use std::time::Duration; +/// +/// let mut events = Events::with_capacity(1024); +/// let mut poll = Poll::new()?; +/// # +/// # assert!(events.is_empty()); +/// +/// // Register `event::Source`s with `poll`. +/// +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; +/// +/// for event in events.iter() { +/// println!("Got an event for {:?}", event.token()); +/// } +/// # Ok(()) +/// # } +/// ``` +pub struct Events { + inner: sys::Events, +} + +/// [`Events`] iterator. +/// +/// This struct is created by the [`iter`] method on [`Events`]. +/// +/// [`Events`]: struct.Events.html +/// [`iter`]: struct.Events.html#method.iter +/// +/// # Examples +/// +#[cfg_attr(feature = "os-poll", doc = "```")] +#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] +/// # use std::error::Error; +/// # fn main() -> Result<(), Box<dyn Error>> { +/// use mio::{Events, Poll}; +/// use std::time::Duration; +/// +/// let mut events = Events::with_capacity(1024); +/// let mut poll = Poll::new()?; +/// +/// // Register handles with `poll`. +/// +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; +/// +/// for event in events.iter() { +/// println!("Got an event for {:?}", event.token()); +/// } +/// # Ok(()) +/// # } +/// ``` +#[derive(Debug, Clone)] +pub struct Iter<'a> { + inner: &'a Events, + pos: usize, +} + +impl Events { + /// Return a new `Events` capable of holding up to `capacity` events. 
+ /// + /// # Examples + /// + /// ``` + /// use mio::Events; + /// + /// let events = Events::with_capacity(1024); + /// assert_eq!(1024, events.capacity()); + /// ``` + pub fn with_capacity(capacity: usize) -> Events { + Events { + inner: sys::Events::with_capacity(capacity), + } + } + + /// Returns the number of `Event` values that `self` can hold. + /// + /// ``` + /// use mio::Events; + /// + /// let events = Events::with_capacity(1024); + /// assert_eq!(1024, events.capacity()); + /// ``` + pub fn capacity(&self) -> usize { + self.inner.capacity() + } + + /// Returns `true` if `self` contains no `Event` values. + /// + /// # Examples + /// + /// ``` + /// use mio::Events; + /// + /// let events = Events::with_capacity(1024); + /// assert!(events.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Returns an iterator over the `Event` values. + /// + /// # Examples + /// + #[cfg_attr(feature = "os-poll", doc = "```")] + #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] + /// # use std::error::Error; + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::{Events, Poll}; + /// use std::time::Duration; + /// + /// let mut events = Events::with_capacity(1024); + /// let mut poll = Poll::new()?; + /// + /// // Register handles with `poll`. + /// + /// poll.poll(&mut events, Some(Duration::from_millis(100)))?; + /// + /// for event in events.iter() { + /// println!("Got an event for {:?}", event.token()); + /// } + /// # Ok(()) + /// # } + /// ``` + pub fn iter(&self) -> Iter<'_> { + Iter { + inner: self, + pos: 0, + } + } + + /// Clearing all `Event` values from container explicitly. + /// + /// # Notes + /// + /// Events are cleared before every `poll`, so it is not required to call + /// this manually. + /// + /// # Examples + /// + #[cfg_attr(feature = "os-poll", doc = "```")] + #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] + /// # use std::error::Error; + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::{Events, Poll}; + /// use std::time::Duration; + /// + /// let mut events = Events::with_capacity(1024); + /// let mut poll = Poll::new()?; + /// + /// // Register handles with `poll`. + /// + /// poll.poll(&mut events, Some(Duration::from_millis(100)))?; + /// + /// // Clear all events. + /// events.clear(); + /// assert!(events.is_empty()); + /// # Ok(()) + /// # } + /// ``` + pub fn clear(&mut self) { + self.inner.clear(); + } + + /// Returns the inner `sys::Events`. + pub(crate) fn sys(&mut self) -> &mut sys::Events { + &mut self.inner + } +} + +impl<'a> IntoIterator for &'a Events { + type Item = &'a Event; + type IntoIter = Iter<'a>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a> Iterator for Iter<'a> { + type Item = &'a Event; + + fn next(&mut self) -> Option<Self::Item> { + let ret = self + .inner + .inner + .get(self.pos) + .map(Event::from_sys_event_ref); + self.pos += 1; + ret + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let size = self.inner.inner.len(); + (size, Some(size)) + } + + fn count(self) -> usize { + self.inner.inner.len() + } +} + +impl fmt::Debug for Events { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self).finish() + } +} diff --git a/third_party/rust/mio/src/event/mod.rs b/third_party/rust/mio/src/event/mod.rs new file mode 100644 index 0000000000..8e17f82ee5 --- /dev/null +++ b/third_party/rust/mio/src/event/mod.rs @@ -0,0 +1,10 @@ +//! Readiness event types and utilities. 
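+//!
+//! As an illustrative sketch (not part of the upstream docs; `poll` and the
+//! `SOURCE` token are assumed to come from an already set up `Poll` and an
+//! earlier registration), the types in this module are typically used together
+//! like this:
+//!
+//! ```ignore
+//! let mut events = mio::Events::with_capacity(128);
+//! poll.poll(&mut events, None)?;
+//! for event in events.iter() {
+//!     // Each `Event` pairs a `Token` with readiness flags.
+//!     if event.token() == SOURCE && event.is_readable() {
+//!         // Handle readable readiness for the registered `Source`.
+//!     }
+//! }
+//! ```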
+
+#[allow(clippy::module_inception)]
+mod event;
+mod events;
+mod source;
+
+pub use self::event::Event;
+pub use self::events::{Events, Iter};
+pub use self::source::Source;
diff --git a/third_party/rust/mio/src/event/source.rs b/third_party/rust/mio/src/event/source.rs
new file mode 100644
index 0000000000..619f72d42a
--- /dev/null
+++ b/third_party/rust/mio/src/event/source.rs
@@ -0,0 +1,139 @@
+use crate::{Interest, Registry, Token};
+
+use std::io;
+
+/// An event source that may be registered with [`Registry`].
+///
+/// Types that implement `event::Source` can be registered with
+/// `Registry`. Users of Mio **should not** use the `event::Source` trait
+/// functions directly. Instead, the equivalent functions on `Registry` should
+/// be used.
+///
+/// See [`Registry`] for more details.
+///
+/// [`Registry`]: ../struct.Registry.html
+///
+/// # Implementing `event::Source`
+///
+/// Event sources are always backed by system handles, such as sockets or other
+/// OS handles. These `event::Source`s will be monitored by the system
+/// selector. An implementation of `Source` will almost always delegate to a
+/// lower level handle. Examples of this are [`TcpStream`]s, or the *unix only*
+/// [`SourceFd`].
+///
+/// [`TcpStream`]: ../net/struct.TcpStream.html
+/// [`SourceFd`]: ../unix/struct.SourceFd.html
+///
+/// # Dropping `event::Source`s
+///
+/// All `event::Source`s, unless otherwise specified, need to be [deregistered]
+/// before being dropped for them to not leak resources. This goes against the
+/// normal drop behaviour of types in Rust which clean up after themselves, e.g.
+/// a `File` will close itself. However since deregistering needs access to
+/// [`Registry`] this cannot be done while being dropped.
+///
+/// [deregistered]: ../struct.Registry.html#method.deregister
+///
+/// # Examples
+///
+/// Implementing `Source` on a struct containing a socket:
+///
+#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+/// use mio::{Interest, Registry, Token};
+/// use mio::event::Source;
+/// use mio::net::TcpStream;
+///
+/// use std::io;
+///
+/// # #[allow(dead_code)]
+/// pub struct MySource {
+///     socket: TcpStream,
+/// }
+///
+/// impl Source for MySource {
+///     fn register(&mut self, registry: &Registry, token: Token, interests: Interest)
+///         -> io::Result<()>
+///     {
+///         // Delegate the `register` call to `socket`
+///         self.socket.register(registry, token, interests)
+///     }
+///
+///     fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest)
+///         -> io::Result<()>
+///     {
+///         // Delegate the `reregister` call to `socket`
+///         self.socket.reregister(registry, token, interests)
+///     }
+///
+///     fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+///         // Delegate the `deregister` call to `socket`
+///         self.socket.deregister(registry)
+///     }
+/// }
+/// ```
+pub trait Source {
+    /// Register `self` with the given `Registry` instance.
+    ///
+    /// This function should not be called directly. Use [`Registry::register`]
+    /// instead. Implementors should handle registration by delegating the call
+    /// to another `Source` type.
+    ///
+    /// [`Registry::register`]: ../struct.Registry.html#method.register
+    fn register(
+        &mut self,
+        registry: &Registry,
+        token: Token,
+        interests: Interest,
+    ) -> io::Result<()>;
+
+    /// Re-register `self` with the given `Registry` instance.
+    ///
+    /// This function should not be called directly. Use
+    /// [`Registry::reregister`] instead. Implementors should handle
+    /// re-registration by delegating the call to another `Source` type.
+    ///
+    /// [`Registry::reregister`]: ../struct.Registry.html#method.reregister
+    fn reregister(
+        &mut self,
+        registry: &Registry,
+        token: Token,
+        interests: Interest,
+    ) -> io::Result<()>;
+
+    /// Deregister `self` from the given `Registry` instance.
+    ///
+    /// This function should not be called directly. Use
+    /// [`Registry::deregister`] instead. Implementors should handle
+    /// deregistration by delegating the call to another `Source` type.
+    ///
+    /// [`Registry::deregister`]: ../struct.Registry.html#method.deregister
+    fn deregister(&mut self, registry: &Registry) -> io::Result<()>;
+}
+
+impl<T> Source for Box<T>
+where
+    T: Source + ?Sized,
+{
+    fn register(
+        &mut self,
+        registry: &Registry,
+        token: Token,
+        interests: Interest,
+    ) -> io::Result<()> {
+        (**self).register(registry, token, interests)
+    }
+
+    fn reregister(
+        &mut self,
+        registry: &Registry,
+        token: Token,
+        interests: Interest,
+    ) -> io::Result<()> {
+        (**self).reregister(registry, token, interests)
+    }
+
+    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+        (**self).deregister(registry)
+    }
+}
diff --git a/third_party/rust/mio/src/interest.rs b/third_party/rust/mio/src/interest.rs
new file mode 100644
index 0000000000..06b1632252
--- /dev/null
+++ b/third_party/rust/mio/src/interest.rs
@@ -0,0 +1,193 @@
+use std::num::NonZeroU8;
+use std::{fmt, ops};
+
+/// Interest used in registering.
+///
+/// Interests are used in [registering] [`event::Source`]s with [`Poll`]; they
+/// indicate what readiness should be monitored for. For example if a socket is
+/// registered with [readable] interests and the socket becomes writable, no
+/// event will be returned from a call to [`poll`].
+///
+/// [registering]: struct.Registry.html#method.register
+/// [`event::Source`]: ./event/trait.Source.html
+/// [`Poll`]: struct.Poll.html
+/// [readable]: struct.Interest.html#associatedconstant.READABLE
+/// [`poll`]: struct.Poll.html#method.poll
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct Interest(NonZeroU8);
+
+// These must be unique.
+const READABLE: u8 = 0b0001;
+const WRITABLE: u8 = 0b0010;
+// The following are not available on all platforms.
+const AIO: u8 = 0b0100;
+const LIO: u8 = 0b1000;
+const PRIORITY: u8 = 0b10000;
+
+impl Interest {
+    /// Returns an `Interest` set representing readable interests.
+    pub const READABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(READABLE) });
+
+    /// Returns an `Interest` set representing writable interests.
+    pub const WRITABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(WRITABLE) });
+
+    /// Returns an `Interest` set representing AIO completion interests.
+    #[cfg(any(
+        target_os = "dragonfly",
+        target_os = "freebsd",
+        target_os = "ios",
+        target_os = "macos",
+        target_os = "tvos",
+        target_os = "watchos",
+    ))]
+    pub const AIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(AIO) });
+
+    /// Returns an `Interest` set representing LIO completion interests.
+    #[cfg(target_os = "freebsd")]
+    pub const LIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(LIO) });
+
+    /// Returns an `Interest` set representing priority completion interests.
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    pub const PRIORITY: Interest = Interest(unsafe { NonZeroU8::new_unchecked(PRIORITY) });
+
+    /// Add together two `Interest`.
+    ///
+    /// This does the same thing as the `BitOr` implementation, but is a
+    /// constant function.
+    ///
+    /// ```
+    /// use mio::Interest;
+    ///
+    /// const INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE);
+    /// # fn silent_dead_code_warning(_: Interest) { }
+    /// # silent_dead_code_warning(INTERESTS)
+    /// ```
+    #[allow(clippy::should_implement_trait)]
+    pub const fn add(self, other: Interest) -> Interest {
+        Interest(unsafe { NonZeroU8::new_unchecked(self.0.get() | other.0.get()) })
+    }
+
+    /// Removes `other` `Interest` from `self`.
+    ///
+    /// Returns `None` if the set would be empty after removing `other`.
+    ///
+    /// ```
+    /// use mio::Interest;
+    ///
+    /// const RW_INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE);
+    ///
+    /// // As long as one interest remains this will return `Some`.
+    /// let w_interest = RW_INTERESTS.remove(Interest::READABLE).unwrap();
+    /// assert!(!w_interest.is_readable());
+    /// assert!(w_interest.is_writable());
+    ///
+    /// // Removing all interests from the set will return `None`.
+    /// assert_eq!(w_interest.remove(Interest::WRITABLE), None);
+    ///
+    /// // It's also possible to remove multiple interests at once.
+    /// assert_eq!(RW_INTERESTS.remove(RW_INTERESTS), None);
+    /// ```
+    pub fn remove(self, other: Interest) -> Option<Interest> {
+        NonZeroU8::new(self.0.get() & !other.0.get()).map(Interest)
+    }
+
+    /// Returns true if the value includes readable readiness.
+    pub const fn is_readable(self) -> bool {
+        (self.0.get() & READABLE) != 0
+    }
+
+    /// Returns true if the value includes writable readiness.
+    pub const fn is_writable(self) -> bool {
+        (self.0.get() & WRITABLE) != 0
+    }
+
+    /// Returns true if `Interest` contains AIO readiness.
+    pub const fn is_aio(self) -> bool {
+        (self.0.get() & AIO) != 0
+    }
+
+    /// Returns true if `Interest` contains LIO readiness.
+    pub const fn is_lio(self) -> bool {
+        (self.0.get() & LIO) != 0
+    }
+
+    /// Returns true if `Interest` contains priority readiness.
+    pub const fn is_priority(self) -> bool {
+        (self.0.get() & PRIORITY) != 0
+    }
+}
+
+impl ops::BitOr for Interest {
+    type Output = Self;
+
+    #[inline]
+    fn bitor(self, other: Self) -> Self {
+        self.add(other)
+    }
+}
+
+impl ops::BitOrAssign for Interest {
+    #[inline]
+    fn bitor_assign(&mut self, other: Self) {
+        self.0 = (*self | other).0;
+    }
+}
+
+impl fmt::Debug for Interest {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut one = false;
+        if self.is_readable() {
+            if one {
+                write!(fmt, " | ")?
+            }
+            write!(fmt, "READABLE")?;
+            one = true
+        }
+        if self.is_writable() {
+            if one {
+                write!(fmt, " | ")?
+            }
+            write!(fmt, "WRITABLE")?;
+            one = true
+        }
+        #[cfg(any(
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        ))]
+        {
+            if self.is_aio() {
+                if one {
+                    write!(fmt, " | ")?
+                }
+                write!(fmt, "AIO")?;
+                one = true
+            }
+        }
+        #[cfg(any(target_os = "freebsd"))]
+        {
+            if self.is_lio() {
+                if one {
+                    write!(fmt, " | ")?
+                }
+                write!(fmt, "LIO")?;
+                one = true
+            }
+        }
+        #[cfg(any(target_os = "linux", target_os = "android"))]
+        {
+            if self.is_priority() {
+                if one {
+                    write!(fmt, " | ")?
+ } + write!(fmt, "PRIORITY")?; + one = true + } + } + debug_assert!(one, "printing empty interests"); + Ok(()) + } +} diff --git a/third_party/rust/mio/src/io_source.rs b/third_party/rust/mio/src/io_source.rs new file mode 100644 index 0000000000..99623c1165 --- /dev/null +++ b/third_party/rust/mio/src/io_source.rs @@ -0,0 +1,334 @@ +use std::ops::{Deref, DerefMut}; +#[cfg(unix)] +use std::os::unix::io::AsRawFd; +#[cfg(target_os = "wasi")] +use std::os::wasi::io::AsRawFd; +#[cfg(windows)] +use std::os::windows::io::AsRawSocket; +#[cfg(debug_assertions)] +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::{fmt, io}; + +use crate::sys::IoSourceState; +use crate::{event, Interest, Registry, Token}; + +/// Adapter for a [`RawFd`] or [`RawSocket`] providing an [`event::Source`] +/// implementation. +/// +/// `IoSource` enables registering any FD or socket wrapper with [`Poll`]. +/// +/// While only implementations for TCP, UDP, and UDS (Unix only) are provided, +/// Mio supports registering any FD or socket that can be registered with the +/// underlying OS selector. `IoSource` provides the necessary bridge. +/// +/// [`RawFd`]: std::os::unix::io::RawFd +/// [`RawSocket`]: std::os::windows::io::RawSocket +/// +/// # Notes +/// +/// To handle the registrations and events properly **all** I/O operations (such +/// as `read`, `write`, etc.) must go through the [`do_io`] method to ensure the +/// internal state is updated accordingly. +/// +/// [`Poll`]: crate::Poll +/// [`do_io`]: IoSource::do_io +/* +/// +/// # Examples +/// +/// Basic usage. +/// +/// ``` +/// # use std::error::Error; +/// # fn main() -> Result<(), Box<dyn Error>> { +/// use mio::{Interest, Poll, Token}; +/// use mio::IoSource; +/// +/// use std::net; +/// +/// let poll = Poll::new()?; +/// +/// // Bind a std TCP listener. +/// let listener = net::TcpListener::bind("127.0.0.1:0")?; +/// // Wrap it in the `IoSource` type. +/// let mut listener = IoSource::new(listener); +/// +/// // Register the listener. +/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?; +/// # Ok(()) +/// # } +/// ``` +*/ +pub struct IoSource<T> { + state: IoSourceState, + inner: T, + #[cfg(debug_assertions)] + selector_id: SelectorId, +} + +impl<T> IoSource<T> { + /// Create a new `IoSource`. + pub fn new(io: T) -> IoSource<T> { + IoSource { + state: IoSourceState::new(), + inner: io, + #[cfg(debug_assertions)] + selector_id: SelectorId::new(), + } + } + + /// Execute an I/O operations ensuring that the socket receives more events + /// if it hits a [`WouldBlock`] error. + /// + /// # Notes + /// + /// This method is required to be called for **all** I/O operations to + /// ensure the user will receive events once the socket is ready again after + /// returning a [`WouldBlock`] error. + /// + /// [`WouldBlock`]: io::ErrorKind::WouldBlock + pub fn do_io<F, R>(&self, f: F) -> io::Result<R> + where + F: FnOnce(&T) -> io::Result<R>, + { + self.state.do_io(f, &self.inner) + } + + /// Returns the I/O source, dropping the state. + /// + /// # Notes + /// + /// To ensure no more events are to be received for this I/O source first + /// [`deregister`] it. + /// + /// [`deregister`]: Registry::deregister + pub fn into_inner(self) -> T { + self.inner + } +} + +/// Be careful when using this method. All I/O operations that may block must go +/// through the [`do_io`] method. 
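+///
+/// As a hedged sketch (the `source` of type `IoSource<std::net::UdpSocket>` and
+/// the `buf` buffer are illustrative assumptions, not upstream documentation),
+/// metadata calls can go through `Deref` while actual I/O goes through `do_io`:
+///
+/// ```ignore
+/// // Calls that never block can use the `Deref` implementation directly.
+/// let addr = source.local_addr()?;
+/// // Reads and writes must go through `do_io` so readiness is tracked.
+/// let n = source.do_io(|inner| inner.recv(&mut buf))?;
+/// ```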
+/// +/// [`do_io`]: IoSource::do_io +impl<T> Deref for IoSource<T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +/// Be careful when using this method. All I/O operations that may block must go +/// through the [`do_io`] method. +/// +/// [`do_io`]: IoSource::do_io +impl<T> DerefMut for IoSource<T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +#[cfg(unix)] +impl<T> event::Source for IoSource<T> +where + T: AsRawFd, +{ + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + #[cfg(debug_assertions)] + self.selector_id.associate(registry)?; + registry + .selector() + .register(self.inner.as_raw_fd(), token, interests) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + #[cfg(debug_assertions)] + self.selector_id.check_association(registry)?; + registry + .selector() + .reregister(self.inner.as_raw_fd(), token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + #[cfg(debug_assertions)] + self.selector_id.remove_association(registry)?; + registry.selector().deregister(self.inner.as_raw_fd()) + } +} + +#[cfg(windows)] +impl<T> event::Source for IoSource<T> +where + T: AsRawSocket, +{ + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + #[cfg(debug_assertions)] + self.selector_id.associate(registry)?; + self.state + .register(registry, token, interests, self.inner.as_raw_socket()) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + #[cfg(debug_assertions)] + self.selector_id.check_association(registry)?; + self.state.reregister(registry, token, interests) + } + + fn deregister(&mut self, _registry: &Registry) -> io::Result<()> { + #[cfg(debug_assertions)] + self.selector_id.remove_association(_registry)?; + self.state.deregister() + } +} + +#[cfg(target_os = "wasi")] +impl<T> event::Source for IoSource<T> +where + T: AsRawFd, +{ + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + #[cfg(debug_assertions)] + self.selector_id.associate(registry)?; + registry + .selector() + .register(self.inner.as_raw_fd() as _, token, interests) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + #[cfg(debug_assertions)] + self.selector_id.check_association(registry)?; + registry + .selector() + .reregister(self.inner.as_raw_fd() as _, token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + #[cfg(debug_assertions)] + self.selector_id.remove_association(registry)?; + registry.selector().deregister(self.inner.as_raw_fd() as _) + } +} + +impl<T> fmt::Debug for IoSource<T> +where + T: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// Used to associate an `IoSource` with a `sys::Selector`. +#[cfg(debug_assertions)] +#[derive(Debug)] +struct SelectorId { + id: AtomicUsize, +} + +#[cfg(debug_assertions)] +impl SelectorId { + /// Value of `id` if `SelectorId` is not associated with any + /// `sys::Selector`. Valid selector ids start at 1. + const UNASSOCIATED: usize = 0; + + /// Create a new `SelectorId`. 
+ const fn new() -> SelectorId { + SelectorId { + id: AtomicUsize::new(Self::UNASSOCIATED), + } + } + + /// Associate an I/O source with `registry`, returning an error if its + /// already registered. + fn associate(&self, registry: &Registry) -> io::Result<()> { + let registry_id = registry.selector().id(); + let previous_id = self.id.swap(registry_id, Ordering::AcqRel); + + if previous_id == Self::UNASSOCIATED { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::AlreadyExists, + "I/O source already registered with a `Registry`", + )) + } + } + + /// Check the association of an I/O source with `registry`, returning an + /// error if its registered with a different `Registry` or not registered at + /// all. + fn check_association(&self, registry: &Registry) -> io::Result<()> { + let registry_id = registry.selector().id(); + let id = self.id.load(Ordering::Acquire); + + if id == registry_id { + Ok(()) + } else if id == Self::UNASSOCIATED { + Err(io::Error::new( + io::ErrorKind::NotFound, + "I/O source not registered with `Registry`", + )) + } else { + Err(io::Error::new( + io::ErrorKind::AlreadyExists, + "I/O source already registered with a different `Registry`", + )) + } + } + + /// Remove a previously made association from `registry`, returns an error + /// if it was not previously associated with `registry`. + fn remove_association(&self, registry: &Registry) -> io::Result<()> { + let registry_id = registry.selector().id(); + let previous_id = self.id.swap(Self::UNASSOCIATED, Ordering::AcqRel); + + if previous_id == registry_id { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::NotFound, + "I/O source not registered with `Registry`", + )) + } + } +} + +#[cfg(debug_assertions)] +impl Clone for SelectorId { + fn clone(&self) -> SelectorId { + SelectorId { + id: AtomicUsize::new(self.id.load(Ordering::Acquire)), + } + } +} diff --git a/third_party/rust/mio/src/lib.rs b/third_party/rust/mio/src/lib.rs new file mode 100644 index 0000000000..56a7160beb --- /dev/null +++ b/third_party/rust/mio/src/lib.rs @@ -0,0 +1,266 @@ +#![deny( + missing_docs, + missing_debug_implementations, + rust_2018_idioms, + unused_imports, + dead_code +)] +#![cfg_attr(docsrs, feature(doc_cfg))] +// Disallow warnings when running tests. +#![cfg_attr(test, deny(warnings))] +// Disallow warnings in examples. +#![doc(test(attr(deny(warnings))))] + +//! Mio is a fast, low-level I/O library for Rust focusing on non-blocking APIs +//! and event notification for building high performance I/O apps with as little +//! overhead as possible over the OS abstractions. +//! +//! # Usage +//! +//! Using Mio starts by creating a [`Poll`], which reads events from the OS and +//! puts them into [`Events`]. You can handle I/O events from the OS with it. +//! +//! For more detail, see [`Poll`]. +//! +//! [`Poll`]: ../mio/struct.Poll.html +//! [`Events`]: ../mio/event/struct.Events.html +//! +//! ## Examples +//! +//! Examples can found in the `examples` directory of the source code, or [on +//! GitHub]. +//! +//! [on GitHub]: https://github.com/tokio-rs/mio/tree/master/examples +//! +//! ## Guide +//! +//! A getting started guide is available in the [`guide`] module. +//! +//! ## Available features +//! +//! The available features are described in the [`features`] module. + +// macros used internally +#[macro_use] +mod macros; + +mod interest; +mod poll; +mod sys; +mod token; +#[cfg(not(target_os = "wasi"))] +mod waker; + +pub mod event; + +cfg_io_source! { + mod io_source; +} + +cfg_net! 
{ + pub mod net; +} + +#[doc(no_inline)] +pub use event::Events; +pub use interest::Interest; +pub use poll::{Poll, Registry}; +pub use token::Token; +#[cfg(not(target_os = "wasi"))] +pub use waker::Waker; + +#[cfg(all(unix, feature = "os-ext"))] +#[cfg_attr(docsrs, doc(cfg(all(unix, feature = "os-ext"))))] +pub mod unix { + //! Unix only extensions. + + pub mod pipe { + //! Unix pipe. + //! + //! See the [`new`] function for documentation. + + pub use crate::sys::pipe::{new, Receiver, Sender}; + } + + pub use crate::sys::SourceFd; +} + +#[cfg(all(windows, feature = "os-ext"))] +#[cfg_attr(docsrs, doc(cfg(all(windows, feature = "os-ext"))))] +pub mod windows { + //! Windows only extensions. + + pub use crate::sys::named_pipe::NamedPipe; +} + +pub mod features { + //! # Mio's optional features. + //! + //! This document describes the available features in Mio. + //! + #![cfg_attr(feature = "os-poll", doc = "## `os-poll` (enabled)")] + #![cfg_attr(not(feature = "os-poll"), doc = "## `os-poll` (disabled)")] + //! + //! Mio by default provides only a shell implementation that `panic!`s the + //! moment it is actually run. To run it requires OS support, this is + //! enabled by activating the `os-poll` feature. + //! + //! This makes `Poll`, `Registry` and `Waker` functional. + //! + #![cfg_attr(feature = "os-ext", doc = "## `os-ext` (enabled)")] + #![cfg_attr(not(feature = "os-ext"), doc = "## `os-ext` (disabled)")] + //! + //! `os-ext` enables additional OS specific facilities. These facilities can + //! be found in the `unix` and `windows` module. + //! + #![cfg_attr(feature = "net", doc = "## Network types (enabled)")] + #![cfg_attr(not(feature = "net"), doc = "## Network types (disabled)")] + //! + //! The `net` feature enables networking primitives in the `net` module. +} + +pub mod guide { + //! # Getting started guide. + //! + //! In this guide we'll do the following: + //! + //! 1. Create a [`Poll`] instance (and learn what it is). + //! 2. Register an [event source]. + //! 3. Create an event loop. + //! + //! At the end you'll have a very small (but quick) TCP server that accepts + //! connections and then drops (disconnects) them. + //! + //! ## 1. Creating a `Poll` instance + //! + //! Using Mio starts by creating a [`Poll`] instance, which monitors events + //! from the OS and puts them into [`Events`]. This allows us to execute I/O + //! operations based on what operations are ready. + //! + //! [`Poll`]: ../struct.Poll.html + //! [`Events`]: ../event/struct.Events.html + //! + #![cfg_attr(feature = "os-poll", doc = "```")] + #![cfg_attr(not(feature = "os-poll"), doc = "```ignore")] + //! # use mio::{Poll, Events}; + //! # fn main() -> std::io::Result<()> { + //! // `Poll` allows for polling of readiness events. + //! let poll = Poll::new()?; + //! // `Events` is collection of readiness `Event`s and can be filled by + //! // calling `Poll::poll`. + //! let events = Events::with_capacity(128); + //! # drop((poll, events)); + //! # Ok(()) + //! # } + //! ``` + //! + //! For example if we're using a [`TcpListener`], we'll only want to + //! attempt to accept an incoming connection *iff* any connections are + //! queued and ready to be accepted. We don't want to waste our time if no + //! connections are ready. + //! + //! [`TcpListener`]: ../net/struct.TcpListener.html + //! + //! ## 2. Registering event source + //! + //! After we've created a [`Poll`] instance that monitors events from the OS + //! for us, we need to provide it with a source of events. This is done by + //! 
registering an [event source]. As the name “event source” suggests it is + //! a source of events which can be polled using a `Poll` instance. On Unix + //! systems this is usually a file descriptor, or a socket/handle on + //! Windows. + //! + //! In the example below we'll use a [`TcpListener`] for which we'll receive + //! an event (from [`Poll`]) once a connection is ready to be accepted. + //! + //! [event source]: ../event/trait.Source.html + //! + #![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] + #![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] + //! # use mio::net::TcpListener; + //! # use mio::{Poll, Token, Interest}; + //! # fn main() -> std::io::Result<()> { + //! # let poll = Poll::new()?; + //! # let address = "127.0.0.1:0".parse().unwrap(); + //! // Create a `TcpListener`, binding it to `address`. + //! let mut listener = TcpListener::bind(address)?; + //! + //! // Next we register it with `Poll` to receive events for it. The `SERVER` + //! // `Token` is used to determine that we received an event for the listener + //! // later on. + //! const SERVER: Token = Token(0); + //! poll.registry().register(&mut listener, SERVER, Interest::READABLE)?; + //! # Ok(()) + //! # } + //! ``` + //! + //! Multiple event sources can be [registered] (concurrently), so we can + //! monitor multiple sources at a time. + //! + //! [registered]: ../struct.Registry.html#method.register + //! + //! ## 3. Creating the event loop + //! + //! After we've created a [`Poll`] instance and registered one or more + //! [event sources] with it, we can [poll] it for events. Polling for events + //! is simple, we need a container to store the events: [`Events`] and need + //! to do something based on the polled events (this part is up to you, we + //! can't do it all!). If we do this in a loop we've got ourselves an event + //! loop. + //! + //! The example below shows the event loop in action, completing our small + //! TCP server. + //! + //! [poll]: ../struct.Poll.html#method.poll + //! [event sources]: ../event/trait.Source.html + //! + #![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] + #![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] + //! # use std::io; + //! # use std::time::Duration; + //! # use mio::net::TcpListener; + //! # use mio::{Poll, Token, Interest, Events}; + //! # fn main() -> io::Result<()> { + //! # let mut poll = Poll::new()?; + //! # let mut events = Events::with_capacity(128); + //! # let address = "127.0.0.1:0".parse().unwrap(); + //! # let mut listener = TcpListener::bind(address)?; + //! # const SERVER: Token = Token(0); + //! # poll.registry().register(&mut listener, SERVER, Interest::READABLE)?; + //! // Start our event loop. + //! loop { + //! // Poll the OS for events, waiting at most 100 milliseconds. + //! poll.poll(&mut events, Some(Duration::from_millis(100)))?; + //! + //! // Process each event. + //! for event in events.iter() { + //! // We can use the token we previously provided to `register` to + //! // determine for which type the event is. + //! match event.token() { + //! SERVER => loop { + //! // One or more connections are ready, so we'll attempt to + //! // accept them (in a loop). + //! match listener.accept() { + //! Ok((connection, address)) => { + //! println!("Got a connection from: {}", address); + //! # drop(connection); + //! }, + //! // A "would block error" is returned if the operation + //! // is not ready, so we'll stop trying to accept + //! 
// connections. + //! Err(ref err) if would_block(err) => break, + //! Err(err) => return Err(err), + //! } + //! } + //! # _ => unreachable!(), + //! } + //! } + //! # return Ok(()); + //! } + //! + //! fn would_block(err: &io::Error) -> bool { + //! err.kind() == io::ErrorKind::WouldBlock + //! } + //! # } + //! ``` +} diff --git a/third_party/rust/mio/src/macros.rs b/third_party/rust/mio/src/macros.rs new file mode 100644 index 0000000000..e380c6b14e --- /dev/null +++ b/third_party/rust/mio/src/macros.rs @@ -0,0 +1,98 @@ +//! Macros to ease conditional code based on enabled features. + +// Depending on the features not all macros are used. +#![allow(unused_macros)] + +/// The `os-poll` feature is enabled. +macro_rules! cfg_os_poll { + ($($item:item)*) => { + $( + #[cfg(feature = "os-poll")] + #[cfg_attr(docsrs, doc(cfg(feature = "os-poll")))] + $item + )* + } +} + +/// The `os-poll` feature is disabled. +macro_rules! cfg_not_os_poll { + ($($item:item)*) => { + $( + #[cfg(not(feature = "os-poll"))] + $item + )* + } +} + +/// The `os-ext` feature is enabled. +macro_rules! cfg_os_ext { + ($($item:item)*) => { + $( + #[cfg(feature = "os-ext")] + #[cfg_attr(docsrs, doc(cfg(feature = "os-ext")))] + $item + )* + } +} + +/// The `net` feature is enabled. +macro_rules! cfg_net { + ($($item:item)*) => { + $( + #[cfg(feature = "net")] + #[cfg_attr(docsrs, doc(cfg(feature = "net")))] + $item + )* + } +} + +/// One of the features enabled that needs `IoSource`. That is `net` or `os-ext` +/// on Unix (for `pipe`). +macro_rules! cfg_io_source { + ($($item:item)*) => { + $( + #[cfg(any(feature = "net", all(unix, feature = "os-ext")))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "net", all(unix, feature = "os-ext")))))] + $item + )* + } +} + +/// The `os-ext` feature is enabled, or one of the features that need `os-ext`. +macro_rules! cfg_any_os_ext { + ($($item:item)*) => { + $( + #[cfg(any(feature = "os-ext", feature = "net"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "os-ext", feature = "net"))))] + $item + )* + } +} + +macro_rules! trace { + ($($t:tt)*) => { + log!(trace, $($t)*) + } +} + +macro_rules! warn { + ($($t:tt)*) => { + log!(warn, $($t)*) + } +} + +macro_rules! error { + ($($t:tt)*) => { + log!(error, $($t)*) + } +} + +macro_rules! log { + ($level: ident, $($t:tt)*) => { + #[cfg(feature = "log")] + { log::$level!($($t)*) } + // Silence unused variables warnings. + #[cfg(not(feature = "log"))] + { if false { let _ = ( $($t)* ); } } + } +} diff --git a/third_party/rust/mio/src/net/mod.rs b/third_party/rust/mio/src/net/mod.rs new file mode 100644 index 0000000000..7d714ca00a --- /dev/null +++ b/third_party/rust/mio/src/net/mod.rs @@ -0,0 +1,39 @@ +//! Networking primitives. +//! +//! The types provided in this module are non-blocking by default and are +//! designed to be portable across all supported Mio platforms. As long as the +//! [portability guidelines] are followed, the behavior should be identical no +//! matter the target platform. +//! +//! [portability guidelines]: ../struct.Poll.html#portability +//! +//! # Notes +//! +//! When using a datagram based socket, i.e. [`UdpSocket`] or [`UnixDatagram`], +//! its only possible to receive a packet once. This means that if you provide a +//! buffer that is too small you won't be able to receive the data anymore. How +//! OSs deal with this situation is different for each OS: +//! * Unixes, such as Linux, FreeBSD and macOS, will simply fill the buffer and +//! return the amount of bytes written. This means that if the returned value +//! 
is equal to the size of the buffer it may have only written a part of the
+//!   packet (or the packet has the same size as the buffer).
+//! * Windows returns a `WSAEMSGSIZE` error.
+//!
+//! Mio does not change the value (either ok or error) returned by the OS, it's
+//! up to the user to handle this. How to deal with these differences is still
+//! up for debate, specifically in
+//! <https://github.com/rust-lang/rust/issues/55794>. The best advice we can
+//! give is to always call receive with a large enough buffer.
+
+mod tcp;
+pub use self::tcp::{TcpListener, TcpStream};
+
+#[cfg(not(target_os = "wasi"))]
+mod udp;
+#[cfg(not(target_os = "wasi"))]
+pub use self::udp::UdpSocket;
+
+#[cfg(unix)]
+mod uds;
+#[cfg(unix)]
+pub use self::uds::{SocketAddr, UnixDatagram, UnixListener, UnixStream};
diff --git a/third_party/rust/mio/src/net/tcp/listener.rs b/third_party/rust/mio/src/net/tcp/listener.rs
new file mode 100644
index 0000000000..df51d57ae6
--- /dev/null
+++ b/third_party/rust/mio/src/net/tcp/listener.rs
@@ -0,0 +1,248 @@
+use std::net::{self, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+#[cfg(target_os = "wasi")]
+use std::os::wasi::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+#[cfg(windows)]
+use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+use std::{fmt, io};
+
+use crate::io_source::IoSource;
+use crate::net::TcpStream;
+#[cfg(unix)]
+use crate::sys::tcp::set_reuseaddr;
+#[cfg(not(target_os = "wasi"))]
+use crate::sys::tcp::{bind, listen, new_for_addr};
+use crate::{event, sys, Interest, Registry, Token};
+
+/// A structure representing a socket server.
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Interest, Poll, Token};
+/// use mio::net::TcpListener;
+/// use std::time::Duration;
+///
+/// let mut listener = TcpListener::bind("127.0.0.1:34255".parse()?)?;
+///
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register the socket with `Poll`
+/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?;
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// // There may be a socket ready to be accepted
+/// # Ok(())
+/// # }
+/// ```
+pub struct TcpListener {
+    inner: IoSource<net::TcpListener>,
+}
+
+impl TcpListener {
+    /// Convenience method to bind a new TCP listener to the specified address
+    /// to receive new connections.
+    ///
+    /// This function will take the following steps:
+    ///
+    /// 1. Create a new TCP socket.
+    /// 2. Set the `SO_REUSEADDR` option on the socket on Unix.
+    /// 3. Bind the socket to the specified address.
+    /// 4. Call `listen` on the socket to prepare it to receive new connections.
+    #[cfg(not(target_os = "wasi"))]
+    pub fn bind(addr: SocketAddr) -> io::Result<TcpListener> {
+        let socket = new_for_addr(addr)?;
+        #[cfg(unix)]
+        let listener = unsafe { TcpListener::from_raw_fd(socket) };
+        #[cfg(windows)]
+        let listener = unsafe { TcpListener::from_raw_socket(socket as _) };
+
+        // On platforms with Berkeley-derived sockets, this allows a socket to be
+        // rebound quickly, without needing to wait for the OS to clean up the
+        // previous one.
+        //
+        // On Windows, this allows rebinding sockets which are actively in use,
+        // which allows “socket hijacking”, so we explicitly don't set it here.
+        // https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
+        #[cfg(not(windows))]
+        set_reuseaddr(&listener.inner, true)?;
+
+        bind(&listener.inner, addr)?;
+        listen(&listener.inner, 1024)?;
+        Ok(listener)
+    }
+
+    /// Creates a new `TcpListener` from a standard `net::TcpListener`.
+    ///
+    /// This function is intended to be used to wrap a TCP listener from the
+    /// standard library in the Mio equivalent. The conversion assumes nothing
+    /// about the underlying listener; it is left up to the user to set it in
+    /// non-blocking mode.
+    pub fn from_std(listener: net::TcpListener) -> TcpListener {
+        TcpListener {
+            inner: IoSource::new(listener),
+        }
+    }
+
+    /// Accepts a new `TcpStream`.
+    ///
+    /// This may return an `Err(e)` where `e.kind()` is
+    /// `io::ErrorKind::WouldBlock`. This means a stream may be ready at a later
+    /// point and one should wait for an event before calling `accept` again.
+    ///
+    /// If an accepted stream is returned, the remote address of the peer is
+    /// returned along with it.
+    pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+        self.inner.do_io(|inner| {
+            sys::tcp::accept(inner).map(|(stream, addr)| (TcpStream::from_std(stream), addr))
+        })
+    }
+
+    /// Returns the local socket address of this listener.
+    pub fn local_addr(&self) -> io::Result<SocketAddr> {
+        self.inner.local_addr()
+    }
+
+    /// Sets the value for the `IP_TTL` option on this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        self.inner.set_ttl(ttl)
+    }
+
+    /// Gets the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`][link].
+    ///
+    /// [link]: #method.set_ttl
+    pub fn ttl(&self) -> io::Result<u32> {
+        self.inner.ttl()
+    }
+
+    /// Get the value of the `SO_ERROR` option on this socket.
+    ///
+    /// This will retrieve the stored error in the underlying socket, clearing
+    /// the field in the process. This can be useful for checking errors between
+    /// calls.
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.inner.take_error()
+    }
+}
+
+impl event::Source for TcpListener {
+    fn register(
+        &mut self,
+        registry: &Registry,
+        token: Token,
+        interests: Interest,
+    ) -> io::Result<()> {
+        self.inner.register(registry, token, interests)
+    }
+
+    fn reregister(
+        &mut self,
+        registry: &Registry,
+        token: Token,
+        interests: Interest,
+    ) -> io::Result<()> {
+        self.inner.reregister(registry, token, interests)
+    }
+
+    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+        self.inner.deregister(registry)
+    }
+}
+
+impl fmt::Debug for TcpListener {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.inner.fmt(f)
+    }
+}
+
+#[cfg(unix)]
+impl IntoRawFd for TcpListener {
+    fn into_raw_fd(self) -> RawFd {
+        self.inner.into_inner().into_raw_fd()
+    }
+}
+
+#[cfg(unix)]
+impl AsRawFd for TcpListener {
+    fn as_raw_fd(&self) -> RawFd {
+        self.inner.as_raw_fd()
+    }
+}
+
+#[cfg(unix)]
+impl FromRawFd for TcpListener {
+    /// Converts a `RawFd` to a `TcpListener`.
+    ///
+    /// # Notes
+    ///
+    /// The caller is responsible for ensuring that the socket is in
+    /// non-blocking mode.
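+    ///
+    /// A hedged sketch (the raw `fd`, and how it was obtained and made
+    /// non-blocking, are assumptions for illustration rather than upstream
+    /// documentation):
+    ///
+    /// ```ignore
+    /// // Safety: `fd` must be a valid, open TCP listener file descriptor,
+    /// // already set to non-blocking mode and not owned elsewhere.
+    /// let mut listener = unsafe { TcpListener::from_raw_fd(fd) };
+    /// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?;
+    /// ```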
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpListener { + TcpListener::from_std(FromRawFd::from_raw_fd(fd)) + } +} + +#[cfg(windows)] +impl IntoRawSocket for TcpListener { + fn into_raw_socket(self) -> RawSocket { + self.inner.into_inner().into_raw_socket() + } +} + +#[cfg(windows)] +impl AsRawSocket for TcpListener { + fn as_raw_socket(&self) -> RawSocket { + self.inner.as_raw_socket() + } +} + +#[cfg(windows)] +impl FromRawSocket for TcpListener { + /// Converts a `RawSocket` to a `TcpListener`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_socket(socket: RawSocket) -> TcpListener { + TcpListener::from_std(FromRawSocket::from_raw_socket(socket)) + } +} + +#[cfg(target_os = "wasi")] +impl IntoRawFd for TcpListener { + fn into_raw_fd(self) -> RawFd { + self.inner.into_inner().into_raw_fd() + } +} + +#[cfg(target_os = "wasi")] +impl AsRawFd for TcpListener { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +#[cfg(target_os = "wasi")] +impl FromRawFd for TcpListener { + /// Converts a `RawFd` to a `TcpListener`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_fd(fd: RawFd) -> TcpListener { + TcpListener::from_std(FromRawFd::from_raw_fd(fd)) + } +} diff --git a/third_party/rust/mio/src/net/tcp/mod.rs b/third_party/rust/mio/src/net/tcp/mod.rs new file mode 100644 index 0000000000..94af5c10e8 --- /dev/null +++ b/third_party/rust/mio/src/net/tcp/mod.rs @@ -0,0 +1,5 @@ +mod listener; +pub use self::listener::TcpListener; + +mod stream; +pub use self::stream::TcpStream; diff --git a/third_party/rust/mio/src/net/tcp/stream.rs b/third_party/rust/mio/src/net/tcp/stream.rs new file mode 100644 index 0000000000..8a3f6a2f25 --- /dev/null +++ b/third_party/rust/mio/src/net/tcp/stream.rs @@ -0,0 +1,427 @@ +use std::fmt; +use std::io::{self, IoSlice, IoSliceMut, Read, Write}; +use std::net::{self, Shutdown, SocketAddr}; +#[cfg(unix)] +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +#[cfg(target_os = "wasi")] +use std::os::wasi::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +#[cfg(windows)] +use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket}; + +use crate::io_source::IoSource; +#[cfg(not(target_os = "wasi"))] +use crate::sys::tcp::{connect, new_for_addr}; +use crate::{event, Interest, Registry, Token}; + +/// A non-blocking TCP stream between a local socket and a remote socket. +/// +/// The socket will be closed when the value is dropped. 
+/// +/// # Examples +/// +#[cfg_attr(feature = "os-poll", doc = "```")] +#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] +/// # use std::net::{TcpListener, SocketAddr}; +/// # use std::error::Error; +/// # +/// # fn main() -> Result<(), Box<dyn Error>> { +/// let address: SocketAddr = "127.0.0.1:0".parse()?; +/// let listener = TcpListener::bind(address)?; +/// use mio::{Events, Interest, Poll, Token}; +/// use mio::net::TcpStream; +/// use std::time::Duration; +/// +/// let mut stream = TcpStream::connect(listener.local_addr()?)?; +/// +/// let mut poll = Poll::new()?; +/// let mut events = Events::with_capacity(128); +/// +/// // Register the socket with `Poll` +/// poll.registry().register(&mut stream, Token(0), Interest::WRITABLE)?; +/// +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; +/// +/// // The socket might be ready at this point +/// # Ok(()) +/// # } +/// ``` +pub struct TcpStream { + inner: IoSource<net::TcpStream>, +} + +impl TcpStream { + /// Create a new TCP stream and issue a non-blocking connect to the + /// specified address. + /// + /// # Notes + /// + /// The returned `TcpStream` may not be connected (and thus usable), unlike + /// the API found in `std::net::TcpStream`. Because Mio issues a + /// *non-blocking* connect it will not block the thread and instead return + /// an unconnected `TcpStream`. + /// + /// Ensuring the returned stream is connected is surprisingly complex when + /// considering cross-platform support. Doing this properly should follow + /// the steps below, an example implementation can be found + /// [here](https://github.com/Thomasdezeeuw/heph/blob/0c4f1ab3eaf08bea1d65776528bfd6114c9f8374/src/net/tcp/stream.rs#L560-L622). + /// + /// 1. Call `TcpStream::connect` + /// 2. Register the returned stream with at least [write interest]. + /// 3. Wait for a (writable) event. + /// 4. Check `TcpStream::peer_addr`. If it returns `libc::EINPROGRESS` or + /// `ErrorKind::NotConnected` it means the stream is not yet connected, + /// go back to step 3. If it returns an address it means the stream is + /// connected, go to step 5. If another error is returned something + /// went wrong. + /// 5. Now the stream can be used. + /// + /// This may return a `WouldBlock` in which case the socket connection + /// cannot be completed immediately, it usually means there are insufficient + /// entries in the routing cache. + /// + /// [write interest]: Interest::WRITABLE + #[cfg(not(target_os = "wasi"))] + pub fn connect(addr: SocketAddr) -> io::Result<TcpStream> { + let socket = new_for_addr(addr)?; + #[cfg(unix)] + let stream = unsafe { TcpStream::from_raw_fd(socket) }; + #[cfg(windows)] + let stream = unsafe { TcpStream::from_raw_socket(socket as _) }; + connect(&stream.inner, addr)?; + Ok(stream) + } + + /// Creates a new `TcpStream` from a standard `net::TcpStream`. + /// + /// This function is intended to be used to wrap a TCP stream from the + /// standard library in the Mio equivalent. The conversion assumes nothing + /// about the underlying stream; it is left up to the user to set it in + /// non-blocking mode. + /// + /// # Note + /// + /// The TCP stream here will not have `connect` called on it, so it + /// should already be connected via some other means (be it manually, or + /// the standard library). + pub fn from_std(stream: net::TcpStream) -> TcpStream { + TcpStream { + inner: IoSource::new(stream), + } + } + + /// Returns the socket address of the remote peer of this TCP connection. 
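+    ///
+    /// # Notes
+    ///
+    /// A hedged sketch (the `stream` and surrounding retry loop are
+    /// illustrative) of using `peer_addr` to detect when a non-blocking connect
+    /// has finished, as described in the `connect` documentation above:
+    ///
+    /// ```ignore
+    /// match stream.peer_addr() {
+    ///     // An address means the stream is connected and usable.
+    ///     Ok(addr) => println!("connected to {}", addr),
+    ///     // Not connected yet; wait for another (writable) event and retry.
+    ///     Err(err) if err.kind() == std::io::ErrorKind::NotConnected => {}
+    ///     // Any other error means the connect attempt failed.
+    ///     Err(err) => return Err(err),
+    /// }
+    /// ```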
+    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+        self.inner.peer_addr()
+    }
+
+    /// Returns the socket address of the local half of this TCP connection.
+    pub fn local_addr(&self) -> io::Result<SocketAddr> {
+        self.inner.local_addr()
+    }
+
+    /// Shuts down the read, write, or both halves of this connection.
+    ///
+    /// This function will cause all pending and future I/O on the specified
+    /// portions to return immediately with an appropriate value (see the
+    /// documentation of `Shutdown`).
+    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+        self.inner.shutdown(how)
+    }
+
+    /// Sets the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// If set, this option disables the Nagle algorithm. This means that
+    /// segments are always sent as soon as possible, even if there is only a
+    /// small amount of data. When not set, data is buffered until there is a
+    /// sufficient amount to send out, thereby avoiding the frequent sending of
+    /// small packets.
+    ///
+    /// # Notes
+    ///
+    /// On Windows make sure the stream is connected before calling this method,
+    /// by receiving a (writable) event. Trying to set `nodelay` on an
+    /// unconnected `TcpStream` is unspecified behavior.
+    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+        self.inner.set_nodelay(nodelay)
+    }
+
+    /// Gets the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// For more information about this option, see [`set_nodelay`][link].
+    ///
+    /// [link]: #method.set_nodelay
+    ///
+    /// # Notes
+    ///
+    /// On Windows make sure the stream is connected before calling this method,
+    /// by receiving a (writable) event. Trying to get `nodelay` on an
+    /// unconnected `TcpStream` is unspecified behavior.
+    pub fn nodelay(&self) -> io::Result<bool> {
+        self.inner.nodelay()
+    }
+
+    /// Sets the value for the `IP_TTL` option on this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    ///
+    /// # Notes
+    ///
+    /// On Windows make sure the stream is connected before calling this method,
+    /// by receiving a (writable) event. Trying to set `ttl` on an
+    /// unconnected `TcpStream` is unspecified behavior.
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        self.inner.set_ttl(ttl)
+    }
+
+    /// Gets the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`][link].
+    ///
+    /// # Notes
+    ///
+    /// On Windows make sure the stream is connected before calling this method,
+    /// by receiving a (writable) event. Trying to get `ttl` on an
+    /// unconnected `TcpStream` is unspecified behavior.
+    ///
+    /// [link]: #method.set_ttl
+    pub fn ttl(&self) -> io::Result<u32> {
+        self.inner.ttl()
+    }
+
+    /// Get the value of the `SO_ERROR` option on this socket.
+    ///
+    /// This will retrieve the stored error in the underlying socket, clearing
+    /// the field in the process. This can be useful for checking errors between
+    /// calls.
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.inner.take_error()
+    }
+
+    /// Receives data on the socket from the remote address to which it is
+    /// connected, without removing that data from the queue. On success,
+    /// returns the number of bytes peeked.
+    ///
+    /// Successive calls return the same data. This is accomplished by passing
+    /// `MSG_PEEK` as a flag to the underlying recv system call.
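+    ///
+    /// A hedged example (the connected `stream` is assumed; this is an
+    /// illustrative sketch rather than upstream documentation):
+    ///
+    /// ```ignore
+    /// let mut buf = [0; 512];
+    /// // Peek at queued data without consuming it; a later `read` will
+    /// // return the same bytes.
+    /// let n = stream.peek(&mut buf)?;
+    /// println!("peeked {} bytes", n);
+    /// ```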
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.peek(buf) + } + + /// Execute an I/O operation ensuring that the socket receives more events + /// if it hits a [`WouldBlock`] error. + /// + /// # Notes + /// + /// This method is required to be called for **all** I/O operations to + /// ensure the user will receive events once the socket is ready again after + /// returning a [`WouldBlock`] error. + /// + /// [`WouldBlock`]: io::ErrorKind::WouldBlock + /// + /// # Examples + /// + #[cfg_attr(unix, doc = "```no_run")] + #[cfg_attr(windows, doc = "```ignore")] + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use std::io; + /// #[cfg(unix)] + /// use std::os::unix::io::AsRawFd; + /// #[cfg(windows)] + /// use std::os::windows::io::AsRawSocket; + /// use mio::net::TcpStream; + /// + /// let address = "127.0.0.1:8080".parse().unwrap(); + /// let stream = TcpStream::connect(address)?; + /// + /// // Wait until the stream is readable... + /// + /// // Read from the stream using a direct libc call, of course the + /// // `io::Read` implementation would be easier to use. + /// let mut buf = [0; 512]; + /// let n = stream.try_io(|| { + /// let buf_ptr = &mut buf as *mut _ as *mut _; + /// #[cfg(unix)] + /// let res = unsafe { libc::recv(stream.as_raw_fd(), buf_ptr, buf.len(), 0) }; + /// #[cfg(windows)] + /// let res = unsafe { libc::recvfrom(stream.as_raw_socket() as usize, buf_ptr, buf.len() as i32, 0, std::ptr::null_mut(), std::ptr::null_mut()) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure + /// // should return `WouldBlock` error. + /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("read {} bytes", n); + /// # Ok(()) + /// # } + /// ``` + pub fn try_io<F, T>(&self, f: F) -> io::Result<T> + where + F: FnOnce() -> io::Result<T>, + { + self.inner.do_io(|_| f()) + } +} + +impl Read for TcpStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.read(buf)) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.read_vectored(bufs)) + } +} + +impl<'a> Read for &'a TcpStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.read(buf)) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.read_vectored(bufs)) + } +} + +impl Write for TcpStream { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.write(buf)) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.write_vectored(bufs)) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.do_io(|mut inner| inner.flush()) + } +} + +impl<'a> Write for &'a TcpStream { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.write(buf)) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.write_vectored(bufs)) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.do_io(|mut inner| inner.flush()) + } +} + +impl event::Source for TcpStream { + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.register(registry, token, interests) + } + + fn reregister( + &mut self, 
+ registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.reregister(registry, token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + self.inner.deregister(registry) + } +} + +impl fmt::Debug for TcpStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +#[cfg(unix)] +impl IntoRawFd for TcpStream { + fn into_raw_fd(self) -> RawFd { + self.inner.into_inner().into_raw_fd() + } +} + +#[cfg(unix)] +impl AsRawFd for TcpStream { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +#[cfg(unix)] +impl FromRawFd for TcpStream { + /// Converts a `RawFd` to a `TcpStream`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_fd(fd: RawFd) -> TcpStream { + TcpStream::from_std(FromRawFd::from_raw_fd(fd)) + } +} + +#[cfg(windows)] +impl IntoRawSocket for TcpStream { + fn into_raw_socket(self) -> RawSocket { + self.inner.into_inner().into_raw_socket() + } +} + +#[cfg(windows)] +impl AsRawSocket for TcpStream { + fn as_raw_socket(&self) -> RawSocket { + self.inner.as_raw_socket() + } +} + +#[cfg(windows)] +impl FromRawSocket for TcpStream { + /// Converts a `RawSocket` to a `TcpStream`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_socket(socket: RawSocket) -> TcpStream { + TcpStream::from_std(FromRawSocket::from_raw_socket(socket)) + } +} + +#[cfg(target_os = "wasi")] +impl IntoRawFd for TcpStream { + fn into_raw_fd(self) -> RawFd { + self.inner.into_inner().into_raw_fd() + } +} + +#[cfg(target_os = "wasi")] +impl AsRawFd for TcpStream { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +#[cfg(target_os = "wasi")] +impl FromRawFd for TcpStream { + /// Converts a `RawFd` to a `TcpStream`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_fd(fd: RawFd) -> TcpStream { + TcpStream::from_std(FromRawFd::from_raw_fd(fd)) + } +} diff --git a/third_party/rust/mio/src/net/udp.rs b/third_party/rust/mio/src/net/udp.rs new file mode 100644 index 0000000000..6129527a7f --- /dev/null +++ b/third_party/rust/mio/src/net/udp.rs @@ -0,0 +1,697 @@ +//! Primitives for working with UDP. +//! +//! The types provided in this module are non-blocking by default and are +//! designed to be portable across all supported Mio platforms. As long as the +//! [portability guidelines] are followed, the behavior should be identical no +//! matter the target platform. +//! +//! [portability guidelines]: ../struct.Poll.html#portability + +use crate::io_source::IoSource; +use crate::{event, sys, Interest, Registry, Token}; + +use std::fmt; +use std::io; +use std::net; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; +#[cfg(unix)] +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +#[cfg(windows)] +use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket}; + +/// A User Datagram Protocol socket. +/// +/// This is an implementation of a bound UDP socket. This supports both IPv4 and +/// IPv6 addresses, and there is no corresponding notion of a server because UDP +/// is a datagram protocol. 
+/// +/// # Examples +/// +#[cfg_attr(feature = "os-poll", doc = "```")] +#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] +/// # use std::error::Error; +/// # +/// # fn main() -> Result<(), Box<dyn Error>> { +/// // An Echo program: +/// // SENDER -> sends a message. +/// // ECHOER -> listens and prints the message received. +/// +/// use mio::net::UdpSocket; +/// use mio::{Events, Interest, Poll, Token}; +/// use std::time::Duration; +/// +/// const SENDER: Token = Token(0); +/// const ECHOER: Token = Token(1); +/// +/// // This operation will fail if the address is in use, so we select different ports for each +/// // socket. +/// let mut sender_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; +/// let mut echoer_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; +/// +/// // If we do not use connect here, SENDER and ECHOER would need to call send_to and recv_from +/// // respectively. +/// sender_socket.connect(echoer_socket.local_addr()?)?; +/// +/// // We need a Poll to check if SENDER is ready to be written into, and if ECHOER is ready to be +/// // read from. +/// let mut poll = Poll::new()?; +/// +/// // We register our sockets here so that we can check if they are ready to be written/read. +/// poll.registry().register(&mut sender_socket, SENDER, Interest::WRITABLE)?; +/// poll.registry().register(&mut echoer_socket, ECHOER, Interest::READABLE)?; +/// +/// let msg_to_send = [9; 9]; +/// let mut buffer = [0; 9]; +/// +/// let mut events = Events::with_capacity(128); +/// loop { +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; +/// for event in events.iter() { +/// match event.token() { +/// // Our SENDER is ready to be written into. +/// SENDER => { +/// let bytes_sent = sender_socket.send(&msg_to_send)?; +/// assert_eq!(bytes_sent, 9); +/// println!("sent {:?} -> {:?} bytes", msg_to_send, bytes_sent); +/// }, +/// // Our ECHOER is ready to be read from. +/// ECHOER => { +/// let num_recv = echoer_socket.recv(&mut buffer)?; +/// println!("echo {:?} -> {:?}", buffer, num_recv); +/// buffer = [0; 9]; +/// # _ = buffer; // Silence unused assignment warning. +/// # return Ok(()); +/// } +/// _ => unreachable!() +/// } +/// } +/// } +/// # } +/// ``` +pub struct UdpSocket { + inner: IoSource<net::UdpSocket>, +} + +impl UdpSocket { + /// Creates a UDP socket from the given address. + /// + /// # Examples + /// + #[cfg_attr(feature = "os-poll", doc = "```")] + #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// // We must bind it to an open address. + /// let socket = match UdpSocket::bind("127.0.0.1:0".parse()?) { + /// Ok(new_socket) => new_socket, + /// Err(fail) => { + /// // We panic! here, but you could try to bind it again on another address. + /// panic!("Failed to bind socket. {:?}", fail); + /// } + /// }; + /// + /// // Our socket was created, but we should not use it before checking it's readiness. + /// # drop(socket); // Silence unused variable warning. + /// # Ok(()) + /// # } + /// ``` + pub fn bind(addr: SocketAddr) -> io::Result<UdpSocket> { + sys::udp::bind(addr).map(UdpSocket::from_std) + } + + /// Creates a new `UdpSocket` from a standard `net::UdpSocket`. + /// + /// This function is intended to be used to wrap a UDP socket from the + /// standard library in the Mio equivalent. The conversion assumes nothing + /// about the underlying socket; it is left up to the user to set it in + /// non-blocking mode. 
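The `from_std` documentation above leaves non-blocking mode to the caller; a minimal sketch of doing that with a socket created through the standard library:

```
use std::net;

use mio::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Bind with the standard library, e.g. because the socket was configured
    // elsewhere or inherited from a parent process.
    let std_socket = net::UdpSocket::bind("127.0.0.1:0")?;

    // `from_std` assumes nothing about the socket, so switch it to
    // non-blocking mode *before* handing it to Mio.
    std_socket.set_nonblocking(true)?;
    let mio_socket = UdpSocket::from_std(std_socket);

    println!("wrapped socket bound to {}", mio_socket.local_addr()?);
    Ok(())
}
```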
+ pub fn from_std(socket: net::UdpSocket) -> UdpSocket { + UdpSocket { + inner: IoSource::new(socket), + } + } + + /// Returns the socket address that this socket was created from. + /// + /// # Examples + /// + // This assertion is almost, but not quite, universal. It fails on + // shared-IP FreeBSD jails. It's hard for mio to know whether we're jailed, + // so simply disable the test on FreeBSD. + #[cfg_attr(all(feature = "os-poll", not(target_os = "freebsd")), doc = "```")] + #[cfg_attr( + any(not(feature = "os-poll"), target_os = "freebsd"), + doc = "```ignore" + )] + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// let addr = "127.0.0.1:0".parse()?; + /// let socket = UdpSocket::bind(addr)?; + /// assert_eq!(socket.local_addr()?.ip(), addr.ip()); + /// # Ok(()) + /// # } + /// ``` + pub fn local_addr(&self) -> io::Result<SocketAddr> { + self.inner.local_addr() + } + + /// Returns the socket address of the remote peer this socket was connected to. + /// + /// # Examples + /// + #[cfg_attr(feature = "os-poll", doc = "```")] + #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// let addr = "127.0.0.1:0".parse()?; + /// let peer_addr = "127.0.0.1:11100".parse()?; + /// let socket = UdpSocket::bind(addr)?; + /// socket.connect(peer_addr)?; + /// assert_eq!(socket.peer_addr()?.ip(), peer_addr.ip()); + /// # Ok(()) + /// # } + /// ``` + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + self.inner.peer_addr() + } + + /// Sends data on the socket to the given address. On success, returns the + /// number of bytes written. + /// + /// Address type can be any implementor of `ToSocketAddrs` trait. See its + /// documentation for concrete examples. + /// + /// # Examples + /// + /// ```no_run + /// # use std::error::Error; + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; + /// + /// // We must check if the socket is writable before calling send_to, + /// // or we could run into a WouldBlock error. + /// + /// let bytes_sent = socket.send_to(&[9; 9], "127.0.0.1:11100".parse()?)?; + /// assert_eq!(bytes_sent, 9); + /// # + /// # Ok(()) + /// # } + /// ``` + pub fn send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result<usize> { + self.inner.do_io(|inner| inner.send_to(buf, target)) + } + + /// Receives data from the socket. On success, returns the number of bytes + /// read and the address from whence the data came. + /// + /// # Notes + /// + /// On Windows, if the data is larger than the buffer specified, the buffer + /// is filled with the first part of the data, and recv_from returns the error + /// WSAEMSGSIZE(10040). The excess data is lost. + /// Make sure to always use a sufficiently large buffer to hold the + /// maximum UDP packet size, which can be up to 65536 bytes in size. + /// + /// # Examples + /// + /// ```no_run + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; + /// + /// // We must check if the socket is readable before calling recv_from, + /// // or we could run into a WouldBlock error. 
+ /// + /// let mut buf = [0; 9]; + /// let (num_recv, from_addr) = socket.recv_from(&mut buf)?; + /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr); + /// # + /// # Ok(()) + /// # } + /// ``` + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.inner.do_io(|inner| inner.recv_from(buf)) + } + + /// Receives data from the socket, without removing it from the input queue. + /// On success, returns the number of bytes read and the address from whence + /// the data came. + /// + /// # Notes + /// + /// On Windows, if the data is larger than the buffer specified, the buffer + /// is filled with the first part of the data, and peek_from returns the error + /// WSAEMSGSIZE(10040). The excess data is lost. + /// Make sure to always use a sufficiently large buffer to hold the + /// maximum UDP packet size, which can be up to 65536 bytes in size. + /// + /// # Examples + /// + /// ```no_run + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; + /// + /// // We must check if the socket is readable before calling recv_from, + /// // or we could run into a WouldBlock error. + /// + /// let mut buf = [0; 9]; + /// let (num_recv, from_addr) = socket.peek_from(&mut buf)?; + /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr); + /// # + /// # Ok(()) + /// # } + /// ``` + pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.inner.do_io(|inner| inner.peek_from(buf)) + } + + /// Sends data on the socket to the address previously bound via connect(). On success, + /// returns the number of bytes written. + pub fn send(&self, buf: &[u8]) -> io::Result<usize> { + self.inner.do_io(|inner| inner.send(buf)) + } + + /// Receives data from the socket previously bound with connect(). On success, returns + /// the number of bytes read. + /// + /// # Notes + /// + /// On Windows, if the data is larger than the buffer specified, the buffer + /// is filled with the first part of the data, and recv returns the error + /// WSAEMSGSIZE(10040). The excess data is lost. + /// Make sure to always use a sufficiently large buffer to hold the + /// maximum UDP packet size, which can be up to 65536 bytes in size. + pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.do_io(|inner| inner.recv(buf)) + } + + /// Receives data from the socket, without removing it from the input queue. + /// On success, returns the number of bytes read. + /// + /// # Notes + /// + /// On Windows, if the data is larger than the buffer specified, the buffer + /// is filled with the first part of the data, and peek returns the error + /// WSAEMSGSIZE(10040). The excess data is lost. + /// Make sure to always use a sufficiently large buffer to hold the + /// maximum UDP packet size, which can be up to 65536 bytes in size. + pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.do_io(|inner| inner.peek(buf)) + } + + /// Connects the UDP socket setting the default destination for `send()` + /// and limiting packets that are read via `recv` from the address specified + /// in `addr`. + /// + /// This may return a `WouldBlock` in which case the socket connection + /// cannot be completed immediately, it usually means there are insufficient + /// entries in the routing cache. 
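Every send/receive method above can fail with `WouldBlock` when the socket is not ready. A small sketch of the usual handling pattern, assuming it is called from an event loop such as the echo example earlier:

```
use std::io;

use mio::net::UdpSocket;

/// Try to receive one datagram; `Ok(None)` means "not ready yet, wait for
/// the next readiness event before calling this again".
fn try_recv(socket: &UdpSocket, buf: &mut [u8]) -> io::Result<Option<usize>> {
    loop {
        match socket.recv(buf) {
            Ok(n) => return Ok(Some(n)),
            // Not an error: the socket has nothing to read right now; wait
            // for the next readiness event before calling again.
            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(None),
            // Interrupted by a signal: safe to retry immediately.
            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
}
```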
+ pub fn connect(&self, addr: SocketAddr) -> io::Result<()> { + self.inner.connect(addr) + } + + /// Sets the value of the `SO_BROADCAST` option for this socket. + /// + /// When enabled, this socket is allowed to send packets to a broadcast + /// address. + /// + /// # Examples + /// + #[cfg_attr(feature = "os-poll", doc = "```")] + #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; + /// if broadcast_socket.broadcast()? == false { + /// broadcast_socket.set_broadcast(true)?; + /// } + /// + /// assert_eq!(broadcast_socket.broadcast()?, true); + /// # + /// # Ok(()) + /// # } + /// ``` + pub fn set_broadcast(&self, on: bool) -> io::Result<()> { + self.inner.set_broadcast(on) + } + + /// Gets the value of the `SO_BROADCAST` option for this socket. + /// + /// For more information about this option, see + /// [`set_broadcast`][link]. + /// + /// [link]: #method.set_broadcast + /// + /// # Examples + /// + #[cfg_attr(feature = "os-poll", doc = "```")] + #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; + /// assert_eq!(broadcast_socket.broadcast()?, false); + /// # + /// # Ok(()) + /// # } + /// ``` + pub fn broadcast(&self) -> io::Result<bool> { + self.inner.broadcast() + } + + /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket. + /// + /// If enabled, multicast packets will be looped back to the local socket. + /// Note that this may not have any affect on IPv6 sockets. + pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> { + self.inner.set_multicast_loop_v4(on) + } + + /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket. + /// + /// For more information about this option, see + /// [`set_multicast_loop_v4`][link]. + /// + /// [link]: #method.set_multicast_loop_v4 + pub fn multicast_loop_v4(&self) -> io::Result<bool> { + self.inner.multicast_loop_v4() + } + + /// Sets the value of the `IP_MULTICAST_TTL` option for this socket. + /// + /// Indicates the time-to-live value of outgoing multicast packets for + /// this socket. The default value is 1 which means that multicast packets + /// don't leave the local network unless explicitly requested. + /// + /// Note that this may not have any affect on IPv6 sockets. + pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> { + self.inner.set_multicast_ttl_v4(ttl) + } + + /// Gets the value of the `IP_MULTICAST_TTL` option for this socket. + /// + /// For more information about this option, see + /// [`set_multicast_ttl_v4`][link]. + /// + /// [link]: #method.set_multicast_ttl_v4 + pub fn multicast_ttl_v4(&self) -> io::Result<u32> { + self.inner.multicast_ttl_v4() + } + + /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket. + /// + /// Controls whether this socket sees the multicast packets it sends itself. + /// Note that this may not have any affect on IPv4 sockets. + pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> { + self.inner.set_multicast_loop_v6(on) + } + + /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket. + /// + /// For more information about this option, see + /// [`set_multicast_loop_v6`][link]. 
+ /// + /// [link]: #method.set_multicast_loop_v6 + pub fn multicast_loop_v6(&self) -> io::Result<bool> { + self.inner.multicast_loop_v6() + } + + /// Sets the value for the `IP_TTL` option on this socket. + /// + /// This value sets the time-to-live field that is used in every packet sent + /// from this socket. + /// + /// # Examples + /// + #[cfg_attr(feature = "os-poll", doc = "```")] + #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; + /// if socket.ttl()? < 255 { + /// socket.set_ttl(255)?; + /// } + /// + /// assert_eq!(socket.ttl()?, 255); + /// # + /// # Ok(()) + /// # } + /// ``` + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.inner.set_ttl(ttl) + } + + /// Gets the value of the `IP_TTL` option for this socket. + /// + /// For more information about this option, see [`set_ttl`][link]. + /// + /// [link]: #method.set_ttl + /// + /// # Examples + /// + #[cfg_attr(feature = "os-poll", doc = "```")] + #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::net::UdpSocket; + /// + /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; + /// socket.set_ttl(255)?; + /// + /// assert_eq!(socket.ttl()?, 255); + /// # + /// # Ok(()) + /// # } + /// ``` + pub fn ttl(&self) -> io::Result<u32> { + self.inner.ttl() + } + + /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. + /// + /// This function specifies a new multicast group for this socket to join. + /// The address must be a valid multicast address, and `interface` is the + /// address of the local interface with which the system should join the + /// multicast group. If it's equal to `INADDR_ANY` then an appropriate + /// interface is chosen by the system. + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { + self.inner.join_multicast_v4(multiaddr, interface) + } + + /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type. + /// + /// This function specifies a new multicast group for this socket to join. + /// The address must be a valid multicast address, and `interface` is the + /// index of the interface to join/leave (or 0 to indicate any interface). + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { + self.inner.join_multicast_v6(multiaddr, interface) + } + + /// Executes an operation of the `IP_DROP_MEMBERSHIP` type. + /// + /// For more information about this option, see + /// [`join_multicast_v4`][link]. + /// + /// [link]: #method.join_multicast_v4 + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { + self.inner.leave_multicast_v4(multiaddr, interface) + } + + /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type. + /// + /// For more information about this option, see + /// [`join_multicast_v6`][link]. + /// + /// [link]: #method.join_multicast_v6 + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { + self.inner.leave_multicast_v6(multiaddr, interface) + } + + /// Get the value of the `IPV6_V6ONLY` option on this socket. 
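The multicast membership and TTL methods above have no examples of their own. A hedged sketch of joining and leaving an IPv4 group follows; the group address and port are placeholders, and `Ipv4Addr::UNSPECIFIED` simply asks the system to pick the interface.

```
use std::net::Ipv4Addr;

use mio::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Bind to the wildcard address on the multicast port.
    let socket = UdpSocket::bind("0.0.0.0:5353".parse().unwrap())?;

    // Join the group on whichever interface the system picks
    // (`INADDR_ANY`, i.e. `Ipv4Addr::UNSPECIFIED`).
    let group = Ipv4Addr::new(224, 0, 0, 251);
    socket.join_multicast_v4(&group, &Ipv4Addr::UNSPECIFIED)?;

    // Keep outgoing multicast traffic on the local network.
    socket.set_multicast_ttl_v4(1)?;

    // ... register with `Poll` and read datagrams as they arrive ...

    // Leave the group again when done.
    socket.leave_multicast_v4(&group, &Ipv4Addr::UNSPECIFIED)?;
    Ok(())
}
```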
+ #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn only_v6(&self) -> io::Result<bool> { + sys::udp::only_v6(&self.inner) + } + + /// Get the value of the `SO_ERROR` option on this socket. + /// + /// This will retrieve the stored error in the underlying socket, clearing + /// the field in the process. This can be useful for checking errors between + /// calls. + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.inner.take_error() + } + + /// Execute an I/O operation ensuring that the socket receives more events + /// if it hits a [`WouldBlock`] error. + /// + /// # Notes + /// + /// This method is required to be called for **all** I/O operations to + /// ensure the user will receive events once the socket is ready again after + /// returning a [`WouldBlock`] error. + /// + /// [`WouldBlock`]: io::ErrorKind::WouldBlock + /// + /// # Examples + /// + #[cfg_attr(unix, doc = "```no_run")] + #[cfg_attr(windows, doc = "```ignore")] + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use std::io; + /// #[cfg(unix)] + /// use std::os::unix::io::AsRawFd; + /// #[cfg(windows)] + /// use std::os::windows::io::AsRawSocket; + /// use mio::net::UdpSocket; + /// + /// let address = "127.0.0.1:8080".parse().unwrap(); + /// let dgram = UdpSocket::bind(address)?; + /// + /// // Wait until the dgram is readable... + /// + /// // Read from the dgram using a direct libc call, of course the + /// // `io::Read` implementation would be easier to use. + /// let mut buf = [0; 512]; + /// let n = dgram.try_io(|| { + /// let buf_ptr = &mut buf as *mut _ as *mut _; + /// #[cfg(unix)] + /// let res = unsafe { libc::recv(dgram.as_raw_fd(), buf_ptr, buf.len(), 0) }; + /// #[cfg(windows)] + /// let res = unsafe { libc::recvfrom(dgram.as_raw_socket() as usize, buf_ptr, buf.len() as i32, 0, std::ptr::null_mut(), std::ptr::null_mut()) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure + /// // should return `WouldBlock` error. + /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("read {} bytes", n); + /// # Ok(()) + /// # } + /// ``` + pub fn try_io<F, T>(&self, f: F) -> io::Result<T> + where + F: FnOnce() -> io::Result<T>, + { + self.inner.do_io(|_| f()) + } +} + +impl event::Source for UdpSocket { + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.register(registry, token, interests) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.reregister(registry, token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + self.inner.deregister(registry) + } +} + +impl fmt::Debug for UdpSocket { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +#[cfg(unix)] +impl IntoRawFd for UdpSocket { + fn into_raw_fd(self) -> RawFd { + self.inner.into_inner().into_raw_fd() + } +} + +#[cfg(unix)] +impl AsRawFd for UdpSocket { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +#[cfg(unix)] +impl FromRawFd for UdpSocket { + /// Converts a `RawFd` to a `UdpSocket`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. 
+ unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket { + UdpSocket::from_std(FromRawFd::from_raw_fd(fd)) + } +} + +#[cfg(windows)] +impl IntoRawSocket for UdpSocket { + fn into_raw_socket(self) -> RawSocket { + self.inner.into_inner().into_raw_socket() + } +} + +#[cfg(windows)] +impl AsRawSocket for UdpSocket { + fn as_raw_socket(&self) -> RawSocket { + self.inner.as_raw_socket() + } +} + +#[cfg(windows)] +impl FromRawSocket for UdpSocket { + /// Converts a `RawSocket` to a `UdpSocket`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_socket(socket: RawSocket) -> UdpSocket { + UdpSocket::from_std(FromRawSocket::from_raw_socket(socket)) + } +} diff --git a/third_party/rust/mio/src/net/uds/datagram.rs b/third_party/rust/mio/src/net/uds/datagram.rs new file mode 100644 index 0000000000..e963d6e2fa --- /dev/null +++ b/third_party/rust/mio/src/net/uds/datagram.rs @@ -0,0 +1,236 @@ +use crate::io_source::IoSource; +use crate::{event, sys, Interest, Registry, Token}; + +use std::net::Shutdown; +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +use std::os::unix::net; +use std::path::Path; +use std::{fmt, io}; + +/// A Unix datagram socket. +pub struct UnixDatagram { + inner: IoSource<net::UnixDatagram>, +} + +impl UnixDatagram { + /// Creates a Unix datagram socket bound to the given path. + pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixDatagram> { + sys::uds::datagram::bind(path.as_ref()).map(UnixDatagram::from_std) + } + + /// Creates a new `UnixDatagram` from a standard `net::UnixDatagram`. + /// + /// This function is intended to be used to wrap a Unix datagram from the + /// standard library in the Mio equivalent. The conversion assumes nothing + /// about the underlying datagram; it is left up to the user to set it in + /// non-blocking mode. + pub fn from_std(socket: net::UnixDatagram) -> UnixDatagram { + UnixDatagram { + inner: IoSource::new(socket), + } + } + + /// Connects the socket to the specified address. + /// + /// This may return a `WouldBlock` in which case the socket connection + /// cannot be completed immediately. + pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { + self.inner.connect(path) + } + + /// Creates a Unix Datagram socket which is not bound to any address. + pub fn unbound() -> io::Result<UnixDatagram> { + sys::uds::datagram::unbound().map(UnixDatagram::from_std) + } + + /// Create an unnamed pair of connected sockets. + pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> { + sys::uds::datagram::pair().map(|(socket1, socket2)| { + ( + UnixDatagram::from_std(socket1), + UnixDatagram::from_std(socket2), + ) + }) + } + + /// Returns the address of this socket. + pub fn local_addr(&self) -> io::Result<sys::SocketAddr> { + sys::uds::datagram::local_addr(&self.inner) + } + + /// Returns the address of this socket's peer. + /// + /// The `connect` method will connect the socket to a peer. + pub fn peer_addr(&self) -> io::Result<sys::SocketAddr> { + sys::uds::datagram::peer_addr(&self.inner) + } + + /// Receives data from the socket. + /// + /// On success, returns the number of bytes read and the address from + /// whence the data came. + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, sys::SocketAddr)> { + self.inner + .do_io(|inner| sys::uds::datagram::recv_from(inner, buf)) + } + + /// Receives data from the socket. + /// + /// On success, returns the number of bytes read. 
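To complement the raw `libc`-based `try_io` example further down, here is a sketch that exercises the same `pair`, `send` and `recv` methods purely through the Mio API; the tokens and buffer size are arbitrary.

```
use std::time::Duration;

use mio::net::UnixDatagram;
use mio::{Events, Interest, Poll, Token};

fn main() -> std::io::Result<()> {
    let (mut tx, mut rx) = UnixDatagram::pair()?;

    let mut poll = Poll::new()?;
    let mut events = Events::with_capacity(8);
    poll.registry().register(&mut tx, Token(0), Interest::WRITABLE)?;
    poll.registry().register(&mut rx, Token(1), Interest::READABLE)?;

    let mut sent = false;
    let mut buf = [0u8; 16];
    loop {
        poll.poll(&mut events, Some(Duration::from_millis(100)))?;
        for event in events.iter() {
            match event.token() {
                // Writable: send the datagram once.
                Token(0) if !sent => {
                    tx.send(b"ping")?;
                    sent = true;
                }
                // Readable: the datagram arrived on the other end.
                Token(1) => {
                    let n = rx.recv(&mut buf)?;
                    assert_eq!(&buf[..n], b"ping");
                    return Ok(());
                }
                _ => {}
            }
        }
    }
}
```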
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.do_io(|inner| inner.recv(buf)) + } + + /// Sends data on the socket to the specified address. + /// + /// On success, returns the number of bytes written. + pub fn send_to<P: AsRef<Path>>(&self, buf: &[u8], path: P) -> io::Result<usize> { + self.inner.do_io(|inner| inner.send_to(buf, path)) + } + + /// Sends data on the socket to the socket's peer. + /// + /// The peer address may be set by the `connect` method, and this method + /// will return an error if the socket has not already been connected. + /// + /// On success, returns the number of bytes written. + pub fn send(&self, buf: &[u8]) -> io::Result<usize> { + self.inner.do_io(|inner| inner.send(buf)) + } + + /// Returns the value of the `SO_ERROR` option. + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.inner.take_error() + } + + /// Shut down the read, write, or both halves of this connection. + /// + /// This function will cause all pending and future I/O calls on the + /// specified portions to immediately return with an appropriate value + /// (see the documentation of `Shutdown`). + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.inner.shutdown(how) + } + + /// Execute an I/O operation ensuring that the socket receives more events + /// if it hits a [`WouldBlock`] error. + /// + /// # Notes + /// + /// This method is required to be called for **all** I/O operations to + /// ensure the user will receive events once the socket is ready again after + /// returning a [`WouldBlock`] error. + /// + /// [`WouldBlock`]: io::ErrorKind::WouldBlock + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use std::io; + /// use std::os::unix::io::AsRawFd; + /// use mio::net::UnixDatagram; + /// + /// let (dgram1, dgram2) = UnixDatagram::pair()?; + /// + /// // Wait until the dgram is writable... + /// + /// // Write to the dgram using a direct libc call, of course the + /// // `io::Write` implementation would be easier to use. + /// let buf = b"hello"; + /// let n = dgram1.try_io(|| { + /// let buf_ptr = &buf as *const _ as *const _; + /// let res = unsafe { libc::send(dgram1.as_raw_fd(), buf_ptr, buf.len(), 0) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::send, the closure + /// // should return `WouldBlock` error. + /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("write {} bytes", n); + /// + /// // Wait until the dgram is readable... + /// + /// // Read from the dgram using a direct libc call, of course the + /// // `io::Read` implementation would be easier to use. + /// let mut buf = [0; 512]; + /// let n = dgram2.try_io(|| { + /// let buf_ptr = &mut buf as *mut _ as *mut _; + /// let res = unsafe { libc::recv(dgram2.as_raw_fd(), buf_ptr, buf.len(), 0) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure + /// // should return `WouldBlock` error. 
+ /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("read {} bytes", n); + /// # Ok(()) + /// # } + /// ``` + pub fn try_io<F, T>(&self, f: F) -> io::Result<T> + where + F: FnOnce() -> io::Result<T>, + { + self.inner.do_io(|_| f()) + } +} + +impl event::Source for UnixDatagram { + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.register(registry, token, interests) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.reregister(registry, token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + self.inner.deregister(registry) + } +} + +impl fmt::Debug for UnixDatagram { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl IntoRawFd for UnixDatagram { + fn into_raw_fd(self) -> RawFd { + self.inner.into_inner().into_raw_fd() + } +} + +impl AsRawFd for UnixDatagram { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +impl FromRawFd for UnixDatagram { + /// Converts a `RawFd` to a `UnixDatagram`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_fd(fd: RawFd) -> UnixDatagram { + UnixDatagram::from_std(FromRawFd::from_raw_fd(fd)) + } +} diff --git a/third_party/rust/mio/src/net/uds/listener.rs b/third_party/rust/mio/src/net/uds/listener.rs new file mode 100644 index 0000000000..37e8106d89 --- /dev/null +++ b/third_party/rust/mio/src/net/uds/listener.rs @@ -0,0 +1,104 @@ +use crate::io_source::IoSource; +use crate::net::{SocketAddr, UnixStream}; +use crate::{event, sys, Interest, Registry, Token}; + +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +use std::os::unix::net; +use std::path::Path; +use std::{fmt, io}; + +/// A non-blocking Unix domain socket server. +pub struct UnixListener { + inner: IoSource<net::UnixListener>, +} + +impl UnixListener { + /// Creates a new `UnixListener` bound to the specified socket. + pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> { + sys::uds::listener::bind(path.as_ref()).map(UnixListener::from_std) + } + + /// Creates a new `UnixListener` from a standard `net::UnixListener`. + /// + /// This function is intended to be used to wrap a Unix listener from the + /// standard library in the Mio equivalent. The conversion assumes nothing + /// about the underlying listener; it is left up to the user to set it in + /// non-blocking mode. + pub fn from_std(listener: net::UnixListener) -> UnixListener { + UnixListener { + inner: IoSource::new(listener), + } + } + + /// Accepts a new incoming connection to this listener. + /// + /// The call is responsible for ensuring that the listening socket is in + /// non-blocking mode. + pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> { + sys::uds::listener::accept(&self.inner) + } + + /// Returns the local socket address of this listener. + pub fn local_addr(&self) -> io::Result<sys::SocketAddr> { + sys::uds::listener::local_addr(&self.inner) + } + + /// Returns the value of the `SO_ERROR` option. 
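The `accept` documentation above does not show the usual accept loop; a minimal sketch that drains all pending connections on each readable event (the socket path is a placeholder and is not removed afterwards):

```
use std::io;
use std::time::Duration;

use mio::net::UnixListener;
use mio::{Events, Interest, Poll, Token};

fn main() -> io::Result<()> {
    let mut listener = UnixListener::bind("/tmp/mio-example.sock")?;

    let mut poll = Poll::new()?;
    let mut events = Events::with_capacity(16);
    poll.registry()
        .register(&mut listener, Token(0), Interest::READABLE)?;

    loop {
        poll.poll(&mut events, Some(Duration::from_secs(1)))?;
        for event in events.iter() {
            if event.token() == Token(0) {
                // Accept until `WouldBlock`: a single readiness event may
                // cover several queued connections.
                loop {
                    match listener.accept() {
                        Ok((stream, _addr)) => {
                            println!("accepted {:?}", stream);
                        }
                        Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break,
                        Err(e) => return Err(e),
                    }
                }
            }
        }
    }
}
```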
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.inner.take_error() + } +} + +impl event::Source for UnixListener { + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.register(registry, token, interests) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.reregister(registry, token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + self.inner.deregister(registry) + } +} + +impl fmt::Debug for UnixListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl IntoRawFd for UnixListener { + fn into_raw_fd(self) -> RawFd { + self.inner.into_inner().into_raw_fd() + } +} + +impl AsRawFd for UnixListener { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +impl FromRawFd for UnixListener { + /// Converts a `RawFd` to a `UnixListener`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_fd(fd: RawFd) -> UnixListener { + UnixListener::from_std(FromRawFd::from_raw_fd(fd)) + } +} diff --git a/third_party/rust/mio/src/net/uds/mod.rs b/third_party/rust/mio/src/net/uds/mod.rs new file mode 100644 index 0000000000..6b4ffdc430 --- /dev/null +++ b/third_party/rust/mio/src/net/uds/mod.rs @@ -0,0 +1,10 @@ +mod datagram; +pub use self::datagram::UnixDatagram; + +mod listener; +pub use self::listener::UnixListener; + +mod stream; +pub use self::stream::UnixStream; + +pub use crate::sys::SocketAddr; diff --git a/third_party/rust/mio/src/net/uds/stream.rs b/third_party/rust/mio/src/net/uds/stream.rs new file mode 100644 index 0000000000..b38812e5d5 --- /dev/null +++ b/third_party/rust/mio/src/net/uds/stream.rs @@ -0,0 +1,245 @@ +use crate::io_source::IoSource; +use crate::{event, sys, Interest, Registry, Token}; + +use std::fmt; +use std::io::{self, IoSlice, IoSliceMut, Read, Write}; +use std::net::Shutdown; +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +use std::os::unix::net; +use std::path::Path; + +/// A non-blocking Unix stream socket. +pub struct UnixStream { + inner: IoSource<net::UnixStream>, +} + +impl UnixStream { + /// Connects to the socket named by `path`. + /// + /// This may return a `WouldBlock` in which case the socket connection + /// cannot be completed immediately. Usually it means the backlog is full. + pub fn connect<P: AsRef<Path>>(path: P) -> io::Result<UnixStream> { + sys::uds::stream::connect(path.as_ref()).map(UnixStream::from_std) + } + + /// Creates a new `UnixStream` from a standard `net::UnixStream`. + /// + /// This function is intended to be used to wrap a Unix stream from the + /// standard library in the Mio equivalent. The conversion assumes nothing + /// about the underlying stream; it is left up to the user to set it in + /// non-blocking mode. + /// + /// # Note + /// + /// The Unix stream here will not have `connect` called on it, so it + /// should already be connected via some other means (be it manually, or + /// the standard library). + pub fn from_std(stream: net::UnixStream) -> UnixStream { + UnixStream { + inner: IoSource::new(stream), + } + } + + /// Creates an unnamed pair of connected sockets. + /// + /// Returns two `UnixStream`s which are connected to each other. 
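As with TCP, the stream returned by `connect` above may not be usable immediately. A sketch, using the same placeholder path as before, that waits for a writable event and then checks `take_error` before using the stream:

```
use std::io;
use std::time::Duration;

use mio::net::UnixStream;
use mio::{Events, Interest, Poll, Token};

fn main() -> io::Result<()> {
    // Non-blocking connect: the stream is not necessarily usable yet.
    let mut stream = UnixStream::connect("/tmp/mio-example.sock")?;

    let mut poll = Poll::new()?;
    let mut events = Events::with_capacity(8);
    poll.registry()
        .register(&mut stream, Token(0), Interest::WRITABLE)?;

    // Wait for the writable event that signals the connect finished
    // (or failed).
    poll.poll(&mut events, Some(Duration::from_secs(1)))?;

    // A stored `SO_ERROR` means the connect failed.
    if let Some(err) = stream.take_error()? {
        return Err(err);
    }
    println!("stream connected: {:?}", stream);
    Ok(())
}
```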
+ pub fn pair() -> io::Result<(UnixStream, UnixStream)> { + sys::uds::stream::pair().map(|(stream1, stream2)| { + (UnixStream::from_std(stream1), UnixStream::from_std(stream2)) + }) + } + + /// Returns the socket address of the local half of this connection. + pub fn local_addr(&self) -> io::Result<sys::SocketAddr> { + sys::uds::stream::local_addr(&self.inner) + } + + /// Returns the socket address of the remote half of this connection. + pub fn peer_addr(&self) -> io::Result<sys::SocketAddr> { + sys::uds::stream::peer_addr(&self.inner) + } + + /// Returns the value of the `SO_ERROR` option. + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.inner.take_error() + } + + /// Shuts down the read, write, or both halves of this connection. + /// + /// This function will cause all pending and future I/O calls on the + /// specified portions to immediately return with an appropriate value + /// (see the documentation of `Shutdown`). + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.inner.shutdown(how) + } + + /// Execute an I/O operation ensuring that the socket receives more events + /// if it hits a [`WouldBlock`] error. + /// + /// # Notes + /// + /// This method is required to be called for **all** I/O operations to + /// ensure the user will receive events once the socket is ready again after + /// returning a [`WouldBlock`] error. + /// + /// [`WouldBlock`]: io::ErrorKind::WouldBlock + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use std::io; + /// use std::os::unix::io::AsRawFd; + /// use mio::net::UnixStream; + /// + /// let (stream1, stream2) = UnixStream::pair()?; + /// + /// // Wait until the stream is writable... + /// + /// // Write to the stream using a direct libc call, of course the + /// // `io::Write` implementation would be easier to use. + /// let buf = b"hello"; + /// let n = stream1.try_io(|| { + /// let buf_ptr = &buf as *const _ as *const _; + /// let res = unsafe { libc::send(stream1.as_raw_fd(), buf_ptr, buf.len(), 0) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::send, the closure + /// // should return `WouldBlock` error. + /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("write {} bytes", n); + /// + /// // Wait until the stream is readable... + /// + /// // Read from the stream using a direct libc call, of course the + /// // `io::Read` implementation would be easier to use. + /// let mut buf = [0; 512]; + /// let n = stream2.try_io(|| { + /// let buf_ptr = &mut buf as *mut _ as *mut _; + /// let res = unsafe { libc::recv(stream2.as_raw_fd(), buf_ptr, buf.len(), 0) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure + /// // should return `WouldBlock` error. 
+ /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("read {} bytes", n); + /// # Ok(()) + /// # } + /// ``` + pub fn try_io<F, T>(&self, f: F) -> io::Result<T> + where + F: FnOnce() -> io::Result<T>, + { + self.inner.do_io(|_| f()) + } +} + +impl Read for UnixStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.read(buf)) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.read_vectored(bufs)) + } +} + +impl<'a> Read for &'a UnixStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.read(buf)) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.read_vectored(bufs)) + } +} + +impl Write for UnixStream { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.write(buf)) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.write_vectored(bufs)) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.do_io(|mut inner| inner.flush()) + } +} + +impl<'a> Write for &'a UnixStream { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.write(buf)) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut inner| inner.write_vectored(bufs)) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.do_io(|mut inner| inner.flush()) + } +} + +impl event::Source for UnixStream { + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.register(registry, token, interests) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.reregister(registry, token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + self.inner.deregister(registry) + } +} + +impl fmt::Debug for UnixStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl IntoRawFd for UnixStream { + fn into_raw_fd(self) -> RawFd { + self.inner.into_inner().into_raw_fd() + } +} + +impl AsRawFd for UnixStream { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +impl FromRawFd for UnixStream { + /// Converts a `RawFd` to a `UnixStream`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_fd(fd: RawFd) -> UnixStream { + UnixStream::from_std(FromRawFd::from_raw_fd(fd)) + } +} diff --git a/third_party/rust/mio/src/poll.rs b/third_party/rust/mio/src/poll.rs new file mode 100644 index 0000000000..25a273ad2b --- /dev/null +++ b/third_party/rust/mio/src/poll.rs @@ -0,0 +1,713 @@ +use crate::{event, sys, Events, Interest, Token}; +#[cfg(unix)] +use std::os::unix::io::{AsRawFd, RawFd}; +use std::time::Duration; +use std::{fmt, io}; + +/// Polls for readiness events on all registered values. +/// +/// `Poll` allows a program to monitor a large number of [`event::Source`]s, +/// waiting until one or more become "ready" for some class of operations; e.g. +/// reading and writing. An event source is considered ready if it is possible +/// to immediately perform a corresponding operation; e.g. [`read`] or +/// [`write`]. 
+/// +/// To use `Poll`, an `event::Source` must first be registered with the `Poll` +/// instance using the [`register`] method on its associated `Register`, +/// supplying readiness interest. The readiness interest tells `Poll` which +/// specific operations on the handle to monitor for readiness. A `Token` is +/// also passed to the [`register`] function. When `Poll` returns a readiness +/// event, it will include this token. This associates the event with the +/// event source that generated the event. +/// +/// [`event::Source`]: ./event/trait.Source.html +/// [`read`]: ./net/struct.TcpStream.html#method.read +/// [`write`]: ./net/struct.TcpStream.html#method.write +/// [`register`]: struct.Registry.html#method.register +/// +/// # Examples +/// +/// A basic example -- establishing a `TcpStream` connection. +/// +#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] +#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] +/// # use std::error::Error; +/// # fn main() -> Result<(), Box<dyn Error>> { +/// use mio::{Events, Poll, Interest, Token}; +/// use mio::net::TcpStream; +/// +/// use std::net::{self, SocketAddr}; +/// +/// // Bind a server socket to connect to. +/// let addr: SocketAddr = "127.0.0.1:0".parse()?; +/// let server = net::TcpListener::bind(addr)?; +/// +/// // Construct a new `Poll` handle as well as the `Events` we'll store into +/// let mut poll = Poll::new()?; +/// let mut events = Events::with_capacity(1024); +/// +/// // Connect the stream +/// let mut stream = TcpStream::connect(server.local_addr()?)?; +/// +/// // Register the stream with `Poll` +/// poll.registry().register(&mut stream, Token(0), Interest::READABLE | Interest::WRITABLE)?; +/// +/// // Wait for the socket to become ready. This has to happens in a loop to +/// // handle spurious wakeups. +/// loop { +/// poll.poll(&mut events, None)?; +/// +/// for event in &events { +/// if event.token() == Token(0) && event.is_writable() { +/// // The socket connected (probably, it could still be a spurious +/// // wakeup) +/// return Ok(()); +/// } +/// } +/// } +/// # } +/// ``` +/// +/// # Portability +/// +/// Using `Poll` provides a portable interface across supported platforms as +/// long as the caller takes the following into consideration: +/// +/// ### Spurious events +/// +/// [`Poll::poll`] may return readiness events even if the associated +/// event source is not actually ready. Given the same code, this may +/// happen more on some platforms than others. It is important to never assume +/// that, just because a readiness event was received, that the associated +/// operation will succeed as well. +/// +/// If operation fails with [`WouldBlock`], then the caller should not treat +/// this as an error, but instead should wait until another readiness event is +/// received. +/// +/// ### Draining readiness +/// +/// Once a readiness event is received, the corresponding operation must be +/// performed repeatedly until it returns [`WouldBlock`]. Unless this is done, +/// there is no guarantee that another readiness event will be delivered, even +/// if further data is received for the event source. +/// +/// [`WouldBlock`]: std::io::ErrorKind::WouldBlock +/// +/// ### Readiness operations +/// +/// The only readiness operations that are guaranteed to be present on all +/// supported platforms are [`readable`] and [`writable`]. All other readiness +/// operations may have false negatives and as such should be considered +/// **hints**. 
This means that if a socket is registered with [`readable`] +/// interest and either an error or close is received, a readiness event will +/// be generated for the socket, but it **may** only include `readable` +/// readiness. Also note that, given the potential for spurious events, +/// receiving a readiness event with `read_closed`, `write_closed`, or `error` +/// doesn't actually mean that a `read` on the socket will return a result +/// matching the readiness event. +/// +/// In other words, portable programs that explicitly check for [`read_closed`], +/// [`write_closed`], or [`error`] readiness should be doing so as an +/// **optimization** and always be able to handle an error or close situation +/// when performing the actual read operation. +/// +/// [`readable`]: ./event/struct.Event.html#method.is_readable +/// [`writable`]: ./event/struct.Event.html#method.is_writable +/// [`error`]: ./event/struct.Event.html#method.is_error +/// [`read_closed`]: ./event/struct.Event.html#method.is_read_closed +/// [`write_closed`]: ./event/struct.Event.html#method.is_write_closed +/// +/// ### Registering handles +/// +/// Unless otherwise noted, it should be assumed that types implementing +/// [`event::Source`] will never become ready unless they are registered with +/// `Poll`. +/// +/// For example: +/// +#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] +#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] +/// # use std::error::Error; +/// # use std::net; +/// # fn main() -> Result<(), Box<dyn Error>> { +/// use mio::{Poll, Interest, Token}; +/// use mio::net::TcpStream; +/// use std::net::SocketAddr; +/// use std::time::Duration; +/// use std::thread; +/// +/// let address: SocketAddr = "127.0.0.1:0".parse()?; +/// let listener = net::TcpListener::bind(address)?; +/// let mut sock = TcpStream::connect(listener.local_addr()?)?; +/// +/// thread::sleep(Duration::from_secs(1)); +/// +/// let poll = Poll::new()?; +/// +/// // The connect is not guaranteed to have started until it is registered at +/// // this point +/// poll.registry().register(&mut sock, Token(0), Interest::READABLE | Interest::WRITABLE)?; +/// # Ok(()) +/// # } +/// ``` +/// +/// ### Dropping `Poll` +/// +/// When the `Poll` instance is dropped it may cancel in-flight operations for +/// the registered [event sources], meaning that no further events for them may +/// be received. It also means operations on the registered event sources may no +/// longer work. It is up to the user to keep the `Poll` instance alive while +/// registered event sources are being used. +/// +/// [event sources]: ./event/trait.Source.html +/// +/// ### Accessing raw fd/socket/handle +/// +/// Mio makes it possible for many types to be converted into a raw file +/// descriptor (fd, Unix), socket (Windows) or handle (Windows). This makes it +/// possible to support more operations on the type than Mio supports, for +/// example it makes [mio-aio] possible. However accessing the raw fd is not +/// without it's pitfalls. +/// +/// Specifically performing I/O operations outside of Mio on these types (via +/// the raw fd) has unspecified behaviour. It could cause no more events to be +/// generated for the type even though it returned `WouldBlock` (in an operation +/// directly accessing the fd). The behaviour is OS specific and Mio can only +/// guarantee cross-platform behaviour if it can control the I/O. 
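The "Draining readiness" rule above is the guideline most often missed in practice. A compact sketch of reading until `WouldBlock` once a readable event has arrived; the buffer size is arbitrary:

```
use std::io::{self, Read};

use mio::net::TcpStream;

/// Read everything currently available, stopping at `WouldBlock` so the
/// next readiness event is guaranteed to be delivered.
fn drain(stream: &mut TcpStream, out: &mut Vec<u8>) -> io::Result<bool> {
    let mut buf = [0u8; 4096];
    loop {
        match stream.read(&mut buf) {
            // The peer closed the connection.
            Ok(0) => return Ok(true),
            Ok(n) => out.extend_from_slice(&buf[..n]),
            // Nothing left to read right now; wait for the next event.
            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(false),
            // Interrupted by a signal: safe to retry immediately.
            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
}
```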
+/// +/// [mio-aio]: https://github.com/asomers/mio-aio +/// +/// *The following is **not** guaranteed, just a description of the current +/// situation!* Mio is allowed to change the following without it being considered +/// a breaking change, don't depend on this, it's just here to inform the user. +/// Currently the kqueue and epoll implementation support direct I/O operations +/// on the fd without Mio's knowledge. Windows however needs **all** I/O +/// operations to go through Mio otherwise it is not able to update it's +/// internal state properly and won't generate events. +/// +/// ### Polling without registering event sources +/// +/// +/// *The following is **not** guaranteed, just a description of the current +/// situation!* Mio is allowed to change the following without it being +/// considered a breaking change, don't depend on this, it's just here to inform +/// the user. On platforms that use epoll, kqueue or IOCP (see implementation +/// notes below) polling without previously registering [event sources] will +/// result in sleeping forever, only a process signal will be able to wake up +/// the thread. +/// +/// On WASM/WASI this is different as it doesn't support process signals, +/// furthermore the WASI specification doesn't specify a behaviour in this +/// situation, thus it's up to the implementation what to do here. As an +/// example, the wasmtime runtime will return `EINVAL` in this situation, but +/// different runtimes may return different results. If you have further +/// insights or thoughts about this situation (and/or how Mio should handle it) +/// please add you comment to [pull request#1580]. +/// +/// [event sources]: crate::event::Source +/// [pull request#1580]: https://github.com/tokio-rs/mio/pull/1580 +/// +/// # Implementation notes +/// +/// `Poll` is backed by the selector provided by the operating system. +/// +/// | OS | Selector | +/// |---------------|-----------| +/// | Android | [epoll] | +/// | DragonFly BSD | [kqueue] | +/// | FreeBSD | [kqueue] | +/// | iOS | [kqueue] | +/// | illumos | [epoll] | +/// | Linux | [epoll] | +/// | NetBSD | [kqueue] | +/// | OpenBSD | [kqueue] | +/// | Windows | [IOCP] | +/// | macOS | [kqueue] | +/// +/// On all supported platforms, socket operations are handled by using the +/// system selector. Platform specific extensions (e.g. [`SourceFd`]) allow +/// accessing other features provided by individual system selectors. For +/// example, Linux's [`signalfd`] feature can be used by registering the FD with +/// `Poll` via [`SourceFd`]. +/// +/// On all platforms except windows, a call to [`Poll::poll`] is mostly just a +/// direct call to the system selector. However, [IOCP] uses a completion model +/// instead of a readiness model. In this case, `Poll` must adapt the completion +/// model Mio's API. While non-trivial, the bridge layer is still quite +/// efficient. The most expensive part being calls to `read` and `write` require +/// data to be copied into an intermediate buffer before it is passed to the +/// kernel. +/// +/// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html +/// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 +/// [IOCP]: https://docs.microsoft.com/en-us/windows/win32/fileio/i-o-completion-ports +/// [`signalfd`]: https://man7.org/linux/man-pages/man2/signalfd.2.html +/// [`SourceFd`]: unix/struct.SourceFd.html +/// [`Poll::poll`]: struct.Poll.html#method.poll +pub struct Poll { + registry: Registry, +} + +/// Registers I/O resources. 
+pub struct Registry { + selector: sys::Selector, +} + +impl Poll { + cfg_os_poll! { + /// Return a new `Poll` handle. + /// + /// This function will make a syscall to the operating system to create + /// the system selector. If this syscall fails, `Poll::new` will return + /// with the error. + /// + /// close-on-exec flag is set on the file descriptors used by the selector to prevent + /// leaking it to executed processes. However, on some systems such as + /// old Linux systems that don't support `epoll_create1` syscall it is done + /// non-atomically, so a separate thread executing in parallel to this + /// function may accidentally leak the file descriptor if it executes a + /// new process before this function returns. + /// + /// See [struct] level docs for more details. + /// + /// [struct]: struct.Poll.html + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::{Poll, Events}; + /// use std::time::Duration; + /// + /// let mut poll = match Poll::new() { + /// Ok(poll) => poll, + /// Err(e) => panic!("failed to create Poll instance; err={:?}", e), + /// }; + /// + /// // Create a structure to receive polled events + /// let mut events = Events::with_capacity(1024); + /// + /// // Wait for events, but none will be received because no + /// // `event::Source`s have been registered with this `Poll` instance. + /// poll.poll(&mut events, Some(Duration::from_millis(500)))?; + /// assert!(events.is_empty()); + /// # Ok(()) + /// # } + /// ``` + pub fn new() -> io::Result<Poll> { + sys::Selector::new().map(|selector| Poll { + registry: Registry { selector }, + }) + } + } + + /// Create a separate `Registry` which can be used to register + /// `event::Source`s. + pub fn registry(&self) -> &Registry { + &self.registry + } + + /// Wait for readiness events + /// + /// Blocks the current thread and waits for readiness events for any of the + /// [`event::Source`]s that have been registered with this `Poll` instance. + /// The function will block until either at least one readiness event has + /// been received or `timeout` has elapsed. A `timeout` of `None` means that + /// `poll` will block until a readiness event has been received. + /// + /// The supplied `events` will be cleared and newly received readiness events + /// will be pushed onto the end. At most `events.capacity()` events will be + /// returned. If there are further pending readiness events, they will be + /// returned on the next call to `poll`. + /// + /// A single call to `poll` may result in multiple readiness events being + /// returned for a single event source. For example, if a TCP socket becomes + /// both readable and writable, it may be possible for a single readiness + /// event to be returned with both [`readable`] and [`writable`] readiness + /// **OR** two separate events may be returned, one with [`readable`] set + /// and one with [`writable`] set. + /// + /// Note that the `timeout` will be rounded up to the system clock + /// granularity (usually 1ms), and kernel scheduling delays mean that + /// the blocking interval may be overrun by a small amount. + /// + /// See the [struct] level documentation for a higher level discussion of + /// polling. 
+ /// + /// [`event::Source`]: ./event/trait.Source.html + /// [`readable`]: struct.Interest.html#associatedconstant.READABLE + /// [`writable`]: struct.Interest.html#associatedconstant.WRITABLE + /// [struct]: struct.Poll.html + /// [`iter`]: ./event/struct.Events.html#method.iter + /// + /// # Notes + /// + /// This returns any errors without attempting to retry, previous versions + /// of Mio would automatically retry the poll call if it was interrupted + /// (if `EINTR` was returned). + /// + /// Currently if the `timeout` elapses without any readiness events + /// triggering this will return `Ok(())`. However we're not guaranteeing + /// this behaviour as this depends on the OS. + /// + /// # Examples + /// + /// A basic example -- establishing a `TcpStream` connection. + /// + #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] + #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] + /// # use std::error::Error; + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::{Events, Poll, Interest, Token}; + /// use mio::net::TcpStream; + /// + /// use std::net::{TcpListener, SocketAddr}; + /// use std::thread; + /// + /// // Bind a server socket to connect to. + /// let addr: SocketAddr = "127.0.0.1:0".parse()?; + /// let server = TcpListener::bind(addr)?; + /// let addr = server.local_addr()?.clone(); + /// + /// // Spawn a thread to accept the socket + /// thread::spawn(move || { + /// let _ = server.accept(); + /// }); + /// + /// // Construct a new `Poll` handle as well as the `Events` we'll store into + /// let mut poll = Poll::new()?; + /// let mut events = Events::with_capacity(1024); + /// + /// // Connect the stream + /// let mut stream = TcpStream::connect(addr)?; + /// + /// // Register the stream with `Poll` + /// poll.registry().register( + /// &mut stream, + /// Token(0), + /// Interest::READABLE | Interest::WRITABLE)?; + /// + /// // Wait for the socket to become ready. This has to happens in a loop to + /// // handle spurious wakeups. + /// loop { + /// poll.poll(&mut events, None)?; + /// + /// for event in &events { + /// if event.token() == Token(0) && event.is_writable() { + /// // The socket connected (probably, it could still be a spurious + /// // wakeup) + /// return Ok(()); + /// } + /// } + /// } + /// # } + /// ``` + /// + /// [struct]: # + pub fn poll(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> { + self.registry.selector.select(events.sys(), timeout) + } +} + +#[cfg(unix)] +impl AsRawFd for Poll { + fn as_raw_fd(&self) -> RawFd { + self.registry.as_raw_fd() + } +} + +impl fmt::Debug for Poll { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Poll").finish() + } +} + +impl Registry { + /// Register an [`event::Source`] with the `Poll` instance. + /// + /// Once registered, the `Poll` instance will monitor the event source for + /// readiness state changes. When it notices a state change, it will return + /// a readiness event for the handle the next time [`poll`] is called. + /// + /// See [`Poll`] docs for a high level overview. + /// + /// # Arguments + /// + /// `source: &mut S: event::Source`: This is the source of events that the + /// `Poll` instance should monitor for readiness state changes. + /// + /// `token: Token`: The caller picks a token to associate with the socket. + /// When [`poll`] returns an event for the handle, this token is included. + /// This allows the caller to map the event to its source. 
The token + /// associated with the `event::Source` can be changed at any time by + /// calling [`reregister`]. + /// + /// See documentation on [`Token`] for an example showing how to pick + /// [`Token`] values. + /// + /// `interest: Interest`: Specifies which operations `Poll` should monitor + /// for readiness. `Poll` will only return readiness events for operations + /// specified by this argument. + /// + /// If a socket is registered with readable interest and the socket becomes + /// writable, no event will be returned from [`poll`]. + /// + /// The readiness interest for an `event::Source` can be changed at any time + /// by calling [`reregister`]. + /// + /// # Notes + /// + /// Callers must ensure that if a source being registered with a `Poll` + /// instance was previously registered with that `Poll` instance, then a + /// call to [`deregister`] has already occurred. Consecutive calls to + /// `register` is unspecified behavior. + /// + /// Unless otherwise specified, the caller should assume that once an event + /// source is registered with a `Poll` instance, it is bound to that `Poll` + /// instance for the lifetime of the event source. This remains true even + /// if the event source is deregistered from the poll instance using + /// [`deregister`]. + /// + /// [`event::Source`]: ./event/trait.Source.html + /// [`poll`]: struct.Poll.html#method.poll + /// [`reregister`]: struct.Registry.html#method.reregister + /// [`deregister`]: struct.Registry.html#method.deregister + /// [`Token`]: struct.Token.html + /// + /// # Examples + /// + #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] + #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] + /// # use std::error::Error; + /// # use std::net; + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::{Events, Poll, Interest, Token}; + /// use mio::net::TcpStream; + /// use std::net::SocketAddr; + /// use std::time::{Duration, Instant}; + /// + /// let mut poll = Poll::new()?; + /// + /// let address: SocketAddr = "127.0.0.1:0".parse()?; + /// let listener = net::TcpListener::bind(address)?; + /// let mut socket = TcpStream::connect(listener.local_addr()?)?; + /// + /// // Register the socket with `poll` + /// poll.registry().register( + /// &mut socket, + /// Token(0), + /// Interest::READABLE | Interest::WRITABLE)?; + /// + /// let mut events = Events::with_capacity(1024); + /// let start = Instant::now(); + /// let timeout = Duration::from_millis(500); + /// + /// loop { + /// let elapsed = start.elapsed(); + /// + /// if elapsed >= timeout { + /// // Connection timed out + /// return Ok(()); + /// } + /// + /// let remaining = timeout - elapsed; + /// poll.poll(&mut events, Some(remaining))?; + /// + /// for event in &events { + /// if event.token() == Token(0) { + /// // Something (probably) happened on the socket. + /// return Ok(()); + /// } + /// } + /// } + /// # } + /// ``` + pub fn register<S>(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()> + where + S: event::Source + ?Sized, + { + trace!( + "registering event source with poller: token={:?}, interests={:?}", + token, + interests + ); + source.register(self, token, interests) + } + + /// Re-register an [`event::Source`] with the `Poll` instance. + /// + /// Re-registering an event source allows changing the details of the + /// registration. Specifically, it allows updating the associated `token` + /// and `interests` specified in previous `register` and `reregister` calls. 
+ /// + /// The `reregister` arguments fully override the previous values. In other + /// words, if a socket is registered with [`readable`] interest and the call + /// to `reregister` specifies [`writable`], then read interest is no longer + /// requested for the handle. + /// + /// The event source must have previously been registered with this instance + /// of `Poll`, otherwise the behavior is unspecified. + /// + /// See the [`register`] documentation for details about the function + /// arguments and see the [`struct`] docs for a high level overview of + /// polling. + /// + /// # Examples + /// + #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] + #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] + /// # use std::error::Error; + /// # use std::net; + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::{Poll, Interest, Token}; + /// use mio::net::TcpStream; + /// use std::net::SocketAddr; + /// + /// let poll = Poll::new()?; + /// + /// let address: SocketAddr = "127.0.0.1:0".parse()?; + /// let listener = net::TcpListener::bind(address)?; + /// let mut socket = TcpStream::connect(listener.local_addr()?)?; + /// + /// // Register the socket with `poll`, requesting readable + /// poll.registry().register( + /// &mut socket, + /// Token(0), + /// Interest::READABLE)?; + /// + /// // Reregister the socket specifying write interest instead. Even though + /// // the token is the same it must be specified. + /// poll.registry().reregister( + /// &mut socket, + /// Token(0), + /// Interest::WRITABLE)?; + /// # Ok(()) + /// # } + /// ``` + /// + /// [`event::Source`]: ./event/trait.Source.html + /// [`struct`]: struct.Poll.html + /// [`register`]: struct.Registry.html#method.register + /// [`readable`]: ./event/struct.Event.html#is_readable + /// [`writable`]: ./event/struct.Event.html#is_writable + pub fn reregister<S>(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()> + where + S: event::Source + ?Sized, + { + trace!( + "reregistering event source with poller: token={:?}, interests={:?}", + token, + interests + ); + source.reregister(self, token, interests) + } + + /// Deregister an [`event::Source`] with the `Poll` instance. + /// + /// When an event source is deregistered, the `Poll` instance will no longer + /// monitor it for readiness state changes. Deregistering clears up any + /// internal resources needed to track the handle. After an explicit call + /// to this method completes, it is guaranteed that the token previously + /// registered to this handle will not be returned by a future poll, so long + /// as a happens-before relationship is established between this call and + /// the poll. + /// + /// The event source must have previously been registered with this instance + /// of `Poll`, otherwise the behavior is unspecified. + /// + /// A handle can be passed back to `register` after it has been + /// deregistered; however, it must be passed back to the **same** `Poll` + /// instance, otherwise the behavior is unspecified. 
+ /// + /// # Examples + /// + #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] + #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] + /// # use std::error::Error; + /// # use std::net; + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use mio::{Events, Poll, Interest, Token}; + /// use mio::net::TcpStream; + /// use std::net::SocketAddr; + /// use std::time::Duration; + /// + /// let mut poll = Poll::new()?; + /// + /// let address: SocketAddr = "127.0.0.1:0".parse()?; + /// let listener = net::TcpListener::bind(address)?; + /// let mut socket = TcpStream::connect(listener.local_addr()?)?; + /// + /// // Register the socket with `poll` + /// poll.registry().register( + /// &mut socket, + /// Token(0), + /// Interest::READABLE)?; + /// + /// poll.registry().deregister(&mut socket)?; + /// + /// let mut events = Events::with_capacity(1024); + /// + /// // Set a timeout because this poll should never receive any events. + /// poll.poll(&mut events, Some(Duration::from_secs(1)))?; + /// assert!(events.is_empty()); + /// # Ok(()) + /// # } + /// ``` + pub fn deregister<S>(&self, source: &mut S) -> io::Result<()> + where + S: event::Source + ?Sized, + { + trace!("deregistering event source from poller"); + source.deregister(self) + } + + /// Creates a new independently owned `Registry`. + /// + /// Event sources registered with this `Registry` will be registered with + /// the original `Registry` and `Poll` instance. + pub fn try_clone(&self) -> io::Result<Registry> { + self.selector + .try_clone() + .map(|selector| Registry { selector }) + } + + /// Internal check to ensure only a single `Waker` is active per [`Poll`] + /// instance. + #[cfg(all(debug_assertions, not(target_os = "wasi")))] + pub(crate) fn register_waker(&self) { + assert!( + !self.selector.register_waker(), + "Only a single `Waker` can be active per `Poll` instance" + ); + } + + /// Get access to the `sys::Selector`. + #[cfg(any(not(target_os = "wasi"), feature = "net"))] + pub(crate) fn selector(&self) -> &sys::Selector { + &self.selector + } +} + +impl fmt::Debug for Registry { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Registry").finish() + } +} + +#[cfg(unix)] +impl AsRawFd for Registry { + fn as_raw_fd(&self) -> RawFd { + self.selector.as_raw_fd() + } +} + +cfg_os_poll! { + #[cfg(unix)] + #[test] + pub fn as_raw_fd() { + let poll = Poll::new().unwrap(); + assert!(poll.as_raw_fd() > 0); + } +} diff --git a/third_party/rust/mio/src/sys/mod.rs b/third_party/rust/mio/src/sys/mod.rs new file mode 100644 index 0000000000..2a968b265f --- /dev/null +++ b/third_party/rust/mio/src/sys/mod.rs @@ -0,0 +1,86 @@ +//! Module with system specific types. +//! +//! Required types: +//! +//! * `Event`: a type alias for the system specific event, e.g. `kevent` or +//! `epoll_event`. +//! * `event`: a module with various helper functions for `Event`, see +//! [`crate::event::Event`] for the required functions. +//! * `Events`: collection of `Event`s, see [`crate::Events`]. +//! * `IoSourceState`: state for the `IoSource` type. +//! * `Selector`: selector used to register event sources and poll for events, +//! see [`crate::Poll`] and [`crate::Registry`] for required +//! methods. +//! * `tcp` and `udp` modules: see the [`crate::net`] module. +//! * `Waker`: see [`crate::Waker`]. + +cfg_os_poll! { + macro_rules! 
debug_detail { + ( + $type: ident ($event_type: ty), $test: path, + $($(#[$target: meta])* $libc: ident :: $flag: ident),+ $(,)* + ) => { + struct $type($event_type); + + impl fmt::Debug for $type { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut written_one = false; + $( + $(#[$target])* + #[allow(clippy::bad_bit_mask)] // Apparently some flags are zero. + { + // Windows doesn't use `libc` but the `afd` module. + if $test(&self.0, &$libc :: $flag) { + if !written_one { + write!(f, "{}", stringify!($flag))?; + written_one = true; + } else { + write!(f, "|{}", stringify!($flag))?; + } + } + } + )+ + if !written_one { + write!(f, "(empty)") + } else { + Ok(()) + } + } + } + }; + } +} + +#[cfg(unix)] +cfg_os_poll! { + mod unix; + pub use self::unix::*; +} + +#[cfg(windows)] +cfg_os_poll! { + mod windows; + pub use self::windows::*; +} + +#[cfg(target_os = "wasi")] +cfg_os_poll! { + mod wasi; + pub(crate) use self::wasi::*; +} + +cfg_not_os_poll! { + mod shell; + pub(crate) use self::shell::*; + + #[cfg(unix)] + cfg_any_os_ext! { + mod unix; + pub use self::unix::SourceFd; + } + + #[cfg(unix)] + cfg_net! { + pub use self::unix::SocketAddr; + } +} diff --git a/third_party/rust/mio/src/sys/shell/mod.rs b/third_party/rust/mio/src/sys/shell/mod.rs new file mode 100644 index 0000000000..8a3175f764 --- /dev/null +++ b/third_party/rust/mio/src/sys/shell/mod.rs @@ -0,0 +1,72 @@ +macro_rules! os_required { + () => { + panic!("mio must be compiled with `os-poll` to run.") + }; +} + +mod selector; +pub(crate) use self::selector::{event, Event, Events, Selector}; + +#[cfg(not(target_os = "wasi"))] +mod waker; +#[cfg(not(target_os = "wasi"))] +pub(crate) use self::waker::Waker; + +cfg_net! { + pub(crate) mod tcp; + pub(crate) mod udp; + #[cfg(unix)] + pub(crate) mod uds; +} + +cfg_io_source! { + use std::io; + #[cfg(windows)] + use std::os::windows::io::RawSocket; + + #[cfg(windows)] + use crate::{Registry, Token, Interest}; + + pub(crate) struct IoSourceState; + + impl IoSourceState { + pub fn new() -> IoSourceState { + IoSourceState + } + + pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R> + where + F: FnOnce(&T) -> io::Result<R>, + { + // We don't hold state, so we can just call the function and + // return. + f(io) + } + } + + #[cfg(windows)] + impl IoSourceState { + pub fn register( + &mut self, + _: &Registry, + _: Token, + _: Interest, + _: RawSocket, + ) -> io::Result<()> { + os_required!() + } + + pub fn reregister( + &mut self, + _: &Registry, + _: Token, + _: Interest, + ) -> io::Result<()> { + os_required!() + } + + pub fn deregister(&mut self) -> io::Result<()> { + os_required!() + } + } +} diff --git a/third_party/rust/mio/src/sys/shell/selector.rs b/third_party/rust/mio/src/sys/shell/selector.rs new file mode 100644 index 0000000000..bfc598a12a --- /dev/null +++ b/third_party/rust/mio/src/sys/shell/selector.rs @@ -0,0 +1,127 @@ +use std::io; +#[cfg(unix)] +use std::os::unix::io::{AsRawFd, RawFd}; +use std::time::Duration; + +pub type Event = usize; + +pub type Events = Vec<Event>; + +#[derive(Debug)] +pub struct Selector {} + +impl Selector { + pub fn try_clone(&self) -> io::Result<Selector> { + os_required!(); + } + + pub fn select(&self, _: &mut Events, _: Option<Duration>) -> io::Result<()> { + os_required!(); + } + + #[cfg(all(debug_assertions, not(target_os = "wasi")))] + pub fn register_waker(&self) -> bool { + os_required!(); + } +} + +#[cfg(unix)] +cfg_any_os_ext! 
{ + use crate::{Interest, Token}; + + impl Selector { + pub fn register(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> { + os_required!(); + } + + pub fn reregister(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> { + os_required!(); + } + + pub fn deregister(&self, _: RawFd) -> io::Result<()> { + os_required!(); + } + } +} + +#[cfg(target_os = "wasi")] +cfg_any_os_ext! { + use crate::{Interest, Token}; + + impl Selector { + pub fn register(&self, _: wasi::Fd, _: Token, _: Interest) -> io::Result<()> { + os_required!(); + } + + pub fn reregister(&self, _: wasi::Fd, _: Token, _: Interest) -> io::Result<()> { + os_required!(); + } + + pub fn deregister(&self, _: wasi::Fd) -> io::Result<()> { + os_required!(); + } + } +} + +cfg_io_source! { + #[cfg(debug_assertions)] + impl Selector { + pub fn id(&self) -> usize { + os_required!(); + } + } +} + +#[cfg(unix)] +impl AsRawFd for Selector { + fn as_raw_fd(&self) -> RawFd { + os_required!() + } +} + +#[allow(clippy::trivially_copy_pass_by_ref)] +pub mod event { + use crate::sys::Event; + use crate::Token; + use std::fmt; + + pub fn token(_: &Event) -> Token { + os_required!(); + } + + pub fn is_readable(_: &Event) -> bool { + os_required!(); + } + + pub fn is_writable(_: &Event) -> bool { + os_required!(); + } + + pub fn is_error(_: &Event) -> bool { + os_required!(); + } + + pub fn is_read_closed(_: &Event) -> bool { + os_required!(); + } + + pub fn is_write_closed(_: &Event) -> bool { + os_required!(); + } + + pub fn is_priority(_: &Event) -> bool { + os_required!(); + } + + pub fn is_aio(_: &Event) -> bool { + os_required!(); + } + + pub fn is_lio(_: &Event) -> bool { + os_required!(); + } + + pub fn debug_details(_: &mut fmt::Formatter<'_>, _: &Event) -> fmt::Result { + os_required!(); + } +} diff --git a/third_party/rust/mio/src/sys/shell/tcp.rs b/third_party/rust/mio/src/sys/shell/tcp.rs new file mode 100644 index 0000000000..260763aeb6 --- /dev/null +++ b/third_party/rust/mio/src/sys/shell/tcp.rs @@ -0,0 +1,31 @@ +use std::io; +use std::net::{self, SocketAddr}; + +#[cfg(not(target_os = "wasi"))] +pub(crate) fn new_for_addr(_: SocketAddr) -> io::Result<i32> { + os_required!(); +} + +#[cfg(not(target_os = "wasi"))] +pub(crate) fn bind(_: &net::TcpListener, _: SocketAddr) -> io::Result<()> { + os_required!(); +} + +#[cfg(not(target_os = "wasi"))] +pub(crate) fn connect(_: &net::TcpStream, _: SocketAddr) -> io::Result<()> { + os_required!(); +} + +#[cfg(not(target_os = "wasi"))] +pub(crate) fn listen(_: &net::TcpListener, _: u32) -> io::Result<()> { + os_required!(); +} + +#[cfg(unix)] +pub(crate) fn set_reuseaddr(_: &net::TcpListener, _: bool) -> io::Result<()> { + os_required!(); +} + +pub(crate) fn accept(_: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> { + os_required!(); +} diff --git a/third_party/rust/mio/src/sys/shell/udp.rs b/third_party/rust/mio/src/sys/shell/udp.rs new file mode 100644 index 0000000000..6a48b6941d --- /dev/null +++ b/third_party/rust/mio/src/sys/shell/udp.rs @@ -0,0 +1,11 @@ +#![cfg(not(target_os = "wasi"))] +use std::io; +use std::net::{self, SocketAddr}; + +pub fn bind(_: SocketAddr) -> io::Result<net::UdpSocket> { + os_required!() +} + +pub(crate) fn only_v6(_: &net::UdpSocket) -> io::Result<bool> { + os_required!() +} diff --git a/third_party/rust/mio/src/sys/shell/uds.rs b/third_party/rust/mio/src/sys/shell/uds.rs new file mode 100644 index 0000000000..c18aca042f --- /dev/null +++ b/third_party/rust/mio/src/sys/shell/uds.rs @@ -0,0 +1,75 @@ +pub(crate) mod datagram { + use 
crate::net::SocketAddr; + use std::io; + use std::os::unix::net; + use std::path::Path; + + pub(crate) fn bind(_: &Path) -> io::Result<net::UnixDatagram> { + os_required!() + } + + pub(crate) fn unbound() -> io::Result<net::UnixDatagram> { + os_required!() + } + + pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> { + os_required!() + } + + pub(crate) fn local_addr(_: &net::UnixDatagram) -> io::Result<SocketAddr> { + os_required!() + } + + pub(crate) fn peer_addr(_: &net::UnixDatagram) -> io::Result<SocketAddr> { + os_required!() + } + + pub(crate) fn recv_from( + _: &net::UnixDatagram, + _: &mut [u8], + ) -> io::Result<(usize, SocketAddr)> { + os_required!() + } +} + +pub(crate) mod listener { + use crate::net::{SocketAddr, UnixStream}; + use std::io; + use std::os::unix::net; + use std::path::Path; + + pub(crate) fn bind(_: &Path) -> io::Result<net::UnixListener> { + os_required!() + } + + pub(crate) fn accept(_: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> { + os_required!() + } + + pub(crate) fn local_addr(_: &net::UnixListener) -> io::Result<SocketAddr> { + os_required!() + } +} + +pub(crate) mod stream { + use crate::net::SocketAddr; + use std::io; + use std::os::unix::net; + use std::path::Path; + + pub(crate) fn connect(_: &Path) -> io::Result<net::UnixStream> { + os_required!() + } + + pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> { + os_required!() + } + + pub(crate) fn local_addr(_: &net::UnixStream) -> io::Result<SocketAddr> { + os_required!() + } + + pub(crate) fn peer_addr(_: &net::UnixStream) -> io::Result<SocketAddr> { + os_required!() + } +} diff --git a/third_party/rust/mio/src/sys/shell/waker.rs b/third_party/rust/mio/src/sys/shell/waker.rs new file mode 100644 index 0000000000..bbdd7c33af --- /dev/null +++ b/third_party/rust/mio/src/sys/shell/waker.rs @@ -0,0 +1,16 @@ +use crate::sys::Selector; +use crate::Token; +use std::io; + +#[derive(Debug)] +pub struct Waker {} + +impl Waker { + pub fn new(_: &Selector, _: Token) -> io::Result<Waker> { + os_required!(); + } + + pub fn wake(&self) -> io::Result<()> { + os_required!(); + } +} diff --git a/third_party/rust/mio/src/sys/unix/mod.rs b/third_party/rust/mio/src/sys/unix/mod.rs new file mode 100644 index 0000000000..231480a5da --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/mod.rs @@ -0,0 +1,72 @@ +/// Helper macro to execute a system call that returns an `io::Result`. +// +// Macro must be defined before any modules that uses them. +#[allow(unused_macros)] +macro_rules! syscall { + ($fn: ident ( $($arg: expr),* $(,)* ) ) => {{ + let res = unsafe { libc::$fn($($arg, )*) }; + if res == -1 { + Err(std::io::Error::last_os_error()) + } else { + Ok(res) + } + }}; +} + +cfg_os_poll! { + mod selector; + pub(crate) use self::selector::{event, Event, Events, Selector}; + + mod sourcefd; + pub use self::sourcefd::SourceFd; + + mod waker; + pub(crate) use self::waker::Waker; + + cfg_net! { + mod net; + + pub(crate) mod tcp; + pub(crate) mod udp; + pub(crate) mod uds; + pub use self::uds::SocketAddr; + } + + cfg_io_source! { + use std::io; + + // Both `kqueue` and `epoll` don't need to hold any user space state. + pub(crate) struct IoSourceState; + + impl IoSourceState { + pub fn new() -> IoSourceState { + IoSourceState + } + + pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R> + where + F: FnOnce(&T) -> io::Result<R>, + { + // We don't hold state, so we can just call the function and + // return. + f(io) + } + } + } + + cfg_os_ext! 
{ + pub(crate) mod pipe; + } +} + +cfg_not_os_poll! { + cfg_net! { + mod uds; + pub use self::uds::SocketAddr; + } + + cfg_any_os_ext! { + mod sourcefd; + pub use self::sourcefd::SourceFd; + } +} diff --git a/third_party/rust/mio/src/sys/unix/net.rs b/third_party/rust/mio/src/sys/unix/net.rs new file mode 100644 index 0000000000..e93918c729 --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/net.rs @@ -0,0 +1,178 @@ +use std::io; +use std::mem::size_of; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; + +pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: libc::c_int) -> io::Result<libc::c_int> { + let domain = match addr { + SocketAddr::V4(..) => libc::AF_INET, + SocketAddr::V6(..) => libc::AF_INET6, + }; + + new_socket(domain, socket_type) +} + +/// Create a new non-blocking socket. +pub(crate) fn new_socket(domain: libc::c_int, socket_type: libc::c_int) -> io::Result<libc::c_int> { + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd", + ))] + let socket_type = socket_type | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC; + + let socket = syscall!(socket(domain, socket_type, 0))?; + + // Mimick `libstd` and set `SO_NOSIGPIPE` on apple systems. + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + if let Err(err) = syscall!(setsockopt( + socket, + libc::SOL_SOCKET, + libc::SO_NOSIGPIPE, + &1 as *const libc::c_int as *const libc::c_void, + size_of::<libc::c_int>() as libc::socklen_t + )) { + let _ = syscall!(close(socket)); + return Err(err); + } + + // Darwin doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC. + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + { + if let Err(err) = syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK)) { + let _ = syscall!(close(socket)); + return Err(err); + } + if let Err(err) = syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC)) { + let _ = syscall!(close(socket)); + return Err(err); + } + } + + Ok(socket) +} + +/// A type with the same memory layout as `libc::sockaddr`. Used in converting Rust level +/// SocketAddr* types into their system representation. The benefit of this specific +/// type over using `libc::sockaddr_storage` is that this type is exactly as large as it +/// needs to be and not a lot larger. And it can be initialized cleaner from Rust. +#[repr(C)] +pub(crate) union SocketAddrCRepr { + v4: libc::sockaddr_in, + v6: libc::sockaddr_in6, +} + +impl SocketAddrCRepr { + pub(crate) fn as_ptr(&self) -> *const libc::sockaddr { + self as *const _ as *const libc::sockaddr + } +} + +/// Converts a Rust `SocketAddr` into the system representation. +pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, libc::socklen_t) { + match addr { + SocketAddr::V4(ref addr) => { + // `s_addr` is stored as BE on all machine and the array is in BE order. + // So the native endian conversion method is used so that it's never swapped. 
+ let sin_addr = libc::in_addr { + s_addr: u32::from_ne_bytes(addr.ip().octets()), + }; + + let sockaddr_in = libc::sockaddr_in { + sin_family: libc::AF_INET as libc::sa_family_t, + sin_port: addr.port().to_be(), + sin_addr, + sin_zero: [0; 8], + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "tvos", + target_os = "watchos", + ))] + sin_len: 0, + }; + + let sockaddr = SocketAddrCRepr { v4: sockaddr_in }; + let socklen = size_of::<libc::sockaddr_in>() as libc::socklen_t; + (sockaddr, socklen) + } + SocketAddr::V6(ref addr) => { + let sockaddr_in6 = libc::sockaddr_in6 { + sin6_family: libc::AF_INET6 as libc::sa_family_t, + sin6_port: addr.port().to_be(), + sin6_addr: libc::in6_addr { + s6_addr: addr.ip().octets(), + }, + sin6_flowinfo: addr.flowinfo(), + sin6_scope_id: addr.scope_id(), + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "tvos", + target_os = "watchos", + ))] + sin6_len: 0, + #[cfg(target_os = "illumos")] + __sin6_src_id: 0, + }; + + let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 }; + let socklen = size_of::<libc::sockaddr_in6>() as libc::socklen_t; + (sockaddr, socklen) + } + } +} + +/// Converts a `libc::sockaddr` compatible struct into a native Rust `SocketAddr`. +/// +/// # Safety +/// +/// `storage` must have the `ss_family` field correctly initialized. +/// `storage` must be initialised to a `sockaddr_in` or `sockaddr_in6`. +pub(crate) unsafe fn to_socket_addr( + storage: *const libc::sockaddr_storage, +) -> io::Result<SocketAddr> { + match (*storage).ss_family as libc::c_int { + libc::AF_INET => { + // Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in. + let addr: &libc::sockaddr_in = &*(storage as *const libc::sockaddr_in); + let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes()); + let port = u16::from_be(addr.sin_port); + Ok(SocketAddr::V4(SocketAddrV4::new(ip, port))) + } + libc::AF_INET6 => { + // Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6. + let addr: &libc::sockaddr_in6 = &*(storage as *const libc::sockaddr_in6); + let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr); + let port = u16::from_be(addr.sin6_port); + Ok(SocketAddr::V6(SocketAddrV6::new( + ip, + port, + addr.sin6_flowinfo, + addr.sin6_scope_id, + ))) + } + _ => Err(io::ErrorKind::InvalidInput.into()), + } +} diff --git a/third_party/rust/mio/src/sys/unix/pipe.rs b/third_party/rust/mio/src/sys/unix/pipe.rs new file mode 100644 index 0000000000..c2654ad599 --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/pipe.rs @@ -0,0 +1,577 @@ +//! Unix pipe. +//! +//! See the [`new`] function for documentation. + +use std::fs::File; +use std::io::{self, IoSlice, IoSliceMut, Read, Write}; +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +use std::process::{ChildStderr, ChildStdin, ChildStdout}; + +use crate::io_source::IoSource; +use crate::{event, Interest, Registry, Token}; + +/// Create a new non-blocking Unix pipe. +/// +/// This is a wrapper around Unix's [`pipe(2)`] system call and can be used as +/// inter-process or thread communication channel. +/// +/// This channel may be created before forking the process and then one end used +/// in each process, e.g. the parent process has the sending end to send command +/// to the child process. 
+/// +/// [`pipe(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pipe.html +/// +/// # Events +/// +/// The [`Sender`] can be registered with [`WRITABLE`] interest to receive +/// [writable events], the [`Receiver`] with [`READABLE`] interest. Once data is +/// written to the `Sender` the `Receiver` will receive an [readable event]. +/// +/// In addition to those events, events will also be generated if the other side +/// is dropped. To check if the `Sender` is dropped you'll need to check +/// [`is_read_closed`] on events for the `Receiver`, if it returns true the +/// `Sender` is dropped. On the `Sender` end check [`is_write_closed`], if it +/// returns true the `Receiver` was dropped. Also see the second example below. +/// +/// [`WRITABLE`]: Interest::WRITABLE +/// [writable events]: event::Event::is_writable +/// [`READABLE`]: Interest::READABLE +/// [readable event]: event::Event::is_readable +/// [`is_read_closed`]: event::Event::is_read_closed +/// [`is_write_closed`]: event::Event::is_write_closed +/// +/// # Deregistering +/// +/// Both `Sender` and `Receiver` will deregister themselves when dropped, +/// **iff** the file descriptors are not duplicated (via [`dup(2)`]). +/// +/// [`dup(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/dup.html +/// +/// # Examples +/// +/// Simple example that writes data into the sending end and read it from the +/// receiving end. +/// +/// ``` +/// use std::io::{self, Read, Write}; +/// +/// use mio::{Poll, Events, Interest, Token}; +/// use mio::unix::pipe; +/// +/// // Unique tokens for the two ends of the channel. +/// const PIPE_RECV: Token = Token(0); +/// const PIPE_SEND: Token = Token(1); +/// +/// # fn main() -> io::Result<()> { +/// // Create our `Poll` instance and the `Events` container. +/// let mut poll = Poll::new()?; +/// let mut events = Events::with_capacity(8); +/// +/// // Create a new pipe. +/// let (mut sender, mut receiver) = pipe::new()?; +/// +/// // Register both ends of the channel. +/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?; +/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?; +/// +/// const MSG: &[u8; 11] = b"Hello world"; +/// +/// loop { +/// poll.poll(&mut events, None)?; +/// +/// for event in events.iter() { +/// match event.token() { +/// PIPE_SEND => sender.write(MSG) +/// .and_then(|n| if n != MSG.len() { +/// // We'll consider a short write an error in this +/// // example. NOTE: we can't use `write_all` with +/// // non-blocking I/O. +/// Err(io::ErrorKind::WriteZero.into()) +/// } else { +/// Ok(()) +/// })?, +/// PIPE_RECV => { +/// let mut buf = [0; 11]; +/// let n = receiver.read(&mut buf)?; +/// println!("received: {:?}", &buf[0..n]); +/// assert_eq!(n, MSG.len()); +/// assert_eq!(&buf, &*MSG); +/// return Ok(()); +/// }, +/// _ => unreachable!(), +/// } +/// } +/// } +/// # } +/// ``` +/// +/// Example that receives an event once the `Sender` is dropped. +/// +/// ``` +/// # use std::io; +/// # +/// # use mio::{Poll, Events, Interest, Token}; +/// # use mio::unix::pipe; +/// # +/// # const PIPE_RECV: Token = Token(0); +/// # const PIPE_SEND: Token = Token(1); +/// # +/// # fn main() -> io::Result<()> { +/// // Same setup as in the example above. 
+/// let mut poll = Poll::new()?; +/// let mut events = Events::with_capacity(8); +/// +/// let (mut sender, mut receiver) = pipe::new()?; +/// +/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?; +/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?; +/// +/// // Drop the sender. +/// drop(sender); +/// +/// poll.poll(&mut events, None)?; +/// +/// for event in events.iter() { +/// match event.token() { +/// PIPE_RECV if event.is_read_closed() => { +/// // Detected that the sender was dropped. +/// println!("Sender dropped!"); +/// return Ok(()); +/// }, +/// _ => unreachable!(), +/// } +/// } +/// # unreachable!(); +/// # } +/// ``` +pub fn new() -> io::Result<(Sender, Receiver)> { + let mut fds: [RawFd; 2] = [-1, -1]; + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd", + target_os = "illumos", + target_os = "redox", + ))] + unsafe { + if libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC | libc::O_NONBLOCK) != 0 { + return Err(io::Error::last_os_error()); + } + } + + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + unsafe { + // For platforms that don't have `pipe2(2)` we need to manually set the + // correct flags on the file descriptor. + if libc::pipe(fds.as_mut_ptr()) != 0 { + return Err(io::Error::last_os_error()); + } + + for fd in &fds { + if libc::fcntl(*fd, libc::F_SETFL, libc::O_NONBLOCK) != 0 + || libc::fcntl(*fd, libc::F_SETFD, libc::FD_CLOEXEC) != 0 + { + let err = io::Error::last_os_error(); + // Don't leak file descriptors. Can't handle closing error though. + let _ = libc::close(fds[0]); + let _ = libc::close(fds[1]); + return Err(err); + } + } + } + + #[cfg(not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "illumos", + target_os = "ios", + target_os = "linux", + target_os = "macos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "redox", + target_os = "tvos", + target_os = "watchos", + )))] + compile_error!("unsupported target for `mio::unix::pipe`"); + + // SAFETY: we just initialised the `fds` above. + let r = unsafe { Receiver::from_raw_fd(fds[0]) }; + let w = unsafe { Sender::from_raw_fd(fds[1]) }; + + Ok((w, r)) +} + +/// Sending end of an Unix pipe. +/// +/// See [`new`] for documentation, including examples. +#[derive(Debug)] +pub struct Sender { + inner: IoSource<File>, +} + +impl Sender { + /// Set the `Sender` into or out of non-blocking mode. + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + set_nonblocking(self.inner.as_raw_fd(), nonblocking) + } + + /// Execute an I/O operation ensuring that the socket receives more events + /// if it hits a [`WouldBlock`] error. + /// + /// # Notes + /// + /// This method is required to be called for **all** I/O operations to + /// ensure the user will receive events once the socket is ready again after + /// returning a [`WouldBlock`] error. + /// + /// [`WouldBlock`]: io::ErrorKind::WouldBlock + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use std::io; + /// use std::os::unix::io::AsRawFd; + /// use mio::unix::pipe; + /// + /// let (sender, receiver) = pipe::new()?; + /// + /// // Wait until the sender is writable... 
+ /// + /// // Write to the sender using a direct libc call, of course the + /// // `io::Write` implementation would be easier to use. + /// let buf = b"hello"; + /// let n = sender.try_io(|| { + /// let buf_ptr = &buf as *const _ as *const _; + /// let res = unsafe { libc::write(sender.as_raw_fd(), buf_ptr, buf.len()) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::write, the closure + /// // should return `WouldBlock` error. + /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("write {} bytes", n); + /// + /// // Wait until the receiver is readable... + /// + /// // Read from the receiver using a direct libc call, of course the + /// // `io::Read` implementation would be easier to use. + /// let mut buf = [0; 512]; + /// let n = receiver.try_io(|| { + /// let buf_ptr = &mut buf as *mut _ as *mut _; + /// let res = unsafe { libc::read(receiver.as_raw_fd(), buf_ptr, buf.len()) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::read, the closure + /// // should return `WouldBlock` error. + /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("read {} bytes", n); + /// # Ok(()) + /// # } + /// ``` + pub fn try_io<F, T>(&self, f: F) -> io::Result<T> + where + F: FnOnce() -> io::Result<T>, + { + self.inner.do_io(|_| f()) + } +} + +impl event::Source for Sender { + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.register(registry, token, interests) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.reregister(registry, token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + self.inner.deregister(registry) + } +} + +impl Write for Sender { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + self.inner.do_io(|mut sender| sender.write(buf)) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut sender| sender.write_vectored(bufs)) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.do_io(|mut sender| sender.flush()) + } +} + +impl Write for &Sender { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + self.inner.do_io(|mut sender| sender.write(buf)) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut sender| sender.write_vectored(bufs)) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.do_io(|mut sender| sender.flush()) + } +} + +/// # Notes +/// +/// The underlying pipe is **not** set to non-blocking. +impl From<ChildStdin> for Sender { + fn from(stdin: ChildStdin) -> Sender { + // Safety: `ChildStdin` is guaranteed to be a valid file descriptor. + unsafe { Sender::from_raw_fd(stdin.into_raw_fd()) } + } +} + +impl FromRawFd for Sender { + unsafe fn from_raw_fd(fd: RawFd) -> Sender { + Sender { + inner: IoSource::new(File::from_raw_fd(fd)), + } + } +} + +impl AsRawFd for Sender { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +impl IntoRawFd for Sender { + fn into_raw_fd(self) -> RawFd { + self.inner.into_inner().into_raw_fd() + } +} + +/// Receiving end of an Unix pipe. +/// +/// See [`new`] for documentation, including examples. +#[derive(Debug)] +pub struct Receiver { + inner: IoSource<File>, +} + +impl Receiver { + /// Set the `Receiver` into or out of non-blocking mode. 
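+    ///
+    /// A minimal illustrative sketch: after the `Sender` is dropped, the
+    /// `Receiver` can be switched back into blocking mode and drained with
+    /// ordinary blocking reads.
+    ///
+    /// ```
+    /// use std::io::{self, Read};
+    /// use mio::unix::pipe;
+    ///
+    /// # fn main() -> io::Result<()> {
+    /// let (sender, mut receiver) = pipe::new()?;
+    /// // `pipe::new` returns non-blocking ends; make the receiving end
+    /// // blocking again for a final drain.
+    /// receiver.set_nonblocking(false)?;
+    /// drop(sender);
+    ///
+    /// let mut buf = [0; 8];
+    /// // With the write end closed and no data pending, this blocking read
+    /// // returns immediately with 0 bytes (end of file).
+    /// let n = receiver.read(&mut buf)?;
+    /// assert_eq!(n, 0);
+    /// # Ok(())
+    /// # }
+    /// ```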
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + set_nonblocking(self.inner.as_raw_fd(), nonblocking) + } + + /// Execute an I/O operation ensuring that the socket receives more events + /// if it hits a [`WouldBlock`] error. + /// + /// # Notes + /// + /// This method is required to be called for **all** I/O operations to + /// ensure the user will receive events once the socket is ready again after + /// returning a [`WouldBlock`] error. + /// + /// [`WouldBlock`]: io::ErrorKind::WouldBlock + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn main() -> Result<(), Box<dyn Error>> { + /// use std::io; + /// use std::os::unix::io::AsRawFd; + /// use mio::unix::pipe; + /// + /// let (sender, receiver) = pipe::new()?; + /// + /// // Wait until the sender is writable... + /// + /// // Write to the sender using a direct libc call, of course the + /// // `io::Write` implementation would be easier to use. + /// let buf = b"hello"; + /// let n = sender.try_io(|| { + /// let buf_ptr = &buf as *const _ as *const _; + /// let res = unsafe { libc::write(sender.as_raw_fd(), buf_ptr, buf.len()) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::write, the closure + /// // should return `WouldBlock` error. + /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("write {} bytes", n); + /// + /// // Wait until the receiver is readable... + /// + /// // Read from the receiver using a direct libc call, of course the + /// // `io::Read` implementation would be easier to use. + /// let mut buf = [0; 512]; + /// let n = receiver.try_io(|| { + /// let buf_ptr = &mut buf as *mut _ as *mut _; + /// let res = unsafe { libc::read(receiver.as_raw_fd(), buf_ptr, buf.len()) }; + /// if res != -1 { + /// Ok(res as usize) + /// } else { + /// // If EAGAIN or EWOULDBLOCK is set by libc::read, the closure + /// // should return `WouldBlock` error. + /// Err(io::Error::last_os_error()) + /// } + /// })?; + /// eprintln!("read {} bytes", n); + /// # Ok(()) + /// # } + /// ``` + pub fn try_io<F, T>(&self, f: F) -> io::Result<T> + where + F: FnOnce() -> io::Result<T>, + { + self.inner.do_io(|_| f()) + } +} + +impl event::Source for Receiver { + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.register(registry, token, interests) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.reregister(registry, token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + self.inner.deregister(registry) + } +} + +impl Read for Receiver { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.do_io(|mut sender| sender.read(buf)) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut sender| sender.read_vectored(bufs)) + } +} + +impl Read for &Receiver { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.do_io(|mut sender| sender.read(buf)) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.inner.do_io(|mut sender| sender.read_vectored(bufs)) + } +} + +/// # Notes +/// +/// The underlying pipe is **not** set to non-blocking. +impl From<ChildStdout> for Receiver { + fn from(stdout: ChildStdout) -> Receiver { + // Safety: `ChildStdout` is guaranteed to be a valid file descriptor. 
+ unsafe { Receiver::from_raw_fd(stdout.into_raw_fd()) } + } +} + +/// # Notes +/// +/// The underlying pipe is **not** set to non-blocking. +impl From<ChildStderr> for Receiver { + fn from(stderr: ChildStderr) -> Receiver { + // Safety: `ChildStderr` is guaranteed to be a valid file descriptor. + unsafe { Receiver::from_raw_fd(stderr.into_raw_fd()) } + } +} + +impl FromRawFd for Receiver { + unsafe fn from_raw_fd(fd: RawFd) -> Receiver { + Receiver { + inner: IoSource::new(File::from_raw_fd(fd)), + } + } +} + +impl AsRawFd for Receiver { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +impl IntoRawFd for Receiver { + fn into_raw_fd(self) -> RawFd { + self.inner.into_inner().into_raw_fd() + } +} + +#[cfg(not(target_os = "illumos"))] +fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> { + let value = nonblocking as libc::c_int; + if unsafe { libc::ioctl(fd, libc::FIONBIO, &value) } == -1 { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } +} + +#[cfg(target_os = "illumos")] +fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> { + let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) }; + if flags < 0 { + return Err(io::Error::last_os_error()); + } + + let nflags = if nonblocking { + flags | libc::O_NONBLOCK + } else { + flags & !libc::O_NONBLOCK + }; + + if flags != nflags { + if unsafe { libc::fcntl(fd, libc::F_SETFL, nflags) } < 0 { + return Err(io::Error::last_os_error()); + } + } + + Ok(()) +} diff --git a/third_party/rust/mio/src/sys/unix/selector/epoll.rs b/third_party/rust/mio/src/sys/unix/selector/epoll.rs new file mode 100644 index 0000000000..f3e0988c6a --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/selector/epoll.rs @@ -0,0 +1,289 @@ +use crate::{Interest, Token}; + +use libc::{EPOLLET, EPOLLIN, EPOLLOUT, EPOLLPRI, EPOLLRDHUP}; +use std::os::unix::io::{AsRawFd, RawFd}; +#[cfg(debug_assertions)] +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::time::Duration; +use std::{cmp, i32, io, ptr}; + +/// Unique id for use as `SelectorId`. +#[cfg(debug_assertions)] +static NEXT_ID: AtomicUsize = AtomicUsize::new(1); + +#[derive(Debug)] +pub struct Selector { + #[cfg(debug_assertions)] + id: usize, + ep: RawFd, + #[cfg(debug_assertions)] + has_waker: AtomicBool, +} + +impl Selector { + pub fn new() -> io::Result<Selector> { + #[cfg(not(target_os = "android"))] + let res = syscall!(epoll_create1(libc::EPOLL_CLOEXEC)); + + // On Android < API level 16 `epoll_create1` is not defined, so use a + // raw system call. + // According to libuv, `EPOLL_CLOEXEC` is not defined on Android API < + // 21. But `EPOLL_CLOEXEC` is an alias for `O_CLOEXEC` on that platform, + // so we use it instead. + #[cfg(target_os = "android")] + let res = syscall!(syscall(libc::SYS_epoll_create1, libc::O_CLOEXEC)); + + let ep = match res { + Ok(ep) => ep as RawFd, + Err(err) => { + // When `epoll_create1` is not available fall back to use + // `epoll_create` followed by `fcntl`. + if let Some(libc::ENOSYS) = err.raw_os_error() { + match syscall!(epoll_create(1024)) { + Ok(ep) => match syscall!(fcntl(ep, libc::F_SETFD, libc::FD_CLOEXEC)) { + Ok(ep) => ep as RawFd, + Err(err) => { + // `fcntl` failed, cleanup `ep`. 
+ let _ = unsafe { libc::close(ep) }; + return Err(err); + } + }, + Err(err) => return Err(err), + } + } else { + return Err(err); + } + } + }; + + Ok(Selector { + #[cfg(debug_assertions)] + id: NEXT_ID.fetch_add(1, Ordering::Relaxed), + ep, + #[cfg(debug_assertions)] + has_waker: AtomicBool::new(false), + }) + } + + pub fn try_clone(&self) -> io::Result<Selector> { + syscall!(fcntl(self.ep, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|ep| Selector { + // It's the same selector, so we use the same id. + #[cfg(debug_assertions)] + id: self.id, + ep, + #[cfg(debug_assertions)] + has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)), + }) + } + + pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> { + // A bug in kernels < 2.6.37 makes timeouts larger than LONG_MAX / CONFIG_HZ + // (approx. 30 minutes with CONFIG_HZ=1200) effectively infinite on 32 bits + // architectures. The magic number is the same constant used by libuv. + #[cfg(target_pointer_width = "32")] + const MAX_SAFE_TIMEOUT: u128 = 1789569; + #[cfg(not(target_pointer_width = "32"))] + const MAX_SAFE_TIMEOUT: u128 = libc::c_int::max_value() as u128; + + let timeout = timeout + .map(|to| { + // `Duration::as_millis` truncates, so round up. This avoids + // turning sub-millisecond timeouts into a zero timeout, unless + // the caller explicitly requests that by specifying a zero + // timeout. + let to_ms = to + .checked_add(Duration::from_nanos(999_999)) + .unwrap_or(to) + .as_millis(); + cmp::min(MAX_SAFE_TIMEOUT, to_ms) as libc::c_int + }) + .unwrap_or(-1); + + events.clear(); + syscall!(epoll_wait( + self.ep, + events.as_mut_ptr(), + events.capacity() as i32, + timeout, + )) + .map(|n_events| { + // This is safe because `epoll_wait` ensures that `n_events` are + // assigned. + unsafe { events.set_len(n_events as usize) }; + }) + } + + pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> { + let mut event = libc::epoll_event { + events: interests_to_epoll(interests), + u64: usize::from(token) as u64, + #[cfg(target_os = "redox")] + _pad: 0, + }; + + syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_ADD, fd, &mut event)).map(|_| ()) + } + + pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> { + let mut event = libc::epoll_event { + events: interests_to_epoll(interests), + u64: usize::from(token) as u64, + #[cfg(target_os = "redox")] + _pad: 0, + }; + + syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_MOD, fd, &mut event)).map(|_| ()) + } + + pub fn deregister(&self, fd: RawFd) -> io::Result<()> { + syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_DEL, fd, ptr::null_mut())).map(|_| ()) + } + + #[cfg(debug_assertions)] + pub fn register_waker(&self) -> bool { + self.has_waker.swap(true, Ordering::AcqRel) + } +} + +cfg_io_source! 
{ + impl Selector { + #[cfg(debug_assertions)] + pub fn id(&self) -> usize { + self.id + } + } +} + +impl AsRawFd for Selector { + fn as_raw_fd(&self) -> RawFd { + self.ep + } +} + +impl Drop for Selector { + fn drop(&mut self) { + if let Err(err) = syscall!(close(self.ep)) { + error!("error closing epoll: {}", err); + } + } +} + +fn interests_to_epoll(interests: Interest) -> u32 { + let mut kind = EPOLLET; + + if interests.is_readable() { + kind = kind | EPOLLIN | EPOLLRDHUP; + } + + if interests.is_writable() { + kind |= EPOLLOUT; + } + + if interests.is_priority() { + kind |= EPOLLPRI; + } + + kind as u32 +} + +pub type Event = libc::epoll_event; +pub type Events = Vec<Event>; + +pub mod event { + use std::fmt; + + use crate::sys::Event; + use crate::Token; + + pub fn token(event: &Event) -> Token { + Token(event.u64 as usize) + } + + pub fn is_readable(event: &Event) -> bool { + (event.events as libc::c_int & libc::EPOLLIN) != 0 + || (event.events as libc::c_int & libc::EPOLLPRI) != 0 + } + + pub fn is_writable(event: &Event) -> bool { + (event.events as libc::c_int & libc::EPOLLOUT) != 0 + } + + pub fn is_error(event: &Event) -> bool { + (event.events as libc::c_int & libc::EPOLLERR) != 0 + } + + pub fn is_read_closed(event: &Event) -> bool { + // Both halves of the socket have closed + event.events as libc::c_int & libc::EPOLLHUP != 0 + // Socket has received FIN or called shutdown(SHUT_RD) + || (event.events as libc::c_int & libc::EPOLLIN != 0 + && event.events as libc::c_int & libc::EPOLLRDHUP != 0) + } + + pub fn is_write_closed(event: &Event) -> bool { + // Both halves of the socket have closed + event.events as libc::c_int & libc::EPOLLHUP != 0 + // Unix pipe write end has closed + || (event.events as libc::c_int & libc::EPOLLOUT != 0 + && event.events as libc::c_int & libc::EPOLLERR != 0) + // The other side (read end) of a Unix pipe has closed. + || event.events as libc::c_int == libc::EPOLLERR + } + + pub fn is_priority(event: &Event) -> bool { + (event.events as libc::c_int & libc::EPOLLPRI) != 0 + } + + pub fn is_aio(_: &Event) -> bool { + // Not supported in the kernel, only in libc. + false + } + + pub fn is_lio(_: &Event) -> bool { + // Not supported. + false + } + + pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result { + #[allow(clippy::trivially_copy_pass_by_ref)] + fn check_events(got: &u32, want: &libc::c_int) -> bool { + (*got as libc::c_int & want) != 0 + } + debug_detail!( + EventsDetails(u32), + check_events, + libc::EPOLLIN, + libc::EPOLLPRI, + libc::EPOLLOUT, + libc::EPOLLRDNORM, + libc::EPOLLRDBAND, + libc::EPOLLWRNORM, + libc::EPOLLWRBAND, + libc::EPOLLMSG, + libc::EPOLLERR, + libc::EPOLLHUP, + libc::EPOLLET, + libc::EPOLLRDHUP, + libc::EPOLLONESHOT, + #[cfg(target_os = "linux")] + libc::EPOLLEXCLUSIVE, + #[cfg(any(target_os = "android", target_os = "linux"))] + libc::EPOLLWAKEUP, + libc::EPOLL_CLOEXEC, + ); + + // Can't reference fields in packed structures. + let e_u64 = event.u64; + f.debug_struct("epoll_event") + .field("events", &EventsDetails(event.events)) + .field("u64", &e_u64) + .finish() + } +} + +#[cfg(target_os = "android")] +#[test] +fn assert_close_on_exec_flag() { + // This assertion need to be true for Selector::new. 
+ assert_eq!(libc::O_CLOEXEC, libc::EPOLL_CLOEXEC); +} diff --git a/third_party/rust/mio/src/sys/unix/selector/kqueue.rs b/third_party/rust/mio/src/sys/unix/selector/kqueue.rs new file mode 100644 index 0000000000..8e9aa4c479 --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/selector/kqueue.rs @@ -0,0 +1,860 @@ +use crate::{Interest, Token}; +use std::mem::{self, MaybeUninit}; +use std::ops::{Deref, DerefMut}; +use std::os::unix::io::{AsRawFd, RawFd}; +#[cfg(debug_assertions)] +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::time::Duration; +use std::{cmp, io, ptr, slice}; + +/// Unique id for use as `SelectorId`. +#[cfg(debug_assertions)] +static NEXT_ID: AtomicUsize = AtomicUsize::new(1); + +// Type of the `nchanges` and `nevents` parameters in the `kevent` function. +#[cfg(not(target_os = "netbsd"))] +type Count = libc::c_int; +#[cfg(target_os = "netbsd")] +type Count = libc::size_t; + +// Type of the `filter` field in the `kevent` structure. +#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))] +type Filter = libc::c_short; +#[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" +))] +type Filter = i16; +#[cfg(target_os = "netbsd")] +type Filter = u32; + +// Type of the `flags` field in the `kevent` structure. +#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))] +type Flags = libc::c_ushort; +#[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" +))] +type Flags = u16; +#[cfg(target_os = "netbsd")] +type Flags = u32; + +// Type of the `udata` field in the `kevent` structure. +#[cfg(not(target_os = "netbsd"))] +type UData = *mut libc::c_void; +#[cfg(target_os = "netbsd")] +type UData = libc::intptr_t; + +macro_rules! kevent { + ($id: expr, $filter: expr, $flags: expr, $data: expr) => { + libc::kevent { + ident: $id as libc::uintptr_t, + filter: $filter as Filter, + flags: $flags, + udata: $data as UData, + ..unsafe { mem::zeroed() } + } + }; +} + +#[derive(Debug)] +pub struct Selector { + #[cfg(debug_assertions)] + id: usize, + kq: RawFd, + #[cfg(debug_assertions)] + has_waker: AtomicBool, +} + +impl Selector { + pub fn new() -> io::Result<Selector> { + let kq = syscall!(kqueue())?; + let selector = Selector { + #[cfg(debug_assertions)] + id: NEXT_ID.fetch_add(1, Ordering::Relaxed), + kq, + #[cfg(debug_assertions)] + has_waker: AtomicBool::new(false), + }; + + syscall!(fcntl(kq, libc::F_SETFD, libc::FD_CLOEXEC))?; + Ok(selector) + } + + pub fn try_clone(&self) -> io::Result<Selector> { + syscall!(fcntl(self.kq, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|kq| Selector { + // It's the same selector, so we use the same id. + #[cfg(debug_assertions)] + id: self.id, + kq, + #[cfg(debug_assertions)] + has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)), + }) + } + + pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> { + let timeout = timeout.map(|to| libc::timespec { + tv_sec: cmp::min(to.as_secs(), libc::time_t::max_value() as u64) as libc::time_t, + // `Duration::subsec_nanos` is guaranteed to be less than one + // billion (the number of nanoseconds in a second), making the + // cast to i32 safe. The cast itself is needed for platforms + // where C's long is only 32 bits. 
+ tv_nsec: libc::c_long::from(to.subsec_nanos() as i32), + }); + let timeout = timeout + .as_ref() + .map(|s| s as *const _) + .unwrap_or(ptr::null_mut()); + + events.clear(); + syscall!(kevent( + self.kq, + ptr::null(), + 0, + events.as_mut_ptr(), + events.capacity() as Count, + timeout, + )) + .map(|n_events| { + // This is safe because `kevent` ensures that `n_events` are + // assigned. + unsafe { events.set_len(n_events as usize) }; + }) + } + + pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> { + let flags = libc::EV_CLEAR | libc::EV_RECEIPT | libc::EV_ADD; + // At most we need two changes, but maybe we only need 1. + let mut changes: [MaybeUninit<libc::kevent>; 2] = + [MaybeUninit::uninit(), MaybeUninit::uninit()]; + let mut n_changes = 0; + + if interests.is_writable() { + let kevent = kevent!(fd, libc::EVFILT_WRITE, flags, token.0); + changes[n_changes] = MaybeUninit::new(kevent); + n_changes += 1; + } + + if interests.is_readable() { + let kevent = kevent!(fd, libc::EVFILT_READ, flags, token.0); + changes[n_changes] = MaybeUninit::new(kevent); + n_changes += 1; + } + + // Older versions of macOS (OS X 10.11 and 10.10 have been witnessed) + // can return EPIPE when registering a pipe file descriptor where the + // other end has already disappeared. For example code that creates a + // pipe, closes a file descriptor, and then registers the other end will + // see an EPIPE returned from `register`. + // + // It also turns out that kevent will still report events on the file + // descriptor, telling us that it's readable/hup at least after we've + // done this registration. As a result we just ignore `EPIPE` here + // instead of propagating it. + // + // More info can be found at tokio-rs/mio#582. + let changes = unsafe { + // This is safe because we ensure that at least `n_changes` are in + // the array. + slice::from_raw_parts_mut(changes[0].as_mut_ptr(), n_changes) + }; + kevent_register(self.kq, changes, &[libc::EPIPE as i64]) + } + + pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> { + let flags = libc::EV_CLEAR | libc::EV_RECEIPT; + let write_flags = if interests.is_writable() { + flags | libc::EV_ADD + } else { + flags | libc::EV_DELETE + }; + let read_flags = if interests.is_readable() { + flags | libc::EV_ADD + } else { + flags | libc::EV_DELETE + }; + + let mut changes: [libc::kevent; 2] = [ + kevent!(fd, libc::EVFILT_WRITE, write_flags, token.0), + kevent!(fd, libc::EVFILT_READ, read_flags, token.0), + ]; + + // Since there is no way to check with which interests the fd was + // registered we modify both readable and write, adding it when required + // and removing it otherwise, ignoring the ENOENT error when it comes + // up. The ENOENT error informs us that a filter we're trying to remove + // wasn't there in first place, but we don't really care since our goal + // is accomplished. + // + // For the explanation of ignoring `EPIPE` see `register`. + kevent_register( + self.kq, + &mut changes, + &[libc::ENOENT as i64, libc::EPIPE as i64], + ) + } + + pub fn deregister(&self, fd: RawFd) -> io::Result<()> { + let flags = libc::EV_DELETE | libc::EV_RECEIPT; + let mut changes: [libc::kevent; 2] = [ + kevent!(fd, libc::EVFILT_WRITE, flags, 0), + kevent!(fd, libc::EVFILT_READ, flags, 0), + ]; + + // Since there is no way to check with which interests the fd was + // registered we remove both filters (readable and writeable) and ignore + // the ENOENT error when it comes up. 
The ENOENT error informs us that + // the filter wasn't there in first place, but we don't really care + // about that since our goal is to remove it. + kevent_register(self.kq, &mut changes, &[libc::ENOENT as i64]) + } + + #[cfg(debug_assertions)] + pub fn register_waker(&self) -> bool { + self.has_waker.swap(true, Ordering::AcqRel) + } + + // Used by `Waker`. + #[cfg(any( + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + pub fn setup_waker(&self, token: Token) -> io::Result<()> { + // First attempt to accept user space notifications. + let mut kevent = kevent!( + 0, + libc::EVFILT_USER, + libc::EV_ADD | libc::EV_CLEAR | libc::EV_RECEIPT, + token.0 + ); + + syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| { + if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 { + Err(io::Error::from_raw_os_error(kevent.data as i32)) + } else { + Ok(()) + } + }) + } + + // Used by `Waker`. + #[cfg(any( + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + pub fn wake(&self, token: Token) -> io::Result<()> { + let mut kevent = kevent!( + 0, + libc::EVFILT_USER, + libc::EV_ADD | libc::EV_RECEIPT, + token.0 + ); + kevent.fflags = libc::NOTE_TRIGGER; + + syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| { + if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 { + Err(io::Error::from_raw_os_error(kevent.data as i32)) + } else { + Ok(()) + } + }) + } +} + +/// Register `changes` with `kq`ueue. +fn kevent_register( + kq: RawFd, + changes: &mut [libc::kevent], + ignored_errors: &[i64], +) -> io::Result<()> { + syscall!(kevent( + kq, + changes.as_ptr(), + changes.len() as Count, + changes.as_mut_ptr(), + changes.len() as Count, + ptr::null(), + )) + .map(|_| ()) + .or_else(|err| { + // According to the manual page of FreeBSD: "When kevent() call fails + // with EINTR error, all changes in the changelist have been applied", + // so we can safely ignore it. + if err.raw_os_error() == Some(libc::EINTR) { + Ok(()) + } else { + Err(err) + } + }) + .and_then(|()| check_errors(changes, ignored_errors)) +} + +/// Check all events for possible errors, it returns the first error found. +fn check_errors(events: &[libc::kevent], ignored_errors: &[i64]) -> io::Result<()> { + for event in events { + // We can't use references to packed structures (in checking the ignored + // errors), so we need copy the data out before use. + let data = event.data as _; + // Check for the error flag, the actual error will be in the `data` + // field. + if (event.flags & libc::EV_ERROR != 0) && data != 0 && !ignored_errors.contains(&data) { + return Err(io::Error::from_raw_os_error(data as i32)); + } + } + Ok(()) +} + +cfg_io_source! 
{ + #[cfg(debug_assertions)] + impl Selector { + pub fn id(&self) -> usize { + self.id + } + } +} + +impl AsRawFd for Selector { + fn as_raw_fd(&self) -> RawFd { + self.kq + } +} + +impl Drop for Selector { + fn drop(&mut self) { + if let Err(err) = syscall!(close(self.kq)) { + error!("error closing kqueue: {}", err); + } + } +} + +pub type Event = libc::kevent; +pub struct Events(Vec<libc::kevent>); + +impl Events { + pub fn with_capacity(capacity: usize) -> Events { + Events(Vec::with_capacity(capacity)) + } +} + +impl Deref for Events { + type Target = Vec<libc::kevent>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Events { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +// `Events` cannot derive `Send` or `Sync` because of the +// `udata: *mut ::c_void` field in `libc::kevent`. However, `Events`'s public +// API treats the `udata` field as a `uintptr_t` which is `Send`. `Sync` is +// safe because with a `events: &Events` value, the only access to the `udata` +// field is through `fn token(event: &Event)` which cannot mutate the field. +unsafe impl Send for Events {} +unsafe impl Sync for Events {} + +pub mod event { + use std::fmt; + + use crate::sys::Event; + use crate::Token; + + use super::{Filter, Flags}; + + pub fn token(event: &Event) -> Token { + Token(event.udata as usize) + } + + pub fn is_readable(event: &Event) -> bool { + event.filter == libc::EVFILT_READ || { + #[cfg(any( + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + // Used by the `Awakener`. On platforms that use `eventfd` or a unix + // pipe it will emit a readable event so we'll fake that here as + // well. + { + event.filter == libc::EVFILT_USER + } + #[cfg(not(any( + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + )))] + { + false + } + } + } + + pub fn is_writable(event: &Event) -> bool { + event.filter == libc::EVFILT_WRITE + } + + pub fn is_error(event: &Event) -> bool { + (event.flags & libc::EV_ERROR) != 0 || + // When the read end of the socket is closed, EV_EOF is set on + // flags, and fflags contains the error if there is one. + (event.flags & libc::EV_EOF) != 0 && event.fflags != 0 + } + + pub fn is_read_closed(event: &Event) -> bool { + event.filter == libc::EVFILT_READ && event.flags & libc::EV_EOF != 0 + } + + pub fn is_write_closed(event: &Event) -> bool { + event.filter == libc::EVFILT_WRITE && event.flags & libc::EV_EOF != 0 + } + + pub fn is_priority(_: &Event) -> bool { + // kqueue doesn't have priority indicators. + false + } + + #[allow(unused_variables)] // `event` is not used on some platforms. + pub fn is_aio(event: &Event) -> bool { + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + { + event.filter == libc::EVFILT_AIO + } + #[cfg(not(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + )))] + { + false + } + } + + #[allow(unused_variables)] // `event` is only used on FreeBSD. 
+ pub fn is_lio(event: &Event) -> bool { + #[cfg(target_os = "freebsd")] + { + event.filter == libc::EVFILT_LIO + } + #[cfg(not(target_os = "freebsd"))] + { + false + } + } + + pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result { + debug_detail!( + FilterDetails(Filter), + PartialEq::eq, + libc::EVFILT_READ, + libc::EVFILT_WRITE, + libc::EVFILT_AIO, + libc::EVFILT_VNODE, + libc::EVFILT_PROC, + libc::EVFILT_SIGNAL, + libc::EVFILT_TIMER, + #[cfg(target_os = "freebsd")] + libc::EVFILT_PROCDESC, + #[cfg(any( + target_os = "freebsd", + target_os = "dragonfly", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + libc::EVFILT_FS, + #[cfg(target_os = "freebsd")] + libc::EVFILT_LIO, + #[cfg(any( + target_os = "freebsd", + target_os = "dragonfly", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + libc::EVFILT_USER, + #[cfg(target_os = "freebsd")] + libc::EVFILT_SENDFILE, + #[cfg(target_os = "freebsd")] + libc::EVFILT_EMPTY, + #[cfg(target_os = "dragonfly")] + libc::EVFILT_EXCEPT, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::EVFILT_MACHPORT, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::EVFILT_VM, + ); + + #[allow(clippy::trivially_copy_pass_by_ref)] + fn check_flag(got: &Flags, want: &Flags) -> bool { + (got & want) != 0 + } + debug_detail!( + FlagsDetails(Flags), + check_flag, + libc::EV_ADD, + libc::EV_DELETE, + libc::EV_ENABLE, + libc::EV_DISABLE, + libc::EV_ONESHOT, + libc::EV_CLEAR, + libc::EV_RECEIPT, + libc::EV_DISPATCH, + #[cfg(target_os = "freebsd")] + libc::EV_DROP, + libc::EV_FLAG1, + libc::EV_ERROR, + libc::EV_EOF, + libc::EV_SYSFLAGS, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::EV_FLAG0, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::EV_POLL, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::EV_OOBAND, + #[cfg(target_os = "dragonfly")] + libc::EV_NODATA, + ); + + #[allow(clippy::trivially_copy_pass_by_ref)] + fn check_fflag(got: &u32, want: &u32) -> bool { + (got & want) != 0 + } + debug_detail!( + FflagsDetails(u32), + check_fflag, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + libc::NOTE_TRIGGER, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + libc::NOTE_FFNOP, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + libc::NOTE_FFAND, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + libc::NOTE_FFOR, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + libc::NOTE_FFCOPY, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + libc::NOTE_FFCTRLMASK, + 
#[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + libc::NOTE_FFLAGSMASK, + libc::NOTE_LOWAT, + libc::NOTE_DELETE, + libc::NOTE_WRITE, + #[cfg(target_os = "dragonfly")] + libc::NOTE_OOB, + #[cfg(target_os = "openbsd")] + libc::NOTE_EOF, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_EXTEND, + libc::NOTE_ATTRIB, + libc::NOTE_LINK, + libc::NOTE_RENAME, + libc::NOTE_REVOKE, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_NONE, + #[cfg(any(target_os = "openbsd"))] + libc::NOTE_TRUNCATE, + libc::NOTE_EXIT, + libc::NOTE_FORK, + libc::NOTE_EXEC, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_SIGNAL, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_EXITSTATUS, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_EXIT_DETAIL, + libc::NOTE_PDATAMASK, + libc::NOTE_PCTRLMASK, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd", + ))] + libc::NOTE_TRACK, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd", + ))] + libc::NOTE_TRACKERR, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd", + ))] + libc::NOTE_CHILD, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_EXIT_DETAIL_MASK, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_EXIT_DECRYPTFAIL, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_EXIT_MEMORY, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_EXIT_CSERROR, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_VM_PRESSURE, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_VM_PRESSURE_TERMINATE, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_VM_PRESSURE_SUDDEN_TERMINATE, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_VM_ERROR, + #[cfg(any( + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_SECONDS, + #[cfg(any(target_os = "freebsd"))] + libc::NOTE_MSECONDS, + #[cfg(any( + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_USECONDS, + #[cfg(any( + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_NSECONDS, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_ABSOLUTE, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" 
+ ))] + libc::NOTE_LEEWAY, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_CRITICAL, + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos" + ))] + libc::NOTE_BACKGROUND, + ); + + // Can't reference fields in packed structures. + let ident = event.ident; + let data = event.data; + let udata = event.udata; + f.debug_struct("kevent") + .field("ident", &ident) + .field("filter", &FilterDetails(event.filter)) + .field("flags", &FlagsDetails(event.flags)) + .field("fflags", &FflagsDetails(event.fflags)) + .field("data", &data) + .field("udata", &udata) + .finish() + } +} + +#[test] +#[cfg(feature = "os-ext")] +fn does_not_register_rw() { + use crate::unix::SourceFd; + use crate::{Poll, Token}; + + let kq = unsafe { libc::kqueue() }; + let mut kqf = SourceFd(&kq); + let poll = Poll::new().unwrap(); + + // Registering kqueue fd will fail if write is requested (On anything but + // some versions of macOS). + poll.registry() + .register(&mut kqf, Token(1234), Interest::READABLE) + .unwrap(); +} diff --git a/third_party/rust/mio/src/sys/unix/selector/mod.rs b/third_party/rust/mio/src/sys/unix/selector/mod.rs new file mode 100644 index 0000000000..3ccbdeadfb --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/selector/mod.rs @@ -0,0 +1,49 @@ +#[cfg(any( + target_os = "android", + target_os = "illumos", + target_os = "linux", + target_os = "redox", +))] +mod epoll; + +#[cfg(any( + target_os = "android", + target_os = "illumos", + target_os = "linux", + target_os = "redox", +))] +pub(crate) use self::epoll::{event, Event, Events, Selector}; + +#[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "tvos", + target_os = "watchos", +))] +mod kqueue; + +#[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "tvos", + target_os = "watchos", +))] +pub(crate) use self::kqueue::{event, Event, Events, Selector}; + +/// Lowest file descriptor used in `Selector::try_clone`. +/// +/// # Notes +/// +/// Usually fds 0, 1 and 2 are standard in, out and error. Some application +/// blindly assume this to be true, which means using any one of those a select +/// could result in some interesting and unexpected errors. Avoid that by using +/// an fd that doesn't have a pre-determined usage. +const LOWEST_FD: libc::c_int = 3; diff --git a/third_party/rust/mio/src/sys/unix/sourcefd.rs b/third_party/rust/mio/src/sys/unix/sourcefd.rs new file mode 100644 index 0000000000..84e776d21d --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/sourcefd.rs @@ -0,0 +1,116 @@ +use crate::{event, Interest, Registry, Token}; + +use std::io; +use std::os::unix::io::RawFd; + +/// Adapter for [`RawFd`] providing an [`event::Source`] implementation. +/// +/// `SourceFd` enables registering any type with an FD with [`Poll`]. +/// +/// While only implementations for TCP and UDP are provided, Mio supports +/// registering any FD that can be registered with the underlying OS selector. +/// `SourceFd` provides the necessary bridge. +/// +/// Note that `SourceFd` takes a `&RawFd`. This is because `SourceFd` **does +/// not** take ownership of the FD. Specifically, it will not manage any +/// lifecycle related operations, such as closing the FD on drop. 
It is expected +/// that the `SourceFd` is constructed right before a call to +/// [`Registry::register`]. See the examples for more detail. +/// +/// [`event::Source`]: ../event/trait.Source.html +/// [`Poll`]: ../struct.Poll.html +/// [`Registry::register`]: ../struct.Registry.html#method.register +/// +/// # Examples +/// +/// Basic usage. +/// +#[cfg_attr( + all(feature = "os-poll", feature = "net", feature = "os-ext"), + doc = "```" +)] +#[cfg_attr( + not(all(feature = "os-poll", feature = "net", feature = "os-ext")), + doc = "```ignore" +)] +/// # use std::error::Error; +/// # fn main() -> Result<(), Box<dyn Error>> { +/// use mio::{Interest, Poll, Token}; +/// use mio::unix::SourceFd; +/// +/// use std::os::unix::io::AsRawFd; +/// use std::net::TcpListener; +/// +/// // Bind a std listener +/// let listener = TcpListener::bind("127.0.0.1:0")?; +/// +/// let poll = Poll::new()?; +/// +/// // Register the listener +/// poll.registry().register( +/// &mut SourceFd(&listener.as_raw_fd()), +/// Token(0), +/// Interest::READABLE)?; +/// # Ok(()) +/// # } +/// ``` +/// +/// Implementing [`event::Source`] for a custom type backed by a [`RawFd`]. +/// +#[cfg_attr(all(feature = "os-poll", feature = "os-ext"), doc = "```")] +#[cfg_attr(not(all(feature = "os-poll", feature = "os-ext")), doc = "```ignore")] +/// use mio::{event, Interest, Registry, Token}; +/// use mio::unix::SourceFd; +/// +/// use std::os::unix::io::RawFd; +/// use std::io; +/// +/// # #[allow(dead_code)] +/// pub struct MyIo { +/// fd: RawFd, +/// } +/// +/// impl event::Source for MyIo { +/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest) +/// -> io::Result<()> +/// { +/// SourceFd(&self.fd).register(registry, token, interests) +/// } +/// +/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest) +/// -> io::Result<()> +/// { +/// SourceFd(&self.fd).reregister(registry, token, interests) +/// } +/// +/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> { +/// SourceFd(&self.fd).deregister(registry) +/// } +/// } +/// ``` +#[derive(Debug)] +pub struct SourceFd<'a>(pub &'a RawFd); + +impl<'a> event::Source for SourceFd<'a> { + fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + registry.selector().register(*self.0, token, interests) + } + + fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + registry.selector().reregister(*self.0, token, interests) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + registry.selector().deregister(*self.0) + } +} diff --git a/third_party/rust/mio/src/sys/unix/tcp.rs b/third_party/rust/mio/src/sys/unix/tcp.rs new file mode 100644 index 0000000000..48cf8d9efa --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/tcp.rs @@ -0,0 +1,113 @@ +use std::convert::TryInto; +use std::io; +use std::mem::{size_of, MaybeUninit}; +use std::net::{self, SocketAddr}; +use std::os::unix::io::{AsRawFd, FromRawFd}; + +use crate::sys::unix::net::{new_socket, socket_addr, to_socket_addr}; + +pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<libc::c_int> { + let domain = match address { + SocketAddr::V4(_) => libc::AF_INET, + SocketAddr::V6(_) => libc::AF_INET6, + }; + new_socket(domain, libc::SOCK_STREAM) +} + +pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> { + let (raw_addr, raw_addr_length) = socket_addr(&addr); + syscall!(bind(socket.as_raw_fd(), 
raw_addr.as_ptr(), raw_addr_length))?; + Ok(()) +} + +pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> { + let (raw_addr, raw_addr_length) = socket_addr(&addr); + + match syscall!(connect( + socket.as_raw_fd(), + raw_addr.as_ptr(), + raw_addr_length + )) { + Err(err) if err.raw_os_error() != Some(libc::EINPROGRESS) => Err(err), + _ => Ok(()), + } +} + +pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> { + let backlog = backlog.try_into().unwrap_or(i32::max_value()); + syscall!(listen(socket.as_raw_fd(), backlog))?; + Ok(()) +} + +pub(crate) fn set_reuseaddr(socket: &net::TcpListener, reuseaddr: bool) -> io::Result<()> { + let val: libc::c_int = i32::from(reuseaddr); + syscall!(setsockopt( + socket.as_raw_fd(), + libc::SOL_SOCKET, + libc::SO_REUSEADDR, + &val as *const libc::c_int as *const libc::c_void, + size_of::<libc::c_int>() as libc::socklen_t, + ))?; + Ok(()) +} + +pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> { + let mut addr: MaybeUninit<libc::sockaddr_storage> = MaybeUninit::uninit(); + let mut length = size_of::<libc::sockaddr_storage>() as libc::socklen_t; + + // On platforms that support it we can use `accept4(2)` to set `NONBLOCK` + // and `CLOEXEC` in the call to accept the connection. + #[cfg(any( + // Android x86's seccomp profile forbids calls to `accept4(2)` + // See https://github.com/tokio-rs/mio/issues/1445 for details + all(not(target_arch="x86"), target_os = "android"), + target_os = "dragonfly", + target_os = "freebsd", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd", + ))] + let stream = { + syscall!(accept4( + listener.as_raw_fd(), + addr.as_mut_ptr() as *mut _, + &mut length, + libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK, + )) + .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) }) + }?; + + // But not all platforms have the `accept4(2)` call. Luckily BSD (derived) + // OSes inherit the non-blocking flag from the listener, so we just have to + // set `CLOEXEC`. + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "redox", + target_os = "tvos", + target_os = "watchos", + all(target_arch = "x86", target_os = "android"), + ))] + let stream = { + syscall!(accept( + listener.as_raw_fd(), + addr.as_mut_ptr() as *mut _, + &mut length + )) + .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) }) + .and_then(|s| { + syscall!(fcntl(s.as_raw_fd(), libc::F_SETFD, libc::FD_CLOEXEC))?; + + // See https://github.com/tokio-rs/mio/issues/1450 + #[cfg(all(target_arch = "x86", target_os = "android"))] + syscall!(fcntl(s.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK))?; + + Ok(s) + }) + }?; + + // This is safe because `accept` calls above ensures the address + // initialised. 
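// [Editor's note] Not part of the upstream diff: a minimal sketch, from a user
// crate's perspective, of how the `EINPROGRESS` handling in `connect` above is
// meant to be used. `TcpStream::connect` returns before the handshake finishes;
// the stream is registered for WRITABLE and the outcome is checked once that
// event arrives (mio's documentation also suggests checking `peer_addr()` for a
// still-pending attempt). The address below is hypothetical.
fn connect_sketch() -> std::io::Result<()> {
    use mio::net::TcpStream;
    use mio::{Events, Interest, Poll, Token};

    let mut poll = Poll::new()?;
    let mut events = Events::with_capacity(8);

    let addr = "127.0.0.1:9000".parse().expect("valid socket address");
    let mut stream = TcpStream::connect(addr)?; // non-blocking, may still be connecting
    poll.registry()
        .register(&mut stream, Token(0), Interest::WRITABLE)?;

    // Wait until the connection attempt completes, successfully or not.
    poll.poll(&mut events, None)?;

    // A WRITABLE event only means the attempt finished; check the result.
    match stream.take_error()? {
        None => Ok(()),
        Some(err) => Err(err),
    }
}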
+ unsafe { to_socket_addr(addr.as_ptr()) }.map(|addr| (stream, addr)) +} diff --git a/third_party/rust/mio/src/sys/unix/udp.rs b/third_party/rust/mio/src/sys/unix/udp.rs new file mode 100644 index 0000000000..843ae885cb --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/udp.rs @@ -0,0 +1,31 @@ +use crate::sys::unix::net::{new_ip_socket, socket_addr}; + +use std::io; +use std::mem; +use std::net::{self, SocketAddr}; +use std::os::unix::io::{AsRawFd, FromRawFd}; + +pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> { + let fd = new_ip_socket(addr, libc::SOCK_DGRAM)?; + let socket = unsafe { net::UdpSocket::from_raw_fd(fd) }; + + let (raw_addr, raw_addr_length) = socket_addr(&addr); + syscall!(bind(fd, raw_addr.as_ptr(), raw_addr_length))?; + + Ok(socket) +} + +pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> { + let mut optval: libc::c_int = 0; + let mut optlen = mem::size_of::<libc::c_int>() as libc::socklen_t; + + syscall!(getsockopt( + socket.as_raw_fd(), + libc::IPPROTO_IPV6, + libc::IPV6_V6ONLY, + &mut optval as *mut _ as *mut _, + &mut optlen, + ))?; + + Ok(optval != 0) +} diff --git a/third_party/rust/mio/src/sys/unix/uds/datagram.rs b/third_party/rust/mio/src/sys/unix/uds/datagram.rs new file mode 100644 index 0000000000..a5ada72ef2 --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/uds/datagram.rs @@ -0,0 +1,56 @@ +use super::{socket_addr, SocketAddr}; +use crate::sys::unix::net::new_socket; + +use std::io; +use std::os::unix::io::{AsRawFd, FromRawFd}; +use std::os::unix::net; +use std::path::Path; + +pub(crate) fn bind(path: &Path) -> io::Result<net::UnixDatagram> { + let (sockaddr, socklen) = socket_addr(path)?; + let sockaddr = &sockaddr as *const libc::sockaddr_un as *const _; + + let socket = unbound()?; + syscall!(bind(socket.as_raw_fd(), sockaddr, socklen))?; + + Ok(socket) +} + +pub(crate) fn unbound() -> io::Result<net::UnixDatagram> { + let fd = new_socket(libc::AF_UNIX, libc::SOCK_DGRAM)?; + Ok(unsafe { net::UnixDatagram::from_raw_fd(fd) }) +} + +pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> { + super::pair(libc::SOCK_DGRAM) +} + +pub(crate) fn local_addr(socket: &net::UnixDatagram) -> io::Result<SocketAddr> { + super::local_addr(socket.as_raw_fd()) +} + +pub(crate) fn peer_addr(socket: &net::UnixDatagram) -> io::Result<SocketAddr> { + super::peer_addr(socket.as_raw_fd()) +} + +pub(crate) fn recv_from( + socket: &net::UnixDatagram, + dst: &mut [u8], +) -> io::Result<(usize, SocketAddr)> { + let mut count = 0; + let socketaddr = SocketAddr::new(|sockaddr, socklen| { + syscall!(recvfrom( + socket.as_raw_fd(), + dst.as_mut_ptr() as *mut _, + dst.len(), + 0, + sockaddr, + socklen, + )) + .map(|c| { + count = c; + c as libc::c_int + }) + })?; + Ok((count as usize, socketaddr)) +} diff --git a/third_party/rust/mio/src/sys/unix/uds/listener.rs b/third_party/rust/mio/src/sys/unix/uds/listener.rs new file mode 100644 index 0000000000..52387a5441 --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/uds/listener.rs @@ -0,0 +1,93 @@ +use super::socket_addr; +use crate::net::{SocketAddr, UnixStream}; +use crate::sys::unix::net::new_socket; +use std::os::unix::io::{AsRawFd, FromRawFd}; +use std::os::unix::net; +use std::path::Path; +use std::{io, mem}; + +pub(crate) fn bind(path: &Path) -> io::Result<net::UnixListener> { + let (sockaddr, socklen) = socket_addr(path)?; + let sockaddr = &sockaddr as *const libc::sockaddr_un as *const libc::sockaddr; + + let fd = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?; + let socket = 
unsafe { net::UnixListener::from_raw_fd(fd) }; + syscall!(bind(fd, sockaddr, socklen))?; + syscall!(listen(fd, 1024))?; + + Ok(socket) +} + +pub(crate) fn accept(listener: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> { + let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed(); + + // This is safe to assume because a `libc::sockaddr_un` filled with `0` + // bytes is properly initialized. + // + // `0` is a valid value for `sockaddr_un::sun_family`; it is + // `libc::AF_UNSPEC`. + // + // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an + // abstract path. + let mut sockaddr = unsafe { sockaddr.assume_init() }; + + sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t; + let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t; + + #[cfg(not(any( + target_os = "ios", + target_os = "macos", + target_os = "netbsd", + target_os = "redox", + target_os = "tvos", + target_os = "watchos", + // Android x86's seccomp profile forbids calls to `accept4(2)` + // See https://github.com/tokio-rs/mio/issues/1445 for details + all(target_arch = "x86", target_os = "android"), + )))] + let socket = { + let flags = libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC; + syscall!(accept4( + listener.as_raw_fd(), + &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr, + &mut socklen, + flags + )) + .map(|socket| unsafe { net::UnixStream::from_raw_fd(socket) }) + }; + + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "netbsd", + target_os = "redox", + target_os = "tvos", + target_os = "watchos", + all(target_arch = "x86", target_os = "android") + ))] + let socket = syscall!(accept( + listener.as_raw_fd(), + &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr, + &mut socklen, + )) + .and_then(|socket| { + // Ensure the socket is closed if either of the `fcntl` calls + // error below. + let s = unsafe { net::UnixStream::from_raw_fd(socket) }; + syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC))?; + + // See https://github.com/tokio-rs/mio/issues/1450 + #[cfg(all(target_arch = "x86", target_os = "android"))] + syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK))?; + + Ok(s) + }); + + socket + .map(UnixStream::from_std) + .map(|stream| (stream, SocketAddr::from_parts(sockaddr, socklen))) +} + +pub(crate) fn local_addr(listener: &net::UnixListener) -> io::Result<SocketAddr> { + super::local_addr(listener.as_raw_fd()) +} diff --git a/third_party/rust/mio/src/sys/unix/uds/mod.rs b/third_party/rust/mio/src/sys/unix/uds/mod.rs new file mode 100644 index 0000000000..ed355ce4ed --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/uds/mod.rs @@ -0,0 +1,159 @@ +mod socketaddr; +pub use self::socketaddr::SocketAddr; + +/// Get the `sun_path` field offset of `sockaddr_un` for the target OS. +/// +/// On Linux, this function equates to the same value as +/// `size_of::<sa_family_t>()`, but some other implementations include +/// other fields before `sun_path`, so the expression more portably +/// describes the size of the address structure. +pub(in crate::sys) fn path_offset(sockaddr: &libc::sockaddr_un) -> usize { + let base = sockaddr as *const _ as usize; + let path = &sockaddr.sun_path as *const _ as usize; + path - base +} + +cfg_os_poll! 
{ + use std::cmp::Ordering; + use std::os::unix::ffi::OsStrExt; + use std::os::unix::io::{RawFd, FromRawFd}; + use std::path::Path; + use std::{io, mem}; + + pub(crate) mod datagram; + pub(crate) mod listener; + pub(crate) mod stream; + + pub(in crate::sys) fn socket_addr(path: &Path) -> io::Result<(libc::sockaddr_un, libc::socklen_t)> { + let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed(); + + // This is safe to assume because a `libc::sockaddr_un` filled with `0` + // bytes is properly initialized. + // + // `0` is a valid value for `sockaddr_un::sun_family`; it is + // `libc::AF_UNSPEC`. + // + // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an + // abstract path. + let mut sockaddr = unsafe { sockaddr.assume_init() }; + + sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t; + + let bytes = path.as_os_str().as_bytes(); + match (bytes.first(), bytes.len().cmp(&sockaddr.sun_path.len())) { + // Abstract paths don't need a null terminator + (Some(&0), Ordering::Greater) => { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "path must be no longer than libc::sockaddr_un.sun_path", + )); + } + (_, Ordering::Greater) | (_, Ordering::Equal) => { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "path must be shorter than libc::sockaddr_un.sun_path", + )); + } + _ => {} + } + + for (dst, src) in sockaddr.sun_path.iter_mut().zip(bytes.iter()) { + *dst = *src as libc::c_char; + } + + let offset = path_offset(&sockaddr); + let mut socklen = offset + bytes.len(); + + match bytes.first() { + // The struct has already been zeroes so the null byte for pathname + // addresses is already there. + Some(&0) | None => {} + Some(_) => socklen += 1, + } + + Ok((sockaddr, socklen as libc::socklen_t)) + } + + fn pair<T>(flags: libc::c_int) -> io::Result<(T, T)> + where T: FromRawFd, + { + #[cfg(not(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + )))] + let flags = flags | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC; + + let mut fds = [-1; 2]; + syscall!(socketpair(libc::AF_UNIX, flags, 0, fds.as_mut_ptr()))?; + let pair = unsafe { (T::from_raw_fd(fds[0]), T::from_raw_fd(fds[1])) }; + + // Darwin doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC. + // + // In order to set those flags, additional `fcntl` sys calls must be + // performed. If a `fnctl` fails after the sockets have been created, + // the file descriptors will leak. Creating `pair` above ensures that if + // there is an error, the file descriptors are closed. + #[cfg(any( + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", + ))] + { + syscall!(fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK))?; + syscall!(fcntl(fds[0], libc::F_SETFD, libc::FD_CLOEXEC))?; + syscall!(fcntl(fds[1], libc::F_SETFL, libc::O_NONBLOCK))?; + syscall!(fcntl(fds[1], libc::F_SETFD, libc::FD_CLOEXEC))?; + } + Ok(pair) + } + + // The following functions can't simply be replaced with a call to + // `net::UnixDatagram` because of our `SocketAddr` type. 
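// [Editor's note] Not part of the upstream diff: a minimal sketch, from a user
// crate's perspective, of the pathname case handled by `socket_addr` above (an
// address whose first byte is NUL would instead select the Linux abstract
// namespace). The socket path below is hypothetical.
fn uds_listener_sketch() -> std::io::Result<()> {
    use mio::net::UnixListener;
    use mio::{Interest, Poll, Token};

    let path = std::env::temp_dir().join("mio-example.sock");
    // Binding fails with "address in use" if the socket file already exists.
    let _ = std::fs::remove_file(&path);

    let mut listener = UnixListener::bind(&path)?;
    let poll = Poll::new()?;
    poll.registry()
        .register(&mut listener, Token(0), Interest::READABLE)?;
    // `listener.accept()` now returns `WouldBlock` until a client connects.
    Ok(())
}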
+ + fn local_addr(socket: RawFd) -> io::Result<SocketAddr> { + SocketAddr::new(|sockaddr, socklen| syscall!(getsockname(socket, sockaddr, socklen))) + } + + fn peer_addr(socket: RawFd) -> io::Result<SocketAddr> { + SocketAddr::new(|sockaddr, socklen| syscall!(getpeername(socket, sockaddr, socklen))) + } + + #[cfg(test)] + mod tests { + use super::{path_offset, socket_addr}; + use std::path::Path; + use std::str; + + #[test] + fn pathname_address() { + const PATH: &str = "./foo/bar.txt"; + const PATH_LEN: usize = 13; + + // Pathname addresses do have a null terminator, so `socklen` is + // expected to be `PATH_LEN` + `offset` + 1. + let path = Path::new(PATH); + let (sockaddr, actual) = socket_addr(path).unwrap(); + let offset = path_offset(&sockaddr); + let expected = PATH_LEN + offset + 1; + assert_eq!(expected as libc::socklen_t, actual) + } + + #[test] + fn abstract_address() { + const PATH: &[u8] = &[0, 116, 111, 107, 105, 111]; + const PATH_LEN: usize = 6; + + // Abstract addresses do not have a null terminator, so `socklen` is + // expected to be `PATH_LEN` + `offset`. + let abstract_path = str::from_utf8(PATH).unwrap(); + let path = Path::new(abstract_path); + let (sockaddr, actual) = socket_addr(path).unwrap(); + let offset = path_offset(&sockaddr); + let expected = PATH_LEN + offset; + assert_eq!(expected as libc::socklen_t, actual) + } + } +} diff --git a/third_party/rust/mio/src/sys/unix/uds/socketaddr.rs b/third_party/rust/mio/src/sys/unix/uds/socketaddr.rs new file mode 100644 index 0000000000..4c7c411618 --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/uds/socketaddr.rs @@ -0,0 +1,130 @@ +use super::path_offset; +use std::ffi::OsStr; +use std::os::unix::ffi::OsStrExt; +use std::path::Path; +use std::{ascii, fmt}; + +/// An address associated with a `mio` specific Unix socket. +/// +/// This is implemented instead of imported from [`net::SocketAddr`] because +/// there is no way to create a [`net::SocketAddr`]. One must be returned by +/// [`accept`], so this is returned instead. +/// +/// [`net::SocketAddr`]: std::os::unix::net::SocketAddr +/// [`accept`]: #method.accept +pub struct SocketAddr { + sockaddr: libc::sockaddr_un, + socklen: libc::socklen_t, +} + +struct AsciiEscaped<'a>(&'a [u8]); + +enum AddressKind<'a> { + Unnamed, + Pathname(&'a Path), + Abstract(&'a [u8]), +} + +impl SocketAddr { + fn address(&self) -> AddressKind<'_> { + let offset = path_offset(&self.sockaddr); + // Don't underflow in `len` below. + if (self.socklen as usize) < offset { + return AddressKind::Unnamed; + } + let len = self.socklen as usize - offset; + let path = unsafe { &*(&self.sockaddr.sun_path as *const [libc::c_char] as *const [u8]) }; + + // macOS seems to return a len of 16 and a zeroed sun_path for unnamed addresses + if len == 0 + || (cfg!(not(any(target_os = "linux", target_os = "android"))) + && self.sockaddr.sun_path[0] == 0) + { + AddressKind::Unnamed + } else if self.sockaddr.sun_path[0] == 0 { + AddressKind::Abstract(&path[1..len]) + } else { + AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref()) + } + } +} + +cfg_os_poll! 
{ + use std::{io, mem}; + + impl SocketAddr { + pub(crate) fn new<F>(f: F) -> io::Result<SocketAddr> + where + F: FnOnce(*mut libc::sockaddr, &mut libc::socklen_t) -> io::Result<libc::c_int>, + { + let mut sockaddr = { + let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed(); + unsafe { sockaddr.assume_init() } + }; + + let raw_sockaddr = &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr; + let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t; + + f(raw_sockaddr, &mut socklen)?; + Ok(SocketAddr::from_parts(sockaddr, socklen)) + } + + pub(crate) fn from_parts(sockaddr: libc::sockaddr_un, socklen: libc::socklen_t) -> SocketAddr { + SocketAddr { sockaddr, socklen } + } + + /// Returns `true` if the address is unnamed. + /// + /// Documentation reflected in [`SocketAddr`] + /// + /// [`SocketAddr`]: std::os::unix::net::SocketAddr + pub fn is_unnamed(&self) -> bool { + matches!(self.address(), AddressKind::Unnamed) + } + + /// Returns the contents of this address if it is a `pathname` address. + /// + /// Documentation reflected in [`SocketAddr`] + /// + /// [`SocketAddr`]: std::os::unix::net::SocketAddr + pub fn as_pathname(&self) -> Option<&Path> { + if let AddressKind::Pathname(path) = self.address() { + Some(path) + } else { + None + } + } + + /// Returns the contents of this address if it is an abstract namespace + /// without the leading null byte. + // Link to std::os::unix::net::SocketAddr pending + // https://github.com/rust-lang/rust/issues/85410. + pub fn as_abstract_namespace(&self) -> Option<&[u8]> { + if let AddressKind::Abstract(path) = self.address() { + Some(path) + } else { + None + } + } + } +} + +impl fmt::Debug for SocketAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.address() { + AddressKind::Unnamed => write!(fmt, "(unnamed)"), + AddressKind::Abstract(name) => write!(fmt, "{} (abstract)", AsciiEscaped(name)), + AddressKind::Pathname(path) => write!(fmt, "{:?} (pathname)", path), + } + } +} + +impl<'a> fmt::Display for AsciiEscaped<'a> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "\"")?; + for byte in self.0.iter().cloned().flat_map(ascii::escape_default) { + write!(fmt, "{}", byte as char)?; + } + write!(fmt, "\"") + } +} diff --git a/third_party/rust/mio/src/sys/unix/uds/stream.rs b/third_party/rust/mio/src/sys/unix/uds/stream.rs new file mode 100644 index 0000000000..461917c12f --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/uds/stream.rs @@ -0,0 +1,34 @@ +use super::{socket_addr, SocketAddr}; +use crate::sys::unix::net::new_socket; + +use std::io; +use std::os::unix::io::{AsRawFd, FromRawFd}; +use std::os::unix::net; +use std::path::Path; + +pub(crate) fn connect(path: &Path) -> io::Result<net::UnixStream> { + let (sockaddr, socklen) = socket_addr(path)?; + let sockaddr = &sockaddr as *const libc::sockaddr_un as *const libc::sockaddr; + + let fd = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?; + let socket = unsafe { net::UnixStream::from_raw_fd(fd) }; + match syscall!(connect(fd, sockaddr, socklen)) { + Ok(_) => {} + Err(ref err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {} + Err(e) => return Err(e), + } + + Ok(socket) +} + +pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> { + super::pair(libc::SOCK_STREAM) +} + +pub(crate) fn local_addr(socket: &net::UnixStream) -> io::Result<SocketAddr> { + super::local_addr(socket.as_raw_fd()) +} + +pub(crate) fn peer_addr(socket: &net::UnixStream) -> io::Result<SocketAddr> { + 
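// [Editor's note] Not part of the upstream diff: a minimal, user-side sketch of
// the `pair` helper above, which yields two connected, non-blocking Unix stream
// sockets (the extra `fcntl` calls in `super::pair` cover platforms without
// SOCK_NONBLOCK/SOCK_CLOEXEC). Names below are from mio's public API.
fn uds_pair_sketch() -> std::io::Result<()> {
    use std::io::Write;

    use mio::net::UnixStream;
    use mio::{Events, Interest, Poll, Token};

    let (mut left, mut right) = UnixStream::pair()?;

    let mut poll = Poll::new()?;
    let mut events = Events::with_capacity(8);
    poll.registry()
        .register(&mut right, Token(1), Interest::READABLE)?;

    // Writing to one end makes the other end readable.
    left.write_all(b"ping")?;
    poll.poll(&mut events, None)?;
    assert!(events.iter().any(|event| event.token() == Token(1)));
    Ok(())
}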
super::peer_addr(socket.as_raw_fd()) +} diff --git a/third_party/rust/mio/src/sys/unix/waker.rs b/third_party/rust/mio/src/sys/unix/waker.rs new file mode 100644 index 0000000000..0044cd06d5 --- /dev/null +++ b/third_party/rust/mio/src/sys/unix/waker.rs @@ -0,0 +1,185 @@ +#[cfg(any(target_os = "linux", target_os = "android"))] +mod eventfd { + use crate::sys::Selector; + use crate::{Interest, Token}; + + use std::fs::File; + use std::io::{self, Read, Write}; + use std::os::unix::io::FromRawFd; + + /// Waker backed by `eventfd`. + /// + /// `eventfd` is effectively an 64 bit counter. All writes must be of 8 + /// bytes (64 bits) and are converted (native endian) into an 64 bit + /// unsigned integer and added to the count. Reads must also be 8 bytes and + /// reset the count to 0, returning the count. + #[derive(Debug)] + pub struct Waker { + fd: File, + } + + impl Waker { + pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> { + let fd = syscall!(eventfd(0, libc::EFD_CLOEXEC | libc::EFD_NONBLOCK))?; + let file = unsafe { File::from_raw_fd(fd) }; + + selector.register(fd, token, Interest::READABLE)?; + Ok(Waker { fd: file }) + } + + pub fn wake(&self) -> io::Result<()> { + let buf: [u8; 8] = 1u64.to_ne_bytes(); + match (&self.fd).write(&buf) { + Ok(_) => Ok(()), + Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { + // Writing only blocks if the counter is going to overflow. + // So we'll reset the counter to 0 and wake it again. + self.reset()?; + self.wake() + } + Err(err) => Err(err), + } + } + + /// Reset the eventfd object, only need to call this if `wake` fails. + fn reset(&self) -> io::Result<()> { + let mut buf: [u8; 8] = 0u64.to_ne_bytes(); + match (&self.fd).read(&mut buf) { + Ok(_) => Ok(()), + // If the `Waker` hasn't been awoken yet this will return a + // `WouldBlock` error which we can safely ignore. + Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Ok(()), + Err(err) => Err(err), + } + } + } +} + +#[cfg(any(target_os = "linux", target_os = "android"))] +pub use self::eventfd::Waker; + +#[cfg(any( + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", +))] +mod kqueue { + use crate::sys::Selector; + use crate::Token; + + use std::io; + + /// Waker backed by kqueue user space notifications (`EVFILT_USER`). + /// + /// The implementation is fairly simple, first the kqueue must be setup to + /// receive waker events this done by calling `Selector.setup_waker`. Next + /// we need access to kqueue, thus we need to duplicate the file descriptor. + /// Now waking is as simple as adding an event to the kqueue. + #[derive(Debug)] + pub struct Waker { + selector: Selector, + token: Token, + } + + impl Waker { + pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> { + let selector = selector.try_clone()?; + selector.setup_waker(token)?; + Ok(Waker { selector, token }) + } + + pub fn wake(&self) -> io::Result<()> { + self.selector.wake(self.token) + } + } +} + +#[cfg(any( + target_os = "freebsd", + target_os = "ios", + target_os = "macos", + target_os = "tvos", + target_os = "watchos", +))] +pub use self::kqueue::Waker; + +#[cfg(any( + target_os = "dragonfly", + target_os = "illumos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "redox", +))] +mod pipe { + use crate::sys::unix::Selector; + use crate::{Interest, Token}; + + use std::fs::File; + use std::io::{self, Read, Write}; + use std::os::unix::io::FromRawFd; + + /// Waker backed by a unix pipe. 
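// [Editor's note] Not part of the upstream diff. Each backend in this file
// (eventfd, kqueue's EVFILT_USER, and the pipe fallback below) implements the
// same public `mio::Waker` type, whose purpose is to make `Poll::poll` return
// from another thread, roughly like this user-side sketch:
//
//     use std::sync::Arc;
//     use std::thread;
//     use mio::{Events, Poll, Token, Waker};
//
//     let mut poll = Poll::new()?;
//     let waker = Arc::new(Waker::new(poll.registry(), Token(10))?);
//     let waker2 = Arc::clone(&waker);
//     thread::spawn(move || waker2.wake().expect("failed to wake"));
//
//     let mut events = Events::with_capacity(8);
//     poll.poll(&mut events, None)?; // returns once `wake` is called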
+ /// + /// Waker controls both the sending and receiving ends and empties the pipe + /// if writing to it (waking) fails. + #[derive(Debug)] + pub struct Waker { + sender: File, + receiver: File, + } + + impl Waker { + pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> { + let mut fds = [-1; 2]; + syscall!(pipe2(fds.as_mut_ptr(), libc::O_NONBLOCK | libc::O_CLOEXEC))?; + let sender = unsafe { File::from_raw_fd(fds[1]) }; + let receiver = unsafe { File::from_raw_fd(fds[0]) }; + + selector.register(fds[0], token, Interest::READABLE)?; + Ok(Waker { sender, receiver }) + } + + pub fn wake(&self) -> io::Result<()> { + // The epoll emulation on some illumos systems currently requires + // the pipe buffer to be completely empty for an edge-triggered + // wakeup on the pipe read side. + #[cfg(target_os = "illumos")] + self.empty(); + + match (&self.sender).write(&[1]) { + Ok(_) => Ok(()), + Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { + // The reading end is full so we'll empty the buffer and try + // again. + self.empty(); + self.wake() + } + Err(ref err) if err.kind() == io::ErrorKind::Interrupted => self.wake(), + Err(err) => Err(err), + } + } + + /// Empty the pipe's buffer, only need to call this if `wake` fails. + /// This ignores any errors. + fn empty(&self) { + let mut buf = [0; 4096]; + loop { + match (&self.receiver).read(&mut buf) { + Ok(n) if n > 0 => continue, + _ => return, + } + } + } + } +} + +#[cfg(any( + target_os = "dragonfly", + target_os = "illumos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "redox", +))] +pub use self::pipe::Waker; diff --git a/third_party/rust/mio/src/sys/wasi/mod.rs b/third_party/rust/mio/src/sys/wasi/mod.rs new file mode 100644 index 0000000000..b1a25fc9d8 --- /dev/null +++ b/third_party/rust/mio/src/sys/wasi/mod.rs @@ -0,0 +1,370 @@ +//! # Notes +//! +//! The current implementation is somewhat limited. The `Waker` is not +//! implemented, as at the time of writing there is no way to support to wake-up +//! a thread from calling `poll_oneoff`. +//! +//! Furthermore the (re/de)register functions also don't work while concurrently +//! polling as both registering and polling requires a lock on the +//! `subscriptions`. +//! +//! Finally `Selector::try_clone`, required by `Registry::try_clone`, doesn't +//! work. However this could be implemented by use of an `Arc`. +//! +//! In summary, this only (barely) works using a single thread. + +use std::cmp::min; +use std::io; +#[cfg(all(feature = "net", debug_assertions))] +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +#[cfg(feature = "net")] +use crate::{Interest, Token}; + +cfg_net! { + pub(crate) mod tcp { + use std::io; + use std::net::{self, SocketAddr}; + + pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> { + let (stream, addr) = listener.accept()?; + stream.set_nonblocking(true)?; + Ok((stream, addr)) + } + } +} + +/// Unique id for use as `SelectorId`. +#[cfg(all(debug_assertions, feature = "net"))] +static NEXT_ID: AtomicUsize = AtomicUsize::new(1); + +pub(crate) struct Selector { + #[cfg(all(debug_assertions, feature = "net"))] + id: usize, + /// Subscriptions (reads events) we're interested in. 
+ subscriptions: Arc<Mutex<Vec<wasi::Subscription>>>, +} + +impl Selector { + pub(crate) fn new() -> io::Result<Selector> { + Ok(Selector { + #[cfg(all(debug_assertions, feature = "net"))] + id: NEXT_ID.fetch_add(1, Ordering::Relaxed), + subscriptions: Arc::new(Mutex::new(Vec::new())), + }) + } + + #[cfg(all(debug_assertions, feature = "net"))] + pub(crate) fn id(&self) -> usize { + self.id + } + + pub(crate) fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> { + events.clear(); + + let mut subscriptions = self.subscriptions.lock().unwrap(); + + // If we want to a use a timeout in the `wasi_poll_oneoff()` function + // we need another subscription to the list. + if let Some(timeout) = timeout { + subscriptions.push(timeout_subscription(timeout)); + } + + // `poll_oneoff` needs the same number of events as subscriptions. + let length = subscriptions.len(); + events.reserve(length); + + debug_assert!(events.capacity() >= length); + #[cfg(debug_assertions)] + if length == 0 { + warn!( + "calling mio::Poll::poll with empty subscriptions, this likely not what you want" + ); + } + + let res = unsafe { wasi::poll_oneoff(subscriptions.as_ptr(), events.as_mut_ptr(), length) }; + + // Remove the timeout subscription we possibly added above. + if timeout.is_some() { + let timeout_sub = subscriptions.pop(); + debug_assert_eq!( + timeout_sub.unwrap().u.tag, + wasi::EVENTTYPE_CLOCK.raw(), + "failed to remove timeout subscription" + ); + } + + drop(subscriptions); // Unlock. + + match res { + Ok(n_events) => { + // Safety: `poll_oneoff` initialises the `events` for us. + unsafe { events.set_len(n_events) }; + + // Remove the timeout event. + if timeout.is_some() { + if let Some(index) = events.iter().position(is_timeout_event) { + events.swap_remove(index); + } + } + + check_errors(&events) + } + Err(err) => Err(io_err(err)), + } + } + + pub(crate) fn try_clone(&self) -> io::Result<Selector> { + Ok(Selector { + #[cfg(all(debug_assertions, feature = "net"))] + id: self.id, + subscriptions: self.subscriptions.clone(), + }) + } + + #[cfg(feature = "net")] + pub(crate) fn register( + &self, + fd: wasi::Fd, + token: Token, + interests: Interest, + ) -> io::Result<()> { + let mut subscriptions = self.subscriptions.lock().unwrap(); + + if interests.is_writable() { + let subscription = wasi::Subscription { + userdata: token.0 as wasi::Userdata, + u: wasi::SubscriptionU { + tag: wasi::EVENTTYPE_FD_WRITE.raw(), + u: wasi::SubscriptionUU { + fd_write: wasi::SubscriptionFdReadwrite { + file_descriptor: fd, + }, + }, + }, + }; + subscriptions.push(subscription); + } + + if interests.is_readable() { + let subscription = wasi::Subscription { + userdata: token.0 as wasi::Userdata, + u: wasi::SubscriptionU { + tag: wasi::EVENTTYPE_FD_READ.raw(), + u: wasi::SubscriptionUU { + fd_read: wasi::SubscriptionFdReadwrite { + file_descriptor: fd, + }, + }, + }, + }; + subscriptions.push(subscription); + } + + Ok(()) + } + + #[cfg(feature = "net")] + pub(crate) fn reregister( + &self, + fd: wasi::Fd, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.deregister(fd) + .and_then(|()| self.register(fd, token, interests)) + } + + #[cfg(feature = "net")] + pub(crate) fn deregister(&self, fd: wasi::Fd) -> io::Result<()> { + let mut subscriptions = self.subscriptions.lock().unwrap(); + + let predicate = |subscription: &wasi::Subscription| { + // Safety: `subscription.u.tag` defines the type of the union in + // `subscription.u.u`. 
+ match subscription.u.tag { + t if t == wasi::EVENTTYPE_FD_WRITE.raw() => unsafe { + subscription.u.u.fd_write.file_descriptor == fd + }, + t if t == wasi::EVENTTYPE_FD_READ.raw() => unsafe { + subscription.u.u.fd_read.file_descriptor == fd + }, + _ => false, + } + }; + + let mut ret = Err(io::ErrorKind::NotFound.into()); + + while let Some(index) = subscriptions.iter().position(predicate) { + subscriptions.swap_remove(index); + ret = Ok(()) + } + + ret + } +} + +/// Token used to a add a timeout subscription, also used in removing it again. +const TIMEOUT_TOKEN: wasi::Userdata = wasi::Userdata::max_value(); + +/// Returns a `wasi::Subscription` for `timeout`. +fn timeout_subscription(timeout: Duration) -> wasi::Subscription { + wasi::Subscription { + userdata: TIMEOUT_TOKEN, + u: wasi::SubscriptionU { + tag: wasi::EVENTTYPE_CLOCK.raw(), + u: wasi::SubscriptionUU { + clock: wasi::SubscriptionClock { + id: wasi::CLOCKID_MONOTONIC, + // Timestamp is in nanoseconds. + timeout: min(wasi::Timestamp::MAX as u128, timeout.as_nanos()) + as wasi::Timestamp, + // Give the implementation another millisecond to coalesce + // events. + precision: Duration::from_millis(1).as_nanos() as wasi::Timestamp, + // Zero means the `timeout` is considered relative to the + // current time. + flags: 0, + }, + }, + }, + } +} + +fn is_timeout_event(event: &wasi::Event) -> bool { + event.type_ == wasi::EVENTTYPE_CLOCK && event.userdata == TIMEOUT_TOKEN +} + +/// Check all events for possible errors, it returns the first error found. +fn check_errors(events: &[Event]) -> io::Result<()> { + for event in events { + if event.error != wasi::ERRNO_SUCCESS { + return Err(io_err(event.error)); + } + } + Ok(()) +} + +/// Convert `wasi::Errno` into an `io::Error`. +fn io_err(errno: wasi::Errno) -> io::Error { + // TODO: check if this is valid. + io::Error::from_raw_os_error(errno.raw() as i32) +} + +pub(crate) type Events = Vec<Event>; + +pub(crate) type Event = wasi::Event; + +pub(crate) mod event { + use std::fmt; + + use crate::sys::Event; + use crate::Token; + + pub(crate) fn token(event: &Event) -> Token { + Token(event.userdata as usize) + } + + pub(crate) fn is_readable(event: &Event) -> bool { + event.type_ == wasi::EVENTTYPE_FD_READ + } + + pub(crate) fn is_writable(event: &Event) -> bool { + event.type_ == wasi::EVENTTYPE_FD_WRITE + } + + pub(crate) fn is_error(_: &Event) -> bool { + // Not supported? It could be that `wasi::Event.error` could be used for + // this, but the docs say `error that occurred while processing the + // subscription request`, so it's checked in `Select::select` already. + false + } + + pub(crate) fn is_read_closed(event: &Event) -> bool { + event.type_ == wasi::EVENTTYPE_FD_READ + // Safety: checked the type of the union above. + && (event.fd_readwrite.flags & wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP) != 0 + } + + pub(crate) fn is_write_closed(event: &Event) -> bool { + event.type_ == wasi::EVENTTYPE_FD_WRITE + // Safety: checked the type of the union above. + && (event.fd_readwrite.flags & wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP) != 0 + } + + pub(crate) fn is_priority(_: &Event) -> bool { + // Not supported. + false + } + + pub(crate) fn is_aio(_: &Event) -> bool { + // Not supported. + false + } + + pub(crate) fn is_lio(_: &Event) -> bool { + // Not supported. 
+ false + } + + pub(crate) fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result { + debug_detail!( + TypeDetails(wasi::Eventtype), + PartialEq::eq, + wasi::EVENTTYPE_CLOCK, + wasi::EVENTTYPE_FD_READ, + wasi::EVENTTYPE_FD_WRITE, + ); + + #[allow(clippy::trivially_copy_pass_by_ref)] + fn check_flag(got: &wasi::Eventrwflags, want: &wasi::Eventrwflags) -> bool { + (got & want) != 0 + } + debug_detail!( + EventrwflagsDetails(wasi::Eventrwflags), + check_flag, + wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP, + ); + + struct EventFdReadwriteDetails(wasi::EventFdReadwrite); + + impl fmt::Debug for EventFdReadwriteDetails { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("EventFdReadwrite") + .field("nbytes", &self.0.nbytes) + .field("flags", &self.0.flags) + .finish() + } + } + + f.debug_struct("Event") + .field("userdata", &event.userdata) + .field("error", &event.error) + .field("type", &TypeDetails(event.type_)) + .field("fd_readwrite", &EventFdReadwriteDetails(event.fd_readwrite)) + .finish() + } +} + +cfg_os_poll! { + cfg_io_source! { + pub(crate) struct IoSourceState; + + impl IoSourceState { + pub(crate) fn new() -> IoSourceState { + IoSourceState + } + + pub(crate) fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R> + where + F: FnOnce(&T) -> io::Result<R>, + { + // We don't hold state, so we can just call the function and + // return. + f(io) + } + } + } +} diff --git a/third_party/rust/mio/src/sys/windows/afd.rs b/third_party/rust/mio/src/sys/windows/afd.rs new file mode 100644 index 0000000000..11373cfca9 --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/afd.rs @@ -0,0 +1,243 @@ +use std::ffi::c_void; +use std::fmt; +use std::fs::File; +use std::io; +use std::mem::size_of; +use std::os::windows::io::AsRawHandle; + +use windows_sys::Wdk::Storage::FileSystem::NtCancelIoFileEx; +use windows_sys::Wdk::System::IO::NtDeviceIoControlFile; +use windows_sys::Win32::Foundation::{ + RtlNtStatusToDosError, HANDLE, NTSTATUS, STATUS_NOT_FOUND, STATUS_PENDING, STATUS_SUCCESS, +}; +use windows_sys::Win32::System::IO::{IO_STATUS_BLOCK, IO_STATUS_BLOCK_0}; + +const IOCTL_AFD_POLL: u32 = 0x00012024; + +/// Winsock2 AFD driver instance. +/// +/// All operations are unsafe due to IO_STATUS_BLOCK parameter are being used by Afd driver during STATUS_PENDING before I/O Completion Port returns its result. +#[derive(Debug)] +pub struct Afd { + fd: File, +} + +#[repr(C)] +#[derive(Debug)] +pub struct AfdPollHandleInfo { + pub handle: HANDLE, + pub events: u32, + pub status: NTSTATUS, +} + +unsafe impl Send for AfdPollHandleInfo {} + +#[repr(C)] +pub struct AfdPollInfo { + pub timeout: i64, + // Can have only value 1. + pub number_of_handles: u32, + pub exclusive: u32, + pub handles: [AfdPollHandleInfo; 1], +} + +impl fmt::Debug for AfdPollInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AfdPollInfo").finish() + } +} + +impl Afd { + /// Poll `Afd` instance with `AfdPollInfo`. + /// + /// # Unsafety + /// + /// This function is unsafe due to memory of `IO_STATUS_BLOCK` still being used by `Afd` instance while `Ok(false)` (`STATUS_PENDING`). + /// `iosb` needs to be untouched after the call while operation is in effective at ALL TIME except for `cancel` method. + /// So be careful not to `poll` twice while polling. + /// User should deallocate there overlapped value when error to prevent memory leak. 
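+    /// In other words, both `info` and `iosb` must stay alive and unmoved
+    /// until the matching completion has been dequeued from the associated
+    /// completion port.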
+ pub unsafe fn poll( + &self, + info: &mut AfdPollInfo, + iosb: *mut IO_STATUS_BLOCK, + overlapped: *mut c_void, + ) -> io::Result<bool> { + let info_ptr = info as *mut _ as *mut c_void; + (*iosb).Anonymous.Status = STATUS_PENDING; + let status = NtDeviceIoControlFile( + self.fd.as_raw_handle() as HANDLE, + 0, + None, + overlapped, + iosb, + IOCTL_AFD_POLL, + info_ptr, + size_of::<AfdPollInfo>() as u32, + info_ptr, + size_of::<AfdPollInfo>() as u32, + ); + match status { + STATUS_SUCCESS => Ok(true), + STATUS_PENDING => Ok(false), + _ => Err(io::Error::from_raw_os_error( + RtlNtStatusToDosError(status) as i32 + )), + } + } + + /// Cancel previous polled request of `Afd`. + /// + /// iosb needs to be used by `poll` first for valid `cancel`. + /// + /// # Unsafety + /// + /// This function is unsafe due to memory of `IO_STATUS_BLOCK` still being used by `Afd` instance while `Ok(false)` (`STATUS_PENDING`). + /// Use it only with request is still being polled so that you have valid `IO_STATUS_BLOCK` to use. + /// User should NOT deallocate there overlapped value after the `cancel` to prevent double free. + pub unsafe fn cancel(&self, iosb: *mut IO_STATUS_BLOCK) -> io::Result<()> { + if (*iosb).Anonymous.Status != STATUS_PENDING { + return Ok(()); + } + + let mut cancel_iosb = IO_STATUS_BLOCK { + Anonymous: IO_STATUS_BLOCK_0 { Status: 0 }, + Information: 0, + }; + let status = NtCancelIoFileEx(self.fd.as_raw_handle() as HANDLE, iosb, &mut cancel_iosb); + if status == STATUS_SUCCESS || status == STATUS_NOT_FOUND { + return Ok(()); + } + Err(io::Error::from_raw_os_error( + RtlNtStatusToDosError(status) as i32 + )) + } +} + +cfg_io_source! { + use std::mem::zeroed; + use std::os::windows::io::{FromRawHandle, RawHandle}; + use std::ptr::null_mut; + use std::sync::atomic::{AtomicUsize, Ordering}; + + use windows_sys::Wdk::Foundation::OBJECT_ATTRIBUTES; + use windows_sys::Wdk::Storage::FileSystem::{NtCreateFile, FILE_OPEN}; + use windows_sys::Win32::Foundation::{INVALID_HANDLE_VALUE, UNICODE_STRING}; + use windows_sys::Win32::Storage::FileSystem::{ + SetFileCompletionNotificationModes, FILE_SHARE_READ, FILE_SHARE_WRITE, SYNCHRONIZE, + }; + use windows_sys::Win32::System::WindowsProgramming::FILE_SKIP_SET_EVENT_ON_HANDLE; + + use super::iocp::CompletionPort; + + const AFD_HELPER_ATTRIBUTES: OBJECT_ATTRIBUTES = OBJECT_ATTRIBUTES { + Length: size_of::<OBJECT_ATTRIBUTES>() as u32, + RootDirectory: 0, + ObjectName: &AFD_OBJ_NAME as *const _ as *mut _, + Attributes: 0, + SecurityDescriptor: null_mut(), + SecurityQualityOfService: null_mut(), + }; + + const AFD_OBJ_NAME: UNICODE_STRING = UNICODE_STRING { + Length: (AFD_HELPER_NAME.len() * size_of::<u16>()) as u16, + MaximumLength: (AFD_HELPER_NAME.len() * size_of::<u16>()) as u16, + Buffer: AFD_HELPER_NAME.as_ptr() as *mut _, + }; + + const AFD_HELPER_NAME: &[u16] = &[ + '\\' as _, + 'D' as _, + 'e' as _, + 'v' as _, + 'i' as _, + 'c' as _, + 'e' as _, + '\\' as _, + 'A' as _, + 'f' as _, + 'd' as _, + '\\' as _, + 'M' as _, + 'i' as _, + 'o' as _ + ]; + + static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(0); + + impl AfdPollInfo { + pub fn zeroed() -> AfdPollInfo { + unsafe { zeroed() } + } + } + + impl Afd { + /// Create new Afd instance. 
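+        ///
+        /// This opens the `\Device\Afd\Mio` helper device via `NtCreateFile`
+        /// and associates the resulting handle with the given completion
+        /// port under an even-numbered completion key.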
+ pub(crate) fn new(cp: &CompletionPort) -> io::Result<Afd> { + let mut afd_helper_handle: HANDLE = INVALID_HANDLE_VALUE; + let mut iosb = IO_STATUS_BLOCK { + Anonymous: IO_STATUS_BLOCK_0 { Status: 0 }, + Information: 0, + }; + + unsafe { + let status = NtCreateFile( + &mut afd_helper_handle as *mut _, + SYNCHRONIZE, + &AFD_HELPER_ATTRIBUTES as *const _ as *mut _, + &mut iosb, + null_mut(), + 0, + FILE_SHARE_READ | FILE_SHARE_WRITE, + FILE_OPEN, + 0, + null_mut(), + 0, + ); + if status != STATUS_SUCCESS { + let raw_err = io::Error::from_raw_os_error( + RtlNtStatusToDosError(status) as i32 + ); + let msg = format!("Failed to open \\Device\\Afd\\Mio: {}", raw_err); + return Err(io::Error::new(raw_err.kind(), msg)); + } + let fd = File::from_raw_handle(afd_helper_handle as RawHandle); + // Increment by 2 to reserve space for other types of handles. + // Non-AFD types (currently only NamedPipe), use odd numbered + // tokens. This allows the selector to differentiate between them + // and dispatch events accordingly. + let token = NEXT_TOKEN.fetch_add(2, Ordering::Relaxed) + 2; + let afd = Afd { fd }; + cp.add_handle(token, &afd.fd)?; + match SetFileCompletionNotificationModes( + afd_helper_handle, + FILE_SKIP_SET_EVENT_ON_HANDLE as u8 // This is just 2, so fits in u8 + ) { + 0 => Err(io::Error::last_os_error()), + _ => Ok(afd), + } + } + } + } +} + +pub const POLL_RECEIVE: u32 = 0b0_0000_0001; +pub const POLL_RECEIVE_EXPEDITED: u32 = 0b0_0000_0010; +pub const POLL_SEND: u32 = 0b0_0000_0100; +pub const POLL_DISCONNECT: u32 = 0b0_0000_1000; +pub const POLL_ABORT: u32 = 0b0_0001_0000; +pub const POLL_LOCAL_CLOSE: u32 = 0b0_0010_0000; +// Not used as it indicated in each event where a connection is connected, not +// just the first time a connection is established. +// Also see https://github.com/piscisaureus/wepoll/commit/8b7b340610f88af3d83f40fb728e7b850b090ece. 
+pub const POLL_CONNECT: u32 = 0b0_0100_0000; +pub const POLL_ACCEPT: u32 = 0b0_1000_0000; +pub const POLL_CONNECT_FAIL: u32 = 0b1_0000_0000; + +pub const KNOWN_EVENTS: u32 = POLL_RECEIVE + | POLL_RECEIVE_EXPEDITED + | POLL_SEND + | POLL_DISCONNECT + | POLL_ABORT + | POLL_LOCAL_CLOSE + | POLL_ACCEPT + | POLL_CONNECT_FAIL; diff --git a/third_party/rust/mio/src/sys/windows/event.rs b/third_party/rust/mio/src/sys/windows/event.rs new file mode 100644 index 0000000000..731bd6067c --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/event.rs @@ -0,0 +1,161 @@ +use std::fmt; + +use super::afd; +use super::iocp::CompletionStatus; +use crate::Token; + +#[derive(Clone)] +pub struct Event { + pub flags: u32, + pub data: u64, +} + +pub fn token(event: &Event) -> Token { + Token(event.data as usize) +} + +impl Event { + pub(super) fn new(token: Token) -> Event { + Event { + flags: 0, + data: usize::from(token) as u64, + } + } + + pub(super) fn set_readable(&mut self) { + self.flags |= afd::POLL_RECEIVE + } + + #[cfg(feature = "os-ext")] + pub(super) fn set_writable(&mut self) { + self.flags |= afd::POLL_SEND; + } + + pub(super) fn from_completion_status(status: &CompletionStatus) -> Event { + Event { + flags: status.bytes_transferred(), + data: status.token() as u64, + } + } + + pub(super) fn to_completion_status(&self) -> CompletionStatus { + CompletionStatus::new(self.flags, self.data as usize, std::ptr::null_mut()) + } +} + +pub(crate) const READABLE_FLAGS: u32 = afd::POLL_RECEIVE + | afd::POLL_DISCONNECT + | afd::POLL_ACCEPT + | afd::POLL_ABORT + | afd::POLL_CONNECT_FAIL; +pub(crate) const WRITABLE_FLAGS: u32 = afd::POLL_SEND | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL; +pub(crate) const ERROR_FLAGS: u32 = afd::POLL_CONNECT_FAIL; +pub(crate) const READ_CLOSED_FLAGS: u32 = + afd::POLL_DISCONNECT | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL; +pub(crate) const WRITE_CLOSED_FLAGS: u32 = afd::POLL_ABORT | afd::POLL_CONNECT_FAIL; + +pub fn is_readable(event: &Event) -> bool { + event.flags & READABLE_FLAGS != 0 +} + +pub fn is_writable(event: &Event) -> bool { + event.flags & WRITABLE_FLAGS != 0 +} + +pub fn is_error(event: &Event) -> bool { + event.flags & ERROR_FLAGS != 0 +} + +pub fn is_read_closed(event: &Event) -> bool { + event.flags & READ_CLOSED_FLAGS != 0 +} + +pub fn is_write_closed(event: &Event) -> bool { + event.flags & WRITE_CLOSED_FLAGS != 0 +} + +pub fn is_priority(event: &Event) -> bool { + event.flags & afd::POLL_RECEIVE_EXPEDITED != 0 +} + +pub fn is_aio(_: &Event) -> bool { + // Not supported. + false +} + +pub fn is_lio(_: &Event) -> bool { + // Not supported. + false +} + +pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result { + #[allow(clippy::trivially_copy_pass_by_ref)] + fn check_flags(got: &u32, want: &u32) -> bool { + (got & want) != 0 + } + debug_detail!( + FlagsDetails(u32), + check_flags, + afd::POLL_RECEIVE, + afd::POLL_RECEIVE_EXPEDITED, + afd::POLL_SEND, + afd::POLL_DISCONNECT, + afd::POLL_ABORT, + afd::POLL_LOCAL_CLOSE, + afd::POLL_CONNECT, + afd::POLL_ACCEPT, + afd::POLL_CONNECT_FAIL, + ); + + f.debug_struct("event") + .field("flags", &FlagsDetails(event.flags)) + .field("data", &event.data) + .finish() +} + +pub struct Events { + /// Raw I/O event completions are filled in here by the call to `get_many` + /// on the completion port above. These are then processed to run callbacks + /// which figure out what to do after the event is done. 
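+    /// The slice is allocated once in `with_capacity` and reused between
+    /// polls; `clear` resets every slot to zero.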
+ pub statuses: Box<[CompletionStatus]>, + + /// Literal events returned by `get` to the upwards `EventLoop`. This file + /// doesn't really modify this (except for the waker), instead almost all + /// events are filled in by the `ReadinessQueue` from the `poll` module. + pub events: Vec<Event>, +} + +impl Events { + pub fn with_capacity(cap: usize) -> Events { + // Note that it's possible for the output `events` to grow beyond the + // capacity as it can also include deferred events, but that's certainly + // not the end of the world! + Events { + statuses: vec![CompletionStatus::zero(); cap].into_boxed_slice(), + events: Vec::with_capacity(cap), + } + } + + pub fn is_empty(&self) -> bool { + self.events.is_empty() + } + + pub fn capacity(&self) -> usize { + self.events.capacity() + } + + pub fn len(&self) -> usize { + self.events.len() + } + + pub fn get(&self, idx: usize) -> Option<&Event> { + self.events.get(idx) + } + + pub fn clear(&mut self) { + self.events.clear(); + for status in self.statuses.iter_mut() { + *status = CompletionStatus::zero(); + } + } +} diff --git a/third_party/rust/mio/src/sys/windows/handle.rs b/third_party/rust/mio/src/sys/windows/handle.rs new file mode 100644 index 0000000000..5b9ac0b624 --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/handle.rs @@ -0,0 +1,30 @@ +use std::os::windows::io::RawHandle; +use windows_sys::Win32::Foundation::{CloseHandle, HANDLE}; + +/// Wrapper around a Windows HANDLE so that we close it upon drop in all scenarios +#[derive(Debug)] +pub struct Handle(HANDLE); + +impl Handle { + #[inline] + pub fn new(handle: HANDLE) -> Self { + Self(handle) + } + + pub fn raw(&self) -> HANDLE { + self.0 + } + + pub fn into_raw(self) -> RawHandle { + let ret = self.0; + // This is super important so that drop is not called! + std::mem::forget(self); + ret as RawHandle + } +} + +impl Drop for Handle { + fn drop(&mut self) { + unsafe { CloseHandle(self.0) }; + } +} diff --git a/third_party/rust/mio/src/sys/windows/io_status_block.rs b/third_party/rust/mio/src/sys/windows/io_status_block.rs new file mode 100644 index 0000000000..bd2a6dcfe2 --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/io_status_block.rs @@ -0,0 +1,40 @@ +use std::fmt; +use std::ops::{Deref, DerefMut}; + +use windows_sys::Win32::System::IO::IO_STATUS_BLOCK; + +pub struct IoStatusBlock(IO_STATUS_BLOCK); + +cfg_io_source! { + use windows_sys::Win32::System::IO::IO_STATUS_BLOCK_0; + + impl IoStatusBlock { + pub fn zeroed() -> Self { + Self(IO_STATUS_BLOCK { + Anonymous: IO_STATUS_BLOCK_0 { Status: 0 }, + Information: 0, + }) + } + } +} + +unsafe impl Send for IoStatusBlock {} + +impl Deref for IoStatusBlock { + type Target = IO_STATUS_BLOCK; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for IoStatusBlock { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl fmt::Debug for IoStatusBlock { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("IoStatusBlock").finish() + } +} diff --git a/third_party/rust/mio/src/sys/windows/iocp.rs b/third_party/rust/mio/src/sys/windows/iocp.rs new file mode 100644 index 0000000000..c71b695d48 --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/iocp.rs @@ -0,0 +1,273 @@ +//! 
Bindings to IOCP, I/O Completion Ports + +use super::{Handle, Overlapped}; +use std::cmp; +use std::fmt; +use std::io; +use std::mem; +use std::os::windows::io::*; +use std::time::Duration; + +use windows_sys::Win32::Foundation::{HANDLE, INVALID_HANDLE_VALUE}; +use windows_sys::Win32::System::IO::{ + CreateIoCompletionPort, GetQueuedCompletionStatusEx, PostQueuedCompletionStatus, OVERLAPPED, + OVERLAPPED_ENTRY, +}; + +/// A handle to an Windows I/O Completion Port. +#[derive(Debug)] +pub(crate) struct CompletionPort { + handle: Handle, +} + +/// A status message received from an I/O completion port. +/// +/// These statuses can be created via the `new` or `empty` constructors and then +/// provided to a completion port, or they are read out of a completion port. +/// The fields of each status are read through its accessor methods. +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct CompletionStatus(OVERLAPPED_ENTRY); + +impl fmt::Debug for CompletionStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "CompletionStatus(OVERLAPPED_ENTRY)") + } +} + +unsafe impl Send for CompletionStatus {} +unsafe impl Sync for CompletionStatus {} + +impl CompletionPort { + /// Creates a new I/O completion port with the specified concurrency value. + /// + /// The number of threads given corresponds to the level of concurrency + /// allowed for threads associated with this port. Consult the Windows + /// documentation for more information about this value. + pub fn new(threads: u32) -> io::Result<CompletionPort> { + let ret = unsafe { CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, threads) }; + if ret == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(CompletionPort { + handle: Handle::new(ret), + }) + } + } + + /// Associates a new `HANDLE` to this I/O completion port. + /// + /// This function will associate the given handle to this port with the + /// given `token` to be returned in status messages whenever it receives a + /// notification. + /// + /// Any object which is convertible to a `HANDLE` via the `AsRawHandle` + /// trait can be provided to this function, such as `std::fs::File` and + /// friends. + #[cfg(any(feature = "net", feature = "os-ext"))] + pub fn add_handle<T: AsRawHandle + ?Sized>(&self, token: usize, t: &T) -> io::Result<()> { + let ret = unsafe { + CreateIoCompletionPort(t.as_raw_handle() as HANDLE, self.handle.raw(), token, 0) + }; + if ret == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } + } + + /// Dequeues a number of completion statuses from this I/O completion port. + /// + /// This function is the same as `get` except that it may return more than + /// one status. A buffer of "zero" statuses is provided (the contents are + /// not read) and then on success this function will return a sub-slice of + /// statuses which represent those which were dequeued from this port. This + /// function does not wait to fill up the entire list of statuses provided. + /// + /// Like with `get`, a timeout may be specified for this operation. 
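+    ///
+    /// A minimal usage sketch (illustrative only; it mirrors the `get_many`
+    /// test at the bottom of this file):
+    ///
+    /// ```ignore
+    /// let port = CompletionPort::new(1).unwrap();
+    /// port.post(CompletionStatus::new(1, 2, std::ptr::null_mut())).unwrap();
+    ///
+    /// let mut statuses = vec![CompletionStatus::zero(); 8];
+    /// let ready = port.get_many(&mut statuses, None).unwrap();
+    /// assert_eq!(ready.len(), 1);
+    /// assert_eq!(ready[0].token(), 2);
+    /// ```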
+ pub fn get_many<'a>( + &self, + list: &'a mut [CompletionStatus], + timeout: Option<Duration>, + ) -> io::Result<&'a mut [CompletionStatus]> { + debug_assert_eq!( + mem::size_of::<CompletionStatus>(), + mem::size_of::<OVERLAPPED_ENTRY>() + ); + let mut removed = 0; + let timeout = duration_millis(timeout); + let len = cmp::min(list.len(), <u32>::max_value() as usize) as u32; + let ret = unsafe { + GetQueuedCompletionStatusEx( + self.handle.raw(), + list.as_ptr() as *mut _, + len, + &mut removed, + timeout, + 0, + ) + }; + + if ret == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(&mut list[..removed as usize]) + } + } + + /// Posts a new completion status onto this I/O completion port. + /// + /// This function will post the given status, with custom parameters, to the + /// port. Threads blocked in `get` or `get_many` will eventually receive + /// this status. + pub fn post(&self, status: CompletionStatus) -> io::Result<()> { + let ret = unsafe { + PostQueuedCompletionStatus( + self.handle.raw(), + status.0.dwNumberOfBytesTransferred, + status.0.lpCompletionKey, + status.0.lpOverlapped, + ) + }; + + if ret == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } + } +} + +impl AsRawHandle for CompletionPort { + fn as_raw_handle(&self) -> RawHandle { + self.handle.raw() as RawHandle + } +} + +impl FromRawHandle for CompletionPort { + unsafe fn from_raw_handle(handle: RawHandle) -> CompletionPort { + CompletionPort { + handle: Handle::new(handle as HANDLE), + } + } +} + +impl IntoRawHandle for CompletionPort { + fn into_raw_handle(self) -> RawHandle { + self.handle.into_raw() + } +} + +impl CompletionStatus { + /// Creates a new completion status with the provided parameters. + /// + /// This function is useful when creating a status to send to a port with + /// the `post` method. The parameters are opaquely passed through and not + /// interpreted by the system at all. + pub(crate) fn new(bytes: u32, token: usize, overlapped: *mut Overlapped) -> Self { + CompletionStatus(OVERLAPPED_ENTRY { + dwNumberOfBytesTransferred: bytes, + lpCompletionKey: token, + lpOverlapped: overlapped as *mut _, + Internal: 0, + }) + } + + /// Creates a new borrowed completion status from the borrowed + /// `OVERLAPPED_ENTRY` argument provided. + /// + /// This method will wrap the `OVERLAPPED_ENTRY` in a `CompletionStatus`, + /// returning the wrapped structure. + #[cfg(feature = "os-ext")] + pub fn from_entry(entry: &OVERLAPPED_ENTRY) -> &Self { + // Safety: CompletionStatus is repr(transparent) w/ OVERLAPPED_ENTRY, so + // a reference to one is guaranteed to be layout compatible with the + // reference to another. + unsafe { &*(entry as *const _ as *const _) } + } + + /// Creates a new "zero" completion status. + /// + /// This function is useful when creating a stack buffer or vector of + /// completion statuses to be passed to the `get_many` function. + pub fn zero() -> Self { + Self::new(0, 0, std::ptr::null_mut()) + } + + /// Returns the number of bytes that were transferred for the I/O operation + /// associated with this completion status. + pub fn bytes_transferred(&self) -> u32 { + self.0.dwNumberOfBytesTransferred + } + + /// Returns the completion key value associated with the file handle whose + /// I/O operation has completed. + /// + /// A completion key is a per-handle key that is specified when it is added + /// to an I/O completion port via `add_handle` or `add_socket`. 
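+    ///
+    /// Within mio the key also doubles as a type tag: AFD helper handles are
+    /// registered with even keys while named pipes use odd keys, which lets
+    /// the selector tell the two apart when draining the port.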
+ pub fn token(&self) -> usize { + self.0.lpCompletionKey as usize + } + + /// Returns a pointer to the `Overlapped` structure that was specified when + /// the I/O operation was started. + pub fn overlapped(&self) -> *mut OVERLAPPED { + self.0.lpOverlapped + } + + /// Returns a pointer to the internal `OVERLAPPED_ENTRY` object. + pub fn entry(&self) -> &OVERLAPPED_ENTRY { + &self.0 + } +} + +#[inline] +fn duration_millis(dur: Option<Duration>) -> u32 { + if let Some(dur) = dur { + // `Duration::as_millis` truncates, so round up. This avoids + // turning sub-millisecond timeouts into a zero timeout, unless + // the caller explicitly requests that by specifying a zero + // timeout. + let dur_ms = dur + .checked_add(Duration::from_nanos(999_999)) + .unwrap_or(dur) + .as_millis(); + cmp::min(dur_ms, u32::MAX as u128) as u32 + } else { + u32::MAX + } +} + +#[cfg(test)] +mod tests { + use super::{CompletionPort, CompletionStatus}; + + #[test] + fn is_send_sync() { + fn is_send_sync<T: Send + Sync>() {} + is_send_sync::<CompletionPort>(); + } + + #[test] + fn get_many() { + let c = CompletionPort::new(1).unwrap(); + + c.post(CompletionStatus::new(1, 2, 3 as *mut _)).unwrap(); + c.post(CompletionStatus::new(4, 5, 6 as *mut _)).unwrap(); + + let mut s = vec![CompletionStatus::zero(); 4]; + { + let s = c.get_many(&mut s, None).unwrap(); + assert_eq!(s.len(), 2); + assert_eq!(s[0].bytes_transferred(), 1); + assert_eq!(s[0].token(), 2); + assert_eq!(s[0].overlapped(), 3 as *mut _); + assert_eq!(s[1].bytes_transferred(), 4); + assert_eq!(s[1].token(), 5); + assert_eq!(s[1].overlapped(), 6 as *mut _); + } + assert_eq!(s[2].bytes_transferred(), 0); + assert_eq!(s[2].token(), 0); + assert_eq!(s[2].overlapped(), 0 as *mut _); + } +} diff --git a/third_party/rust/mio/src/sys/windows/mod.rs b/third_party/rust/mio/src/sys/windows/mod.rs new file mode 100644 index 0000000000..f8b72fc497 --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/mod.rs @@ -0,0 +1,152 @@ +mod afd; + +pub mod event; +pub use event::{Event, Events}; + +mod handle; +use handle::Handle; + +mod io_status_block; +mod iocp; + +mod overlapped; +use overlapped::Overlapped; + +mod selector; +pub use selector::{Selector, SelectorInner, SockState}; + +// Macros must be defined before the modules that use them +cfg_net! { + /// Helper macro to execute a system call that returns an `io::Result`. + // + // Macro must be defined before any modules that uses them. + macro_rules! syscall { + ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{ + let res = unsafe { $fn($($arg, )*) }; + if $err_test(&res, &$err_value) { + Err(io::Error::last_os_error()) + } else { + Ok(res) + } + }}; + } + + mod net; + + pub(crate) mod tcp; + pub(crate) mod udp; +} + +cfg_os_ext! { + pub(crate) mod named_pipe; +} + +mod waker; +pub(crate) use waker::Waker; + +cfg_io_source! { + use std::io; + use std::os::windows::io::RawSocket; + use std::pin::Pin; + use std::sync::{Arc, Mutex}; + + use crate::{Interest, Registry, Token}; + + struct InternalState { + selector: Arc<SelectorInner>, + token: Token, + interests: Interest, + sock_state: Pin<Arc<Mutex<SockState>>>, + } + + impl Drop for InternalState { + fn drop(&mut self) { + let mut sock_state = self.sock_state.lock().unwrap(); + sock_state.mark_delete(); + } + } + + pub struct IoSourceState { + // This is `None` if the socket has not yet been registered. + // + // We box the internal state to not increase the size on the stack as the + // type might move around a lot. 
+ inner: Option<Box<InternalState>>, + } + + impl IoSourceState { + pub fn new() -> IoSourceState { + IoSourceState { inner: None } + } + + pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R> + where + F: FnOnce(&T) -> io::Result<R>, + { + let result = f(io); + if let Err(ref e) = result { + if e.kind() == io::ErrorKind::WouldBlock { + self.inner.as_ref().map_or(Ok(()), |state| { + state + .selector + .reregister(state.sock_state.clone(), state.token, state.interests) + })?; + } + } + result + } + + pub fn register( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + socket: RawSocket, + ) -> io::Result<()> { + if self.inner.is_some() { + Err(io::ErrorKind::AlreadyExists.into()) + } else { + registry + .selector() + .register(socket, token, interests) + .map(|state| { + self.inner = Some(Box::new(state)); + }) + } + } + + pub fn reregister( + &mut self, + registry: &Registry, + token: Token, + interests: Interest, + ) -> io::Result<()> { + match self.inner.as_mut() { + Some(state) => { + registry + .selector() + .reregister(state.sock_state.clone(), token, interests) + .map(|()| { + state.token = token; + state.interests = interests; + }) + } + None => Err(io::ErrorKind::NotFound.into()), + } + } + + pub fn deregister(&mut self) -> io::Result<()> { + match self.inner.as_mut() { + Some(state) => { + { + let mut sock_state = state.sock_state.lock().unwrap(); + sock_state.mark_delete(); + } + self.inner = None; + Ok(()) + } + None => Err(io::ErrorKind::NotFound.into()), + } + } + } +} diff --git a/third_party/rust/mio/src/sys/windows/named_pipe.rs b/third_party/rust/mio/src/sys/windows/named_pipe.rs new file mode 100644 index 0000000000..23f85d1ebb --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/named_pipe.rs @@ -0,0 +1,993 @@ +use std::ffi::OsStr; +use std::io::{self, Read, Write}; +use std::os::windows::io::{AsRawHandle, FromRawHandle, RawHandle}; +use std::sync::atomic::Ordering::{Relaxed, SeqCst}; +use std::sync::atomic::{AtomicBool, AtomicUsize}; +use std::sync::{Arc, Mutex}; +use std::{fmt, mem, slice}; + +use windows_sys::Win32::Foundation::{ + ERROR_BROKEN_PIPE, ERROR_IO_INCOMPLETE, ERROR_IO_PENDING, ERROR_NO_DATA, ERROR_PIPE_CONNECTED, + ERROR_PIPE_LISTENING, HANDLE, INVALID_HANDLE_VALUE, +}; +use windows_sys::Win32::Storage::FileSystem::{ + ReadFile, WriteFile, FILE_FLAG_FIRST_PIPE_INSTANCE, FILE_FLAG_OVERLAPPED, PIPE_ACCESS_DUPLEX, +}; +use windows_sys::Win32::System::Pipes::{ + ConnectNamedPipe, CreateNamedPipeW, DisconnectNamedPipe, PIPE_TYPE_BYTE, + PIPE_UNLIMITED_INSTANCES, +}; +use windows_sys::Win32::System::IO::{ + CancelIoEx, GetOverlappedResult, OVERLAPPED, OVERLAPPED_ENTRY, +}; + +use crate::event::Source; +use crate::sys::windows::iocp::{CompletionPort, CompletionStatus}; +use crate::sys::windows::{Event, Handle, Overlapped}; +use crate::Registry; +use crate::{Interest, Token}; + +/// Non-blocking windows named pipe. +/// +/// This structure internally contains a `HANDLE` which represents the named +/// pipe, and also maintains state associated with the mio event loop and active +/// I/O operations that have been scheduled to translate IOCP to a readiness +/// model. +/// +/// Note, IOCP is a *completion* based model whereas mio is a *readiness* based +/// model. To bridge this, `NamedPipe` performs internal buffering. Writes are +/// written to an internal buffer and the buffer is submitted to IOCP. IOCP +/// reads are submitted using internal buffers and `NamedPipe::read` reads from +/// this internal buffer. 
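+///
+/// # Example
+///
+/// A minimal, illustrative server-side sketch (error handling elided; the
+/// pipe name is made up for the example):
+///
+/// ```ignore
+/// use mio::{Interest, Poll, Token};
+/// use mio::windows::NamedPipe;
+///
+/// let mut pipe = NamedPipe::new(r"\\.\pipe\mio-example").unwrap();
+/// let mut poll = Poll::new().unwrap();
+/// poll.registry()
+///     .register(&mut pipe, Token(0), Interest::WRITABLE)
+///     .unwrap();
+///
+/// // `connect` returns `WouldBlock` while the connection is in progress;
+/// // completion is reported as a writable event, after which `take_error`
+/// // tells whether the connect actually succeeded.
+/// match pipe.connect() {
+///     Ok(()) => {}
+///     Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {}
+///     Err(e) => panic!("connect failed: {}", e),
+/// }
+/// ```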
+/// +/// # Trait implementations +/// +/// The `Read` and `Write` traits are implemented for `NamedPipe` and for +/// `&NamedPipe`. This represents that a named pipe can be concurrently read and +/// written to and also can be read and written to at all. Typically a named +/// pipe needs to be connected to a client before it can be read or written, +/// however. +/// +/// Note that for I/O operations on a named pipe to succeed then the named pipe +/// needs to be associated with an event loop. Until this happens all I/O +/// operations will return a "would block" error. +/// +/// # Managing connections +/// +/// The `NamedPipe` type supports a `connect` method to connect to a client and +/// a `disconnect` method to disconnect from that client. These two methods only +/// work once a named pipe is associated with an event loop. +/// +/// The `connect` method will succeed asynchronously and a completion can be +/// detected once the object receives a writable notification. +/// +/// # Named pipe clients +/// +/// Currently to create a client of a named pipe server then you can use the +/// `OpenOptions` type in the standard library to create a `File` that connects +/// to a named pipe. Afterwards you can use the `into_raw_handle` method coupled +/// with the `NamedPipe::from_raw_handle` method to convert that to a named pipe +/// that can operate asynchronously. Don't forget to pass the +/// `FILE_FLAG_OVERLAPPED` flag when opening the `File`. +pub struct NamedPipe { + inner: Arc<Inner>, +} + +/// # Notes +/// +/// The memory layout of this structure must be fixed as the +/// `ptr_from_*_overlapped` methods depend on it, see the `ptr_from` test. +#[repr(C)] +struct Inner { + // NOTE: careful modifying the order of these three fields, the `ptr_from_*` + // methods depend on the layout! + connect: Overlapped, + read: Overlapped, + write: Overlapped, + // END NOTE. + handle: Handle, + connecting: AtomicBool, + io: Mutex<Io>, + pool: Mutex<BufferPool>, +} + +impl Inner { + /// Converts a pointer to `Inner.connect` to a pointer to `Inner`. + /// + /// # Unsafety + /// + /// Caller must ensure `ptr` is pointing to `Inner.connect`. + unsafe fn ptr_from_conn_overlapped(ptr: *mut OVERLAPPED) -> *const Inner { + // `connect` is the first field, so the pointer are the same. + ptr.cast() + } + + /// Same as [`ptr_from_conn_overlapped`] but for `Inner.read`. + unsafe fn ptr_from_read_overlapped(ptr: *mut OVERLAPPED) -> *const Inner { + // `read` is after `connect: Overlapped`. + (ptr as *mut Overlapped).wrapping_sub(1) as *const Inner + } + + /// Same as [`ptr_from_conn_overlapped`] but for `Inner.write`. + unsafe fn ptr_from_write_overlapped(ptr: *mut OVERLAPPED) -> *const Inner { + // `read` is after `connect: Overlapped` and `read: Overlapped`. + (ptr as *mut Overlapped).wrapping_sub(2) as *const Inner + } + + /// Issue a connection request with the specified overlapped operation. + /// + /// This function will issue a request to connect a client to this server, + /// returning immediately after starting the overlapped operation. + /// + /// If this function immediately succeeds then `Ok(true)` is returned. If + /// the overlapped operation is enqueued and pending, then `Ok(false)` is + /// returned. Otherwise an error is returned indicating what went wrong. + /// + /// # Unsafety + /// + /// This function is unsafe because the kernel requires that the + /// `overlapped` pointer is valid until the end of the I/O operation. 
The + /// kernel also requires that `overlapped` is unique for this I/O operation + /// and is not in use for any other I/O. + /// + /// To safely use this function callers must ensure that this pointer is + /// valid until the I/O operation is completed, typically via completion + /// ports and waiting to receive the completion notification on the port. + pub unsafe fn connect_overlapped(&self, overlapped: *mut OVERLAPPED) -> io::Result<bool> { + if ConnectNamedPipe(self.handle.raw(), overlapped) != 0 { + return Ok(true); + } + + let err = io::Error::last_os_error(); + + match err.raw_os_error().map(|e| e as u32) { + Some(ERROR_PIPE_CONNECTED) => Ok(true), + Some(ERROR_NO_DATA) => Ok(true), + Some(ERROR_IO_PENDING) => Ok(false), + _ => Err(err), + } + } + + /// Disconnects this named pipe from any connected client. + pub fn disconnect(&self) -> io::Result<()> { + if unsafe { DisconnectNamedPipe(self.handle.raw()) } == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } + } + + /// Issues an overlapped read operation to occur on this pipe. + /// + /// This function will issue an asynchronous read to occur in an overlapped + /// fashion, returning immediately. The `buf` provided will be filled in + /// with data and the request is tracked by the `overlapped` function + /// provided. + /// + /// If the operation succeeds immediately, `Ok(Some(n))` is returned where + /// `n` is the number of bytes read. If an asynchronous operation is + /// enqueued, then `Ok(None)` is returned. Otherwise if an error occurred + /// it is returned. + /// + /// When this operation completes (or if it completes immediately), another + /// mechanism must be used to learn how many bytes were transferred (such as + /// looking at the filed in the IOCP status message). + /// + /// # Unsafety + /// + /// This function is unsafe because the kernel requires that the `buf` and + /// `overlapped` pointers to be valid until the end of the I/O operation. + /// The kernel also requires that `overlapped` is unique for this I/O + /// operation and is not in use for any other I/O. + /// + /// To safely use this function callers must ensure that the pointers are + /// valid until the I/O operation is completed, typically via completion + /// ports and waiting to receive the completion notification on the port. + pub unsafe fn read_overlapped( + &self, + buf: &mut [u8], + overlapped: *mut OVERLAPPED, + ) -> io::Result<Option<usize>> { + let len = std::cmp::min(buf.len(), u32::MAX as usize) as u32; + let res = ReadFile( + self.handle.raw(), + buf.as_mut_ptr() as *mut _, + len, + std::ptr::null_mut(), + overlapped, + ); + if res == 0 { + let err = io::Error::last_os_error(); + if err.raw_os_error() != Some(ERROR_IO_PENDING as i32) { + return Err(err); + } + } + + let mut bytes = 0; + let res = GetOverlappedResult(self.handle.raw(), overlapped, &mut bytes, 0); + if res == 0 { + let err = io::Error::last_os_error(); + if err.raw_os_error() == Some(ERROR_IO_INCOMPLETE as i32) { + Ok(None) + } else { + Err(err) + } + } else { + Ok(Some(bytes as usize)) + } + } + + /// Issues an overlapped write operation to occur on this pipe. + /// + /// This function will issue an asynchronous write to occur in an overlapped + /// fashion, returning immediately. The `buf` provided will be filled in + /// with data and the request is tracked by the `overlapped` function + /// provided. + /// + /// If the operation succeeds immediately, `Ok(Some(n))` is returned where + /// `n` is the number of bytes written. 
If an asynchronous operation is + /// enqueued, then `Ok(None)` is returned. Otherwise if an error occurred + /// it is returned. + /// + /// When this operation completes (or if it completes immediately), another + /// mechanism must be used to learn how many bytes were transferred (such as + /// looking at the filed in the IOCP status message). + /// + /// # Unsafety + /// + /// This function is unsafe because the kernel requires that the `buf` and + /// `overlapped` pointers to be valid until the end of the I/O operation. + /// The kernel also requires that `overlapped` is unique for this I/O + /// operation and is not in use for any other I/O. + /// + /// To safely use this function callers must ensure that the pointers are + /// valid until the I/O operation is completed, typically via completion + /// ports and waiting to receive the completion notification on the port. + pub unsafe fn write_overlapped( + &self, + buf: &[u8], + overlapped: *mut OVERLAPPED, + ) -> io::Result<Option<usize>> { + let len = std::cmp::min(buf.len(), u32::MAX as usize) as u32; + let res = WriteFile( + self.handle.raw(), + buf.as_ptr() as *const _, + len, + std::ptr::null_mut(), + overlapped, + ); + if res == 0 { + let err = io::Error::last_os_error(); + if err.raw_os_error() != Some(ERROR_IO_PENDING as i32) { + return Err(err); + } + } + + let mut bytes = 0; + let res = GetOverlappedResult(self.handle.raw(), overlapped, &mut bytes, 0); + if res == 0 { + let err = io::Error::last_os_error(); + if err.raw_os_error() == Some(ERROR_IO_INCOMPLETE as i32) { + Ok(None) + } else { + Err(err) + } + } else { + Ok(Some(bytes as usize)) + } + } + + /// Calls the `GetOverlappedResult` function to get the result of an + /// overlapped operation for this handle. + /// + /// This function takes the `OVERLAPPED` argument which must have been used + /// to initiate an overlapped I/O operation, and returns either the + /// successful number of bytes transferred during the operation or an error + /// if one occurred. + /// + /// # Unsafety + /// + /// This function is unsafe as `overlapped` must have previously been used + /// to execute an operation for this handle, and it must also be a valid + /// pointer to an `Overlapped` instance. 
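+    /// The call passes `bWait = FALSE` (the final `0`), so it never blocks; a
+    /// still-pending operation surfaces as an `ERROR_IO_INCOMPLETE` error.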
+ #[inline] + unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result<usize> { + let mut transferred = 0; + let r = GetOverlappedResult(self.handle.raw(), overlapped, &mut transferred, 0); + if r == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(transferred as usize) + } + } +} + +#[test] +fn ptr_from() { + use std::mem::ManuallyDrop; + use std::ptr; + + let pipe = unsafe { ManuallyDrop::new(NamedPipe::from_raw_handle(ptr::null_mut())) }; + let inner: &Inner = &pipe.inner; + assert_eq!( + inner as *const Inner, + unsafe { Inner::ptr_from_conn_overlapped(&inner.connect as *const _ as *mut OVERLAPPED) }, + "`ptr_from_conn_overlapped` incorrect" + ); + assert_eq!( + inner as *const Inner, + unsafe { Inner::ptr_from_read_overlapped(&inner.read as *const _ as *mut OVERLAPPED) }, + "`ptr_from_read_overlapped` incorrect" + ); + assert_eq!( + inner as *const Inner, + unsafe { Inner::ptr_from_write_overlapped(&inner.write as *const _ as *mut OVERLAPPED) }, + "`ptr_from_write_overlapped` incorrect" + ); +} + +struct Io { + // Uniquely identifies the selector associated with this named pipe + cp: Option<Arc<CompletionPort>>, + // Token used to identify events + token: Option<Token>, + read: State, + write: State, + connect_error: Option<io::Error>, +} + +#[derive(Debug)] +enum State { + None, + Pending(Vec<u8>, usize), + Ok(Vec<u8>, usize), + Err(io::Error), +} + +// Odd tokens are for named pipes +static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(1); + +fn would_block() -> io::Error { + io::ErrorKind::WouldBlock.into() +} + +impl NamedPipe { + /// Creates a new named pipe at the specified `addr` given a "reasonable + /// set" of initial configuration options. + pub fn new<A: AsRef<OsStr>>(addr: A) -> io::Result<NamedPipe> { + use std::os::windows::ffi::OsStrExt; + let name: Vec<_> = addr.as_ref().encode_wide().chain(Some(0)).collect(); + + // Safety: syscall + let h = unsafe { + CreateNamedPipeW( + name.as_ptr(), + PIPE_ACCESS_DUPLEX | FILE_FLAG_FIRST_PIPE_INSTANCE | FILE_FLAG_OVERLAPPED, + PIPE_TYPE_BYTE, + PIPE_UNLIMITED_INSTANCES, + 65536, + 65536, + 0, + std::ptr::null_mut(), + ) + }; + + if h == INVALID_HANDLE_VALUE { + Err(io::Error::last_os_error()) + } else { + // Safety: nothing actually unsafe about this. The trait fn includes + // `unsafe`. + Ok(unsafe { Self::from_raw_handle(h as RawHandle) }) + } + } + + /// Attempts to call `ConnectNamedPipe`, if possible. + /// + /// This function will attempt to connect this pipe to a client in an + /// asynchronous fashion. If the function immediately establishes a + /// connection to a client then `Ok(())` is returned. Otherwise if a + /// connection attempt was issued and is now in progress then a "would + /// block" error is returned. + /// + /// When the connection is finished then this object will be flagged as + /// being ready for a write, or otherwise in the writable state. + /// + /// # Errors + /// + /// This function will return a "would block" error if the pipe has not yet + /// been registered with an event loop, if the connection operation has + /// previously been issued but has not yet completed, or if the connect + /// itself was issued and didn't finish immediately. + /// + /// Normal I/O errors from the call to `ConnectNamedPipe` are returned + /// immediately. + pub fn connect(&self) -> io::Result<()> { + // "Acquire the connecting lock" or otherwise just make sure we're the + // only operation that's using the `connect` overlapped instance. 
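+        // `swap` returns the previous value, so `true` here means another
+        // connect is already in flight and we bail out with `WouldBlock`.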
+ if self.inner.connecting.swap(true, SeqCst) { + return Err(would_block()); + } + + // Now that we've flagged ourselves in the connecting state, issue the + // connection attempt. Afterwards interpret the return value and set + // internal state accordingly. + let res = unsafe { + let overlapped = self.inner.connect.as_ptr() as *mut _; + self.inner.connect_overlapped(overlapped) + }; + + match res { + // The connection operation finished immediately, so let's schedule + // reads/writes and such. + Ok(true) => { + self.inner.connecting.store(false, SeqCst); + Inner::post_register(&self.inner, None); + Ok(()) + } + + // If the overlapped operation was successful and didn't finish + // immediately then we forget a copy of the arc we hold + // internally. This ensures that when the completion status comes + // in for the I/O operation finishing it'll have a reference + // associated with it and our data will still be valid. The + // `connect_done` function will "reify" this forgotten pointer to + // drop the refcount on the other side. + Ok(false) => { + mem::forget(self.inner.clone()); + Err(would_block()) + } + + Err(e) => { + self.inner.connecting.store(false, SeqCst); + Err(e) + } + } + } + + /// Takes any internal error that has happened after the last I/O operation + /// which hasn't been retrieved yet. + /// + /// This is particularly useful when detecting failed attempts to `connect`. + /// After a completed `connect` flags this pipe as writable then callers + /// must invoke this method to determine whether the connection actually + /// succeeded. If this function returns `None` then a client is connected, + /// otherwise it returns an error of what happened and a client shouldn't be + /// connected. + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + Ok(self.inner.io.lock().unwrap().connect_error.take()) + } + + /// Disconnects this named pipe from a connected client. + /// + /// This function will disconnect the pipe from a connected client, if any, + /// transitively calling the `DisconnectNamedPipe` function. + /// + /// After a `disconnect` is issued, then a `connect` may be called again to + /// connect to another client. + pub fn disconnect(&self) -> io::Result<()> { + self.inner.disconnect() + } +} + +impl FromRawHandle for NamedPipe { + unsafe fn from_raw_handle(handle: RawHandle) -> NamedPipe { + NamedPipe { + inner: Arc::new(Inner { + handle: Handle::new(handle as HANDLE), + connect: Overlapped::new(connect_done), + connecting: AtomicBool::new(false), + read: Overlapped::new(read_done), + write: Overlapped::new(write_done), + io: Mutex::new(Io { + cp: None, + token: None, + read: State::None, + write: State::None, + connect_error: None, + }), + pool: Mutex::new(BufferPool::with_capacity(2)), + }), + } + } +} + +impl Read for NamedPipe { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + <&NamedPipe as Read>::read(&mut &*self, buf) + } +} + +impl Write for NamedPipe { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + <&NamedPipe as Write>::write(&mut &*self, buf) + } + + fn flush(&mut self) -> io::Result<()> { + <&NamedPipe as Write>::flush(&mut &*self) + } +} + +impl<'a> Read for &'a NamedPipe { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + let mut state = self.inner.io.lock().unwrap(); + + if state.token.is_none() { + return Err(would_block()); + } + + match mem::replace(&mut state.read, State::None) { + // In theory not possible with `token` checked above, + // but return would block for now. 
+ State::None => Err(would_block()), + + // A read is in flight, still waiting for it to finish + State::Pending(buf, amt) => { + state.read = State::Pending(buf, amt); + Err(would_block()) + } + + // We previously read something into `data`, try to copy out some + // data. If we copy out all the data schedule a new read and + // otherwise store the buffer to get read later. + State::Ok(data, cur) => { + let n = { + let mut remaining = &data[cur..]; + remaining.read(buf)? + }; + let next = cur + n; + if next != data.len() { + state.read = State::Ok(data, next); + } else { + self.inner.put_buffer(data); + Inner::schedule_read(&self.inner, &mut state, None); + } + Ok(n) + } + + // Looks like an in-flight read hit an error, return that here while + // we schedule a new one. + State::Err(e) => { + Inner::schedule_read(&self.inner, &mut state, None); + if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) { + Ok(0) + } else { + Err(e) + } + } + } + } +} + +impl<'a> Write for &'a NamedPipe { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + // Make sure there's no writes pending + let mut io = self.inner.io.lock().unwrap(); + + if io.token.is_none() { + return Err(would_block()); + } + + match io.write { + State::None => {} + State::Err(_) => match mem::replace(&mut io.write, State::None) { + State::Err(e) => return Err(e), + // `io` is locked, so this branch is unreachable + _ => unreachable!(), + }, + // any other state should be handled in `write_done` + _ => { + return Err(would_block()); + } + } + + // Move `buf` onto the heap and fire off the write + let mut owned_buf = self.inner.get_buffer(); + owned_buf.extend(buf); + match Inner::maybe_schedule_write(&self.inner, owned_buf, 0, &mut io)? { + // Some bytes are written immediately + Some(n) => Ok(n), + // Write operation is anqueued for whole buffer + None => Ok(buf.len()), + } + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Source for NamedPipe { + fn register(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> { + let mut io = self.inner.io.lock().unwrap(); + + io.check_association(registry, false)?; + + if io.token.is_some() { + return Err(io::Error::new( + io::ErrorKind::AlreadyExists, + "I/O source already registered with a `Registry`", + )); + } + + if io.cp.is_none() { + let selector = registry.selector(); + + io.cp = Some(selector.clone_port()); + + let inner_token = NEXT_TOKEN.fetch_add(2, Relaxed) + 2; + selector.inner.cp.add_handle(inner_token, self)?; + } + + io.token = Some(token); + drop(io); + + Inner::post_register(&self.inner, None); + + Ok(()) + } + + fn reregister(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> { + let mut io = self.inner.io.lock().unwrap(); + + io.check_association(registry, true)?; + + io.token = Some(token); + drop(io); + + Inner::post_register(&self.inner, None); + + Ok(()) + } + + fn deregister(&mut self, registry: &Registry) -> io::Result<()> { + let mut io = self.inner.io.lock().unwrap(); + + io.check_association(registry, true)?; + + if io.token.is_none() { + return Err(io::Error::new( + io::ErrorKind::NotFound, + "I/O source not registered with `Registry`", + )); + } + + io.token = None; + Ok(()) + } +} + +impl AsRawHandle for NamedPipe { + fn as_raw_handle(&self) -> RawHandle { + self.inner.handle.raw() as RawHandle + } +} + +impl fmt::Debug for NamedPipe { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.handle.fmt(f) + } +} + +impl Drop for NamedPipe { + fn drop(&mut self) { + // 
Cancel pending reads/connects, but don't cancel writes to ensure that + // everything is flushed out. + unsafe { + if self.inner.connecting.load(SeqCst) { + drop(cancel(&self.inner.handle, &self.inner.connect)); + } + + let io = self.inner.io.lock().unwrap(); + if let State::Pending(..) = io.read { + drop(cancel(&self.inner.handle, &self.inner.read)); + } + } + } +} + +impl Inner { + /// Schedules a read to happen in the background, executing an overlapped + /// operation. + /// + /// This function returns `true` if a normal error happens or if the read + /// is scheduled in the background. If the pipe is no longer connected + /// (ERROR_PIPE_LISTENING) then `false` is returned and no read is + /// scheduled. + fn schedule_read(me: &Arc<Inner>, io: &mut Io, events: Option<&mut Vec<Event>>) -> bool { + // Check to see if a read is already scheduled/completed + match io.read { + State::None => {} + _ => return true, + } + + // Allocate a buffer and schedule the read. + let mut buf = me.get_buffer(); + let e = unsafe { + let overlapped = me.read.as_ptr() as *mut _; + let slice = slice::from_raw_parts_mut(buf.as_mut_ptr(), buf.capacity()); + me.read_overlapped(slice, overlapped) + }; + + match e { + // See `NamedPipe::connect` above for the rationale behind `forget` + Ok(_) => { + io.read = State::Pending(buf, 0); // 0 is ignored on read side + mem::forget(me.clone()); + true + } + + // If ERROR_PIPE_LISTENING happens then it's not a real read error, + // we just need to wait for a connect. + Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_LISTENING as i32) => false, + + // If some other error happened, though, we're now readable to give + // out the error. + Err(e) => { + io.read = State::Err(e); + io.notify_readable(events); + true + } + } + } + + /// Maybe schedules overlapped write operation. + /// + /// * `None` means that overlapped operation was enqueued + /// * `Some(n)` means that `n` bytes was immediately written. + /// Note, that `write_done` will fire anyway to clean up the state. + fn maybe_schedule_write( + me: &Arc<Inner>, + buf: Vec<u8>, + pos: usize, + io: &mut Io, + ) -> io::Result<Option<usize>> { + // Very similar to `schedule_read` above, just done for the write half. 
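+        // On success ownership of `buf` moves into `io.write`; `write_done`
+        // later recycles it into the buffer pool or schedules the remainder.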
+ let e = unsafe { + let overlapped = me.write.as_ptr() as *mut _; + me.write_overlapped(&buf[pos..], overlapped) + }; + + // See `connect` above for the rationale behind `forget` + match e { + // `n` bytes are written immediately + Ok(Some(n)) => { + io.write = State::Ok(buf, pos); + mem::forget(me.clone()); + Ok(Some(n)) + } + // write operation is enqueued + Ok(None) => { + io.write = State::Pending(buf, pos); + mem::forget(me.clone()); + Ok(None) + } + Err(e) => Err(e), + } + } + + fn schedule_write( + me: &Arc<Inner>, + buf: Vec<u8>, + pos: usize, + io: &mut Io, + events: Option<&mut Vec<Event>>, + ) { + match Inner::maybe_schedule_write(me, buf, pos, io) { + Ok(Some(_)) => { + // immediate result will be handled in `write_done`, + // so we'll reinterpret the `Ok` state + let state = mem::replace(&mut io.write, State::None); + io.write = match state { + State::Ok(buf, pos) => State::Pending(buf, pos), + // io is locked, so this branch is unreachable + _ => unreachable!(), + }; + mem::forget(me.clone()); + } + Ok(None) => (), + Err(e) => { + io.write = State::Err(e); + io.notify_writable(events); + } + } + } + + fn post_register(me: &Arc<Inner>, mut events: Option<&mut Vec<Event>>) { + let mut io = me.io.lock().unwrap(); + #[allow(clippy::needless_option_as_deref)] + if Inner::schedule_read(me, &mut io, events.as_deref_mut()) { + if let State::None = io.write { + io.notify_writable(events); + } + } + } + + fn get_buffer(&self) -> Vec<u8> { + self.pool.lock().unwrap().get(4 * 1024) + } + + fn put_buffer(&self, buf: Vec<u8>) { + self.pool.lock().unwrap().put(buf) + } +} + +unsafe fn cancel(handle: &Handle, overlapped: &Overlapped) -> io::Result<()> { + let ret = CancelIoEx(handle.raw(), overlapped.as_ptr()); + // `CancelIoEx` returns 0 on error: + // https://docs.microsoft.com/en-us/windows/win32/fileio/cancelioex-func + if ret == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } +} + +fn connect_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) { + let status = CompletionStatus::from_entry(status); + + // Acquire the `Arc<Inner>`. Note that we should be guaranteed that + // the refcount is available to us due to the `mem::forget` in + // `connect` above. + let me = unsafe { Arc::from_raw(Inner::ptr_from_conn_overlapped(status.overlapped())) }; + + // Flag ourselves as no longer using the `connect` overlapped instances. + let prev = me.connecting.swap(false, SeqCst); + assert!(prev, "NamedPipe was not previously connecting"); + + // Stash away our connect error if one happened + debug_assert_eq!(status.bytes_transferred(), 0); + unsafe { + match me.result(status.overlapped()) { + Ok(n) => debug_assert_eq!(n, 0), + Err(e) => me.io.lock().unwrap().connect_error = Some(e), + } + } + + // We essentially just finished a registration, so kick off a + // read and register write readiness. + Inner::post_register(&me, events); +} + +fn read_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) { + let status = CompletionStatus::from_entry(status); + + // Acquire the `FromRawArc<Inner>`. Note that we should be guaranteed that + // the refcount is available to us due to the `mem::forget` in + // `schedule_read` above. + let me = unsafe { Arc::from_raw(Inner::ptr_from_read_overlapped(status.overlapped())) }; + + // Move from the `Pending` to `Ok` state. 
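+    // `bytes_transferred` reports how much the kernel wrote into `buf`, which
+    // is what makes the `set_len` call below sound.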
+ let mut io = me.io.lock().unwrap(); + let mut buf = match mem::replace(&mut io.read, State::None) { + State::Pending(buf, _) => buf, + _ => unreachable!(), + }; + unsafe { + match me.result(status.overlapped()) { + Ok(n) => { + debug_assert_eq!(status.bytes_transferred() as usize, n); + buf.set_len(status.bytes_transferred() as usize); + io.read = State::Ok(buf, 0); + } + Err(e) => { + debug_assert_eq!(status.bytes_transferred(), 0); + io.read = State::Err(e); + } + } + } + + // Flag our readiness that we've got data. + io.notify_readable(events); +} + +fn write_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) { + let status = CompletionStatus::from_entry(status); + + // Acquire the `Arc<Inner>`. Note that we should be guaranteed that + // the refcount is available to us due to the `mem::forget` in + // `schedule_write` above. + let me = unsafe { Arc::from_raw(Inner::ptr_from_write_overlapped(status.overlapped())) }; + + // Make the state change out of `Pending`. If we wrote the entire buffer + // then we're writable again and otherwise we schedule another write. + let mut io = me.io.lock().unwrap(); + let (buf, pos) = match mem::replace(&mut io.write, State::None) { + // `Ok` here means, that the operation was completed immediately + // `bytes_transferred` is already reported to a client + State::Ok(..) => { + io.notify_writable(events); + return; + } + State::Pending(buf, pos) => (buf, pos), + _ => unreachable!(), + }; + + unsafe { + match me.result(status.overlapped()) { + Ok(n) => { + debug_assert_eq!(status.bytes_transferred() as usize, n); + let new_pos = pos + (status.bytes_transferred() as usize); + if new_pos == buf.len() { + me.put_buffer(buf); + io.notify_writable(events); + } else { + Inner::schedule_write(&me, buf, new_pos, &mut io, events); + } + } + Err(e) => { + debug_assert_eq!(status.bytes_transferred(), 0); + io.write = State::Err(e); + io.notify_writable(events); + } + } + } +} + +impl Io { + fn check_association(&self, registry: &Registry, required: bool) -> io::Result<()> { + match self.cp { + Some(ref cp) if !registry.selector().same_port(cp) => Err(io::Error::new( + io::ErrorKind::AlreadyExists, + "I/O source already registered with a different `Registry`", + )), + None if required => Err(io::Error::new( + io::ErrorKind::NotFound, + "I/O source not registered with `Registry`", + )), + _ => Ok(()), + } + } + + fn notify_readable(&self, events: Option<&mut Vec<Event>>) { + if let Some(token) = self.token { + let mut ev = Event::new(token); + ev.set_readable(); + + if let Some(events) = events { + events.push(ev); + } else { + let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status()); + } + } + } + + fn notify_writable(&self, events: Option<&mut Vec<Event>>) { + if let Some(token) = self.token { + let mut ev = Event::new(token); + ev.set_writable(); + + if let Some(events) = events { + events.push(ev); + } else { + let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status()); + } + } + } +} + +struct BufferPool { + pool: Vec<Vec<u8>>, +} + +impl BufferPool { + fn with_capacity(cap: usize) -> BufferPool { + BufferPool { + pool: Vec::with_capacity(cap), + } + } + + fn get(&mut self, default_cap: usize) -> Vec<u8> { + self.pool + .pop() + .unwrap_or_else(|| Vec::with_capacity(default_cap)) + } + + fn put(&mut self, mut buf: Vec<u8>) { + if self.pool.len() < self.pool.capacity() { + unsafe { + buf.set_len(0); + } + self.pool.push(buf); + } + } +} diff --git a/third_party/rust/mio/src/sys/windows/net.rs 
b/third_party/rust/mio/src/sys/windows/net.rs new file mode 100644 index 0000000000..5cc235335e --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/net.rs @@ -0,0 +1,111 @@ +use std::io; +use std::mem; +use std::net::SocketAddr; +use std::sync::Once; + +use windows_sys::Win32::Networking::WinSock::{ + closesocket, ioctlsocket, socket, AF_INET, AF_INET6, FIONBIO, IN6_ADDR, IN6_ADDR_0, + INVALID_SOCKET, IN_ADDR, IN_ADDR_0, SOCKADDR, SOCKADDR_IN, SOCKADDR_IN6, SOCKADDR_IN6_0, + SOCKET, +}; + +/// Initialise the network stack for Windows. +fn init() { + static INIT: Once = Once::new(); + INIT.call_once(|| { + // Let standard library call `WSAStartup` for us, we can't do it + // ourselves because otherwise using any type in `std::net` would panic + // when it tries to call `WSAStartup` a second time. + drop(std::net::UdpSocket::bind("127.0.0.1:0")); + }); +} + +/// Create a new non-blocking socket. +pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: i32) -> io::Result<SOCKET> { + let domain = match addr { + SocketAddr::V4(..) => AF_INET, + SocketAddr::V6(..) => AF_INET6, + }; + + new_socket(domain.into(), socket_type) +} + +pub(crate) fn new_socket(domain: u32, socket_type: i32) -> io::Result<SOCKET> { + init(); + + let socket = syscall!( + socket(domain as i32, socket_type, 0), + PartialEq::eq, + INVALID_SOCKET + )?; + + if let Err(err) = syscall!(ioctlsocket(socket, FIONBIO, &mut 1), PartialEq::ne, 0) { + let _ = unsafe { closesocket(socket) }; + return Err(err); + } + + Ok(socket as SOCKET) +} + +/// A type with the same memory layout as `SOCKADDR`. Used in converting Rust level +/// SocketAddr* types into their system representation. The benefit of this specific +/// type over using `SOCKADDR_STORAGE` is that this type is exactly as large as it +/// needs to be and not a lot larger. And it can be initialized cleaner from Rust. +#[repr(C)] +pub(crate) union SocketAddrCRepr { + v4: SOCKADDR_IN, + v6: SOCKADDR_IN6, +} + +impl SocketAddrCRepr { + pub(crate) fn as_ptr(&self) -> *const SOCKADDR { + self as *const _ as *const SOCKADDR + } +} + +pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, i32) { + match addr { + SocketAddr::V4(ref addr) => { + // `s_addr` is stored as BE on all machine and the array is in BE order. + // So the native endian conversion method is used so that it's never swapped. 
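+ // For example 192.0.2.1 gives octets [192, 0, 2, 1]; `from_ne_bytes`
+ // reinterprets those bytes without any swap, which is exactly the
+ // network-byte-order layout `s_addr` expects on any host.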
+ let sin_addr = unsafe { + let mut s_un = mem::zeroed::<IN_ADDR_0>(); + s_un.S_addr = u32::from_ne_bytes(addr.ip().octets()); + IN_ADDR { S_un: s_un } + }; + + let sockaddr_in = SOCKADDR_IN { + sin_family: AF_INET as u16, // 1 + sin_port: addr.port().to_be(), + sin_addr, + sin_zero: [0; 8], + }; + + let sockaddr = SocketAddrCRepr { v4: sockaddr_in }; + (sockaddr, mem::size_of::<SOCKADDR_IN>() as i32) + } + SocketAddr::V6(ref addr) => { + let sin6_addr = unsafe { + let mut u = mem::zeroed::<IN6_ADDR_0>(); + u.Byte = addr.ip().octets(); + IN6_ADDR { u } + }; + let u = unsafe { + let mut u = mem::zeroed::<SOCKADDR_IN6_0>(); + u.sin6_scope_id = addr.scope_id(); + u + }; + + let sockaddr_in6 = SOCKADDR_IN6 { + sin6_family: AF_INET6 as u16, // 23 + sin6_port: addr.port().to_be(), + sin6_addr, + sin6_flowinfo: addr.flowinfo(), + Anonymous: u, + }; + + let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 }; + (sockaddr, mem::size_of::<SOCKADDR_IN6>() as i32) + } + } +} diff --git a/third_party/rust/mio/src/sys/windows/overlapped.rs b/third_party/rust/mio/src/sys/windows/overlapped.rs new file mode 100644 index 0000000000..d1456ded40 --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/overlapped.rs @@ -0,0 +1,35 @@ +use crate::sys::windows::Event; + +use std::cell::UnsafeCell; +use std::fmt; + +use windows_sys::Win32::System::IO::{OVERLAPPED, OVERLAPPED_ENTRY}; + +#[repr(C)] +pub(crate) struct Overlapped { + inner: UnsafeCell<OVERLAPPED>, + pub(crate) callback: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>), +} + +#[cfg(feature = "os-ext")] +impl Overlapped { + pub(crate) fn new(cb: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>)) -> Overlapped { + Overlapped { + inner: UnsafeCell::new(unsafe { std::mem::zeroed() }), + callback: cb, + } + } + + pub(crate) fn as_ptr(&self) -> *const OVERLAPPED { + self.inner.get() + } +} + +impl fmt::Debug for Overlapped { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Overlapped").finish() + } +} + +unsafe impl Send for Overlapped {} +unsafe impl Sync for Overlapped {} diff --git a/third_party/rust/mio/src/sys/windows/selector.rs b/third_party/rust/mio/src/sys/windows/selector.rs new file mode 100644 index 0000000000..ac5152c9f8 --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/selector.rs @@ -0,0 +1,752 @@ +use super::afd::{self, Afd, AfdPollInfo}; +use super::io_status_block::IoStatusBlock; +use super::Event; +use crate::sys::Events; + +cfg_net! 
{ + use crate::sys::event::{ + ERROR_FLAGS, READABLE_FLAGS, READ_CLOSED_FLAGS, WRITABLE_FLAGS, WRITE_CLOSED_FLAGS, + }; + use crate::Interest; +} + +use super::iocp::{CompletionPort, CompletionStatus}; +use std::collections::VecDeque; +use std::ffi::c_void; +use std::io; +use std::marker::PhantomPinned; +use std::os::windows::io::RawSocket; +use std::pin::Pin; +#[cfg(debug_assertions)] +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use windows_sys::Win32::Foundation::{ + ERROR_INVALID_HANDLE, ERROR_IO_PENDING, HANDLE, STATUS_CANCELLED, WAIT_TIMEOUT, +}; +use windows_sys::Win32::System::IO::OVERLAPPED; + +#[derive(Debug)] +struct AfdGroup { + #[cfg_attr(not(feature = "net"), allow(dead_code))] + cp: Arc<CompletionPort>, + afd_group: Mutex<Vec<Arc<Afd>>>, +} + +impl AfdGroup { + pub fn new(cp: Arc<CompletionPort>) -> AfdGroup { + AfdGroup { + afd_group: Mutex::new(Vec::new()), + cp, + } + } + + pub fn release_unused_afd(&self) { + let mut afd_group = self.afd_group.lock().unwrap(); + afd_group.retain(|g| Arc::strong_count(g) > 1); + } +} + +cfg_io_source! { + const POLL_GROUP__MAX_GROUP_SIZE: usize = 32; + + impl AfdGroup { + pub fn acquire(&self) -> io::Result<Arc<Afd>> { + let mut afd_group = self.afd_group.lock().unwrap(); + if afd_group.len() == 0 { + self._alloc_afd_group(&mut afd_group)?; + } else { + // + 1 reference in Vec + if Arc::strong_count(afd_group.last().unwrap()) > POLL_GROUP__MAX_GROUP_SIZE { + self._alloc_afd_group(&mut afd_group)?; + } + } + + match afd_group.last() { + Some(arc) => Ok(arc.clone()), + None => unreachable!( + "Cannot acquire afd, {:#?}, afd_group: {:#?}", + self, afd_group + ), + } + } + + fn _alloc_afd_group(&self, afd_group: &mut Vec<Arc<Afd>>) -> io::Result<()> { + let afd = Afd::new(&self.cp)?; + let arc = Arc::new(afd); + afd_group.push(arc); + Ok(()) + } + } +} + +#[derive(Debug)] +enum SockPollStatus { + Idle, + Pending, + Cancelled, +} + +#[derive(Debug)] +pub struct SockState { + iosb: IoStatusBlock, + poll_info: AfdPollInfo, + afd: Arc<Afd>, + + base_socket: RawSocket, + + user_evts: u32, + pending_evts: u32, + + user_data: u64, + + poll_status: SockPollStatus, + delete_pending: bool, + + // last raw os error + error: Option<i32>, + + _pinned: PhantomPinned, +} + +impl SockState { + fn update(&mut self, self_arc: &Pin<Arc<Mutex<SockState>>>) -> io::Result<()> { + assert!(!self.delete_pending); + + // make sure to reset previous error before a new update + self.error = None; + + if let SockPollStatus::Pending = self.poll_status { + if (self.user_evts & afd::KNOWN_EVENTS & !self.pending_evts) == 0 { + /* All the events the user is interested in are already being monitored by + * the pending poll operation. It might spuriously complete because of an + * event that we're no longer interested in; when that happens we'll submit + * a new poll operation with the updated event mask. */ + } else { + /* A poll operation is already pending, but it's not monitoring for all the + * events that the user is interested in. Therefore, cancel the pending + * poll operation; when we receive it's completion package, a new poll + * operation will be submitted with the correct event mask. */ + if let Err(e) = self.cancel() { + self.error = e.raw_os_error(); + return Err(e); + } + return Ok(()); + } + } else if let SockPollStatus::Cancelled = self.poll_status { + /* The poll operation has already been cancelled, we're still waiting for + * it to return. 
For now, there's nothing that needs to be done. */ + } else if let SockPollStatus::Idle = self.poll_status { + /* No poll operation is pending; start one. */ + self.poll_info.exclusive = 0; + self.poll_info.number_of_handles = 1; + self.poll_info.timeout = i64::MAX; + self.poll_info.handles[0].handle = self.base_socket as HANDLE; + self.poll_info.handles[0].status = 0; + self.poll_info.handles[0].events = self.user_evts | afd::POLL_LOCAL_CLOSE; + + // Increase the ref count as the memory will be used by the kernel. + let overlapped_ptr = into_overlapped(self_arc.clone()); + + let result = unsafe { + self.afd + .poll(&mut self.poll_info, &mut *self.iosb, overlapped_ptr) + }; + if let Err(e) = result { + let code = e.raw_os_error().unwrap(); + if code == ERROR_IO_PENDING as i32 { + /* Overlapped poll operation in progress; this is expected. */ + } else { + // Since the operation failed it means the kernel won't be + // using the memory any more. + drop(from_overlapped(overlapped_ptr as *mut _)); + if code == ERROR_INVALID_HANDLE as i32 { + /* Socket closed; it'll be dropped. */ + self.mark_delete(); + return Ok(()); + } else { + self.error = e.raw_os_error(); + return Err(e); + } + } + } + + self.poll_status = SockPollStatus::Pending; + self.pending_evts = self.user_evts; + } else { + unreachable!("Invalid poll status during update, {:#?}", self) + } + + Ok(()) + } + + fn cancel(&mut self) -> io::Result<()> { + match self.poll_status { + SockPollStatus::Pending => {} + _ => unreachable!("Invalid poll status during cancel, {:#?}", self), + }; + unsafe { + self.afd.cancel(&mut *self.iosb)?; + } + self.poll_status = SockPollStatus::Cancelled; + self.pending_evts = 0; + Ok(()) + } + + // This is the function called from the overlapped using as Arc<Mutex<SockState>>. Watch out for reference counting. + fn feed_event(&mut self) -> Option<Event> { + self.poll_status = SockPollStatus::Idle; + self.pending_evts = 0; + + let mut afd_events = 0; + // We use the status info in IO_STATUS_BLOCK to determine the socket poll status. It is unsafe to use a pointer of IO_STATUS_BLOCK. + unsafe { + if self.delete_pending { + return None; + } else if self.iosb.Anonymous.Status == STATUS_CANCELLED { + /* The poll request was cancelled by CancelIoEx. */ + } else if self.iosb.Anonymous.Status < 0 { + /* The overlapped request itself failed in an unexpected way. */ + afd_events = afd::POLL_CONNECT_FAIL; + } else if self.poll_info.number_of_handles < 1 { + /* This poll operation succeeded but didn't report any socket events. */ + } else if self.poll_info.handles[0].events & afd::POLL_LOCAL_CLOSE != 0 { + /* The poll operation reported that the socket was closed. */ + self.mark_delete(); + return None; + } else { + afd_events = self.poll_info.handles[0].events; + } + } + + afd_events &= self.user_evts; + + if afd_events == 0 { + return None; + } + + // In mio, we have to simulate Edge-triggered behavior to match API usage. + // The strategy here is to intercept all read/write from user that could cause WouldBlock usage, + // then reregister the socket to reset the interests. + self.user_evts &= !afd_events; + + Some(Event { + data: self.user_data, + flags: afd_events, + }) + } + + pub fn is_pending_deletion(&self) -> bool { + self.delete_pending + } + + pub fn mark_delete(&mut self) { + if !self.delete_pending { + if let SockPollStatus::Pending = self.poll_status { + drop(self.cancel()); + } + + self.delete_pending = true; + } + } + + fn has_error(&self) -> bool { + self.error.is_some() + } +} + +cfg_io_source! 
{ + impl SockState { + fn new(raw_socket: RawSocket, afd: Arc<Afd>) -> io::Result<SockState> { + Ok(SockState { + iosb: IoStatusBlock::zeroed(), + poll_info: AfdPollInfo::zeroed(), + afd, + base_socket: get_base_socket(raw_socket)?, + user_evts: 0, + pending_evts: 0, + user_data: 0, + poll_status: SockPollStatus::Idle, + delete_pending: false, + error: None, + _pinned: PhantomPinned, + }) + } + + /// True if need to be added on update queue, false otherwise. + fn set_event(&mut self, ev: Event) -> bool { + /* afd::POLL_CONNECT_FAIL and afd::POLL_ABORT are always reported, even when not requested by the caller. */ + let events = ev.flags | afd::POLL_CONNECT_FAIL | afd::POLL_ABORT; + + self.user_evts = events; + self.user_data = ev.data; + + (events & !self.pending_evts) != 0 + } + } +} + +impl Drop for SockState { + fn drop(&mut self) { + self.mark_delete(); + } +} + +/// Converts the pointer to a `SockState` into a raw pointer. +/// To revert see `from_overlapped`. +fn into_overlapped(sock_state: Pin<Arc<Mutex<SockState>>>) -> *mut c_void { + let overlapped_ptr: *const Mutex<SockState> = + unsafe { Arc::into_raw(Pin::into_inner_unchecked(sock_state)) }; + overlapped_ptr as *mut _ +} + +/// Convert a raw overlapped pointer into a reference to `SockState`. +/// Reverts `into_overlapped`. +fn from_overlapped(ptr: *mut OVERLAPPED) -> Pin<Arc<Mutex<SockState>>> { + let sock_ptr: *const Mutex<SockState> = ptr as *const _; + unsafe { Pin::new_unchecked(Arc::from_raw(sock_ptr)) } +} + +/// Each Selector has a globally unique(ish) ID associated with it. This ID +/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first +/// registered with the `Selector`. If a type that is previously associated with +/// a `Selector` attempts to register itself with a different `Selector`, the +/// operation will return with an error. This matches windows behavior. +#[cfg(debug_assertions)] +static NEXT_ID: AtomicUsize = AtomicUsize::new(0); + +/// Windows implementation of `sys::Selector` +/// +/// Edge-triggered event notification is simulated by resetting internal event flag of each socket state `SockState` +/// and setting all events back by intercepting all requests that could cause `io::ErrorKind::WouldBlock` happening. +/// +/// This selector is currently only support socket due to `Afd` driver is winsock2 specific. +#[derive(Debug)] +pub struct Selector { + #[cfg(debug_assertions)] + id: usize, + pub(super) inner: Arc<SelectorInner>, + #[cfg(debug_assertions)] + has_waker: AtomicBool, +} + +impl Selector { + pub fn new() -> io::Result<Selector> { + SelectorInner::new().map(|inner| { + #[cfg(debug_assertions)] + let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1; + Selector { + #[cfg(debug_assertions)] + id, + inner: Arc::new(inner), + #[cfg(debug_assertions)] + has_waker: AtomicBool::new(false), + } + }) + } + + pub fn try_clone(&self) -> io::Result<Selector> { + Ok(Selector { + #[cfg(debug_assertions)] + id: self.id, + inner: Arc::clone(&self.inner), + #[cfg(debug_assertions)] + has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)), + }) + } + + /// # Safety + /// + /// This requires a mutable reference to self because only a single thread + /// can poll IOCP at a time. 
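+ /// `SelectorInner::select2` backs this up with an `is_polling` flag and
+ /// asserts if two threads ever race into a poll at the same time.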
+ pub fn select(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> { + self.inner.select(events, timeout) + } + + #[cfg(debug_assertions)] + pub fn register_waker(&self) -> bool { + self.has_waker.swap(true, Ordering::AcqRel) + } + + pub(super) fn clone_port(&self) -> Arc<CompletionPort> { + self.inner.cp.clone() + } + + #[cfg(feature = "os-ext")] + pub(super) fn same_port(&self, other: &Arc<CompletionPort>) -> bool { + Arc::ptr_eq(&self.inner.cp, other) + } +} + +cfg_io_source! { + use super::InternalState; + use crate::Token; + + impl Selector { + pub(super) fn register( + &self, + socket: RawSocket, + token: Token, + interests: Interest, + ) -> io::Result<InternalState> { + SelectorInner::register(&self.inner, socket, token, interests) + } + + pub(super) fn reregister( + &self, + state: Pin<Arc<Mutex<SockState>>>, + token: Token, + interests: Interest, + ) -> io::Result<()> { + self.inner.reregister(state, token, interests) + } + + #[cfg(debug_assertions)] + pub fn id(&self) -> usize { + self.id + } + } +} + +#[derive(Debug)] +pub struct SelectorInner { + pub(super) cp: Arc<CompletionPort>, + update_queue: Mutex<VecDeque<Pin<Arc<Mutex<SockState>>>>>, + afd_group: AfdGroup, + is_polling: AtomicBool, +} + +// We have ensured thread safety by introducing lock manually. +unsafe impl Sync for SelectorInner {} + +impl SelectorInner { + pub fn new() -> io::Result<SelectorInner> { + CompletionPort::new(0).map(|cp| { + let cp = Arc::new(cp); + let cp_afd = Arc::clone(&cp); + + SelectorInner { + cp, + update_queue: Mutex::new(VecDeque::new()), + afd_group: AfdGroup::new(cp_afd), + is_polling: AtomicBool::new(false), + } + }) + } + + /// # Safety + /// + /// May only be calling via `Selector::select`. + pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> { + events.clear(); + + if timeout.is_none() { + loop { + let len = self.select2(&mut events.statuses, &mut events.events, None)?; + if len == 0 { + continue; + } + break Ok(()); + } + } else { + self.select2(&mut events.statuses, &mut events.events, timeout)?; + Ok(()) + } + } + + pub fn select2( + &self, + statuses: &mut [CompletionStatus], + events: &mut Vec<Event>, + timeout: Option<Duration>, + ) -> io::Result<usize> { + assert!(!self.is_polling.swap(true, Ordering::AcqRel)); + + unsafe { self.update_sockets_events() }?; + + let result = self.cp.get_many(statuses, timeout); + + self.is_polling.store(false, Ordering::Relaxed); + + match result { + Ok(iocp_events) => Ok(unsafe { self.feed_events(events, iocp_events) }), + Err(ref e) if e.raw_os_error() == Some(WAIT_TIMEOUT as i32) => Ok(0), + Err(e) => Err(e), + } + } + + unsafe fn update_sockets_events(&self) -> io::Result<()> { + let mut update_queue = self.update_queue.lock().unwrap(); + for sock in update_queue.iter_mut() { + let mut sock_internal = sock.lock().unwrap(); + if !sock_internal.is_pending_deletion() { + sock_internal.update(sock)?; + } + } + + // remove all sock which do not have error, they have afd op pending + update_queue.retain(|sock| sock.lock().unwrap().has_error()); + + self.afd_group.release_unused_afd(); + Ok(()) + } + + // It returns processed count of iocp_events rather than the events itself. 
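+ // Three kinds of completion statuses can show up here:
+ // * a null `OVERLAPPED` pointer: the status was posted directly (e.g. by
+ //   `Waker::wake` or a named pipe's `notify_readable`/`notify_writable`)
+ //   and already encodes the final event;
+ // * a non-null pointer with an odd token: a named pipe's own overlapped
+ //   operation, dispatched through its `Overlapped::callback`;
+ // * everything else: an AFD poll completion whose pointer is the
+ //   `Arc<Mutex<SockState>>` leaked by `into_overlapped`.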
+ unsafe fn feed_events( + &self, + events: &mut Vec<Event>, + iocp_events: &[CompletionStatus], + ) -> usize { + let mut n = 0; + let mut update_queue = self.update_queue.lock().unwrap(); + for iocp_event in iocp_events.iter() { + if iocp_event.overlapped().is_null() { + events.push(Event::from_completion_status(iocp_event)); + n += 1; + continue; + } else if iocp_event.token() % 2 == 1 { + // Handle is a named pipe. This could be extended to be any non-AFD event. + let callback = (*(iocp_event.overlapped() as *mut super::Overlapped)).callback; + + let len = events.len(); + callback(iocp_event.entry(), Some(events)); + n += events.len() - len; + continue; + } + + let sock_state = from_overlapped(iocp_event.overlapped()); + let mut sock_guard = sock_state.lock().unwrap(); + if let Some(e) = sock_guard.feed_event() { + events.push(e); + n += 1; + } + + if !sock_guard.is_pending_deletion() { + update_queue.push_back(sock_state.clone()); + } + } + self.afd_group.release_unused_afd(); + n + } +} + +cfg_io_source! { + use std::mem::size_of; + use std::ptr::null_mut; + + use windows_sys::Win32::Networking::WinSock::{ + WSAGetLastError, WSAIoctl, SIO_BASE_HANDLE, SIO_BSP_HANDLE, + SIO_BSP_HANDLE_POLL, SIO_BSP_HANDLE_SELECT, SOCKET_ERROR, + }; + + + impl SelectorInner { + fn register( + this: &Arc<Self>, + socket: RawSocket, + token: Token, + interests: Interest, + ) -> io::Result<InternalState> { + let flags = interests_to_afd_flags(interests); + + let sock = { + let sock = this._alloc_sock_for_rawsocket(socket)?; + let event = Event { + flags, + data: token.0 as u64, + }; + sock.lock().unwrap().set_event(event); + sock + }; + + let state = InternalState { + selector: this.clone(), + token, + interests, + sock_state: sock.clone(), + }; + + this.queue_state(sock); + unsafe { this.update_sockets_events_if_polling()? }; + + Ok(state) + } + + // Directly accessed in `IoSourceState::do_io`. + pub(super) fn reregister( + &self, + state: Pin<Arc<Mutex<SockState>>>, + token: Token, + interests: Interest, + ) -> io::Result<()> { + { + let event = Event { + flags: interests_to_afd_flags(interests), + data: token.0 as u64, + }; + + state.lock().unwrap().set_event(event); + } + + // FIXME: a sock which has_error true should not be re-added to + // the update queue because it's already there. + self.queue_state(state); + unsafe { self.update_sockets_events_if_polling() } + } + + /// This function is called by register() and reregister() to start an + /// IOCTL_AFD_POLL operation corresponding to the registered events, but + /// only if necessary. + /// + /// Since it is not possible to modify or synchronously cancel an AFD_POLL + /// operation, and there can be only one active AFD_POLL operation per + /// (socket, completion port) pair at any time, it is expensive to change + /// a socket's event registration after it has been submitted to the kernel. + /// + /// Therefore, if no other threads are polling when interest in a socket + /// event is (re)registered, the socket is added to the 'update queue', but + /// the actual syscall to start the IOCTL_AFD_POLL operation is deferred + /// until just before the GetQueuedCompletionStatusEx() syscall is made. + /// + /// However, when another thread is already blocked on + /// GetQueuedCompletionStatusEx() we tell the kernel about the registered + /// socket event(s) immediately. 
+ unsafe fn update_sockets_events_if_polling(&self) -> io::Result<()> { + if self.is_polling.load(Ordering::Acquire) { + self.update_sockets_events() + } else { + Ok(()) + } + } + + fn queue_state(&self, sock_state: Pin<Arc<Mutex<SockState>>>) { + let mut update_queue = self.update_queue.lock().unwrap(); + update_queue.push_back(sock_state); + } + + fn _alloc_sock_for_rawsocket( + &self, + raw_socket: RawSocket, + ) -> io::Result<Pin<Arc<Mutex<SockState>>>> { + let afd = self.afd_group.acquire()?; + Ok(Arc::pin(Mutex::new(SockState::new(raw_socket, afd)?))) + } + } + + fn try_get_base_socket(raw_socket: RawSocket, ioctl: u32) -> Result<RawSocket, i32> { + let mut base_socket: RawSocket = 0; + let mut bytes: u32 = 0; + unsafe { + if WSAIoctl( + raw_socket as usize, + ioctl, + null_mut(), + 0, + &mut base_socket as *mut _ as *mut c_void, + size_of::<RawSocket>() as u32, + &mut bytes, + null_mut(), + None, + ) != SOCKET_ERROR + { + Ok(base_socket) + } else { + Err(WSAGetLastError()) + } + } + } + + fn get_base_socket(raw_socket: RawSocket) -> io::Result<RawSocket> { + let res = try_get_base_socket(raw_socket, SIO_BASE_HANDLE); + if let Ok(base_socket) = res { + return Ok(base_socket); + } + + // The `SIO_BASE_HANDLE` should not be intercepted by LSPs, therefore + // it should not fail as long as `raw_socket` is a valid socket. See + // https://docs.microsoft.com/en-us/windows/win32/winsock/winsock-ioctls. + // However, at least one known LSP deliberately breaks it, so we try + // some alternative IOCTLs, starting with the most appropriate one. + for &ioctl in &[ + SIO_BSP_HANDLE_SELECT, + SIO_BSP_HANDLE_POLL, + SIO_BSP_HANDLE, + ] { + if let Ok(base_socket) = try_get_base_socket(raw_socket, ioctl) { + // Since we know now that we're dealing with an LSP (otherwise + // SIO_BASE_HANDLE would't have failed), only return any result + // when it is different from the original `raw_socket`. + if base_socket != raw_socket { + return Ok(base_socket); + } + } + } + + // If the alternative IOCTLs also failed, return the original error. + let os_error = res.unwrap_err(); + let err = io::Error::from_raw_os_error(os_error); + Err(err) + } +} + +impl Drop for SelectorInner { + fn drop(&mut self) { + loop { + let events_num: usize; + let mut statuses: [CompletionStatus; 1024] = [CompletionStatus::zero(); 1024]; + + let result = self + .cp + .get_many(&mut statuses, Some(std::time::Duration::from_millis(0))); + match result { + Ok(iocp_events) => { + events_num = iocp_events.iter().len(); + for iocp_event in iocp_events.iter() { + if iocp_event.overlapped().is_null() { + // Custom event + } else if iocp_event.token() % 2 == 1 { + // Named pipe, dispatch the event so it can release resources + let callback = unsafe { + (*(iocp_event.overlapped() as *mut super::Overlapped)).callback + }; + + callback(iocp_event.entry(), None); + } else { + // drain sock state to release memory of Arc reference + let _sock_state = from_overlapped(iocp_event.overlapped()); + } + } + } + + Err(_) => { + break; + } + } + + if events_num == 0 { + // continue looping until all completion statuses have been drained + break; + } + } + + self.afd_group.release_unused_afd(); + } +} + +cfg_net! 
{ + fn interests_to_afd_flags(interests: Interest) -> u32 { + let mut flags = 0; + + if interests.is_readable() { + flags |= READABLE_FLAGS | READ_CLOSED_FLAGS | ERROR_FLAGS; + } + + if interests.is_writable() { + flags |= WRITABLE_FLAGS | WRITE_CLOSED_FLAGS | ERROR_FLAGS; + } + + flags + } +} diff --git a/third_party/rust/mio/src/sys/windows/tcp.rs b/third_party/rust/mio/src/sys/windows/tcp.rs new file mode 100644 index 0000000000..addd1e8d85 --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/tcp.rs @@ -0,0 +1,66 @@ +use std::io; +use std::net::{self, SocketAddr}; +use std::os::windows::io::AsRawSocket; + +use windows_sys::Win32::Networking::WinSock::{self, SOCKET, SOCKET_ERROR, SOCK_STREAM}; + +use crate::sys::windows::net::{new_ip_socket, socket_addr}; + +pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<SOCKET> { + new_ip_socket(address, SOCK_STREAM) +} + +pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> { + use WinSock::bind; + + let (raw_addr, raw_addr_length) = socket_addr(&addr); + syscall!( + bind( + socket.as_raw_socket() as _, + raw_addr.as_ptr(), + raw_addr_length + ), + PartialEq::eq, + SOCKET_ERROR + )?; + Ok(()) +} + +pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> { + use WinSock::connect; + + let (raw_addr, raw_addr_length) = socket_addr(&addr); + let res = syscall!( + connect( + socket.as_raw_socket() as _, + raw_addr.as_ptr(), + raw_addr_length + ), + PartialEq::eq, + SOCKET_ERROR + ); + + match res { + Err(err) if err.kind() != io::ErrorKind::WouldBlock => Err(err), + _ => Ok(()), + } +} + +pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> { + use std::convert::TryInto; + use WinSock::listen; + + let backlog = backlog.try_into().unwrap_or(i32::max_value()); + syscall!( + listen(socket.as_raw_socket() as _, backlog), + PartialEq::eq, + SOCKET_ERROR + )?; + Ok(()) +} + +pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> { + // The non-blocking state of `listener` is inherited. See + // https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-accept#remarks. + listener.accept() +} diff --git a/third_party/rust/mio/src/sys/windows/udp.rs b/third_party/rust/mio/src/sys/windows/udp.rs new file mode 100644 index 0000000000..87e269fa3b --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/udp.rs @@ -0,0 +1,46 @@ +use std::io; +use std::mem::{self, MaybeUninit}; +use std::net::{self, SocketAddr}; +use std::os::windows::io::{AsRawSocket, FromRawSocket}; +use std::os::windows::raw::SOCKET as StdSocket; // windows-sys uses usize, stdlib uses u32/u64. 
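+// (`FromRawSocket::from_raw_socket` wants the std-lib socket type, while
+// windows-sys hands back a `usize`, hence the `as StdSocket` cast in `bind`
+// below.)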
+ +use crate::sys::windows::net::{new_ip_socket, socket_addr}; +use windows_sys::Win32::Networking::WinSock::{ + bind as win_bind, getsockopt, IPPROTO_IPV6, IPV6_V6ONLY, SOCKET_ERROR, SOCK_DGRAM, +}; + +pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> { + let raw_socket = new_ip_socket(addr, SOCK_DGRAM)?; + let socket = unsafe { net::UdpSocket::from_raw_socket(raw_socket as StdSocket) }; + + let (raw_addr, raw_addr_length) = socket_addr(&addr); + syscall!( + win_bind(raw_socket, raw_addr.as_ptr(), raw_addr_length), + PartialEq::eq, + SOCKET_ERROR + )?; + + Ok(socket) +} + +pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> { + let mut optval: MaybeUninit<i32> = MaybeUninit::uninit(); + let mut optlen = mem::size_of::<i32>() as i32; + + syscall!( + getsockopt( + socket.as_raw_socket() as usize, + IPPROTO_IPV6 as i32, + IPV6_V6ONLY as i32, + optval.as_mut_ptr().cast(), + &mut optlen, + ), + PartialEq::eq, + SOCKET_ERROR + )?; + + debug_assert_eq!(optlen as usize, mem::size_of::<i32>()); + // Safety: `getsockopt` initialised `optval` for us. + let optval = unsafe { optval.assume_init() }; + Ok(optval != 0) +} diff --git a/third_party/rust/mio/src/sys/windows/waker.rs b/third_party/rust/mio/src/sys/windows/waker.rs new file mode 100644 index 0000000000..103aa01a7b --- /dev/null +++ b/third_party/rust/mio/src/sys/windows/waker.rs @@ -0,0 +1,29 @@ +use crate::sys::windows::Event; +use crate::sys::windows::Selector; +use crate::Token; + +use super::iocp::CompletionPort; +use std::io; +use std::sync::Arc; + +#[derive(Debug)] +pub struct Waker { + token: Token, + port: Arc<CompletionPort>, +} + +impl Waker { + pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> { + Ok(Waker { + token, + port: selector.clone_port(), + }) + } + + pub fn wake(&self) -> io::Result<()> { + let mut ev = Event::new(self.token); + ev.set_readable(); + + self.port.post(ev.to_completion_status()) + } +} diff --git a/third_party/rust/mio/src/token.rs b/third_party/rust/mio/src/token.rs new file mode 100644 index 0000000000..91601cde0c --- /dev/null +++ b/third_party/rust/mio/src/token.rs @@ -0,0 +1,138 @@ +/// Associates readiness events with [`event::Source`]s. +/// +/// `Token` is a wrapper around `usize` and is used as an argument to +/// [`Registry::register`] and [`Registry::reregister`]. +/// +/// See [`Poll`] for more documentation on polling. +/// +/// [`event::Source`]: ./event/trait.Source.html +/// [`Poll`]: struct.Poll.html +/// [`Registry::register`]: struct.Registry.html#method.register +/// [`Registry::reregister`]: struct.Registry.html#method.reregister +/// +/// # Example +/// +/// Using `Token` to track which socket generated the event. In this example, +/// `HashMap` is used, but usually something like [`slab`] is better. +/// +/// [`slab`]: https://crates.io/crates/slab +/// +#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] +#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] +/// # use std::error::Error; +/// # fn main() -> Result<(), Box<dyn Error>> { +/// use mio::{Events, Interest, Poll, Token}; +/// use mio::net::TcpListener; +/// +/// use std::thread; +/// use std::io::{self, Read}; +/// use std::collections::HashMap; +/// +/// // After this number of sockets is accepted, the server will shutdown. +/// const MAX_SOCKETS: usize = 32; +/// +/// // Pick a token that will not be used by any other socket and use that one +/// // for the listener. 
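+/// // `Token` is only a `usize` wrapper, so any value that cannot collide with
+/// // the per-connection tokens handed out below (0..MAX_SOCKETS) will do.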
+/// const LISTENER: Token = Token(1024); +/// +/// // Used to store the sockets. +/// let mut sockets = HashMap::new(); +/// +/// // This is used to generate a unique token for a socket +/// let mut next_socket_index = 0; +/// +/// // The `Poll` instance +/// let mut poll = Poll::new()?; +/// +/// // Tcp listener +/// let mut listener = TcpListener::bind("127.0.0.1:0".parse()?)?; +/// +/// // Register the listener +/// poll.registry().register(&mut listener, LISTENER, Interest::READABLE)?; +/// +/// // Spawn a thread that will connect a bunch of sockets then close them +/// let addr = listener.local_addr()?; +/// thread::spawn(move || { +/// use std::net::TcpStream; +/// +/// // +1 here is to connect an extra socket to signal the socket to close +/// for _ in 0..(MAX_SOCKETS+1) { +/// // Connect then drop the socket +/// let _ = TcpStream::connect(addr).unwrap(); +/// } +/// }); +/// +/// // Event storage +/// let mut events = Events::with_capacity(1024); +/// +/// // Read buffer, this will never actually get filled +/// let mut buf = [0; 256]; +/// +/// // The main event loop +/// loop { +/// // Wait for events +/// poll.poll(&mut events, None)?; +/// +/// for event in &events { +/// match event.token() { +/// LISTENER => { +/// // Perform operations in a loop until `WouldBlock` is +/// // encountered. +/// loop { +/// match listener.accept() { +/// Ok((mut socket, _)) => { +/// // Shutdown the server +/// if next_socket_index == MAX_SOCKETS { +/// return Ok(()); +/// } +/// +/// // Get the token for the socket +/// let token = Token(next_socket_index); +/// next_socket_index += 1; +/// +/// // Register the new socket w/ poll +/// poll.registry().register(&mut socket, token, Interest::READABLE)?; +/// +/// // Store the socket +/// sockets.insert(token, socket); +/// } +/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { +/// // Socket is not ready anymore, stop accepting +/// break; +/// } +/// e => panic!("err={:?}", e), // Unexpected error +/// } +/// } +/// } +/// token => { +/// // Always operate in a loop +/// loop { +/// match sockets.get_mut(&token).unwrap().read(&mut buf) { +/// Ok(0) => { +/// // Socket is closed, remove it from the map +/// sockets.remove(&token); +/// break; +/// } +/// // Data is not actually sent in this example +/// Ok(_) => unreachable!(), +/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { +/// // Socket is not ready anymore, stop reading +/// break; +/// } +/// e => panic!("err={:?}", e), // Unexpected error +/// } +/// } +/// } +/// } +/// } +/// } +/// # } +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Token(pub usize); + +impl From<Token> for usize { + fn from(val: Token) -> usize { + val.0 + } +} diff --git a/third_party/rust/mio/src/waker.rs b/third_party/rust/mio/src/waker.rs new file mode 100644 index 0000000000..92fdb4c163 --- /dev/null +++ b/third_party/rust/mio/src/waker.rs @@ -0,0 +1,96 @@ +use crate::{sys, Registry, Token}; + +use std::io; + +/// Waker allows cross-thread waking of [`Poll`]. +/// +/// When created it will cause events with [`readable`] readiness and the +/// provided `token` if [`wake`] is called, possibly from another thread. +/// +/// [`Poll`]: struct.Poll.html +/// [`readable`]: ./event/struct.Event.html#method.is_readable +/// [`wake`]: struct.Waker.html#method.wake +/// +/// # Notes +/// +/// `Waker` events are only guaranteed to be delivered while the `Waker` value +/// is alive. 
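+/// This is why the example below keeps a clone of the `Arc<Waker>` alive in
+/// the spawned thread until after it has called [`wake`].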
+/// +/// Only a single `Waker` can be active per [`Poll`], if multiple threads need +/// access to the `Waker` it can be shared via for example an `Arc`. What +/// happens if multiple `Waker`s are registered with the same `Poll` is +/// unspecified. +/// +/// # Implementation notes +/// +/// On platforms that support kqueue this will use the `EVFILT_USER` event +/// filter, see [implementation notes of `Poll`] to see what platforms support +/// kqueue. On Linux it uses [eventfd]. +/// +/// [implementation notes of `Poll`]: struct.Poll.html#implementation-notes +/// [eventfd]: https://man7.org/linux/man-pages/man2/eventfd.2.html +/// +/// # Examples +/// +/// Wake a [`Poll`] instance from another thread. +/// +#[cfg_attr(feature = "os-poll", doc = "```")] +#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] +/// # fn main() -> Result<(), Box<dyn std::error::Error>> { +/// use std::thread; +/// use std::time::Duration; +/// use std::sync::Arc; +/// +/// use mio::{Events, Token, Poll, Waker}; +/// +/// const WAKE_TOKEN: Token = Token(10); +/// +/// let mut poll = Poll::new()?; +/// let mut events = Events::with_capacity(2); +/// +/// let waker = Arc::new(Waker::new(poll.registry(), WAKE_TOKEN)?); +/// +/// // We need to keep the Waker alive, so we'll create a clone for the +/// // thread we create below. +/// let waker1 = waker.clone(); +/// let handle = thread::spawn(move || { +/// // Working hard, or hardly working? +/// thread::sleep(Duration::from_millis(500)); +/// +/// // Now we'll wake the queue on the other thread. +/// waker1.wake().expect("unable to wake"); +/// }); +/// +/// // On our current thread we'll poll for events, without a timeout. +/// poll.poll(&mut events, None)?; +/// +/// // After about 500 milliseconds we should be awoken by the other thread and +/// // get a single event. +/// assert!(!events.is_empty()); +/// let waker_event = events.iter().next().unwrap(); +/// assert!(waker_event.is_readable()); +/// assert_eq!(waker_event.token(), WAKE_TOKEN); +/// # handle.join().unwrap(); +/// # Ok(()) +/// # } +/// ``` +#[derive(Debug)] +pub struct Waker { + inner: sys::Waker, +} + +impl Waker { + /// Create a new `Waker`. + pub fn new(registry: &Registry, token: Token) -> io::Result<Waker> { + #[cfg(debug_assertions)] + registry.register_waker(); + sys::Waker::new(registry.selector(), token).map(|inner| Waker { inner }) + } + + /// Wake up the [`Poll`] associated with this `Waker`. + /// + /// [`Poll`]: struct.Poll.html + pub fn wake(&self) -> io::Result<()> { + self.inner.wake() + } +} |
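The `net.rs`, `tcp.rs`, and `udp.rs` hunks above all go through a `syscall!` helper macro whose definition lives elsewhere in mio's Windows module and is not part of this diff. As a hedged, non-authoritative sketch of the pattern those call sites assume (perform the raw Winsock call, test the result against a sentinel with the supplied predicate, and surface `io::Error::last_os_error()` on failure), it could look roughly like this; `fake_winsock_call` is a made-up stand-in so the sketch compiles without Windows headers:

```rust
use std::io;

// Hedged reconstruction of the `syscall!` helper used above (the real
// definition is not part of this diff): run the raw call, compare its result
// against the sentinel error value with the given predicate, and map
// failures to the last OS error.
macro_rules! syscall {
    ($fn:ident ( $($arg:expr),* $(,)? ), $err_test:path, $err_value:expr) => {{
        let res = unsafe { $fn($($arg,)*) };
        if $err_test(&res, &$err_value) {
            Err(io::Error::last_os_error())
        } else {
            Ok(res)
        }
    }};
}

// A stand-in "system call" so the sketch runs anywhere: it returns -1 (the
// sentinel) to simulate a failing Winsock call.
unsafe fn fake_winsock_call(_flag: i32) -> i32 {
    -1
}

fn main() {
    // Mirrors the call sites above, e.g.
    // `syscall!(listen(socket, backlog), PartialEq::eq, SOCKET_ERROR)`.
    let res: io::Result<i32> = syscall!(fake_winsock_call(1), PartialEq::eq, -1);
    assert!(res.is_err());
}
```

Passing the comparison as a path (`PartialEq::eq` vs `PartialEq::ne`) lets one macro cover both conventions seen above: calls that signal failure with `SOCKET_ERROR`/`INVALID_SOCKET` and calls that must return exactly `0` to succeed.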