Diffstat (limited to 'third_party/rust/mio/src/sys/windows')
-rw-r--r--  third_party/rust/mio/src/sys/windows/afd.rs              237
-rw-r--r--  third_party/rust/mio/src/sys/windows/event.rs            162
-rw-r--r--  third_party/rust/mio/src/sys/windows/io_status_block.rs   40
-rw-r--r--  third_party/rust/mio/src/sys/windows/mod.rs              147
-rw-r--r--  third_party/rust/mio/src/sys/windows/named_pipe.rs       782
-rw-r--r--  third_party/rust/mio/src/sys/windows/net.rs              108
-rw-r--r--  third_party/rust/mio/src/sys/windows/overlapped.rs        37
-rw-r--r--  third_party/rust/mio/src/sys/windows/selector.rs         748
-rw-r--r--  third_party/rust/mio/src/sys/windows/tcp.rs               71
-rw-r--r--  third_party/rust/mio/src/sys/windows/udp.rs               53
-rw-r--r--  third_party/rust/mio/src/sys/windows/waker.rs             29
11 files changed, 2414 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/mio/src/sys/windows/afd.rs b/third_party/rust/mio/src/sys/windows/afd.rs
new file mode 100644
index 0000000000..6eae3bc035
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/afd.rs
@@ -0,0 +1,237 @@
+use ntapi::ntioapi::{IO_STATUS_BLOCK_u, IO_STATUS_BLOCK};
+use ntapi::ntioapi::{NtCancelIoFileEx, NtDeviceIoControlFile};
+use ntapi::ntrtl::RtlNtStatusToDosError;
+use std::fmt;
+use std::fs::File;
+use std::io;
+use std::mem::size_of;
+use std::os::windows::io::AsRawHandle;
+use std::ptr::null_mut;
+use winapi::shared::ntdef::{HANDLE, LARGE_INTEGER, NTSTATUS, PVOID, ULONG};
+use winapi::shared::ntstatus::{STATUS_NOT_FOUND, STATUS_PENDING, STATUS_SUCCESS};
+
+const IOCTL_AFD_POLL: ULONG = 0x00012024;
+
+/// Winsock2 AFD driver instance.
+///
+/// All operations are unsafe because the `IO_STATUS_BLOCK` parameter is used by the AFD driver while the operation is `STATUS_PENDING`, i.e. before the I/O completion port returns its result.
+#[derive(Debug)]
+pub struct Afd {
+ fd: File,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct AfdPollHandleInfo {
+ pub handle: HANDLE,
+ pub events: ULONG,
+ pub status: NTSTATUS,
+}
+
+unsafe impl Send for AfdPollHandleInfo {}
+
+#[repr(C)]
+pub struct AfdPollInfo {
+ pub timeout: LARGE_INTEGER,
+ // Can only have the value 1.
+ pub number_of_handles: ULONG,
+ pub exclusive: ULONG,
+ pub handles: [AfdPollHandleInfo; 1],
+}
+
+impl fmt::Debug for AfdPollInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AfdPollInfo").finish()
+ }
+}
+
+impl Afd {
+ /// Poll `Afd` instance with `AfdPollInfo`.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the memory of `IO_STATUS_BLOCK` is still used by the `Afd` instance while the result is `Ok(false)` (`STATUS_PENDING`).
+ /// `iosb` must not be touched after the call for as long as the operation is in flight, except by the `cancel` method.
+ /// In particular, be careful not to call `poll` again while a poll is still pending.
+ /// On error, the caller should deallocate their overlapped value to prevent a memory leak.
+ pub unsafe fn poll(
+ &self,
+ info: &mut AfdPollInfo,
+ iosb: *mut IO_STATUS_BLOCK,
+ overlapped: PVOID,
+ ) -> io::Result<bool> {
+ let info_ptr: PVOID = info as *mut _ as PVOID;
+ (*iosb).u.Status = STATUS_PENDING;
+ let status = NtDeviceIoControlFile(
+ self.fd.as_raw_handle(),
+ null_mut(),
+ None,
+ overlapped,
+ iosb,
+ IOCTL_AFD_POLL,
+ info_ptr,
+ size_of::<AfdPollInfo>() as u32,
+ info_ptr,
+ size_of::<AfdPollInfo>() as u32,
+ );
+ match status {
+ STATUS_SUCCESS => Ok(true),
+ STATUS_PENDING => Ok(false),
+ _ => Err(io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ )),
+ }
+ }
+
+ /// Cancel a previously polled request on this `Afd` instance.
+ ///
+ /// `iosb` needs to have been used by `poll` first for `cancel` to be valid.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the memory of `IO_STATUS_BLOCK` is still used by the `Afd` instance while the operation is pending (`STATUS_PENDING`).
+ /// Only use it while the request is still being polled, so that you have a valid `IO_STATUS_BLOCK`.
+ /// The caller must NOT deallocate their overlapped value after the `cancel`, to prevent a double free.
+ pub unsafe fn cancel(&self, iosb: *mut IO_STATUS_BLOCK) -> io::Result<()> {
+ if (*iosb).u.Status != STATUS_PENDING {
+ return Ok(());
+ }
+
+ let mut cancel_iosb = IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_u { Status: 0 },
+ Information: 0,
+ };
+ let status = NtCancelIoFileEx(self.fd.as_raw_handle(), iosb, &mut cancel_iosb);
+ if status == STATUS_SUCCESS || status == STATUS_NOT_FOUND {
+ return Ok(());
+ }
+ Err(io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ ))
+ }
+}
+
+cfg_io_source! {
+ use std::mem::zeroed;
+ use std::os::windows::io::{FromRawHandle, RawHandle};
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ use miow::iocp::CompletionPort;
+ use ntapi::ntioapi::{NtCreateFile, FILE_OPEN};
+ use winapi::shared::ntdef::{OBJECT_ATTRIBUTES, UNICODE_STRING, USHORT, WCHAR};
+ use winapi::um::handleapi::INVALID_HANDLE_VALUE;
+ use winapi::um::winbase::{SetFileCompletionNotificationModes, FILE_SKIP_SET_EVENT_ON_HANDLE};
+ use winapi::um::winnt::{SYNCHRONIZE, FILE_SHARE_READ, FILE_SHARE_WRITE};
+
+ const AFD_HELPER_ATTRIBUTES: OBJECT_ATTRIBUTES = OBJECT_ATTRIBUTES {
+ Length: size_of::<OBJECT_ATTRIBUTES>() as ULONG,
+ RootDirectory: null_mut(),
+ ObjectName: &AFD_OBJ_NAME as *const _ as *mut _,
+ Attributes: 0,
+ SecurityDescriptor: null_mut(),
+ SecurityQualityOfService: null_mut(),
+ };
+
+ const AFD_OBJ_NAME: UNICODE_STRING = UNICODE_STRING {
+ Length: (AFD_HELPER_NAME.len() * size_of::<WCHAR>()) as USHORT,
+ MaximumLength: (AFD_HELPER_NAME.len() * size_of::<WCHAR>()) as USHORT,
+ Buffer: AFD_HELPER_NAME.as_ptr() as *mut _,
+ };
+
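+ // UTF-16 encoding of the NT device path `\Device\Afd\Mio`. The AFD driver
+ // presumably accepts an arbitrary suffix after `\Device\Afd\`; the `Mio`
+ // suffix merely makes the handle identifiable in debugging tools.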
+ const AFD_HELPER_NAME: &[WCHAR] = &[
+ '\\' as _,
+ 'D' as _,
+ 'e' as _,
+ 'v' as _,
+ 'i' as _,
+ 'c' as _,
+ 'e' as _,
+ '\\' as _,
+ 'A' as _,
+ 'f' as _,
+ 'd' as _,
+ '\\' as _,
+ 'M' as _,
+ 'i' as _,
+ 'o' as _
+ ];
+
+ static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(0);
+
+ impl AfdPollInfo {
+ pub fn zeroed() -> AfdPollInfo {
+ unsafe { zeroed() }
+ }
+ }
+
+ impl Afd {
+ /// Create new Afd instance.
+ pub fn new(cp: &CompletionPort) -> io::Result<Afd> {
+ let mut afd_helper_handle: HANDLE = INVALID_HANDLE_VALUE;
+ let mut iosb = IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_u { Status: 0 },
+ Information: 0,
+ };
+
+ unsafe {
+ let status = NtCreateFile(
+ &mut afd_helper_handle as *mut _,
+ SYNCHRONIZE,
+ &AFD_HELPER_ATTRIBUTES as *const _ as *mut _,
+ &mut iosb,
+ null_mut(),
+ 0,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ FILE_OPEN,
+ 0,
+ null_mut(),
+ 0,
+ );
+ if status != STATUS_SUCCESS {
+ let raw_err = io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ );
+ let msg = format!("Failed to open \\Device\\Afd\\Mio: {}", raw_err);
+ return Err(io::Error::new(raw_err.kind(), msg));
+ }
+ let fd = File::from_raw_handle(afd_helper_handle as RawHandle);
+ // Increment by 2 to reserve space for other types of handles.
+ // Non-AFD types (currently only `NamedPipe`) use odd-numbered
+ // tokens. This allows the selector to differentiate between them
+ // and dispatch events accordingly.
+ let token = NEXT_TOKEN.fetch_add(2, Ordering::Relaxed) + 2;
+ let afd = Afd { fd };
+ cp.add_handle(token, &afd.fd)?;
+ match SetFileCompletionNotificationModes(
+ afd_helper_handle,
+ FILE_SKIP_SET_EVENT_ON_HANDLE,
+ ) {
+ 0 => Err(io::Error::last_os_error()),
+ _ => Ok(afd),
+ }
+ }
+ }
+ }
+}
+
+pub const POLL_RECEIVE: u32 = 0b0_0000_0001;
+pub const POLL_RECEIVE_EXPEDITED: u32 = 0b0_0000_0010;
+pub const POLL_SEND: u32 = 0b0_0000_0100;
+pub const POLL_DISCONNECT: u32 = 0b0_0000_1000;
+pub const POLL_ABORT: u32 = 0b0_0001_0000;
+pub const POLL_LOCAL_CLOSE: u32 = 0b0_0010_0000;
+// Not used, as it is indicated in every event for a connected socket, not
+// just the first time a connection is established.
+// Also see https://github.com/piscisaureus/wepoll/commit/8b7b340610f88af3d83f40fb728e7b850b090ece.
+pub const POLL_CONNECT: u32 = 0b0_0100_0000;
+pub const POLL_ACCEPT: u32 = 0b0_1000_0000;
+pub const POLL_CONNECT_FAIL: u32 = 0b1_0000_0000;
+
+pub const KNOWN_EVENTS: u32 = POLL_RECEIVE
+ | POLL_RECEIVE_EXPEDITED
+ | POLL_SEND
+ | POLL_DISCONNECT
+ | POLL_ABORT
+ | POLL_LOCAL_CLOSE
+ | POLL_ACCEPT
+ | POLL_CONNECT_FAIL;
diff --git a/third_party/rust/mio/src/sys/windows/event.rs b/third_party/rust/mio/src/sys/windows/event.rs
new file mode 100644
index 0000000000..a49252a296
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/event.rs
@@ -0,0 +1,162 @@
+use std::fmt;
+
+use miow::iocp::CompletionStatus;
+
+use super::afd;
+use crate::Token;
+
+#[derive(Clone)]
+pub struct Event {
+ pub flags: u32,
+ pub data: u64,
+}
+
+pub fn token(event: &Event) -> Token {
+ Token(event.data as usize)
+}
+
+impl Event {
+ pub(super) fn new(token: Token) -> Event {
+ Event {
+ flags: 0,
+ data: usize::from(token) as u64,
+ }
+ }
+
+ pub(super) fn set_readable(&mut self) {
+ self.flags |= afd::POLL_RECEIVE
+ }
+
+ #[cfg(feature = "os-ext")]
+ pub(super) fn set_writable(&mut self) {
+ self.flags |= afd::POLL_SEND;
+ }
+
+ pub(super) fn from_completion_status(status: &CompletionStatus) -> Event {
+ Event {
+ flags: status.bytes_transferred(),
+ data: status.token() as u64,
+ }
+ }
+
+ pub(super) fn to_completion_status(&self) -> CompletionStatus {
+ CompletionStatus::new(self.flags, self.data as usize, std::ptr::null_mut())
+ }
+}
+
+pub(crate) const READABLE_FLAGS: u32 = afd::POLL_RECEIVE
+ | afd::POLL_DISCONNECT
+ | afd::POLL_ACCEPT
+ | afd::POLL_ABORT
+ | afd::POLL_CONNECT_FAIL;
+pub(crate) const WRITABLE_FLAGS: u32 = afd::POLL_SEND | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+pub(crate) const ERROR_FLAGS: u32 = afd::POLL_CONNECT_FAIL;
+pub(crate) const READ_CLOSED_FLAGS: u32 =
+ afd::POLL_DISCONNECT | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+pub(crate) const WRITE_CLOSED_FLAGS: u32 = afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+
+pub fn is_readable(event: &Event) -> bool {
+ event.flags & READABLE_FLAGS != 0
+}
+
+pub fn is_writable(event: &Event) -> bool {
+ event.flags & WRITABLE_FLAGS != 0
+}
+
+pub fn is_error(event: &Event) -> bool {
+ event.flags & ERROR_FLAGS != 0
+}
+
+pub fn is_read_closed(event: &Event) -> bool {
+ event.flags & READ_CLOSED_FLAGS != 0
+}
+
+pub fn is_write_closed(event: &Event) -> bool {
+ event.flags & WRITE_CLOSED_FLAGS != 0
+}
+
+pub fn is_priority(event: &Event) -> bool {
+ event.flags & afd::POLL_RECEIVE_EXPEDITED != 0
+}
+
+pub fn is_aio(_: &Event) -> bool {
+ // Not supported.
+ false
+}
+
+pub fn is_lio(_: &Event) -> bool {
+ // Not supported.
+ false
+}
+
+pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_flags(got: &u32, want: &u32) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FlagsDetails(u32),
+ check_flags,
+ afd::POLL_RECEIVE,
+ afd::POLL_RECEIVE_EXPEDITED,
+ afd::POLL_SEND,
+ afd::POLL_DISCONNECT,
+ afd::POLL_ABORT,
+ afd::POLL_LOCAL_CLOSE,
+ afd::POLL_CONNECT,
+ afd::POLL_ACCEPT,
+ afd::POLL_CONNECT_FAIL,
+ );
+
+ f.debug_struct("event")
+ .field("flags", &FlagsDetails(event.flags))
+ .field("data", &event.data)
+ .finish()
+}
+
+pub struct Events {
+ /// Raw I/O event completions are filled in here by the call to `get_many`
+ /// on the completion port above. These are then processed to run callbacks
+ /// which figure out what to do after the event is done.
+ pub statuses: Box<[CompletionStatus]>,
+
+ /// Literal events returned by `get` to the upwards `EventLoop`. This file
+ /// doesn't really modify this (except for the waker), instead almost all
+ /// events are filled in by the `ReadinessQueue` from the `poll` module.
+ pub events: Vec<Event>,
+}
+
+impl Events {
+ pub fn with_capacity(cap: usize) -> Events {
+ // Note that it's possible for the output `events` to grow beyond the
+ // capacity as it can also include deferred events, but that's certainly
+ // not the end of the world!
+ Events {
+ statuses: vec![CompletionStatus::zero(); cap].into_boxed_slice(),
+ events: Vec::with_capacity(cap),
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ pub fn get(&self, idx: usize) -> Option<&Event> {
+ self.events.get(idx)
+ }
+
+ pub fn clear(&mut self) {
+ self.events.clear();
+ for status in self.statuses.iter_mut() {
+ *status = CompletionStatus::zero();
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/sys/windows/io_status_block.rs b/third_party/rust/mio/src/sys/windows/io_status_block.rs
new file mode 100644
index 0000000000..3e60334961
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/io_status_block.rs
@@ -0,0 +1,40 @@
+use std::fmt;
+use std::ops::{Deref, DerefMut};
+
+use ntapi::ntioapi::IO_STATUS_BLOCK;
+
+pub struct IoStatusBlock(IO_STATUS_BLOCK);
+
+cfg_io_source! {
+ use ntapi::ntioapi::IO_STATUS_BLOCK_u;
+
+ impl IoStatusBlock {
+ pub fn zeroed() -> Self {
+ Self(IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_u { Status: 0 },
+ Information: 0,
+ })
+ }
+ }
+}
+
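+// `IO_STATUS_BLOCK` contains a raw pointer in its union, which keeps it from
+// being `Send` automatically; the block is only ever accessed under the
+// selector's locking, which is presumably what makes this manual impl sound.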
+unsafe impl Send for IoStatusBlock {}
+
+impl Deref for IoStatusBlock {
+ type Target = IO_STATUS_BLOCK;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for IoStatusBlock {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl fmt::Debug for IoStatusBlock {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("IoStatusBlock").finish()
+ }
+}
diff --git a/third_party/rust/mio/src/sys/windows/mod.rs b/third_party/rust/mio/src/sys/windows/mod.rs
new file mode 100644
index 0000000000..7048351355
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/mod.rs
@@ -0,0 +1,147 @@
+mod afd;
+mod io_status_block;
+
+pub mod event;
+pub use event::{Event, Events};
+
+mod selector;
+pub use selector::{Selector, SelectorInner, SockState};
+
+mod overlapped;
+use overlapped::Overlapped;
+
+// Macros must be defined before the modules that use them
+cfg_net! {
+ /// Helper macro to execute a system call that returns an `io::Result`.
+ //
+ // Macros must be defined before any modules that use them.
+ macro_rules! syscall {
+ ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{
+ let res = unsafe { $fn($($arg, )*) };
+ if $err_test(&res, &$err_value) {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(res)
+ }
+ }};
+ }
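+
+ // A usage sketch (as in `net.rs` below): test the return value of `socket`
+ // against `INVALID_SOCKET`; on a match, the macro returns
+ // `Err(io::Error::last_os_error())`:
+ //
+ // syscall!(socket(domain, socket_type, 0), PartialEq::eq, INVALID_SOCKET)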
+
+ mod net;
+
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+}
+
+cfg_os_ext! {
+ pub(crate) mod named_pipe;
+}
+
+mod waker;
+pub(crate) use waker::Waker;
+
+cfg_io_source! {
+ use std::io;
+ use std::os::windows::io::RawSocket;
+ use std::pin::Pin;
+ use std::sync::{Arc, Mutex};
+
+ use crate::{Interest, Registry, Token};
+
+ struct InternalState {
+ selector: Arc<SelectorInner>,
+ token: Token,
+ interests: Interest,
+ sock_state: Pin<Arc<Mutex<SockState>>>,
+ }
+
+ impl Drop for InternalState {
+ fn drop(&mut self) {
+ let mut sock_state = self.sock_state.lock().unwrap();
+ sock_state.mark_delete();
+ }
+ }
+
+ pub struct IoSourceState {
+ // This is `None` if the socket has not yet been registered.
+ //
+ // We box the internal state to not increase the size on the stack as the
+ // type might move around a lot.
+ inner: Option<Box<InternalState>>,
+ }
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState { inner: None }
+ }
+
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
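+ // Run the user's I/O operation. If it would block, the (simulated
+ // edge-triggered) readiness for this socket has been used up, so
+ // reregister it with the selector below to re-arm the AFD poll.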
+ let result = f(io);
+ if let Err(ref e) = result {
+ if e.kind() == io::ErrorKind::WouldBlock {
+ self.inner.as_ref().map_or(Ok(()), |state| {
+ state
+ .selector
+ .reregister(state.sock_state.clone(), state.token, state.interests)
+ })?;
+ }
+ }
+ result
+ }
+
+ pub fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ socket: RawSocket,
+ ) -> io::Result<()> {
+ if self.inner.is_some() {
+ Err(io::ErrorKind::AlreadyExists.into())
+ } else {
+ registry
+ .selector()
+ .register(socket, token, interests)
+ .map(|state| {
+ self.inner = Some(Box::new(state));
+ })
+ }
+ }
+
+ pub fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ match self.inner.as_mut() {
+ Some(state) => {
+ registry
+ .selector()
+ .reregister(state.sock_state.clone(), token, interests)
+ .map(|()| {
+ state.token = token;
+ state.interests = interests;
+ })
+ }
+ None => Err(io::ErrorKind::NotFound.into()),
+ }
+ }
+
+ pub fn deregister(&mut self) -> io::Result<()> {
+ match self.inner.as_mut() {
+ Some(state) => {
+ {
+ let mut sock_state = state.sock_state.lock().unwrap();
+ sock_state.mark_delete();
+ }
+ self.inner = None;
+ Ok(())
+ }
+ None => Err(io::ErrorKind::NotFound.into()),
+ }
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/sys/windows/named_pipe.rs b/third_party/rust/mio/src/sys/windows/named_pipe.rs
new file mode 100644
index 0000000000..adda51f23c
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/named_pipe.rs
@@ -0,0 +1,782 @@
+use std::ffi::OsStr;
+use std::io::{self, Read, Write};
+use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle};
+use std::sync::atomic::Ordering::{Relaxed, SeqCst};
+use std::sync::atomic::{AtomicBool, AtomicUsize};
+use std::sync::{Arc, Mutex};
+use std::{fmt, mem, slice};
+
+use miow::iocp::{CompletionPort, CompletionStatus};
+use miow::pipe;
+use winapi::shared::winerror::{ERROR_BROKEN_PIPE, ERROR_PIPE_LISTENING};
+use winapi::um::ioapiset::CancelIoEx;
+use winapi::um::minwinbase::{OVERLAPPED, OVERLAPPED_ENTRY};
+
+use crate::event::Source;
+use crate::sys::windows::{Event, Overlapped};
+use crate::Registry;
+use crate::{Interest, Token};
+
+/// Non-blocking windows named pipe.
+///
+/// This structure internally contains a `HANDLE` which represents the named
+/// pipe, and also maintains state associated with the mio event loop and active
+/// I/O operations that have been scheduled to translate IOCP to a readiness
+/// model.
+///
+/// Note, IOCP is a *completion* based model whereas mio is a *readiness* based
+/// model. To bridge this, `NamedPipe` performs internal buffering. Writes are
+/// written to an internal buffer and the buffer is submitted to IOCP. IOCP
+/// reads are submitted using internal buffers and `NamedPipe::read` reads from
+/// this internal buffer.
+///
+/// # Trait implementations
+///
+/// The `Read` and `Write` traits are implemented for `NamedPipe` and for
+/// `&NamedPipe`. This means that a named pipe can be read from and written to
+/// concurrently, even through a shared reference. Typically a named pipe needs
+/// to be connected to a client before it can be read or written, however.
+///
+/// Note that for I/O operations on a named pipe to succeed then the named pipe
+/// needs to be associated with an event loop. Until this happens all I/O
+/// operations will return a "would block" error.
+///
+/// # Managing connections
+///
+/// The `NamedPipe` type supports a `connect` method to connect to a client and
+/// a `disconnect` method to disconnect from that client. These two methods only
+/// work once a named pipe is associated with an event loop.
+///
+/// The `connect` method will succeed asynchronously and a completion can be
+/// detected once the object receives a writable notification.
+///
+/// # Named pipe clients
+///
+/// Currently, to create a client of a named pipe server, you can use the
+/// `OpenOptions` type in the standard library to create a `File` that connects
+/// to a named pipe. Afterwards you can use the `into_raw_handle` method coupled
+/// with the `NamedPipe::from_raw_handle` method to convert that to a named pipe
+/// that can operate asynchronously. Don't forget to pass the
+/// `FILE_FLAG_OVERLAPPED` flag when opening the `File`.
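+///
+/// A minimal sketch of such a client, assuming a server is already listening
+/// on the hypothetical path `\\.\pipe\mio-example`:
+///
+/// ```no_run
+/// use std::fs::OpenOptions;
+/// use std::os::windows::fs::OpenOptionsExt;
+/// use std::os::windows::io::{FromRawHandle, IntoRawHandle};
+/// use mio::windows::NamedPipe;
+/// use winapi::um::winbase::FILE_FLAG_OVERLAPPED;
+///
+/// # fn main() -> std::io::Result<()> {
+/// // Open the pipe as an overlapped (asynchronous) file handle.
+/// let file = OpenOptions::new()
+///     .read(true)
+///     .write(true)
+///     .custom_flags(FILE_FLAG_OVERLAPPED)
+///     .open(r"\\.\pipe\mio-example")?;
+/// // Convert the raw handle into a `NamedPipe` for use with mio.
+/// let _pipe = unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) };
+/// # Ok(())
+/// # }
+/// ```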
+pub struct NamedPipe {
+ inner: Arc<Inner>,
+}
+
+/// # Notes
+///
+/// The memory layout of this structure must be fixed as the
+/// `ptr_from_*_overlapped` methods depend on it, see the `ptr_from` test.
+#[repr(C)]
+struct Inner {
+ // NOTE: careful modifying the order of these three fields, the `ptr_from_*`
+ // methods depend on the layout!
+ connect: Overlapped,
+ read: Overlapped,
+ write: Overlapped,
+ // END NOTE.
+ handle: pipe::NamedPipe,
+ connecting: AtomicBool,
+ io: Mutex<Io>,
+ pool: Mutex<BufferPool>,
+}
+
+impl Inner {
+ /// Converts a pointer to `Inner.connect` to a pointer to `Inner`.
+ ///
+ /// # Unsafety
+ ///
+ /// Caller must ensure `ptr` is pointing to `Inner.connect`.
+ unsafe fn ptr_from_conn_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `connect` is the first field, so the pointers are the same.
+ ptr.cast()
+ }
+
+ /// Same as [`ptr_from_conn_overlapped`] but for `Inner.read`.
+ unsafe fn ptr_from_read_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `read` is after `connect: Overlapped`.
+ (ptr as *mut Overlapped).wrapping_sub(1) as *const Inner
+ }
+
+ /// Same as [`ptr_from_conn_overlapped`] but for `Inner.write`.
+ unsafe fn ptr_from_write_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `write` is after `connect: Overlapped` and `read: Overlapped`.
+ (ptr as *mut Overlapped).wrapping_sub(2) as *const Inner
+ }
+}
+
+#[test]
+fn ptr_from() {
+ use std::mem::ManuallyDrop;
+ use std::ptr;
+
+ let pipe = unsafe { ManuallyDrop::new(NamedPipe::from_raw_handle(ptr::null_mut())) };
+ let inner: &Inner = &pipe.inner;
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_conn_overlapped(&inner.connect as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_conn_overlapped` incorrect"
+ );
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_read_overlapped(&inner.read as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_read_overlapped` incorrect"
+ );
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_write_overlapped(&inner.write as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_write_overlapped` incorrect"
+ );
+}
+
+struct Io {
+ // Uniquely identifies the selector associated with this named pipe
+ cp: Option<Arc<CompletionPort>>,
+ // Token used to identify events
+ token: Option<Token>,
+ read: State,
+ write: State,
+ connect_error: Option<io::Error>,
+}
+
+#[derive(Debug)]
+enum State {
+ None,
+ Pending(Vec<u8>, usize),
+ Ok(Vec<u8>, usize),
+ Err(io::Error),
+}
+
+// Odd tokens are for named pipes
+static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(1);
+
+fn would_block() -> io::Error {
+ io::ErrorKind::WouldBlock.into()
+}
+
+impl NamedPipe {
+ /// Creates a new named pipe at the specified `addr` given a "reasonable
+ /// set" of initial configuration options.
+ pub fn new<A: AsRef<OsStr>>(addr: A) -> io::Result<NamedPipe> {
+ let pipe = pipe::NamedPipe::new(addr)?;
+ // Safety: nothing actually unsafe about this. The trait fn includes
+ // `unsafe`.
+ Ok(unsafe { NamedPipe::from_raw_handle(pipe.into_raw_handle()) })
+ }
+
+ /// Attempts to call `ConnectNamedPipe`, if possible.
+ ///
+ /// This function will attempt to connect this pipe to a client in an
+ /// asynchronous fashion. If the function immediately establishes a
+ /// connection to a client then `Ok(())` is returned. Otherwise if a
+ /// connection attempt was issued and is now in progress then a "would
+ /// block" error is returned.
+ ///
+ /// When the connection is finished, this object will be flagged as
+ /// being ready for a write, or otherwise be in the writable state.
+ ///
+ /// # Errors
+ ///
+ /// This function will return a "would block" error if the pipe has not yet
+ /// been registered with an event loop, if the connection operation has
+ /// previously been issued but has not yet completed, or if the connect
+ /// itself was issued and didn't finish immediately.
+ ///
+ /// Normal I/O errors from the call to `ConnectNamedPipe` are returned
+ /// immediately.
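+ ///
+ /// # Examples
+ ///
+ /// A hedged sketch of driving `connect` from an event loop: on a "would
+ /// block" result, wait for a writable event and then consult `take_error`
+ /// to learn whether the connection actually succeeded.
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use mio::windows::NamedPipe;
+ ///
+ /// // Returns `true` if a client connected immediately, `false` if the
+ /// // attempt is pending and will be signalled by a writable event.
+ /// fn start_connect(pipe: &NamedPipe) -> io::Result<bool> {
+ ///     match pipe.connect() {
+ ///         Ok(()) => Ok(true),
+ ///         Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Ok(false),
+ ///         Err(e) => Err(e),
+ ///     }
+ /// }
+ /// ```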
+ pub fn connect(&self) -> io::Result<()> {
+ // "Acquire the connecting lock" or otherwise just make sure we're the
+ // only operation that's using the `connect` overlapped instance.
+ if self.inner.connecting.swap(true, SeqCst) {
+ return Err(would_block());
+ }
+
+ // Now that we've flagged ourselves in the connecting state, issue the
+ // connection attempt. Afterwards interpret the return value and set
+ // internal state accordingly.
+ let res = unsafe {
+ let overlapped = self.inner.connect.as_ptr() as *mut _;
+ self.inner.handle.connect_overlapped(overlapped)
+ };
+
+ match res {
+ // The connection operation finished immediately, so let's schedule
+ // reads/writes and such.
+ Ok(true) => {
+ self.inner.connecting.store(false, SeqCst);
+ Inner::post_register(&self.inner, None);
+ Ok(())
+ }
+
+ // If the overlapped operation was successful and didn't finish
+ // immediately then we forget a copy of the arc we hold
+ // internally. This ensures that when the completion status comes
+ // in for the I/O operation finishing it'll have a reference
+ // associated with it and our data will still be valid. The
+ // `connect_done` function will "reify" this forgotten pointer to
+ // drop the refcount on the other side.
+ Ok(false) => {
+ mem::forget(self.inner.clone());
+ Err(would_block())
+ }
+
+ Err(e) => {
+ self.inner.connecting.store(false, SeqCst);
+ Err(e)
+ }
+ }
+ }
+
+ /// Takes any internal error that has happened after the last I/O operation
+ /// which hasn't been retrieved yet.
+ ///
+ /// This is particularly useful when detecting failed attempts to `connect`.
+ /// After a completed `connect` flags this pipe as writable, callers
+ /// must invoke this method to determine whether the connection actually
+ /// succeeded. If this function returns `None` then a client is connected,
+ /// otherwise it returns an error of what happened and a client shouldn't be
+ /// connected.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ Ok(self.inner.io.lock().unwrap().connect_error.take())
+ }
+
+ /// Disconnects this named pipe from a connected client.
+ ///
+ /// This function will disconnect the pipe from a connected client, if any,
+ /// transitively calling the `DisconnectNamedPipe` function.
+ ///
+ /// After a `disconnect` is issued, a `connect` may be called again to
+ /// connect to another client.
+ pub fn disconnect(&self) -> io::Result<()> {
+ self.inner.handle.disconnect()
+ }
+}
+
+impl FromRawHandle for NamedPipe {
+ unsafe fn from_raw_handle(handle: RawHandle) -> NamedPipe {
+ NamedPipe {
+ inner: Arc::new(Inner {
+ // Safety: not really unsafe
+ handle: pipe::NamedPipe::from_raw_handle(handle),
+ // transmutes to straddle winapi versions (mio 0.6 is on an
+ // older winapi)
+ connect: Overlapped::new(connect_done),
+ connecting: AtomicBool::new(false),
+ read: Overlapped::new(read_done),
+ write: Overlapped::new(write_done),
+ io: Mutex::new(Io {
+ cp: None,
+ token: None,
+ read: State::None,
+ write: State::None,
+ connect_error: None,
+ }),
+ pool: Mutex::new(BufferPool::with_capacity(2)),
+ }),
+ }
+ }
+}
+
+impl Read for NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ <&NamedPipe as Read>::read(&mut &*self, buf)
+ }
+}
+
+impl Write for NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ <&NamedPipe as Write>::write(&mut &*self, buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ <&NamedPipe as Write>::flush(&mut &*self)
+ }
+}
+
+impl<'a> Read for &'a NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut state = self.inner.io.lock().unwrap();
+
+ if state.token.is_none() {
+ return Err(would_block());
+ }
+
+ match mem::replace(&mut state.read, State::None) {
+ // In theory not possible with `token` checked above,
+ // but return would block for now.
+ State::None => Err(would_block()),
+
+ // A read is in flight, still waiting for it to finish
+ State::Pending(buf, amt) => {
+ state.read = State::Pending(buf, amt);
+ Err(would_block())
+ }
+
+ // We previously read something into `data`, try to copy out some
+ // data. If we copy out all the data schedule a new read and
+ // otherwise store the buffer to get read later.
+ State::Ok(data, cur) => {
+ let n = {
+ let mut remaining = &data[cur..];
+ remaining.read(buf)?
+ };
+ let next = cur + n;
+ if next != data.len() {
+ state.read = State::Ok(data, next);
+ } else {
+ self.inner.put_buffer(data);
+ Inner::schedule_read(&self.inner, &mut state, None);
+ }
+ Ok(n)
+ }
+
+ // Looks like an in-flight read hit an error, return that here while
+ // we schedule a new one.
+ State::Err(e) => {
+ Inner::schedule_read(&self.inner, &mut state, None);
+ if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) {
+ Ok(0)
+ } else {
+ Err(e)
+ }
+ }
+ }
+ }
+}
+
+impl<'a> Write for &'a NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ // Make sure there are no writes pending
+ let mut io = self.inner.io.lock().unwrap();
+
+ if io.token.is_none() {
+ return Err(would_block());
+ }
+
+ match io.write {
+ State::None => {}
+ State::Err(_) => match mem::replace(&mut io.write, State::None) {
+ State::Err(e) => return Err(e),
+ // `io` is locked, so this branch is unreachable
+ _ => unreachable!(),
+ },
+ // any other state should be handled in `write_done`
+ _ => {
+ return Err(would_block());
+ }
+ }
+
+ // Move `buf` onto the heap and fire off the write
+ let mut owned_buf = self.inner.get_buffer();
+ owned_buf.extend(buf);
+ match Inner::maybe_schedule_write(&self.inner, owned_buf, 0, &mut io)? {
+ // Some bytes are written immediately
+ Some(n) => Ok(n),
+ // The write operation was enqueued for the whole buffer
+ None => Ok(buf.len()),
+ }
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Source for NamedPipe {
+ fn register(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, false)?;
+
+ if io.token.is_some() {
+ return Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a `Registry`",
+ ));
+ }
+
+ if io.cp.is_none() {
+ let selector = registry.selector();
+
+ io.cp = Some(selector.clone_port());
+
+ let inner_token = NEXT_TOKEN.fetch_add(2, Relaxed) + 2;
+ selector
+ .inner
+ .cp
+ .add_handle(inner_token, &self.inner.handle)?;
+ }
+
+ io.token = Some(token);
+ drop(io);
+
+ Inner::post_register(&self.inner, None);
+
+ Ok(())
+ }
+
+ fn reregister(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, true)?;
+
+ io.token = Some(token);
+ drop(io);
+
+ Inner::post_register(&self.inner, None);
+
+ Ok(())
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, true)?;
+
+ if io.token.is_none() {
+ return Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ ));
+ }
+
+ io.token = None;
+ Ok(())
+ }
+}
+
+impl AsRawHandle for NamedPipe {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.inner.handle.as_raw_handle()
+ }
+}
+
+impl fmt::Debug for NamedPipe {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.handle.fmt(f)
+ }
+}
+
+impl Drop for NamedPipe {
+ fn drop(&mut self) {
+ // Cancel pending reads/connects, but don't cancel writes to ensure that
+ // everything is flushed out.
+ unsafe {
+ if self.inner.connecting.load(SeqCst) {
+ drop(cancel(&self.inner.handle, &self.inner.connect));
+ }
+
+ let io = self.inner.io.lock().unwrap();
+ if let State::Pending(..) = io.read {
+ drop(cancel(&self.inner.handle, &self.inner.read));
+ }
+ }
+ }
+}
+
+impl Inner {
+ /// Schedules a read to happen in the background, executing an overlapped
+ /// operation.
+ ///
+ /// This function returns `true` if a normal error happens or if the read
+ /// is scheduled in the background. If the pipe is no longer connected
+ /// (ERROR_PIPE_LISTENING) then `false` is returned and no read is
+ /// scheduled.
+ fn schedule_read(me: &Arc<Inner>, io: &mut Io, events: Option<&mut Vec<Event>>) -> bool {
+ // Check to see if a read is already scheduled/completed
+ match io.read {
+ State::None => {}
+ _ => return true,
+ }
+
+ // Allocate a buffer and schedule the read.
+ let mut buf = me.get_buffer();
+ let e = unsafe {
+ let overlapped = me.read.as_ptr() as *mut _;
+ let slice = slice::from_raw_parts_mut(buf.as_mut_ptr(), buf.capacity());
+ me.handle.read_overlapped(slice, overlapped)
+ };
+
+ match e {
+ // See `NamedPipe::connect` above for the rationale behind `forget`
+ Ok(_) => {
+ io.read = State::Pending(buf, 0); // 0 is ignored on read side
+ mem::forget(me.clone());
+ true
+ }
+
+ // If ERROR_PIPE_LISTENING happens then it's not a real read error,
+ // we just need to wait for a connect.
+ Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_LISTENING as i32) => false,
+
+ // If some other error happened, though, we're now readable to give
+ // out the error.
+ Err(e) => {
+ io.read = State::Err(e);
+ io.notify_readable(events);
+ true
+ }
+ }
+ }
+
+ /// Maybe schedules overlapped write operation.
+ ///
+ /// * `None` means that the overlapped operation was enqueued.
+ /// * `Some(n)` means that `n` bytes were written immediately.
+ ///   Note that `write_done` will fire anyway to clean up the state.
+ fn maybe_schedule_write(
+ me: &Arc<Inner>,
+ buf: Vec<u8>,
+ pos: usize,
+ io: &mut Io,
+ ) -> io::Result<Option<usize>> {
+ // Very similar to `schedule_read` above, just done for the write half.
+ let e = unsafe {
+ let overlapped = me.write.as_ptr() as *mut _;
+ me.handle.write_overlapped(&buf[pos..], overlapped)
+ };
+
+ // See `connect` above for the rationale behind `forget`
+ match e {
+ // `n` bytes were written immediately
+ Ok(Some(n)) => {
+ io.write = State::Ok(buf, pos);
+ mem::forget(me.clone());
+ Ok(Some(n))
+ }
+ // write operation is enqueued
+ Ok(None) => {
+ io.write = State::Pending(buf, pos);
+ mem::forget(me.clone());
+ Ok(None)
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ fn schedule_write(
+ me: &Arc<Inner>,
+ buf: Vec<u8>,
+ pos: usize,
+ io: &mut Io,
+ events: Option<&mut Vec<Event>>,
+ ) {
+ match Inner::maybe_schedule_write(me, buf, pos, io) {
+ Ok(Some(_)) => {
+ // immediate result will be handled in `write_done`,
+ // so we'll reinterpret the `Ok` state
+ let state = mem::replace(&mut io.write, State::None);
+ io.write = match state {
+ State::Ok(buf, pos) => State::Pending(buf, pos),
+ // io is locked, so this branch is unreachable
+ _ => unreachable!(),
+ };
+ mem::forget(me.clone());
+ }
+ Ok(None) => (),
+ Err(e) => {
+ io.write = State::Err(e);
+ io.notify_writable(events);
+ }
+ }
+ }
+
+ fn post_register(me: &Arc<Inner>, mut events: Option<&mut Vec<Event>>) {
+ let mut io = me.io.lock().unwrap();
+ #[allow(clippy::needless_option_as_deref)]
+ if Inner::schedule_read(me, &mut io, events.as_deref_mut()) {
+ if let State::None = io.write {
+ io.notify_writable(events);
+ }
+ }
+ }
+
+ fn get_buffer(&self) -> Vec<u8> {
+ self.pool.lock().unwrap().get(4 * 1024)
+ }
+
+ fn put_buffer(&self, buf: Vec<u8>) {
+ self.pool.lock().unwrap().put(buf)
+ }
+}
+
+unsafe fn cancel<T: AsRawHandle>(handle: &T, overlapped: &Overlapped) -> io::Result<()> {
+ let ret = CancelIoEx(handle.as_raw_handle(), overlapped.as_ptr() as *mut _);
+ // `CancelIoEx` returns 0 on error:
+ // https://docs.microsoft.com/en-us/windows/win32/fileio/cancelioex-func
+ if ret == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+}
+
+fn connect_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `connect` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_conn_overlapped(status.overlapped())) };
+
+ // Flag ourselves as no longer using the `connect` overlapped instances.
+ let prev = me.connecting.swap(false, SeqCst);
+ assert!(prev, "NamedPipe was not previously connecting");
+
+ // Stash away our connect error if one happened
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ unsafe {
+ match me.handle.result(status.overlapped()) {
+ Ok(n) => debug_assert_eq!(n, 0),
+ Err(e) => me.io.lock().unwrap().connect_error = Some(e),
+ }
+ }
+
+ // We essentially just finished a registration, so kick off a
+ // read and register write readiness.
+ Inner::post_register(&me, events);
+}
+
+fn read_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `FromRawArc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `schedule_read` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_read_overlapped(status.overlapped())) };
+
+ // Move from the `Pending` to `Ok` state.
+ let mut io = me.io.lock().unwrap();
+ let mut buf = match mem::replace(&mut io.read, State::None) {
+ State::Pending(buf, _) => buf,
+ _ => unreachable!(),
+ };
+ unsafe {
+ match me.handle.result(status.overlapped()) {
+ Ok(n) => {
+ debug_assert_eq!(status.bytes_transferred() as usize, n);
+ buf.set_len(status.bytes_transferred() as usize);
+ io.read = State::Ok(buf, 0);
+ }
+ Err(e) => {
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ io.read = State::Err(e);
+ }
+ }
+ }
+
+ // Flag our readiness that we've got data.
+ io.notify_readable(events);
+}
+
+fn write_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `schedule_write` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_write_overlapped(status.overlapped())) };
+
+ // Make the state change out of `Pending`. If we wrote the entire buffer
+ // then we're writable again and otherwise we schedule another write.
+ let mut io = me.io.lock().unwrap();
+ let (buf, pos) = match mem::replace(&mut io.write, State::None) {
+ // `Ok` here means that the operation completed immediately and
+ // `bytes_transferred` was already reported to the client.
+ State::Ok(..) => {
+ io.notify_writable(events);
+ return;
+ }
+ State::Pending(buf, pos) => (buf, pos),
+ _ => unreachable!(),
+ };
+
+ unsafe {
+ match me.handle.result(status.overlapped()) {
+ Ok(n) => {
+ debug_assert_eq!(status.bytes_transferred() as usize, n);
+ let new_pos = pos + (status.bytes_transferred() as usize);
+ if new_pos == buf.len() {
+ me.put_buffer(buf);
+ io.notify_writable(events);
+ } else {
+ Inner::schedule_write(&me, buf, new_pos, &mut io, events);
+ }
+ }
+ Err(e) => {
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ io.write = State::Err(e);
+ io.notify_writable(events);
+ }
+ }
+ }
+}
+
+impl Io {
+ fn check_association(&self, registry: &Registry, required: bool) -> io::Result<()> {
+ match self.cp {
+ Some(ref cp) if !registry.selector().same_port(cp) => Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a different `Registry`",
+ )),
+ None if required => Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ )),
+ _ => Ok(()),
+ }
+ }
+
+ fn notify_readable(&self, events: Option<&mut Vec<Event>>) {
+ if let Some(token) = self.token {
+ let mut ev = Event::new(token);
+ ev.set_readable();
+
+ if let Some(events) = events {
+ events.push(ev);
+ } else {
+ let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status());
+ }
+ }
+ }
+
+ fn notify_writable(&self, events: Option<&mut Vec<Event>>) {
+ if let Some(token) = self.token {
+ let mut ev = Event::new(token);
+ ev.set_writable();
+
+ if let Some(events) = events {
+ events.push(ev);
+ } else {
+ let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status());
+ }
+ }
+ }
+}
+
+struct BufferPool {
+ pool: Vec<Vec<u8>>,
+}
+
+impl BufferPool {
+ fn with_capacity(cap: usize) -> BufferPool {
+ BufferPool {
+ pool: Vec::with_capacity(cap),
+ }
+ }
+
+ fn get(&mut self, default_cap: usize) -> Vec<u8> {
+ self.pool
+ .pop()
+ .unwrap_or_else(|| Vec::with_capacity(default_cap))
+ }
+
+ fn put(&mut self, mut buf: Vec<u8>) {
+ if self.pool.len() < self.pool.capacity() {
+ unsafe {
+ buf.set_len(0);
+ }
+ self.pool.push(buf);
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/sys/windows/net.rs b/third_party/rust/mio/src/sys/windows/net.rs
new file mode 100644
index 0000000000..db1896f198
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/net.rs
@@ -0,0 +1,108 @@
+use std::io;
+use std::mem;
+use std::net::SocketAddr;
+use std::sync::Once;
+
+use winapi::ctypes::c_int;
+use winapi::shared::in6addr::{in6_addr_u, IN6_ADDR};
+use winapi::shared::inaddr::{in_addr_S_un, IN_ADDR};
+use winapi::shared::ws2def::{ADDRESS_FAMILY, AF_INET, AF_INET6, SOCKADDR, SOCKADDR_IN};
+use winapi::shared::ws2ipdef::{SOCKADDR_IN6_LH_u, SOCKADDR_IN6_LH};
+use winapi::um::winsock2::{ioctlsocket, socket, FIONBIO, INVALID_SOCKET, SOCKET};
+
+/// Initialise the network stack for Windows.
+pub(crate) fn init() {
+ static INIT: Once = Once::new();
+ INIT.call_once(|| {
+ // Let standard library call `WSAStartup` for us, we can't do it
+ // ourselves because otherwise using any type in `std::net` would panic
+ // when it tries to call `WSAStartup` a second time.
+ drop(std::net::UdpSocket::bind("127.0.0.1:0"));
+ });
+}
+
+/// Create a new non-blocking socket.
+pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: c_int) -> io::Result<SOCKET> {
+ use winapi::um::winsock2::{PF_INET, PF_INET6};
+
+ let domain = match addr {
+ SocketAddr::V4(..) => PF_INET,
+ SocketAddr::V6(..) => PF_INET6,
+ };
+
+ new_socket(domain, socket_type)
+}
+
+pub(crate) fn new_socket(domain: c_int, socket_type: c_int) -> io::Result<SOCKET> {
+ syscall!(
+ socket(domain, socket_type, 0),
+ PartialEq::eq,
+ INVALID_SOCKET
+ )
+ .and_then(|socket| {
+ syscall!(ioctlsocket(socket, FIONBIO, &mut 1), PartialEq::ne, 0).map(|_| socket as SOCKET)
+ })
+}
+
+/// A type with the same memory layout as `SOCKADDR`. Used in converting Rust level
+/// SocketAddr* types into their system representation. The benefit of this specific
+/// type over using `SOCKADDR_STORAGE` is that this type is exactly as large as it
+/// needs to be and not a lot larger. And it can be initialized cleaner from Rust.
+#[repr(C)]
+pub(crate) union SocketAddrCRepr {
+ v4: SOCKADDR_IN,
+ v6: SOCKADDR_IN6_LH,
+}
+
+impl SocketAddrCRepr {
+ pub(crate) fn as_ptr(&self) -> *const SOCKADDR {
+ self as *const _ as *const SOCKADDR
+ }
+}
+
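+/// A usage sketch: convert a Rust `SocketAddr` and pass the raw
+/// pointer/length pair on to a Winsock call such as `bind`:
+///
+/// ```ignore
+/// let (raw_addr, raw_addr_length) = socket_addr(&addr);
+/// syscall!(
+///     bind(socket, raw_addr.as_ptr(), raw_addr_length),
+///     PartialEq::eq,
+///     SOCKET_ERROR
+/// )?;
+/// ```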
+pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, c_int) {
+ match addr {
+ SocketAddr::V4(ref addr) => {
+ // `s_addr` is stored as big-endian on all machines, and the octet array is
+ // already in big-endian order, so the native-endian conversion is used to
+ // ensure it is never byte-swapped.
+ let sin_addr = unsafe {
+ let mut s_un = mem::zeroed::<in_addr_S_un>();
+ *s_un.S_addr_mut() = u32::from_ne_bytes(addr.ip().octets());
+ IN_ADDR { S_un: s_un }
+ };
+
+ let sockaddr_in = SOCKADDR_IN {
+ sin_family: AF_INET as ADDRESS_FAMILY,
+ sin_port: addr.port().to_be(),
+ sin_addr,
+ sin_zero: [0; 8],
+ };
+
+ let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
+ (sockaddr, mem::size_of::<SOCKADDR_IN>() as c_int)
+ }
+ SocketAddr::V6(ref addr) => {
+ let sin6_addr = unsafe {
+ let mut u = mem::zeroed::<in6_addr_u>();
+ *u.Byte_mut() = addr.ip().octets();
+ IN6_ADDR { u }
+ };
+ let u = unsafe {
+ let mut u = mem::zeroed::<SOCKADDR_IN6_LH_u>();
+ *u.sin6_scope_id_mut() = addr.scope_id();
+ u
+ };
+
+ let sockaddr_in6 = SOCKADDR_IN6_LH {
+ sin6_family: AF_INET6 as ADDRESS_FAMILY,
+ sin6_port: addr.port().to_be(),
+ sin6_addr,
+ sin6_flowinfo: addr.flowinfo(),
+ u,
+ };
+
+ let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
+ (sockaddr, mem::size_of::<SOCKADDR_IN6_LH>() as c_int)
+ }
+ }
+}
diff --git a/third_party/rust/mio/src/sys/windows/overlapped.rs b/third_party/rust/mio/src/sys/windows/overlapped.rs
new file mode 100644
index 0000000000..837b78b60a
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/overlapped.rs
@@ -0,0 +1,37 @@
+use crate::sys::windows::Event;
+
+use std::cell::UnsafeCell;
+use std::fmt;
+
+#[cfg(feature = "os-ext")]
+use winapi::um::minwinbase::OVERLAPPED;
+use winapi::um::minwinbase::OVERLAPPED_ENTRY;
+
+#[repr(C)]
+pub(crate) struct Overlapped {
+ inner: UnsafeCell<miow::Overlapped>,
+ pub(crate) callback: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>),
+}
+
+#[cfg(feature = "os-ext")]
+impl Overlapped {
+ pub(crate) fn new(cb: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>)) -> Overlapped {
+ Overlapped {
+ inner: UnsafeCell::new(miow::Overlapped::zero()),
+ callback: cb,
+ }
+ }
+
+ pub(crate) fn as_ptr(&self) -> *const OVERLAPPED {
+ unsafe { (*self.inner.get()).raw() }
+ }
+}
+
+impl fmt::Debug for Overlapped {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Overlapped").finish()
+ }
+}
+
+unsafe impl Send for Overlapped {}
+unsafe impl Sync for Overlapped {}
diff --git a/third_party/rust/mio/src/sys/windows/selector.rs b/third_party/rust/mio/src/sys/windows/selector.rs
new file mode 100644
index 0000000000..133fefe895
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/selector.rs
@@ -0,0 +1,748 @@
+use super::afd::{self, Afd, AfdPollInfo};
+use super::io_status_block::IoStatusBlock;
+use super::Event;
+use crate::sys::Events;
+
+cfg_net! {
+ use crate::sys::event::{
+ ERROR_FLAGS, READABLE_FLAGS, READ_CLOSED_FLAGS, WRITABLE_FLAGS, WRITE_CLOSED_FLAGS,
+ };
+ use crate::Interest;
+}
+
+use miow::iocp::{CompletionPort, CompletionStatus};
+use std::collections::VecDeque;
+use std::io;
+use std::marker::PhantomPinned;
+use std::os::windows::io::RawSocket;
+use std::pin::Pin;
+#[cfg(debug_assertions)]
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+use winapi::shared::ntdef::NT_SUCCESS;
+use winapi::shared::ntdef::{HANDLE, PVOID};
+use winapi::shared::ntstatus::STATUS_CANCELLED;
+use winapi::shared::winerror::{ERROR_INVALID_HANDLE, ERROR_IO_PENDING, WAIT_TIMEOUT};
+use winapi::um::minwinbase::OVERLAPPED;
+
+#[derive(Debug)]
+struct AfdGroup {
+ #[cfg_attr(not(feature = "net"), allow(dead_code))]
+ cp: Arc<CompletionPort>,
+ afd_group: Mutex<Vec<Arc<Afd>>>,
+}
+
+impl AfdGroup {
+ pub fn new(cp: Arc<CompletionPort>) -> AfdGroup {
+ AfdGroup {
+ afd_group: Mutex::new(Vec::new()),
+ cp,
+ }
+ }
+
+ pub fn release_unused_afd(&self) {
+ let mut afd_group = self.afd_group.lock().unwrap();
+ afd_group.retain(|g| Arc::strong_count(g) > 1);
+ }
+}
+
+cfg_io_source! {
+ const POLL_GROUP__MAX_GROUP_SIZE: usize = 32;
+
+ impl AfdGroup {
+ pub fn acquire(&self) -> io::Result<Arc<Afd>> {
+ let mut afd_group = self.afd_group.lock().unwrap();
+ if afd_group.len() == 0 {
+ self._alloc_afd_group(&mut afd_group)?;
+ } else {
+ // + 1 reference in Vec
+ if Arc::strong_count(afd_group.last().unwrap()) > POLL_GROUP__MAX_GROUP_SIZE {
+ self._alloc_afd_group(&mut afd_group)?;
+ }
+ }
+
+ match afd_group.last() {
+ Some(arc) => Ok(arc.clone()),
+ None => unreachable!(
+ "Cannot acquire afd, {:#?}, afd_group: {:#?}",
+ self, afd_group
+ ),
+ }
+ }
+
+ fn _alloc_afd_group(&self, afd_group: &mut Vec<Arc<Afd>>) -> io::Result<()> {
+ let afd = Afd::new(&self.cp)?;
+ let arc = Arc::new(afd);
+ afd_group.push(arc);
+ Ok(())
+ }
+ }
+}
+
+#[derive(Debug)]
+enum SockPollStatus {
+ Idle,
+ Pending,
+ Cancelled,
+}
+
+#[derive(Debug)]
+pub struct SockState {
+ iosb: IoStatusBlock,
+ poll_info: AfdPollInfo,
+ afd: Arc<Afd>,
+
+ base_socket: RawSocket,
+
+ user_evts: u32,
+ pending_evts: u32,
+
+ user_data: u64,
+
+ poll_status: SockPollStatus,
+ delete_pending: bool,
+
+ // last raw os error
+ error: Option<i32>,
+
+ _pinned: PhantomPinned,
+}
+
+impl SockState {
+ fn update(&mut self, self_arc: &Pin<Arc<Mutex<SockState>>>) -> io::Result<()> {
+ assert!(!self.delete_pending);
+
+ // make sure to reset previous error before a new update
+ self.error = None;
+
+ if let SockPollStatus::Pending = self.poll_status {
+ if (self.user_evts & afd::KNOWN_EVENTS & !self.pending_evts) == 0 {
+ /* All the events the user is interested in are already being monitored by
+ * the pending poll operation. It might spuriously complete because of an
+ * event that we're no longer interested in; when that happens we'll submit
+ * a new poll operation with the updated event mask. */
+ } else {
+ /* A poll operation is already pending, but it's not monitoring for all the
+ * events that the user is interested in. Therefore, cancel the pending
+ * poll operation; when we receive its completion packet, a new poll
+ * operation will be submitted with the correct event mask. */
+ if let Err(e) = self.cancel() {
+ self.error = e.raw_os_error();
+ return Err(e);
+ }
+ return Ok(());
+ }
+ } else if let SockPollStatus::Cancelled = self.poll_status {
+ /* The poll operation has already been cancelled, we're still waiting for
+ * it to return. For now, there's nothing that needs to be done. */
+ } else if let SockPollStatus::Idle = self.poll_status {
+ /* No poll operation is pending; start one. */
+ self.poll_info.exclusive = 0;
+ self.poll_info.number_of_handles = 1;
+ *unsafe { self.poll_info.timeout.QuadPart_mut() } = std::i64::MAX;
+ self.poll_info.handles[0].handle = self.base_socket as HANDLE;
+ self.poll_info.handles[0].status = 0;
+ self.poll_info.handles[0].events = self.user_evts | afd::POLL_LOCAL_CLOSE;
+
+ // Increase the ref count as the memory will be used by the kernel.
+ let overlapped_ptr = into_overlapped(self_arc.clone());
+
+ let result = unsafe {
+ self.afd
+ .poll(&mut self.poll_info, &mut *self.iosb, overlapped_ptr)
+ };
+ if let Err(e) = result {
+ let code = e.raw_os_error().unwrap();
+ if code == ERROR_IO_PENDING as i32 {
+ /* Overlapped poll operation in progress; this is expected. */
+ } else {
+ // Since the operation failed it means the kernel won't be
+ // using the memory any more.
+ drop(from_overlapped(overlapped_ptr as *mut _));
+ if code == ERROR_INVALID_HANDLE as i32 {
+ /* Socket closed; it'll be dropped. */
+ self.mark_delete();
+ return Ok(());
+ } else {
+ self.error = e.raw_os_error();
+ return Err(e);
+ }
+ }
+ }
+
+ self.poll_status = SockPollStatus::Pending;
+ self.pending_evts = self.user_evts;
+ } else {
+ unreachable!("Invalid poll status during update, {:#?}", self)
+ }
+
+ Ok(())
+ }
+
+ fn cancel(&mut self) -> io::Result<()> {
+ match self.poll_status {
+ SockPollStatus::Pending => {}
+ _ => unreachable!("Invalid poll status during cancel, {:#?}", self),
+ };
+ unsafe {
+ self.afd.cancel(&mut *self.iosb)?;
+ }
+ self.poll_status = SockPollStatus::Cancelled;
+ self.pending_evts = 0;
+ Ok(())
+ }
+
+ // This function is called with the overlapped pointer reinterpreted as an `Arc<Mutex<SockState>>`. Watch out for reference counting.
+ fn feed_event(&mut self) -> Option<Event> {
+ self.poll_status = SockPollStatus::Idle;
+ self.pending_evts = 0;
+
+ let mut afd_events = 0;
+ // We use the status info in the IO_STATUS_BLOCK to determine the socket poll status. Accessing it is unsafe, as the kernel writes to it through a raw pointer.
+ unsafe {
+ if self.delete_pending {
+ return None;
+ } else if self.iosb.u.Status == STATUS_CANCELLED {
+ /* The poll request was cancelled by CancelIoEx. */
+ } else if !NT_SUCCESS(self.iosb.u.Status) {
+ /* The overlapped request itself failed in an unexpected way. */
+ afd_events = afd::POLL_CONNECT_FAIL;
+ } else if self.poll_info.number_of_handles < 1 {
+ /* This poll operation succeeded but didn't report any socket events. */
+ } else if self.poll_info.handles[0].events & afd::POLL_LOCAL_CLOSE != 0 {
+ /* The poll operation reported that the socket was closed. */
+ self.mark_delete();
+ return None;
+ } else {
+ afd_events = self.poll_info.handles[0].events;
+ }
+ }
+
+ afd_events &= self.user_evts;
+
+ if afd_events == 0 {
+ return None;
+ }
+
+ // In mio, we have to simulate edge-triggered behavior to match the API usage.
+ // The strategy here is to intercept all read/write requests from the user that
+ // could return WouldBlock, and then reregister the socket to reset the interests.
+ self.user_evts &= !afd_events;
+
+ Some(Event {
+ data: self.user_data,
+ flags: afd_events,
+ })
+ }
+
+ pub fn is_pending_deletion(&self) -> bool {
+ self.delete_pending
+ }
+
+ pub fn mark_delete(&mut self) {
+ if !self.delete_pending {
+ if let SockPollStatus::Pending = self.poll_status {
+ drop(self.cancel());
+ }
+
+ self.delete_pending = true;
+ }
+ }
+
+ fn has_error(&self) -> bool {
+ self.error.is_some()
+ }
+}
+
+cfg_io_source! {
+ impl SockState {
+ fn new(raw_socket: RawSocket, afd: Arc<Afd>) -> io::Result<SockState> {
+ Ok(SockState {
+ iosb: IoStatusBlock::zeroed(),
+ poll_info: AfdPollInfo::zeroed(),
+ afd,
+ base_socket: get_base_socket(raw_socket)?,
+ user_evts: 0,
+ pending_evts: 0,
+ user_data: 0,
+ poll_status: SockPollStatus::Idle,
+ delete_pending: false,
+ error: None,
+ _pinned: PhantomPinned,
+ })
+ }
+
+ /// True if need to be added on update queue, false otherwise.
+ fn set_event(&mut self, ev: Event) -> bool {
+ /* afd::POLL_CONNECT_FAIL and afd::POLL_ABORT are always reported, even when not requested by the caller. */
+ let events = ev.flags | afd::POLL_CONNECT_FAIL | afd::POLL_ABORT;
+
+ self.user_evts = events;
+ self.user_data = ev.data;
+
+ (events & !self.pending_evts) != 0
+ }
+ }
+}
+
+impl Drop for SockState {
+ fn drop(&mut self) {
+ self.mark_delete();
+ }
+}
+
+/// Converts the pointer to a `SockState` into a raw pointer.
+/// To revert see `from_overlapped`.
+fn into_overlapped(sock_state: Pin<Arc<Mutex<SockState>>>) -> PVOID {
+ let overlapped_ptr: *const Mutex<SockState> =
+ unsafe { Arc::into_raw(Pin::into_inner_unchecked(sock_state)) };
+ overlapped_ptr as *mut _
+}
+
+/// Convert a raw overlapped pointer into a reference to `SockState`.
+/// Reverts `into_overlapped`.
+fn from_overlapped(ptr: *mut OVERLAPPED) -> Pin<Arc<Mutex<SockState>>> {
+ let sock_ptr: *const Mutex<SockState> = ptr as *const _;
+ unsafe { Pin::new_unchecked(Arc::from_raw(sock_ptr)) }
+}
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
+/// registered with the `Selector`. If a type that was previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return with an error. This matches Windows behavior.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
+
+/// Windows implementation of `sys::Selector`.
+///
+/// Edge-triggered event notification is simulated by resetting the internal event flags of each socket state (`SockState`)
+/// and setting the events back by intercepting all requests that could cause `io::ErrorKind::WouldBlock`.
+///
+/// This selector currently only supports sockets, because the `Afd` driver is Winsock2-specific.
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ pub(super) inner: Arc<SelectorInner>,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ SelectorInner::new().map(|inner| {
+ #[cfg(debug_assertions)]
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+ Selector {
+ #[cfg(debug_assertions)]
+ id,
+ inner: Arc::new(inner),
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(false),
+ }
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ Ok(Selector {
+ #[cfg(debug_assertions)]
+ id: self.id,
+ inner: Arc::clone(&self.inner),
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)),
+ })
+ }
+
+ /// # Safety
+ ///
+ /// This requires a mutable reference to self because only a single thread
+ /// can poll IOCP at a time.
+ pub fn select(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ self.inner.select(events, timeout)
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn register_waker(&self) -> bool {
+ self.has_waker.swap(true, Ordering::AcqRel)
+ }
+
+ pub(super) fn clone_port(&self) -> Arc<CompletionPort> {
+ self.inner.cp.clone()
+ }
+
+ #[cfg(feature = "os-ext")]
+ pub(super) fn same_port(&self, other: &Arc<CompletionPort>) -> bool {
+ Arc::ptr_eq(&self.inner.cp, other)
+ }
+}
+
+cfg_io_source! {
+ use super::InternalState;
+ use crate::Token;
+
+ impl Selector {
+ pub(super) fn register(
+ &self,
+ socket: RawSocket,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<InternalState> {
+ SelectorInner::register(&self.inner, socket, token, interests)
+ }
+
+ pub(super) fn reregister(
+ &self,
+ state: Pin<Arc<Mutex<SockState>>>,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(state, token, interests)
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct SelectorInner {
+ pub(super) cp: Arc<CompletionPort>,
+ update_queue: Mutex<VecDeque<Pin<Arc<Mutex<SockState>>>>>,
+ afd_group: AfdGroup,
+ is_polling: AtomicBool,
+}
+
+// Thread safety is ensured manually: mutable shared state is protected by
+// locks (and atomics).
+unsafe impl Sync for SelectorInner {}
+
+impl SelectorInner {
+ pub fn new() -> io::Result<SelectorInner> {
+ CompletionPort::new(0).map(|cp| {
+ let cp = Arc::new(cp);
+ let cp_afd = Arc::clone(&cp);
+
+ SelectorInner {
+ cp,
+ update_queue: Mutex::new(VecDeque::new()),
+ afd_group: AfdGroup::new(cp_afd),
+ is_polling: AtomicBool::new(false),
+ }
+ })
+ }
+
+ /// # Safety
+ ///
+    /// May only be called via `Selector::select`.
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ events.clear();
+
+ if timeout.is_none() {
+ loop {
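+                // A dequeued completion does not always yield a user-visible
+                // event (`feed_events` may consume it without producing one),
+                // so with no timeout we poll again until at least one event
+                // is returned.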
+ let len = self.select2(&mut events.statuses, &mut events.events, None)?;
+ if len == 0 {
+ continue;
+ }
+ break Ok(());
+ }
+ } else {
+ self.select2(&mut events.statuses, &mut events.events, timeout)?;
+ Ok(())
+ }
+ }
+
+ pub fn select2(
+ &self,
+ statuses: &mut [CompletionStatus],
+ events: &mut Vec<Event>,
+ timeout: Option<Duration>,
+ ) -> io::Result<usize> {
+ assert!(!self.is_polling.swap(true, Ordering::AcqRel));
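+        // While this flag is set (i.e. this thread is blocked in `get_many`),
+        // other threads (re)registering a socket submit their AFD poll
+        // updates directly instead of deferring them to the next poll (see
+        // `update_sockets_events_if_polling`).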
+
+ unsafe { self.update_sockets_events() }?;
+
+ let result = self.cp.get_many(statuses, timeout);
+
+ self.is_polling.store(false, Ordering::Relaxed);
+
+ match result {
+ Ok(iocp_events) => Ok(unsafe { self.feed_events(events, iocp_events) }),
+ Err(ref e) if e.raw_os_error() == Some(WAIT_TIMEOUT as i32) => Ok(0),
+ Err(e) => Err(e),
+ }
+ }
+
+ unsafe fn update_sockets_events(&self) -> io::Result<()> {
+ let mut update_queue = self.update_queue.lock().unwrap();
+ for sock in update_queue.iter_mut() {
+ let mut sock_internal = sock.lock().unwrap();
+ if !sock_internal.is_pending_deletion() {
+ sock_internal.update(sock)?;
+ }
+ }
+
+        // Remove all sockets without an error; those have an AFD poll operation pending.
+ update_queue.retain(|sock| sock.lock().unwrap().has_error());
+
+ self.afd_group.release_unused_afd();
+ Ok(())
+ }
+
+    // Returns the number of processed `iocp_events` rather than the events themselves.
+ unsafe fn feed_events(
+ &self,
+ events: &mut Vec<Event>,
+ iocp_events: &[CompletionStatus],
+ ) -> usize {
+ let mut n = 0;
+ let mut update_queue = self.update_queue.lock().unwrap();
+ for iocp_event in iocp_events.iter() {
+ if iocp_event.overlapped().is_null() {
+ events.push(Event::from_completion_status(iocp_event));
+ n += 1;
+ continue;
+ } else if iocp_event.token() % 2 == 1 {
+ // Handle is a named pipe. This could be extended to be any non-AFD event.
+ let callback = (*(iocp_event.overlapped() as *mut super::Overlapped)).callback;
+
+ let len = events.len();
+ callback(iocp_event.entry(), Some(events));
+ n += events.len() - len;
+ continue;
+ }
+
+ let sock_state = from_overlapped(iocp_event.overlapped());
+ let mut sock_guard = sock_state.lock().unwrap();
+ if let Some(e) = sock_guard.feed_event() {
+ events.push(e);
+ n += 1;
+ }
+
+ if !sock_guard.is_pending_deletion() {
+ update_queue.push_back(sock_state.clone());
+ }
+ }
+ self.afd_group.release_unused_afd();
+ n
+ }
+}
+
+cfg_io_source! {
+ use std::mem::size_of;
+ use std::ptr::null_mut;
+ use winapi::um::mswsock;
+ use winapi::um::winsock2::WSAGetLastError;
+ use winapi::um::winsock2::{WSAIoctl, SOCKET_ERROR};
+
+ impl SelectorInner {
+ fn register(
+ this: &Arc<Self>,
+ socket: RawSocket,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<InternalState> {
+ let flags = interests_to_afd_flags(interests);
+
+ let sock = {
+ let sock = this._alloc_sock_for_rawsocket(socket)?;
+ let event = Event {
+ flags,
+ data: token.0 as u64,
+ };
+ sock.lock().unwrap().set_event(event);
+ sock
+ };
+
+ let state = InternalState {
+ selector: this.clone(),
+ token,
+ interests,
+ sock_state: sock.clone(),
+ };
+
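+            // Queue the update; the actual IOCTL_AFD_POLL submission is
+            // deferred unless another thread is currently polling (see
+            // `update_sockets_events_if_polling`).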
+ this.queue_state(sock);
+ unsafe { this.update_sockets_events_if_polling()? };
+
+ Ok(state)
+ }
+
+ // Directly accessed in `IoSourceState::do_io`.
+ pub(super) fn reregister(
+ &self,
+ state: Pin<Arc<Mutex<SockState>>>,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ {
+ let event = Event {
+ flags: interests_to_afd_flags(interests),
+ data: token.0 as u64,
+ };
+
+ state.lock().unwrap().set_event(event);
+ }
+
+ // FIXME: a sock which has_error true should not be re-added to
+ // the update queue because it's already there.
+ self.queue_state(state);
+ unsafe { self.update_sockets_events_if_polling() }
+ }
+
+ /// This function is called by register() and reregister() to start an
+ /// IOCTL_AFD_POLL operation corresponding to the registered events, but
+ /// only if necessary.
+ ///
+ /// Since it is not possible to modify or synchronously cancel an AFD_POLL
+ /// operation, and there can be only one active AFD_POLL operation per
+ /// (socket, completion port) pair at any time, it is expensive to change
+ /// a socket's event registration after it has been submitted to the kernel.
+ ///
+ /// Therefore, if no other threads are polling when interest in a socket
+ /// event is (re)registered, the socket is added to the 'update queue', but
+ /// the actual syscall to start the IOCTL_AFD_POLL operation is deferred
+ /// until just before the GetQueuedCompletionStatusEx() syscall is made.
+ ///
+ /// However, when another thread is already blocked on
+ /// GetQueuedCompletionStatusEx() we tell the kernel about the registered
+ /// socket event(s) immediately.
+ unsafe fn update_sockets_events_if_polling(&self) -> io::Result<()> {
+ if self.is_polling.load(Ordering::Acquire) {
+ self.update_sockets_events()
+ } else {
+ Ok(())
+ }
+ }
+
+ fn queue_state(&self, sock_state: Pin<Arc<Mutex<SockState>>>) {
+ let mut update_queue = self.update_queue.lock().unwrap();
+ update_queue.push_back(sock_state);
+ }
+
+ fn _alloc_sock_for_rawsocket(
+ &self,
+ raw_socket: RawSocket,
+ ) -> io::Result<Pin<Arc<Mutex<SockState>>>> {
+ let afd = self.afd_group.acquire()?;
+ Ok(Arc::pin(Mutex::new(SockState::new(raw_socket, afd)?)))
+ }
+ }
+
+ fn try_get_base_socket(raw_socket: RawSocket, ioctl: u32) -> Result<RawSocket, i32> {
+ let mut base_socket: RawSocket = 0;
+ let mut bytes: u32 = 0;
+ unsafe {
+ if WSAIoctl(
+ raw_socket as usize,
+ ioctl,
+ null_mut(),
+ 0,
+ &mut base_socket as *mut _ as PVOID,
+ size_of::<RawSocket>() as u32,
+ &mut bytes,
+ null_mut(),
+ None,
+ ) != SOCKET_ERROR
+ {
+ Ok(base_socket)
+ } else {
+ Err(WSAGetLastError())
+ }
+ }
+ }
+
+ fn get_base_socket(raw_socket: RawSocket) -> io::Result<RawSocket> {
+ let res = try_get_base_socket(raw_socket, mswsock::SIO_BASE_HANDLE);
+ if let Ok(base_socket) = res {
+ return Ok(base_socket);
+ }
+
+ // The `SIO_BASE_HANDLE` should not be intercepted by LSPs, therefore
+ // it should not fail as long as `raw_socket` is a valid socket. See
+ // https://docs.microsoft.com/en-us/windows/win32/winsock/winsock-ioctls.
+ // However, at least one known LSP deliberately breaks it, so we try
+ // some alternative IOCTLs, starting with the most appropriate one.
+ for &ioctl in &[
+ mswsock::SIO_BSP_HANDLE_SELECT,
+ mswsock::SIO_BSP_HANDLE_POLL,
+ mswsock::SIO_BSP_HANDLE,
+ ] {
+ if let Ok(base_socket) = try_get_base_socket(raw_socket, ioctl) {
+                // Since we now know we're dealing with an LSP (otherwise
+                // SIO_BASE_HANDLE wouldn't have failed), only return a result
+                // when it differs from the original `raw_socket`.
+ if base_socket != raw_socket {
+ return Ok(base_socket);
+ }
+ }
+ }
+
+ // If the alternative IOCTLs also failed, return the original error.
+ let os_error = res.unwrap_err();
+ let err = io::Error::from_raw_os_error(os_error);
+ Err(err)
+ }
+}
+
+impl Drop for SelectorInner {
+ fn drop(&mut self) {
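+        // Drain any completion statuses still queued on the port so that
+        // resources held by in-flight operations (the `Arc` references
+        // transferred via `into_overlapped`, named-pipe state) are released.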
+ loop {
+ let events_num: usize;
+ let mut statuses: [CompletionStatus; 1024] = [CompletionStatus::zero(); 1024];
+
+ let result = self
+ .cp
+ .get_many(&mut statuses, Some(std::time::Duration::from_millis(0)));
+ match result {
+ Ok(iocp_events) => {
+ events_num = iocp_events.iter().len();
+ for iocp_event in iocp_events.iter() {
+ if iocp_event.overlapped().is_null() {
+ // Custom event
+ } else if iocp_event.token() % 2 == 1 {
+ // Named pipe, dispatch the event so it can release resources
+ let callback = unsafe {
+ (*(iocp_event.overlapped() as *mut super::Overlapped)).callback
+ };
+
+ callback(iocp_event.entry(), None);
+ } else {
+ // drain sock state to release memory of Arc reference
+ let _sock_state = from_overlapped(iocp_event.overlapped());
+ }
+ }
+ }
+
+ Err(_) => {
+ break;
+ }
+ }
+
+ if events_num == 0 {
+                // All completion statuses have been drained; stop looping.
+ break;
+ }
+ }
+
+ self.afd_group.release_unused_afd();
+ }
+}
+
+cfg_net! {
+ fn interests_to_afd_flags(interests: Interest) -> u32 {
+ let mut flags = 0;
+
+ if interests.is_readable() {
+ flags |= READABLE_FLAGS | READ_CLOSED_FLAGS | ERROR_FLAGS;
+ }
+
+ if interests.is_writable() {
+ flags |= WRITABLE_FLAGS | WRITE_CLOSED_FLAGS | ERROR_FLAGS;
+ }
+
+ flags
+ }
+}
diff --git a/third_party/rust/mio/src/sys/windows/tcp.rs b/third_party/rust/mio/src/sys/windows/tcp.rs
new file mode 100644
index 0000000000..b3f05aec65
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/tcp.rs
@@ -0,0 +1,71 @@
+use std::io;
+use std::net::{self, SocketAddr};
+use std::os::windows::io::AsRawSocket;
+
+use winapi::um::winsock2::{self, PF_INET, PF_INET6, SOCKET, SOCKET_ERROR, SOCK_STREAM};
+
+use crate::sys::windows::net::{init, new_socket, socket_addr};
+
+pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<SOCKET> {
+ init();
+ let domain = match address {
+ SocketAddr::V4(_) => PF_INET,
+ SocketAddr::V6(_) => PF_INET6,
+ };
+ new_socket(domain, SOCK_STREAM)
+}
+
+pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> {
+ use winsock2::bind;
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(
+ bind(
+ socket.as_raw_socket() as _,
+ raw_addr.as_ptr(),
+ raw_addr_length
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+ Ok(())
+}
+
+pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> {
+ use winsock2::connect;
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ let res = syscall!(
+ connect(
+ socket.as_raw_socket() as _,
+ raw_addr.as_ptr(),
+ raw_addr_length
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ );
+
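+    // The socket was created non-blocking (see `new_socket`), so `connect`
+    // is expected to fail with `WouldBlock` (WSAEWOULDBLOCK) while the
+    // connection is being established; that case is not an error.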
+ match res {
+ Err(err) if err.kind() != io::ErrorKind::WouldBlock => Err(err),
+ _ => Ok(()),
+ }
+}
+
+pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> {
+ use std::convert::TryInto;
+ use winsock2::listen;
+
+ let backlog = backlog.try_into().unwrap_or(i32::max_value());
+ syscall!(
+ listen(socket.as_raw_socket() as _, backlog),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+ Ok(())
+}
+
+pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ // The non-blocking state of `listener` is inherited. See
+ // https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-accept#remarks.
+ listener.accept()
+}
diff --git a/third_party/rust/mio/src/sys/windows/udp.rs b/third_party/rust/mio/src/sys/windows/udp.rs
new file mode 100644
index 0000000000..825ecccff4
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/udp.rs
@@ -0,0 +1,53 @@
+use std::io;
+use std::mem::{self, MaybeUninit};
+use std::net::{self, SocketAddr};
+use std::os::windows::io::{AsRawSocket, FromRawSocket};
+use std::os::windows::raw::SOCKET as StdSocket; // winapi uses usize, stdlib uses u32/u64.
+
+use winapi::ctypes::c_int;
+use winapi::shared::ws2def::IPPROTO_IPV6;
+use winapi::shared::ws2ipdef::IPV6_V6ONLY;
+use winapi::um::winsock2::{bind as win_bind, closesocket, getsockopt, SOCKET_ERROR, SOCK_DGRAM};
+
+use crate::sys::windows::net::{init, new_ip_socket, socket_addr};
+
+pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
+ init();
+ new_ip_socket(addr, SOCK_DGRAM).and_then(|socket| {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(
+ win_bind(socket, raw_addr.as_ptr(), raw_addr_length,),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )
+ .map_err(|err| {
+ // Close the socket if we hit an error, ignoring the error
+ // from closing since we can't pass back two errors.
+ let _ = unsafe { closesocket(socket) };
+ err
+ })
+ .map(|_| unsafe { net::UdpSocket::from_raw_socket(socket as StdSocket) })
+ })
+}
+
+pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
+ let mut optval: MaybeUninit<c_int> = MaybeUninit::uninit();
+ let mut optlen = mem::size_of::<c_int>() as c_int;
+
+ syscall!(
+ getsockopt(
+ socket.as_raw_socket() as usize,
+ IPPROTO_IPV6 as c_int,
+ IPV6_V6ONLY as c_int,
+ optval.as_mut_ptr().cast(),
+ &mut optlen,
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+
+ debug_assert_eq!(optlen as usize, mem::size_of::<c_int>());
+ // Safety: `getsockopt` initialised `optval` for us.
+ let optval = unsafe { optval.assume_init() };
+ Ok(optval != 0)
+}
diff --git a/third_party/rust/mio/src/sys/windows/waker.rs b/third_party/rust/mio/src/sys/windows/waker.rs
new file mode 100644
index 0000000000..ab12c3c689
--- /dev/null
+++ b/third_party/rust/mio/src/sys/windows/waker.rs
@@ -0,0 +1,29 @@
+use crate::sys::windows::Event;
+use crate::sys::windows::Selector;
+use crate::Token;
+
+use miow::iocp::CompletionPort;
+use std::io;
+use std::sync::Arc;
+
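+/// A minimal sketch of how this type is reached through mio's public API
+/// (hypothetical usage; error handling elided):
+///
+/// ```ignore
+/// use std::sync::Arc;
+/// use mio::{Poll, Token, Waker};
+///
+/// let poll = Poll::new()?;
+/// let waker = Arc::new(Waker::new(poll.registry(), Token(1))?);
+/// let waker2 = Arc::clone(&waker);
+/// std::thread::spawn(move || {
+///     // Posts a completion status to the IOCP, waking `poll.poll(..)`.
+///     waker2.wake().expect("failed to wake");
+/// });
+/// ```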
+#[derive(Debug)]
+pub struct Waker {
+ token: Token,
+ port: Arc<CompletionPort>,
+}
+
+impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ Ok(Waker {
+ token,
+ port: selector.clone_port(),
+ })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ let mut ev = Event::new(self.token);
+ ev.set_readable();
+
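+        // A null `OVERLAPPED` pointer marks this as a custom event: the
+        // selector's `feed_events` forwards it to the user instead of
+        // treating it as an AFD or named-pipe completion.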
+ self.port.post(ev.to_completion_status())
+ }
+}