--- /dev/null
+freebsd_instance:
+ image: freebsd-12-2-release-amd64
+
+env:
+ RUST_BACKTRACE: full
+
+task:
+ name: FreeBSD
+ setup_script:
+ - pkg install -y curl
+ - curl https://sh.rustup.rs -sSf --output rustup.sh
+ - sh rustup.sh -y --profile minimal
+ cargo_cache:
+ folder: $HOME/.cargo/registry
+ build_script:
+ - . $HOME/.cargo/env
+ - cargo build
+ - cargo build --no-default-features
+ amd64_test_script:
+ - . $HOME/.cargo/env
+ - cargo test --all-features
+ i386_test_script:
+ - . $HOME/.cargo/env
+ - rustup target add i686-unknown-freebsd
+ - cargo test --target i686-unknown-freebsd --all-features
+ before_cache_script:
+ - rm -rf $HOME/.cargo/registry/index
--- /dev/null
+# Windows changes
+/src/sys/windows/ @carllerche
\ No newline at end of file
--- /dev/null
+.cargo
+Cargo.lock
+target*
+libs
--- /dev/null
+# 0.8.0
+
+## Removed
+
+* Deprecated features (https://github.com/tokio-rs/mio/commit/105f8f2afb57b01ddea716a0aa9720f226c520e3):
+ * extra-docs (always enabled)
+ * tcp (replaced with "net" feature).
+ * udp (replaced with "net" feature).
+ * uds (replaced with "net" feature).
+ * pipe (replaced with "os-ext" feature).
+* `TcpSocket` type
+ (https://github.com/tokio-rs/mio/commit/02e9be41f27daf822575444fdd2b3067433a5996).
+ The socket2 crate provides all the functionality and more.
+* Support for Solaris, it never really worked anyway
+ (https://github.com/tokio-rs/mio/pull/1528).
+
+## Changes
+
+* Update minimum supported Rust version (MSRV) to 1.46.0
+ (https://github.com/tokio-rs/mio/commit/5c577efecd23750a9a3e0f6ad080ab98f14a255d).
+
+## Added
+
+* `UdpSocket::peer_addr`
+ (https://github.com/tokio-rs/mio/commit/5fc104d08e0e74c8a19247f7cba0f058699fc438).
+
+# 0.7.14
+
+## Fixes
+
+* Remove use of an unsound internal macro (#1519).
+
+## Added
+
+* `sys::unix::SocketAddr::as_abstract_namespace()` (#1520).
+
+# 0.7.13
+
+## Fixes
+
+* Fix invalid usage of `F_DUPFD_CLOEXEC` in `Registry::try_clone` (#1497,
+ https://github.com/tokio-rs/mio/commit/2883f5c1f35bf1a59682c5ffc4afe6b97d7d6e68).
+
+# 0.7.12 (yanked)
+
+## Fixes
+
+* Set `FD_CLOEXEC` when calling `Registry::try_clone`
+ (https://github.com/tokio-rs/mio/commit/d1617b567ff6bc669d71e367d22e0e93ff7e2e24 for epoll and
+  https://github.com/tokio-rs/mio/commit/b367a05e408ca90a26383c3aa16d8a16f019dc59 for kqueue).
+
+# 0.7.11
+
+## Fixes
+
+* Fix a missing winapi feature.
+ (https://github.com/tokio-rs/mio/commit/a7e61db9e3c2b929ef1a33532bfcc22045d163ce).
+
+# 0.7.10
+
+## Fixes
+
+* Fix an instance of a missing `doc(cfg(...))` attribute
+ (https://github.com/tokio-rs/mio/commit/25e8f911357c740034f10a170dfa4ea1b28234ce).
+
+# 0.7.9
+
+## Fixes
+
+* Fix error handling in `NamedPipe::write`
+ (https://github.com/tokio-rs/mio/commit/aec872be9732e5c6685100674278be27f54a271b).
+* Use `accept(2)` on x86 Android instead of `accept4(2)`
+ (https://github.com/tokio-rs/mio/commit/6f86b925d3e48f30905d5cfa54348acf3f1fa036,
+ https://github.com/tokio-rs/mio/commit/8d5414880ab82178305ac1d2c16d715e58633d3e).
+* Improve error message when opening AFD device
+ (https://github.com/tokio-rs/mio/commit/139f7c4422321eb4a17b14ae2c296fddd19a8804).
+
+# 0.7.8
+
+## Fixes
+
+* Fix `TcpStream::set_linger` on macOS
+ (https://github.com/tokio-rs/mio/commit/175773ce02e85977db81224c782c8d140aba8543).
+* Fix compilation on DragonFlyBSD
+ (https://github.com/tokio-rs/mio/commit/b51af46b28871f8dd3233b490ee62237ffed6a26).
+
+# 0.7.7
+
+## Added
+
+* `UdpSocket::only_v6`
+ (https://github.com/tokio-rs/mio/commit/0101e05a800f17fb88f4315d9b9fe0f08cca6e57).
+* `Clone` implementation for `Event`
+ (https://github.com/tokio-rs/mio/commit/26540ebbae89df6d4d08465c56f715d8f2addfc3).
+* `AsRawFd` implementation for `Registry`
+ (https://github.com/tokio-rs/mio/commit/f70daa72da0042b1880256164774c3286d315a02).
+* `Read` and `Write` implementations for `&unix::pipe::Sender` and
+  `&unix::pipe::Receiver`, i.e. on references to the types; implementations on
+  the types themselves already existed
+ (https://github.com/tokio-rs/mio/commit/1be481dcbbcb6906364008b5d61e7f53cddc3eb3).
+
+## Fixes
+
+* Underflow in `SocketAddr::address`
+ (https://github.com/tokio-rs/mio/commit/6d3fa69240cd4bb95e9d34605c660c30245a18bd).
+* Android build with the net feature enabled, but with os-poll disabled
+ (https://github.com/tokio-rs/mio/commit/49d8fd33e026ad6e2c055d05d6667180ba2af7be).
+* Solaris build with the net feature enabled, but with os-poll disabled
+ (https://github.com/tokio-rs/mio/commit/a6e025e9d9511639ec106ebedc0dd312bdc9be12).
+* Ensure that `Waker::wake` works on illumos systems with poor `pipe(2)` and
+ `epoll(2)` interaction using `EPOLLET`
+ (https://github.com/tokio-rs/mio/commit/943d4249dcc17cd8b4d2250c4fa19116097248fa).
+* Fix `unix::pipe` on illumos
+ (https://github.com/tokio-rs/mio/commit/0db49f6d5caf54b12176821363d154384357e70a).
+
+# 0.7.6
+
+## Added
+
+* `net` feature, replaces `tcp`, `udp` and `uds` features
+ (https://github.com/tokio-rs/mio/commit/a301ba520a8479b459c4acdcefa4a7c5eea818c7).
+* `os-ext` feature, replaces `os-util` and `pipe` features
+ (https://github.com/tokio-rs/mio/commit/f5017fae8a3d3bb4b4cada25b01a2d76a406badc).
+* Added keepalive support to `TcpSocket`
+ (https://github.com/tokio-rs/mio/commit/290c43a96662d54ab7c4b8814e5a9f9a9e523fda).
+* `TcpSocket::set_{send, recv}_buffer_size`
+ (https://github.com/tokio-rs/mio/commit/40c4af79bf5b32b8fbdbf6f2e5c16290e1d3d406).
+* `TcpSocket::get_linger`
+ (https://github.com/tokio-rs/mio/commit/13e82ced655bbb6e2729226e485a7de9f2c2ccd9).
+* Implement `IntoRawFd` for `TcpSocket`
+ (https://github.com/tokio-rs/mio/commit/50548ed45d0b2c98f1f2e003e210d14195284ef4).
+
+## Deprecated
+
+* The `tcp`, `udp` and `uds` features, replaced by a new `net` feature.
+ (https://github.com/tokio-rs/mio/commit/a301ba520a8479b459c4acdcefa4a7c5eea818c7).
+* The `extra-docs` feature, now enabled by default.
+ (https://github.com/tokio-rs/mio/commit/25731e8688a2d91c5c700674a2c2d3841240ece1).
+* The `os-util` and `pipe` features, replaced by a new `os-ext` feature.
+ (https://github.com/tokio-rs/mio/commit/f5017fae8a3d3bb4b4cada25b01a2d76a406badc).
+
+## Fixes
+
+* Incorrect assumption of the layout of `std::net::SocketAddr`. Previously Mio
+ would assume that `SocketAddrV{4,6}` had the same layout as
+ `libc::sockaddr_in(6)`, however this is not guaranteed by the standard
+ library.
+ (https://github.com/tokio-rs/mio/commit/152e0751f0be1c9b0cbd6778645b76bcb0eba93c).
+* Also bumped the miow dependency to version 0.3.6 to solve the same problem as
+ above.
+
+# 0.7.5
+
+## Added
+
+* `TcpSocket::get_localaddr()` retrieves local address
+ (https://github.com/tokio-rs/mio/commit/b41a022b2242eef1969c70c8ba93e04c528dba47).
+* `TcpSocket::set_reuseport()` & `TcpSocket::get_reuseport()` configures and reads `SO_REUSEPORT`
+ (https://github.com/tokio-rs/mio/commit/183bbe409ab69cbf9db41d0263b41ec86202d9a0).
+* `unix::pipe()`, a wrapper around the `pipe(2)` system call
+ (https://github.com/tokio-rs/mio/commit/2b7c0967a7362303946deb3d4ca2ae507af6c72d).
+* Add a check that a single Waker is active per Poll instance (only in debug mode)
+ (https://github.com/tokio-rs/mio/commit/f4874f28b32efcf4841691884c65a89734d96a56).
+* Added `Interest::remove()`
+ (https://github.com/tokio-rs/mio/commit/b8639c3d9ac07bb7e2e27685680c8a6510fa1357).
+
+# 0.7.4
+
+## Fixes
+
+* lost "socket closed" events on windows
+ (https://github.com/tokio-rs/mio/commit/50c299aca56c4a26e5ed20c283007239fbe6a7a7).
+
+## Added
+
+* `TcpSocket::set_linger()` configures SO_LINGER
+ (https://github.com/tokio-rs/mio/commit/3b4096565c1a879f651b8f8282ecdcbdbd5c92d3).
+
+# 0.7.3
+
+## Added
+
+* `TcpSocket` for configuring a TCP socket before connecting or listening
+ (https://github.com/tokio-rs/mio/commit/5b09e60d0f64419b989bda88c86a3147334a03b3).
+
+# 0.7.2
+
+## Added
+
+* Windows named pipe support.
+ (https://github.com/tokio-rs/mio/commit/52e8c2220e87696d20f13561402bcaabba4136ed).
+
+# 0.7.1
+
+## Reduced support for 32-bit Apple targets
+
+In January 2020 Rust reduced its support for 32-bit Apple targets
+(https://blog.rust-lang.org/2020/01/03/reducing-support-for-32-bit-apple-targets.html).
+Starting with v0.7.1 Mio will do the same, as we're no longer checking 32-bit
+iOS/macOS on our CI.
+
+## Added
+
+* Support for illumos
+ (https://github.com/tokio-rs/mio/commit/976f2354d0e8fbbb64fba3bf017d7131f9c369a0).
+* Report `epoll(2)`'s `EPOLLERR` event as `Event::is_write_closed` if it's the
+ only event
+ (https://github.com/tokio-rs/mio/commit/0c77b5712d675eeb9bd43928b5dd7d22b2c7ac0c).
+* Optimised event::Iter::{size_hint, count}
+ (https://github.com/tokio-rs/mio/commit/40df934a11b05233a7796c4de19a4ee06bc4e03e).
+
+## Fixed
+
+* Work around Linux kernel < 2.6.37 bug on 32-bits making timeouts longer than
+ ~30 minutes effectively infinite
+ (https://github.com/tokio-rs/mio/commit/d555991f5ee81f6c1eec0fe481557d3d5b8d5ff4).
+* Set `SO_NOSIGPIPE` on all sockets (not just UDP) for Apple targets
+ (https://github.com/tokio-rs/mio/commit/b8bbdcb0d3236f4c4acb257996d42a88dc9987d9).
+* Properly handle `POLL_ABORT` on Windows
+ (https://github.com/tokio-rs/mio/commit/a98da62b3ed1eeed1770aaca12f46d647e4fa749).
+* Improved error handling around failing `SIO_BASE_HANDLE` calls on Windows
+ (https://github.com/tokio-rs/mio/commit/b15fc18458a79ef8a51f73effa92548650f4e5dc).
+
+## Changed
+
+* On NetBSD we now use `accept4(2)`
+ (https://github.com/tokio-rs/mio/commit/4e306addc7144f2e02a7e8397c220b179a006a19).
+* The package uploaded to crates.io should be slightly smaller
+ (https://github.com/tokio-rs/mio/commit/eef8d3b9500bc0db957cd1ac68ee128ebc68351f).
+
+## Removed
+
+* Dependency on `lazy_static` on Windows
+ (https://github.com/tokio-rs/mio/commit/57e4c2a8ac153bc7bb87829e22cf0a21e3927e8a).
+
+# 0.7.0
+
+Version 0.7 of Mio contains various major changes compared to version 0.6.
+Overall a large number of API changes have been made to reduce the complexity of
+the implementation and remove overhead where possible.
+
+Please refer to the [blog post about
+0.7-alpha.1](https://tokio.rs/blog/2019-12-mio-v0.7-alpha.1/) for additional
+information.
+
+## Added
+
+* `Interest` structure that replaces `Ready` in registering event sources.
+* `Registry` structure that separates the registering and polling functionality.
+* `Waker` structure that allows another thread to wake a thread polling `Poll`.
+* Unix Domain Socket (UDS) types: `UnixDatagram`, `UnixListener` and
+ `UnixStream`.
+
+## Removed
+
+* All code deprecated in 0.6 was removed in 0.7.
+* Support for Fuchsia was removed as the code was unmaintained.
+* Support for Bitrig was removed; rustc has also dropped support for it.
+* `UnixReady` was merged into `Ready`.
+* Custom user-space readiness queue was removed; this includes the public
+  `Registration` and `SetReadiness` types.
+* `PollOpt` was removed and all registrations use edge-triggers. See the upgrade
+  guide on how to process events using edge-triggers.
+* The network types (types in the `net` module) now support only the same API as
+  found in the standard library; various methods on the types were removed.
+* `TcpStream` now supports vectored I/O.
+* `Poll::poll_interruptible` was removed. Instead `Poll::poll` will now return
+ an error if one occurs.
+* `From<usize>` is removed from `Token`, the internal field is still public, so
+ `Token(my_token)` can still be used.
+
+## Changed
+
+* Various documentation improvements were made around correct usage of `Poll`
+ and registered event sources. It is recommended to reread the documentation of
+ at least `event::Source` and `Poll`.
+* Mio now uses Rust 2018 and rustfmt for all code.
+* `Event` was changed to be a wrapper around the OS event. This means it can be
+ significantly larger on some OSes.
+* `Ready` was removed and replaced with various `is_*` methods on `Event`. For
+  example, instead of checking for readable readiness using
+  `Event::ready().is_readable()`, you would call `Event::is_readable()`.
+* `Ready::is_hup` was removed in favour of `Event::is_read_closed` and
+ `Event::is_write_closed`.
+* The Iterator implementation of `Events` was changed to return `&Event`.
+* `Evented` was renamed to `event::Source` and now takes a mutable reference to
+  the source.
+* Minimum supported Rust version was increased to 1.39.
+* By default Mio now uses a shim implementation. To enable the full
+  implementation, which uses the OS, enable the `os-poll` feature. To enable the
+  network types use `tcp`, `udp` and/or `uds`. For more documentation on the
+  features see the `feature` module in the API documentation (requires the
+  `extra-docs` feature).
+* The entire Windows implementation was rewritten.
+* Various optimisations were made to reduce the number of system calls in
+ creating and using sockets, e.g. making use of `accept4(2)`.
+* The `fmt::Debug` implementation of `Events` is now actually useful as it
+ prints all `Event`s.
+
+# 0.6.23 (Dec 01, 2020)
+
+### Changed
+- **MSRV**: Increased the MSRV from 1.18.0 (Jun 8, 2017) to 1.31.0 (Dec 6,
+ 2018)
+ (https://github.com/tokio-rs/mio/commit/4879e0d32ddfd98e762fc87240e594a3ad8fca30).
+
+### Fixed
+- Work around Linux kernel < 2.6.37 bug on 32-bits making timeouts longer than
+ ~30 minutes effectively infinite
+ (https://github.com/tokio-rs/mio/commit/e7cba59950e9c9fa6194e29b5b1e72029e3df455).
+- Update miow and net2 dependencies to get rid of an invalid memory layout assumption
+ (https://github.com/tokio-rs/mio/commit/13f02ac0a86d7c0c0001e5ff8960a0b4340d075c).
+
+# 0.6.22 (May 01, 2020)
+
+### Added
+- Add support for illumos target (#1294)
+
+# 0.6.21 (November 27, 2019)
+
+### Fixed
+- remove `=` dependency on `cfg-if`.
+
+# 0.6.20 (November 21, 2019)
+
+### Fixed
+- Use default IOCP concurrency value (#1161).
+- Set `FD_CLOEXEC` in pipe (#1095).
+
+# 0.6.19 (May 28, 2019)
+
+### Fixed
+- Do not trigger HUP events on kqueue platforms (#958).
+
+# 0.6.18 (May 24, 2019)
+
+### Fixed
+- Fix compilation on kqueue platforms with 32bit C long (#948).
+
+# 0.6.17 (May 15, 2019)
+
+### Fixed
+- Don't report `RDHUP` as `HUP` (#939)
+- Fix lazycell related compilation issues.
+- Fix EPOLLPRI conflicting with READABLE
+- Abort process on ref count overflows
+
+### Added
+- Define PRI on all targets
+
+# 0.6.16 (September 5, 2018)
+
+* Add EPOLLPRI readiness to UnixReady on supported platforms (#867)
+* Reduce spurious awaken calls (#875)
+
+# 0.6.15 (July 3, 2018)
+
+* Implement `Evented` for containers (#840).
+* Fix android-aarch64 build (#850).
+
+# 0.6.14 (March 8, 2018)
+
+* Add `Poll::poll_interruptible` (#811)
+* Add `Ready::all` and `usize` conversions (#825)
+
+# 0.6.13 (February 5, 2018)
+
+* Fix build on DragonFlyBSD.
+* Add `TcpListener::from_std` that does not require the socket addr.
+* Deprecate `TcpListener::from_listener` in favor of from_std.
+
+# 0.6.12 (January 5, 2018)
+
+* Add `TcpStream::peek` function (#773).
+* Raise minimum Rust version to 1.18.0.
+* `Poll`: retry select() when interrupted by a signal (#742).
+* Deprecate `Events` index access (#713).
+* Add `Events::clear` (#782).
+* Add support for `lio_listio` (#780).
+
+# 0.6.11 (October 25, 2017)
+
+* Allow register to take empty interest (#640).
+* Fix bug with TCP errors on windows (#725).
+* Add TcpListener::accept_std (#733).
+* Update IoVec to fix soundness bug -- includes behavior change. (#747).
+* Minimum Rust version is now 1.14.0.
+* Fix Android x86_64 build.
+* Misc API & doc polish.
+
+# 0.6.10 (July 27, 2017)
+
+* Experimental support for Fuchsia
+* Add `only_v6` option for UDP sockets
+* Fix build on NetBSD
+* Minimum Rust version is now 1.13.0
+* Assignment operators (e.g. `|=`) are now implemented for `Ready`
+
+# 0.6.9 (June 7, 2017)
+
+* More socket options are exposed through the TCP types, brought in through the
+ `net2` crate.
+
+# 0.6.8 (May 26, 2017)
+
+* Support Fuchsia
+* POSIX AIO support
+* Fix memory leak caused by Register::new2
+* Windows: fix handling failed TCP connections
+* Fix build on aarch64-linux-android
+* Fix usage of `O_CLOEXEC` with `SETFL`
+
+# 0.6.7 (April 27, 2017)
+
+* Ignore EPIPE coming out of `kevent`
+* Timer thread should exit when timer is dropped.
+
+# 0.6.6 (March 22, 2017)
+
+* Add `send()`, `recv()` and `connect()` to `UdpSocket`.
+* Fix bug in custom readiness queue
+* Move net types into `net` module
+
+# 0.6.5 (March 14, 2017)
+
+* Misc improvements to kqueue bindings
+* Add official support for iOS, Android, BSD
+* Reimplement custom readiness queue
+* `Poll` is now `Sync`
+* Officially deprecate non-core functionality (timers, channel, etc...)
+* `Registration` now implements `Evented`
+* Fix bug around error conditions with `connect` on Windows.
+* Use iovec crate for scatter / gather operations
+* Only support readable and writable readiness on all platforms
+* Expose additional readiness in a platform specific capacity
+
+# 0.6.4 (January 24, 2017)
+
+* Fix compilation on musl
+* Add `TcpStream::from_stream` which converts a std TCP stream to Mio.
+
+# 0.6.3 (January 22, 2017)
+
+* Implement readv/writev for `TcpStream`, allowing vectored reads/writes to
+ work across platforms
+* Remove `nix` dependency
+* Implement `Display` and `Error` for some channel error types.
+* Optimize TCP on Windows through `SetFileCompletionNotificationModes`
+
+# 0.6.2 (December 18, 2016)
+
+* Allow registration of custom handles on Windows (like `EventedFd` on Unix)
+* Send only one byte for the awakener on Unix instead of four
+* Fix a bug in the timer implementation which caused an infinite loop
+
+# 0.6.1 (October 30, 2016)
+
+* Update dependency of `libc` to 0.2.16
+* Fix channel `dec` logic
+* Fix a timer bug around timeout cancellation
+* Don't allocate buffers for TCP reads on Windows
+* Touched up documentation in a few places
+* Fix an infinite looping timer thread on OSX
+* Fix compile on 32-bit OSX
+* Fix compile on FreeBSD
+
+# 0.6.0 (September 2, 2016)
+
+* Shift primary API towards `Poll`
+* `EventLoop` and related types were moved to the `deprecated` mod. All contents
+  of the `deprecated` mod will be removed by Mio 1.0.
+* Increase minimum supported Rust version to 1.9.0
+* Deprecate unix domain socket implementation in favor of using a
+ version external to Mio. For example: https://github.com/alexcrichton/mio-uds.
+* Remove various types now included in `std`
+* Updated TCP & UDP APIs to match the versions in `std`
+* Enable implementing `Evented` for any type via `Registration`
+* Rename `IoEvent` -> `Event`
+* Access `Event` data via functions vs. public fields.
+* Expose `Events` as a public type that is passed into `Poll`
+* Use `std::time::Duration` for all APIs that require a time duration.
+* Polled events are now retrieved via `Events` type.
+* Implement `std::error::Error` for `TimerError`
+* Relax `Send` bound on notify messages.
+* Remove `Clone` impl for `Timeout` (future proof)
+* Remove `mio::prelude`
+* Remove `mio::util`
+* Remove dependency on bytes
+
+# 0.5.0 (December 3, 2015)
+
+* Windows support (#239)
+* NetBSD support (#306)
+* Android support (#295)
+* Don't re-export bytes types
+* Renamed `EventLoop::register_opt` to `EventLoop::register` (#257)
+* `EventLoopConfig` is now a builder instead of having public struct fields. It
+ is also no longer `Copy`. (#259)
+* `TcpSocket` is no longer exported in the public API (#262)
+* Integrate with net2. (#262)
+* `TcpListener` now returns the remote peer address from `accept` as well (#275)
+* The `UdpSocket::{send_to, recv_from}` methods are no longer generic over `Buf`
+ or `MutBuf` but instead take slices directly. The return types have also been
+ updated to return the number of bytes transferred. (#260)
+* Fix bug with kqueue where an error on registration prevented the
+ changelist from getting flushed (#276)
+* Support sending/receiving FDs over UNIX sockets (#291)
+* Mio's socket types are permanently associated with an EventLoop (#308)
+* Reduce unnecessary poll wakeups (#314)
+
+
+# 0.4.1 (July 21, 2015)
+
+* [BUGFIX] Fix notify channel concurrency bug (#216)
+
+# 0.4.0 (July 16, 2015)
+
+* [BUGFIX] EventLoop::register requests all events, not just readable.
+* [BUGFIX] Attempting to send a message to a shutdown event loop fails correctly.
+* [FEATURE] Expose TCP shutdown
+* [IMPROVEMENT] Coalesce readable & writable into `ready` event (#184)
+* [IMPROVEMENT] Rename TryRead & TryWrite function names to avoid conflict with std.
+* [IMPROVEMENT] Provide TCP and UDP types in Mio (path to windows #155)
+* [IMPROVEMENT] Use clock_ticks crate instead of time (path to windows #155)
+* [IMPROVEMENT] Move unix specific features into mio::unix module
+* [IMPROVEMENT] TcpListener sets SO_REUSEADDR by default
--- /dev/null
+[package]
+edition = "2018"
+name = "mio"
+# When releasing to crates.io:
+# - Update CHANGELOG.md.
+# - Create git tag
+version = "0.8.0"
+license = "MIT"
+authors = [
+ "Carl Lerche <me@carllerche.com>",
+ "Thomas de Zeeuw <thomasdezeeuw@gmail.com>",
+ "Tokio Contributors <team@tokio.rs>",
+]
+description = "Lightweight non-blocking IO"
+homepage = "https://github.com/tokio-rs/mio"
+repository = "https://github.com/tokio-rs/mio"
+readme = "README.md"
+keywords = ["io", "async", "non-blocking"]
+categories = ["asynchronous"]
+include = [
+ "Cargo.toml",
+ "LICENSE",
+ "README.md",
+ "CHANGELOG.md",
+ "src/**/*.rs",
+ "examples/**/*.rs",
+]
+
+# For documentation of features see the `mio::features` module.
+[features]
+# By default Mio only provides a shell implementation.
+default = []
+
+# Enables the `Poll` and `Registry` types.
+os-poll = []
+# Enables additional OS specific extensions, e.g. Unix `pipe(2)`.
+os-ext = ["os-poll"]
+# Enables `mio::net` module containing networking primitives.
+net = []
+
+[dependencies]
+log = "0.4.8"
+
+[target.'cfg(unix)'.dependencies]
+libc = "0.2.86"
+
+[target.'cfg(windows)'.dependencies]
+miow = "0.3.6"
+winapi = { version = "0.3", features = ["winsock2", "mswsock"] }
+ntapi = "0.3"
+
+[dev-dependencies]
+env_logger = { version = "0.8.4", default-features = false }
+rand = "0.8"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+targets = [
+ "aarch64-apple-ios",
+ "aarch64-linux-android",
+ "x86_64-apple-darwin",
+ "x86_64-pc-windows-msvc",
+ "x86_64-unknown-dragonfly",
+ "x86_64-unknown-freebsd",
+ "x86_64-unknown-illumos",
+ "x86_64-unknown-linux-gnu",
+ "x86_64-unknown-netbsd",
+ "x86_64-unknown-openbsd",
+]
+
+[package.metadata.playground]
+features = ["os-poll", "os-ext", "net"]
+
+[[example]]
+name = "tcp_server"
+required-features = ["os-poll", "net"]
+
+[[example]]
+name = "udp_server"
+required-features = ["os-poll", "net"]
--- /dev/null
+Copyright (c) 2014 Carl Lerche and other MIO contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
--- /dev/null
+# Targets available via Rustup that are supported.
+TARGETS ?= "aarch64-apple-ios" "aarch64-linux-android" "x86_64-apple-darwin" "x86_64-pc-windows-msvc" "x86_64-unknown-freebsd" "x86_64-unknown-illumos" "x86_64-unknown-linux-gnu" "x86_64-unknown-netbsd"
+
+test:
+ cargo test --all-features
+
+# Test everything for the current OS/architecture and check all targets in
+# $TARGETS.
+test_all: check_all_targets
+ cargo hack test --feature-powerset
+ cargo hack test --feature-powerset --release
+
+# Check all targets using all features.
+check_all_targets: $(TARGETS)
+$(TARGETS):
+ cargo hack check --target $@ --feature-powerset
+
+# Installs all required targets for `check_all_targets`.
+install_targets:
+ rustup target add $(TARGETS)
+
+# NOTE: when using this command you might want to change the `test` target to
+# only run a subset of the tests you're actively working on.
+dev:
+ find src/ tests/ Makefile Cargo.toml | entr -d -c $(MAKE) test
+
+clean:
+ cargo clean
+
+.PHONY: test test_all check_all_targets $(TARGETS) dev clean
--- /dev/null
+# Mio – Metal IO
+
+Mio is a fast, low-level I/O library for Rust focusing on non-blocking APIs and
+event notification for building high performance I/O apps with as little
+overhead as possible over the OS abstractions.
+
+[![Crates.io][crates-badge]][crates-url]
+[![MIT licensed][mit-badge]][mit-url]
+[![Build Status][azure-badge]][azure-url]
+[![Build Status][cirrus-badge]][cirrus-url]
+
+[crates-badge]: https://img.shields.io/crates/v/mio.svg
+[crates-url]: https://crates.io/crates/mio
+[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg
+[mit-url]: LICENSE
+[azure-badge]: https://dev.azure.com/tokio-rs/Tokio/_apis/build/status/tokio-rs.mio?branchName=master
+[azure-url]: https://dev.azure.com/tokio-rs/Tokio/_build/latest?definitionId=2&branchName=master
+[cirrus-badge]: https://api.cirrus-ci.com/github/tokio-rs/mio.svg
+[cirrus-url]: https://cirrus-ci.com/github/tokio-rs/mio
+
+**API documentation**
+
+* [master](https://tokio-rs.github.io/mio/doc/mio/)
+* [v0.7](https://docs.rs/mio/^0.7)
+* [v0.6](https://docs.rs/mio/^0.6)
+
+This is a low level library; if you are looking for something easier to get
+started with, see [Tokio](https://tokio.rs).
+
+## Usage
+
+To use `mio`, first add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+mio = "0.7"
+```
+
+Next we can start using Mio. The following is a quick introduction using
+`TcpListener` and `TcpStream`. Note that the "os-poll" and "net" features, as
+specified above, are required for this example.
+
+```rust
+use std::error::Error;
+
+use mio::net::{TcpListener, TcpStream};
+use mio::{Events, Interest, Poll, Token};
+
+// Some tokens to allow us to identify which event is for which socket.
+const SERVER: Token = Token(0);
+const CLIENT: Token = Token(1);
+
+fn main() -> Result<(), Box<dyn Error>> {
+ // Create a poll instance.
+ let mut poll = Poll::new()?;
+ // Create storage for events.
+ let mut events = Events::with_capacity(128);
+
+ // Setup the server socket.
+ let addr = "127.0.0.1:13265".parse()?;
+ let mut server = TcpListener::bind(addr)?;
+ // Start listening for incoming connections.
+ poll.registry()
+ .register(&mut server, SERVER, Interest::READABLE)?;
+
+ // Setup the client socket.
+ let mut client = TcpStream::connect(addr)?;
+ // Register the socket.
+ poll.registry()
+ .register(&mut client, CLIENT, Interest::READABLE | Interest::WRITABLE)?;
+
+ // Start an event loop.
+ loop {
+ // Poll Mio for events, blocking until we get an event.
+ poll.poll(&mut events, None)?;
+
+ // Process each event.
+ for event in events.iter() {
+ // We can use the token we previously provided to `register` to
+ // determine for which socket the event is.
+ match event.token() {
+ SERVER => {
+ // If this is an event for the server, it means a connection
+ // is ready to be accepted.
+ //
+ // Accept the connection and drop it immediately. This will
+ // close the socket and notify the client of the EOF.
+ let connection = server.accept();
+ drop(connection);
+ }
+ CLIENT => {
+ if event.is_writable() {
+ // We can (likely) write to the socket without blocking.
+ }
+
+ if event.is_readable() {
+ // We can (likely) read from the socket without blocking.
+ }
+
+ // Since the server just shuts down the connection, let's
+ // just exit from our event loop.
+ return Ok(());
+ }
+ // We don't expect any events with tokens other than those we provided.
+ _ => unreachable!(),
+ }
+ }
+ }
+}
+```
+
+## Features
+
+* Non-blocking TCP, UDP
+* I/O event queue backed by epoll, kqueue, and IOCP
+* Zero allocations at runtime
+* Platform specific extensions
+
+## Non-goals
+
+The following are specifically omitted from Mio and are left to the user
+or higher-level libraries.
+
+* File operations
+* Thread pools / multi-threaded event loop
+* Timers
+
+## Platforms
+
+Currently supported platforms:
+
+* Android (API level 21)
+* DragonFly BSD
+* FreeBSD
+* Linux
+* NetBSD
+* OpenBSD
+* Windows
+* iOS
+* macOS
+* Wine (version 6.11+, see [issue #1444])
+
+There are potentially others. If you find that Mio works on another
+platform, submit a PR to update the list!
+
+Mio can handle interfacing with each of the event systems of the aforementioned
+platforms. The details of their implementation are further discussed in the
+`Poll` type of the API documentation (see above).
+
+The Windows implementation for polling sockets uses the [wepoll] strategy.
+This uses the Windows AFD system to access socket readiness events.
+
+[wepoll]: https://github.com/piscisaureus/wepoll
+[issue #1444]: https://github.com/tokio-rs/mio/issues/1444
+
+### Unsupported
+
+* Haiku, see [issue #1472]
+* Solaris, support was removed in 0.8.0, see [PR #1528]
+
+[issue #1472]: https://github.com/tokio-rs/mio/issues/1472
+[PR #1528]: https://github.com/tokio-rs/mio/pull/1528
+
+## Community
+
+A group of Mio users hang out on [Discord]; this can be a good place to go for
+questions.
+
+[Discord]: https://discord.gg/tokio
+
+## Contributing
+
+Interested in getting involved? We would love to help you! For simple
+bug fixes, just submit a PR with the fix and we can discuss the fix
+directly in the PR. If the fix is more complex, start with an issue.
+
+If you want to propose an API change, create an issue to start a
+discussion with the community. Also, feel free to talk with us in Discord.
+
+Finally, be kind. We support the [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct).
--- /dev/null
+trigger: ["master", "v0.6.x", "v0.7.x"]
+pr: ["master", "v0.6.x", "v0.7.x"]
+
+jobs:
+ # Check formatting
+ - template: ci/azure-rustfmt.yml
+ parameters:
+ name: rustfmt
+
+ # Stable
+ - template: ci/azure-test-stable.yml
+ parameters:
+ name: stable
+ displayName: Test
+ cross: true
+
+ # Stable --release
+ - template: ci/azure-test-stable.yml
+ parameters:
+ name: stable_release
+ displayName: Test --release
+ cmd: test --release
+
+ # Nightly
+ - template: ci/azure-test-stable.yml
+ parameters:
+ name: nightly
+ displayName: Nightly
+ # Pin nightly to avoid being impacted by breakage
+ rust_version: nightly-2021-11-05
+ benches: true
+
+ # This represents the minimum Rust version supported by
+ # Mio. Updating this should be done in a dedicated PR.
+ #
+ # Tests are not run as tests may require newer versions of
+ # rust.
+ - template: ci/azure-test-stable.yml
+ parameters:
+ name: minrust
+ displayName: Min Rust
+ rust_version: 1.46.0
+ cmd: check
+ cross: true
+
+ - template: ci/azure-minimal-versions.yml
+ parameters:
+ name: minimal_versions
+
+ - template: ci/azure-clippy.yml
+ parameters:
+ name: clippy
+
+ - template: ci/azure-cross-compile.yml
+ parameters:
+ name: cross
+
+ - template: ci/azure-deploy-docs.yml
+ parameters:
+ dependsOn:
+ # - rustfmt
+ - stable
+ - nightly
+ - minrust
+ - cross
--- /dev/null
+jobs:
+- job: ${{ parameters.name }}
+ displayName: Clippy
+
+ pool:
+ vmImage: ubuntu-18.04
+
+ steps:
+ - template: azure-install-rust.yml
+ parameters:
+ rust_version: stable
+
+ - script: rustup component add clippy
+ displayName: "Add component"
+
+ - script: cargo clippy --all-targets --all-features -- -D warnings -A clippy::cognitive-complexity
+ displayName: "Run Clippy"
--- /dev/null
+parameters:
+ vmImage: ubuntu-18.04
+
+jobs:
+ - job: ${{ parameters.name }}
+ displayName: Cross
+ strategy:
+ matrix:
+ iOS_64:
+ vmImage: macOS-10.15
+ target: x86_64-apple-ios
+
+ iOS_ARM64:
+ vmImage: macOS-10.15
+ target: aarch64-apple-ios
+
+ Android_ARM:
+ vmImage: ubuntu-18.04
+ target: arm-linux-androideabi
+
+ Android_ARM64:
+ vmImage: ubuntu-18.04
+ target: aarch64-linux-android
+
+ Android_32:
+ vmImage: ubuntu-18.04
+ target: i686-unknown-linux-gnu
+
+ NetBSD:
+ vmImage: ubuntu-18.04
+ target: x86_64-unknown-netbsd
+
+ illumos:
+ vmImage: ubuntu-18.04
+ target: x86_64-unknown-illumos
+
+ pool:
+ vmImage: $(vmImage)
+
+ steps:
+ - template: azure-install-rust.yml
+ parameters:
+ rust_version: stable
+
+ - script: rustup target add $(target)
+ displayName: "Add target"
+
+ - script: cargo check --target $(target)
+ displayName: Check source
+
+ - script: cargo check --tests --target $(target) --all-features
+ displayName: Check tests
+
+ - script: cargo check --examples --target $(target) --all-features
+ displayName: Check examples
--- /dev/null
+parameters:
+ dependsOn: []
+
+jobs:
+ - job: documentation
+ displayName: "Deploy API Documentation"
+ condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master'))
+ pool:
+ vmImage: "Ubuntu 16.04"
+ dependsOn:
+ - ${{ parameters.dependsOn }}
+ steps:
+ - template: azure-install-rust.yml
+ parameters:
+ rust_version: stable
+ - script: |
+ cargo doc --no-deps --all-features
+ cp -R target/doc '$(Build.BinariesDirectory)'
+ displayName: "Generate Documentation"
+ - script: |
+ set -e
+
+ git --version
+ ls -la
+ git init
+ git config user.name 'Deployment Bot (from Azure Pipelines)'
+ git config user.email 'deploy@tokio-rs.com'
+ git config --global credential.helper 'store --file ~/.my-credentials'
+ printf "protocol=https\nhost=github.com\nusername=carllerche\npassword=%s\n\n" "$GITHUB_TOKEN" | git credential-store --file ~/.my-credentials store
+ git remote add origin https://github.com/tokio-rs/mio
+ git checkout -b gh-pages
+ git add .
+ git commit -m 'Deploy Mio API documentation'
+ git push -f origin gh-pages
+ env:
+ GITHUB_TOKEN: $(githubPersonalToken)
+ workingDirectory: "$(Build.BinariesDirectory)"
+ displayName: "Deploy Documentation"
--- /dev/null
+steps:
+ # Linux and macOS.
+ - script: |
+ set -e
+ curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain none --profile minimal
+ export PATH=$PATH:$HOME/.cargo/bin
+ rustup toolchain install $RUSTUP_TOOLCHAIN
+ rustup default $RUSTUP_TOOLCHAIN
+ echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
+ env:
+ RUSTUP_TOOLCHAIN: ${{parameters.rust_version}}
+ displayName: "Install rust (*nix)"
+ condition: not(eq(variables['Agent.OS'], 'Windows_NT'))
+
+ # Windows.
+ - script: |
+ curl -sSf -o rustup-init.exe https://win.rustup.rs
+ rustup-init.exe -y --default-toolchain none
+ set PATH=%PATH%;%USERPROFILE%\.cargo\bin
+ rustup toolchain install %RUSTUP_TOOLCHAIN%
+ rustup default %RUSTUP_TOOLCHAIN%
+ echo "##vso[task.setvariable variable=PATH;]%PATH%;%USERPROFILE%\.cargo\bin"
+ env:
+ RUSTUP_TOOLCHAIN: ${{parameters.rust_version}}
+ displayName: "Install rust (windows)"
+ condition: eq(variables['Agent.OS'], 'Windows_NT')
+
+ # All platforms.
+ - script: |
+ rustup toolchain list
+ rustc -Vv
+ cargo -V
+ displayName: Query rust and cargo versions
--- /dev/null
+parameters:
+ rust_version: nightly
+
+jobs:
+ - job: ${{ parameters.name }}
+ displayName: Minimal versions
+ strategy:
+ matrix:
+ Linux:
+ vmImage: ubuntu-18.04
+ Windows:
+ vmImage: vs2017-win2016
+ pool:
+ vmImage: $(vmImage)
+
+ variables:
+ RUST_BACKTRACE: full
+
+ steps:
+ - template: azure-install-rust.yml
+ parameters:
+ rust_version: ${{ parameters.rust_version }}
+
+ - script: cargo update -Zminimal-versions
+ displayName: cargo update -Zminimal-versions
+ env:
+ CI: "True"
+
+ - script: cargo test --all-features
+ displayName: cargo test --all-features
+ env:
+ CI: "True"
--- /dev/null
+jobs:
+ # Check formatting
+ - job: ${{ parameters.name }}
+ displayName: Check rustfmt
+ pool:
+ vmImage: ubuntu-18.04
+ steps:
+ - template: azure-install-rust.yml
+ parameters:
+ rust_version: stable
+ - script: |
+ rustup component add rustfmt
+ displayName: Install rustfmt
+ - script: |
+ # FIXME: for some reason this doesn't actually check all files.
+ # So instead we run `rustfmt` directly on each file.
+ #cargo fmt --all -- --check
+ find src tests examples -type f -iname "*.rs" | xargs rustfmt --check
+ displayName: Check formatting
--- /dev/null
+parameters:
+ cmd: test
+ rust_version: stable
+
+jobs:
+ - job: ${{ parameters.name }}
+ displayName: ${{ parameters.displayName }}
+ strategy:
+ matrix:
+ Linux:
+ vmImage: ubuntu-18.04
+
+ ${{ if parameters.cross }}:
+ MacOS:
+ vmImage: macOS-10.15
+ Windows:
+ vmImage: vs2017-win2016
+ pool:
+ vmImage: $(vmImage)
+
+ variables:
+ RUST_BACKTRACE: full
+
+ steps:
+ - template: azure-install-rust.yml
+ parameters:
+ rust_version: ${{ parameters.rust_version }}
+
+ - ${{ if eq(parameters.cmd, 'test') }}:
+ - script: |
+        # Cargo-hack's dependency bitflags has a higher MSRV than us.
+ rustup install nightly
+ rustup run nightly cargo install cargo-hack
+ cargo hack check --feature-powerset
+ displayName: Check feature powerset
+
+ - script: cargo ${{ parameters.cmd }} --all-features
+ displayName: cargo ${{ parameters.cmd }} --all-features
+ env:
+ CI: "True"
+
+ - ${{ if eq(parameters.cmd, 'test') }}:
+ - script: cargo doc --no-deps
+ displayName: cargo doc --no-deps
+
+ - ${{ if parameters.benches }}:
+ - script: cargo check --benches
+ displayName: Check benchmarks
--- /dev/null
+// You can run this example from the root of the mio repo:
+// cargo run --example tcp_server --features="os-poll net"
+use mio::event::Event;
+use mio::net::{TcpListener, TcpStream};
+use mio::{Events, Interest, Poll, Registry, Token};
+use std::collections::HashMap;
+use std::io::{self, Read, Write};
+use std::str::from_utf8;
+
+// Setup some tokens to allow us to identify which event is for which socket.
+const SERVER: Token = Token(0);
+
+// Some data we'll send over the connection.
+const DATA: &[u8] = b"Hello world!\n";
+
+fn main() -> io::Result<()> {
+ env_logger::init();
+
+ // Create a poll instance.
+ let mut poll = Poll::new()?;
+ // Create storage for events.
+ let mut events = Events::with_capacity(128);
+
+ // Setup the TCP server socket.
+ let addr = "127.0.0.1:9000".parse().unwrap();
+ let mut server = TcpListener::bind(addr)?;
+
+    // Register the server with poll so we can receive events for it.
+ poll.registry()
+ .register(&mut server, SERVER, Interest::READABLE)?;
+
+ // Map of `Token` -> `TcpStream`.
+ let mut connections = HashMap::new();
+ // Unique token for each incoming connection.
+ let mut unique_token = Token(SERVER.0 + 1);
+
+ println!("You can connect to the server using `nc`:");
+ println!(" $ nc 127.0.0.1 9000");
+ println!("You'll see our welcome message and anything you type will be printed here.");
+
+ loop {
+ poll.poll(&mut events, None)?;
+
+ for event in events.iter() {
+ match event.token() {
+ SERVER => loop {
+ // Received an event for the TCP server socket, which
+                    // indicates we can accept a connection.
+ let (mut connection, address) = match server.accept() {
+ Ok((connection, address)) => (connection, address),
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ // If we get a `WouldBlock` error we know our
+ // listener has no more incoming connections queued,
+ // so we can return to polling and wait for some
+ // more.
+ break;
+ }
+ Err(e) => {
+ // If it was any other kind of error, something went
+ // wrong and we terminate with an error.
+ return Err(e);
+ }
+ };
+
+ println!("Accepted connection from: {}", address);
+
+ let token = next(&mut unique_token);
+ poll.registry().register(
+ &mut connection,
+ token,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )?;
+
+ connections.insert(token, connection);
+ },
+ token => {
+ // Maybe received an event for a TCP connection.
+ let done = if let Some(connection) = connections.get_mut(&token) {
+ handle_connection_event(poll.registry(), connection, event)?
+ } else {
+ // Sporadic events happen, we can safely ignore them.
+ false
+ };
+ if done {
+ if let Some(mut connection) = connections.remove(&token) {
+ poll.registry().deregister(&mut connection)?;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+fn next(current: &mut Token) -> Token {
+ let next = current.0;
+ current.0 += 1;
+ Token(next)
+}
+
+/// Returns `true` if the connection is done.
+fn handle_connection_event(
+ registry: &Registry,
+ connection: &mut TcpStream,
+ event: &Event,
+) -> io::Result<bool> {
+ if event.is_writable() {
+ // We can (maybe) write to the connection.
+ match connection.write(DATA) {
+ // We want to write the entire `DATA` buffer in a single go. If we
+ // write less we'll return a short write error (same as
+ // `io::Write::write_all` does).
+ Ok(n) if n < DATA.len() => return Err(io::ErrorKind::WriteZero.into()),
+ Ok(_) => {
+ // After we've written something we'll reregister the connection
+ // to only respond to readable events.
+ registry.reregister(connection, event.token(), Interest::READABLE)?
+ }
+ // Would block "errors" are the OS's way of saying that the
+ // connection is not actually ready to perform this I/O operation.
+ Err(ref err) if would_block(err) => {}
+ // Got interrupted (how rude!), we'll try again.
+ Err(ref err) if interrupted(err) => {
+ return handle_connection_event(registry, connection, event)
+ }
+ // Other errors we'll consider fatal.
+ Err(err) => return Err(err),
+ }
+ }
+
+ if event.is_readable() {
+ let mut connection_closed = false;
+ let mut received_data = vec![0; 4096];
+ let mut bytes_read = 0;
+ // We can (maybe) read from the connection.
+ loop {
+ match connection.read(&mut received_data[bytes_read..]) {
+ Ok(0) => {
+ // Reading 0 bytes means the other side has closed the
+ // connection or is done writing, then so are we.
+ connection_closed = true;
+ break;
+ }
+ Ok(n) => {
+ bytes_read += n;
+ if bytes_read == received_data.len() {
+ received_data.resize(received_data.len() + 1024, 0);
+ }
+ }
+ // Would block "errors" are the OS's way of saying that the
+ // connection is not actually ready to perform this I/O operation.
+ Err(ref err) if would_block(err) => break,
+ Err(ref err) if interrupted(err) => continue,
+ // Other errors we'll consider fatal.
+ Err(err) => return Err(err),
+ }
+ }
+
+ if bytes_read != 0 {
+ let received_data = &received_data[..bytes_read];
+ if let Ok(str_buf) = from_utf8(received_data) {
+ println!("Received data: {}", str_buf.trim_end());
+ } else {
+ println!("Received (none UTF-8) data: {:?}", received_data);
+ }
+ }
+
+ if connection_closed {
+ println!("Connection closed");
+ return Ok(true);
+ }
+ }
+
+ Ok(false)
+}
+
+fn would_block(err: &io::Error) -> bool {
+ err.kind() == io::ErrorKind::WouldBlock
+}
+
+fn interrupted(err: &io::Error) -> bool {
+ err.kind() == io::ErrorKind::Interrupted
+}
--- /dev/null
+// You can run this example from the root of the mio repo:
+// cargo run --example udp_server --features="os-poll net"
+use log::warn;
+use mio::net::UdpSocket;
+use mio::{Events, Interest, Poll, Token};
+use std::io;
+
+// A token to allow us to identify which event is for the `UdpSocket`.
+const UDP_SOCKET: Token = Token(0);
+
+fn main() -> io::Result<()> {
+ env_logger::init();
+
+ // Create a poll instance.
+ let mut poll = Poll::new()?;
+ // Create storage for events. Since we will only register a single socket, a
+ // capacity of 1 will do.
+ let mut events = Events::with_capacity(1);
+
+ // Setup the UDP socket.
+ let addr = "127.0.0.1:9000".parse().unwrap();
+ let mut socket = UdpSocket::bind(addr)?;
+
+ // Register our socket with the token defined above and an interest in being
+ // `READABLE`.
+ poll.registry()
+ .register(&mut socket, UDP_SOCKET, Interest::READABLE)?;
+
+ println!("You can connect to the server using `nc`:");
+ println!(" $ nc -u 127.0.0.1 9000");
+ println!("Anything you type will be echoed back to you.");
+
+ // Initialize a buffer for the UDP packet. We use the maximum size of a UDP
+    // packet, which is the maximum value of a 16-bit integer.
+ let mut buf = [0; 1 << 16];
+
+ // Our event loop.
+ loop {
+ // Poll to check if we have events waiting for us.
+ poll.poll(&mut events, None)?;
+
+ // Process each event.
+ for event in events.iter() {
+            // Validate the token we registered our socket with; in this
+            // example it will only ever be one, but we make sure it's valid
+            // nonetheless.
+ match event.token() {
+ UDP_SOCKET => loop {
+ // In this loop we receive all packets queued for the socket.
+ match socket.recv_from(&mut buf) {
+ Ok((packet_size, source_address)) => {
+ // Echo the data.
+ socket.send_to(&buf[..packet_size], source_address)?;
+ }
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ // If we get a `WouldBlock` error we know our socket
+ // has no more packets queued, so we can return to
+ // polling and wait for some more.
+ break;
+ }
+ Err(e) => {
+ // If it was any other kind of error, something went
+ // wrong and we terminate with an error.
+ return Err(e);
+ }
+ }
+ },
+ _ => {
+ // This should never happen as we only registered our
+ // `UdpSocket` using the `UDP_SOCKET` token, but if it ever
+ // does we'll log it.
+ warn!("Got event for unexpected token: {:?}", event);
+ }
+ }
+ }
+ }
+}
--- /dev/null
+use crate::{sys, Token};
+
+use std::fmt;
+
+/// A readiness event.
+///
+/// `Event` is a readiness state paired with a [`Token`]. It is returned by
+/// [`Poll::poll`].
+///
+/// For more documentation on polling and events, see [`Poll`].
+///
+/// [`Poll::poll`]: ../struct.Poll.html#method.poll
+/// [`Poll`]: ../struct.Poll.html
+/// [`Token`]: ../struct.Token.html
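+///
+/// # Examples
+///
+/// A minimal sketch (assuming the `os-poll` feature is enabled) of inspecting
+/// events returned by [`Poll::poll`]:
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register `event::Source`s with `poll.registry()` here.
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+/// for event in events.iter() {
+///     // Use the token and readiness state to decide what to do.
+///     if event.is_readable() {
+///         println!("{:?} is readable", event.token());
+///     }
+/// }
+/// # Ok(())
+/// # }
+/// ```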
+#[derive(Clone)]
+#[repr(transparent)]
+pub struct Event {
+ inner: sys::Event,
+}
+
+impl Event {
+ /// Returns the event's token.
+ pub fn token(&self) -> Token {
+ sys::event::token(&self.inner)
+ }
+
+ /// Returns true if the event contains readable readiness.
+ ///
+ /// # Notes
+ ///
+    /// Out-of-band (OOB) data also triggers readable events. But most
+    /// applications don't actually read OOB data, which could leave an
+    /// application open to a Denial-of-Service (DoS) attack, see
+    /// <https://github.com/sandstorm-io/sandstorm-website/blob/58f93346028c0576e8147627667328eaaf4be9fa/_posts/2015-04-08-osx-security-bug.md>.
+    /// However, because Mio uses edge-triggers it will not result in an
+    /// infinite loop as described in the article above.
+ pub fn is_readable(&self) -> bool {
+ sys::event::is_readable(&self.inner)
+ }
+
+ /// Returns true if the event contains writable readiness.
+ pub fn is_writable(&self) -> bool {
+ sys::event::is_writable(&self.inner)
+ }
+
+ /// Returns true if the event contains error readiness.
+ ///
+ /// Error events occur when the socket enters an error state. In this case,
+ /// the socket will also receive a readable or writable event. Reading or
+ /// writing to the socket will result in an error.
+ ///
+ /// # Notes
+ ///
+ /// Method is available on all platforms, but not all platforms trigger the
+ /// error event.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | `EPOLLERR` |
+ /// | [kqueue] | `EV_ERROR` and `EV_EOF` with `fflags` set to `0`. |
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ pub fn is_error(&self) -> bool {
+ sys::event::is_error(&self.inner)
+ }
+
+ /// Returns true if the event contains read closed readiness.
+ ///
+ /// # Notes
+ ///
+ /// Read closed readiness can be expected after any of the following have
+ /// occurred:
+ /// * The local stream has shutdown the read half of its socket
+ /// * The local stream has shutdown both the read half and the write half
+ /// of its socket
+    /// * The peer stream has shutdown the write half of its socket; this sends a
+ /// `FIN` packet that has been received by the local stream
+ ///
+ /// Method is a best effort implementation. While some platforms may not
+ /// return readiness when read half is closed, it is guaranteed that
+ /// false-positives will not occur.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | `EPOLLHUP`, or |
+ /// | | `EPOLLIN` and `EPOLLRDHUP` |
+ /// | [kqueue] | `EV_EOF` |
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ pub fn is_read_closed(&self) -> bool {
+ sys::event::is_read_closed(&self.inner)
+ }
+
+ /// Returns true if the event contains write closed readiness.
+ ///
+ /// # Notes
+ ///
+ /// On [epoll] this is essentially a check for `EPOLLHUP` flag as the
+ /// local stream shutting down its write half does not trigger this event.
+ ///
+ /// On [kqueue] the local stream shutting down the write half of its
+ /// socket will trigger this event.
+ ///
+ /// Method is a best effort implementation. While some platforms may not
+ /// return readiness when write half is closed, it is guaranteed that
+ /// false-positives will not occur.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | `EPOLLHUP`, or |
+ /// | | only `EPOLLERR`, or |
+ /// | | `EPOLLOUT` and `EPOLLERR` |
+ /// | [kqueue] | `EV_EOF` |
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ pub fn is_write_closed(&self) -> bool {
+ sys::event::is_write_closed(&self.inner)
+ }
+
+ /// Returns true if the event contains priority readiness.
+ ///
+ /// # Notes
+ ///
+ /// Method is available on all platforms, but not all platforms trigger the
+ /// priority event.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | `EPOLLPRI` |
+ /// | [kqueue] | *Not supported* |
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ #[inline]
+ pub fn is_priority(&self) -> bool {
+ sys::event::is_priority(&self.inner)
+ }
+
+ /// Returns true if the event contains AIO readiness.
+ ///
+ /// # Notes
+ ///
+ /// Method is available on all platforms, but not all platforms support AIO.
+ ///
+ /// The table below shows what flags are checked on what OS.
+ ///
+ /// | [OS selector] | Flag(s) checked |
+ /// |---------------|-----------------|
+ /// | [epoll] | *Not supported* |
+ /// | [kqueue]<sup>1</sup> | `EVFILT_AIO` |
+ ///
+ /// 1: Only supported on DragonFly BSD, FreeBSD, iOS and macOS.
+ ///
+ /// [OS selector]: ../struct.Poll.html#implementation-notes
+ /// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+ /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+ pub fn is_aio(&self) -> bool {
+ sys::event::is_aio(&self.inner)
+ }
+
+ /// Returns true if the event contains LIO readiness.
+ ///
+ /// # Notes
+ ///
+ /// Method is available on all platforms, but only FreeBSD supports LIO. On
+ /// FreeBSD this method checks the `EVFILT_LIO` flag.
+ pub fn is_lio(&self) -> bool {
+ sys::event::is_lio(&self.inner)
+ }
+
+ /// Create a reference to an `Event` from a platform specific event.
+ pub(crate) fn from_sys_event_ref(sys_event: &sys::Event) -> &Event {
+ unsafe {
+ // This is safe because the memory layout of `Event` is
+ // the same as `sys::Event` due to the `repr(transparent)` attribute.
+ &*(sys_event as *const sys::Event as *const Event)
+ }
+ }
+}
+
+/// When the [alternate] flag is enabled this will print platform specific
+/// details, for example the fields of the `kevent` structure on platforms that
+/// use `kqueue(2)`. Note however that the output of this implementation is
+/// **not** considered a part of the stable API.
+///
+/// [alternate]: fmt::Formatter::alternate
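+///
+/// For illustration, assuming `event` is an `Event` returned by `Poll::poll`
+/// (not compiled here):
+///
+/// ```ignore
+/// println!("{:?}", event);  // Platform independent fields only.
+/// println!("{:#?}", event); // Alternate format, including platform specific details.
+/// ```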
+impl fmt::Debug for Event {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let alternate = f.alternate();
+ let mut d = f.debug_struct("Event");
+ d.field("token", &self.token())
+ .field("readable", &self.is_readable())
+ .field("writable", &self.is_writable())
+ .field("error", &self.is_error())
+ .field("read_closed", &self.is_read_closed())
+ .field("write_closed", &self.is_write_closed())
+ .field("priority", &self.is_priority())
+ .field("aio", &self.is_aio())
+ .field("lio", &self.is_lio());
+
+ if alternate {
+ struct EventDetails<'a>(&'a sys::Event);
+
+ impl<'a> fmt::Debug for EventDetails<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ sys::event::debug_details(f, self.0)
+ }
+ }
+
+ d.field("details", &EventDetails(&self.inner)).finish()
+ } else {
+ d.finish()
+ }
+ }
+}
--- /dev/null
+use crate::event::Event;
+use crate::sys;
+
+use std::fmt;
+
+/// A collection of readiness events.
+///
+/// `Events` is passed as an argument to [`Poll::poll`] and will be used to
+/// receive any new readiness events received since the last poll. Usually, a
+/// single `Events` instance is created at the same time as a [`Poll`] and
+/// reused on each call to [`Poll::poll`].
+///
+/// See [`Poll`] for more documentation on polling.
+///
+/// [`Poll::poll`]: ../struct.Poll.html#method.poll
+/// [`Poll`]: ../struct.Poll.html
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let mut poll = Poll::new()?;
+/// #
+/// # assert!(events.is_empty());
+///
+/// // Register `event::Source`s with `poll`.
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in events.iter() {
+/// println!("Got an event for {:?}", event.token());
+/// }
+/// # Ok(())
+/// # }
+/// ```
+pub struct Events {
+ inner: sys::Events,
+}
+
+/// [`Events`] iterator.
+///
+/// This struct is created by the [`iter`] method on [`Events`].
+///
+/// [`Events`]: struct.Events.html
+/// [`iter`]: struct.Events.html#method.iter
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Poll};
+/// use std::time::Duration;
+///
+/// let mut events = Events::with_capacity(1024);
+/// let mut poll = Poll::new()?;
+///
+/// // Register handles with `poll`.
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// for event in events.iter() {
+/// println!("Got an event for {:?}", event.token());
+/// }
+/// # Ok(())
+/// # }
+/// ```
+#[derive(Debug, Clone)]
+pub struct Iter<'a> {
+ inner: &'a Events,
+ pos: usize,
+}
+
+impl Events {
+ /// Return a new `Events` capable of holding up to `capacity` events.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ /// assert_eq!(1024, events.capacity());
+ /// ```
+ pub fn with_capacity(capacity: usize) -> Events {
+ Events {
+ inner: sys::Events::with_capacity(capacity),
+ }
+ }
+
+ /// Returns the number of `Event` values that `self` can hold.
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ /// assert_eq!(1024, events.capacity());
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.inner.capacity()
+ }
+
+ /// Returns `true` if `self` contains no `Event` values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use mio::Events;
+ ///
+ /// let events = Events::with_capacity(1024);
+ /// assert!(events.is_empty());
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+
+ /// Returns an iterator over the `Event` values.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll};
+ /// use std::time::Duration;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let mut poll = Poll::new()?;
+ ///
+ /// // Register handles with `poll`.
+ ///
+ /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ ///
+ /// for event in events.iter() {
+ /// println!("Got an event for {:?}", event.token());
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn iter(&self) -> Iter<'_> {
+ Iter {
+ inner: self,
+ pos: 0,
+ }
+ }
+
+ /// Clears all `Event` values from the container explicitly.
+ ///
+ /// # Notes
+ ///
+ /// Events are cleared before every `poll`, so it is not required to call
+ /// this manually.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll};
+ /// use std::time::Duration;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let mut poll = Poll::new()?;
+ ///
+ /// // Register handles with `poll`.
+ ///
+ /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ ///
+ /// // Clear all events.
+ /// events.clear();
+ /// assert!(events.is_empty());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn clear(&mut self) {
+ self.inner.clear();
+ }
+
+ /// Returns a mutable reference to the inner `sys::Events`.
+ pub(crate) fn sys(&mut self) -> &mut sys::Events {
+ &mut self.inner
+ }
+}
+
+impl<'a> IntoIterator for &'a Events {
+ type Item = &'a Event;
+ type IntoIter = Iter<'a>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a> Iterator for Iter<'a> {
+ type Item = &'a Event;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let ret = self
+ .inner
+ .inner
+ .get(self.pos)
+ .map(Event::from_sys_event_ref);
+ self.pos += 1;
+ ret
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Only the events not yet yielded by this iterator remain.
+ let size = self.inner.inner.len().saturating_sub(self.pos);
+ (size, Some(size))
+ }
+
+ fn count(self) -> usize {
+ self.inner.inner.len().saturating_sub(self.pos)
+ }
+}
+
+impl fmt::Debug for Events {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self).finish()
+ }
+}
--- /dev/null
+//! Readiness event types and utilities.
+
+#[allow(clippy::module_inception)]
+mod event;
+mod events;
+mod source;
+
+pub use self::event::Event;
+pub use self::events::{Events, Iter};
+pub use self::source::Source;
--- /dev/null
+use crate::{Interest, Registry, Token};
+
+use std::io;
+
+/// An event source that may be registered with [`Registry`].
+///
+/// Types that implement `event::Source` can be registered with
+/// `Registry`. Users of Mio **should not** use the `event::Source` trait
+/// functions directly. Instead, the equivalent functions on `Registry` should
+/// be used.
+///
+/// See [`Registry`] for more details.
+///
+/// [`Registry`]: ../struct.Registry.html
+///
+/// # Implementing `event::Source`
+///
+/// Event sources are always backed by system handles, such as sockets or other
+/// OS handles. These `event::Source`s will be monitored by the system
+/// selector. An implementation of `Source` will almost always delegate to a
+/// lower-level handle. Examples of this are [`TcpStream`]s, or the *Unix only*
+/// [`SourceFd`].
+///
+/// [`TcpStream`]: ../net/struct.TcpStream.html
+/// [`SourceFd`]: ../unix/struct.SourceFd.html
+///
+/// # Dropping `event::Source`s
+///
+/// All `event::Source`s, unless otherwise specified, need to be [deregistered]
+/// before being dropped, otherwise they leak resources. This goes against the
+/// normal drop behaviour of types in Rust which clean up after themselves, e.g.
+/// a `File` will close itself. However, since deregistering needs access to
+/// [`Registry`], this cannot be done while being dropped.
+///
+/// [deregistered]: ../struct.Registry.html#method.deregister
+///
+/// # Examples
+///
+/// Implementing `Source` on a struct containing a socket:
+///
+#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+/// use mio::{Interest, Registry, Token};
+/// use mio::event::Source;
+/// use mio::net::TcpStream;
+///
+/// use std::io;
+///
+/// # #[allow(dead_code)]
+/// pub struct MySource {
+/// socket: TcpStream,
+/// }
+///
+/// impl Source for MySource {
+/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// // Delegate the `register` call to `socket`
+/// self.socket.register(registry, token, interests)
+/// }
+///
+/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// // Delegate the `reregister` call to `socket`
+/// self.socket.reregister(registry, token, interests)
+/// }
+///
+/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+/// // Delegate the `deregister` call to `socket`
+/// self.socket.deregister(registry)
+/// }
+/// }
+/// ```
+pub trait Source {
+ /// Register `self` with the given `Registry` instance.
+ ///
+ /// This function should not be called directly. Use [`Registry::register`]
+ /// instead. Implementors should handle registration by delegating the call
+ /// to another `Source` type.
+ ///
+ /// [`Registry::register`]: ../struct.Registry.html#method.register
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()>;
+
+ /// Re-register `self` with the given `Registry` instance.
+ ///
+ /// This function should not be called directly. Use
+ /// [`Registry::reregister`] instead. Implementors should handle
+ /// re-registration by delegating the call to another `Source` type.
+ ///
+ /// [`Registry::reregister`]: ../struct.Registry.html#method.reregister
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()>;
+
+ /// Deregister `self` from the given `Registry` instance.
+ ///
+ /// This function should not be called directly. Use
+ /// [`Registry::deregister`] instead. Implementors should handle
+ /// deregistration by delegating the call to another `Source` type.
+ ///
+ /// [`Registry::deregister`]: ../struct.Registry.html#method.deregister
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()>;
+}
+
+impl<T> Source for Box<T>
+where
+ T: Source + ?Sized,
+{
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ (&mut **self).register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ (&mut **self).reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ (&mut **self).deregister(registry)
+ }
+}
--- /dev/null
+use std::num::NonZeroU8;
+use std::{fmt, ops};
+
+/// Interest used in registering.
+///
+/// Interests are used in [registering] [`event::Source`]s with [`Poll`]; they
+/// indicate what readiness should be monitored for. For example, if a socket is
+/// registered with [readable] interest and the socket becomes writable, no
+/// event will be returned from a call to [`poll`].
+///
+/// [registering]: struct.Registry.html#method.register
+/// [`event::Source`]: ./event/trait.Source.html
+/// [`Poll`]: struct.Poll.html
+/// [readable]: struct.Interest.html#associatedconstant.READABLE
+/// [`poll`]: struct.Poll.html#method.poll
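+///
+/// # Examples
+///
+/// A small example of combining and inspecting interests (using only the
+/// constants defined below):
+///
+/// ```
+/// use mio::Interest;
+///
+/// // Interests can be combined to monitor multiple kinds of readiness.
+/// let interests = Interest::READABLE | Interest::WRITABLE;
+/// assert!(interests.is_readable());
+/// assert!(interests.is_writable());
+/// ```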
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct Interest(NonZeroU8);
+
+// These must be unique.
+const READABLE: u8 = 0b0001;
+const WRITABLE: u8 = 0b0010;
+// The following are not available on all platforms.
+#[cfg_attr(
+ not(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ )),
+ allow(dead_code)
+)]
+const AIO: u8 = 0b0100;
+#[cfg_attr(not(target_os = "freebsd"), allow(dead_code))]
+const LIO: u8 = 0b1000;
+
+impl Interest {
+ /// Returns an `Interest` set representing readable interests.
+ pub const READABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(READABLE) });
+
+ /// Returns an `Interest` set representing writable interests.
+ pub const WRITABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(WRITABLE) });
+
+ /// Returns an `Interest` set representing AIO completion interests.
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ pub const AIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(AIO) });
+
+ /// Returns an `Interest` set representing LIO completion interests.
+ #[cfg(target_os = "freebsd")]
+ pub const LIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(LIO) });
+
+ /// Add together two `Interest`.
+ ///
+ /// This does the same thing as the `BitOr` implementation, but is a
+ /// constant function.
+ ///
+ /// ```
+ /// use mio::Interest;
+ ///
+ /// const INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE);
+ /// # fn silent_dead_code_warning(_: Interest) { }
+ /// # silent_dead_code_warning(INTERESTS)
+ /// ```
+ #[allow(clippy::should_implement_trait)]
+ pub const fn add(self, other: Interest) -> Interest {
+ Interest(unsafe { NonZeroU8::new_unchecked(self.0.get() | other.0.get()) })
+ }
+
+ /// Removes `other` `Interest` from `self`.
+ ///
+ /// Returns `None` if the set would be empty after removing `other`.
+ ///
+ /// ```
+ /// use mio::Interest;
+ ///
+ /// const RW_INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE);
+ ///
+ /// // As long as one interest remains this will return `Some`.
+ /// let w_interest = RW_INTERESTS.remove(Interest::READABLE).unwrap();
+ /// assert!(!w_interest.is_readable());
+ /// assert!(w_interest.is_writable());
+ ///
+ /// // Removing all interests from the set will return `None`.
+ /// assert_eq!(w_interest.remove(Interest::WRITABLE), None);
+ ///
+ /// // It's also possible to remove multiple interests at once.
+ /// assert_eq!(RW_INTERESTS.remove(RW_INTERESTS), None);
+ /// ```
+ pub fn remove(self, other: Interest) -> Option<Interest> {
+ NonZeroU8::new(self.0.get() & !other.0.get()).map(Interest)
+ }
+
+ /// Returns true if the value includes readable readiness.
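+ ///
+ /// For example:
+ ///
+ /// ```
+ /// use mio::Interest;
+ ///
+ /// assert!(Interest::READABLE.is_readable());
+ /// assert!(!Interest::WRITABLE.is_readable());
+ /// ```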
+ pub const fn is_readable(self) -> bool {
+ (self.0.get() & READABLE) != 0
+ }
+
+ /// Returns true if the value includes writable readiness.
+ pub const fn is_writable(self) -> bool {
+ (self.0.get() & WRITABLE) != 0
+ }
+
+ /// Returns true if `Interest` contains AIO readiness
+ pub const fn is_aio(self) -> bool {
+ (self.0.get() & AIO) != 0
+ }
+
+ /// Returns true if `Interest` contains LIO readiness
+ pub const fn is_lio(self) -> bool {
+ (self.0.get() & LIO) != 0
+ }
+}
+
+impl ops::BitOr for Interest {
+ type Output = Self;
+
+ #[inline]
+ fn bitor(self, other: Self) -> Self {
+ self.add(other)
+ }
+}
+
+impl ops::BitOrAssign for Interest {
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ self.0 = (*self | other).0;
+ }
+}
+
+impl fmt::Debug for Interest {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut one = false;
+ if self.is_readable() {
+ if one {
+ write!(fmt, " | ")?
+ }
+ write!(fmt, "READABLE")?;
+ one = true
+ }
+ if self.is_writable() {
+ if one {
+ write!(fmt, " | ")?
+ }
+ write!(fmt, "WRITABLE")?;
+ one = true
+ }
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ {
+ if self.is_aio() {
+ if one {
+ write!(fmt, " | ")?
+ }
+ write!(fmt, "AIO")?;
+ one = true
+ }
+ }
+ #[cfg(any(target_os = "freebsd"))]
+ {
+ if self.is_lio() {
+ if one {
+ write!(fmt, " | ")?
+ }
+ write!(fmt, "LIO")?;
+ one = true
+ }
+ }
+ debug_assert!(one, "printing empty interests");
+ Ok(())
+ }
+}
--- /dev/null
+use std::ops::{Deref, DerefMut};
+#[cfg(unix)]
+use std::os::unix::io::AsRawFd;
+#[cfg(windows)]
+use std::os::windows::io::AsRawSocket;
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::{fmt, io};
+
+use crate::sys::IoSourceState;
+use crate::{event, Interest, Registry, Token};
+
+/// Adapter for a [`RawFd`] or [`RawSocket`] providing an [`event::Source`]
+/// implementation.
+///
+/// `IoSource` enables registering any FD or socket wrapper with [`Poll`].
+///
+/// While only implementations for TCP, UDP, and UDS (Unix only) are provided,
+/// Mio supports registering any FD or socket that can be registered with the
+/// underlying OS selector. `IoSource` provides the necessary bridge.
+///
+/// [`RawFd`]: std::os::unix::io::RawFd
+/// [`RawSocket`]: std::os::windows::io::RawSocket
+///
+/// # Notes
+///
+/// To handle the registrations and events properly **all** I/O operations (such
+/// as `read`, `write`, etc.) must go through the [`do_io`] method to ensure the
+/// internal state is updated accordingly.
+///
+/// [`Poll`]: crate::Poll
+/// [`do_io`]: IoSource::do_io
+/*
+///
+/// # Examples
+///
+/// Basic usage.
+///
+/// ```
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Interest, Poll, Token};
+/// use mio::IoSource;
+///
+/// use std::net;
+///
+/// let poll = Poll::new()?;
+///
+/// // Bind a std TCP listener.
+/// let listener = net::TcpListener::bind("127.0.0.1:0")?;
+/// // Wrap it in the `IoSource` type.
+/// let mut listener = IoSource::new(listener);
+///
+/// // Register the listener.
+/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?;
+/// # Ok(())
+/// # }
+/// ```
+*/
+pub struct IoSource<T> {
+ state: IoSourceState,
+ inner: T,
+ #[cfg(debug_assertions)]
+ selector_id: SelectorId,
+}
+
+impl<T> IoSource<T> {
+ /// Create a new `IoSource`.
+ pub fn new(io: T) -> IoSource<T> {
+ IoSource {
+ state: IoSourceState::new(),
+ inner: io,
+ #[cfg(debug_assertions)]
+ selector_id: SelectorId::new(),
+ }
+ }
+
+ /// Execute an I/O operation, ensuring that the socket receives more events
+ /// if it hits a [`WouldBlock`] error.
+ ///
+ /// # Notes
+ ///
+ /// This method must be used for **all** I/O operations to ensure the user
+ /// will receive events once the socket is ready again after an operation
+ /// returned a [`WouldBlock`] error.
+ ///
+ /// [`WouldBlock`]: io::ErrorKind::WouldBlock
+ pub fn do_io<F, R>(&self, f: F) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ self.state.do_io(f, &self.inner)
+ }
+
+ /// Returns the I/O source, dropping the state.
+ ///
+ /// # Notes
+ ///
+ /// To ensure no more events are received for this I/O source, first
+ /// [`deregister`] it.
+ ///
+ /// [`deregister`]: Registry::deregister
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+}
+
+/// Be careful when using this method. All I/O operations that may block must go
+/// through the [`do_io`] method.
+///
+/// [`do_io`]: IoSource::do_io
+impl<T> Deref for IoSource<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+/// Be careful when using this method. All I/O operations that may block must go
+/// through the [`do_io`] method.
+///
+/// [`do_io`]: IoSource::do_io
+impl<T> DerefMut for IoSource<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+#[cfg(unix)]
+impl<T> event::Source for IoSource<T>
+where
+ T: AsRawFd,
+{
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.associate(registry)?;
+ registry
+ .selector()
+ .register(self.inner.as_raw_fd(), token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.check_association(registry)?;
+ registry
+ .selector()
+ .reregister(self.inner.as_raw_fd(), token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.remove_association(registry)?;
+ registry.selector().deregister(self.inner.as_raw_fd())
+ }
+}
+
+#[cfg(windows)]
+impl<T> event::Source for IoSource<T>
+where
+ T: AsRawSocket,
+{
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.associate(registry)?;
+ self.state
+ .register(registry, token, interests, self.inner.as_raw_socket())
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.check_association(registry)?;
+ self.state.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, _registry: &Registry) -> io::Result<()> {
+ #[cfg(debug_assertions)]
+ self.selector_id.remove_association(_registry)?;
+ self.state.deregister()
+ }
+}
+
+impl<T> fmt::Debug for IoSource<T>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+/// Used to associate an `IoSource` with a `sys::Selector`.
+#[cfg(debug_assertions)]
+#[derive(Debug)]
+struct SelectorId {
+ id: AtomicUsize,
+}
+
+#[cfg(debug_assertions)]
+impl SelectorId {
+ /// Value of `id` if `SelectorId` is not associated with any
+ /// `sys::Selector`. Valid selector ids start at 1.
+ const UNASSOCIATED: usize = 0;
+
+ /// Create a new `SelectorId`.
+ const fn new() -> SelectorId {
+ SelectorId {
+ id: AtomicUsize::new(Self::UNASSOCIATED),
+ }
+ }
+
+ /// Associate an I/O source with `registry`, returning an error if it's
+ /// already registered.
+ fn associate(&self, registry: &Registry) -> io::Result<()> {
+ let registry_id = registry.selector().id();
+ let previous_id = self.id.swap(registry_id, Ordering::AcqRel);
+
+ if previous_id == Self::UNASSOCIATED {
+ Ok(())
+ } else {
+ Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a `Registry`",
+ ))
+ }
+ }
+
+ /// Check the association of an I/O source with `registry`, returning an
+ /// error if it's registered with a different `Registry` or not registered at
+ /// all.
+ fn check_association(&self, registry: &Registry) -> io::Result<()> {
+ let registry_id = registry.selector().id();
+ let id = self.id.load(Ordering::Acquire);
+
+ if id == registry_id {
+ Ok(())
+ } else if id == Self::UNASSOCIATED {
+ Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ ))
+ } else {
+ Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a different `Registry`",
+ ))
+ }
+ }
+
+ /// Remove a previously made association from `registry`, returning an error
+ /// if it was not previously associated with `registry`.
+ fn remove_association(&self, registry: &Registry) -> io::Result<()> {
+ let registry_id = registry.selector().id();
+ let previous_id = self.id.swap(Self::UNASSOCIATED, Ordering::AcqRel);
+
+ if previous_id == registry_id {
+ Ok(())
+ } else {
+ Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ ))
+ }
+ }
+}
+
+#[cfg(debug_assertions)]
+impl Clone for SelectorId {
+ fn clone(&self) -> SelectorId {
+ SelectorId {
+ id: AtomicUsize::new(self.id.load(Ordering::Acquire)),
+ }
+ }
+}
--- /dev/null
+#![deny(
+ missing_docs,
+ missing_debug_implementations,
+ rust_2018_idioms,
+ unused_imports,
+ dead_code
+)]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+// Disallow warnings when running tests.
+#![cfg_attr(test, deny(warnings))]
+// Disallow warnings in examples.
+#![doc(test(attr(deny(warnings))))]
+
+//! Mio is a fast, low-level I/O library for Rust focusing on non-blocking APIs
+//! and event notification for building high performance I/O apps with as little
+//! overhead as possible over the OS abstractions.
+//!
+//! # Usage
+//!
+//! Using Mio starts by creating a [`Poll`], which reads events from the OS and
+//! puts them into [`Events`]. These events are then used to drive the I/O in
+//! your application.
+//!
+//! For more detail, see [`Poll`].
+//!
+//! [`Poll`]: ../mio/struct.Poll.html
+//! [`Events`]: ../mio/event/struct.Events.html
+//!
+//! ## Examples
+//!
+//! Examples can be found in the `examples` directory of the source code, or [on
+//! GitHub].
+//!
+//! [on GitHub]: https://github.com/tokio-rs/mio/tree/master/examples
+//!
+//! ## Guide
+//!
+//! A getting started guide is available in the [`guide`] module.
+//!
+//! ## Available features
+//!
+//! The available features are described in the [`features`] module.
+
+// macros used internally
+#[macro_use]
+mod macros;
+
+mod interest;
+mod poll;
+mod sys;
+mod token;
+mod waker;
+
+pub mod event;
+
+cfg_io_source! {
+ mod io_source;
+}
+
+cfg_net! {
+ pub mod net;
+}
+
+#[doc(no_inline)]
+pub use event::Events;
+pub use interest::Interest;
+pub use poll::{Poll, Registry};
+pub use token::Token;
+pub use waker::Waker;
+
+#[cfg(all(unix, feature = "os-ext"))]
+#[cfg_attr(docsrs, doc(cfg(all(unix, feature = "os-ext"))))]
+pub mod unix {
+ //! Unix only extensions.
+
+ pub mod pipe {
+ //! Unix pipe.
+ //!
+ //! See the [`new`] function for documentation.
+
+ pub use crate::sys::pipe::{new, Receiver, Sender};
+ }
+
+ pub use crate::sys::SourceFd;
+}
+
+#[cfg(all(windows, feature = "os-ext"))]
+#[cfg_attr(docsrs, doc(cfg(all(windows, feature = "os-ext"))))]
+pub mod windows {
+ //! Windows only extensions.
+
+ pub use crate::sys::named_pipe::NamedPipe;
+}
+
+pub mod features {
+ //! # Mio's optional features.
+ //!
+ //! This document describes the available features in Mio.
+ //!
+ #![cfg_attr(feature = "os-poll", doc = "## `os-poll` (enabled)")]
+ #![cfg_attr(not(feature = "os-poll"), doc = "## `os-poll` (disabled)")]
+ //!
+ //! Mio by default provides only a shell implementation that `panic!`s the
+ //! moment it is actually run. Running it requires OS support, which is
+ //! enabled by activating the `os-poll` feature.
+ //!
+ //! This makes `Poll`, `Registry` and `Waker` functional.
+ //!
+ #![cfg_attr(feature = "os-ext", doc = "## `os-ext` (enabled)")]
+ #![cfg_attr(not(feature = "os-ext"), doc = "## `os-ext` (disabled)")]
+ //!
+ //! `os-ext` enables additional OS specific facilities. These facilities can
+ //! be found in the `unix` and `windows` modules.
+ //!
+ #![cfg_attr(feature = "net", doc = "## Network types (enabled)")]
+ #![cfg_attr(not(feature = "net"), doc = "## Network types (disabled)")]
+ //!
+ //! The `net` feature enables networking primitives in the `net` module.
+}
+
+pub mod guide {
+ //! # Getting started guide.
+ //!
+ //! In this guide we'll do the following:
+ //!
+ //! 1. Create a [`Poll`] instance (and learn what it is).
+ //! 2. Register an [event source].
+ //! 3. Create an event loop.
+ //!
+ //! At the end you'll have a very small (but quick) TCP server that accepts
+ //! connections and then drops (disconnects) them.
+ //!
+ //! ## 1. Creating a `Poll` instance
+ //!
+ //! Using Mio starts by creating a [`Poll`] instance, which monitors events
+ //! from the OS and puts them into [`Events`]. This allows us to execute I/O
+ //! operations based on what operations are ready.
+ //!
+ //! [`Poll`]: ../struct.Poll.html
+ //! [`Events`]: ../event/struct.Events.html
+ //!
+ #![cfg_attr(feature = "os-poll", doc = "```")]
+ #![cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ //! # use mio::{Poll, Events};
+ //! # fn main() -> std::io::Result<()> {
+ //! // `Poll` allows for polling of readiness events.
+ //! let poll = Poll::new()?;
+ //! // `Events` is a collection of readiness `Event`s and can be filled by
+ //! // calling `Poll::poll`.
+ //! let events = Events::with_capacity(128);
+ //! # drop((poll, events));
+ //! # Ok(())
+ //! # }
+ //! ```
+ //!
+ //! For example, if we're using a [`TcpListener`], we'll only want to
+ //! attempt to accept an incoming connection *if* any connections are
+ //! queued and ready to be accepted. We don't want to waste our time if no
+ //! connections are ready.
+ //!
+ //! [`TcpListener`]: ../net/struct.TcpListener.html
+ //!
+ //! ## 2. Registering event source
+ //!
+ //! After we've created a [`Poll`] instance that monitors events from the OS
+ //! for us, we need to provide it with a source of events. This is done by
+ //! registering an [event source]. As the name “event source” suggests, it is
+ //! a source of events which can be polled using a `Poll` instance. On Unix
+ //! systems this is usually a file descriptor; on Windows it is a socket or
+ //! handle.
+ //!
+ //! In the example below we'll use a [`TcpListener`] for which we'll receive
+ //! an event (from [`Poll`]) once a connection is ready to be accepted.
+ //!
+ //! [event source]: ../event/trait.Source.html
+ //!
+ #![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ //! # use mio::net::TcpListener;
+ //! # use mio::{Poll, Token, Interest};
+ //! # fn main() -> std::io::Result<()> {
+ //! # let poll = Poll::new()?;
+ //! # let address = "127.0.0.1:0".parse().unwrap();
+ //! // Create a `TcpListener`, binding it to `address`.
+ //! let mut listener = TcpListener::bind(address)?;
+ //!
+ //! // Next we register it with `Poll` to receive events for it. The `SERVER`
+ //! // `Token` is used to determine that we received an event for the listener
+ //! // later on.
+ //! const SERVER: Token = Token(0);
+ //! poll.registry().register(&mut listener, SERVER, Interest::READABLE)?;
+ //! # Ok(())
+ //! # }
+ //! ```
+ //!
+ //! Multiple event sources can be [registered] (concurrently), so we can
+ //! monitor multiple sources at a time.
+ //!
+ //! [registered]: ../struct.Registry.html#method.register
+ //!
+ //! ## 3. Creating the event loop
+ //!
+ //! After we've created a [`Poll`] instance and registered one or more
+ //! [event sources] with it, we can [poll] it for events. Polling for events
+ //! is simple: we need a container to store the events, [`Events`], and we
+ //! need to do something based on the polled events (this part is up to you,
+ //! we can't do it all!). If we do this in a loop we've got ourselves an event
+ //! loop.
+ //!
+ //! The example below shows the event loop in action, completing our small
+ //! TCP server.
+ //!
+ //! [poll]: ../struct.Poll.html#method.poll
+ //! [event sources]: ../event/trait.Source.html
+ //!
+ #![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ //! # use std::io;
+ //! # use std::time::Duration;
+ //! # use mio::net::TcpListener;
+ //! # use mio::{Poll, Token, Interest, Events};
+ //! # fn main() -> io::Result<()> {
+ //! # let mut poll = Poll::new()?;
+ //! # let mut events = Events::with_capacity(128);
+ //! # let address = "127.0.0.1:0".parse().unwrap();
+ //! # let mut listener = TcpListener::bind(address)?;
+ //! # const SERVER: Token = Token(0);
+ //! # poll.registry().register(&mut listener, SERVER, Interest::READABLE)?;
+ //! // Start our event loop.
+ //! loop {
+ //! // Poll the OS for events, waiting at most 100 milliseconds.
+ //! poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+ //!
+ //! // Process each event.
+ //! for event in events.iter() {
+ //! // We can use the token we previously provided to `register` to
+ //! // determine which source the event is for.
+ //! match event.token() {
+ //! SERVER => loop {
+ //! // One or more connections are ready, so we'll attempt to
+ //! // accept them (in a loop).
+ //! match listener.accept() {
+ //! Ok((connection, address)) => {
+ //! println!("Got a connection from: {}", address);
+ //! # drop(connection);
+ //! },
+ //! // A "would block error" is returned if the operation
+ //! // is not ready, so we'll stop trying to accept
+ //! // connections.
+ //! Err(ref err) if would_block(err) => break,
+ //! Err(err) => return Err(err),
+ //! }
+ //! }
+ //! # _ => unreachable!(),
+ //! }
+ //! }
+ //! # return Ok(());
+ //! }
+ //!
+ //! fn would_block(err: &io::Error) -> bool {
+ //! err.kind() == io::ErrorKind::WouldBlock
+ //! }
+ //! # }
+ //! ```
+}
--- /dev/null
+//! Macros to ease conditional code based on enabled features.
+
+// Depending on the features not all macros are used.
+#![allow(unused_macros)]
+
+/// The `os-poll` feature is enabled.
+macro_rules! cfg_os_poll {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "os-poll")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "os-poll")))]
+ $item
+ )*
+ }
+}
+
+/// The `os-poll` feature is disabled.
+macro_rules! cfg_not_os_poll {
+ ($($item:item)*) => {
+ $(
+ #[cfg(not(feature = "os-poll"))]
+ $item
+ )*
+ }
+}
+
+/// The `os-ext` feature is enabled.
+macro_rules! cfg_os_ext {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "os-ext")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "os-ext")))]
+ $item
+ )*
+ }
+}
+
+/// The `net` feature is enabled.
+macro_rules! cfg_net {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "net")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "net")))]
+ $item
+ )*
+ }
+}
+
+/// One of the features that needs `IoSource` is enabled. That is `net`, or
+/// `os-ext` on Unix (for `pipe`).
+macro_rules! cfg_io_source {
+ ($($item:item)*) => {
+ $(
+ #[cfg(any(feature = "net", all(unix, feature = "os-ext")))]
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "net", all(unix, feature = "os-ext")))))]
+ $item
+ )*
+ }
+}
+
+/// The `os-ext` feature is enabled, or one of the features that need `os-ext`.
+macro_rules! cfg_any_os_ext {
+ ($($item:item)*) => {
+ $(
+ #[cfg(any(feature = "os-ext", feature = "net"))]
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "os-ext", feature = "net"))))]
+ $item
+ )*
+ }
+}
--- /dev/null
+//! Networking primitives.
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+//!
+//! # Notes
+//!
+//! When using a datagram based socket, i.e. [`UdpSocket`] or [`UnixDatagram`],
+//! it's only possible to receive a packet once. This means that if you provide a
+//! buffer that is too small you won't be able to receive the rest of the data.
+//! Operating systems handle this situation differently:
+//! * Unixes, such as Linux, FreeBSD and macOS, will simply fill the buffer and
+//! return the number of bytes written. This means that if the returned value
+//! is equal to the size of the buffer it may have only written a part of the
+//! packet (or the packet has the same size as the buffer).
+//! * Windows returns a `WSAEMSGSIZE` error.
+//!
+//! Mio does not change the value (either ok or error) returned by the OS; it's
+//! up to the user to handle this. How to deal with these differences is still up
+//! for debate, specifically in
+//! <https://github.com/rust-lang/rust/issues/55794>. The best advice we can
+//! give is to always call receive with a large enough buffer.
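+//!
+//! As a sketch, receiving into a buffer large enough for any UDP datagram
+//! avoids truncation on every platform (`recv_from` shown, not run here):
+//!
+//! ```no_run
+//! # use std::error::Error;
+//! # fn main() -> Result<(), Box<dyn Error>> {
+//! use mio::net::UdpSocket;
+//!
+//! let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+//! // 64 KiB covers the maximum UDP payload, so the datagram is never cut off.
+//! let mut buf = [0u8; 65536];
+//! let (n, from) = socket.recv_from(&mut buf)?;
+//! println!("received {} bytes from {}", n, from);
+//! # Ok(())
+//! # }
+//! ```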
+
+mod tcp;
+pub use self::tcp::{TcpListener, TcpStream};
+
+mod udp;
+pub use self::udp::UdpSocket;
+
+#[cfg(unix)]
+mod uds;
+#[cfg(unix)]
+pub use self::uds::{SocketAddr, UnixDatagram, UnixListener, UnixStream};
--- /dev/null
+use std::net::{self, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+#[cfg(windows)]
+use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+use std::{fmt, io};
+
+use crate::io_source::IoSource;
+use crate::net::TcpStream;
+#[cfg(unix)]
+use crate::sys::tcp::set_reuseaddr;
+use crate::sys::tcp::{bind, listen, new_for_addr};
+use crate::{event, sys, Interest, Registry, Token};
+
+/// A structure representing a socket server.
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Interest, Poll, Token};
+/// use mio::net::TcpListener;
+/// use std::time::Duration;
+///
+/// let mut listener = TcpListener::bind("127.0.0.1:34255".parse()?)?;
+///
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register the socket with `Poll`
+/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?;
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// // There may be a socket ready to be accepted
+/// # Ok(())
+/// # }
+/// ```
+pub struct TcpListener {
+ inner: IoSource<net::TcpListener>,
+}
+
+impl TcpListener {
+ /// Convenience method to bind a new TCP listener to the specified address
+ /// to receive new connections.
+ ///
+ /// This function will take the following steps:
+ ///
+ /// 1. Create a new TCP socket.
+ /// 2. Set the `SO_REUSEADDR` option on the socket on Unix.
+ /// 3. Bind the socket to the specified address.
+ /// 4. Call `listen` on the socket to prepare it to receive new connections.
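+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, binding to port 0 so the OS assigns a free port:
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::TcpListener;
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:0".parse()?)?;
+ /// println!("listening on {}", listener.local_addr()?);
+ /// # Ok(())
+ /// # }
+ /// ```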
+ pub fn bind(addr: SocketAddr) -> io::Result<TcpListener> {
+ let socket = new_for_addr(addr)?;
+ #[cfg(unix)]
+ let listener = unsafe { TcpListener::from_raw_fd(socket) };
+ #[cfg(windows)]
+ let listener = unsafe { TcpListener::from_raw_socket(socket as _) };
+
+ // On platforms with Berkeley-derived sockets, this allows the socket to be
+ // quickly rebound, without needing to wait for the OS to clean up the
+ // previous one.
+ //
+ // On Windows, this allows rebinding sockets which are actively in use,
+ // which allows “socket hijacking”, so we explicitly don't set it here.
+ // https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
+ #[cfg(not(windows))]
+ set_reuseaddr(&listener.inner, true)?;
+
+ bind(&listener.inner, addr)?;
+ listen(&listener.inner, 1024)?;
+ Ok(listener)
+ }
+
+ /// Creates a new `TcpListener` from a standard `net::TcpListener`.
+ ///
+ /// This function is intended to be used to wrap a TCP listener from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying listener; it is left up to the user to set it
+ /// in non-blocking mode.
+ pub fn from_std(listener: net::TcpListener) -> TcpListener {
+ TcpListener {
+ inner: IoSource::new(listener),
+ }
+ }
+
+ /// Accepts a new `TcpStream`.
+ ///
+ /// This may return an `Err(e)` where `e.kind()` is
+ /// `io::ErrorKind::WouldBlock`. This means a stream may be ready at a later
+ /// point and one should wait for an event before calling `accept` again.
+ ///
+ /// If an accepted stream is returned, the remote address of the peer is
+ /// returned along with it.
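+ ///
+ /// # Examples
+ ///
+ /// A sketch of handling `accept`, including the `WouldBlock` case:
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use std::io;
+ ///
+ /// use mio::net::TcpListener;
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:0".parse()?)?;
+ ///
+ /// // Usually called after `Poll` returned a readable event for the listener.
+ /// match listener.accept() {
+ ///     Ok((_connection, address)) => println!("got a connection from {}", address),
+ ///     Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
+ ///         // No connection is queued, wait for another event.
+ ///     }
+ ///     Err(err) => return Err(err.into()),
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```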
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ self.inner.do_io(|inner| {
+ sys::tcp::accept(inner).map(|(stream, addr)| (TcpStream::from_std(stream), addr))
+ })
+ }
+
+ /// Returns the local socket address of this listener.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+}
+
+impl event::Source for TcpListener {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+#[cfg(unix)]
+impl IntoRawFd for TcpListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for TcpListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl FromRawFd for TcpListener {
+ /// Converts a `RawFd` to a `TcpListener`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpListener {
+ TcpListener::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
+
+#[cfg(windows)]
+impl IntoRawSocket for TcpListener {
+ fn into_raw_socket(self) -> RawSocket {
+ self.inner.into_inner().into_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl AsRawSocket for TcpListener {
+ fn as_raw_socket(&self) -> RawSocket {
+ self.inner.as_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl FromRawSocket for TcpListener {
+ /// Converts a `RawSocket` to a `TcpListener`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_socket(socket: RawSocket) -> TcpListener {
+ TcpListener::from_std(FromRawSocket::from_raw_socket(socket))
+ }
+}
--- /dev/null
+mod listener;
+pub use self::listener::TcpListener;
+
+mod stream;
+pub use self::stream::TcpStream;
--- /dev/null
+use std::fmt;
+use std::io::{self, IoSlice, IoSliceMut, Read, Write};
+use std::net::{self, Shutdown, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+#[cfg(windows)]
+use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+
+use crate::io_source::IoSource;
+use crate::sys::tcp::{connect, new_for_addr};
+use crate::{event, Interest, Registry, Token};
+
+/// A non-blocking TCP stream between a local socket and a remote socket.
+///
+/// The socket will be closed when the value is dropped.
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::net::{TcpListener, SocketAddr};
+/// # use std::error::Error;
+/// #
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// let address: SocketAddr = "127.0.0.1:0".parse()?;
+/// let listener = TcpListener::bind(address)?;
+/// use mio::{Events, Interest, Poll, Token};
+/// use mio::net::TcpStream;
+/// use std::time::Duration;
+///
+/// let mut stream = TcpStream::connect(listener.local_addr()?)?;
+///
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(128);
+///
+/// // Register the socket with `Poll`
+/// poll.registry().register(&mut stream, Token(0), Interest::WRITABLE)?;
+///
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+///
+/// // The socket might be ready at this point
+/// # Ok(())
+/// # }
+/// ```
+pub struct TcpStream {
+ inner: IoSource<net::TcpStream>,
+}
+
+impl TcpStream {
+ /// Create a new TCP stream and issue a non-blocking connect to the
+ /// specified address.
+ ///
+ /// # Notes
+ ///
+ /// The returned `TcpStream` may not yet be connected (and thus not usable), unlike
+ /// the API found in `std::net::TcpStream`. Because Mio issues a
+ /// *non-blocking* connect it will not block the thread and instead return
+ /// an unconnected `TcpStream`.
+ ///
+ /// Ensuring the returned stream is connected is surprisingly complex when
+ /// considering cross-platform support. Doing this properly should follow
+ /// the steps below, an example implementation can be found
+ /// [here](https://github.com/Thomasdezeeuw/heph/blob/0c4f1ab3eaf08bea1d65776528bfd6114c9f8374/src/net/tcp/stream.rs#L560-L622).
+ ///
+ /// 1. Call `TcpStream::connect`
+ /// 2. Register the returned stream with at least [read interest].
+ /// 3. Wait for a (readable) event.
+ /// 4. Check `TcpStream::peer_addr` (a sketch of this check is shown below).
+ /// If it returns `libc::EINPROGRESS` or `ErrorKind::NotConnected` it means
+ /// the stream is not yet connected, go back to step 3. If it returns an
+ /// address it means the stream is connected, go to step 5. If another error
+ /// is returned something went wrong.
+ /// 5. Now the stream can be used.
+ ///
+ /// [read interest]: Interest::READABLE
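+ ///
+ /// A sketch of the check in step 4, run after an event was received for the
+ /// stream (only the `ErrorKind::NotConnected` case is handled here):
+ ///
+ /// ```no_run
+ /// # use std::io;
+ /// # fn main() -> io::Result<()> {
+ /// use mio::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:34255".parse().unwrap())?;
+ ///
+ /// // ... register `stream` and wait for a readable or writable event ...
+ ///
+ /// match stream.peer_addr() {
+ ///     Ok(addr) => println!("connected to {}", addr),
+ ///     Err(ref err) if err.kind() == io::ErrorKind::NotConnected => {
+ ///         // Not connected yet: wait for another event and check again.
+ ///     }
+ ///     Err(err) => return Err(err),
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```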
+ pub fn connect(addr: SocketAddr) -> io::Result<TcpStream> {
+ let socket = new_for_addr(addr)?;
+ #[cfg(unix)]
+ let stream = unsafe { TcpStream::from_raw_fd(socket) };
+ #[cfg(windows)]
+ let stream = unsafe { TcpStream::from_raw_socket(socket as _) };
+ connect(&stream.inner, addr)?;
+ Ok(stream)
+ }
+
+ /// Creates a new `TcpStream` from a standard `net::TcpStream`.
+ ///
+ /// This function is intended to be used to wrap a TCP stream from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying stream; it is left up to the user to set it in
+ /// non-blocking mode.
+ ///
+ /// # Note
+ ///
+ /// The TCP stream here will not have `connect` called on it, so it
+ /// should already be connected via some other means (be it manually, or
+ /// the standard library).
+ pub fn from_std(stream: net::TcpStream) -> TcpStream {
+ TcpStream {
+ inner: IoSource::new(stream),
+ }
+ }
+
+ /// Returns the socket address of the remote peer of this TCP connection.
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.peer_addr()
+ }
+
+ /// Returns the socket address of the local half of this TCP connection.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O on the specified
+ /// portions to return immediately with an appropriate value (see the
+ /// documentation of `Shutdown`).
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.inner.shutdown(how)
+ }
+
+ /// Sets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// If set, this option disables the Nagle algorithm. This means that
+ /// segments are always sent as soon as possible, even if there is only a
+ /// small amount of data. When not set, data is buffered until there is a
+ /// sufficient amount to send out, thereby avoiding the frequent sending of
+ /// small packets.
+ ///
+ /// # Notes
+ ///
+ /// On Windows make sure the stream is connected before calling this method,
+ /// by receiving a (writable) event. Trying to set `nodelay` on an
+ /// unconnected `TcpStream` is unspecified behavior.
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.inner.set_nodelay(nodelay)
+ }
+
+ /// Gets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// For more information about this option, see [`set_nodelay`][link].
+ ///
+ /// [link]: #method.set_nodelay
+ ///
+ /// # Notes
+ ///
+ /// On Windows make sure the stream is connected before calling this method,
+ /// by receiving a (writable) event. Trying to get `nodelay` on an
+ /// unconnected `TcpStream` is unspecified behavior.
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.inner.nodelay()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Notes
+ ///
+ /// On Windows make sure the stream is connected before calling this method,
+ /// by receiving a (writable) event. Trying to set `ttl` on an
+ /// unconnected `TcpStream` is unspecified behavior.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// # Notes
+ ///
+ /// On Windows make sure the stream is connected before calling this method,
+ /// by receiving a (writable) event. Trying to get `ttl` on an
+ /// unconnected `TcpStream` is unspecified behavior.
+ ///
+ /// [link]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying recv system call.
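+ ///
+ /// # Examples
+ ///
+ /// A sketch (not run here) of peeking once the stream became readable:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:34255".parse()?)?;
+ ///
+ /// // After a readable event: peeking leaves the data in the queue, so a
+ /// // following `read` will return the same bytes again.
+ /// let mut buf = [0; 512];
+ /// let n = stream.peek(&mut buf)?;
+ /// println!("peeked {} bytes", n);
+ /// # Ok(())
+ /// # }
+ /// ```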
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.peek(buf)
+ }
+}
+
+impl Read for TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read_vectored(bufs))
+ }
+}
+
+impl<'a> Read for &'a TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read_vectored(bufs))
+ }
+}
+
+impl Write for TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|inner| (&*inner).flush())
+ }
+}
+
+impl<'a> Write for &'a TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|inner| (&*inner).flush())
+ }
+}
+
+impl event::Source for TcpStream {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+#[cfg(unix)]
+impl IntoRawFd for TcpStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for TcpStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl FromRawFd for TcpStream {
+ /// Converts a `RawFd` to a `TcpStream`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> TcpStream {
+ TcpStream::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
+
+#[cfg(windows)]
+impl IntoRawSocket for TcpStream {
+ fn into_raw_socket(self) -> RawSocket {
+ self.inner.into_inner().into_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl AsRawSocket for TcpStream {
+ fn as_raw_socket(&self) -> RawSocket {
+ self.inner.as_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl FromRawSocket for TcpStream {
+ /// Converts a `RawSocket` to a `TcpStream`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_socket(socket: RawSocket) -> TcpStream {
+ TcpStream::from_std(FromRawSocket::from_raw_socket(socket))
+ }
+}
--- /dev/null
+//! Primitives for working with UDP.
+//!
+//! The types provided in this module are non-blocking by default and are
+//! designed to be portable across all supported Mio platforms. As long as the
+//! [portability guidelines] are followed, the behavior should be identical no
+//! matter the target platform.
+//!
+//! [portability guidelines]: ../struct.Poll.html#portability
+
+use crate::io_source::IoSource;
+use crate::{event, sys, Interest, Registry, Token};
+
+use std::fmt;
+use std::io;
+use std::net;
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+#[cfg(windows)]
+use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+
+/// A User Datagram Protocol socket.
+///
+/// This is an implementation of a bound UDP socket. This supports both IPv4 and
+/// IPv6 addresses, and there is no corresponding notion of a server because UDP
+/// is a datagram protocol.
+///
+/// # Examples
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # use std::error::Error;
+/// #
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// // An Echo program:
+/// // SENDER -> sends a message.
+/// // ECHOER -> listens and prints the message received.
+///
+/// use mio::net::UdpSocket;
+/// use mio::{Events, Interest, Poll, Token};
+/// use std::time::Duration;
+///
+/// const SENDER: Token = Token(0);
+/// const ECHOER: Token = Token(1);
+///
+/// // This operation will fail if the address is in use, so we bind to port 0 and let the OS
+/// // assign a free port to each socket.
+/// let mut sender_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+/// let mut echoer_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+///
+/// // If we do not use connect here, SENDER and ECHOER would need to call send_to and recv_from
+/// // respectively.
+/// sender_socket.connect(echoer_socket.local_addr()?)?;
+///
+/// // We need a Poll to check if SENDER is ready to be written into, and if ECHOER is ready to be
+/// // read from.
+/// let mut poll = Poll::new()?;
+///
+/// // We register our sockets here so that we can check if they are ready to be written/read.
+/// poll.registry().register(&mut sender_socket, SENDER, Interest::WRITABLE)?;
+/// poll.registry().register(&mut echoer_socket, ECHOER, Interest::READABLE)?;
+///
+/// let msg_to_send = [9; 9];
+/// let mut buffer = [0; 9];
+///
+/// let mut events = Events::with_capacity(128);
+/// loop {
+/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
+/// for event in events.iter() {
+/// match event.token() {
+/// // Our SENDER is ready to be written into.
+/// SENDER => {
+/// let bytes_sent = sender_socket.send(&msg_to_send)?;
+/// assert_eq!(bytes_sent, 9);
+/// println!("sent {:?} -> {:?} bytes", msg_to_send, bytes_sent);
+/// },
+/// // Our ECHOER is ready to be read from.
+/// ECHOER => {
+/// let num_recv = echoer_socket.recv(&mut buffer)?;
+/// println!("echo {:?} -> {:?}", buffer, num_recv);
+/// buffer = [0; 9];
+/// # drop(buffer); // Silence unused assignment warning.
+/// # return Ok(());
+/// }
+/// _ => unreachable!()
+/// }
+/// }
+/// }
+/// # }
+/// ```
+pub struct UdpSocket {
+ inner: IoSource<net::UdpSocket>,
+}
+
+impl UdpSocket {
+ /// Creates a UDP socket from the given address.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// // We must bind it to an open address.
+ /// let socket = match UdpSocket::bind("127.0.0.1:0".parse()?) {
+ /// Ok(new_socket) => new_socket,
+ /// Err(fail) => {
+ /// // We panic! here, but you could try to bind it again on another address.
+ /// panic!("Failed to bind socket. {:?}", fail);
+ /// }
+ /// };
+ ///
+ /// // Our socket was created, but we should not use it before checking its readiness.
+ /// # drop(socket); // Silence unused variable warning.
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn bind(addr: SocketAddr) -> io::Result<UdpSocket> {
+ sys::udp::bind(addr).map(UdpSocket::from_std)
+ }
+
+ /// Creates a new `UdpSocket` from a standard `net::UdpSocket`.
+ ///
+ /// This function is intended to be used to wrap a UDP socket from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying socket; it is left up to the user to set it in
+ /// non-blocking mode.
+ pub fn from_std(socket: net::UdpSocket) -> UdpSocket {
+ UdpSocket {
+ inner: IoSource::new(socket),
+ }
+ }
+
+ /// Returns the socket address that this socket was created from.
+ ///
+ /// # Examples
+ ///
+ // This assertion is almost, but not quite, universal. It fails on
+ // shared-IP FreeBSD jails. It's hard for mio to know whether we're jailed,
+ // so simply disable the test on FreeBSD.
+ #[cfg_attr(all(feature = "os-poll", not(target_os = "freebsd")), doc = "```")]
+ #[cfg_attr(
+ any(not(feature = "os-poll"), target_os = "freebsd"),
+ doc = "```ignore"
+ )]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let addr = "127.0.0.1:0".parse()?;
+ /// let socket = UdpSocket::bind(addr)?;
+ /// assert_eq!(socket.local_addr()?.ip(), addr.ip());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.local_addr()
+ }
+
+ /// Returns the socket address of the remote peer this socket was connected to.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let addr = "127.0.0.1:0".parse()?;
+ /// let peer_addr = "127.0.0.1:11100".parse()?;
+ /// let socket = UdpSocket::bind(addr)?;
+ /// socket.connect(peer_addr)?;
+ /// assert_eq!(socket.peer_addr()?.ip(), peer_addr.ip());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.inner.peer_addr()
+ }
+
+ /// Sends data on the socket to the given address. On success, returns the
+ /// number of bytes written.
+ ///
+ /// Unlike `std::net::UdpSocket::send_to`, the `target` address must be a
+ /// `SocketAddr`, so no address resolution is performed.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is writable before calling send_to,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let bytes_sent = socket.send_to(&[9; 9], "127.0.0.1:11100".parse()?)?;
+ /// assert_eq!(bytes_sent, 9);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.send_to(buf, target))
+ }
+
+ /// Receives data from the socket. On success, returns the number of bytes
+ /// read and the address from whence the data came.
+ ///
+ /// # Notes
+ ///
+ /// On Windows, if the data is larger than the buffer specified, the buffer
+ /// is filled with the first part of the data, and recv_from returns the error
+ /// WSAEMSGSIZE(10040). The excess data is lost.
+ /// Make sure to always use a sufficiently large buffer to hold the
+ /// maximum UDP packet size, which can be up to 65536 bytes in size.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is readable before calling recv_from,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let mut buf = [0; 9];
+ /// let (num_recv, from_addr) = socket.recv_from(&mut buf)?;
+ /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.inner.do_io(|inner| inner.recv_from(buf))
+ }
+
+ /// Receives data from the socket, without removing it from the input queue.
+ /// On success, returns the number of bytes read and the address from whence
+ /// the data came.
+ ///
+ /// # Notes
+ ///
+ /// On Windows, if the data is larger than the buffer specified, the buffer
+ /// is filled with the first part of the data, and peek_from returns the error
+ /// WSAEMSGSIZE(10040). The excess data is lost.
+ /// Make sure to always use a sufficiently large buffer to hold the
+ /// maximum UDP packet size, which can be up to 65536 bytes in size.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ ///
+ /// // We must check if the socket is readable before calling peek_from,
+ /// // or we could run into a WouldBlock error.
+ ///
+ /// let mut buf = [0; 9];
+ /// let (num_recv, from_addr) = socket.peek_from(&mut buf)?;
+ /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.inner.do_io(|inner| inner.peek_from(buf))
+ }
+
+ /// Sends data on the socket to the address to which it was previously
+ /// connected via `connect()`. On success, returns the number of bytes
+ /// written.
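+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (the peer address is arbitrary); in a real program the
+ /// socket would be registered with `Poll` and written to only after a
+ /// writable event:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// socket.connect("127.0.0.1:11100".parse()?)?;
+ ///
+ /// let bytes_sent = socket.send(&[1, 2, 3])?;
+ /// assert_eq!(bytes_sent, 3);
+ /// # Ok(())
+ /// # }
+ /// ```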
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.send(buf))
+ }
+
+ /// Receives data on the socket from the address to which it was previously
+ /// connected via `connect()`. On success, returns the number of bytes read.
+ ///
+ /// # Notes
+ ///
+ /// On Windows, if the data is larger than the buffer specified, the buffer
+ /// is filled with the first part of the data, and `recv` returns the
+ /// error `WSAEMSGSIZE` (10040). The excess data is lost.
+ /// Make sure to always use a sufficiently large buffer to hold the
+ /// maximum UDP packet size, which can be up to 65,535 bytes.
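+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (the peer address is arbitrary); a real program would
+ /// wait for a readable event before calling `recv`:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// socket.connect("127.0.0.1:11100".parse()?)?;
+ ///
+ /// let mut buf = [0; 1024];
+ /// let bytes_read = socket.recv(&mut buf)?;
+ /// println!("Received {:?}", &buf[..bytes_read]);
+ /// # Ok(())
+ /// # }
+ /// ```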
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.recv(buf))
+ }
+
+ /// Receives data from the socket, without removing it from the input queue.
+ /// On success, returns the number of bytes read.
+ ///
+ /// # Notes
+ ///
+ /// On Windows, if the data is larger than the buffer specified, the buffer
+ /// is filled with the first part of the data, and `peek` returns the
+ /// error `WSAEMSGSIZE` (10040). The excess data is lost.
+ /// Make sure to always use a sufficiently large buffer to hold the
+ /// maximum UDP packet size, which can be up to 65,535 bytes.
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.peek(buf))
+ }
+
+ /// Connects the UDP socket by setting the default destination for `send()`
+ /// and limiting packets that are read via `recv()` to those coming from the
+ /// address specified in `addr`.
+ pub fn connect(&self, addr: SocketAddr) -> io::Result<()> {
+ self.inner.connect(addr)
+ }
+
+ /// Sets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// When enabled, this socket is allowed to send packets to a broadcast
+ /// address.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// if !broadcast_socket.broadcast()? {
+ /// broadcast_socket.set_broadcast(true)?;
+ /// }
+ ///
+ /// assert!(broadcast_socket.broadcast()?);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.inner.set_broadcast(on)
+ }
+
+ /// Gets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_broadcast`][link].
+ ///
+ /// [link]: #method.set_broadcast
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// assert_eq!(broadcast_socket.broadcast()?, false);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.inner.broadcast()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// If enabled, multicast packets will be looped back to the local socket.
+ /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.inner.set_multicast_loop_v4(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v4
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.inner.multicast_loop_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// Indicates the time-to-live value of outgoing multicast packets for
+ /// this socket. The default value is 1, which means that multicast packets
+ /// don't leave the local network unless explicitly requested.
+ ///
+ /// Note that this may not have any effect on IPv6 sockets.
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_multicast_ttl_v4(ttl)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_ttl_v4`][link].
+ ///
+ /// [link]: #method.set_multicast_ttl_v4
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.inner.multicast_ttl_v4()
+ }
+
+ /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// Controls whether this socket sees the multicast packets it sends itself.
+ /// Note that this may not have any effect on IPv4 sockets.
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.inner.set_multicast_loop_v6(on)
+ }
+
+ /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see
+ /// [`set_multicast_loop_v6`][link].
+ ///
+ /// [link]: #method.set_multicast_loop_v6
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.inner.multicast_loop_v6()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// if socket.ttl()? < 255 {
+ /// socket.set_ttl(255)?;
+ /// }
+ ///
+ /// assert_eq!(socket.ttl()?, 255);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.inner.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`][link].
+ ///
+ /// [link]: #method.set_ttl
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// #
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
+ /// socket.set_ttl(255)?;
+ ///
+ /// assert_eq!(socket.ttl()?, 255);
+ /// #
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.inner.ttl()
+ }
+
+ /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// address of the local interface with which the system should join the
+ /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+ /// interface is chosen by the system.
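+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (the multicast group address is arbitrary);
+ /// `Ipv4Addr::UNSPECIFIED` corresponds to `INADDR_ANY`:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UdpSocket;
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let socket = UdpSocket::bind("0.0.0.0:0".parse()?)?;
+ /// socket.join_multicast_v4(&Ipv4Addr::new(224, 0, 0, 123), &Ipv4Addr::UNSPECIFIED)?;
+ /// # Ok(())
+ /// # }
+ /// ```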
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+ self.inner.join_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// index of the interface to join/leave (or 0 to indicate any interface).
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ self.inner.join_multicast_v6(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v4`][link].
+ ///
+ /// [link]: #method.join_multicast_v4
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+ self.inner.leave_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see
+ /// [`join_multicast_v6`][link].
+ ///
+ /// [link]: #method.join_multicast_v6
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ self.inner.leave_multicast_v6(multiaddr, interface)
+ }
+
+ /// Get the value of the `IPV6_V6ONLY` option on this socket.
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn only_v6(&self) -> io::Result<bool> {
+ sys::udp::only_v6(&self.inner)
+ }
+
+ /// Get the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+}
+
+impl event::Source for UdpSocket {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+#[cfg(unix)]
+impl IntoRawFd for UdpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for UdpSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+#[cfg(unix)]
+impl FromRawFd for UdpSocket {
+ /// Converts a `RawFd` to a `UdpSocket`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
+ UdpSocket::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
+
+#[cfg(windows)]
+impl IntoRawSocket for UdpSocket {
+ fn into_raw_socket(self) -> RawSocket {
+ self.inner.into_inner().into_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl AsRawSocket for UdpSocket {
+ fn as_raw_socket(&self) -> RawSocket {
+ self.inner.as_raw_socket()
+ }
+}
+
+#[cfg(windows)]
+impl FromRawSocket for UdpSocket {
+ /// Converts a `RawSocket` to a `UdpSocket`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_socket(socket: RawSocket) -> UdpSocket {
+ UdpSocket::from_std(FromRawSocket::from_raw_socket(socket))
+ }
+}
--- /dev/null
+use crate::io_source::IoSource;
+use crate::{event, sys, Interest, Registry, Token};
+
+use std::net::Shutdown;
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::os::unix::net;
+use std::path::Path;
+use std::{fmt, io};
+
+/// A Unix datagram socket.
+pub struct UnixDatagram {
+ inner: IoSource<net::UnixDatagram>,
+}
+
+impl UnixDatagram {
+ /// Creates a Unix datagram socket bound to the given path.
+ pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixDatagram> {
+ sys::uds::datagram::bind(path.as_ref()).map(UnixDatagram::from_std)
+ }
+
+ /// Creates a new `UnixDatagram` from a standard `net::UnixDatagram`.
+ ///
+ /// This function is intended to be used to wrap a Unix datagram from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying datagram; it is left up to the user to set it
+ /// in non-blocking mode.
+ pub fn from_std(socket: net::UnixDatagram) -> UnixDatagram {
+ UnixDatagram {
+ inner: IoSource::new(socket),
+ }
+ }
+
+ /// Connects the socket to the specified address.
+ pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
+ self.inner.connect(path)
+ }
+
+ /// Creates a Unix datagram socket which is not bound to any address.
+ pub fn unbound() -> io::Result<UnixDatagram> {
+ sys::uds::datagram::unbound().map(UnixDatagram::from_std)
+ }
+
+ /// Create an unnamed pair of connected sockets.
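+ ///
+ /// A minimal usage sketch; because the sockets are non-blocking, a real
+ /// program would wait for a readable event before calling `recv`:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UnixDatagram;
+ ///
+ /// let (socket1, socket2) = UnixDatagram::pair()?;
+ /// socket1.send(b"ping")?;
+ ///
+ /// let mut buf = [0; 4];
+ /// let bytes_read = socket2.recv(&mut buf)?;
+ /// assert_eq!(&buf[..bytes_read], b"ping");
+ /// # Ok(())
+ /// # }
+ /// ```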
+ pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> {
+ sys::uds::datagram::pair().map(|(socket1, socket2)| {
+ (
+ UnixDatagram::from_std(socket1),
+ UnixDatagram::from_std(socket2),
+ )
+ })
+ }
+
+ /// Returns the address of this socket.
+ pub fn local_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::datagram::local_addr(&self.inner)
+ }
+
+ /// Returns the address of this socket's peer.
+ ///
+ /// The `connect` method will connect the socket to a peer.
+ pub fn peer_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::datagram::peer_addr(&self.inner)
+ }
+
+ /// Receives data from the socket.
+ ///
+ /// On success, returns the number of bytes read and the address from
+ /// whence the data came.
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, sys::SocketAddr)> {
+ self.inner
+ .do_io(|inner| sys::uds::datagram::recv_from(inner, buf))
+ }
+
+ /// Receives data from the socket.
+ ///
+ /// On success, returns the number of bytes read.
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.recv(buf))
+ }
+
+ /// Sends data on the socket to the specified address.
+ ///
+ /// On success, returns the number of bytes written.
+ pub fn send_to<P: AsRef<Path>>(&self, buf: &[u8], path: P) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.send_to(buf, path))
+ }
+
+ /// Sends data on the socket to the socket's peer.
+ ///
+ /// The peer address may be set by the `connect` method, and this method
+ /// will return an error if the socket has not already been connected.
+ ///
+ /// On success, returns the number of bytes written.
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| inner.send(buf))
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ /// Shut down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O calls on the
+ /// specified portions to immediately return with an appropriate value
+ /// (see the documentation of `Shutdown`).
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.inner.shutdown(how)
+ }
+}
+
+impl event::Source for UnixDatagram {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for UnixDatagram {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+impl IntoRawFd for UnixDatagram {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixDatagram {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixDatagram {
+ /// Converts a `RawFd` to a `UnixDatagram`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixDatagram {
+ UnixDatagram::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
--- /dev/null
+use crate::io_source::IoSource;
+use crate::net::{SocketAddr, UnixStream};
+use crate::{event, sys, Interest, Registry, Token};
+
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::os::unix::net;
+use std::path::Path;
+use std::{fmt, io};
+
+/// A non-blocking Unix domain socket server.
+pub struct UnixListener {
+ inner: IoSource<net::UnixListener>,
+}
+
+impl UnixListener {
+ /// Creates a new `UnixListener` bound to the specified socket.
+ pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> {
+ sys::uds::listener::bind(path.as_ref()).map(UnixListener::from_std)
+ }
+
+ /// Creates a new `UnixListener` from a standard `net::UnixListener`.
+ ///
+ /// This function is intended to be used to wrap a Unix listener from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying listener; it is left up to the user to set it in
+ /// non-blocking mode.
+ pub fn from_std(listener: net::UnixListener) -> UnixListener {
+ UnixListener {
+ inner: IoSource::new(listener),
+ }
+ }
+
+ /// Accepts a new incoming connection to this listener.
+ ///
+ /// The caller is responsible for ensuring that the listening socket is in
+ /// non-blocking mode.
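+ ///
+ /// A minimal sketch (the socket path is arbitrary); because the listener is
+ /// non-blocking, `accept` returns a `WouldBlock` error when no connection is
+ /// pending:
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UnixListener;
+ /// use std::io::ErrorKind;
+ ///
+ /// let listener = UnixListener::bind("/tmp/mio-example.sock")?;
+ /// match listener.accept() {
+ /// Ok((_stream, _addr)) => println!("accepted a connection"),
+ /// Err(ref err) if err.kind() == ErrorKind::WouldBlock => {
+ /// // No connection is ready yet; wait for a readiness event and retry.
+ /// }
+ /// Err(err) => return Err(err.into()),
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```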
+ pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
+ sys::uds::listener::accept(&self.inner)
+ }
+
+ /// Returns the local socket address of this listener.
+ pub fn local_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::listener::local_addr(&self.inner)
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+}
+
+impl event::Source for UnixListener {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for UnixListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+impl IntoRawFd for UnixListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixListener {
+ /// Converts a `RawFd` to a `UnixListener`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixListener {
+ UnixListener::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
--- /dev/null
+mod datagram;
+pub use self::datagram::UnixDatagram;
+
+mod listener;
+pub use self::listener::UnixListener;
+
+mod stream;
+pub use self::stream::UnixStream;
+
+pub use crate::sys::SocketAddr;
--- /dev/null
+use crate::io_source::IoSource;
+use crate::{event, sys, Interest, Registry, Token};
+
+use std::fmt;
+use std::io::{self, IoSlice, IoSliceMut, Read, Write};
+use std::net::Shutdown;
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::os::unix::net;
+use std::path::Path;
+
+/// A non-blocking Unix stream socket.
+pub struct UnixStream {
+ inner: IoSource<net::UnixStream>,
+}
+
+impl UnixStream {
+ /// Connects to the socket named by `path`.
+ pub fn connect<P: AsRef<Path>>(path: P) -> io::Result<UnixStream> {
+ sys::uds::stream::connect(path.as_ref()).map(UnixStream::from_std)
+ }
+
+ /// Creates a new `UnixStream` from a standard `net::UnixStream`.
+ ///
+ /// This function is intended to be used to wrap a Unix stream from the
+ /// standard library in the Mio equivalent. The conversion assumes nothing
+ /// about the underlying stream; it is left up to the user to set it in
+ /// non-blocking mode.
+ ///
+ /// # Note
+ ///
+ /// The Unix stream here will not have `connect` called on it, so it
+ /// should already be connected via some other means (whether manually or via
+ /// the standard library).
+ pub fn from_std(stream: net::UnixStream) -> UnixStream {
+ UnixStream {
+ inner: IoSource::new(stream),
+ }
+ }
+
+ /// Creates an unnamed pair of connected sockets.
+ ///
+ /// Returns two `UnixStream`s which are connected to each other.
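+ ///
+ /// A minimal usage sketch (in a real program the streams would be registered
+ /// with `Poll` and written to only after a writable event):
+ ///
+ /// ```no_run
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::net::UnixStream;
+ /// use std::io::Write;
+ ///
+ /// let (mut stream1, stream2) = UnixStream::pair()?;
+ /// stream1.write_all(b"hello")?;
+ /// # drop(stream2);
+ /// # Ok(())
+ /// # }
+ /// ```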
+ pub fn pair() -> io::Result<(UnixStream, UnixStream)> {
+ sys::uds::stream::pair().map(|(stream1, stream2)| {
+ (UnixStream::from_std(stream1), UnixStream::from_std(stream2))
+ })
+ }
+
+ /// Returns the socket address of the local half of this connection.
+ pub fn local_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::stream::local_addr(&self.inner)
+ }
+
+ /// Returns the socket address of the remote half of this connection.
+ pub fn peer_addr(&self) -> io::Result<sys::SocketAddr> {
+ sys::uds::stream::peer_addr(&self.inner)
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O calls on the
+ /// specified portions to immediately return with an appropriate value
+ /// (see the documentation of `Shutdown`).
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.inner.shutdown(how)
+ }
+}
+
+impl Read for UnixStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read_vectored(bufs))
+ }
+}
+
+impl<'a> Read for &'a UnixStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).read_vectored(bufs))
+ }
+}
+
+impl Write for UnixStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|inner| (&*inner).flush())
+ }
+}
+
+impl<'a> Write for &'a UnixStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|inner| (&*inner).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|inner| (&*inner).flush())
+ }
+}
+
+impl event::Source for UnixStream {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl fmt::Debug for UnixStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+impl IntoRawFd for UnixStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+impl AsRawFd for UnixStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl FromRawFd for UnixStream {
+ /// Converts a `RawFd` to a `UnixStream`.
+ ///
+ /// # Notes
+ ///
+ /// The caller is responsible for ensuring that the socket is in
+ /// non-blocking mode.
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixStream {
+ UnixStream::from_std(FromRawFd::from_raw_fd(fd))
+ }
+}
--- /dev/null
+use crate::{event, sys, Events, Interest, Token};
+use log::trace;
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::time::Duration;
+use std::{fmt, io};
+
+/// Polls for readiness events on all registered values.
+///
+/// `Poll` allows a program to monitor a large number of [`event::Source`]s,
+/// waiting until one or more become "ready" for some class of operations; e.g.
+/// reading and writing. An event source is considered ready if it is possible
+/// to immediately perform a corresponding operation; e.g. [`read`] or
+/// [`write`].
+///
+/// To use `Poll`, an `event::Source` must first be registered with the `Poll`
+/// instance using the [`register`] method on its associated `Registry`,
+/// supplying readiness interest. The readiness interest tells `Poll` which
+/// specific operations on the handle to monitor for readiness. A `Token` is
+/// also passed to the [`register`] function. When `Poll` returns a readiness
+/// event, it will include this token. This associates the event with the
+/// event source that generated the event.
+///
+/// [`event::Source`]: ./event/trait.Source.html
+/// [`read`]: ./net/struct.TcpStream.html#method.read
+/// [`write`]: ./net/struct.TcpStream.html#method.write
+/// [`register`]: struct.Registry.html#method.register
+///
+/// # Examples
+///
+/// A basic example -- establishing a `TcpStream` connection.
+///
+#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Poll, Interest, Token};
+/// use mio::net::TcpStream;
+///
+/// use std::net::{self, SocketAddr};
+///
+/// // Bind a server socket to connect to.
+/// let addr: SocketAddr = "127.0.0.1:0".parse()?;
+/// let server = net::TcpListener::bind(addr)?;
+///
+/// // Construct a new `Poll` handle as well as the `Events` we'll store into
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(1024);
+///
+/// // Connect the stream
+/// let mut stream = TcpStream::connect(server.local_addr()?)?;
+///
+/// // Register the stream with `Poll`
+/// poll.registry().register(&mut stream, Token(0), Interest::READABLE | Interest::WRITABLE)?;
+///
+/// // Wait for the socket to become ready. This has to happen in a loop to
+/// // handle spurious wakeups.
+/// loop {
+/// poll.poll(&mut events, None)?;
+///
+/// for event in &events {
+/// if event.token() == Token(0) && event.is_writable() {
+/// // The socket connected (probably, it could still be a spurious
+/// // wakeup)
+/// return Ok(());
+/// }
+/// }
+/// }
+/// # }
+/// ```
+///
+/// # Portability
+///
+/// Using `Poll` provides a portable interface across supported platforms as
+/// long as the caller takes the following into consideration:
+///
+/// ### Spurious events
+///
+/// [`Poll::poll`] may return readiness events even if the associated
+/// event source is not actually ready. Given the same code, this may
+/// happen more on some platforms than others. It is important never to assume
+/// that, just because a readiness event was received, the associated
+/// operation will succeed as well.
+///
+/// If an operation fails with [`WouldBlock`], then the caller should not treat
+/// this as an error, but instead should wait until another readiness event is
+/// received.
+///
+/// ### Draining readiness
+///
+/// Once a readiness event is received, the corresponding operation must be
+/// performed repeatedly until it returns [`WouldBlock`]. Unless this is done,
+/// there is no guarantee that another readiness event will be delivered, even
+/// if further data is received for the event source.
+///
+/// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
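+///
+/// For example, after a readable event for a `TcpStream` the stream should be
+/// read until `WouldBlock` is returned; a minimal sketch (error handling and
+/// event-loop integration omitted):
+///
+/// ```ignore
+/// use std::io::{self, Read};
+///
+/// fn drain(stream: &mut mio::net::TcpStream) -> io::Result<()> {
+/// let mut buf = [0; 4096];
+/// loop {
+/// match stream.read(&mut buf) {
+/// // Keep reading until the operation would block.
+/// Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return Ok(()),
+/// Err(err) => return Err(err),
+/// // The remote side closed the connection.
+/// Ok(0) => return Ok(()),
+/// // Some bytes were read; process them and continue reading.
+/// Ok(_n) => continue,
+/// }
+/// }
+/// }
+/// ```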
+///
+/// ### Readiness operations
+///
+/// The only readiness operations that are guaranteed to be present on all
+/// supported platforms are [`readable`] and [`writable`]. All other readiness
+/// operations may have false negatives and as such should be considered
+/// **hints**. This means that if a socket is registered with [`readable`]
+/// interest and either an error or close is received, a readiness event will
+/// be generated for the socket, but it **may** only include `readable`
+/// readiness. Also note that, given the potential for spurious events,
+/// receiving a readiness event with `read_closed`, `write_closed`, or `error`
+/// doesn't actually mean that a `read` on the socket will return a result
+/// matching the readiness event.
+///
+/// In other words, portable programs that explicitly check for [`read_closed`],
+/// [`write_closed`], or [`error`] readiness should be doing so as an
+/// **optimization** and always be able to handle an error or close situation
+/// when performing the actual read operation.
+///
+/// [`readable`]: ./event/struct.Event.html#method.is_readable
+/// [`writable`]: ./event/struct.Event.html#method.is_writable
+/// [`error`]: ./event/struct.Event.html#method.is_error
+/// [`read_closed`]: ./event/struct.Event.html#method.is_read_closed
+/// [`write_closed`]: ./event/struct.Event.html#method.is_write_closed
+///
+/// ### Registering handles
+///
+/// Unless otherwise noted, it should be assumed that types implementing
+/// [`event::Source`] will never become ready unless they are registered with
+/// `Poll`.
+///
+/// For example:
+///
+#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+/// # use std::error::Error;
+/// # use std::net;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Poll, Interest, Token};
+/// use mio::net::TcpStream;
+/// use std::net::SocketAddr;
+/// use std::time::Duration;
+/// use std::thread;
+///
+/// let address: SocketAddr = "127.0.0.1:0".parse()?;
+/// let listener = net::TcpListener::bind(address)?;
+/// let mut sock = TcpStream::connect(listener.local_addr()?)?;
+///
+/// thread::sleep(Duration::from_secs(1));
+///
+/// let poll = Poll::new()?;
+///
+/// // The connect is not guaranteed to have started until it is registered at
+/// // this point
+/// poll.registry().register(&mut sock, Token(0), Interest::READABLE | Interest::WRITABLE)?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// ### Dropping `Poll`
+///
+/// When the `Poll` instance is dropped it may cancel in-flight operations for
+/// the registered [event sources], meaning that no further events for them may
+/// be received. It also means operations on the registered event sources may no
+/// longer work. It is up to the user to keep the `Poll` instance alive while
+/// registered event sources are being used.
+///
+/// [event sources]: ./event/trait.Source.html
+///
+/// ### Accessing raw fd/socket/handle
+///
+/// Mio makes it possible for many types to be converted into a raw file
+/// descriptor (fd, Unix), socket (Windows) or handle (Windows). This makes it
+/// possible to support more operations on the type than Mio supports, for
+/// example it makes [mio-aio] possible. However, accessing the raw fd is not
+/// without its pitfalls.
+///
+/// Specifically, performing I/O operations outside of Mio on these types (via
+/// the raw fd) has unspecified behavior. It could cause no more events to be
+/// generated for the type even though `WouldBlock` was returned (by an
+/// operation directly accessing the fd). The behavior is OS specific and Mio
+/// can only guarantee cross-platform behavior if it can control the I/O.
+///
+/// [mio-aio]: https://github.com/asomers/mio-aio
+///
+/// *The following is **not** guaranteed, just a description of the current
+/// situation!* Mio is allowed to change the following without it being
+/// considered a breaking change; don't depend on it, it is only here to inform
+/// the user. Currently the kqueue and epoll implementations support direct I/O
+/// operations on the fd without Mio's knowledge. Windows, however, needs
+/// **all** I/O operations to go through Mio, otherwise it is not able to
+/// update its internal state properly and won't generate events.
+///
+/// # Implementation notes
+///
+/// `Poll` is backed by the selector provided by the operating system.
+///
+/// | OS | Selector |
+/// |---------------|-----------|
+/// | Android | [epoll] |
+/// | DragonFly BSD | [kqueue] |
+/// | FreeBSD | [kqueue] |
+/// | iOS | [kqueue] |
+/// | illumos | [epoll] |
+/// | Linux | [epoll] |
+/// | NetBSD | [kqueue] |
+/// | OpenBSD | [kqueue] |
+/// | Windows | [IOCP] |
+/// | macOS | [kqueue] |
+///
+/// On all supported platforms, socket operations are handled by using the
+/// system selector. Platform specific extensions (e.g. [`SourceFd`]) allow
+/// accessing other features provided by individual system selectors. For
+/// example, Linux's [`signalfd`] feature can be used by registering the FD with
+/// `Poll` via [`SourceFd`].
+///
+/// On all platforms except Windows, a call to [`Poll::poll`] is mostly just a
+/// direct call to the system selector. However, [IOCP] uses a completion model
+/// instead of a readiness model. In this case, `Poll` must adapt the completion
+/// model to Mio's API. While non-trivial, the bridge layer is still quite
+/// efficient. The most expensive part is that calls to `read` and `write`
+/// require data to be copied into an intermediate buffer before it is passed
+/// to the kernel.
+///
+/// [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html
+/// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
+/// [IOCP]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198(v=vs.85).aspx
+/// [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
+/// [`SourceFd`]: unix/struct.SourceFd.html
+/// [`Poll::poll`]: struct.Poll.html#method.poll
+pub struct Poll {
+ registry: Registry,
+}
+
+/// Registers I/O resources.
+pub struct Registry {
+ selector: sys::Selector,
+}
+
+impl Poll {
+ /// Returns a reference to this `Poll` instance's `Registry`, which can be
+ /// used to register `event::Source`s.
+ pub fn registry(&self) -> &Registry {
+ &self.registry
+ }
+
+ /// Wait for readiness events
+ ///
+ /// Blocks the current thread and waits for readiness events for any of the
+ /// [`event::Source`]s that have been registered with this `Poll` instance.
+ /// The function will block until either at least one readiness event has
+ /// been received or `timeout` has elapsed. A `timeout` of `None` means that
+ /// `poll` will block until a readiness event has been received.
+ ///
+ /// The supplied `events` will be cleared and newly received readiness events
+ /// will be pushed onto the end. At most `events.capacity()` events will be
+ /// returned. If there are further pending readiness events, they will be
+ /// returned on the next call to `poll`.
+ ///
+ /// A single call to `poll` may result in multiple readiness events being
+ /// returned for a single event source. For example, if a TCP socket becomes
+ /// both readable and writable, it may be possible for a single readiness
+ /// event to be returned with both [`readable`] and [`writable`] readiness
+ /// **OR** two separate events may be returned, one with [`readable`] set
+ /// and one with [`writable`] set.
+ ///
+ /// Note that the `timeout` will be rounded up to the system clock
+ /// granularity (usually 1ms), and kernel scheduling delays mean that
+ /// the blocking interval may be overrun by a small amount.
+ ///
+ /// See the [struct] level documentation for a higher level discussion of
+ /// polling.
+ ///
+ /// [`event::Source`]: ./event/trait.Source.html
+ /// [`readable`]: struct.Interest.html#associatedconstant.READABLE
+ /// [`writable`]: struct.Interest.html#associatedconstant.WRITABLE
+ /// [struct]: struct.Poll.html
+ /// [`iter`]: ./event/struct.Events.html#method.iter
+ ///
+ /// # Notes
+ ///
+ /// This returns any errors without attempting to retry, previous versions
+ /// of Mio would automatically retry the poll call if it was interrupted
+ /// (if `EINTR` was returned).
+ ///
+ /// # Examples
+ ///
+ /// A basic example -- establishing a `TcpStream` connection.
+ ///
+ #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll, Interest, Token};
+ /// use mio::net::TcpStream;
+ ///
+ /// use std::net::{TcpListener, SocketAddr};
+ /// use std::thread;
+ ///
+ /// // Bind a server socket to connect to.
+ /// let addr: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let server = TcpListener::bind(addr)?;
+ /// let addr = server.local_addr()?.clone();
+ ///
+ /// // Spawn a thread to accept the socket
+ /// thread::spawn(move || {
+ /// let _ = server.accept();
+ /// });
+ ///
+ /// // Construct a new `Poll` handle as well as the `Events` we'll store into
+ /// let mut poll = Poll::new()?;
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Connect the stream
+ /// let mut stream = TcpStream::connect(addr)?;
+ ///
+ /// // Register the stream with `Poll`
+ /// poll.registry().register(
+ /// &mut stream,
+ /// Token(0),
+ /// Interest::READABLE | Interest::WRITABLE)?;
+ ///
+ /// // Wait for the socket to become ready. This has to happen in a loop to
+ /// // handle spurious wakeups.
+ /// loop {
+ /// poll.poll(&mut events, None)?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) && event.is_writable() {
+ /// // The socket connected (probably, it could still be a spurious
+ /// // wakeup)
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # }
+ /// ```
+ pub fn poll(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ self.registry.selector.select(events.sys(), timeout)
+ }
+}
+
+cfg_os_poll! {
+ impl Poll {
+ /// Return a new `Poll` handle.
+ ///
+ /// This function will make a syscall to the operating system to create
+ /// the system selector. If this syscall fails, `Poll::new` will return
+ /// with the error.
+ ///
+ /// See [struct] level docs for more details.
+ ///
+ /// [struct]: struct.Poll.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Poll, Events};
+ /// use std::time::Duration;
+ ///
+ /// let mut poll = match Poll::new() {
+ /// Ok(poll) => poll,
+ /// Err(e) => panic!("failed to create Poll instance; err={:?}", e),
+ /// };
+ ///
+ /// // Create a structure to receive polled events
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Wait for events, but none will be received because no
+ /// // `event::Source`s have been registered with this `Poll` instance.
+ /// poll.poll(&mut events, Some(Duration::from_millis(500)))?;
+ /// assert!(events.is_empty());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn new() -> io::Result<Poll> {
+ sys::Selector::new().map(|selector| Poll {
+ registry: Registry { selector },
+ })
+ }
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for Poll {
+ fn as_raw_fd(&self) -> RawFd {
+ self.registry.as_raw_fd()
+ }
+}
+
+impl fmt::Debug for Poll {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Poll").finish()
+ }
+}
+
+impl Registry {
+ /// Register an [`event::Source`] with the `Poll` instance.
+ ///
+ /// Once registered, the `Poll` instance will monitor the event source for
+ /// readiness state changes. When it notices a state change, it will return
+ /// a readiness event for the handle the next time [`poll`] is called.
+ ///
+ /// See [`Poll`] docs for a high level overview.
+ ///
+ /// # Arguments
+ ///
+ /// `source: &mut S: event::Source`: This is the source of events that the
+ /// `Poll` instance should monitor for readiness state changes.
+ ///
+ /// `token: Token`: The caller picks a token to associate with the socket.
+ /// When [`poll`] returns an event for the handle, this token is included.
+ /// This allows the caller to map the event to its source. The token
+ /// associated with the `event::Source` can be changed at any time by
+ /// calling [`reregister`].
+ ///
+ /// See documentation on [`Token`] for an example showing how to pick
+ /// [`Token`] values.
+ ///
+ /// `interest: Interest`: Specifies which operations `Poll` should monitor
+ /// for readiness. `Poll` will only return readiness events for operations
+ /// specified by this argument.
+ ///
+ /// If a socket is registered with readable interest and the socket becomes
+ /// writable, no event will be returned from [`poll`].
+ ///
+ /// The readiness interest for an `event::Source` can be changed at any time
+ /// by calling [`reregister`].
+ ///
+ /// # Notes
+ ///
+ /// Callers must ensure that if a source being registered with a `Poll`
+ /// instance was previously registered with that `Poll` instance, then a
+ /// call to [`deregister`] has already occurred. Consecutive calls to
+ /// `register` result in unspecified behavior.
+ ///
+ /// Unless otherwise specified, the caller should assume that once an event
+ /// source is registered with a `Poll` instance, it is bound to that `Poll`
+ /// instance for the lifetime of the event source. This remains true even
+ /// if the event source is deregistered from the poll instance using
+ /// [`deregister`].
+ ///
+ /// [`event::Source`]: ./event/trait.Source.html
+ /// [`poll`]: struct.Poll.html#method.poll
+ /// [`reregister`]: struct.Registry.html#method.reregister
+ /// [`deregister`]: struct.Registry.html#method.deregister
+ /// [`Token`]: struct.Token.html
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # use std::net;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll, Interest, Token};
+ /// use mio::net::TcpStream;
+ /// use std::net::SocketAddr;
+ /// use std::time::{Duration, Instant};
+ ///
+ /// let mut poll = Poll::new()?;
+ ///
+ /// let address: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let listener = net::TcpListener::bind(address)?;
+ /// let mut socket = TcpStream::connect(listener.local_addr()?)?;
+ ///
+ /// // Register the socket with `poll`
+ /// poll.registry().register(
+ /// &mut socket,
+ /// Token(0),
+ /// Interest::READABLE | Interest::WRITABLE)?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ /// let start = Instant::now();
+ /// let timeout = Duration::from_millis(500);
+ ///
+ /// loop {
+ /// let elapsed = start.elapsed();
+ ///
+ /// if elapsed >= timeout {
+ /// // Connection timed out
+ /// return Ok(());
+ /// }
+ ///
+ /// let remaining = timeout - elapsed;
+ /// poll.poll(&mut events, Some(remaining))?;
+ ///
+ /// for event in &events {
+ /// if event.token() == Token(0) {
+ /// // Something (probably) happened on the socket.
+ /// return Ok(());
+ /// }
+ /// }
+ /// }
+ /// # }
+ /// ```
+ pub fn register<S>(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()>
+ where
+ S: event::Source + ?Sized,
+ {
+ trace!(
+ "registering event source with poller: token={:?}, interests={:?}",
+ token,
+ interests
+ );
+ source.register(self, token, interests)
+ }
+
+ /// Re-register an [`event::Source`] with the `Poll` instance.
+ ///
+ /// Re-registering an event source allows changing the details of the
+ /// registration. Specifically, it allows updating the associated `token`
+ /// and `interests` specified in previous `register` and `reregister` calls.
+ ///
+ /// The `reregister` arguments fully override the previous values. In other
+ /// words, if a socket is registered with [`readable`] interest and the call
+ /// to `reregister` specifies [`writable`], then read interest is no longer
+ /// requested for the handle.
+ ///
+ /// The event source must have previously been registered with this instance
+ /// of `Poll`, otherwise the behavior is unspecified.
+ ///
+ /// See the [`register`] documentation for details about the function
+ /// arguments and see the [`struct`] docs for a high level overview of
+ /// polling.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # use std::net;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Poll, Interest, Token};
+ /// use mio::net::TcpStream;
+ /// use std::net::SocketAddr;
+ ///
+ /// let poll = Poll::new()?;
+ ///
+ /// let address: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let listener = net::TcpListener::bind(address)?;
+ /// let mut socket = TcpStream::connect(listener.local_addr()?)?;
+ ///
+ /// // Register the socket with `poll`, requesting readable
+ /// poll.registry().register(
+ /// &mut socket,
+ /// Token(0),
+ /// Interest::READABLE)?;
+ ///
+ /// // Reregister the socket specifying write interest instead. Even though
+ /// // the token is the same, it must be specified.
+ /// poll.registry().reregister(
+ /// &mut socket,
+ /// Token(0),
+ /// Interest::WRITABLE)?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ ///
+ /// [`event::Source`]: ./event/trait.Source.html
+ /// [`struct`]: struct.Poll.html
+ /// [`register`]: struct.Registry.html#method.register
+ /// [`readable`]: ./event/struct.Event.html#is_readable
+ /// [`writable`]: ./event/struct.Event.html#is_writable
+ pub fn reregister<S>(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()>
+ where
+ S: event::Source + ?Sized,
+ {
+ trace!(
+ "reregistering event source with poller: token={:?}, interests={:?}",
+ token,
+ interests
+ );
+ source.reregister(self, token, interests)
+ }
+
+ /// Deregister an [`event::Source`] with the `Poll` instance.
+ ///
+ /// When an event source is deregistered, the `Poll` instance will no longer
+ /// monitor it for readiness state changes. Deregistering clears up any
+ /// internal resources needed to track the handle. After an explicit call
+ /// to this method completes, it is guaranteed that the token previously
+ /// registered to this handle will not be returned by a future poll, so long
+ /// as a happens-before relationship is established between this call and
+ /// the poll.
+ ///
+ /// The event source must have previously been registered with this instance
+ /// of `Poll`, otherwise the behavior is unspecified.
+ ///
+ /// A handle can be passed back to `register` after it has been
+ /// deregistered; however, it must be passed back to the **same** `Poll`
+ /// instance, otherwise the behavior is unspecified.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+ #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # use std::net;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::{Events, Poll, Interest, Token};
+ /// use mio::net::TcpStream;
+ /// use std::net::SocketAddr;
+ /// use std::time::Duration;
+ ///
+ /// let mut poll = Poll::new()?;
+ ///
+ /// let address: SocketAddr = "127.0.0.1:0".parse()?;
+ /// let listener = net::TcpListener::bind(address)?;
+ /// let mut socket = TcpStream::connect(listener.local_addr()?)?;
+ ///
+ /// // Register the socket with `poll`
+ /// poll.registry().register(
+ /// &mut socket,
+ /// Token(0),
+ /// Interest::READABLE)?;
+ ///
+ /// poll.registry().deregister(&mut socket)?;
+ ///
+ /// let mut events = Events::with_capacity(1024);
+ ///
+ /// // Set a timeout because this poll should never receive any events.
+ /// poll.poll(&mut events, Some(Duration::from_secs(1)))?;
+ /// assert!(events.is_empty());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn deregister<S>(&self, source: &mut S) -> io::Result<()>
+ where
+ S: event::Source + ?Sized,
+ {
+ trace!("deregistering event source from poller");
+ source.deregister(self)
+ }
+
+ /// Creates a new independently owned `Registry`.
+ ///
+ /// Event sources registered with this `Registry` will be registered with
+ /// the original `Registry` and `Poll` instance.
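+ ///
+ /// A minimal sketch (assuming the "os-poll" feature is enabled): the cloned
+ /// `Registry` can be sent to another thread and used to register event
+ /// sources there.
+ ///
+ #[cfg_attr(feature = "os-poll", doc = "```")]
+ #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+ /// # use std::error::Error;
+ /// # fn main() -> Result<(), Box<dyn Error>> {
+ /// use mio::Poll;
+ /// use std::thread;
+ ///
+ /// let poll = Poll::new()?;
+ /// let registry = poll.registry().try_clone()?;
+ ///
+ /// let handle = thread::spawn(move || {
+ /// // `registry` can now be used to (re)register and deregister event
+ /// // sources from this thread.
+ /// drop(registry);
+ /// });
+ /// handle.join().unwrap();
+ /// # Ok(())
+ /// # }
+ /// ```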
+ pub fn try_clone(&self) -> io::Result<Registry> {
+ self.selector
+ .try_clone()
+ .map(|selector| Registry { selector })
+ }
+
+ /// Internal check to ensure only a single `Waker` is active per [`Poll`]
+ /// instance.
+ #[cfg(debug_assertions)]
+ pub(crate) fn register_waker(&self) {
+ assert!(
+ !self.selector.register_waker(),
+ "Only a single `Waker` can be active per `Poll` instance"
+ );
+ }
+
+ /// Get access to the `sys::Selector`.
+ pub(crate) fn selector(&self) -> &sys::Selector {
+ &self.selector
+ }
+}
+
+impl fmt::Debug for Registry {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Registry").finish()
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for Registry {
+ fn as_raw_fd(&self) -> RawFd {
+ self.selector.as_raw_fd()
+ }
+}
+
+cfg_os_poll! {
+ #[cfg(unix)]
+ #[test]
+ pub fn as_raw_fd() {
+ let poll = Poll::new().unwrap();
+ assert!(poll.as_raw_fd() > 0);
+ }
+}
--- /dev/null
+//! Module with system specific types.
+//!
+//! Required types:
+//!
+//! * `Event`: a type alias for the system specific event, e.g. `kevent` or
+//! `epoll_event`.
+//! * `event`: a module with various helper functions for `Event`, see
+//! [`crate::event::Event`] for the required functions.
+//! * `Events`: collection of `Event`s, see [`crate::Events`].
+//! * `IoSourceState`: state for the `IoSource` type.
+//! * `Selector`: selector used to register event sources and poll for events,
+//! see [`crate::Poll`] and [`crate::Registry`] for required
+//! methods.
+//! * `tcp` and `udp` modules: see the [`crate::net`] module.
+//! * `Waker`: see [`crate::Waker`].
+
+cfg_os_poll! {
+ macro_rules! debug_detail {
+ (
+ $type: ident ($event_type: ty), $test: path,
+ $($(#[$target: meta])* $libc: ident :: $flag: ident),+ $(,)*
+ ) => {
+ struct $type($event_type);
+
+ impl fmt::Debug for $type {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut written_one = false;
+ $(
+ $(#[$target])*
+ #[allow(clippy::bad_bit_mask)] // Apparently some flags are zero.
+ {
+ // Windows doesn't use `libc` but the `afd` module.
+ if $test(&self.0, &$libc :: $flag) {
+ if !written_one {
+ write!(f, "{}", stringify!($flag))?;
+ written_one = true;
+ } else {
+ write!(f, "|{}", stringify!($flag))?;
+ }
+ }
+ }
+ )+
+ if !written_one {
+ write!(f, "(empty)")
+ } else {
+ Ok(())
+ }
+ }
+ }
+ };
+ }
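+
+ // A hypothetical usage sketch (the names below are illustrative and not
+ // defined in this module):
+ //
+ // debug_detail!(
+ // EventDetails(libc::c_int),
+ // contains_flag, // e.g. fn contains_flag(event: &libc::c_int, flag: &libc::c_int) -> bool
+ // libc::EPOLLIN,
+ // libc::EPOLLOUT,
+ // );
+ //
+ // This expands to a newtype `EventDetails` whose `Debug` implementation
+ // prints the names of all flags for which `contains_flag` returns true,
+ // separated by `|`, or "(empty)" when none match.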
+}
+
+#[cfg(unix)]
+cfg_os_poll! {
+ mod unix;
+ pub use self::unix::*;
+}
+
+#[cfg(windows)]
+cfg_os_poll! {
+ mod windows;
+ pub use self::windows::*;
+}
+
+cfg_not_os_poll! {
+ mod shell;
+ pub(crate) use self::shell::*;
+
+ #[cfg(unix)]
+ cfg_any_os_ext! {
+ mod unix;
+ pub use self::unix::SourceFd;
+ }
+
+ #[cfg(unix)]
+ cfg_net! {
+ pub use self::unix::SocketAddr;
+ }
+}
--- /dev/null
+macro_rules! os_required {
+ () => {
+ panic!("mio must be compiled with `os-poll` to run.")
+ };
+}
+
+mod selector;
+pub(crate) use self::selector::{event, Event, Events, Selector};
+
+mod waker;
+pub(crate) use self::waker::Waker;
+
+cfg_net! {
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+ #[cfg(unix)]
+ pub(crate) mod uds;
+}
+
+cfg_io_source! {
+ use std::io;
+ #[cfg(windows)]
+ use std::os::windows::io::RawSocket;
+
+ #[cfg(windows)]
+ use crate::{Registry, Token, Interest};
+
+ pub(crate) struct IoSourceState;
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState
+ }
+
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ // We don't hold state, so we can just call the function and
+ // return.
+ f(io)
+ }
+ }
+
+ #[cfg(windows)]
+ impl IoSourceState {
+ pub fn register(
+ &mut self,
+ _: &Registry,
+ _: Token,
+ _: Interest,
+ _: RawSocket,
+ ) -> io::Result<()> {
+ os_required!()
+ }
+
+ pub fn reregister(
+ &mut self,
+ _: &Registry,
+ _: Token,
+ _: Interest,
+ ) -> io::Result<()> {
+ os_required!()
+ }
+
+ pub fn deregister(&mut self) -> io::Result<()> {
+ os_required!()
+ }
+ }
+}
--- /dev/null
+use std::io;
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::time::Duration;
+
+pub type Event = usize;
+
+pub type Events = Vec<Event>;
+
+#[derive(Debug)]
+pub struct Selector {}
+
+impl Selector {
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ os_required!();
+ }
+
+ pub fn select(&self, _: &mut Events, _: Option<Duration>) -> io::Result<()> {
+ os_required!();
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn register_waker(&self) -> bool {
+ os_required!();
+ }
+}
+
+#[cfg(unix)]
+cfg_any_os_ext! {
+ use crate::{Interest, Token};
+
+ impl Selector {
+ pub fn register(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> {
+ os_required!();
+ }
+
+ pub fn reregister(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> {
+ os_required!();
+ }
+
+ pub fn deregister(&self, _: RawFd) -> io::Result<()> {
+ os_required!();
+ }
+ }
+}
+
+cfg_io_source! {
+ #[cfg(debug_assertions)]
+ impl Selector {
+ pub fn id(&self) -> usize {
+ os_required!();
+ }
+ }
+}
+
+#[cfg(unix)]
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ os_required!()
+ }
+}
+
+#[allow(clippy::trivially_copy_pass_by_ref)]
+pub mod event {
+ use crate::sys::Event;
+ use crate::Token;
+ use std::fmt;
+
+ pub fn token(_: &Event) -> Token {
+ os_required!();
+ }
+
+ pub fn is_readable(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_writable(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_error(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_read_closed(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_write_closed(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_priority(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_aio(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn is_lio(_: &Event) -> bool {
+ os_required!();
+ }
+
+ pub fn debug_details(_: &mut fmt::Formatter<'_>, _: &Event) -> fmt::Result {
+ os_required!();
+ }
+}
--- /dev/null
+use std::io;
+use std::net::{self, SocketAddr};
+
+pub(crate) fn new_for_addr(_: SocketAddr) -> io::Result<i32> {
+ os_required!();
+}
+
+pub(crate) fn bind(_: &net::TcpListener, _: SocketAddr) -> io::Result<()> {
+ os_required!();
+}
+
+pub(crate) fn connect(_: &net::TcpStream, _: SocketAddr) -> io::Result<()> {
+ os_required!();
+}
+
+pub(crate) fn listen(_: &net::TcpListener, _: u32) -> io::Result<()> {
+ os_required!();
+}
+
+#[cfg(unix)]
+pub(crate) fn set_reuseaddr(_: &net::TcpListener, _: bool) -> io::Result<()> {
+ os_required!();
+}
+
+pub(crate) fn accept(_: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ os_required!();
+}
--- /dev/null
+use std::io;
+use std::net::{self, SocketAddr};
+
+pub fn bind(_: SocketAddr) -> io::Result<net::UdpSocket> {
+ os_required!()
+}
+
+pub(crate) fn only_v6(_: &net::UdpSocket) -> io::Result<bool> {
+ os_required!()
+}
--- /dev/null
+pub(crate) mod datagram {
+ use crate::net::SocketAddr;
+ use std::io;
+ use std::os::unix::net;
+ use std::path::Path;
+
+ pub(crate) fn bind(_: &Path) -> io::Result<net::UnixDatagram> {
+ os_required!()
+ }
+
+ pub(crate) fn unbound() -> io::Result<net::UnixDatagram> {
+ os_required!()
+ }
+
+ pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> {
+ os_required!()
+ }
+
+ pub(crate) fn local_addr(_: &net::UnixDatagram) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+
+ pub(crate) fn peer_addr(_: &net::UnixDatagram) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+
+ pub(crate) fn recv_from(
+ _: &net::UnixDatagram,
+ _: &mut [u8],
+ ) -> io::Result<(usize, SocketAddr)> {
+ os_required!()
+ }
+}
+
+pub(crate) mod listener {
+ use crate::net::{SocketAddr, UnixStream};
+ use std::io;
+ use std::os::unix::net;
+ use std::path::Path;
+
+ pub(crate) fn bind(_: &Path) -> io::Result<net::UnixListener> {
+ os_required!()
+ }
+
+ pub(crate) fn accept(_: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> {
+ os_required!()
+ }
+
+ pub(crate) fn local_addr(_: &net::UnixListener) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+}
+
+pub(crate) mod stream {
+ use crate::net::SocketAddr;
+ use std::io;
+ use std::os::unix::net;
+ use std::path::Path;
+
+ pub(crate) fn connect(_: &Path) -> io::Result<net::UnixStream> {
+ os_required!()
+ }
+
+ pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> {
+ os_required!()
+ }
+
+ pub(crate) fn local_addr(_: &net::UnixStream) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+
+ pub(crate) fn peer_addr(_: &net::UnixStream) -> io::Result<SocketAddr> {
+ os_required!()
+ }
+}
--- /dev/null
+use crate::sys::Selector;
+use crate::Token;
+use std::io;
+
+#[derive(Debug)]
+pub struct Waker {}
+
+impl Waker {
+ pub fn new(_: &Selector, _: Token) -> io::Result<Waker> {
+ os_required!();
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ os_required!();
+ }
+}
--- /dev/null
+/// Helper macro to execute a system call that returns an `io::Result`.
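+///
+/// For example, `syscall!(close(fd))` expands to an unsafe call to
+/// `libc::close(fd)` and converts a `-1` return value into
+/// `Err(std::io::Error::last_os_error())`, otherwise returning the call's
+/// result wrapped in `Ok`.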
+//
+// The macro must be defined before any module that uses it.
+#[allow(unused_macros)]
+macro_rules! syscall {
+ ($fn: ident ( $($arg: expr),* $(,)* ) ) => {{
+ let res = unsafe { libc::$fn($($arg, )*) };
+ if res == -1 {
+ Err(std::io::Error::last_os_error())
+ } else {
+ Ok(res)
+ }
+ }};
+}
+
+cfg_os_poll! {
+ mod selector;
+ pub(crate) use self::selector::{event, Event, Events, Selector};
+
+ mod sourcefd;
+ pub use self::sourcefd::SourceFd;
+
+ mod waker;
+ pub(crate) use self::waker::Waker;
+
+ cfg_net! {
+ mod net;
+
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+ pub(crate) mod uds;
+ pub use self::uds::SocketAddr;
+ }
+
+ cfg_io_source! {
+ use std::io;
+
+ // Neither `kqueue` nor `epoll` needs to hold any user-space state.
+ pub(crate) struct IoSourceState;
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState
+ }
+
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ // We don't hold state, so we can just call the function and
+ // return.
+ f(io)
+ }
+ }
+ }
+
+ cfg_os_ext! {
+ pub(crate) mod pipe;
+ }
+}
+
+cfg_not_os_poll! {
+ cfg_net! {
+ mod uds;
+ pub use self::uds::SocketAddr;
+ }
+
+ cfg_any_os_ext! {
+ mod sourcefd;
+ pub use self::sourcefd::SourceFd;
+ }
+}
--- /dev/null
+use std::io;
+use std::mem::size_of;
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
+
+pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: libc::c_int) -> io::Result<libc::c_int> {
+ let domain = match addr {
+ SocketAddr::V4(..) => libc::AF_INET,
+ SocketAddr::V6(..) => libc::AF_INET6,
+ };
+
+ new_socket(domain, socket_type)
+}
+
+/// Create a new non-blocking socket.
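+///
+/// For example, the TCP code in this module tree calls
+/// `new_socket(domain, libc::SOCK_STREAM)` and the Unix datagram code calls
+/// `new_socket(libc::AF_UNIX, libc::SOCK_DGRAM)`.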
+pub(crate) fn new_socket(domain: libc::c_int, socket_type: libc::c_int) -> io::Result<libc::c_int> {
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ let socket_type = socket_type | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
+
+ // Gives a warning for platforms without SOCK_NONBLOCK.
+ #[allow(clippy::let_and_return)]
+ let socket = syscall!(socket(domain, socket_type, 0));
+
+ // Mimic `libstd` and set `SO_NOSIGPIPE` on Apple systems.
+ #[cfg(target_vendor = "apple")]
+ let socket = socket.and_then(|socket| {
+ syscall!(setsockopt(
+ socket,
+ libc::SOL_SOCKET,
+ libc::SO_NOSIGPIPE,
+ &1 as *const libc::c_int as *const libc::c_void,
+ size_of::<libc::c_int>() as libc::socklen_t
+ ))
+ .map(|_| socket)
+ });
+
+ // Darwin doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC.
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ let socket = socket.and_then(|socket| {
+ // For platforms that don't support passing flags to `socket(2)`, we need
+ // to set the flags ourselves.
+ syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK))
+ .and_then(|_| syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC)).map(|_| socket))
+ .map_err(|e| {
+ // If either of the `fcntl` calls failed, ensure the socket is
+ // closed and return the error.
+ let _ = syscall!(close(socket));
+ e
+ })
+ });
+
+ socket
+}
+
+/// A type with the same memory layout as `libc::sockaddr`. Used when converting Rust-level
+/// `SocketAddr*` types into their system representation. The benefit of this specific
+/// type over `libc::sockaddr_storage` is that it is exactly as large as it
+/// needs to be and not a lot larger, and it can be initialized more cleanly from Rust.
+#[repr(C)]
+pub(crate) union SocketAddrCRepr {
+ v4: libc::sockaddr_in,
+ v6: libc::sockaddr_in6,
+}
+
+impl SocketAddrCRepr {
+ pub(crate) fn as_ptr(&self) -> *const libc::sockaddr {
+ self as *const _ as *const libc::sockaddr
+ }
+}
+
+/// Converts a Rust `SocketAddr` into the system representation.
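+///
+/// For example, a sketch mirroring how the TCP code in this module tree passes
+/// the returned pair directly to a socket call:
+///
+/// ```ignore
+/// let (raw_addr, raw_addr_length) = socket_addr(&addr);
+/// syscall!(bind(socket.as_raw_fd(), raw_addr.as_ptr(), raw_addr_length))?;
+/// ```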
+pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, libc::socklen_t) {
+ match addr {
+ SocketAddr::V4(ref addr) => {
+ // `s_addr` is stored as big-endian on all machines and the octet array is
+ // already in big-endian order, so the native-endian conversion is used to
+ // ensure the value is never byte-swapped.
+ let sin_addr = libc::in_addr {
+ s_addr: u32::from_ne_bytes(addr.ip().octets()),
+ };
+
+ let sockaddr_in = libc::sockaddr_in {
+ sin_family: libc::AF_INET as libc::sa_family_t,
+ sin_port: addr.port().to_be(),
+ sin_addr,
+ sin_zero: [0; 8],
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ sin_len: 0,
+ };
+
+ let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
+ let socklen = size_of::<libc::sockaddr_in>() as libc::socklen_t;
+ (sockaddr, socklen)
+ }
+ SocketAddr::V6(ref addr) => {
+ let sockaddr_in6 = libc::sockaddr_in6 {
+ sin6_family: libc::AF_INET6 as libc::sa_family_t,
+ sin6_port: addr.port().to_be(),
+ sin6_addr: libc::in6_addr {
+ s6_addr: addr.ip().octets(),
+ },
+ sin6_flowinfo: addr.flowinfo(),
+ sin6_scope_id: addr.scope_id(),
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ sin6_len: 0,
+ #[cfg(target_os = "illumos")]
+ __sin6_src_id: 0,
+ };
+
+ let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
+ let socklen = size_of::<libc::sockaddr_in6>() as libc::socklen_t;
+ (sockaddr, socklen)
+ }
+ }
+}
+
+/// Converts a `libc::sockaddr` compatible struct into a native Rust `SocketAddr`.
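+///
+/// For example, `accept` in the TCP code in this module tree fills a
+/// `libc::sockaddr_storage` via `accept(2)` and then calls
+/// `to_socket_addr(addr.as_ptr())` to obtain the peer's `SocketAddr`.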
+///
+/// # Safety
+///
+/// `storage` must have the `ss_family` field correctly initialized.
+/// `storage` must be initialised to a `sockaddr_in` or `sockaddr_in6`.
+pub(crate) unsafe fn to_socket_addr(
+ storage: *const libc::sockaddr_storage,
+) -> io::Result<SocketAddr> {
+ match (*storage).ss_family as libc::c_int {
+ libc::AF_INET => {
+ // Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in.
+ let addr: &libc::sockaddr_in = &*(storage as *const libc::sockaddr_in);
+ let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes());
+ let port = u16::from_be(addr.sin_port);
+ Ok(SocketAddr::V4(SocketAddrV4::new(ip, port)))
+ }
+ libc::AF_INET6 => {
+ // Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6.
+ let addr: &libc::sockaddr_in6 = &*(storage as *const libc::sockaddr_in6);
+ let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr);
+ let port = u16::from_be(addr.sin6_port);
+ Ok(SocketAddr::V6(SocketAddrV6::new(
+ ip,
+ port,
+ addr.sin6_flowinfo,
+ addr.sin6_scope_id,
+ )))
+ }
+ _ => Err(io::ErrorKind::InvalidInput.into()),
+ }
+}
--- /dev/null
+//! Unix pipe.
+//!
+//! See the [`new`] function for documentation.
+
+use std::fs::File;
+use std::io::{self, IoSlice, IoSliceMut, Read, Write};
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::process::{ChildStderr, ChildStdin, ChildStdout};
+
+use crate::io_source::IoSource;
+use crate::{event, Interest, Registry, Token};
+
+/// Create a new non-blocking Unix pipe.
+///
+/// This is a wrapper around Unix's [`pipe(2)`] system call and can be used as
+/// an inter-process or inter-thread communication channel.
+///
+/// This channel may be created before forking the process, with one end then
+/// used in each process, e.g. the parent process keeps the sending end to send
+/// commands to the child process.
+///
+/// [`pipe(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pipe.html
+///
+/// # Events
+///
+/// The [`Sender`] can be registered with [`WRITABLE`] interest to receive
+/// [writable events], and the `Receiver` with [`READABLE`] interest. Once data
+/// is written to the `Sender`, the `Receiver` will receive a [readable event].
+///
+/// In addition to those events, events will also be generated if the other side
+/// is dropped. To check whether the `Sender` was dropped, check
+/// [`is_read_closed`] on events for the `Receiver`; if it returns true the
+/// `Sender` was dropped. On the `Sender` end, check [`is_write_closed`]; if it
+/// returns true the `Receiver` was dropped. Also see the second example below.
+///
+/// [`WRITABLE`]: Interest::WRITABLE
+/// [writable events]: event::Event::is_writable
+/// [`READABLE`]: Interest::READABLE
+/// [readable event]: event::Event::is_readable
+/// [`is_read_closed`]: event::Event::is_read_closed
+/// [`is_write_closed`]: event::Event::is_write_closed
+///
+/// # Deregistering
+///
+/// Both `Sender` and `Receiver` will deregister themselves when dropped,
+/// **iff** the file descriptors are not duplicated (via [`dup(2)`]).
+///
+/// [`dup(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/dup.html
+///
+/// # Examples
+///
+/// Simple example that writes data into the sending end and read it from the
+/// receiving end.
+///
+/// ```
+/// use std::io::{self, Read, Write};
+///
+/// use mio::{Poll, Events, Interest, Token};
+/// use mio::unix::pipe;
+///
+/// // Unique tokens for the two ends of the channel.
+/// const PIPE_RECV: Token = Token(0);
+/// const PIPE_SEND: Token = Token(1);
+///
+/// # fn main() -> io::Result<()> {
+/// // Create our `Poll` instance and the `Events` container.
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(8);
+///
+/// // Create a new pipe.
+/// let (mut sender, mut receiver) = pipe::new()?;
+///
+/// // Register both ends of the channel.
+/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?;
+/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?;
+///
+/// const MSG: &[u8; 11] = b"Hello world";
+///
+/// loop {
+/// poll.poll(&mut events, None)?;
+///
+/// for event in events.iter() {
+/// match event.token() {
+/// PIPE_SEND => sender.write(MSG)
+/// .and_then(|n| if n != MSG.len() {
+/// // We'll consider a short write an error in this
+/// // example. NOTE: we can't use `write_all` with
+/// // non-blocking I/O.
+/// Err(io::ErrorKind::WriteZero.into())
+/// } else {
+/// Ok(())
+/// })?,
+/// PIPE_RECV => {
+/// let mut buf = [0; 11];
+/// let n = receiver.read(&mut buf)?;
+/// println!("received: {:?}", &buf[0..n]);
+/// assert_eq!(n, MSG.len());
+/// assert_eq!(&buf, &*MSG);
+/// return Ok(());
+/// },
+/// _ => unreachable!(),
+/// }
+/// }
+/// }
+/// # }
+/// ```
+///
+/// Example that receives an event once the `Sender` is dropped.
+///
+/// ```
+/// # use std::io;
+/// #
+/// # use mio::{Poll, Events, Interest, Token};
+/// # use mio::unix::pipe;
+/// #
+/// # const PIPE_RECV: Token = Token(0);
+/// # const PIPE_SEND: Token = Token(1);
+/// #
+/// # fn main() -> io::Result<()> {
+/// // Same setup as in the example above.
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(8);
+///
+/// let (mut sender, mut receiver) = pipe::new()?;
+///
+/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?;
+/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?;
+///
+/// // Drop the sender.
+/// drop(sender);
+///
+/// poll.poll(&mut events, None)?;
+///
+/// for event in events.iter() {
+/// match event.token() {
+/// PIPE_RECV if event.is_read_closed() => {
+/// // Detected that the sender was dropped.
+/// println!("Sender dropped!");
+/// return Ok(());
+/// },
+/// _ => unreachable!(),
+/// }
+/// }
+/// # unreachable!();
+/// # }
+/// ```
+pub fn new() -> io::Result<(Sender, Receiver)> {
+ let mut fds: [RawFd; 2] = [-1, -1];
+
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "illumos",
+ ))]
+ unsafe {
+ if libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC | libc::O_NONBLOCK) != 0 {
+ return Err(io::Error::last_os_error());
+ }
+ }
+
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ unsafe {
+ // For platforms that don't have `pipe2(2)` we need to manually set the
+ // correct flags on the file descriptor.
+ if libc::pipe(fds.as_mut_ptr()) != 0 {
+ return Err(io::Error::last_os_error());
+ }
+
+ for fd in &fds {
+ if libc::fcntl(*fd, libc::F_SETFL, libc::O_NONBLOCK) != 0
+ || libc::fcntl(*fd, libc::F_SETFD, libc::FD_CLOEXEC) != 0
+ {
+ let err = io::Error::last_os_error();
+ // Don't leak file descriptors. Can't handle error though.
+ let _ = libc::close(fds[0]);
+ let _ = libc::close(fds[1]);
+ return Err(err);
+ }
+ }
+ }
+
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "illumos",
+ )))]
+ compile_error!("unsupported target for `mio::unix::pipe`");
+
+ // Safety: we just initialised the `fds` above.
+ let r = unsafe { Receiver::from_raw_fd(fds[0]) };
+ let w = unsafe { Sender::from_raw_fd(fds[1]) };
+ Ok((w, r))
+}
+
+/// Sending end of a Unix pipe.
+///
+/// See [`new`] for documentation, including examples.
+#[derive(Debug)]
+pub struct Sender {
+ inner: IoSource<File>,
+}
+
+impl Sender {
+ /// Set the `Sender` into or out of non-blocking mode.
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ set_nonblocking(self.inner.as_raw_fd(), nonblocking)
+ }
+}
+
+impl event::Source for Sender {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl Write for Sender {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|sender| (&*sender).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|sender| (&*sender).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|sender| (&*sender).flush())
+ }
+}
+
+impl Write for &Sender {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.do_io(|sender| (&*sender).write(buf))
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|sender| (&*sender).write_vectored(bufs))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.do_io(|sender| (&*sender).flush())
+ }
+}
+
+/// # Notes
+///
+/// The underlying pipe is **not** set to non-blocking.
+impl From<ChildStdin> for Sender {
+ fn from(stdin: ChildStdin) -> Sender {
+ // Safety: `ChildStdin` is guaranteed to be a valid file descriptor.
+ unsafe { Sender::from_raw_fd(stdin.into_raw_fd()) }
+ }
+}
+
+impl FromRawFd for Sender {
+ unsafe fn from_raw_fd(fd: RawFd) -> Sender {
+ Sender {
+ inner: IoSource::new(File::from_raw_fd(fd)),
+ }
+ }
+}
+
+impl AsRawFd for Sender {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for Sender {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+/// Receiving end of a Unix pipe.
+///
+/// See [`new`] for documentation, including examples.
+#[derive(Debug)]
+pub struct Receiver {
+ inner: IoSource<File>,
+}
+
+impl Receiver {
+ /// Set the `Receiver` into or out of non-blocking mode.
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ set_nonblocking(self.inner.as_raw_fd(), nonblocking)
+ }
+}
+
+impl event::Source for Receiver {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.register(registry, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(registry, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ self.inner.deregister(registry)
+ }
+}
+
+impl Read for Receiver {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|receiver| (&*receiver).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|receiver| (&*receiver).read_vectored(bufs))
+ }
+}
+
+impl Read for &Receiver {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.do_io(|receiver| (&*receiver).read(buf))
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.do_io(|receiver| (&*receiver).read_vectored(bufs))
+ }
+}
+
+/// # Notes
+///
+/// The underlying pipe is **not** set to non-blocking.
+impl From<ChildStdout> for Receiver {
+ fn from(stdout: ChildStdout) -> Receiver {
+ // Safety: `ChildStdout` is guaranteed to be a valid file descriptor.
+ unsafe { Receiver::from_raw_fd(stdout.into_raw_fd()) }
+ }
+}
+
+/// # Notes
+///
+/// The underlying pipe is **not** set to non-blocking.
+impl From<ChildStderr> for Receiver {
+ fn from(stderr: ChildStderr) -> Receiver {
+ // Safety: `ChildStderr` is guaranteed to be a valid file descriptor.
+ unsafe { Receiver::from_raw_fd(stderr.into_raw_fd()) }
+ }
+}
+
+impl FromRawFd for Receiver {
+ unsafe fn from_raw_fd(fd: RawFd) -> Receiver {
+ Receiver {
+ inner: IoSource::new(File::from_raw_fd(fd)),
+ }
+ }
+}
+
+impl AsRawFd for Receiver {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for Receiver {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_inner().into_raw_fd()
+ }
+}
+
+#[cfg(not(target_os = "illumos"))]
+fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> {
+ let value = nonblocking as libc::c_int;
+ if unsafe { libc::ioctl(fd, libc::FIONBIO, &value) } == -1 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+}
+
+#[cfg(target_os = "illumos")]
+fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> {
+ let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) };
+ if flags < 0 {
+ return Err(io::Error::last_os_error());
+ }
+
+ let nflags = if nonblocking {
+ flags | libc::O_NONBLOCK
+ } else {
+ flags & !libc::O_NONBLOCK
+ };
+
+ if flags != nflags {
+ if unsafe { libc::fcntl(fd, libc::F_SETFL, nflags) } < 0 {
+ return Err(io::Error::last_os_error());
+ }
+ }
+
+ Ok(())
+}
--- /dev/null
+use crate::{Interest, Token};
+
+use libc::{EPOLLET, EPOLLIN, EPOLLOUT, EPOLLRDHUP};
+use log::error;
+use std::os::unix::io::{AsRawFd, RawFd};
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::time::Duration;
+use std::{cmp, i32, io, ptr};
+
+/// Unique id for use as `SelectorId`.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
+
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ ep: RawFd,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ // According to libuv, `EPOLL_CLOEXEC` is not defined on Android API <
+ // 21. But `EPOLL_CLOEXEC` is an alias for `O_CLOEXEC` on that platform,
+ // so we use it instead.
+ #[cfg(target_os = "android")]
+ let flag = libc::O_CLOEXEC;
+ #[cfg(not(target_os = "android"))]
+ let flag = libc::EPOLL_CLOEXEC;
+
+ syscall!(epoll_create1(flag)).map(|ep| Selector {
+ #[cfg(debug_assertions)]
+ id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ ep,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(false),
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ syscall!(fcntl(self.ep, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|ep| Selector {
+ // It's the same selector, so we use the same id.
+ #[cfg(debug_assertions)]
+ id: self.id,
+ ep,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)),
+ })
+ }
+
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ // A bug in kernels < 2.6.37 makes timeouts larger than LONG_MAX / CONFIG_HZ
+ // (approx. 30 minutes with CONFIG_HZ=1200) effectively infinite on 32 bits
+ // architectures. The magic number is the same constant used by libuv.
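+ // (For reference: 2_147_483_647 / 1_200 ≈ 1_789_569 milliseconds, i.e. just
+ // under 30 minutes.)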
+ #[cfg(target_pointer_width = "32")]
+ const MAX_SAFE_TIMEOUT: u128 = 1789569;
+ #[cfg(not(target_pointer_width = "32"))]
+ const MAX_SAFE_TIMEOUT: u128 = libc::c_int::max_value() as u128;
+
+ let timeout = timeout
+ .map(|to| cmp::min(to.as_millis(), MAX_SAFE_TIMEOUT) as libc::c_int)
+ .unwrap_or(-1);
+
+ events.clear();
+ syscall!(epoll_wait(
+ self.ep,
+ events.as_mut_ptr(),
+ events.capacity() as i32,
+ timeout,
+ ))
+ .map(|n_events| {
+ // This is safe because `epoll_wait` ensures that `n_events` are
+ // assigned.
+ unsafe { events.set_len(n_events as usize) };
+ })
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let mut event = libc::epoll_event {
+ events: interests_to_epoll(interests),
+ u64: usize::from(token) as u64,
+ };
+
+ syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_ADD, fd, &mut event)).map(|_| ())
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let mut event = libc::epoll_event {
+ events: interests_to_epoll(interests),
+ u64: usize::from(token) as u64,
+ };
+
+ syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_MOD, fd, &mut event)).map(|_| ())
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_DEL, fd, ptr::null_mut())).map(|_| ())
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn register_waker(&self) -> bool {
+ self.has_waker.swap(true, Ordering::AcqRel)
+ }
+}
+
+cfg_io_source! {
+ impl Selector {
+ #[cfg(debug_assertions)]
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.ep
+ }
+}
+
+impl Drop for Selector {
+ fn drop(&mut self) {
+ if let Err(err) = syscall!(close(self.ep)) {
+ error!("error closing epoll: {}", err);
+ }
+ }
+}
+
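+// Convert Mio's `Interest` into an epoll event mask. For example,
+// `Interest::READABLE | Interest::WRITABLE` maps to
+// `EPOLLET | EPOLLIN | EPOLLRDHUP | EPOLLOUT`; `EPOLLET` (edge-triggered mode)
+// is always included.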
+fn interests_to_epoll(interests: Interest) -> u32 {
+ let mut kind = EPOLLET;
+
+ if interests.is_readable() {
+ kind = kind | EPOLLIN | EPOLLRDHUP;
+ }
+
+ if interests.is_writable() {
+ kind |= EPOLLOUT;
+ }
+
+ kind as u32
+}
+
+pub type Event = libc::epoll_event;
+pub type Events = Vec<Event>;
+
+pub mod event {
+ use std::fmt;
+
+ use crate::sys::Event;
+ use crate::Token;
+
+ pub fn token(event: &Event) -> Token {
+ Token(event.u64 as usize)
+ }
+
+ pub fn is_readable(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLIN) != 0
+ || (event.events as libc::c_int & libc::EPOLLPRI) != 0
+ }
+
+ pub fn is_writable(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLOUT) != 0
+ }
+
+ pub fn is_error(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLERR) != 0
+ }
+
+ pub fn is_read_closed(event: &Event) -> bool {
+ // Both halves of the socket have closed
+ event.events as libc::c_int & libc::EPOLLHUP != 0
+ // Socket has received FIN or called shutdown(SHUT_RD)
+ || (event.events as libc::c_int & libc::EPOLLIN != 0
+ && event.events as libc::c_int & libc::EPOLLRDHUP != 0)
+ }
+
+ pub fn is_write_closed(event: &Event) -> bool {
+ // Both halves of the socket have closed
+ event.events as libc::c_int & libc::EPOLLHUP != 0
+ // Unix pipe write end has closed
+ || (event.events as libc::c_int & libc::EPOLLOUT != 0
+ && event.events as libc::c_int & libc::EPOLLERR != 0)
+ // The other side (read end) of a Unix pipe has closed.
+ || event.events as libc::c_int == libc::EPOLLERR
+ }
+
+ pub fn is_priority(event: &Event) -> bool {
+ (event.events as libc::c_int & libc::EPOLLPRI) != 0
+ }
+
+ pub fn is_aio(_: &Event) -> bool {
+ // Not supported in the kernel, only in libc.
+ false
+ }
+
+ pub fn is_lio(_: &Event) -> bool {
+ // Not supported.
+ false
+ }
+
+ pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_events(got: &u32, want: &libc::c_int) -> bool {
+ (*got as libc::c_int & want) != 0
+ }
+ debug_detail!(
+ EventsDetails(u32),
+ check_events,
+ libc::EPOLLIN,
+ libc::EPOLLPRI,
+ libc::EPOLLOUT,
+ libc::EPOLLRDNORM,
+ libc::EPOLLRDBAND,
+ libc::EPOLLWRNORM,
+ libc::EPOLLWRBAND,
+ libc::EPOLLMSG,
+ libc::EPOLLERR,
+ libc::EPOLLHUP,
+ libc::EPOLLET,
+ libc::EPOLLRDHUP,
+ libc::EPOLLONESHOT,
+ #[cfg(target_os = "linux")]
+ libc::EPOLLEXCLUSIVE,
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ libc::EPOLLWAKEUP,
+ libc::EPOLL_CLOEXEC,
+ );
+
+ // Can't reference fields in packed structures.
+ let e_u64 = event.u64;
+ f.debug_struct("epoll_event")
+ .field("events", &EventsDetails(event.events))
+ .field("u64", &e_u64)
+ .finish()
+ }
+}
+
+#[cfg(target_os = "android")]
+#[test]
+fn assert_close_on_exec_flag() {
+ // This assertion needs to be true for `Selector::new`.
+ assert_eq!(libc::O_CLOEXEC, libc::EPOLL_CLOEXEC);
+}
--- /dev/null
+use crate::{Interest, Token};
+use log::error;
+use std::mem::MaybeUninit;
+use std::ops::{Deref, DerefMut};
+use std::os::unix::io::{AsRawFd, RawFd};
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::time::Duration;
+use std::{cmp, io, ptr, slice};
+
+/// Unique id for use as `SelectorId`.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
+
+// Type of the `nchanges` and `nevents` parameters in the `kevent` function.
+#[cfg(not(target_os = "netbsd"))]
+type Count = libc::c_int;
+#[cfg(target_os = "netbsd")]
+type Count = libc::size_t;
+
+// Type of the `filter` field in the `kevent` structure.
+#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))]
+type Filter = libc::c_short;
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+type Filter = i16;
+#[cfg(target_os = "netbsd")]
+type Filter = u32;
+
+// Type of the `flags` field in the `kevent` structure.
+#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))]
+type Flags = libc::c_ushort;
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+type Flags = u16;
+#[cfg(target_os = "netbsd")]
+type Flags = u32;
+
+// Type of the `data` field in the `kevent` structure.
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+))]
+type Data = libc::intptr_t;
+#[cfg(any(target_os = "netbsd", target_os = "openbsd"))]
+type Data = i64;
+
+// Type of the `udata` field in the `kevent` structure.
+#[cfg(not(target_os = "netbsd"))]
+type UData = *mut libc::c_void;
+#[cfg(target_os = "netbsd")]
+type UData = libc::intptr_t;
+
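+// Helper macro to construct a `libc::kevent` from an identifier, filter,
+// flags and user data; the `fflags` and `data` fields are always zeroed.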
+macro_rules! kevent {
+ ($id: expr, $filter: expr, $flags: expr, $data: expr) => {
+ libc::kevent {
+ ident: $id as libc::uintptr_t,
+ filter: $filter as Filter,
+ flags: $flags,
+ fflags: 0,
+ data: 0,
+ udata: $data as UData,
+ }
+ };
+}
+
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ kq: RawFd,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ syscall!(kqueue())
+ .and_then(|kq| syscall!(fcntl(kq, libc::F_SETFD, libc::FD_CLOEXEC)).map(|_| kq))
+ .map(|kq| Selector {
+ #[cfg(debug_assertions)]
+ id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ kq,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(false),
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ syscall!(fcntl(self.kq, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|kq| Selector {
+ // It's the same selector, so we use the same id.
+ #[cfg(debug_assertions)]
+ id: self.id,
+ kq,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)),
+ })
+ }
+
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ let timeout = timeout.map(|to| libc::timespec {
+ tv_sec: cmp::min(to.as_secs(), libc::time_t::max_value() as u64) as libc::time_t,
+ // `Duration::subsec_nanos` is guaranteed to be less than one
+ // billion (the number of nanoseconds in a second), making the
+ // cast to i32 safe. The cast itself is needed for platforms
+ // where C's long is only 32 bits.
+ tv_nsec: libc::c_long::from(to.subsec_nanos() as i32),
+ });
+ let timeout = timeout
+ .as_ref()
+ .map(|s| s as *const _)
+ .unwrap_or(ptr::null_mut());
+
+ events.clear();
+ syscall!(kevent(
+ self.kq,
+ ptr::null(),
+ 0,
+ events.as_mut_ptr(),
+ events.capacity() as Count,
+ timeout,
+ ))
+ .map(|n_events| {
+ // This is safe because `kevent` ensures that `n_events` are
+ // assigned.
+ unsafe { events.set_len(n_events as usize) };
+ })
+ }
+
+ pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let flags = libc::EV_CLEAR | libc::EV_RECEIPT | libc::EV_ADD;
+ // At most we need two changes, but maybe we only need one.
+ let mut changes: [MaybeUninit<libc::kevent>; 2] =
+ [MaybeUninit::uninit(), MaybeUninit::uninit()];
+ let mut n_changes = 0;
+
+ if interests.is_writable() {
+ let kevent = kevent!(fd, libc::EVFILT_WRITE, flags, token.0);
+ changes[n_changes] = MaybeUninit::new(kevent);
+ n_changes += 1;
+ }
+
+ if interests.is_readable() {
+ let kevent = kevent!(fd, libc::EVFILT_READ, flags, token.0);
+ changes[n_changes] = MaybeUninit::new(kevent);
+ n_changes += 1;
+ }
+
+ // Older versions of macOS (OS X 10.11 and 10.10 have been witnessed)
+ // can return EPIPE when registering a pipe file descriptor where the
+ // other end has already disappeared. For example, code that creates a
+ // pipe, closes one end of it, and then registers the other end will
+ // see an EPIPE returned from `register`.
+ //
+ // It also turns out that kevent will still report events on the file
+ // descriptor, telling us that it's readable/hup at least after we've
+ // done this registration. As a result we just ignore `EPIPE` here
+ // instead of propagating it.
+ //
+ // More info can be found at tokio-rs/mio#582.
+ let changes = unsafe {
+ // This is safe because we ensure that at least `n_changes` are in
+ // the array.
+ slice::from_raw_parts_mut(changes[0].as_mut_ptr(), n_changes)
+ };
+ kevent_register(self.kq, changes, &[libc::EPIPE as Data])
+ }
+
+ pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
+ let flags = libc::EV_CLEAR | libc::EV_RECEIPT;
+ let write_flags = if interests.is_writable() {
+ flags | libc::EV_ADD
+ } else {
+ flags | libc::EV_DELETE
+ };
+ let read_flags = if interests.is_readable() {
+ flags | libc::EV_ADD
+ } else {
+ flags | libc::EV_DELETE
+ };
+
+ let mut changes: [libc::kevent; 2] = [
+ kevent!(fd, libc::EVFILT_WRITE, write_flags, token.0),
+ kevent!(fd, libc::EVFILT_READ, read_flags, token.0),
+ ];
+
+ // Since there is no way to check with which interests the fd was
+ // registered, we modify both the read and write filters, adding each when
+ // required and removing it otherwise, ignoring the ENOENT error when it
+ // comes up. The ENOENT error informs us that a filter we're trying to
+ // remove wasn't there in the first place, but we don't really care since
+ // our goal is accomplished.
+ //
+ // For the explanation of ignoring `EPIPE` see `register`.
+ kevent_register(
+ self.kq,
+ &mut changes,
+ &[libc::ENOENT as Data, libc::EPIPE as Data],
+ )
+ }
+
+ pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
+ let flags = libc::EV_DELETE | libc::EV_RECEIPT;
+ let mut changes: [libc::kevent; 2] = [
+ kevent!(fd, libc::EVFILT_WRITE, flags, 0),
+ kevent!(fd, libc::EVFILT_READ, flags, 0),
+ ];
+
+ // Since there is no way to check with which interests the fd was
+ // registered, we remove both filters (read and write) and ignore
+ // the ENOENT error when it comes up. The ENOENT error informs us that
+ // the filter wasn't there in the first place, but we don't really care
+ // about that since our goal is to remove it.
+ kevent_register(self.kq, &mut changes, &[libc::ENOENT as Data])
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn register_waker(&self) -> bool {
+ self.has_waker.swap(true, Ordering::AcqRel)
+ }
+
+ // Used by `Waker`.
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ pub fn setup_waker(&self, token: Token) -> io::Result<()> {
+ // First attempt to accept user space notifications.
+ let mut kevent = kevent!(
+ 0,
+ libc::EVFILT_USER,
+ libc::EV_ADD | libc::EV_CLEAR | libc::EV_RECEIPT,
+ token.0
+ );
+
+ syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| {
+ if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 {
+ Err(io::Error::from_raw_os_error(kevent.data as i32))
+ } else {
+ Ok(())
+ }
+ })
+ }
+
+ // Used by `Waker`.
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ pub fn wake(&self, token: Token) -> io::Result<()> {
+ let mut kevent = kevent!(
+ 0,
+ libc::EVFILT_USER,
+ libc::EV_ADD | libc::EV_RECEIPT,
+ token.0
+ );
+ kevent.fflags = libc::NOTE_TRIGGER;
+
+ syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| {
+ if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 {
+ Err(io::Error::from_raw_os_error(kevent.data as i32))
+ } else {
+ Ok(())
+ }
+ })
+ }
+}
+
+/// Register `changes` with the kqueue `kq`.
+fn kevent_register(
+ kq: RawFd,
+ changes: &mut [libc::kevent],
+ ignored_errors: &[Data],
+) -> io::Result<()> {
+ syscall!(kevent(
+ kq,
+ changes.as_ptr(),
+ changes.len() as Count,
+ changes.as_mut_ptr(),
+ changes.len() as Count,
+ ptr::null(),
+ ))
+ .map(|_| ())
+ .or_else(|err| {
+ // According to the manual page of FreeBSD: "When kevent() call fails
+ // with EINTR error, all changes in the changelist have been applied",
+ // so we can safely ignore it.
+ if err.raw_os_error() == Some(libc::EINTR) {
+ Ok(())
+ } else {
+ Err(err)
+ }
+ })
+ .and_then(|()| check_errors(changes, ignored_errors))
+}
+
+/// Check all events for possible errors; returns the first error found.
+fn check_errors(events: &[libc::kevent], ignored_errors: &[Data]) -> io::Result<()> {
+ for event in events {
+ // We can't use references to packed structures (in checking the ignored
+ // errors), so we need to copy the data out before use.
+ let data = event.data;
+ // Check for the error flag, the actual error will be in the `data`
+ // field.
+ if (event.flags & libc::EV_ERROR != 0) && data != 0 && !ignored_errors.contains(&data) {
+ return Err(io::Error::from_raw_os_error(data as i32));
+ }
+ }
+ Ok(())
+}
+
+cfg_io_source! {
+ #[cfg(debug_assertions)]
+ impl Selector {
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+impl AsRawFd for Selector {
+ fn as_raw_fd(&self) -> RawFd {
+ self.kq
+ }
+}
+
+impl Drop for Selector {
+ fn drop(&mut self) {
+ if let Err(err) = syscall!(close(self.kq)) {
+ error!("error closing kqueue: {}", err);
+ }
+ }
+}
+
+pub type Event = libc::kevent;
+pub struct Events(Vec<libc::kevent>);
+
+impl Events {
+ pub fn with_capacity(capacity: usize) -> Events {
+ Events(Vec::with_capacity(capacity))
+ }
+}
+
+impl Deref for Events {
+ type Target = Vec<libc::kevent>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for Events {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+// `Events` cannot derive `Send` or `Sync` because of the
+// `udata: *mut ::c_void` field in `libc::kevent`. However, `Events`'s public
+// API treats the `udata` field as a `uintptr_t` which is `Send`. `Sync` is
+// safe because with a `events: &Events` value, the only access to the `udata`
+// field is through `fn token(event: &Event)` which cannot mutate the field.
+unsafe impl Send for Events {}
+unsafe impl Sync for Events {}
+
+pub mod event {
+ use std::fmt;
+
+ use crate::sys::Event;
+ use crate::Token;
+
+ use super::{Filter, Flags};
+
+ pub fn token(event: &Event) -> Token {
+ Token(event.udata as usize)
+ }
+
+ pub fn is_readable(event: &Event) -> bool {
+ event.filter == libc::EVFILT_READ || {
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ // Used by the `Waker`. On platforms that use `eventfd` or a Unix
+ // pipe it will emit a readable event, so we fake that here as
+ // well.
+ {
+ event.filter == libc::EVFILT_USER
+ }
+ #[cfg(not(any(target_os = "freebsd", target_os = "ios", target_os = "macos")))]
+ {
+ false
+ }
+ }
+ }
+
+ pub fn is_writable(event: &Event) -> bool {
+ event.filter == libc::EVFILT_WRITE
+ }
+
+ pub fn is_error(event: &Event) -> bool {
+ (event.flags & libc::EV_ERROR) != 0 ||
+ // When the read end of the socket is closed, EV_EOF is set on
+ // flags, and fflags contains the error if there is one.
+ (event.flags & libc::EV_EOF) != 0 && event.fflags != 0
+ }
+
+ pub fn is_read_closed(event: &Event) -> bool {
+ event.filter == libc::EVFILT_READ && event.flags & libc::EV_EOF != 0
+ }
+
+ pub fn is_write_closed(event: &Event) -> bool {
+ event.filter == libc::EVFILT_WRITE && event.flags & libc::EV_EOF != 0
+ }
+
+ pub fn is_priority(_: &Event) -> bool {
+ // kqueue doesn't have priority indicators.
+ false
+ }
+
+ #[allow(unused_variables)] // `event` is not used on some platforms.
+ pub fn is_aio(event: &Event) -> bool {
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ {
+ event.filter == libc::EVFILT_AIO
+ }
+ #[cfg(not(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ )))]
+ {
+ false
+ }
+ }
+
+ #[allow(unused_variables)] // `event` is only used on FreeBSD.
+ pub fn is_lio(event: &Event) -> bool {
+ #[cfg(target_os = "freebsd")]
+ {
+ event.filter == libc::EVFILT_LIO
+ }
+ #[cfg(not(target_os = "freebsd"))]
+ {
+ false
+ }
+ }
+
+ pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ debug_detail!(
+ FilterDetails(Filter),
+ PartialEq::eq,
+ libc::EVFILT_READ,
+ libc::EVFILT_WRITE,
+ libc::EVFILT_AIO,
+ libc::EVFILT_VNODE,
+ libc::EVFILT_PROC,
+ libc::EVFILT_SIGNAL,
+ libc::EVFILT_TIMER,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_PROCDESC,
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "dragonfly",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::EVFILT_FS,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_LIO,
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "dragonfly",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::EVFILT_USER,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_SENDFILE,
+ #[cfg(target_os = "freebsd")]
+ libc::EVFILT_EMPTY,
+ #[cfg(target_os = "dragonfly")]
+ libc::EVFILT_EXCEPT,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EVFILT_MACHPORT,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EVFILT_VM,
+ );
+
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_flag(got: &Flags, want: &Flags) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FlagsDetails(Flags),
+ check_flag,
+ libc::EV_ADD,
+ libc::EV_DELETE,
+ libc::EV_ENABLE,
+ libc::EV_DISABLE,
+ libc::EV_ONESHOT,
+ libc::EV_CLEAR,
+ libc::EV_RECEIPT,
+ libc::EV_DISPATCH,
+ #[cfg(target_os = "freebsd")]
+ libc::EV_DROP,
+ libc::EV_FLAG1,
+ libc::EV_ERROR,
+ libc::EV_EOF,
+ libc::EV_SYSFLAGS,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EV_FLAG0,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EV_POLL,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::EV_OOBAND,
+ #[cfg(target_os = "dragonfly")]
+ libc::EV_NODATA,
+ );
+
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_fflag(got: &u32, want: &u32) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FflagsDetails(u32),
+ check_fflag,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_TRIGGER,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFNOP,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFAND,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFOR,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFCOPY,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFCTRLMASK,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ libc::NOTE_FFLAGSMASK,
+ libc::NOTE_LOWAT,
+ libc::NOTE_DELETE,
+ libc::NOTE_WRITE,
+ #[cfg(target_os = "dragonfly")]
+ libc::NOTE_OOB,
+ #[cfg(target_os = "openbsd")]
+ libc::NOTE_EOF,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXTEND,
+ libc::NOTE_ATTRIB,
+ libc::NOTE_LINK,
+ libc::NOTE_RENAME,
+ libc::NOTE_REVOKE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_NONE,
+ #[cfg(any(target_os = "openbsd"))]
+ libc::NOTE_TRUNCATE,
+ libc::NOTE_EXIT,
+ libc::NOTE_FORK,
+ libc::NOTE_EXEC,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_SIGNAL,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXITSTATUS,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_DETAIL,
+ libc::NOTE_PDATAMASK,
+ libc::NOTE_PCTRLMASK,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ libc::NOTE_TRACK,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ libc::NOTE_TRACKERR,
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ libc::NOTE_CHILD,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_DETAIL_MASK,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_DECRYPTFAIL,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_MEMORY,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_EXIT_CSERROR,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_VM_PRESSURE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_VM_PRESSURE_TERMINATE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_VM_PRESSURE_SUDDEN_TERMINATE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_VM_ERROR,
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ libc::NOTE_SECONDS,
+ #[cfg(any(target_os = "freebsd"))]
+ libc::NOTE_MSECONDS,
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ libc::NOTE_USECONDS,
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ libc::NOTE_NSECONDS,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+ libc::NOTE_ABSOLUTE,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_LEEWAY,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_CRITICAL,
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ libc::NOTE_BACKGROUND,
+ );
+
+ // Can't reference fields in packed structures.
+ let ident = event.ident;
+ let data = event.data;
+ let udata = event.udata;
+ f.debug_struct("kevent")
+ .field("ident", &ident)
+ .field("filter", &FilterDetails(event.filter))
+ .field("flags", &FlagsDetails(event.flags))
+ .field("fflags", &FflagsDetails(event.fflags))
+ .field("data", &data)
+ .field("udata", &udata)
+ .finish()
+ }
+}
+
+#[test]
+#[cfg(feature = "os-ext")]
+fn does_not_register_rw() {
+ use crate::unix::SourceFd;
+ use crate::{Poll, Token};
+
+ let kq = unsafe { libc::kqueue() };
+ let mut kqf = SourceFd(&kq);
+ let poll = Poll::new().unwrap();
+
+ // Registering a kqueue fd will fail if write interest is requested (on
+ // anything but some versions of macOS).
+ poll.registry()
+ .register(&mut kqf, Token(1234), Interest::READABLE)
+ .unwrap();
+}
--- /dev/null
+#[cfg(any(target_os = "android", target_os = "illumos", target_os = "linux"))]
+mod epoll;
+
+#[cfg(any(target_os = "android", target_os = "illumos", target_os = "linux"))]
+pub(crate) use self::epoll::{event, Event, Events, Selector};
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+mod kqueue;
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+pub(crate) use self::kqueue::{event, Event, Events, Selector};
+
+/// Lowest file descriptor used in `Selector::try_clone`.
+///
+/// # Notes
+///
+/// Usually fds 0, 1 and 2 are standard in, out and error. Some applications
+/// blindly assume this to be true, which means that using any one of those fds
+/// for the selector could result in some interesting and unexpected errors.
+/// Avoid that by using an fd that doesn't have a pre-determined usage.
+const LOWEST_FD: libc::c_int = 3;
--- /dev/null
+use crate::{event, Interest, Registry, Token};
+
+use std::io;
+use std::os::unix::io::RawFd;
+
+/// Adapter for [`RawFd`] providing an [`event::Source`] implementation.
+///
+/// `SourceFd` enables registering any type that has a file descriptor with [`Poll`].
+///
+/// While only implementations for TCP and UDP are provided, Mio supports
+/// registering any FD that can be registered with the underlying OS selector.
+/// `SourceFd` provides the necessary bridge.
+///
+/// Note that `SourceFd` takes a `&RawFd`. This is because `SourceFd` **does
+/// not** take ownership of the FD. Specifically, it will not manage any
+/// lifecycle related operations, such as closing the FD on drop. It is expected
+/// that the `SourceFd` is constructed right before a call to
+/// [`Registry::register`]. See the examples for more detail.
+///
+/// [`event::Source`]: ../event/trait.Source.html
+/// [`Poll`]: ../struct.Poll.html
+/// [`Registry::register`]: ../struct.Registry.html#method.register
+///
+/// # Examples
+///
+/// Basic usage.
+///
+#[cfg_attr(
+ all(feature = "os-poll", feature = "net", feature = "os-ext"),
+ doc = "```"
+)]
+#[cfg_attr(
+ not(all(feature = "os-poll", feature = "net", feature = "os-ext")),
+ doc = "```ignore"
+)]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Interest, Poll, Token};
+/// use mio::unix::SourceFd;
+///
+/// use std::os::unix::io::AsRawFd;
+/// use std::net::TcpListener;
+///
+/// // Bind a std listener
+/// let listener = TcpListener::bind("127.0.0.1:0")?;
+///
+/// let poll = Poll::new()?;
+///
+/// // Register the listener
+/// poll.registry().register(
+/// &mut SourceFd(&listener.as_raw_fd()),
+/// Token(0),
+/// Interest::READABLE)?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Implementing [`event::Source`] for a custom type backed by a [`RawFd`].
+///
+#[cfg_attr(all(feature = "os-poll", feature = "os-ext"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "os-ext")), doc = "```ignore")]
+/// use mio::{event, Interest, Registry, Token};
+/// use mio::unix::SourceFd;
+///
+/// use std::os::unix::io::RawFd;
+/// use std::io;
+///
+/// # #[allow(dead_code)]
+/// pub struct MyIo {
+/// fd: RawFd,
+/// }
+///
+/// impl event::Source for MyIo {
+/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// SourceFd(&self.fd).register(registry, token, interests)
+/// }
+///
+/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest)
+/// -> io::Result<()>
+/// {
+/// SourceFd(&self.fd).reregister(registry, token, interests)
+/// }
+///
+/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+/// SourceFd(&self.fd).deregister(registry)
+/// }
+/// }
+/// ```
+#[derive(Debug)]
+pub struct SourceFd<'a>(pub &'a RawFd);
+
+impl<'a> event::Source for SourceFd<'a> {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ registry.selector().register(*self.0, token, interests)
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ registry.selector().reregister(*self.0, token, interests)
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ registry.selector().deregister(*self.0)
+ }
+}
--- /dev/null
+use std::convert::TryInto;
+use std::io;
+use std::mem::{size_of, MaybeUninit};
+use std::net::{self, SocketAddr};
+use std::os::unix::io::{AsRawFd, FromRawFd};
+
+use crate::sys::unix::net::{new_socket, socket_addr, to_socket_addr};
+
+pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<libc::c_int> {
+ let domain = match address {
+ SocketAddr::V4(_) => libc::AF_INET,
+ SocketAddr::V6(_) => libc::AF_INET6,
+ };
+ new_socket(domain, libc::SOCK_STREAM)
+}
+
+pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(bind(socket.as_raw_fd(), raw_addr.as_ptr(), raw_addr_length))?;
+ Ok(())
+}
+
+pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+
+ match syscall!(connect(
+ socket.as_raw_fd(),
+ raw_addr.as_ptr(),
+ raw_addr_length
+ )) {
+ Err(err) if err.raw_os_error() != Some(libc::EINPROGRESS) => Err(err),
+ _ => Ok(()),
+ }
+}
+
+pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> {
+ let backlog = backlog.try_into().unwrap_or(i32::max_value());
+ syscall!(listen(socket.as_raw_fd(), backlog))?;
+ Ok(())
+}
+
+pub(crate) fn set_reuseaddr(socket: &net::TcpListener, reuseaddr: bool) -> io::Result<()> {
+ let val: libc::c_int = if reuseaddr { 1 } else { 0 };
+ syscall!(setsockopt(
+ socket.as_raw_fd(),
+ libc::SOL_SOCKET,
+ libc::SO_REUSEADDR,
+ &val as *const libc::c_int as *const libc::c_void,
+ size_of::<libc::c_int>() as libc::socklen_t,
+ ))?;
+ Ok(())
+}
+
+pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ let mut addr: MaybeUninit<libc::sockaddr_storage> = MaybeUninit::uninit();
+ let mut length = size_of::<libc::sockaddr_storage>() as libc::socklen_t;
+
+ // On platforms that support it we can use `accept4(2)` to set `NONBLOCK`
+ // and `CLOEXEC` in the call to accept the connection.
+ #[cfg(any(
+ // Android x86's seccomp profile forbids calls to `accept4(2)`
+ // See https://github.com/tokio-rs/mio/issues/1445 for details
+ all(
+ not(target_arch="x86"),
+ target_os = "android"
+ ),
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ let stream = {
+ syscall!(accept4(
+ listener.as_raw_fd(),
+ addr.as_mut_ptr() as *mut _,
+ &mut length,
+ libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK,
+ ))
+ .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) })
+ }?;
+
+ // But not all platforms have the `accept4(2)` call. Luckily BSD (derived)
+ // OSes inherit the non-blocking flag from the listener, so we just have to
+ // set `CLOEXEC`.
+ #[cfg(any(
+ all(target_arch = "x86", target_os = "android"),
+ target_os = "ios",
+ target_os = "macos",
+ ))]
+ let stream = {
+ syscall!(accept(
+ listener.as_raw_fd(),
+ addr.as_mut_ptr() as *mut _,
+ &mut length
+ ))
+ .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) })
+ .and_then(|s| {
+ syscall!(fcntl(s.as_raw_fd(), libc::F_SETFD, libc::FD_CLOEXEC))?;
+
+ // See https://github.com/tokio-rs/mio/issues/1450
+ #[cfg(all(target_arch = "x86", target_os = "android"))]
+ syscall!(fcntl(s.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK))?;
+
+ Ok(s)
+ })
+ }?;
+
+ // This is safe because the `accept` calls above ensure the address is
+ // initialised.
+ unsafe { to_socket_addr(addr.as_ptr()) }.map(|addr| (stream, addr))
+}
--- /dev/null
+use crate::sys::unix::net::{new_ip_socket, socket_addr};
+
+use std::io;
+use std::mem;
+use std::net::{self, SocketAddr};
+use std::os::unix::io::{AsRawFd, FromRawFd};
+
+pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
+ // Gives a warning for non-Apple platforms.
+ #[allow(clippy::let_and_return)]
+ let socket = new_ip_socket(addr, libc::SOCK_DGRAM);
+
+ socket.and_then(|socket| {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(bind(socket, raw_addr.as_ptr(), raw_addr_length))
+ .map_err(|err| {
+ // Close the socket if we hit an error, ignoring the error
+ // from closing since we can't pass back two errors.
+ let _ = unsafe { libc::close(socket) };
+ err
+ })
+ .map(|_| unsafe { net::UdpSocket::from_raw_fd(socket) })
+ })
+}
+
+pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
+ let mut optval: libc::c_int = 0;
+ let mut optlen = mem::size_of::<libc::c_int>() as libc::socklen_t;
+
+ syscall!(getsockopt(
+ socket.as_raw_fd(),
+ libc::IPPROTO_IPV6,
+ libc::IPV6_V6ONLY,
+ &mut optval as *mut _ as *mut _,
+ &mut optlen,
+ ))?;
+
+ Ok(optval != 0)
+}
--- /dev/null
+use super::{socket_addr, SocketAddr};
+use crate::sys::unix::net::new_socket;
+
+use std::io;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::os::unix::net;
+use std::path::Path;
+
+pub(crate) fn bind(path: &Path) -> io::Result<net::UnixDatagram> {
+ let fd = new_socket(libc::AF_UNIX, libc::SOCK_DGRAM)?;
+ // Wrap the fd in a `UnixDatagram` now so it is closed if any of the
+ // calls below fail.
+ let socket = unsafe { net::UnixDatagram::from_raw_fd(fd) };
+ let (sockaddr, socklen) = socket_addr(path)?;
+ let sockaddr = &sockaddr as *const libc::sockaddr_un as *const _;
+ syscall!(bind(fd, sockaddr, socklen))?;
+ Ok(socket)
+}
+
+pub(crate) fn unbound() -> io::Result<net::UnixDatagram> {
+ new_socket(libc::AF_UNIX, libc::SOCK_DGRAM)
+ .map(|socket| unsafe { net::UnixDatagram::from_raw_fd(socket) })
+}
+
+pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> {
+ super::pair(libc::SOCK_DGRAM)
+}
+
+pub(crate) fn local_addr(socket: &net::UnixDatagram) -> io::Result<SocketAddr> {
+ super::local_addr(socket.as_raw_fd())
+}
+
+pub(crate) fn peer_addr(socket: &net::UnixDatagram) -> io::Result<SocketAddr> {
+ super::peer_addr(socket.as_raw_fd())
+}
+
+pub(crate) fn recv_from(
+ socket: &net::UnixDatagram,
+ dst: &mut [u8],
+) -> io::Result<(usize, SocketAddr)> {
+ let mut count = 0;
+ let socketaddr = SocketAddr::new(|sockaddr, socklen| {
+ syscall!(recvfrom(
+ socket.as_raw_fd(),
+ dst.as_mut_ptr() as *mut _,
+ dst.len(),
+ 0,
+ sockaddr,
+ socklen,
+ ))
+ .map(|c| {
+ count = c;
+ c as libc::c_int
+ })
+ })?;
+ Ok((count as usize, socketaddr))
+}
--- /dev/null
+use super::socket_addr;
+use crate::net::{SocketAddr, UnixStream};
+use crate::sys::unix::net::new_socket;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::os::unix::net;
+use std::path::Path;
+use std::{io, mem};
+
+pub(crate) fn bind(path: &Path) -> io::Result<net::UnixListener> {
+ let socket = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ let (sockaddr, socklen) = socket_addr(path)?;
+ let sockaddr = &sockaddr as *const libc::sockaddr_un as *const libc::sockaddr;
+
+ syscall!(bind(socket, sockaddr, socklen))
+ .and_then(|_| syscall!(listen(socket, 1024)))
+ .map_err(|err| {
+ // Close the socket if we hit an error, ignoring the error from
+ // closing since we can't pass back two errors.
+ let _ = unsafe { libc::close(socket) };
+ err
+ })
+ .map(|_| unsafe { net::UnixListener::from_raw_fd(socket) })
+}
+
+pub(crate) fn accept(listener: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> {
+ let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed();
+
+ // This is safe to assume because a `libc::sockaddr_un` filled with `0`
+ // bytes is properly initialized.
+ //
+ // `0` is a valid value for `sockaddr_un::sun_family`; it is
+ // `libc::AF_UNSPEC`.
+ //
+ // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an
+ // abstract path.
+ let mut sockaddr = unsafe { sockaddr.assume_init() };
+
+ sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+ let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t;
+
+ #[cfg(not(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ // Android x86's seccomp profile forbids calls to `accept4(2)`
+ // See https://github.com/tokio-rs/mio/issues/1445 for details
+ all(
+ target_arch = "x86",
+ target_os = "android"
+ )
+ )))]
+ let socket = {
+ let flags = libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
+ syscall!(accept4(
+ listener.as_raw_fd(),
+ &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr,
+ &mut socklen,
+ flags
+ ))
+ .map(|socket| unsafe { net::UnixStream::from_raw_fd(socket) })
+ };
+
+ #[cfg(any(
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ all(target_arch = "x86", target_os = "android")
+ ))]
+ let socket = syscall!(accept(
+ listener.as_raw_fd(),
+ &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr,
+ &mut socklen,
+ ))
+ .and_then(|socket| {
+ // Ensure the socket is closed if either of the `fcntl` calls
+ // error below.
+ let s = unsafe { net::UnixStream::from_raw_fd(socket) };
+ syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC))?;
+
+ // See https://github.com/tokio-rs/mio/issues/1450
+ #[cfg(all(target_arch = "x86", target_os = "android"))]
+ syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK))?;
+
+ Ok(s)
+ });
+
+ socket
+ .map(UnixStream::from_std)
+ .map(|stream| (stream, SocketAddr::from_parts(sockaddr, socklen)))
+}
+
+pub(crate) fn local_addr(listener: &net::UnixListener) -> io::Result<SocketAddr> {
+ super::local_addr(listener.as_raw_fd())
+}
--- /dev/null
+mod socketaddr;
+pub use self::socketaddr::SocketAddr;
+
+/// Get the `sun_path` field offset of `sockaddr_un` for the target OS.
+///
+/// On Linux, this function returns the same value as
+/// `size_of::<sa_family_t>()`, but some other implementations include
+/// other fields before `sun_path`, so this expression describes the
+/// offset more portably.
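+///
+/// A small illustrative sketch (not compiled as a doctest since this function
+/// is crate internal):
+///
+/// ```ignore
+/// let sockaddr: libc::sockaddr_un = unsafe { std::mem::zeroed() };
+/// let offset = path_offset(&sockaddr);
+/// // `offset + path_len` is what ends up as the `socklen_t` passed to
+/// // system calls such as bind(2), see `socket_addr` below.
+/// assert!(offset < std::mem::size_of::<libc::sockaddr_un>());
+/// ```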
+pub(in crate::sys) fn path_offset(sockaddr: &libc::sockaddr_un) -> usize {
+ let base = sockaddr as *const _ as usize;
+ let path = &sockaddr.sun_path as *const _ as usize;
+ path - base
+}
+
+cfg_os_poll! {
+ use std::cmp::Ordering;
+ use std::os::unix::ffi::OsStrExt;
+ use std::os::unix::io::{RawFd, FromRawFd};
+ use std::path::Path;
+ use std::{io, mem};
+
+ pub(crate) mod datagram;
+ pub(crate) mod listener;
+ pub(crate) mod stream;
+
+ pub(in crate::sys) fn socket_addr(path: &Path) -> io::Result<(libc::sockaddr_un, libc::socklen_t)> {
+ let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed();
+
+ // This is safe to assume because a `libc::sockaddr_un` filled with `0`
+ // bytes is properly initialized.
+ //
+ // `0` is a valid value for `sockaddr_un::sun_family`; it is
+ // `libc::AF_UNSPEC`.
+ //
+ // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an
+ // abstract path.
+ let mut sockaddr = unsafe { sockaddr.assume_init() };
+
+ sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+
+ let bytes = path.as_os_str().as_bytes();
+ match (bytes.get(0), bytes.len().cmp(&sockaddr.sun_path.len())) {
+ // Abstract paths don't need a null terminator
+ (Some(&0), Ordering::Greater) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "path must be no longer than libc::sockaddr_un.sun_path",
+ ));
+ }
+ (_, Ordering::Greater) | (_, Ordering::Equal) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "path must be shorter than libc::sockaddr_un.sun_path",
+ ));
+ }
+ _ => {}
+ }
+
+ for (dst, src) in sockaddr.sun_path.iter_mut().zip(bytes.iter()) {
+ *dst = *src as libc::c_char;
+ }
+
+ let offset = path_offset(&sockaddr);
+ let mut socklen = offset + bytes.len();
+
+ match bytes.get(0) {
+ // The struct has already been zeroed, so the null byte for pathname
+ // addresses is already there.
+ Some(&0) | None => {}
+ Some(_) => socklen += 1,
+ }
+
+ Ok((sockaddr, socklen as libc::socklen_t))
+ }
+
+ fn pair<T>(flags: libc::c_int) -> io::Result<(T, T)>
+ where T: FromRawFd,
+ {
+ #[cfg(not(any(target_os = "ios", target_os = "macos")))]
+ let flags = flags | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
+
+ let mut fds = [-1; 2];
+ syscall!(socketpair(libc::AF_UNIX, flags, 0, fds.as_mut_ptr()))?;
+ let pair = unsafe { (T::from_raw_fd(fds[0]), T::from_raw_fd(fds[1])) };
+
+ // Darwin doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC.
+ //
+ // In order to set those flags, additional `fcntl` sys calls must be
+ // performed. If a `fnctl` fails after the sockets have been created,
+ // the file descriptors will leak. Creating `pair` above ensures that if
+ // there is an error, the file descriptors are closed.
+ #[cfg(any(target_os = "ios", target_os = "macos"))]
+ {
+ syscall!(fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK))?;
+ syscall!(fcntl(fds[0], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ syscall!(fcntl(fds[1], libc::F_SETFL, libc::O_NONBLOCK))?;
+ syscall!(fcntl(fds[1], libc::F_SETFD, libc::FD_CLOEXEC))?;
+ }
+ Ok(pair)
+ }
+
+ // The following functions can't simply be replaced with a call to
+ // `net::UnixDatagram` because of our `SocketAddr` type.
+
+ fn local_addr(socket: RawFd) -> io::Result<SocketAddr> {
+ SocketAddr::new(|sockaddr, socklen| syscall!(getsockname(socket, sockaddr, socklen)))
+ }
+
+ fn peer_addr(socket: RawFd) -> io::Result<SocketAddr> {
+ SocketAddr::new(|sockaddr, socklen| syscall!(getpeername(socket, sockaddr, socklen)))
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use super::{path_offset, socket_addr};
+ use std::path::Path;
+ use std::str;
+
+ #[test]
+ fn pathname_address() {
+ const PATH: &str = "./foo/bar.txt";
+ const PATH_LEN: usize = 13;
+
+ // Pathname addresses do have a null terminator, so `socklen` is
+ // expected to be `PATH_LEN` + `offset` + 1.
+ let path = Path::new(PATH);
+ let (sockaddr, actual) = socket_addr(path).unwrap();
+ let offset = path_offset(&sockaddr);
+ let expected = PATH_LEN + offset + 1;
+ assert_eq!(expected as libc::socklen_t, actual)
+ }
+
+ #[test]
+ fn abstract_address() {
+ const PATH: &[u8] = &[0, 116, 111, 107, 105, 111];
+ const PATH_LEN: usize = 6;
+
+ // Abstract addresses do not have a null terminator, so `socklen` is
+ // expected to be `PATH_LEN` + `offset`.
+ let abstract_path = str::from_utf8(PATH).unwrap();
+ let path = Path::new(abstract_path);
+ let (sockaddr, actual) = socket_addr(path).unwrap();
+ let offset = path_offset(&sockaddr);
+ let expected = PATH_LEN + offset;
+ assert_eq!(expected as libc::socklen_t, actual)
+ }
+ }
+}
--- /dev/null
+use super::path_offset;
+use std::ffi::OsStr;
+use std::os::unix::ffi::OsStrExt;
+use std::path::Path;
+use std::{ascii, fmt};
+
+/// An address associated with a `mio` specific Unix socket.
+///
+/// This is implemented instead of reusing [`net::SocketAddr`] because there
+/// is no way to construct a [`net::SocketAddr`] directly; one must be
+/// returned by [`accept`], so this type is returned instead.
+///
+/// [`net::SocketAddr`]: std::os::unix::net::SocketAddr
+/// [`accept`]: #method.accept
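+///
+/// A rough sketch of how the accessors below relate to the possible address
+/// kinds (illustrative only):
+///
+/// ```ignore
+/// if let Some(path) = addr.as_pathname() {
+///     println!("pathname address: {:?}", path);
+/// } else if let Some(name) = addr.as_abstract_namespace() {
+///     println!("abstract address: {:?}", name);
+/// } else {
+///     assert!(addr.is_unnamed());
+/// }
+/// ```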
+pub struct SocketAddr {
+ sockaddr: libc::sockaddr_un,
+ socklen: libc::socklen_t,
+}
+
+struct AsciiEscaped<'a>(&'a [u8]);
+
+enum AddressKind<'a> {
+ Unnamed,
+ Pathname(&'a Path),
+ Abstract(&'a [u8]),
+}
+
+impl SocketAddr {
+ fn address(&self) -> AddressKind<'_> {
+ let offset = path_offset(&self.sockaddr);
+ // Don't underflow in `len` below.
+ if (self.socklen as usize) < offset {
+ return AddressKind::Unnamed;
+ }
+ let len = self.socklen as usize - offset;
+ let path = unsafe { &*(&self.sockaddr.sun_path as *const [libc::c_char] as *const [u8]) };
+
+ // macOS seems to return a len of 16 and a zeroed sun_path for unnamed addresses
+ if len == 0
+ || (cfg!(not(any(target_os = "linux", target_os = "android")))
+ && self.sockaddr.sun_path[0] == 0)
+ {
+ AddressKind::Unnamed
+ } else if self.sockaddr.sun_path[0] == 0 {
+ AddressKind::Abstract(&path[1..len])
+ } else {
+ AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref())
+ }
+ }
+}
+
+cfg_os_poll! {
+ use std::{io, mem};
+
+ impl SocketAddr {
+ pub(crate) fn new<F>(f: F) -> io::Result<SocketAddr>
+ where
+ F: FnOnce(*mut libc::sockaddr, &mut libc::socklen_t) -> io::Result<libc::c_int>,
+ {
+ let mut sockaddr = {
+ let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed();
+ unsafe { sockaddr.assume_init() }
+ };
+
+ let raw_sockaddr = &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr;
+ let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t;
+
+ f(raw_sockaddr, &mut socklen)?;
+ Ok(SocketAddr::from_parts(sockaddr, socklen))
+ }
+
+ pub(crate) fn from_parts(sockaddr: libc::sockaddr_un, socklen: libc::socklen_t) -> SocketAddr {
+ SocketAddr { sockaddr, socklen }
+ }
+
+ /// Returns `true` if the address is unnamed.
+ ///
+ /// Documentation reflected in [`SocketAddr`]
+ ///
+ /// [`SocketAddr`]: std::os::unix::net::SocketAddr
+ pub fn is_unnamed(&self) -> bool {
+ matches!(self.address(), AddressKind::Unnamed)
+ }
+
+ /// Returns the contents of this address if it is a `pathname` address.
+ ///
+ /// Documentation reflected in [`SocketAddr`]
+ ///
+ /// [`SocketAddr`]: std::os::unix::net::SocketAddr
+ pub fn as_pathname(&self) -> Option<&Path> {
+ if let AddressKind::Pathname(path) = self.address() {
+ Some(path)
+ } else {
+ None
+ }
+ }
+
+ /// Returns the contents of this address if it is an abstract namespace
+ /// without the leading null byte.
+ // Link to std::os::unix::net::SocketAddr pending
+ // https://github.com/rust-lang/rust/issues/85410.
+ pub fn as_abstract_namespace(&self) -> Option<&[u8]> {
+ if let AddressKind::Abstract(path) = self.address() {
+ Some(path)
+ } else {
+ None
+ }
+ }
+ }
+}
+
+impl fmt::Debug for SocketAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.address() {
+ AddressKind::Unnamed => write!(fmt, "(unnamed)"),
+ AddressKind::Abstract(name) => write!(fmt, "{} (abstract)", AsciiEscaped(name)),
+ AddressKind::Pathname(path) => write!(fmt, "{:?} (pathname)", path),
+ }
+ }
+}
+
+impl<'a> fmt::Display for AsciiEscaped<'a> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "\"")?;
+ for byte in self.0.iter().cloned().flat_map(ascii::escape_default) {
+ write!(fmt, "{}", byte as char)?;
+ }
+ write!(fmt, "\"")
+ }
+}
--- /dev/null
+use super::{socket_addr, SocketAddr};
+use crate::sys::unix::net::new_socket;
+
+use std::io;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::os::unix::net;
+use std::path::Path;
+
+pub(crate) fn connect(path: &Path) -> io::Result<net::UnixStream> {
+ let socket = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ let (sockaddr, socklen) = socket_addr(path)?;
+ let sockaddr = &sockaddr as *const libc::sockaddr_un as *const libc::sockaddr;
+
+ match syscall!(connect(socket, sockaddr, socklen)) {
+ Ok(_) => {}
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
+ Err(e) => {
+ // Close the socket if we hit an error, ignoring the error
+ // from closing since we can't pass back two errors.
+ let _ = unsafe { libc::close(socket) };
+
+ return Err(e);
+ }
+ }
+
+ Ok(unsafe { net::UnixStream::from_raw_fd(socket) })
+}
+
+pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> {
+ super::pair(libc::SOCK_STREAM)
+}
+
+pub(crate) fn local_addr(socket: &net::UnixStream) -> io::Result<SocketAddr> {
+ super::local_addr(socket.as_raw_fd())
+}
+
+pub(crate) fn peer_addr(socket: &net::UnixStream) -> io::Result<SocketAddr> {
+ super::peer_addr(socket.as_raw_fd())
+}
--- /dev/null
+#[cfg(any(target_os = "linux", target_os = "android"))]
+mod eventfd {
+ use crate::sys::Selector;
+ use crate::{Interest, Token};
+
+ use std::fs::File;
+ use std::io::{self, Read, Write};
+ use std::os::unix::io::FromRawFd;
+
+ /// Waker backed by `eventfd`.
+ ///
+ /// `eventfd` is effectively a 64 bit counter. All writes must be of 8
+ /// bytes (64 bits) and are converted (native endian) into a 64 bit
+ /// unsigned integer and added to the count. Reads must also be 8 bytes and
+ /// reset the count to 0, returning the count.
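+ ///
+ /// A rough sketch of these semantics (illustrative only; it mirrors the
+ /// `wake` and `reset` implementations below):
+ ///
+ /// ```ignore
+ /// // An 8 byte write adds the value to the counter...
+ /// (&file).write(&1u64.to_ne_bytes())?;
+ /// // ...and an 8 byte read returns the current count and resets it to 0.
+ /// let mut buf = [0; 8];
+ /// (&file).read(&mut buf)?;
+ /// let count = u64::from_ne_bytes(buf);
+ /// ```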
+ #[derive(Debug)]
+ pub struct Waker {
+ fd: File,
+ }
+
+ impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ syscall!(eventfd(0, libc::EFD_CLOEXEC | libc::EFD_NONBLOCK)).and_then(|fd| {
+ // Turn the file descriptor into a file first to ensure it's
+ // closed when dropped, e.g. when the register call below fails.
+ let file = unsafe { File::from_raw_fd(fd) };
+ selector
+ .register(fd, token, Interest::READABLE)
+ .map(|()| Waker { fd: file })
+ })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ let buf: [u8; 8] = 1u64.to_ne_bytes();
+ match (&self.fd).write(&buf) {
+ Ok(_) => Ok(()),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
+ // Writing only blocks if the counter is going to overflow.
+ // So we'll reset the counter to 0 and wake it again.
+ self.reset()?;
+ self.wake()
+ }
+ Err(err) => Err(err),
+ }
+ }
+
+ /// Reset the eventfd object; this only needs to be called if `wake` fails.
+ fn reset(&self) -> io::Result<()> {
+ let mut buf: [u8; 8] = 0u64.to_ne_bytes();
+ match (&self.fd).read(&mut buf) {
+ Ok(_) => Ok(()),
+ // If the `Waker` hasn't been awoken yet this will return a
+ // `WouldBlock` error which we can safely ignore.
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Ok(()),
+ Err(err) => Err(err),
+ }
+ }
+ }
+}
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub use self::eventfd::Waker;
+
+#[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+mod kqueue {
+ use crate::sys::Selector;
+ use crate::Token;
+
+ use std::io;
+
+ /// Waker backed by kqueue user space notifications (`EVFILT_USER`).
+ ///
+ /// The implementation is fairly simple: first the kqueue must be set up to
+ /// receive waker events, which is done by calling `Selector.setup_waker`.
+ /// Next we need access to the kqueue, so we duplicate the file descriptor.
+ /// Waking is then as simple as adding an event to the kqueue.
+ #[derive(Debug)]
+ pub struct Waker {
+ selector: Selector,
+ token: Token,
+ }
+
+ impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ selector.try_clone().and_then(|selector| {
+ selector
+ .setup_waker(token)
+ .map(|()| Waker { selector, token })
+ })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ self.selector.wake(self.token)
+ }
+ }
+}
+
+#[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+pub use self::kqueue::Waker;
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "illumos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+))]
+mod pipe {
+ use crate::sys::unix::Selector;
+ use crate::{Interest, Token};
+
+ use std::fs::File;
+ use std::io::{self, Read, Write};
+ use std::os::unix::io::FromRawFd;
+
+ /// Waker backed by a unix pipe.
+ ///
+ /// Waker controls both the sending and receiving ends and empties the pipe
+ /// if writing to it (waking) fails.
+ #[derive(Debug)]
+ pub struct Waker {
+ sender: File,
+ receiver: File,
+ }
+
+ impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ let mut fds = [-1; 2];
+ syscall!(pipe2(fds.as_mut_ptr(), libc::O_NONBLOCK | libc::O_CLOEXEC))?;
+ // Turn the file descriptors into files first to ensure they're
+ // closed when dropped, e.g. when the register call below fails.
+ let sender = unsafe { File::from_raw_fd(fds[1]) };
+ let receiver = unsafe { File::from_raw_fd(fds[0]) };
+ selector
+ .register(fds[0], token, Interest::READABLE)
+ .map(|()| Waker { sender, receiver })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
+ // The epoll emulation on some illumos systems currently requires
+ // the pipe buffer to be completely empty for an edge-triggered
+ // wakeup on the pipe read side.
+ #[cfg(target_os = "illumos")]
+ self.empty();
+
+ match (&self.sender).write(&[1]) {
+ Ok(_) => Ok(()),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
+ // The reading end is full so we'll empty the buffer and try
+ // again.
+ self.empty();
+ self.wake()
+ }
+ Err(ref err) if err.kind() == io::ErrorKind::Interrupted => self.wake(),
+ Err(err) => Err(err),
+ }
+ }
+
+ /// Empty the pipe's buffer; this only needs to be called if `wake` fails.
+ /// Any errors are ignored.
+ fn empty(&self) {
+ let mut buf = [0; 4096];
+ loop {
+ match (&self.receiver).read(&mut buf) {
+ Ok(n) if n > 0 => continue,
+ _ => return,
+ }
+ }
+ }
+ }
+}
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "illumos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+))]
+pub use self::pipe::Waker;
--- /dev/null
+use ntapi::ntioapi::{IO_STATUS_BLOCK_u, IO_STATUS_BLOCK};
+use ntapi::ntioapi::{NtCancelIoFileEx, NtDeviceIoControlFile};
+use ntapi::ntrtl::RtlNtStatusToDosError;
+use std::fmt;
+use std::fs::File;
+use std::io;
+use std::mem::size_of;
+use std::os::windows::io::AsRawHandle;
+use std::ptr::null_mut;
+use winapi::shared::ntdef::{HANDLE, LARGE_INTEGER, NTSTATUS, PVOID, ULONG};
+use winapi::shared::ntstatus::{STATUS_NOT_FOUND, STATUS_PENDING, STATUS_SUCCESS};
+
+const IOCTL_AFD_POLL: ULONG = 0x00012024;
+
+/// Winsock2 AFD driver instance.
+///
+/// All operations are unsafe because the `IO_STATUS_BLOCK` parameter is used
+/// by the AFD driver while the operation is `STATUS_PENDING`, i.e. before the
+/// I/O completion port returns its result.
+#[derive(Debug)]
+pub struct Afd {
+ fd: File,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct AfdPollHandleInfo {
+ pub handle: HANDLE,
+ pub events: ULONG,
+ pub status: NTSTATUS,
+}
+
+unsafe impl Send for AfdPollHandleInfo {}
+
+#[repr(C)]
+pub struct AfdPollInfo {
+ pub timeout: LARGE_INTEGER,
+ // Can only have the value 1.
+ pub number_of_handles: ULONG,
+ pub exclusive: ULONG,
+ pub handles: [AfdPollHandleInfo; 1],
+}
+
+impl fmt::Debug for AfdPollInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AfdPollInfo").finish()
+ }
+}
+
+impl Afd {
+ /// Poll `Afd` instance with `AfdPollInfo`.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the memory of `IO_STATUS_BLOCK` is still
+ /// used by the `Afd` instance while the result is `Ok(false)` (`STATUS_PENDING`).
+ /// `iosb` must not be touched after the call for as long as the operation is
+ /// in flight, except through the `cancel` method. So be careful not to call
+ /// `poll` again while a poll is still pending. On error the caller should
+ /// deallocate the overlapped value to prevent a memory leak.
+ pub unsafe fn poll(
+ &self,
+ info: &mut AfdPollInfo,
+ iosb: *mut IO_STATUS_BLOCK,
+ overlapped: PVOID,
+ ) -> io::Result<bool> {
+ let info_ptr: PVOID = info as *mut _ as PVOID;
+ (*iosb).u.Status = STATUS_PENDING;
+ let status = NtDeviceIoControlFile(
+ self.fd.as_raw_handle(),
+ null_mut(),
+ None,
+ overlapped,
+ iosb,
+ IOCTL_AFD_POLL,
+ info_ptr,
+ size_of::<AfdPollInfo>() as u32,
+ info_ptr,
+ size_of::<AfdPollInfo>() as u32,
+ );
+ match status {
+ STATUS_SUCCESS => Ok(true),
+ STATUS_PENDING => Ok(false),
+ _ => Err(io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ )),
+ }
+ }
+
+ /// Cancel previous polled request of `Afd`.
+ ///
+ /// `iosb` needs to have been used by `poll` first for `cancel` to be valid.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because the memory of `IO_STATUS_BLOCK` is still
+ /// used by the `Afd` instance while the result is `Ok(false)` (`STATUS_PENDING`).
+ /// Use it only while the request is still being polled, so that the
+ /// `IO_STATUS_BLOCK` is valid. The caller should NOT deallocate the
+ /// overlapped value after `cancel`, to prevent a double free.
+ pub unsafe fn cancel(&self, iosb: *mut IO_STATUS_BLOCK) -> io::Result<()> {
+ if (*iosb).u.Status != STATUS_PENDING {
+ return Ok(());
+ }
+
+ let mut cancel_iosb = IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_u { Status: 0 },
+ Information: 0,
+ };
+ let status = NtCancelIoFileEx(self.fd.as_raw_handle(), iosb, &mut cancel_iosb);
+ if status == STATUS_SUCCESS || status == STATUS_NOT_FOUND {
+ return Ok(());
+ }
+ Err(io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ ))
+ }
+}
+
+cfg_io_source! {
+ use std::mem::zeroed;
+ use std::os::windows::io::{FromRawHandle, RawHandle};
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ use miow::iocp::CompletionPort;
+ use ntapi::ntioapi::{NtCreateFile, FILE_OPEN};
+ use winapi::shared::ntdef::{OBJECT_ATTRIBUTES, UNICODE_STRING, USHORT, WCHAR};
+ use winapi::um::handleapi::INVALID_HANDLE_VALUE;
+ use winapi::um::winbase::{SetFileCompletionNotificationModes, FILE_SKIP_SET_EVENT_ON_HANDLE};
+ use winapi::um::winnt::{SYNCHRONIZE, FILE_SHARE_READ, FILE_SHARE_WRITE};
+
+ const AFD_HELPER_ATTRIBUTES: OBJECT_ATTRIBUTES = OBJECT_ATTRIBUTES {
+ Length: size_of::<OBJECT_ATTRIBUTES>() as ULONG,
+ RootDirectory: null_mut(),
+ ObjectName: &AFD_OBJ_NAME as *const _ as *mut _,
+ Attributes: 0,
+ SecurityDescriptor: null_mut(),
+ SecurityQualityOfService: null_mut(),
+ };
+
+ const AFD_OBJ_NAME: UNICODE_STRING = UNICODE_STRING {
+ Length: (AFD_HELPER_NAME.len() * size_of::<WCHAR>()) as USHORT,
+ MaximumLength: (AFD_HELPER_NAME.len() * size_of::<WCHAR>()) as USHORT,
+ Buffer: AFD_HELPER_NAME.as_ptr() as *mut _,
+ };
+
+ const AFD_HELPER_NAME: &[WCHAR] = &[
+ '\\' as _,
+ 'D' as _,
+ 'e' as _,
+ 'v' as _,
+ 'i' as _,
+ 'c' as _,
+ 'e' as _,
+ '\\' as _,
+ 'A' as _,
+ 'f' as _,
+ 'd' as _,
+ '\\' as _,
+ 'M' as _,
+ 'i' as _,
+ 'o' as _
+ ];
+
+ static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(0);
+
+ impl AfdPollInfo {
+ pub fn zeroed() -> AfdPollInfo {
+ unsafe { zeroed() }
+ }
+ }
+
+ impl Afd {
+ /// Create new Afd instance.
+ pub fn new(cp: &CompletionPort) -> io::Result<Afd> {
+ let mut afd_helper_handle: HANDLE = INVALID_HANDLE_VALUE;
+ let mut iosb = IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_u { Status: 0 },
+ Information: 0,
+ };
+
+ unsafe {
+ let status = NtCreateFile(
+ &mut afd_helper_handle as *mut _,
+ SYNCHRONIZE,
+ &AFD_HELPER_ATTRIBUTES as *const _ as *mut _,
+ &mut iosb,
+ null_mut(),
+ 0,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ FILE_OPEN,
+ 0,
+ null_mut(),
+ 0,
+ );
+ if status != STATUS_SUCCESS {
+ let raw_err = io::Error::from_raw_os_error(
+ RtlNtStatusToDosError(status) as i32
+ );
+ let msg = format!("Failed to open \\Device\\Afd\\Mio: {}", raw_err);
+ return Err(io::Error::new(raw_err.kind(), msg));
+ }
+ let fd = File::from_raw_handle(afd_helper_handle as RawHandle);
+ // Increment by 2 to reserve space for other types of handles.
+ // Non-AFD types (currently only NamedPipe) use odd numbered
+ // tokens. This allows the selector to differentiate between them
+ // and dispatch events accordingly.
+ let token = NEXT_TOKEN.fetch_add(2, Ordering::Relaxed) + 2;
+ let afd = Afd { fd };
+ cp.add_handle(token, &afd.fd)?;
+ match SetFileCompletionNotificationModes(
+ afd_helper_handle,
+ FILE_SKIP_SET_EVENT_ON_HANDLE,
+ ) {
+ 0 => Err(io::Error::last_os_error()),
+ _ => Ok(afd),
+ }
+ }
+ }
+ }
+}
+
+pub const POLL_RECEIVE: u32 = 0b0_0000_0001;
+pub const POLL_RECEIVE_EXPEDITED: u32 = 0b0_0000_0010;
+pub const POLL_SEND: u32 = 0b0_0000_0100;
+pub const POLL_DISCONNECT: u32 = 0b0_0000_1000;
+pub const POLL_ABORT: u32 = 0b0_0001_0000;
+pub const POLL_LOCAL_CLOSE: u32 = 0b0_0010_0000;
+// Not used as it is indicated in every event for a connected connection, not
+// just the first time the connection is established.
+// Also see https://github.com/piscisaureus/wepoll/commit/8b7b340610f88af3d83f40fb728e7b850b090ece.
+pub const POLL_CONNECT: u32 = 0b0_0100_0000;
+pub const POLL_ACCEPT: u32 = 0b0_1000_0000;
+pub const POLL_CONNECT_FAIL: u32 = 0b1_0000_0000;
+
+pub const KNOWN_EVENTS: u32 = POLL_RECEIVE
+ | POLL_RECEIVE_EXPEDITED
+ | POLL_SEND
+ | POLL_DISCONNECT
+ | POLL_ABORT
+ | POLL_LOCAL_CLOSE
+ | POLL_ACCEPT
+ | POLL_CONNECT_FAIL;
--- /dev/null
+use std::fmt;
+
+use miow::iocp::CompletionStatus;
+
+use super::afd;
+use crate::Token;
+
+#[derive(Clone)]
+pub struct Event {
+ pub flags: u32,
+ pub data: u64,
+}
+
+pub fn token(event: &Event) -> Token {
+ Token(event.data as usize)
+}
+
+impl Event {
+ pub(super) fn new(token: Token) -> Event {
+ Event {
+ flags: 0,
+ data: usize::from(token) as u64,
+ }
+ }
+
+ pub(super) fn set_readable(&mut self) {
+ self.flags |= afd::POLL_RECEIVE
+ }
+
+ #[cfg(feature = "os-ext")]
+ pub(super) fn set_writable(&mut self) {
+ self.flags |= afd::POLL_SEND;
+ }
+
+ pub(super) fn from_completion_status(status: &CompletionStatus) -> Event {
+ Event {
+ flags: status.bytes_transferred(),
+ data: status.token() as u64,
+ }
+ }
+
+ pub(super) fn to_completion_status(&self) -> CompletionStatus {
+ CompletionStatus::new(self.flags, self.data as usize, std::ptr::null_mut())
+ }
+}
+
+pub(crate) const READABLE_FLAGS: u32 = afd::POLL_RECEIVE
+ | afd::POLL_DISCONNECT
+ | afd::POLL_ACCEPT
+ | afd::POLL_ABORT
+ | afd::POLL_CONNECT_FAIL;
+pub(crate) const WRITABLE_FLAGS: u32 = afd::POLL_SEND | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+pub(crate) const ERROR_FLAGS: u32 = afd::POLL_CONNECT_FAIL;
+pub(crate) const READ_CLOSED_FLAGS: u32 =
+ afd::POLL_DISCONNECT | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+pub(crate) const WRITE_CLOSED_FLAGS: u32 = afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
+
+pub fn is_readable(event: &Event) -> bool {
+ event.flags & READABLE_FLAGS != 0
+}
+
+pub fn is_writable(event: &Event) -> bool {
+ event.flags & WRITABLE_FLAGS != 0
+}
+
+pub fn is_error(event: &Event) -> bool {
+ event.flags & ERROR_FLAGS != 0
+}
+
+pub fn is_read_closed(event: &Event) -> bool {
+ event.flags & READ_CLOSED_FLAGS != 0
+}
+
+pub fn is_write_closed(event: &Event) -> bool {
+ event.flags & WRITE_CLOSED_FLAGS != 0
+}
+
+pub fn is_priority(event: &Event) -> bool {
+ event.flags & afd::POLL_RECEIVE_EXPEDITED != 0
+}
+
+pub fn is_aio(_: &Event) -> bool {
+ // Not supported.
+ false
+}
+
+pub fn is_lio(_: &Event) -> bool {
+ // Not supported.
+ false
+}
+
+pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ fn check_flags(got: &u32, want: &u32) -> bool {
+ (got & want) != 0
+ }
+ debug_detail!(
+ FlagsDetails(u32),
+ check_flags,
+ afd::POLL_RECEIVE,
+ afd::POLL_RECEIVE_EXPEDITED,
+ afd::POLL_SEND,
+ afd::POLL_DISCONNECT,
+ afd::POLL_ABORT,
+ afd::POLL_LOCAL_CLOSE,
+ afd::POLL_CONNECT,
+ afd::POLL_ACCEPT,
+ afd::POLL_CONNECT_FAIL,
+ );
+
+ f.debug_struct("event")
+ .field("flags", &FlagsDetails(event.flags))
+ .field("data", &event.data)
+ .finish()
+}
+
+pub struct Events {
+ /// Raw I/O event completions are filled in here by the call to `get_many`
+ /// on the completion port above. These are then processed to run callbacks
+ /// which figure out what to do after the event is done.
+ pub statuses: Box<[CompletionStatus]>,
+
+ /// Literal events returned by `get` to the upwards `EventLoop`. This file
+ /// doesn't really modify this (except for the waker), instead almost all
+ /// events are filled in by the `ReadinessQueue` from the `poll` module.
+ pub events: Vec<Event>,
+}
+
+impl Events {
+ pub fn with_capacity(cap: usize) -> Events {
+ // Note that it's possible for the output `events` to grow beyond the
+ // capacity as it can also include deferred events, but that's certainly
+ // not the end of the world!
+ Events {
+ statuses: vec![CompletionStatus::zero(); cap].into_boxed_slice(),
+ events: Vec::with_capacity(cap),
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.events.is_empty()
+ }
+
+ pub fn capacity(&self) -> usize {
+ self.events.capacity()
+ }
+
+ pub fn len(&self) -> usize {
+ self.events.len()
+ }
+
+ pub fn get(&self, idx: usize) -> Option<&Event> {
+ self.events.get(idx)
+ }
+
+ pub fn clear(&mut self) {
+ self.events.clear();
+ for status in self.statuses.iter_mut() {
+ *status = CompletionStatus::zero();
+ }
+ }
+}
--- /dev/null
+use std::fmt;
+use std::ops::{Deref, DerefMut};
+
+use ntapi::ntioapi::IO_STATUS_BLOCK;
+
+pub struct IoStatusBlock(IO_STATUS_BLOCK);
+
+cfg_io_source! {
+ use ntapi::ntioapi::IO_STATUS_BLOCK_u;
+
+ impl IoStatusBlock {
+ pub fn zeroed() -> Self {
+ Self(IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_u { Status: 0 },
+ Information: 0,
+ })
+ }
+ }
+}
+
+unsafe impl Send for IoStatusBlock {}
+
+impl Deref for IoStatusBlock {
+ type Target = IO_STATUS_BLOCK;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for IoStatusBlock {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl fmt::Debug for IoStatusBlock {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("IoStatusBlock").finish()
+ }
+}
--- /dev/null
+mod afd;
+mod io_status_block;
+
+pub mod event;
+pub use event::{Event, Events};
+
+mod selector;
+pub use selector::{Selector, SelectorInner, SockState};
+
+mod overlapped;
+use overlapped::Overlapped;
+
+// Macros must be defined before the modules that use them
+cfg_net! {
+ /// Helper macro to execute a system call that returns an `io::Result`.
+ //
+ // Macro must be defined before any modules that use it.
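+ //
+ // For example (illustrative; it matches the use in the `net` module
+ // declared below):
+ // `syscall!(socket(domain, socket_type, 0), PartialEq::eq, INVALID_SOCKET)`
+ // runs `socket(..)` and returns `Err(io::Error::last_os_error())` when the
+ // result equals `INVALID_SOCKET`, and `Ok(result)` otherwise.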
+ macro_rules! syscall {
+ ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{
+ let res = unsafe { $fn($($arg, )*) };
+ if $err_test(&res, &$err_value) {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(res)
+ }
+ }};
+ }
+
+ mod net;
+
+ pub(crate) mod tcp;
+ pub(crate) mod udp;
+}
+
+cfg_os_ext! {
+ pub(crate) mod named_pipe;
+}
+
+mod waker;
+pub(crate) use waker::Waker;
+
+cfg_io_source! {
+ use std::io;
+ use std::os::windows::io::RawSocket;
+ use std::pin::Pin;
+ use std::sync::{Arc, Mutex};
+
+ use crate::{Interest, Registry, Token};
+
+ struct InternalState {
+ selector: Arc<SelectorInner>,
+ token: Token,
+ interests: Interest,
+ sock_state: Pin<Arc<Mutex<SockState>>>,
+ }
+
+ impl Drop for InternalState {
+ fn drop(&mut self) {
+ let mut sock_state = self.sock_state.lock().unwrap();
+ sock_state.mark_delete();
+ }
+ }
+
+ pub struct IoSourceState {
+ // This is `None` if the socket has not yet been registered.
+ //
+ // We box the internal state to not increase the size on the stack as the
+ // type might move around a lot.
+ inner: Option<Box<InternalState>>,
+ }
+
+ impl IoSourceState {
+ pub fn new() -> IoSourceState {
+ IoSourceState { inner: None }
+ }
+
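+ /// Execute the I/O operation `f` on `io`. If it fails with
+ /// `ErrorKind::WouldBlock` and this source has been registered, the
+ /// socket is re-registered with the selector so another readiness
+ /// event is delivered once it is ready again.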
+ pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
+ where
+ F: FnOnce(&T) -> io::Result<R>,
+ {
+ let result = f(io);
+ if let Err(ref e) = result {
+ if e.kind() == io::ErrorKind::WouldBlock {
+ self.inner.as_ref().map_or(Ok(()), |state| {
+ state
+ .selector
+ .reregister(state.sock_state.clone(), state.token, state.interests)
+ })?;
+ }
+ }
+ result
+ }
+
+ pub fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ socket: RawSocket,
+ ) -> io::Result<()> {
+ if self.inner.is_some() {
+ Err(io::ErrorKind::AlreadyExists.into())
+ } else {
+ registry
+ .selector()
+ .register(socket, token, interests)
+ .map(|state| {
+ self.inner = Some(Box::new(state));
+ })
+ }
+ }
+
+ pub fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ match self.inner.as_mut() {
+ Some(state) => {
+ registry
+ .selector()
+ .reregister(state.sock_state.clone(), token, interests)
+ .map(|()| {
+ state.token = token;
+ state.interests = interests;
+ })
+ }
+ None => Err(io::ErrorKind::NotFound.into()),
+ }
+ }
+
+ pub fn deregister(&mut self) -> io::Result<()> {
+ match self.inner.as_mut() {
+ Some(state) => {
+ {
+ let mut sock_state = state.sock_state.lock().unwrap();
+ sock_state.mark_delete();
+ }
+ self.inner = None;
+ Ok(())
+ }
+ None => Err(io::ErrorKind::NotFound.into()),
+ }
+ }
+ }
+}
--- /dev/null
+use std::ffi::OsStr;
+use std::io::{self, Read, Write};
+use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle};
+use std::sync::atomic::Ordering::{Relaxed, SeqCst};
+use std::sync::atomic::{AtomicBool, AtomicUsize};
+use std::sync::{Arc, Mutex};
+use std::{fmt, mem, slice};
+
+use miow::iocp::{CompletionPort, CompletionStatus};
+use miow::pipe;
+use winapi::shared::winerror::{ERROR_BROKEN_PIPE, ERROR_PIPE_LISTENING};
+use winapi::um::ioapiset::CancelIoEx;
+use winapi::um::minwinbase::{OVERLAPPED, OVERLAPPED_ENTRY};
+
+use crate::event::Source;
+use crate::sys::windows::{Event, Overlapped};
+use crate::Registry;
+use crate::{Interest, Token};
+
+/// Non-blocking windows named pipe.
+///
+/// This structure internally contains a `HANDLE` which represents the named
+/// pipe, and also maintains state associated with the mio event loop and active
+/// I/O operations that have been scheduled to translate IOCP to a readiness
+/// model.
+///
+/// Note, IOCP is a *completion* based model whereas mio is a *readiness* based
+/// model. To bridge this, `NamedPipe` performs internal buffering. Writes are
+/// written to an internal buffer and the buffer is submitted to IOCP. IOCP
+/// reads are submitted using internal buffers and `NamedPipe::read` reads from
+/// this internal buffer.
+///
+/// # Trait implementations
+///
+/// The `Read` and `Write` traits are implemented for `NamedPipe` and for
+/// `&NamedPipe`. This means that a named pipe can be read from and written to
+/// concurrently, even through a shared reference. Typically, however, a named
+/// pipe needs to be connected to a client before it can be read from or
+/// written to.
+///
+/// Note that for I/O operations on a named pipe to succeed, the named pipe
+/// needs to be associated with an event loop. Until this happens all I/O
+/// operations will return a "would block" error.
+///
+/// # Managing connections
+///
+/// The `NamedPipe` type supports a `connect` method to connect to a client and
+/// a `disconnect` method to disconnect from that client. These two methods only
+/// work once a named pipe is associated with an event loop.
+///
+/// The `connect` method will succeed asynchronously and a completion can be
+/// detected once the object receives a writable notification.
+///
+/// # Named pipe clients
+///
+/// Currently, to create a client of a named pipe server you can use the
+/// `OpenOptions` type in the standard library to create a `File` that connects
+/// to a named pipe. Afterwards you can use the `into_raw_handle` method coupled
+/// with the `NamedPipe::from_raw_handle` method to convert that into a named
+/// pipe that can operate asynchronously. Don't forget to pass the
+/// `FILE_FLAG_OVERLAPPED` flag when opening the `File`.
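+///
+/// A minimal sketch of creating such a client (the pipe name and the use of
+/// `?` are illustrative only):
+///
+/// ```ignore
+/// use std::fs::OpenOptions;
+/// use std::os::windows::fs::OpenOptionsExt;
+/// use std::os::windows::io::{FromRawHandle, IntoRawHandle};
+/// use winapi::um::winbase::FILE_FLAG_OVERLAPPED;
+///
+/// // Open the client end of an existing pipe with overlapped I/O enabled.
+/// let file = OpenOptions::new()
+///     .read(true)
+///     .write(true)
+///     .custom_flags(FILE_FLAG_OVERLAPPED)
+///     .open(r"\\.\pipe\mio-named-pipe-example")?;
+/// // Convert the handle into a `NamedPipe` that can be registered with mio.
+/// let client = unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) };
+/// ```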
+pub struct NamedPipe {
+ inner: Arc<Inner>,
+}
+
+/// # Notes
+///
+/// The memory layout of this structure must be fixed as the
+/// `ptr_from_*_overlapped` methods depend on it, see the `ptr_from` test.
+#[repr(C)]
+struct Inner {
+ // NOTE: careful modifying the order of these three fields, the `ptr_from_*`
+ // methods depend on the layout!
+ connect: Overlapped,
+ read: Overlapped,
+ write: Overlapped,
+ // END NOTE.
+ handle: pipe::NamedPipe,
+ connecting: AtomicBool,
+ io: Mutex<Io>,
+ pool: Mutex<BufferPool>,
+}
+
+impl Inner {
+ /// Converts a pointer to `Inner.connect` to a pointer to `Inner`.
+ ///
+ /// # Unsafety
+ ///
+ /// Caller must ensure `ptr` is pointing to `Inner.connect`.
+ unsafe fn ptr_from_conn_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `connect` is the first field, so the pointers are the same.
+ ptr.cast()
+ }
+
+ /// Same as [`ptr_from_conn_overlapped`] but for `Inner.read`.
+ unsafe fn ptr_from_read_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `read` is after `connect: Overlapped`.
+ (ptr as *mut Overlapped).wrapping_sub(1) as *const Inner
+ }
+
+ /// Same as [`ptr_from_conn_overlapped`] but for `Inner.write`.
+ unsafe fn ptr_from_write_overlapped(ptr: *mut OVERLAPPED) -> *const Inner {
+ // `write` is after `connect: Overlapped` and `read: Overlapped`.
+ (ptr as *mut Overlapped).wrapping_sub(2) as *const Inner
+ }
+}
+
+#[test]
+fn ptr_from() {
+ use std::mem::ManuallyDrop;
+ use std::ptr;
+
+ let pipe = unsafe { ManuallyDrop::new(NamedPipe::from_raw_handle(ptr::null_mut())) };
+ let inner: &Inner = &pipe.inner;
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_conn_overlapped(&inner.connect as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_conn_overlapped` incorrect"
+ );
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_read_overlapped(&inner.read as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_read_overlapped` incorrect"
+ );
+ assert_eq!(
+ inner as *const Inner,
+ unsafe { Inner::ptr_from_write_overlapped(&inner.write as *const _ as *mut OVERLAPPED) },
+ "`ptr_from_write_overlapped` incorrect"
+ );
+}
+
+struct Io {
+ // Uniquely identifies the selector associated with this named pipe
+ cp: Option<Arc<CompletionPort>>,
+ // Token used to identify events
+ token: Option<Token>,
+ read: State,
+ write: State,
+ connect_error: Option<io::Error>,
+}
+
+#[derive(Debug)]
+enum State {
+ None,
+ Pending(Vec<u8>, usize),
+ Ok(Vec<u8>, usize),
+ Err(io::Error),
+}
+
+// Odd tokens are for named pipes
+static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(1);
+
+fn would_block() -> io::Error {
+ io::ErrorKind::WouldBlock.into()
+}
+
+impl NamedPipe {
+ /// Creates a new named pipe at the specified `addr` given a "reasonable
+ /// set" of initial configuration options.
+ pub fn new<A: AsRef<OsStr>>(addr: A) -> io::Result<NamedPipe> {
+ let pipe = pipe::NamedPipe::new(addr)?;
+ // Safety: nothing actually unsafe about this. The trait fn includes
+ // `unsafe`.
+ Ok(unsafe { NamedPipe::from_raw_handle(pipe.into_raw_handle()) })
+ }
+
+ /// Attempts to call `ConnectNamedPipe`, if possible.
+ ///
+ /// This function will attempt to connect this pipe to a client in an
+ /// asynchronous fashion. If the function immediately establishes a
+ /// connection to a client then `Ok(())` is returned. Otherwise if a
+ /// connection attempt was issued and is now in progress then a "would
+ /// block" error is returned.
+ ///
+ /// When the connection is finished then this object will be flagged as
+ /// being ready for a write, or otherwise in the writable state.
+ ///
+ /// # Errors
+ ///
+ /// This function will return a "would block" error if the pipe has not yet
+ /// been registered with an event loop, if the connection operation has
+ /// previously been issued but has not yet completed, or if the connect
+ /// itself was issued and didn't finish immediately.
+ ///
+ /// Normal I/O errors from the call to `ConnectNamedPipe` are returned
+ /// immediately.
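+ ///
+ /// A hedged usage sketch (registration with a `Poll` and the event loop
+ /// itself are omitted):
+ ///
+ /// ```ignore
+ /// match pipe.connect() {
+ ///     Ok(()) => { /* a client connected immediately */ }
+ ///     Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ ///         // Wait for a writable event on `pipe`, then call `take_error`
+ ///         // to find out whether the connection actually succeeded.
+ ///     }
+ ///     Err(e) => return Err(e),
+ /// }
+ /// ```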
+ pub fn connect(&self) -> io::Result<()> {
+ // "Acquire the connecting lock" or otherwise just make sure we're the
+ // only operation that's using the `connect` overlapped instance.
+ if self.inner.connecting.swap(true, SeqCst) {
+ return Err(would_block());
+ }
+
+ // Now that we've flagged ourselves in the connecting state, issue the
+ // connection attempt. Afterwards interpret the return value and set
+ // internal state accordingly.
+ let res = unsafe {
+ let overlapped = self.inner.connect.as_ptr() as *mut _;
+ self.inner.handle.connect_overlapped(overlapped)
+ };
+
+ match res {
+ // The connection operation finished immediately, so let's schedule
+ // reads/writes and such.
+ Ok(true) => {
+ self.inner.connecting.store(false, SeqCst);
+ Inner::post_register(&self.inner, None);
+ Ok(())
+ }
+
+ // If the overlapped operation was successful and didn't finish
+ // immediately then we forget a copy of the arc we hold
+ // internally. This ensures that when the completion status comes
+ // in for the I/O operation finishing it'll have a reference
+ // associated with it and our data will still be valid. The
+ // `connect_done` function will "reify" this forgotten pointer to
+ // drop the refcount on the other side.
+ Ok(false) => {
+ mem::forget(self.inner.clone());
+ Err(would_block())
+ }
+
+ Err(e) => {
+ self.inner.connecting.store(false, SeqCst);
+ Err(e)
+ }
+ }
+ }
+
+ /// Takes any internal error that has happened after the last I/O operation
+ /// which hasn't been retrieved yet.
+ ///
+ /// This is particularly useful when detecting failed attempts to `connect`.
+ /// After a completed `connect` flags this pipe as writable then callers
+ /// must invoke this method to determine whether the connection actually
+ /// succeeded. If this function returns `None` then a client is connected,
+ /// otherwise it returns an error of what happened and a client shouldn't be
+ /// connected.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ Ok(self.inner.io.lock().unwrap().connect_error.take())
+ }
+
+ /// Disconnects this named pipe from a connected client.
+ ///
+ /// This function will disconnect the pipe from a connected client, if any,
+ /// transitively calling the `DisconnectNamedPipe` function.
+ ///
+ /// After a `disconnect` is issued, then a `connect` may be called again to
+ /// connect to another client.
+ pub fn disconnect(&self) -> io::Result<()> {
+ self.inner.handle.disconnect()
+ }
+}
+
+impl FromRawHandle for NamedPipe {
+ unsafe fn from_raw_handle(handle: RawHandle) -> NamedPipe {
+ NamedPipe {
+ inner: Arc::new(Inner {
+ // Safety: not really unsafe
+ handle: pipe::NamedPipe::from_raw_handle(handle),
+ // transmutes to straddle winapi versions (mio 0.6 is on an
+ // older winapi)
+ connect: Overlapped::new(connect_done),
+ connecting: AtomicBool::new(false),
+ read: Overlapped::new(read_done),
+ write: Overlapped::new(write_done),
+ io: Mutex::new(Io {
+ cp: None,
+ token: None,
+ read: State::None,
+ write: State::None,
+ connect_error: None,
+ }),
+ pool: Mutex::new(BufferPool::with_capacity(2)),
+ }),
+ }
+ }
+}
+
+impl Read for NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ <&NamedPipe as Read>::read(&mut &*self, buf)
+ }
+}
+
+impl Write for NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ <&NamedPipe as Write>::write(&mut &*self, buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ <&NamedPipe as Write>::flush(&mut &*self)
+ }
+}
+
+impl<'a> Read for &'a NamedPipe {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut state = self.inner.io.lock().unwrap();
+
+ if state.token.is_none() {
+ return Err(would_block());
+ }
+
+ match mem::replace(&mut state.read, State::None) {
+ // In theory not possible with `token` checked above,
+ // but return would block for now.
+ State::None => Err(would_block()),
+
+ // A read is in flight, still waiting for it to finish
+ State::Pending(buf, amt) => {
+ state.read = State::Pending(buf, amt);
+ Err(would_block())
+ }
+
+ // We previously read something into `data`, try to copy out some
+ // data. If we copy out all the data schedule a new read and
+ // otherwise store the buffer to get read later.
+ State::Ok(data, cur) => {
+ let n = {
+ let mut remaining = &data[cur..];
+ remaining.read(buf)?
+ };
+ let next = cur + n;
+ if next != data.len() {
+ state.read = State::Ok(data, next);
+ } else {
+ self.inner.put_buffer(data);
+ Inner::schedule_read(&self.inner, &mut state, None);
+ }
+ Ok(n)
+ }
+
+ // Looks like an in-flight read hit an error, return that here while
+ // we schedule a new one.
+ State::Err(e) => {
+ Inner::schedule_read(&self.inner, &mut state, None);
+ if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) {
+ Ok(0)
+ } else {
+ Err(e)
+ }
+ }
+ }
+ }
+}
+
+impl<'a> Write for &'a NamedPipe {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ // Make sure there's no writes pending
+ let mut io = self.inner.io.lock().unwrap();
+
+ if io.token.is_none() {
+ return Err(would_block());
+ }
+
+ match io.write {
+ State::None => {}
+ State::Err(_) => match mem::replace(&mut io.write, State::None) {
+ State::Err(e) => return Err(e),
+ // `io` is locked, so this branch is unreachable
+ _ => unreachable!(),
+ },
+ // any other state should be handled in `write_done`
+ _ => {
+ return Err(would_block());
+ }
+ }
+
+ // Move `buf` onto the heap and fire off the write
+ let mut owned_buf = self.inner.get_buffer();
+ owned_buf.extend(buf);
+ match Inner::maybe_schedule_write(&self.inner, owned_buf, 0, &mut io)? {
+ // Some bytes are written immediately
+ Some(n) => Ok(n),
+ // The write operation is enqueued for the whole buffer
+ None => Ok(buf.len()),
+ }
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Source for NamedPipe {
+ fn register(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, false)?;
+
+ if io.token.is_some() {
+ return Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a `Registry`",
+ ));
+ }
+
+ if io.cp.is_none() {
+ let selector = registry.selector();
+
+ io.cp = Some(selector.clone_port());
+
+ let inner_token = NEXT_TOKEN.fetch_add(2, Relaxed) + 2;
+ selector
+ .inner
+ .cp
+ .add_handle(inner_token, &self.inner.handle)?;
+ }
+
+ io.token = Some(token);
+ drop(io);
+
+ Inner::post_register(&self.inner, None);
+
+ Ok(())
+ }
+
+ fn reregister(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, true)?;
+
+ io.token = Some(token);
+ drop(io);
+
+ Inner::post_register(&self.inner, None);
+
+ Ok(())
+ }
+
+ fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
+ let mut io = self.inner.io.lock().unwrap();
+
+ io.check_association(registry, true)?;
+
+ if io.token.is_none() {
+ return Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ ));
+ }
+
+ io.token = None;
+ Ok(())
+ }
+}
+
+impl AsRawHandle for NamedPipe {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.inner.handle.as_raw_handle()
+ }
+}
+
+impl fmt::Debug for NamedPipe {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.handle.fmt(f)
+ }
+}
+
+impl Drop for NamedPipe {
+ fn drop(&mut self) {
+ // Cancel pending reads/connects, but don't cancel writes to ensure that
+ // everything is flushed out.
+ unsafe {
+ if self.inner.connecting.load(SeqCst) {
+ drop(cancel(&self.inner.handle, &self.inner.connect));
+ }
+
+ let io = self.inner.io.lock().unwrap();
+ if let State::Pending(..) = io.read {
+ drop(cancel(&self.inner.handle, &self.inner.read));
+ }
+ }
+ }
+}
+
+impl Inner {
+ /// Schedules a read to happen in the background, executing an overlapped
+ /// operation.
+ ///
+ /// This function returns `true` if a normal error happens or if the read
+ /// is scheduled in the background. If the pipe is no longer connected
+ /// (ERROR_PIPE_LISTENING) then `false` is returned and no read is
+ /// scheduled.
+ fn schedule_read(me: &Arc<Inner>, io: &mut Io, events: Option<&mut Vec<Event>>) -> bool {
+ // Check to see if a read is already scheduled/completed
+ match io.read {
+ State::None => {}
+ _ => return true,
+ }
+
+ // Allocate a buffer and schedule the read.
+ let mut buf = me.get_buffer();
+ let e = unsafe {
+ let overlapped = me.read.as_ptr() as *mut _;
+ let slice = slice::from_raw_parts_mut(buf.as_mut_ptr(), buf.capacity());
+ me.handle.read_overlapped(slice, overlapped)
+ };
+
+ match e {
+ // See `NamedPipe::connect` above for the rationale behind `forget`
+ Ok(_) => {
+ io.read = State::Pending(buf, 0); // 0 is ignored on read side
+ mem::forget(me.clone());
+ true
+ }
+
+ // If ERROR_PIPE_LISTENING happens then it's not a real read error,
+ // we just need to wait for a connect.
+ Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_LISTENING as i32) => false,
+
+ // If some other error happened, though, we're now readable to give
+ // out the error.
+ Err(e) => {
+ io.read = State::Err(e);
+ io.notify_readable(events);
+ true
+ }
+ }
+ }
+
+ /// Maybe schedules an overlapped write operation.
+ ///
+ /// * `None` means that the overlapped operation was enqueued.
+ /// * `Some(n)` means that `n` bytes were written immediately.
+ ///   Note that `write_done` will fire anyway to clean up the state.
+ fn maybe_schedule_write(
+ me: &Arc<Inner>,
+ buf: Vec<u8>,
+ pos: usize,
+ io: &mut Io,
+ ) -> io::Result<Option<usize>> {
+ // Very similar to `schedule_read` above, just done for the write half.
+ let e = unsafe {
+ let overlapped = me.write.as_ptr() as *mut _;
+ me.handle.write_overlapped(&buf[pos..], overlapped)
+ };
+
+ // See `connect` above for the rationale behind `forget`
+ match e {
+ // `n` bytes are written immediately
+ Ok(Some(n)) => {
+ io.write = State::Ok(buf, pos);
+ mem::forget(me.clone());
+ Ok(Some(n))
+ }
+ // write operation is enqueued
+ Ok(None) => {
+ io.write = State::Pending(buf, pos);
+ mem::forget(me.clone());
+ Ok(None)
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ fn schedule_write(
+ me: &Arc<Inner>,
+ buf: Vec<u8>,
+ pos: usize,
+ io: &mut Io,
+ events: Option<&mut Vec<Event>>,
+ ) {
+ match Inner::maybe_schedule_write(me, buf, pos, io) {
+ Ok(Some(_)) => {
+ // immediate result will be handled in `write_done`,
+ // so we'll reinterpret the `Ok` state
+ let state = mem::replace(&mut io.write, State::None);
+ io.write = match state {
+ State::Ok(buf, pos) => State::Pending(buf, pos),
+ // io is locked, so this branch is unreachable
+ _ => unreachable!(),
+ };
+ mem::forget(me.clone());
+ }
+ Ok(None) => (),
+ Err(e) => {
+ io.write = State::Err(e);
+ io.notify_writable(events);
+ }
+ }
+ }
+
+ fn post_register(me: &Arc<Inner>, mut events: Option<&mut Vec<Event>>) {
+ let mut io = me.io.lock().unwrap();
+ #[allow(clippy::needless_option_as_deref)]
+ if Inner::schedule_read(me, &mut io, events.as_deref_mut()) {
+ if let State::None = io.write {
+ io.notify_writable(events);
+ }
+ }
+ }
+
+ fn get_buffer(&self) -> Vec<u8> {
+ self.pool.lock().unwrap().get(4 * 1024)
+ }
+
+ fn put_buffer(&self, buf: Vec<u8>) {
+ self.pool.lock().unwrap().put(buf)
+ }
+}
+
+unsafe fn cancel<T: AsRawHandle>(handle: &T, overlapped: &Overlapped) -> io::Result<()> {
+ let ret = CancelIoEx(handle.as_raw_handle(), overlapped.as_ptr() as *mut _);
+ // `CancelIoEx` returns 0 on error:
+ // https://docs.microsoft.com/en-us/windows/win32/fileio/cancelioex-func
+ if ret == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+}
+
+fn connect_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `connect` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_conn_overlapped(status.overlapped())) };
+
+ // Flag ourselves as no longer using the `connect` overlapped instances.
+ let prev = me.connecting.swap(false, SeqCst);
+ assert!(prev, "NamedPipe was not previously connecting");
+
+ // Stash away our connect error if one happened
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ unsafe {
+ match me.handle.result(status.overlapped()) {
+ Ok(n) => debug_assert_eq!(n, 0),
+ Err(e) => me.io.lock().unwrap().connect_error = Some(e),
+ }
+ }
+
+ // We essentially just finished a registration, so kick off a
+ // read and register write readiness.
+ Inner::post_register(&me, events);
+}
+
+fn read_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `FromRawArc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `schedule_read` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_read_overlapped(status.overlapped())) };
+
+ // Move from the `Pending` to `Ok` state.
+ let mut io = me.io.lock().unwrap();
+ let mut buf = match mem::replace(&mut io.read, State::None) {
+ State::Pending(buf, _) => buf,
+ _ => unreachable!(),
+ };
+ unsafe {
+ match me.handle.result(status.overlapped()) {
+ Ok(n) => {
+ debug_assert_eq!(status.bytes_transferred() as usize, n);
+ buf.set_len(status.bytes_transferred() as usize);
+ io.read = State::Ok(buf, 0);
+ }
+ Err(e) => {
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ io.read = State::Err(e);
+ }
+ }
+ }
+
+ // Flag our readiness that we've got data.
+ io.notify_readable(events);
+}
+
+fn write_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec<Event>>) {
+ let status = CompletionStatus::from_entry(status);
+
+ // Acquire the `Arc<Inner>`. Note that we should be guaranteed that
+ // the refcount is available to us due to the `mem::forget` in
+ // `schedule_write` above.
+ let me = unsafe { Arc::from_raw(Inner::ptr_from_write_overlapped(status.overlapped())) };
+
+ // Make the state change out of `Pending`. If we wrote the entire buffer
+ // then we're writable again and otherwise we schedule another write.
+ let mut io = me.io.lock().unwrap();
+ let (buf, pos) = match mem::replace(&mut io.write, State::None) {
+ // `Ok` here means that the operation completed immediately and
+ // `bytes_transferred` has already been reported to the client.
+ State::Ok(..) => {
+ io.notify_writable(events);
+ return;
+ }
+ State::Pending(buf, pos) => (buf, pos),
+ _ => unreachable!(),
+ };
+
+ unsafe {
+ match me.handle.result(status.overlapped()) {
+ Ok(n) => {
+ debug_assert_eq!(status.bytes_transferred() as usize, n);
+ let new_pos = pos + (status.bytes_transferred() as usize);
+ if new_pos == buf.len() {
+ me.put_buffer(buf);
+ io.notify_writable(events);
+ } else {
+ Inner::schedule_write(&me, buf, new_pos, &mut io, events);
+ }
+ }
+ Err(e) => {
+ debug_assert_eq!(status.bytes_transferred(), 0);
+ io.write = State::Err(e);
+ io.notify_writable(events);
+ }
+ }
+ }
+}
+
+impl Io {
+ fn check_association(&self, registry: &Registry, required: bool) -> io::Result<()> {
+ match self.cp {
+ Some(ref cp) if !registry.selector().same_port(cp) => Err(io::Error::new(
+ io::ErrorKind::AlreadyExists,
+ "I/O source already registered with a different `Registry`",
+ )),
+ None if required => Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "I/O source not registered with `Registry`",
+ )),
+ _ => Ok(()),
+ }
+ }
+
+ fn notify_readable(&self, events: Option<&mut Vec<Event>>) {
+ if let Some(token) = self.token {
+ let mut ev = Event::new(token);
+ ev.set_readable();
+
+ if let Some(events) = events {
+ events.push(ev);
+ } else {
+ let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status());
+ }
+ }
+ }
+
+ fn notify_writable(&self, events: Option<&mut Vec<Event>>) {
+ if let Some(token) = self.token {
+ let mut ev = Event::new(token);
+ ev.set_writable();
+
+ if let Some(events) = events {
+ events.push(ev);
+ } else {
+ let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status());
+ }
+ }
+ }
+}
+
+struct BufferPool {
+ pool: Vec<Vec<u8>>,
+}
+
+impl BufferPool {
+ fn with_capacity(cap: usize) -> BufferPool {
+ BufferPool {
+ pool: Vec::with_capacity(cap),
+ }
+ }
+
+ fn get(&mut self, default_cap: usize) -> Vec<u8> {
+ self.pool
+ .pop()
+ .unwrap_or_else(|| Vec::with_capacity(default_cap))
+ }
+
+ fn put(&mut self, mut buf: Vec<u8>) {
+ if self.pool.len() < self.pool.capacity() {
+ unsafe {
+ buf.set_len(0);
+ }
+ self.pool.push(buf);
+ }
+ }
+}
--- /dev/null
+use std::io;
+use std::mem;
+use std::net::SocketAddr;
+use std::sync::Once;
+
+use winapi::ctypes::c_int;
+use winapi::shared::in6addr::{in6_addr_u, IN6_ADDR};
+use winapi::shared::inaddr::{in_addr_S_un, IN_ADDR};
+use winapi::shared::ws2def::{ADDRESS_FAMILY, AF_INET, AF_INET6, SOCKADDR, SOCKADDR_IN};
+use winapi::shared::ws2ipdef::{SOCKADDR_IN6_LH_u, SOCKADDR_IN6_LH};
+use winapi::um::winsock2::{ioctlsocket, socket, FIONBIO, INVALID_SOCKET, SOCKET};
+
+/// Initialise the network stack for Windows.
+pub(crate) fn init() {
+ static INIT: Once = Once::new();
+ INIT.call_once(|| {
+ // Let the standard library call `WSAStartup` for us; we can't do it
+ // ourselves because otherwise using any type in `std::net` would panic
+ // when it tries to call `WSAStartup` a second time.
+ drop(std::net::UdpSocket::bind("127.0.0.1:0"));
+ });
+}
+
+/// Create a new non-blocking socket.
+pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: c_int) -> io::Result<SOCKET> {
+ use winapi::um::winsock2::{PF_INET, PF_INET6};
+
+ let domain = match addr {
+ SocketAddr::V4(..) => PF_INET,
+ SocketAddr::V6(..) => PF_INET6,
+ };
+
+ new_socket(domain, socket_type)
+}
+
+pub(crate) fn new_socket(domain: c_int, socket_type: c_int) -> io::Result<SOCKET> {
+ syscall!(
+ socket(domain, socket_type, 0),
+ PartialEq::eq,
+ INVALID_SOCKET
+ )
+ .and_then(|socket| {
+ syscall!(ioctlsocket(socket, FIONBIO, &mut 1), PartialEq::ne, 0).map(|_| socket as SOCKET)
+ })
+}
+
+/// A type with the same memory layout as `SOCKADDR`. Used in converting Rust level
+/// SocketAddr* types into their system representation. The benefit of this specific
+/// type over using `SOCKADDR_STORAGE` is that this type is exactly as large as it
+/// needs to be and not a lot larger, and it can be initialized more cleanly from Rust.
+#[repr(C)]
+pub(crate) union SocketAddrCRepr {
+ v4: SOCKADDR_IN,
+ v6: SOCKADDR_IN6_LH,
+}
+
+impl SocketAddrCRepr {
+ pub(crate) fn as_ptr(&self) -> *const SOCKADDR {
+ self as *const _ as *const SOCKADDR
+ }
+}
+
+pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, c_int) {
+ match addr {
+ SocketAddr::V4(ref addr) => {
+ // `s_addr` is stored as big-endian on all machines and the array is in
+ // big-endian order, so the native endian conversion method is used so
+ // that the value is never byte-swapped.
+ let sin_addr = unsafe {
+ let mut s_un = mem::zeroed::<in_addr_S_un>();
+ *s_un.S_addr_mut() = u32::from_ne_bytes(addr.ip().octets());
+ IN_ADDR { S_un: s_un }
+ };
+
+ let sockaddr_in = SOCKADDR_IN {
+ sin_family: AF_INET as ADDRESS_FAMILY,
+ sin_port: addr.port().to_be(),
+ sin_addr,
+ sin_zero: [0; 8],
+ };
+
+ let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
+ (sockaddr, mem::size_of::<SOCKADDR_IN>() as c_int)
+ }
+ SocketAddr::V6(ref addr) => {
+ let sin6_addr = unsafe {
+ let mut u = mem::zeroed::<in6_addr_u>();
+ *u.Byte_mut() = addr.ip().octets();
+ IN6_ADDR { u }
+ };
+ let u = unsafe {
+ let mut u = mem::zeroed::<SOCKADDR_IN6_LH_u>();
+ *u.sin6_scope_id_mut() = addr.scope_id();
+ u
+ };
+
+ let sockaddr_in6 = SOCKADDR_IN6_LH {
+ sin6_family: AF_INET6 as ADDRESS_FAMILY,
+ sin6_port: addr.port().to_be(),
+ sin6_addr,
+ sin6_flowinfo: addr.flowinfo(),
+ u,
+ };
+
+ let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
+ (sockaddr, mem::size_of::<SOCKADDR_IN6_LH>() as c_int)
+ }
+ }
+}
--- /dev/null
+use crate::sys::windows::Event;
+
+use std::cell::UnsafeCell;
+use std::fmt;
+
+#[cfg(feature = "os-ext")]
+use winapi::um::minwinbase::OVERLAPPED;
+use winapi::um::minwinbase::OVERLAPPED_ENTRY;
+
+#[repr(C)]
+pub(crate) struct Overlapped {
+ inner: UnsafeCell<miow::Overlapped>,
+ pub(crate) callback: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>),
+}
+
+#[cfg(feature = "os-ext")]
+impl Overlapped {
+ pub(crate) fn new(cb: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>)) -> Overlapped {
+ Overlapped {
+ inner: UnsafeCell::new(miow::Overlapped::zero()),
+ callback: cb,
+ }
+ }
+
+ pub(crate) fn as_ptr(&self) -> *const OVERLAPPED {
+ unsafe { (*self.inner.get()).raw() }
+ }
+}
+
+impl fmt::Debug for Overlapped {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Overlapped").finish()
+ }
+}
+
+unsafe impl Send for Overlapped {}
+unsafe impl Sync for Overlapped {}
--- /dev/null
+use super::afd::{self, Afd, AfdPollInfo};
+use super::io_status_block::IoStatusBlock;
+use super::Event;
+use crate::sys::Events;
+
+cfg_net! {
+ use crate::sys::event::{
+ ERROR_FLAGS, READABLE_FLAGS, READ_CLOSED_FLAGS, WRITABLE_FLAGS, WRITE_CLOSED_FLAGS,
+ };
+ use crate::Interest;
+}
+
+use miow::iocp::{CompletionPort, CompletionStatus};
+use std::collections::VecDeque;
+use std::io;
+use std::marker::PhantomPinned;
+use std::os::windows::io::RawSocket;
+use std::pin::Pin;
+#[cfg(debug_assertions)]
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+use winapi::shared::ntdef::NT_SUCCESS;
+use winapi::shared::ntdef::{HANDLE, PVOID};
+use winapi::shared::ntstatus::STATUS_CANCELLED;
+use winapi::shared::winerror::{ERROR_INVALID_HANDLE, ERROR_IO_PENDING, WAIT_TIMEOUT};
+use winapi::um::minwinbase::OVERLAPPED;
+
+#[derive(Debug)]
+struct AfdGroup {
+ #[cfg_attr(not(feature = "net"), allow(dead_code))]
+ cp: Arc<CompletionPort>,
+ afd_group: Mutex<Vec<Arc<Afd>>>,
+}
+
+impl AfdGroup {
+ pub fn new(cp: Arc<CompletionPort>) -> AfdGroup {
+ AfdGroup {
+ afd_group: Mutex::new(Vec::new()),
+ cp,
+ }
+ }
+
+ pub fn release_unused_afd(&self) {
+ let mut afd_group = self.afd_group.lock().unwrap();
+ afd_group.retain(|g| Arc::strong_count(g) > 1);
+ }
+}
+
+cfg_io_source! {
+ const POLL_GROUP__MAX_GROUP_SIZE: usize = 32;
+
+ impl AfdGroup {
+ pub fn acquire(&self) -> io::Result<Arc<Afd>> {
+ let mut afd_group = self.afd_group.lock().unwrap();
+ if afd_group.len() == 0 {
+ self._alloc_afd_group(&mut afd_group)?;
+ } else {
+ // + 1 reference in Vec
+ if Arc::strong_count(afd_group.last().unwrap()) > POLL_GROUP__MAX_GROUP_SIZE {
+ self._alloc_afd_group(&mut afd_group)?;
+ }
+ }
+
+ match afd_group.last() {
+ Some(arc) => Ok(arc.clone()),
+ None => unreachable!(
+ "Cannot acquire afd, {:#?}, afd_group: {:#?}",
+ self, afd_group
+ ),
+ }
+ }
+
+ fn _alloc_afd_group(&self, afd_group: &mut Vec<Arc<Afd>>) -> io::Result<()> {
+ let afd = Afd::new(&self.cp)?;
+ let arc = Arc::new(afd);
+ afd_group.push(arc);
+ Ok(())
+ }
+ }
+}
+
+#[derive(Debug)]
+enum SockPollStatus {
+ Idle,
+ Pending,
+ Cancelled,
+}
+
+#[derive(Debug)]
+pub struct SockState {
+ iosb: IoStatusBlock,
+ poll_info: AfdPollInfo,
+ afd: Arc<Afd>,
+
+ base_socket: RawSocket,
+
+ user_evts: u32,
+ pending_evts: u32,
+
+ user_data: u64,
+
+ poll_status: SockPollStatus,
+ delete_pending: bool,
+
+ // last raw os error
+ error: Option<i32>,
+
+ _pinned: PhantomPinned,
+}
+
+impl SockState {
+ fn update(&mut self, self_arc: &Pin<Arc<Mutex<SockState>>>) -> io::Result<()> {
+ assert!(!self.delete_pending);
+
+ // make sure to reset previous error before a new update
+ self.error = None;
+
+ if let SockPollStatus::Pending = self.poll_status {
+ if (self.user_evts & afd::KNOWN_EVENTS & !self.pending_evts) == 0 {
+ /* All the events the user is interested in are already being monitored by
+ * the pending poll operation. It might spuriously complete because of an
+ * event that we're no longer interested in; when that happens we'll submit
+ * a new poll operation with the updated event mask. */
+ } else {
+ /* A poll operation is already pending, but it's not monitoring for all the
+ * events that the user is interested in. Therefore, cancel the pending
+ * poll operation; when we receive its completion packet, a new poll
+ * operation will be submitted with the correct event mask. */
+ if let Err(e) = self.cancel() {
+ self.error = e.raw_os_error();
+ return Err(e);
+ }
+ return Ok(());
+ }
+ } else if let SockPollStatus::Cancelled = self.poll_status {
+ /* The poll operation has already been cancelled, we're still waiting for
+ * it to return. For now, there's nothing that needs to be done. */
+ } else if let SockPollStatus::Idle = self.poll_status {
+ /* No poll operation is pending; start one. */
+ self.poll_info.exclusive = 0;
+ self.poll_info.number_of_handles = 1;
+ *unsafe { self.poll_info.timeout.QuadPart_mut() } = std::i64::MAX;
+ self.poll_info.handles[0].handle = self.base_socket as HANDLE;
+ self.poll_info.handles[0].status = 0;
+ self.poll_info.handles[0].events = self.user_evts | afd::POLL_LOCAL_CLOSE;
+
+ // Increase the ref count as the memory will be used by the kernel.
+ let overlapped_ptr = into_overlapped(self_arc.clone());
+
+ let result = unsafe {
+ self.afd
+ .poll(&mut self.poll_info, &mut *self.iosb, overlapped_ptr)
+ };
+ if let Err(e) = result {
+ let code = e.raw_os_error().unwrap();
+ if code == ERROR_IO_PENDING as i32 {
+ /* Overlapped poll operation in progress; this is expected. */
+ } else {
+ // Since the operation failed it means the kernel won't be
+ // using the memory any more.
+ drop(from_overlapped(overlapped_ptr as *mut _));
+ if code == ERROR_INVALID_HANDLE as i32 {
+ /* Socket closed; it'll be dropped. */
+ self.mark_delete();
+ return Ok(());
+ } else {
+ self.error = e.raw_os_error();
+ return Err(e);
+ }
+ }
+ }
+
+ self.poll_status = SockPollStatus::Pending;
+ self.pending_evts = self.user_evts;
+ } else {
+ unreachable!("Invalid poll status during update, {:#?}", self)
+ }
+
+ Ok(())
+ }
+
+ fn cancel(&mut self) -> io::Result<()> {
+ match self.poll_status {
+ SockPollStatus::Pending => {}
+ _ => unreachable!("Invalid poll status during cancel, {:#?}", self),
+ };
+ unsafe {
+ self.afd.cancel(&mut *self.iosb)?;
+ }
+ self.poll_status = SockPollStatus::Cancelled;
+ self.pending_evts = 0;
+ Ok(())
+ }
+
+ // This function is called for the overlapped, which is used as an `Arc<Mutex<SockState>>`. Watch out for reference counting.
+ fn feed_event(&mut self) -> Option<Event> {
+ self.poll_status = SockPollStatus::Idle;
+ self.pending_evts = 0;
+
+ let mut afd_events = 0;
+ // We use the status info in the IO_STATUS_BLOCK to determine the socket poll status. It is unsafe to use a pointer to the IO_STATUS_BLOCK.
+ unsafe {
+ if self.delete_pending {
+ return None;
+ } else if self.iosb.u.Status == STATUS_CANCELLED {
+ /* The poll request was cancelled by CancelIoEx. */
+ } else if !NT_SUCCESS(self.iosb.u.Status) {
+ /* The overlapped request itself failed in an unexpected way. */
+ afd_events = afd::POLL_CONNECT_FAIL;
+ } else if self.poll_info.number_of_handles < 1 {
+ /* This poll operation succeeded but didn't report any socket events. */
+ } else if self.poll_info.handles[0].events & afd::POLL_LOCAL_CLOSE != 0 {
+ /* The poll operation reported that the socket was closed. */
+ self.mark_delete();
+ return None;
+ } else {
+ afd_events = self.poll_info.handles[0].events;
+ }
+ }
+
+ afd_events &= self.user_evts;
+
+ if afd_events == 0 {
+ return None;
+ }
+
+ // In mio we have to simulate edge-triggered behavior to match API usage.
+ // The strategy is to intercept all read/write calls from the user that could return `WouldBlock`,
+ // and then reregister the socket to reset the interests.
+ self.user_evts &= !afd_events;
+
+ Some(Event {
+ data: self.user_data,
+ flags: afd_events,
+ })
+ }
+
+ pub fn is_pending_deletion(&self) -> bool {
+ self.delete_pending
+ }
+
+ pub fn mark_delete(&mut self) {
+ if !self.delete_pending {
+ if let SockPollStatus::Pending = self.poll_status {
+ drop(self.cancel());
+ }
+
+ self.delete_pending = true;
+ }
+ }
+
+ fn has_error(&self) -> bool {
+ self.error.is_some()
+ }
+}
+
+cfg_io_source! {
+ impl SockState {
+ fn new(raw_socket: RawSocket, afd: Arc<Afd>) -> io::Result<SockState> {
+ Ok(SockState {
+ iosb: IoStatusBlock::zeroed(),
+ poll_info: AfdPollInfo::zeroed(),
+ afd,
+ base_socket: get_base_socket(raw_socket)?,
+ user_evts: 0,
+ pending_evts: 0,
+ user_data: 0,
+ poll_status: SockPollStatus::Idle,
+ delete_pending: false,
+ error: None,
+ _pinned: PhantomPinned,
+ })
+ }
+
+ /// Returns true if the socket needs to be added to the update queue, false otherwise.
+ fn set_event(&mut self, ev: Event) -> bool {
+ /* afd::POLL_CONNECT_FAIL and afd::POLL_ABORT are always reported, even when not requested by the caller. */
+ let events = ev.flags | afd::POLL_CONNECT_FAIL | afd::POLL_ABORT;
+
+ self.user_evts = events;
+ self.user_data = ev.data;
+
+ (events & !self.pending_evts) != 0
+ }
+ }
+}
+
+impl Drop for SockState {
+ fn drop(&mut self) {
+ self.mark_delete();
+ }
+}
+
+/// Converts a pinned `Arc<Mutex<SockState>>` into a raw pointer.
+/// To revert see `from_overlapped`.
+fn into_overlapped(sock_state: Pin<Arc<Mutex<SockState>>>) -> PVOID {
+ let overlapped_ptr: *const Mutex<SockState> =
+ unsafe { Arc::into_raw(Pin::into_inner_unchecked(sock_state)) };
+ overlapped_ptr as *mut _
+}
+
+/// Converts a raw overlapped pointer back into a pinned `Arc<Mutex<SockState>>`.
+/// Reverts `into_overlapped`.
+fn from_overlapped(ptr: *mut OVERLAPPED) -> Pin<Arc<Mutex<SockState>>> {
+ let sock_ptr: *const Mutex<SockState> = ptr as *const _;
+ unsafe { Pin::new_unchecked(Arc::from_raw(sock_ptr)) }
+}
+
+/// Each Selector has a globally unique(ish) ID associated with it. This ID
+/// gets tracked by `TcpStream`, `TcpListener`, etc. when they are first
+/// registered with the `Selector`. If a type that was previously associated with
+/// a `Selector` attempts to register itself with a different `Selector`, the
+/// operation will return an error. This matches Windows behavior.
+#[cfg(debug_assertions)]
+static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
+
+/// Windows implementation of `sys::Selector`
+///
+/// Edge-triggered event notification is simulated by resetting the internal event flag of each socket state (`SockState`)
+/// and setting all events back by intercepting all requests that could cause `io::ErrorKind::WouldBlock` to happen.
+///
+/// This selector currently only supports sockets because the `Afd` driver is winsock2-specific.
+#[derive(Debug)]
+pub struct Selector {
+ #[cfg(debug_assertions)]
+ id: usize,
+ pub(super) inner: Arc<SelectorInner>,
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool,
+}
+
+impl Selector {
+ pub fn new() -> io::Result<Selector> {
+ SelectorInner::new().map(|inner| {
+ #[cfg(debug_assertions)]
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
+ Selector {
+ #[cfg(debug_assertions)]
+ id,
+ inner: Arc::new(inner),
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(false),
+ }
+ })
+ }
+
+ pub fn try_clone(&self) -> io::Result<Selector> {
+ Ok(Selector {
+ #[cfg(debug_assertions)]
+ id: self.id,
+ inner: Arc::clone(&self.inner),
+ #[cfg(debug_assertions)]
+ has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)),
+ })
+ }
+
+ /// # Safety
+ ///
+ /// This requires a mutable reference to self because only a single thread
+ /// can poll IOCP at a time.
+ pub fn select(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ self.inner.select(events, timeout)
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn register_waker(&self) -> bool {
+ self.has_waker.swap(true, Ordering::AcqRel)
+ }
+
+ pub(super) fn clone_port(&self) -> Arc<CompletionPort> {
+ self.inner.cp.clone()
+ }
+
+ #[cfg(feature = "os-ext")]
+ pub(super) fn same_port(&self, other: &Arc<CompletionPort>) -> bool {
+ Arc::ptr_eq(&self.inner.cp, other)
+ }
+}
+
+cfg_io_source! {
+ use super::InternalState;
+ use crate::Token;
+
+ impl Selector {
+ pub(super) fn register(
+ &self,
+ socket: RawSocket,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<InternalState> {
+ SelectorInner::register(&self.inner, socket, token, interests)
+ }
+
+ pub(super) fn reregister(
+ &self,
+ state: Pin<Arc<Mutex<SockState>>>,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.inner.reregister(state, token, interests)
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn id(&self) -> usize {
+ self.id
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct SelectorInner {
+ pub(super) cp: Arc<CompletionPort>,
+ update_queue: Mutex<VecDeque<Pin<Arc<Mutex<SockState>>>>>,
+ afd_group: AfdGroup,
+ is_polling: AtomicBool,
+}
+
+// Thread safety is ensured by taking locks manually.
+unsafe impl Sync for SelectorInner {}
+
+impl SelectorInner {
+ pub fn new() -> io::Result<SelectorInner> {
+ CompletionPort::new(0).map(|cp| {
+ let cp = Arc::new(cp);
+ let cp_afd = Arc::clone(&cp);
+
+ SelectorInner {
+ cp,
+ update_queue: Mutex::new(VecDeque::new()),
+ afd_group: AfdGroup::new(cp_afd),
+ is_polling: AtomicBool::new(false),
+ }
+ })
+ }
+
+ /// # Safety
+ ///
+ /// May only be called via `Selector::select`.
+ pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
+ events.clear();
+
+ if timeout.is_none() {
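+ // Without a timeout, keep polling until at least one user-visible event is
+ // returned; a completion may produce no events (e.g. a cancelled poll operation).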
+ loop {
+ let len = self.select2(&mut events.statuses, &mut events.events, None)?;
+ if len == 0 {
+ continue;
+ }
+ break Ok(());
+ }
+ } else {
+ self.select2(&mut events.statuses, &mut events.events, timeout)?;
+ Ok(())
+ }
+ }
+
+ pub fn select2(
+ &self,
+ statuses: &mut [CompletionStatus],
+ events: &mut Vec<Event>,
+ timeout: Option<Duration>,
+ ) -> io::Result<usize> {
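+ // Only a single thread may poll the completion port at a time; see `Selector::select`.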
+ assert!(!self.is_polling.swap(true, Ordering::AcqRel));
+
+ unsafe { self.update_sockets_events() }?;
+
+ let result = self.cp.get_many(statuses, timeout);
+
+ self.is_polling.store(false, Ordering::Relaxed);
+
+ match result {
+ Ok(iocp_events) => Ok(unsafe { self.feed_events(events, iocp_events) }),
+ Err(ref e) if e.raw_os_error() == Some(WAIT_TIMEOUT as i32) => Ok(0),
+ Err(e) => Err(e),
+ }
+ }
+
+ unsafe fn update_sockets_events(&self) -> io::Result<()> {
+ let mut update_queue = self.update_queue.lock().unwrap();
+ for sock in update_queue.iter_mut() {
+ let mut sock_internal = sock.lock().unwrap();
+ if !sock_internal.is_pending_deletion() {
+ sock_internal.update(sock)?;
+ }
+ }
+
+ // Remove all socket states that have no error; they have an AFD poll operation pending.
+ update_queue.retain(|sock| sock.lock().unwrap().has_error());
+
+ self.afd_group.release_unused_afd();
+ Ok(())
+ }
+
+ // Returns the number of `iocp_events` processed rather than the events themselves.
+ unsafe fn feed_events(
+ &self,
+ events: &mut Vec<Event>,
+ iocp_events: &[CompletionStatus],
+ ) -> usize {
+ let mut n = 0;
+ let mut update_queue = self.update_queue.lock().unwrap();
+ for iocp_event in iocp_events.iter() {
+ if iocp_event.overlapped().is_null() {
+ events.push(Event::from_completion_status(iocp_event));
+ n += 1;
+ continue;
+ } else if iocp_event.token() % 2 == 1 {
+ // Handle is a named pipe. This could be extended to be any non-AFD event.
+ let callback = (*(iocp_event.overlapped() as *mut super::Overlapped)).callback;
+
+ let len = events.len();
+ callback(iocp_event.entry(), Some(events));
+ n += events.len() - len;
+ continue;
+ }
+
+ let sock_state = from_overlapped(iocp_event.overlapped());
+ let mut sock_guard = sock_state.lock().unwrap();
+ if let Some(e) = sock_guard.feed_event() {
+ events.push(e);
+ n += 1;
+ }
+
+ if !sock_guard.is_pending_deletion() {
+ update_queue.push_back(sock_state.clone());
+ }
+ }
+ self.afd_group.release_unused_afd();
+ n
+ }
+}
+
+cfg_io_source! {
+ use std::mem::size_of;
+ use std::ptr::null_mut;
+ use winapi::um::mswsock;
+ use winapi::um::winsock2::WSAGetLastError;
+ use winapi::um::winsock2::{WSAIoctl, SOCKET_ERROR};
+
+ impl SelectorInner {
+ fn register(
+ this: &Arc<Self>,
+ socket: RawSocket,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<InternalState> {
+ let flags = interests_to_afd_flags(interests);
+
+ let sock = {
+ let sock = this._alloc_sock_for_rawsocket(socket)?;
+ let event = Event {
+ flags,
+ data: token.0 as u64,
+ };
+ sock.lock().unwrap().set_event(event);
+ sock
+ };
+
+ let state = InternalState {
+ selector: this.clone(),
+ token,
+ interests,
+ sock_state: sock.clone(),
+ };
+
+ this.queue_state(sock);
+ unsafe { this.update_sockets_events_if_polling()? };
+
+ Ok(state)
+ }
+
+ // Directly accessed in `IoSourceState::do_io`.
+ pub(super) fn reregister(
+ &self,
+ state: Pin<Arc<Mutex<SockState>>>,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ {
+ let event = Event {
+ flags: interests_to_afd_flags(interests),
+ data: token.0 as u64,
+ };
+
+ state.lock().unwrap().set_event(event);
+ }
+
+ // FIXME: a socket for which `has_error` is true should not be re-added to
+ // the update queue because it's already there.
+ self.queue_state(state);
+ unsafe { self.update_sockets_events_if_polling() }
+ }
+
+ /// This function is called by register() and reregister() to start an
+ /// IOCTL_AFD_POLL operation corresponding to the registered events, but
+ /// only if necessary.
+ ///
+ /// Since it is not possible to modify or synchronously cancel an AFD_POLL
+ /// operation, and there can be only one active AFD_POLL operation per
+ /// (socket, completion port) pair at any time, it is expensive to change
+ /// a socket's event registration after it has been submitted to the kernel.
+ ///
+ /// Therefore, if no other threads are polling when interest in a socket
+ /// event is (re)registered, the socket is added to the 'update queue', but
+ /// the actual syscall to start the IOCTL_AFD_POLL operation is deferred
+ /// until just before the GetQueuedCompletionStatusEx() syscall is made.
+ ///
+ /// However, when another thread is already blocked on
+ /// GetQueuedCompletionStatusEx() we tell the kernel about the registered
+ /// socket event(s) immediately.
+ unsafe fn update_sockets_events_if_polling(&self) -> io::Result<()> {
+ if self.is_polling.load(Ordering::Acquire) {
+ self.update_sockets_events()
+ } else {
+ Ok(())
+ }
+ }
+
+ fn queue_state(&self, sock_state: Pin<Arc<Mutex<SockState>>>) {
+ let mut update_queue = self.update_queue.lock().unwrap();
+ update_queue.push_back(sock_state);
+ }
+
+ fn _alloc_sock_for_rawsocket(
+ &self,
+ raw_socket: RawSocket,
+ ) -> io::Result<Pin<Arc<Mutex<SockState>>>> {
+ let afd = self.afd_group.acquire()?;
+ Ok(Arc::pin(Mutex::new(SockState::new(raw_socket, afd)?)))
+ }
+ }
+
+ fn try_get_base_socket(raw_socket: RawSocket, ioctl: u32) -> Result<RawSocket, i32> {
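+ // Ask Winsock for the base socket handle using the given IOCTL; on failure
+ // the raw WSA error code is returned.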
+ let mut base_socket: RawSocket = 0;
+ let mut bytes: u32 = 0;
+ unsafe {
+ if WSAIoctl(
+ raw_socket as usize,
+ ioctl,
+ null_mut(),
+ 0,
+ &mut base_socket as *mut _ as PVOID,
+ size_of::<RawSocket>() as u32,
+ &mut bytes,
+ null_mut(),
+ None,
+ ) != SOCKET_ERROR
+ {
+ Ok(base_socket)
+ } else {
+ Err(WSAGetLastError())
+ }
+ }
+ }
+
+ fn get_base_socket(raw_socket: RawSocket) -> io::Result<RawSocket> {
+ let res = try_get_base_socket(raw_socket, mswsock::SIO_BASE_HANDLE);
+ if let Ok(base_socket) = res {
+ return Ok(base_socket);
+ }
+
+ // The `SIO_BASE_HANDLE` should not be intercepted by LSPs, therefore
+ // it should not fail as long as `raw_socket` is a valid socket. See
+ // https://docs.microsoft.com/en-us/windows/win32/winsock/winsock-ioctls.
+ // However, at least one known LSP deliberately breaks it, so we try
+ // some alternative IOCTLs, starting with the most appropriate one.
+ for &ioctl in &[
+ mswsock::SIO_BSP_HANDLE_SELECT,
+ mswsock::SIO_BSP_HANDLE_POLL,
+ mswsock::SIO_BSP_HANDLE,
+ ] {
+ if let Ok(base_socket) = try_get_base_socket(raw_socket, ioctl) {
+ // Since we know now that we're dealing with an LSP (otherwise
+ // SIO_BASE_HANDLE wouldn't have failed), only return any result
+ // when it is different from the original `raw_socket`.
+ if base_socket != raw_socket {
+ return Ok(base_socket);
+ }
+ }
+ }
+
+ // If the alternative IOCTLs also failed, return the original error.
+ let os_error = res.unwrap_err();
+ let err = io::Error::from_raw_os_error(os_error);
+ Err(err)
+ }
+}
+
+impl Drop for SelectorInner {
+ fn drop(&mut self) {
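+ // Drain any outstanding completion statuses so that memory handed to the
+ // kernel (socket state `Arc`s and named pipe overlappeds) is released.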
+ loop {
+ let events_num: usize;
+ let mut statuses: [CompletionStatus; 1024] = [CompletionStatus::zero(); 1024];
+
+ let result = self
+ .cp
+ .get_many(&mut statuses, Some(std::time::Duration::from_millis(0)));
+ match result {
+ Ok(iocp_events) => {
+ events_num = iocp_events.iter().len();
+ for iocp_event in iocp_events.iter() {
+ if iocp_event.overlapped().is_null() {
+ // Custom event
+ } else if iocp_event.token() % 2 == 1 {
+ // Named pipe, dispatch the event so it can release resources
+ let callback = unsafe {
+ (*(iocp_event.overlapped() as *mut super::Overlapped)).callback
+ };
+
+ callback(iocp_event.entry(), None);
+ } else {
+ // Drain the socket state to release the memory held by the Arc reference.
+ let _sock_state = from_overlapped(iocp_event.overlapped());
+ }
+ }
+ }
+
+ Err(_) => {
+ break;
+ }
+ }
+
+ if events_num == 0 {
+ // All completion statuses have been drained; stop looping.
+ break;
+ }
+ }
+
+ self.afd_group.release_unused_afd();
+ }
+}
+
+cfg_net! {
+ fn interests_to_afd_flags(interests: Interest) -> u32 {
+ let mut flags = 0;
+
+ if interests.is_readable() {
+ flags |= READABLE_FLAGS | READ_CLOSED_FLAGS | ERROR_FLAGS;
+ }
+
+ if interests.is_writable() {
+ flags |= WRITABLE_FLAGS | WRITE_CLOSED_FLAGS | ERROR_FLAGS;
+ }
+
+ flags
+ }
+}
--- /dev/null
+use std::io;
+use std::net::{self, SocketAddr};
+use std::os::windows::io::AsRawSocket;
+
+use winapi::um::winsock2::{self, PF_INET, PF_INET6, SOCKET, SOCKET_ERROR, SOCK_STREAM};
+
+use crate::sys::windows::net::{init, new_socket, socket_addr};
+
+pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<SOCKET> {
+ init();
+ let domain = match address {
+ SocketAddr::V4(_) => PF_INET,
+ SocketAddr::V6(_) => PF_INET6,
+ };
+ new_socket(domain, SOCK_STREAM)
+}
+
+pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> {
+ use winsock2::bind;
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(
+ bind(
+ socket.as_raw_socket() as _,
+ raw_addr.as_ptr(),
+ raw_addr_length
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+ Ok(())
+}
+
+pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> {
+ use winsock2::connect;
+
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ let res = syscall!(
+ connect(
+ socket.as_raw_socket() as _,
+ raw_addr.as_ptr(),
+ raw_addr_length
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ );
+
+ match res {
+ Err(err) if err.kind() != io::ErrorKind::WouldBlock => Err(err),
+ _ => Ok(()),
+ }
+}
+
+pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> {
+ use std::convert::TryInto;
+ use winsock2::listen;
+
+ let backlog = backlog.try_into().unwrap_or(i32::max_value());
+ syscall!(
+ listen(socket.as_raw_socket() as _, backlog),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+ Ok(())
+}
+
+pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
+ // The non-blocking state of `listener` is inherited. See
+ // https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-accept#remarks.
+ listener.accept()
+}
--- /dev/null
+use std::io;
+use std::mem::{self, MaybeUninit};
+use std::net::{self, SocketAddr};
+use std::os::windows::io::{AsRawSocket, FromRawSocket};
+use std::os::windows::raw::SOCKET as StdSocket; // winapi uses usize, stdlib uses u32/u64.
+
+use winapi::ctypes::c_int;
+use winapi::shared::ws2def::IPPROTO_IPV6;
+use winapi::shared::ws2ipdef::IPV6_V6ONLY;
+use winapi::um::winsock2::{bind as win_bind, closesocket, getsockopt, SOCKET_ERROR, SOCK_DGRAM};
+
+use crate::sys::windows::net::{init, new_ip_socket, socket_addr};
+
+pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
+ init();
+ new_ip_socket(addr, SOCK_DGRAM).and_then(|socket| {
+ let (raw_addr, raw_addr_length) = socket_addr(&addr);
+ syscall!(
+ win_bind(socket, raw_addr.as_ptr(), raw_addr_length,),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )
+ .map_err(|err| {
+ // Close the socket if we hit an error, ignoring the error
+ // from closing since we can't pass back two errors.
+ let _ = unsafe { closesocket(socket) };
+ err
+ })
+ .map(|_| unsafe { net::UdpSocket::from_raw_socket(socket as StdSocket) })
+ })
+}
+
+pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
+ let mut optval: MaybeUninit<c_int> = MaybeUninit::uninit();
+ let mut optlen = mem::size_of::<c_int>() as c_int;
+
+ syscall!(
+ getsockopt(
+ socket.as_raw_socket() as usize,
+ IPPROTO_IPV6 as c_int,
+ IPV6_V6ONLY as c_int,
+ optval.as_mut_ptr().cast(),
+ &mut optlen,
+ ),
+ PartialEq::eq,
+ SOCKET_ERROR
+ )?;
+
+ debug_assert_eq!(optlen as usize, mem::size_of::<c_int>());
+ // Safety: `getsockopt` initialised `optval` for us.
+ let optval = unsafe { optval.assume_init() };
+ Ok(optval != 0)
+}
--- /dev/null
+use crate::sys::windows::Event;
+use crate::sys::windows::Selector;
+use crate::Token;
+
+use miow::iocp::CompletionPort;
+use std::io;
+use std::sync::Arc;
+
+#[derive(Debug)]
+pub struct Waker {
+ token: Token,
+ port: Arc<CompletionPort>,
+}
+
+impl Waker {
+ pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
+ Ok(Waker {
+ token,
+ port: selector.clone_port(),
+ })
+ }
+
+ pub fn wake(&self) -> io::Result<()> {
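+ // Post a readable event carrying our token directly to the completion port;
+ // the selector surfaces it as a regular event.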
+ let mut ev = Event::new(self.token);
+ ev.set_readable();
+
+ self.port.post(ev.to_completion_status())
+ }
+}
--- /dev/null
+/// Associates readiness events with [`event::Source`]s.
+///
+/// `Token` is a wrapper around `usize` and is used as an argument to
+/// [`Registry::register`] and [`Registry::reregister`].
+///
+/// See [`Poll`] for more documentation on polling.
+///
+/// [`event::Source`]: ./event/trait.Source.html
+/// [`Poll`]: struct.Poll.html
+/// [`Registry::register`]: struct.Registry.html#method.register
+/// [`Registry::reregister`]: struct.Registry.html#method.reregister
+///
+/// # Example
+///
+/// Using `Token` to track which socket generated the event. In this example,
+/// `HashMap` is used, but usually something like [`slab`] is better.
+///
+/// [`slab`]: https://crates.io/crates/slab
+///
+#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
+#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
+/// # use std::error::Error;
+/// # fn main() -> Result<(), Box<dyn Error>> {
+/// use mio::{Events, Interest, Poll, Token};
+/// use mio::net::TcpListener;
+///
+/// use std::thread;
+/// use std::io::{self, Read};
+/// use std::collections::HashMap;
+///
+/// // After this number of sockets is accepted, the server will shutdown.
+/// const MAX_SOCKETS: usize = 32;
+///
+/// // Pick a token that will not be used by any other socket and use that one
+/// // for the listener.
+/// const LISTENER: Token = Token(1024);
+///
+/// // Used to store the sockets.
+/// let mut sockets = HashMap::new();
+///
+/// // This is used to generate a unique token for a socket
+/// let mut next_socket_index = 0;
+///
+/// // The `Poll` instance
+/// let mut poll = Poll::new()?;
+///
+/// // Tcp listener
+/// let mut listener = TcpListener::bind("127.0.0.1:0".parse()?)?;
+///
+/// // Register the listener
+/// poll.registry().register(&mut listener, LISTENER, Interest::READABLE)?;
+///
+/// // Spawn a thread that will connect a bunch of sockets then close them
+/// let addr = listener.local_addr()?;
+/// thread::spawn(move || {
+/// use std::net::TcpStream;
+///
+/// // The +1 connects an extra socket to signal the server to shut down
+/// for _ in 0..(MAX_SOCKETS+1) {
+/// // Connect then drop the socket
+/// let _ = TcpStream::connect(addr).unwrap();
+/// }
+/// });
+///
+/// // Event storage
+/// let mut events = Events::with_capacity(1024);
+///
+/// // Read buffer, this will never actually get filled
+/// let mut buf = [0; 256];
+///
+/// // The main event loop
+/// loop {
+/// // Wait for events
+/// poll.poll(&mut events, None)?;
+///
+/// for event in &events {
+/// match event.token() {
+/// LISTENER => {
+/// // Perform operations in a loop until `WouldBlock` is
+/// // encountered.
+/// loop {
+/// match listener.accept() {
+/// Ok((mut socket, _)) => {
+/// // Shutdown the server
+/// if next_socket_index == MAX_SOCKETS {
+/// return Ok(());
+/// }
+///
+/// // Get the token for the socket
+/// let token = Token(next_socket_index);
+/// next_socket_index += 1;
+///
+/// // Register the new socket w/ poll
+/// poll.registry().register(&mut socket, token, Interest::READABLE)?;
+///
+/// // Store the socket
+/// sockets.insert(token, socket);
+/// }
+/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+/// // Socket is not ready anymore, stop accepting
+/// break;
+/// }
+/// e => panic!("err={:?}", e), // Unexpected error
+/// }
+/// }
+/// }
+/// token => {
+/// // Always operate in a loop
+/// loop {
+/// match sockets.get_mut(&token).unwrap().read(&mut buf) {
+/// Ok(0) => {
+/// // Socket is closed, remove it from the map
+/// sockets.remove(&token);
+/// break;
+/// }
+/// // Data is not actually sent in this example
+/// Ok(_) => unreachable!(),
+/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+/// // Socket is not ready anymore, stop reading
+/// break;
+/// }
+/// e => panic!("err={:?}", e), // Unexpected error
+/// }
+/// }
+/// }
+/// }
+/// }
+/// }
+/// # }
+/// ```
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Token(pub usize);
+
+impl From<Token> for usize {
+ fn from(val: Token) -> usize {
+ val.0
+ }
+}
--- /dev/null
+use crate::{sys, Registry, Token};
+
+use std::io;
+
+/// Waker allows cross-thread waking of [`Poll`].
+///
+/// Once created, calling [`wake`], possibly from another thread, will cause an
+/// event with [`readable`] readiness and the provided `token` to be delivered.
+///
+/// [`Poll`]: struct.Poll.html
+/// [`readable`]: ./event/struct.Event.html#method.is_readable
+/// [`wake`]: struct.Waker.html#method.wake
+///
+/// # Notes
+///
+/// `Waker` events are only guaranteed to be delivered while the `Waker` value
+/// is alive.
+///
+/// Only a single `Waker` can be active per [`Poll`]; if multiple threads need
+/// access to the `Waker` it can be shared via, for example, an `Arc`. What
+/// happens if multiple `Waker`s are registered with the same `Poll` is
+/// unspecified.
+///
+/// # Implementation notes
+///
+/// On platforms that support kqueue this will use the `EVFILT_USER` event
+/// filter; see [implementation notes of `Poll`] for which platforms support
+/// kqueue. On Linux it uses [eventfd].
+///
+/// [implementation notes of `Poll`]: struct.Poll.html#implementation-notes
+/// [eventfd]: http://man7.org/linux/man-pages/man2/eventfd.2.html
+///
+/// # Examples
+///
+/// Wake a [`Poll`] instance from another thread.
+///
+#[cfg_attr(feature = "os-poll", doc = "```")]
+#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
+/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// use std::thread;
+/// use std::time::Duration;
+/// use std::sync::Arc;
+///
+/// use mio::{Events, Token, Poll, Waker};
+///
+/// const WAKE_TOKEN: Token = Token(10);
+///
+/// let mut poll = Poll::new()?;
+/// let mut events = Events::with_capacity(2);
+///
+/// let waker = Arc::new(Waker::new(poll.registry(), WAKE_TOKEN)?);
+///
+/// // We need to keep the Waker alive, so we'll create a clone for the
+/// // thread we create below.
+/// let waker1 = waker.clone();
+/// let handle = thread::spawn(move || {
+/// // Working hard, or hardly working?
+/// thread::sleep(Duration::from_millis(500));
+///
+/// // Now we'll wake the queue on the other thread.
+/// waker1.wake().expect("unable to wake");
+/// });
+///
+/// // On our current thread we'll poll for events, without a timeout.
+/// poll.poll(&mut events, None)?;
+///
+/// // After about 500 milliseconds we should be awoken by the other thread and
+/// // get a single event.
+/// assert!(!events.is_empty());
+/// let waker_event = events.iter().next().unwrap();
+/// assert!(waker_event.is_readable());
+/// assert_eq!(waker_event.token(), WAKE_TOKEN);
+/// # handle.join().unwrap();
+/// # Ok(())
+/// # }
+/// ```
+#[derive(Debug)]
+pub struct Waker {
+ inner: sys::Waker,
+}
+
+impl Waker {
+ /// Create a new `Waker`.
+ pub fn new(registry: &Registry, token: Token) -> io::Result<Waker> {
+ #[cfg(debug_assertions)]
+ registry.register_waker();
+ sys::Waker::new(registry.selector(), token).map(|inner| Waker { inner })
+ }
+
+ /// Wake up the [`Poll`] associated with this `Waker`.
+ ///
+ /// [`Poll`]: struct.Poll.html
+ pub fn wake(&self) -> io::Result<()> {
+ self.inner.wake()
+ }
+}
--- /dev/null
+#![cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use mio::{event::Source, Events, Interest, Poll, Registry, Token};
+use std::{
+ fs::File,
+ io, mem,
+ os::unix::io::{AsRawFd, RawFd},
+ pin::Pin,
+ ptr,
+};
+
+mod util;
+use util::{expect_events, expect_no_events, init, temp_file, ExpectEvent};
+
+const UDATA: Token = Token(0xdead_beef);
+
+/// A highly feature-incomplete POSIX AIO event source, suitable for testing
+/// mio's handling of kqueue's EVFILT_AIO.
+struct Aiocb(Pin<Box<libc::aiocb>>);
+
+impl Aiocb {
+ /// Constructs a new `Aiocb` with no associated data.
+ ///
+ /// The resulting `Aiocb` structure is suitable for use with `aio_fsync`.
+ pub fn from_fd(fd: RawFd) -> Aiocb {
+ // Use mem::zeroed instead of explicitly zeroing each field, because the
+ // number and names of reserved fields are OS-dependent. On some OSes,
+ // some reserved fields are used by the kernel for state, and must be
+ // explicitly zeroed when allocated.
+ let mut inner = unsafe { mem::zeroed::<libc::aiocb>() };
+ inner.aio_fildes = fd;
+ inner.aio_sigevent.sigev_notify = libc::SIGEV_NONE;
+ Aiocb(Box::pin(inner))
+ }
+
+ /// Constructs a new `Aiocb` suitable for writing to offset 0 of a file.
+ #[cfg(target_os = "freebsd")]
+ pub fn from_slice(fd: RawFd, buf: &[u8]) -> Aiocb {
+ let mut aiocb = Aiocb::from_fd(fd);
+ aiocb.0.aio_nbytes = buf.len();
+ aiocb.0.aio_buf = buf.as_ptr() as *mut libc::c_void;
+ aiocb
+ }
+
+ pub fn fsync(&mut self) -> io::Result<()> {
+ unsafe {
+ // Safe because we don't move the libc::aiocb
+ let selfp = self.0.as_mut().get_unchecked_mut();
+ match libc::aio_fsync(libc::O_SYNC, selfp) {
+ 0 => Ok(()),
+ _ => Err(io::Error::last_os_error()),
+ }
+ }
+ }
+}
+
+impl Source for Aiocb {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ assert!(interests.is_aio());
+ let udata = usize::from(token);
+ let kq = registry.as_raw_fd();
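+ // Deliver the AIO completion as a kevent on the registry's kqueue, carrying
+ // the token as the `udata` value.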
+ self.0.aio_sigevent.sigev_notify = libc::SIGEV_KEVENT;
+ self.0.aio_sigevent.sigev_signo = kq;
+ self.0.aio_sigevent.sigev_value.sival_ptr = udata as *mut libc::c_void;
+ self.0.aio_sigevent.sigev_notify_thread_id = 0;
+ Ok(())
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.register(registry, token, interests)
+ }
+
+ fn deregister(&mut self, _registry: &Registry) -> io::Result<()> {
+ self.0.aio_sigevent.sigev_notify = libc::SIGEV_NONE;
+ self.0.aio_sigevent.sigev_value.sival_ptr = ptr::null_mut();
+ Ok(())
+ }
+}
+
+#[cfg(target_os = "freebsd")]
+struct Liocb {
+ _aiocbs: Box<[Aiocb]>,
+ /// The actual list passed to `libc::lio_listio`.
+ ///
+ /// It must live for as long as any of the operations are still being
+ /// processed, because the aio subsystem uses its address as a unique
+ /// identifier.
+ list: Box<[*mut libc::aiocb]>,
+ sev: libc::sigevent,
+}
+
+#[cfg(target_os = "freebsd")]
+impl Liocb {
+ fn listio(&mut self) -> io::Result<()> {
+ unsafe {
+ let r = libc::lio_listio(
+ libc::LIO_NOWAIT,
+ self.list.as_ptr(),
+ self.list.len() as i32,
+ &mut self.sev as *mut libc::sigevent,
+ );
+ match r {
+ 0 => Ok(()),
+ _ => Err(io::Error::last_os_error()),
+ }
+ }
+ }
+
+ fn new(inputs: Vec<Aiocb>) -> Liocb {
+ let mut aiocbs = inputs.into_boxed_slice();
+ for aiocb in aiocbs.iter_mut() {
+ aiocb.0.aio_lio_opcode = libc::LIO_WRITE;
+ }
+ let list = aiocbs
+ .iter_mut()
+ .map(|aiocb| &mut *aiocb.0 as *mut libc::aiocb)
+ .collect::<Vec<_>>()
+ .into_boxed_slice();
+ let sev = unsafe { mem::zeroed::<libc::sigevent>() };
+ Liocb {
+ _aiocbs: aiocbs,
+ list,
+ sev,
+ }
+ }
+}
+
+#[cfg(target_os = "freebsd")]
+impl Source for Liocb {
+ fn register(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ assert!(interests.is_lio());
+ let udata = usize::from(token);
+ let kq = registry.as_raw_fd();
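+ // Deliver completion of the whole list as a kevent on the registry's kqueue,
+ // carrying the token as the `udata` value.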
+ self.sev.sigev_notify = libc::SIGEV_KEVENT;
+ self.sev.sigev_signo = kq;
+ self.sev.sigev_value.sival_ptr = udata as *mut libc::c_void;
+ self.sev.sigev_notify_thread_id = 0;
+ Ok(())
+ }
+
+ fn reregister(
+ &mut self,
+ registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.register(registry, token, interests)
+ }
+
+ fn deregister(&mut self, _registry: &Registry) -> io::Result<()> {
+ self.sev.sigev_notify = libc::SIGEV_NONE;
+ self.sev.sigev_value.sival_ptr = ptr::null_mut();
+ Ok(())
+ }
+}
+
+mod aio {
+ use super::*;
+
+ #[test]
+ fn smoke() {
+ init();
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(8);
+
+ let f = File::create(temp_file("aio::smoke")).unwrap();
+ let mut aiocb = Aiocb::from_fd(f.as_raw_fd());
+ poll.registry()
+ .register(&mut aiocb, UDATA, Interest::AIO)
+ .unwrap();
+
+ expect_no_events(&mut poll, &mut events);
+ aiocb.fsync().unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(UDATA, Interest::AIO)],
+ );
+ }
+}
+
+#[cfg(target_os = "freebsd")]
+mod lio {
+ use super::*;
+
+ #[test]
+ fn smoke() {
+ init();
+ let data = b"hello, world!\n";
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(8);
+
+ let f0 = File::create(temp_file("lio::smoke0")).unwrap();
+ let f1 = File::create(temp_file("lio::smoke1")).unwrap();
+ let aiocb0 = Aiocb::from_slice(f0.as_raw_fd(), data);
+ let aiocb1 = Aiocb::from_slice(f1.as_raw_fd(), data);
+ let mut liocb = Liocb::new(vec![aiocb0, aiocb1]);
+ poll.registry()
+ .register(&mut liocb, UDATA, Interest::LIO)
+ .unwrap();
+
+ expect_no_events(&mut poll, &mut events);
+ liocb.listio().unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(UDATA, Interest::LIO)],
+ );
+ }
+}
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use std::io::Read;
+
+use log::debug;
+use mio::net::{TcpListener, TcpStream};
+use mio::{Events, Interest, Poll, Token};
+
+mod util;
+use util::{any_local_address, init};
+
+use self::TestState::{AfterRead, Initial};
+
+const SERVER: Token = Token(0);
+const CLIENT: Token = Token(1);
+
+#[derive(Debug, PartialEq)]
+enum TestState {
+ Initial,
+ AfterRead,
+}
+
+struct TestHandler {
+ srv: TcpListener,
+ cli: TcpStream,
+ state: TestState,
+ shutdown: bool,
+}
+
+impl TestHandler {
+ fn new(srv: TcpListener, cli: TcpStream) -> TestHandler {
+ TestHandler {
+ srv,
+ cli,
+ state: Initial,
+ shutdown: false,
+ }
+ }
+
+ fn handle_read(&mut self, poll: &mut Poll, tok: Token) {
+ debug!("readable; tok={:?}", tok);
+
+ match tok {
+ SERVER => {
+ debug!("server connection ready for accept");
+ let _ = self.srv.accept().unwrap();
+ }
+ CLIENT => {
+ debug!("client readable");
+
+ match self.state {
+ Initial => {
+ let mut buf = [0; 4096];
+ debug!("GOT={:?}", self.cli.read(&mut buf[..]));
+ self.state = AfterRead;
+ }
+ AfterRead => {}
+ }
+
+ let mut buf = Vec::with_capacity(1024);
+
+ match self.cli.read(&mut buf) {
+ Ok(0) => self.shutdown = true,
+ Ok(_) => panic!("the client socket should not be readable"),
+ Err(e) => panic!("Unexpected error {:?}", e),
+ }
+ }
+ _ => panic!("received unknown token {:?}", tok),
+ }
+ poll.registry()
+ .reregister(&mut self.cli, CLIENT, Interest::READABLE)
+ .unwrap();
+ }
+
+ fn handle_write(&mut self, poll: &mut Poll, tok: Token) {
+ match tok {
+ SERVER => panic!("received writable for token 0"),
+ CLIENT => {
+ debug!("client connected");
+ poll.registry()
+ .reregister(&mut self.cli, CLIENT, Interest::READABLE)
+ .unwrap();
+ }
+ _ => panic!("received unknown token {:?}", tok),
+ }
+ }
+}
+
+#[test]
+pub fn close_on_drop() {
+ init();
+ debug!("Starting TEST_CLOSE_ON_DROP");
+ let mut poll = Poll::new().unwrap();
+
+ // == Create & setup server socket
+ let mut srv = TcpListener::bind(any_local_address()).unwrap();
+ let addr = srv.local_addr().unwrap();
+
+ poll.registry()
+ .register(&mut srv, SERVER, Interest::READABLE)
+ .unwrap();
+
+ // == Create & setup client socket
+ let mut sock = TcpStream::connect(addr).unwrap();
+
+ poll.registry()
+ .register(&mut sock, CLIENT, Interest::WRITABLE)
+ .unwrap();
+
+ // == Create storage for events
+ let mut events = Events::with_capacity(1024);
+
+ // == Setup test handler
+ let mut handler = TestHandler::new(srv, sock);
+
+ // == Run test
+ while !handler.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ if event.is_readable() {
+ handler.handle_read(&mut poll, event.token());
+ }
+
+ if event.is_writable() {
+ handler.handle_write(&mut poll, event.token());
+ }
+ }
+ }
+ assert!(handler.state == AfterRead, "actual={:?}", handler.state);
+}
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use std::time::Duration;
+
+use mio::net::TcpStream;
+use mio::{event, Token, Waker};
+
+mod util;
+use util::init_with_poll;
+
+const WAKE_TOKEN: Token = Token(10);
+
+#[test]
+fn assert_event_source_implemented_for() {
+ fn assert_event_source<E: event::Source>() {}
+
+ assert_event_source::<Box<dyn event::Source>>();
+ assert_event_source::<Box<TcpStream>>();
+}
+
+#[test]
+fn events_all() {
+ let (mut poll, mut events) = init_with_poll();
+ assert_eq!(events.capacity(), 16);
+ assert!(events.is_empty());
+
+ let waker = Waker::new(poll.registry(), WAKE_TOKEN).unwrap();
+
+ waker.wake().expect("unable to wake");
+ poll.poll(&mut events, Some(Duration::from_millis(100)))
+ .unwrap();
+
+ assert!(!events.is_empty());
+
+ for event in events.iter() {
+ assert_eq!(event.token(), WAKE_TOKEN);
+ assert!(event.is_readable());
+ }
+
+ events.clear();
+ assert!(events.is_empty());
+}
--- /dev/null
+use mio::Interest;
+
+#[test]
+fn is_tests() {
+ assert!(Interest::READABLE.is_readable());
+ assert!(!Interest::READABLE.is_writable());
+ assert!(!Interest::WRITABLE.is_readable());
+ assert!(Interest::WRITABLE.is_writable());
+ assert!(!Interest::WRITABLE.is_aio());
+ assert!(!Interest::WRITABLE.is_lio());
+}
+
+#[test]
+fn bit_or() {
+ let interests = Interest::READABLE | Interest::WRITABLE;
+ assert!(interests.is_readable());
+ assert!(interests.is_writable());
+}
+
+#[test]
+fn fmt_debug() {
+ assert_eq!(format!("{:?}", Interest::READABLE), "READABLE");
+ assert_eq!(format!("{:?}", Interest::WRITABLE), "WRITABLE");
+ assert_eq!(
+ format!("{:?}", Interest::READABLE | Interest::WRITABLE),
+ "READABLE | WRITABLE"
+ );
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos"
+ ))]
+ {
+ assert_eq!(format!("{:?}", Interest::AIO), "AIO");
+ }
+ #[cfg(any(target_os = "freebsd"))]
+ {
+ assert_eq!(format!("{:?}", Interest::LIO), "LIO");
+ }
+}
+
+#[test]
+fn add() {
+ let interest: Interest = Interest::READABLE.add(Interest::WRITABLE);
+
+ assert!(interest.is_readable());
+ assert!(interest.is_writable());
+}
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use std::net;
+use std::sync::{Arc, Barrier};
+use std::thread::{self, sleep};
+use std::time::Duration;
+use std::{fmt, io};
+
+use mio::event::Source;
+use mio::net::{TcpListener, TcpStream, UdpSocket};
+use mio::{event, Events, Interest, Poll, Registry, Token};
+
+mod util;
+use util::{
+ any_local_address, assert_send, assert_sync, expect_events, init, init_with_poll, ExpectEvent,
+};
+
+const ID1: Token = Token(1);
+const ID2: Token = Token(2);
+const ID3: Token = Token(3);
+
+#[test]
+fn is_send_and_sync() {
+ assert_send::<Events>();
+ assert_sync::<Events>();
+
+ assert_sync::<Poll>();
+ assert_send::<Poll>();
+
+ assert_sync::<Registry>();
+ assert_send::<Registry>();
+}
+
+#[test]
+fn run_once_with_nothing() {
+ init();
+
+ let mut events = Events::with_capacity(16);
+ let mut poll = Poll::new().unwrap();
+ poll.poll(&mut events, Some(Duration::from_millis(100)))
+ .unwrap();
+}
+
+#[test]
+fn add_then_drop() {
+ init();
+
+ let mut events = Events::with_capacity(16);
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+ let mut poll = Poll::new().unwrap();
+ poll.registry()
+ .register(
+ &mut listener,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+ drop(listener);
+ poll.poll(&mut events, Some(Duration::from_millis(100)))
+ .unwrap();
+}
+
+#[test]
+fn zero_duration_polls_events() {
+ init();
+
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(16);
+
+ let listener = net::TcpListener::bind(any_local_address()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let streams: Vec<TcpStream> = (0..3)
+ .map(|n| {
+ let mut stream = TcpStream::connect(addr).unwrap();
+ poll.registry()
+ .register(&mut stream, Token(n), Interest::WRITABLE)
+ .unwrap();
+ stream
+ })
+ .collect();
+
+ // Ensure the TcpStreams have some time to connect and for the events to
+ // show up.
+ sleep(Duration::from_millis(10));
+
+ // Even when passing a zero duration timeout we still want to do the system
+ // call.
+ poll.poll(&mut events, Some(Duration::from_nanos(0)))
+ .unwrap();
+ assert!(!events.is_empty());
+
+ // Both need to live until here.
+ drop(streams);
+ drop(listener);
+}
+
+#[test]
+fn poll_closes_fd() {
+ init();
+
+ for _ in 0..2000 {
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(4);
+
+ poll.poll(&mut events, Some(Duration::from_millis(0)))
+ .unwrap();
+
+ drop(poll);
+ }
+}
+
+#[test]
+fn drop_cancels_interest_and_shuts_down() {
+ init();
+
+ use mio::net::TcpStream;
+ use std::io;
+ use std::io::Read;
+ use std::net::TcpListener;
+ use std::thread;
+
+ let listener = TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let handle = thread::spawn(move || {
+ let mut stream = listener.incoming().next().unwrap().unwrap();
+ stream
+ .set_read_timeout(Some(Duration::from_secs(5)))
+ .expect("set_read_timeout");
+ match stream.read(&mut [0; 16]) {
+ Ok(_) => (),
+ Err(err) => {
+ if err.kind() != io::ErrorKind::UnexpectedEof {
+ panic!("{}", err);
+ }
+ }
+ }
+ });
+
+ let mut poll = Poll::new().unwrap();
+ let mut stream = TcpStream::connect(addr).unwrap();
+
+ poll.registry()
+ .register(
+ &mut stream,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+ let mut events = Events::with_capacity(16);
+ 'outer: loop {
+ poll.poll(&mut events, None).unwrap();
+ for event in &events {
+ if event.token() == Token(1) {
+ // connected
+ break 'outer;
+ }
+ }
+ }
+
+ let mut buf = [0; 1024];
+ match stream.read(&mut buf) {
+ Ok(_) => panic!("unexpected ok"),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => (),
+ Err(err) => panic!("unexpected error: {}", err),
+ }
+
+ drop(stream);
+ handle.join().unwrap();
+}
+
+#[test]
+fn registry_behind_arc() {
+ // `Registry` should work behind an `Arc`, being `Sync` and `Send`.
+ init();
+
+ let mut poll = Poll::new().unwrap();
+ let registry = Arc::new(poll.registry().try_clone().unwrap());
+ let mut events = Events::with_capacity(128);
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+ let addr = listener.local_addr().unwrap();
+ let barrier = Arc::new(Barrier::new(3));
+
+ let registry2 = Arc::clone(®istry);
+ let registry3 = Arc::clone(®istry);
+ let barrier2 = Arc::clone(&barrier);
+ let barrier3 = Arc::clone(&barrier);
+
+ let handle1 = thread::spawn(move || {
+ registry2
+ .register(&mut listener, Token(0), Interest::READABLE)
+ .unwrap();
+ barrier2.wait();
+ });
+ let handle2 = thread::spawn(move || {
+ let mut stream = TcpStream::connect(addr).unwrap();
+ registry3
+ .register(
+ &mut stream,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+ barrier3.wait();
+ });
+
+ poll.poll(&mut events, Some(Duration::from_millis(1000)))
+ .unwrap();
+ assert!(events.iter().count() >= 1);
+
+ // Let the threads return.
+ barrier.wait();
+
+ handle1.join().unwrap();
+ handle2.join().unwrap();
+}
+
+/// Call all registration operations, ending with `source` being registered with `token` and `final_interests`.
+pub fn registry_ops_flow(
+ registry: &Registry,
+ source: &mut dyn Source,
+ token: Token,
+ init_interests: Interest,
+ final_interests: Interest,
+) -> io::Result<()> {
+ registry.register(source, token, init_interests).unwrap();
+ registry.deregister(source).unwrap();
+
+ registry.register(source, token, init_interests).unwrap();
+ registry.reregister(source, token, final_interests)
+}
+
+#[test]
+fn registry_operations_are_thread_safe() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let registry = Arc::new(poll.registry().try_clone().unwrap());
+ let registry1 = Arc::clone(®istry);
+ let registry2 = Arc::clone(®istry);
+ let registry3 = Arc::clone(®istry);
+
+ let barrier = Arc::new(Barrier::new(4));
+ let barrier1 = Arc::clone(&barrier);
+ let barrier2 = Arc::clone(&barrier);
+ let barrier3 = Arc::clone(&barrier);
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ // Expect that multiple register/deregister/reregister calls work fine from multiple
+ // threads. The main thread will wait before the expect_events for the other 3
+ // threads to do their work. Otherwise the expect_events timeout might be too short
+ // for all threads to complete, and the call might fail.
+
+ let handle1 = thread::spawn(move || {
+ registry_ops_flow(
+ ®istry1,
+ &mut listener,
+ ID1,
+ Interest::READABLE,
+ Interest::READABLE,
+ )
+ .unwrap();
+
+ barrier1.wait();
+ barrier1.wait();
+ });
+
+ let handle2 = thread::spawn(move || {
+ let mut udp_socket = UdpSocket::bind(any_local_address()).unwrap();
+ registry_ops_flow(
+ ®istry2,
+ &mut udp_socket,
+ ID2,
+ Interest::WRITABLE,
+ Interest::WRITABLE.add(Interest::READABLE),
+ )
+ .unwrap();
+
+ barrier2.wait();
+ barrier2.wait();
+ });
+
+ let handle3 = thread::spawn(move || {
+ let mut stream = TcpStream::connect(addr).unwrap();
+ registry_ops_flow(
+ ®istry3,
+ &mut stream,
+ ID3,
+ Interest::READABLE,
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+
+ barrier3.wait();
+ barrier3.wait();
+ });
+
+ // wait for threads to finish before expect_events
+ barrier.wait();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(ID1, Interest::READABLE),
+ ExpectEvent::new(ID2, Interest::WRITABLE),
+ ExpectEvent::new(ID3, Interest::WRITABLE),
+ ],
+ );
+
+ // Let the threads return.
+ barrier.wait();
+
+ handle1.join().unwrap();
+ handle2.join().unwrap();
+ handle3.join().unwrap();
+}
+
+#[test]
+fn register_during_poll() {
+ let (mut poll, mut events) = init_with_poll();
+ let registry = poll.registry().try_clone().unwrap();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let barrier1 = Arc::clone(&barrier);
+
+ let handle1 = thread::spawn(move || {
+ let mut stream = UdpSocket::bind(any_local_address()).unwrap();
+
+ barrier1.wait();
+ // Get closer to "trying" to register during a poll by doing a short
+ // sleep before register to give main thread enough time to start
+ // waiting the 5 sec long poll.
+ sleep(Duration::from_millis(200));
+ registry
+ .register(&mut stream, ID1, Interest::WRITABLE)
+ .unwrap();
+
+ barrier1.wait();
+ drop(stream);
+ });
+
+ // Unlock the thread, allow it to register the `UdpSocket`.
+ barrier.wait();
+ // Concurrently (at least we attempt to) call `Poll::poll`.
+ poll.poll(&mut events, Some(Duration::from_secs(5)))
+ .unwrap();
+
+ let mut iter = events.iter();
+ let event = iter.next().expect("expect an event");
+ assert_eq!(event.token(), ID1);
+ assert!(event.is_writable());
+ assert!(iter.next().is_none(), "unexpected extra event");
+
+ barrier.wait();
+ handle1.join().unwrap();
+}
+
+// This test checks the following reregister constraints:
+// - `reregister` arguments fully override the previous values. In other
+// words, if a socket is registered with `READABLE` interest and the call
+// to `reregister` specifies `WRITABLE`, then read interest is no longer
+// requested for the handle.
+// - `reregister` can use the same token as `register`
+// - `reregister` can use different token from `register`
+// - multiple `reregister` are ok
+#[test]
+fn reregister_interest_token_usage() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut udp_socket = UdpSocket::bind(any_local_address()).unwrap();
+
+ poll.registry()
+ .register(&mut udp_socket, ID1, Interest::READABLE)
+ .expect("unable to register listener");
+
+ poll.registry()
+ .reregister(&mut udp_socket, ID1, Interest::READABLE)
+ .expect("unable to register listener");
+
+ poll.registry()
+ .reregister(&mut udp_socket, ID2, Interest::WRITABLE)
+ .expect("unable to register listener");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::WRITABLE)],
+ );
+}
+
+// This test checks the following register constraint:
+// The event source must **not** have been previously registered with this
+// instance of `Poll`, otherwise the behavior is unspecified.
+//
+// This test is done on Windows and epoll platforms where registering a
+// source twice is defined behavior that fails with an error code.
+//
+// On kqueue platforms registering twice (not *re*registering) works, but that
+// is not a test goal, so it is not tested.
+#[test]
+#[cfg(debug_assertions)] // Check is only present when debug assertions are enabled.
+pub fn double_register_different_token() {
+ init();
+ let poll = Poll::new().unwrap();
+
+ let mut listener = TcpListener::bind("127.0.0.1:0".parse().unwrap()).unwrap();
+
+ poll.registry()
+ .register(&mut listener, Token(0), Interest::READABLE)
+ .unwrap();
+
+ assert_error(
+ poll.registry()
+ .register(&mut listener, Token(1), Interest::READABLE),
+ "already registered",
+ );
+}
+
+#[test]
+fn poll_ok_after_cancelling_pending_ops() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+ let address = listener.local_addr().unwrap();
+
+ let registry = Arc::new(poll.registry().try_clone().unwrap());
+ let registry1 = Arc::clone(®istry);
+
+ let barrier = Arc::new(Barrier::new(2));
+ let barrier1 = Arc::clone(&barrier);
+
+ registry
+ .register(&mut listener, ID1, Interest::READABLE)
+ .unwrap();
+
+ // Call a dummy poll just to submit an AFD poll request.
+ poll.poll(&mut events, Some(Duration::from_millis(0)))
+ .unwrap();
+
+ // This reregister will cancel the previously pending poll op.
+ // The token is different from the one used in the register above, so it ensures
+ // the proper event is returned by expect_events below.
+ registry
+ .reregister(&mut listener, ID2, Interest::READABLE)
+ .unwrap();
+
+ let handle = thread::spawn(move || {
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ barrier1.wait();
+
+ registry1
+ .register(&mut stream, ID3, Interest::WRITABLE)
+ .unwrap();
+
+ barrier1.wait();
+ });
+
+ // Is the listener ready to accept the stream? Getting `READABLE` here means the
+ // cancelled poll op was cleared and another poll request was submitted,
+ // which resulted in this event being returned.
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+
+ let (_, _) = listener.accept().unwrap();
+ barrier.wait();
+
+ // For the sake of completeness, check that the stream is `WRITABLE`.
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID3, Interest::WRITABLE)],
+ );
+
+ barrier.wait();
+ handle.join().expect("unable to join thread");
+}
+
+// This test checks the following reregister constraint:
+// The event source must have previously been registered with this instance
+// of `Poll`, otherwise the behavior is unspecified.
+//
+// This test is done on Windows and epoll platforms where reregistering a
+// source without a previous register is defined behavior that fails with an
+// error code.
+//
+// On kqueue platforms reregistering w/o registering works but that's not a
+// test goal, so it is not tested.
+#[test]
+#[cfg(debug_assertions)] // Check is only present when debug assertions are enabled.
+fn reregister_without_register() {
+ let poll = Poll::new().expect("unable to create Poll instance");
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+
+ assert_error(
+ poll.registry()
+ .reregister(&mut listener, ID1, Interest::READABLE),
+ "not registered",
+ );
+}
+
+// This test checks the following deregister constraint:
+// The event source must have previously been registered with this instance
+// of `Poll`, otherwise the behavior is unspecified.
+//
+// This test is done on Windows and epoll platforms where deregistering a
+// source without a previous register is defined behavior that fails with an
+// error code.
+//
+// On kqueue platforms deregistering w/o registering works but that's not a
+// test goal, so it is not tested.
+#[test]
+#[cfg(debug_assertions)] // Check is only present when debug assertions are enabled.
+fn deregister_without_register() {
+ let poll = Poll::new().expect("unable to create Poll instance");
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+
+ assert_error(poll.registry().deregister(&mut listener), "not registered");
+}
+
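+/// Event source that records every `register`, `reregister` and `deregister`
+/// call made on it, so the tests below can verify that `Registry` forwards
+/// those calls to the source.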
+struct TestEventSource {
+ registrations: Vec<(Token, Interest)>,
+ reregistrations: Vec<(Token, Interest)>,
+ deregister_count: usize,
+}
+
+impl TestEventSource {
+ fn new() -> TestEventSource {
+ TestEventSource {
+ registrations: Vec::new(),
+ reregistrations: Vec::new(),
+ deregister_count: 0,
+ }
+ }
+}
+
+impl event::Source for TestEventSource {
+ fn register(
+ &mut self,
+ _registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.registrations.push((token, interests));
+ Ok(())
+ }
+
+ fn reregister(
+ &mut self,
+ _registry: &Registry,
+ token: Token,
+ interests: Interest,
+ ) -> io::Result<()> {
+ self.reregistrations.push((token, interests));
+ Ok(())
+ }
+
+ fn deregister(&mut self, _registry: &Registry) -> io::Result<()> {
+ self.deregister_count += 1;
+ Ok(())
+ }
+}
+
+#[test]
+fn poll_registration() {
+ init();
+ let poll = Poll::new().unwrap();
+ let registry = poll.registry();
+
+ let mut source = TestEventSource::new();
+ let token = Token(0);
+ let interests = Interest::READABLE;
+ registry.register(&mut source, token, interests).unwrap();
+ assert_eq!(source.registrations.len(), 1);
+ assert_eq!(source.registrations.get(0), Some(&(token, interests)));
+ assert!(source.reregistrations.is_empty());
+ assert_eq!(source.deregister_count, 0);
+
+ let re_token = Token(0);
+ let re_interests = Interest::READABLE;
+ registry
+ .reregister(&mut source, re_token, re_interests)
+ .unwrap();
+ assert_eq!(source.registrations.len(), 1);
+ assert_eq!(source.reregistrations.len(), 1);
+ assert_eq!(
+ source.reregistrations.get(0),
+ Some(&(re_token, re_interests))
+ );
+ assert_eq!(source.deregister_count, 0);
+
+ registry.deregister(&mut source).unwrap();
+ assert_eq!(source.registrations.len(), 1);
+ assert_eq!(source.reregistrations.len(), 1);
+ assert_eq!(source.deregister_count, 1);
+}
+
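+/// Event source whose `register`, `reregister` and `deregister` always return
+/// an error, used to verify that `Registry` propagates errors from the source.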
+struct ErroneousTestEventSource;
+
+impl event::Source for ErroneousTestEventSource {
+ fn register(
+ &mut self,
+ _registry: &Registry,
+ _token: Token,
+ _interests: Interest,
+ ) -> io::Result<()> {
+ Err(io::Error::new(io::ErrorKind::Other, "register"))
+ }
+
+ fn reregister(
+ &mut self,
+ _registry: &Registry,
+ _token: Token,
+ _interests: Interest,
+ ) -> io::Result<()> {
+ Err(io::Error::new(io::ErrorKind::Other, "reregister"))
+ }
+
+ fn deregister(&mut self, _registry: &Registry) -> io::Result<()> {
+ Err(io::Error::new(io::ErrorKind::Other, "deregister"))
+ }
+}
+
+#[test]
+fn poll_erroneous_registration() {
+ init();
+ let poll = Poll::new().unwrap();
+ let registry = poll.registry();
+
+ let mut source = ErroneousTestEventSource;
+ let token = Token(0);
+ let interests = Interest::READABLE;
+ assert_error(registry.register(&mut source, token, interests), "register");
+ assert_error(
+ registry.reregister(&mut source, token, interests),
+ "reregister",
+ );
+ assert_error(registry.deregister(&mut source), "deregister");
+}
+
+/// Assert that `result` is an error and the formatted error (via
+/// `fmt::Display`) contains `expected_msg`.
+pub fn assert_error<T, E: fmt::Display>(result: Result<T, E>, expected_msg: &str) {
+ match result {
+ Ok(_) => panic!("unexpected OK result"),
+ Err(err) => assert!(
+ err.to_string().contains(expected_msg),
+ "expected: {}, got: {}",
+ expected_msg,
+ err
+ ),
+ }
+}
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use std::io::{self, Write};
+use std::thread::sleep;
+use std::time::Duration;
+
+use log::{debug, info, trace};
+#[cfg(debug_assertions)]
+use mio::net::UdpSocket;
+use mio::net::{TcpListener, TcpStream};
+use mio::{Events, Interest, Poll, Registry, Token};
+
+mod util;
+#[cfg(debug_assertions)]
+use util::assert_error;
+use util::{any_local_address, init};
+
+const SERVER: Token = Token(0);
+const CLIENT: Token = Token(1);
+
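+// Handler used by `register_deregister`: on a server event it accepts the
+// connection and writes to it, on a client event it reregisters the client
+// with writable interest, and once the client is writable it deregisters both
+// sockets.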
+struct TestHandler {
+ server: TcpListener,
+ client: TcpStream,
+ state: usize,
+}
+
+impl TestHandler {
+ fn new(srv: TcpListener, cli: TcpStream) -> TestHandler {
+ TestHandler {
+ server: srv,
+ client: cli,
+ state: 0,
+ }
+ }
+
+ fn handle_read(&mut self, registry: &Registry, token: Token) {
+ match token {
+ SERVER => {
+ trace!("handle_read; token=SERVER");
+ let mut sock = self.server.accept().unwrap().0;
+ if let Err(err) = sock.write(b"foobar") {
+ if err.kind() != io::ErrorKind::WouldBlock {
+ panic!("unexpected error writing to connection: {}", err);
+ }
+ }
+ }
+ CLIENT => {
+ trace!("handle_read; token=CLIENT");
+ assert!(self.state == 0, "unexpected state {}", self.state);
+ self.state = 1;
+ registry
+ .reregister(&mut self.client, CLIENT, Interest::WRITABLE)
+ .unwrap();
+ }
+ _ => panic!("unexpected token"),
+ }
+ }
+
+ fn handle_write(&mut self, registry: &Registry, token: Token) {
+ debug!("handle_write; token={:?}; state={:?}", token, self.state);
+
+ assert!(token == CLIENT, "unexpected token {:?}", token);
+ assert!(self.state == 1, "unexpected state {}", self.state);
+
+ self.state = 2;
+ registry.deregister(&mut self.client).unwrap();
+ registry.deregister(&mut self.server).unwrap();
+ }
+}
+
+#[test]
+pub fn register_deregister() {
+ init();
+
+ debug!("Starting TEST_REGISTER_DEREGISTER");
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(1024);
+
+ let mut server = TcpListener::bind(any_local_address()).unwrap();
+ let addr = server.local_addr().unwrap();
+
+ info!("register server socket");
+ poll.registry()
+ .register(&mut server, SERVER, Interest::READABLE)
+ .unwrap();
+
+ let mut client = TcpStream::connect(addr).unwrap();
+
+ // Register the client socket only as readable
+ poll.registry()
+ .register(&mut client, CLIENT, Interest::READABLE)
+ .unwrap();
+
+ let mut handler = TestHandler::new(server, client);
+
+ loop {
+ poll.poll(&mut events, None).unwrap();
+
+ if let Some(event) = events.iter().next() {
+ if event.is_readable() {
+ handler.handle_read(poll.registry(), event.token());
+ }
+
+ if event.is_writable() {
+ handler.handle_write(poll.registry(), event.token());
+ break;
+ }
+ }
+ }
+
+ poll.poll(&mut events, Some(Duration::from_millis(100)))
+ .unwrap();
+ assert!(events.iter().next().is_none());
+}
+
+#[test]
+pub fn reregister_different_interest_without_poll() {
+ init();
+
+ let mut events = Events::with_capacity(1024);
+ let mut poll = Poll::new().unwrap();
+
+ // Create the listener
+ let mut l = TcpListener::bind("127.0.0.1:0".parse().unwrap()).unwrap();
+
+ // Register the listener with `Poll`
+ poll.registry()
+ .register(&mut l, Token(0), Interest::READABLE)
+ .unwrap();
+
+ let mut s1 = TcpStream::connect(l.local_addr().unwrap()).unwrap();
+ poll.registry()
+ .register(&mut s1, Token(2), Interest::READABLE)
+ .unwrap();
+
+ const TIMEOUT: Duration = Duration::from_millis(200);
+ sleep(TIMEOUT);
+
+ poll.registry()
+ .reregister(&mut l, Token(0), Interest::WRITABLE)
+ .unwrap();
+
+ poll.poll(&mut events, Some(TIMEOUT)).unwrap();
+ assert!(events.iter().next().is_none());
+}
+
+#[test]
+#[cfg(debug_assertions)] // Check is only present when debug assertions are enabled.
+fn tcp_register_multiple_event_loops() {
+ init();
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let poll1 = Poll::new().unwrap();
+ poll1
+ .registry()
+ .register(
+ &mut listener,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+
+ let poll2 = Poll::new().unwrap();
+
+ // Try registering the same socket with a second `Poll` instance
+ let res = poll2.registry().register(
+ &mut listener,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ );
+ assert_error(res, "I/O source already registered with a `Registry`");
+
+ // Try the stream
+ let mut stream = TcpStream::connect(addr).unwrap();
+
+ poll1
+ .registry()
+ .register(
+ &mut stream,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+
+ let res = poll2.registry().register(
+ &mut stream,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ );
+ assert_error(res, "I/O source already registered with a `Registry`");
+}
+
+#[test]
+#[cfg(debug_assertions)] // Check is only present when debug assertions are enabled.
+fn udp_register_multiple_event_loops() {
+ init();
+
+ let mut socket = UdpSocket::bind(any_local_address()).unwrap();
+
+ let poll1 = Poll::new().unwrap();
+ poll1
+ .registry()
+ .register(
+ &mut socket,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+
+ let poll2 = Poll::new().unwrap();
+
+ // Try registering the same socket with a second `Poll` instance
+ let res = poll2.registry().register(
+ &mut socket,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ );
+ assert_error(res, "I/O source already registered with a `Registry`");
+}
+
+#[test]
+fn registering_after_deregistering() {
+ init();
+
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(8);
+
+ let mut server = TcpListener::bind(any_local_address()).unwrap();
+
+ poll.registry()
+ .register(&mut server, SERVER, Interest::READABLE)
+ .unwrap();
+
+ poll.registry().deregister(&mut server).unwrap();
+
+ poll.registry()
+ .register(&mut server, SERVER, Interest::READABLE)
+ .unwrap();
+
+ poll.poll(&mut events, Some(Duration::from_millis(100)))
+ .unwrap();
+ assert!(events.is_empty());
+}
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use std::io::{self, Read};
+use std::sync::Arc;
+use std::time::Duration;
+use std::{net, thread};
+
+use mio::net::{TcpListener, TcpStream};
+use mio::{Events, Interest, Poll, Token, Waker};
+
+mod util;
+use util::{any_local_address, init, init_with_poll};
+
+const ID1: Token = Token(1);
+const WAKE_TOKEN: Token = Token(10);
+
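+// Regression test: after the stream is connected and the first event has been
+// received, a read on the stream must fail with `WouldBlock` instead of
+// blocking or succeeding.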
+#[test]
+fn issue_776() {
+ init();
+
+ let listener = net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let handle = thread::spawn(move || {
+ let mut stream = listener.accept().expect("accept").0;
+ stream
+ .set_read_timeout(Some(Duration::from_secs(5)))
+ .expect("set_read_timeout");
+ let _ = stream.read(&mut [0; 16]).expect("read");
+ });
+
+ let mut poll = Poll::new().unwrap();
+ let mut stream = TcpStream::connect(addr).unwrap();
+
+ poll.registry()
+ .register(
+ &mut stream,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+ let mut events = Events::with_capacity(16);
+ 'outer: loop {
+ poll.poll(&mut events, None).unwrap();
+ for event in &events {
+ if event.token() == Token(1) {
+ // connected
+ break 'outer;
+ }
+ }
+ }
+
+ let mut buf = [0; 1024];
+ match stream.read(&mut buf) {
+ Ok(_) => panic!("unexpected ok"),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => (),
+ Err(err) => panic!("unexpected error: {}", err),
+ }
+
+ drop(stream);
+ handle.join().unwrap();
+}
+
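+// Regression test: once the listener is deregistered the poll below must only
+// return the waker event, not an event for the (deregistered) listener.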
+#[test]
+fn issue_1205() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let waker = Arc::new(Waker::new(poll.registry(), WAKE_TOKEN).unwrap());
+
+ // `_waker` must stay in scope in order for `Waker` events to be delivered
+ // when the test polls for events. If it is not cloned, it is moved out of
+ // scope in `thread::spawn` and `Poll::poll` will time out.
+ #[allow(clippy::redundant_clone)]
+ let _waker = waker.clone();
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+
+ poll.registry()
+ .register(&mut listener, ID1, Interest::READABLE)
+ .unwrap();
+
+ poll.poll(&mut events, Some(std::time::Duration::from_millis(0)))
+ .unwrap();
+ assert!(events.iter().count() == 0);
+
+ let _stream = TcpStream::connect(listener.local_addr().unwrap()).unwrap();
+
+ poll.registry().deregister(&mut listener).unwrap();
+
+ // spawn a waker thread to wake the poll call below
+ let handle = thread::spawn(move || {
+ thread::sleep(Duration::from_millis(500));
+ waker.wake().expect("unable to wake");
+ });
+
+ poll.poll(&mut events, None).unwrap();
+
+ // The poll should return only one event, namely the waker event. It should
+ // not return an event for the listener above because it was deregistered.
+ assert!(events.iter().count() == 1);
+ let waker_event = events.iter().next().unwrap();
+ assert!(waker_event.is_readable());
+ assert_eq!(waker_event.token(), WAKE_TOKEN);
+ handle.join().unwrap();
+}
+
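+// Regression test: receiving a datagram sent from an unbound `UnixDatagram`
+// must return a sender address without a pathname.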
+#[test]
+#[cfg(unix)]
+fn issue_1403() {
+ use mio::net::UnixDatagram;
+ use util::temp_file;
+
+ init();
+
+ let path = temp_file("issue_1403");
+ let datagram1 = UnixDatagram::bind(&path).unwrap();
+ let datagram2 = UnixDatagram::unbound().unwrap();
+
+ let mut buf = [1u8; 1024];
+ let n = datagram2.send_to(&buf, &path).unwrap();
+
+ let (got, addr) = datagram1.recv_from(&mut buf).unwrap();
+ assert_eq!(got, n);
+ assert_eq!(addr.as_pathname(), None);
+}
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use mio::net::{TcpListener, TcpStream};
+use mio::{Events, Interest, Poll, Token};
+use std::io::{self, Read, Write};
+use std::net::{self, Shutdown};
+use std::sync::mpsc::channel;
+use std::thread::{self, sleep};
+use std::time::Duration;
+
+#[macro_use]
+mod util;
+use util::{
+ any_local_address, assert_send, assert_sync, expect_events, expect_no_events, init,
+ init_with_poll, set_linger_zero, ExpectEvent,
+};
+
+const LISTEN: Token = Token(0);
+const CLIENT: Token = Token(1);
+const SERVER: Token = Token(2);
+
+#[test]
+#[cfg(all(unix, not(debug_assertions)))]
+fn assert_size() {
+ use mio::net::*;
+ use std::mem::size_of;
+
+ // Without debug assertions enabled `TcpListener`, `TcpStream` and
+ // `UdpSocket` should have the same size as the system specific socket, i.e.
+ // just a file descriptor on Unix platforms.
+ assert_eq!(size_of::<TcpListener>(), size_of::<std::net::TcpListener>());
+ assert_eq!(size_of::<TcpStream>(), size_of::<std::net::TcpStream>());
+}
+
+#[test]
+fn is_send_and_sync() {
+ assert_send::<TcpListener>();
+ assert_sync::<TcpListener>();
+
+ assert_send::<TcpStream>();
+ assert_sync::<TcpStream>();
+}
+
+#[test]
+fn accept() {
+ init();
+
+ struct Data {
+ hit: bool,
+ listener: TcpListener,
+ shutdown: bool,
+ }
+
+ let mut listener = TcpListener::bind("127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let handle = thread::spawn(move || {
+ net::TcpStream::connect(addr).unwrap();
+ });
+
+ let mut poll = Poll::new().unwrap();
+
+ poll.registry()
+ .register(&mut listener, Token(1), Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(128);
+
+ let mut data = Data {
+ hit: false,
+ listener,
+ shutdown: false,
+ };
+ while !data.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ data.hit = true;
+ assert_eq!(event.token(), Token(1));
+ assert!(event.is_readable());
+ assert!(data.listener.accept().is_ok());
+ data.shutdown = true;
+ }
+ }
+ assert!(data.hit);
+ assert!(data.listener.accept().unwrap_err().kind() == io::ErrorKind::WouldBlock);
+ handle.join().unwrap();
+}
+
+#[test]
+fn connect() {
+ init();
+
+ struct Data {
+ hit: u32,
+ shutdown: bool,
+ }
+
+ let listener = net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let (tx, rx) = channel();
+ let (tx2, rx2) = channel();
+ let handle = thread::spawn(move || {
+ let stream = listener.accept().unwrap();
+ rx.recv().unwrap();
+ drop(stream);
+ tx2.send(()).unwrap();
+ });
+
+ let mut poll = Poll::new().unwrap();
+ let mut stream = TcpStream::connect(addr).unwrap();
+
+ poll.registry()
+ .register(
+ &mut stream,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+
+ let mut events = Events::with_capacity(128);
+
+ let mut data = Data {
+ hit: 0,
+ shutdown: false,
+ };
+ while !data.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ assert_eq!(event.token(), Token(1));
+ match data.hit {
+ 0 => assert!(event.is_writable()),
+ 1 => assert!(event.is_readable()),
+ _ => panic!(),
+ }
+ data.hit += 1;
+ data.shutdown = true;
+ }
+ }
+ assert_eq!(data.hit, 1);
+ tx.send(()).unwrap();
+ rx2.recv().unwrap();
+ data.shutdown = false;
+ while !data.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ assert_eq!(event.token(), Token(1));
+ match data.hit {
+ 0 => assert!(event.is_writable()),
+ 1 => assert!(event.is_readable()),
+ _ => panic!(),
+ }
+ data.hit += 1;
+ data.shutdown = true;
+ }
+ }
+ assert_eq!(data.hit, 2);
+ handle.join().unwrap();
+}
+
+#[test]
+fn read() {
+ init();
+
+ const N: usize = 16 * 1024 * 1024;
+ struct Data {
+ amt: usize,
+ socket: TcpStream,
+ shutdown: bool,
+ }
+
+ let listener = net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let handle = thread::spawn(move || {
+ let mut stream = listener.accept().unwrap().0;
+ let buf = [0; 1024];
+ let mut amt = 0;
+ while amt < N {
+ amt += stream.write(&buf).unwrap();
+ }
+ });
+
+ let mut poll = Poll::new().unwrap();
+ let mut stream = TcpStream::connect(addr).unwrap();
+
+ poll.registry()
+ .register(&mut stream, Token(1), Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(128);
+
+ let mut data = Data {
+ amt: 0,
+ socket: stream,
+ shutdown: false,
+ };
+ while !data.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ assert_eq!(event.token(), Token(1));
+ let mut buf = [0; 1024];
+ loop {
+ if let Ok(amt) = data.socket.read(&mut buf) {
+ data.amt += amt;
+ } else {
+ break;
+ }
+ if data.amt >= N {
+ data.shutdown = true;
+ break;
+ }
+ }
+ }
+ }
+ handle.join().unwrap();
+}
+
+#[test]
+fn peek() {
+ init();
+
+ const N: usize = 16 * 1024 * 1024;
+ struct Data {
+ amt: usize,
+ socket: TcpStream,
+ shutdown: bool,
+ }
+
+ let listener = net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let handle = thread::spawn(move || {
+ let mut stream = listener.accept().unwrap().0;
+ let buf = [0; 1024];
+ let mut amt = 0;
+ while amt < N {
+ amt += stream.write(&buf).unwrap();
+ }
+ });
+
+ let mut poll = Poll::new().unwrap();
+ let mut stream = TcpStream::connect(addr).unwrap();
+
+ poll.registry()
+ .register(&mut stream, Token(1), Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(128);
+
+ let mut data = Data {
+ amt: 0,
+ socket: stream,
+ shutdown: false,
+ };
+ while !data.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ assert_eq!(event.token(), Token(1));
+ let mut buf = [0; 1024];
+ match data.socket.peek(&mut buf) {
+ Ok(_) => (),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(err) => panic!("unexpected error: {}", err),
+ }
+
+ loop {
+ if let Ok(amt) = data.socket.read(&mut buf) {
+ data.amt += amt;
+ } else {
+ break;
+ }
+ if data.amt >= N {
+ data.shutdown = true;
+ break;
+ }
+ }
+ }
+ }
+ handle.join().unwrap();
+}
+
+#[test]
+fn write() {
+ init();
+
+ const N: usize = 16 * 1024 * 1024;
+ struct Data {
+ amt: usize,
+ socket: TcpStream,
+ shutdown: bool,
+ }
+
+ let listener = net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let handle = thread::spawn(move || {
+ let mut stream = listener.accept().unwrap().0;
+ let mut buf = [0; 1024];
+ let mut amt = 0;
+ while amt < N {
+ amt += stream.read(&mut buf).unwrap();
+ }
+ });
+
+ let mut poll = Poll::new().unwrap();
+ let mut stream = TcpStream::connect(addr).unwrap();
+
+ poll.registry()
+ .register(&mut stream, Token(1), Interest::WRITABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(128);
+
+ let mut data = Data {
+ amt: 0,
+ socket: stream,
+ shutdown: false,
+ };
+ while !data.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ assert_eq!(event.token(), Token(1));
+ let buf = [0; 1024];
+ loop {
+ if let Ok(amt) = data.socket.write(&buf) {
+ data.amt += amt;
+ } else {
+ break;
+ }
+ if data.amt >= N {
+ data.shutdown = true;
+ break;
+ }
+ }
+ }
+ }
+ handle.join().unwrap();
+}
+
+#[test]
+fn connect_then_close() {
+ init();
+
+ struct Data {
+ listener: TcpListener,
+ shutdown: bool,
+ }
+
+ let mut poll = Poll::new().unwrap();
+ let mut listener = TcpListener::bind("127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut s = TcpStream::connect(listener.local_addr().unwrap()).unwrap();
+
+ poll.registry()
+ .register(&mut listener, Token(1), Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut s, Token(2), Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(128);
+
+ let mut data = Data {
+ listener,
+ shutdown: false,
+ };
+ while !data.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ if event.token() == Token(1) {
+ let mut s = data.listener.accept().unwrap().0;
+ poll.registry()
+ .register(&mut s, Token(3), Interest::READABLE | Interest::WRITABLE)
+ .unwrap();
+ drop(s);
+ } else if event.token() == Token(2) {
+ data.shutdown = true;
+ }
+ }
+ }
+}
+
+#[test]
+fn listen_then_close() {
+ init();
+
+ let mut poll = Poll::new().unwrap();
+ let mut listener = TcpListener::bind("127.0.0.1:0".parse().unwrap()).unwrap();
+
+ poll.registry()
+ .register(&mut listener, Token(1), Interest::READABLE)
+ .unwrap();
+ drop(listener);
+
+ let mut events = Events::with_capacity(128);
+
+ poll.poll(&mut events, Some(Duration::from_millis(100)))
+ .unwrap();
+
+ for event in &events {
+ if event.token() == Token(1) {
+ panic!("recieved ready() on a closed TcpListener")
+ }
+ }
+}
+
+#[test]
+fn bind_twice_bad() {
+ init();
+
+ let l1 = TcpListener::bind("127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = l1.local_addr().unwrap();
+ assert!(TcpListener::bind(addr).is_err());
+}
+
+#[test]
+fn multiple_writes_immediate_success() {
+ init();
+
+ const N: usize = 16;
+ let listener = net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let handle = thread::spawn(move || {
+ let mut s = listener.accept().unwrap().0;
+ let mut b = [0; 1024];
+ let mut amt = 0;
+ while amt < 1024 * N {
+ for byte in b.iter_mut() {
+ *byte = 0;
+ }
+ let n = s.read(&mut b).unwrap();
+ amt += n;
+ for byte in b[..n].iter() {
+ assert_eq!(*byte, 1);
+ }
+ }
+ });
+
+ let mut poll = Poll::new().unwrap();
+ let mut s = TcpStream::connect(addr).unwrap();
+ poll.registry()
+ .register(&mut s, Token(1), Interest::WRITABLE)
+ .unwrap();
+ let mut events = Events::with_capacity(16);
+
+ // Wait for our TCP stream to connect
+ 'outer: loop {
+ poll.poll(&mut events, None).unwrap();
+ for event in events.iter() {
+ if event.token() == Token(1) && event.is_writable() {
+ break 'outer;
+ }
+ }
+ }
+
+ for _ in 0..N {
+ s.write_all(&[1; 1024]).unwrap();
+ }
+
+ handle.join().unwrap();
+}
+
+#[test]
+fn connection_reset_by_peer() {
+ init();
+
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(16);
+ let mut buf = [0u8; 16];
+
+ // Create listener
+ let mut listener = TcpListener::bind("127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ // Connect client
+ let mut client = TcpStream::connect(addr).unwrap();
+ set_linger_zero(&client);
+
+ // Register server
+ poll.registry()
+ .register(&mut listener, Token(0), Interest::READABLE)
+ .unwrap();
+
+ // Register interest in the client
+ poll.registry()
+ .register(
+ &mut client,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+
+ // Wait for listener to be ready
+ let mut server;
+ 'outer: loop {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ if event.token() == Token(0) {
+ match listener.accept() {
+ Ok((sock, _)) => {
+ server = sock;
+ break 'outer;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
+ Err(e) => panic!("unexpected error {:?}", e),
+ }
+ }
+ }
+ }
+
+ // Close the connection
+ drop(client);
+
+ // Wait a moment
+ sleep(Duration::from_millis(100));
+
+ // Register interest in the server socket
+ poll.registry()
+ .register(&mut server, Token(3), Interest::READABLE)
+ .unwrap();
+
+ loop {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ if event.token() == Token(3) {
+ assert!(event.is_readable());
+
+ match server.read(&mut buf) {
+ Ok(0) | Err(_) => {}
+
+ Ok(x) => panic!("expected empty buffer but read {} bytes", x),
+ }
+ return;
+ }
+ }
+ }
+}
+
+#[test]
+fn connect_error() {
+ let (mut poll, mut events) = init_with_poll();
+
+ // Pick a "random" port that shouldn't be in use.
+ let mut stream = match TcpStream::connect("127.0.0.1:58381".parse().unwrap()) {
+ Ok(l) => l,
+ Err(ref e) if e.kind() == io::ErrorKind::ConnectionRefused => {
+ // Connection failed synchronously. This is not a bug, but it
+ // unfortunately doesn't get us the code coverage we want.
+ return;
+ }
+ Err(e) => panic!("TcpStream::connect unexpected error {:?}", e),
+ };
+
+ poll.registry()
+ .register(&mut stream, Token(0), Interest::WRITABLE)
+ .unwrap();
+
+ 'outer: loop {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ if event.token() == Token(0) {
+ assert!(event.is_writable());
+ assert!(event.is_write_closed());
+ break 'outer;
+ }
+ }
+ }
+
+ assert!(stream.take_error().unwrap().is_some());
+}
+
+#[test]
+fn write_error() {
+ init();
+
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(16);
+ let (tx, rx) = channel();
+
+ let listener = net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+ let handle = thread::spawn(move || {
+ let (conn, _addr) = listener.accept().unwrap();
+ rx.recv().unwrap();
+ drop(conn);
+ });
+
+ let mut s = TcpStream::connect(addr).unwrap();
+ poll.registry()
+ .register(&mut s, Token(0), Interest::READABLE | Interest::WRITABLE)
+ .unwrap();
+
+ let mut wait_writable = || 'outer: loop {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ if event.token() == Token(0) && event.is_writable() {
+ break 'outer;
+ }
+ }
+ };
+
+ wait_writable();
+
+ tx.send(()).unwrap();
+ handle.join().unwrap();
+
+ let buf = [0; 1024];
+ loop {
+ match s.write(&buf) {
+ Ok(_) => {}
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => wait_writable(),
+ Err(e) => {
+ println!("good error: {}", e);
+ break;
+ }
+ }
+ }
+}
+
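+// Poll for up to five seconds until an event for `Token(0)` with the given
+// readiness is seen, asserting the expected read-closed state of every event
+// observed along the way.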
+macro_rules! wait {
+ ($poll:ident, $ready:ident, $expect_read_closed: expr) => {{
+ use std::time::Instant;
+
+ let now = Instant::now();
+ let mut events = Events::with_capacity(16);
+ let mut found = false;
+
+ while !found {
+ if now.elapsed() > Duration::from_secs(5) {
+ panic!("not ready");
+ }
+
+ $poll
+ .poll(&mut events, Some(Duration::from_secs(1)))
+ .unwrap();
+
+ for event in &events {
+ if $expect_read_closed {
+ assert!(event.is_read_closed());
+ } else {
+ assert!(!event.is_read_closed() && !event.is_write_closed());
+ }
+
+ if event.token() == Token(0) && event.$ready() {
+ found = true;
+ break;
+ }
+ }
+ }
+ }};
+}
+
+#[test]
+fn write_shutdown() {
+ init();
+
+ let mut poll = Poll::new().unwrap();
+
+ let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let mut client = TcpStream::connect(addr).unwrap();
+ poll.registry()
+ .register(
+ &mut client,
+ Token(0),
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .unwrap();
+
+ let (socket, _) = listener.accept().unwrap();
+
+ wait!(poll, is_writable, false);
+
+ let mut events = Events::with_capacity(16);
+
+ // Polling should not have any events
+ poll.poll(&mut events, Some(Duration::from_millis(100)))
+ .unwrap();
+
+ let next = events.iter().next();
+ assert!(next.is_none());
+
+ println!("SHUTTING DOWN");
+ // Now, shutdown the write half of the socket.
+ socket.shutdown(Shutdown::Write).unwrap();
+
+ wait!(poll, is_readable, true);
+}
+
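+// State used by `local_addr_ready`: the listener, the connecting client, the
+// currently accepted stream (if any) and a flag to stop the event loop.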
+struct MyHandler {
+ listener: TcpListener,
+ connected: TcpStream,
+ accepted: Option<TcpStream>,
+ shutdown: bool,
+}
+
+#[test]
+fn local_addr_ready() {
+ init();
+
+ let addr = "127.0.0.1:0".parse().unwrap();
+ let mut server = TcpListener::bind(addr).unwrap();
+ let addr = server.local_addr().unwrap();
+
+ let mut poll = Poll::new().unwrap();
+ poll.registry()
+ .register(&mut server, LISTEN, Interest::READABLE)
+ .unwrap();
+
+ let mut sock = TcpStream::connect(addr).unwrap();
+ poll.registry()
+ .register(&mut sock, CLIENT, Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+
+ let mut handler = MyHandler {
+ listener: server,
+ connected: sock,
+ accepted: None,
+ shutdown: false,
+ };
+
+ while !handler.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ match event.token() {
+ LISTEN => {
+ let mut sock = handler.listener.accept().unwrap().0;
+ poll.registry()
+ .register(&mut sock, SERVER, Interest::WRITABLE)
+ .unwrap();
+ handler.accepted = Some(sock);
+ }
+ SERVER => {
+ handler.accepted.as_ref().unwrap().peer_addr().unwrap();
+ handler.accepted.as_ref().unwrap().local_addr().unwrap();
+ let n = handler
+ .accepted
+ .as_mut()
+ .unwrap()
+ .write(&[1, 2, 3])
+ .unwrap();
+ assert_eq!(n, 3);
+ handler.accepted = None;
+ }
+ CLIENT => {
+ handler.connected.peer_addr().unwrap();
+ handler.connected.local_addr().unwrap();
+ handler.shutdown = true;
+ }
+ _ => panic!("unexpected token"),
+ }
+ }
+ }
+}
+
+#[test]
+fn write_then_drop() {
+ init();
+
+ let mut a = TcpListener::bind("127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = a.local_addr().unwrap();
+ let mut s = TcpStream::connect(addr).unwrap();
+
+ let mut poll = Poll::new().unwrap();
+
+ poll.registry()
+ .register(&mut a, Token(1), Interest::READABLE)
+ .unwrap();
+
+ poll.registry()
+ .register(&mut s, Token(3), Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+ while events.is_empty() {
+ poll.poll(&mut events, None).unwrap();
+ }
+ assert_eq!(events.iter().count(), 1);
+ assert_eq!(events.iter().next().unwrap().token(), Token(1));
+
+ let mut s2 = a.accept().unwrap().0;
+
+ poll.registry()
+ .register(&mut s2, Token(2), Interest::WRITABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+ while events.is_empty() {
+ poll.poll(&mut events, None).unwrap();
+ }
+ assert_eq!(events.iter().count(), 1);
+ assert_eq!(events.iter().next().unwrap().token(), Token(2));
+
+ s2.write_all(&[1, 2, 3, 4]).unwrap();
+ drop(s2);
+
+ poll.registry()
+ .reregister(&mut s, Token(3), Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+ while events.is_empty() {
+ poll.poll(&mut events, None).unwrap();
+ }
+ assert_eq!(events.iter().count(), 1);
+ assert_eq!(events.iter().next().unwrap().token(), Token(3));
+
+ let mut buf = [0; 10];
+ expect_read!(s.read(&mut buf), &[1, 2, 3, 4]);
+}
+
+#[test]
+fn write_then_deregister() {
+ init();
+
+ let mut a = TcpListener::bind("127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = a.local_addr().unwrap();
+ let mut s = TcpStream::connect(addr).unwrap();
+
+ let mut poll = Poll::new().unwrap();
+
+ poll.registry()
+ .register(&mut a, Token(1), Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut s, Token(3), Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+ while events.is_empty() {
+ poll.poll(&mut events, None).unwrap();
+ }
+ assert_eq!(events.iter().count(), 1);
+ assert_eq!(events.iter().next().unwrap().token(), Token(1));
+
+ let mut s2 = a.accept().unwrap().0;
+
+ poll.registry()
+ .register(&mut s2, Token(2), Interest::WRITABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+ while events.is_empty() {
+ poll.poll(&mut events, None).unwrap();
+ }
+ assert_eq!(events.iter().count(), 1);
+ assert_eq!(events.iter().next().unwrap().token(), Token(2));
+
+ s2.write_all(&[1, 2, 3, 4]).unwrap();
+ poll.registry().deregister(&mut s2).unwrap();
+
+ poll.registry()
+ .reregister(&mut s, Token(3), Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+ while events.is_empty() {
+ poll.poll(&mut events, None).unwrap();
+ }
+ assert_eq!(events.iter().count(), 1);
+ assert_eq!(events.iter().next().unwrap().token(), Token(3));
+
+ let mut buf = [0; 10];
+ expect_read!(s.read(&mut buf), &[1, 2, 3, 4]);
+}
+
+const ID1: Token = Token(1);
+const ID2: Token = Token(2);
+const ID3: Token = Token(3);
+
+#[test]
+fn tcp_no_events_after_deregister() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+ let addr = listener.local_addr().unwrap();
+ let mut stream = TcpStream::connect(addr).unwrap();
+
+ poll.registry()
+ .register(&mut listener, ID1, Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut stream, ID3, Interest::READABLE)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ let (mut stream2, peer_address) = listener.accept().expect("unable to accept connection");
+ assert!(peer_address.ip().is_loopback());
+ assert_eq!(stream2.peer_addr().unwrap(), peer_address);
+ assert_eq!(stream2.local_addr().unwrap(), addr);
+
+ poll.registry()
+ .register(&mut stream2, ID2, Interest::WRITABLE)
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::WRITABLE)],
+ );
+
+ stream2.write_all(&[1, 2, 3, 4]).unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID3, Interest::READABLE)],
+ );
+
+ poll.registry().deregister(&mut listener).unwrap();
+ poll.registry().deregister(&mut stream).unwrap();
+ poll.registry().deregister(&mut stream2).unwrap();
+
+ expect_no_events(&mut poll, &mut events);
+
+ let mut buf = [0; 10];
+ expect_read!(stream.read(&mut buf), &[1, 2, 3, 4]);
+
+ checked_write!(stream2.write(&[1, 2, 3, 4]));
+ expect_no_events(&mut poll, &mut events);
+
+ sleep(Duration::from_millis(200));
+ expect_read!(stream.read(&mut buf), &[1, 2, 3, 4]);
+
+ expect_no_events(&mut poll, &mut events);
+}
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use mio::net::TcpListener;
+use mio::{Interest, Token};
+use std::io::{self, Read};
+use std::net::{self, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
+use std::sync::{Arc, Barrier};
+use std::thread;
+
+mod util;
+use util::{
+ any_local_address, any_local_ipv6_address, assert_send, assert_socket_close_on_exec,
+ assert_socket_non_blocking, assert_sync, assert_would_block, expect_events, expect_no_events,
+ init, init_with_poll, ExpectEvent,
+};
+
+const ID1: Token = Token(0);
+const ID2: Token = Token(1);
+
+#[test]
+fn is_send_and_sync() {
+ assert_send::<TcpListener>();
+ assert_sync::<TcpListener>();
+}
+
+#[test]
+fn tcp_listener() {
+ smoke_test_tcp_listener(any_local_address(), TcpListener::bind);
+}
+
+#[test]
+fn tcp_listener_ipv6() {
+ smoke_test_tcp_listener(any_local_ipv6_address(), TcpListener::bind);
+}
+
+#[test]
+fn tcp_listener_std() {
+ smoke_test_tcp_listener(any_local_address(), |addr| {
+ let listener = net::TcpListener::bind(addr).unwrap();
+ // A `std::net::TcpListener` is blocking by default, so make sure it is in
+ // non-blocking mode before wrapping it in the Mio equivalent.
+ listener.set_nonblocking(true).unwrap();
+ Ok(TcpListener::from_std(listener))
+ });
+}
+
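+/// Generic smoke test for a `TcpListener` created by `make_listener`: checks
+/// the non-blocking and close-on-exec flags, registers the listener, accepts a
+/// single connection and verifies that any further accept would block.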
+fn smoke_test_tcp_listener<F>(addr: SocketAddr, make_listener: F)
+where
+ F: FnOnce(SocketAddr) -> io::Result<TcpListener>,
+{
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut listener = make_listener(addr).unwrap();
+ let address = listener.local_addr().unwrap();
+
+ assert_socket_non_blocking(&listener);
+ assert_socket_close_on_exec(&listener);
+
+ poll.registry()
+ .register(&mut listener, ID1, Interest::READABLE)
+ .expect("unable to register TCP listener");
+
+ let barrier = Arc::new(Barrier::new(2));
+ let thread_handle = start_connections(address, 1, barrier.clone());
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ // Expect a single connection.
+ let (mut stream, peer_address) = listener.accept().expect("unable to accept connection");
+ assert!(peer_address.ip().is_loopback());
+ assert_eq!(stream.peer_addr().unwrap(), peer_address);
+ assert_eq!(stream.local_addr().unwrap(), address);
+
+ // Expect the stream to be non-blocking.
+ let mut buf = [0; 20];
+ assert_would_block(stream.read(&mut buf));
+
+ // Expect no more connections.
+ assert_would_block(listener.accept());
+
+ assert!(listener.take_error().unwrap().is_none());
+
+ barrier.wait();
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn set_get_ttl() {
+ init();
+
+ let listener = TcpListener::bind(any_local_address()).unwrap();
+
+ // set TTL, get TTL, make sure it has the expected value
+ const TTL: u32 = 10;
+ listener.set_ttl(TTL).unwrap();
+ assert_eq!(listener.ttl().unwrap(), TTL);
+ assert!(listener.take_error().unwrap().is_none());
+}
+
+#[test]
+fn get_ttl_without_previous_set() {
+ init();
+
+ let listener = TcpListener::bind(any_local_address()).unwrap();
+
+ // Getting the TTL should work without a previous `set_ttl` call.
+ listener.ttl().expect("unable to get TTL for TCP listener");
+ assert!(listener.take_error().unwrap().is_none());
+}
+
+#[cfg(unix)]
+#[test]
+fn raw_fd() {
+ init();
+
+ let listener = TcpListener::bind(any_local_address()).unwrap();
+ let address = listener.local_addr().unwrap();
+
+ let raw_fd1 = listener.as_raw_fd();
+ let raw_fd2 = listener.into_raw_fd();
+ assert_eq!(raw_fd1, raw_fd2);
+
+ let listener = unsafe { TcpListener::from_raw_fd(raw_fd2) };
+ assert_eq!(listener.as_raw_fd(), raw_fd1);
+ assert_eq!(listener.local_addr().unwrap(), address);
+}
+
+#[test]
+fn registering() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+
+ poll.registry()
+ .register(&mut listener, ID1, Interest::READABLE)
+ .expect("unable to register TCP listener");
+
+ expect_no_events(&mut poll, &mut events);
+
+ // NOTE: more tests are done in the smoke tests above.
+}
+
+#[test]
+fn reregister() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+ let address = listener.local_addr().unwrap();
+
+ poll.registry()
+ .register(&mut listener, ID1, Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .reregister(&mut listener, ID2, Interest::READABLE)
+ .unwrap();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let thread_handle = start_connections(address, 1, barrier.clone());
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+
+ let (stream, peer_address) = listener.accept().expect("unable to accept connection");
+ assert!(peer_address.ip().is_loopback());
+ assert_eq!(stream.peer_addr().unwrap(), peer_address);
+ assert_eq!(stream.local_addr().unwrap(), address);
+
+ assert_would_block(listener.accept());
+
+ assert!(listener.take_error().unwrap().is_none());
+
+ barrier.wait();
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn no_events_after_deregister() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+ let address = listener.local_addr().unwrap();
+
+ poll.registry()
+ .register(&mut listener, ID1, Interest::READABLE)
+ .unwrap();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let thread_handle = start_connections(address, 1, barrier.clone());
+
+ poll.registry().deregister(&mut listener).unwrap();
+
+ expect_no_events(&mut poll, &mut events);
+
+ // Should still be able to accept the connection.
+ let (stream, peer_address) = listener.accept().expect("unable to accept connection");
+ assert!(peer_address.ip().is_loopback());
+ assert_eq!(stream.peer_addr().unwrap(), peer_address);
+ assert_eq!(stream.local_addr().unwrap(), address);
+
+ assert_would_block(listener.accept());
+
+ assert!(listener.take_error().unwrap().is_none());
+
+ barrier.wait();
+ thread_handle.join().expect("unable to join thread");
+}
+
+/// This tests that reregistering after a successful accept works.
+#[test]
+fn tcp_listener_two_streams() {
+ let (mut poll1, mut events) = init_with_poll();
+
+ let mut listener = TcpListener::bind(any_local_address()).unwrap();
+ let address = listener.local_addr().unwrap();
+
+ let barrier = Arc::new(Barrier::new(3));
+ let thread_handle1 = start_connections(address, 1, barrier.clone());
+
+ poll1
+ .registry()
+ .register(&mut listener, ID1, Interest::READABLE)
+ .unwrap();
+
+ expect_events(
+ &mut poll1,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ {
+ let (stream, peer_address) = listener.accept().expect("unable to accept connection");
+ assert!(peer_address.ip().is_loopback());
+ assert_eq!(stream.peer_addr().unwrap(), peer_address);
+ assert_eq!(stream.local_addr().unwrap(), address);
+ }
+
+ assert_would_block(listener.accept());
+
+ let thread_handle2 = start_connections(address, 1, barrier.clone());
+
+ expect_events(
+ &mut poll1,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ {
+ let (stream, peer_address) = listener.accept().expect("unable to accept connection");
+ assert!(peer_address.ip().is_loopback());
+ assert_eq!(stream.peer_addr().unwrap(), peer_address);
+ assert_eq!(stream.local_addr().unwrap(), address);
+ }
+
+ expect_no_events(&mut poll1, &mut events);
+
+ barrier.wait();
+ thread_handle1.join().expect("unable to join thread");
+ thread_handle2.join().expect("unable to join thread");
+}
+
+/// Start `n_connections` connections to `address`. After each connection is
+/// made the thread waits on `barrier` before dropping the connection.
+fn start_connections(
+ address: SocketAddr,
+ n_connections: usize,
+ barrier: Arc<Barrier>,
+) -> thread::JoinHandle<()> {
+ thread::spawn(move || {
+ for _ in 0..n_connections {
+ let conn = net::TcpStream::connect(address).unwrap();
+ barrier.wait();
+ drop(conn);
+ }
+ })
+}
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use std::io::{self, IoSlice, IoSliceMut, Read, Write};
+use std::net::{self, Shutdown, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
+use std::sync::{mpsc::channel, Arc, Barrier};
+use std::thread;
+use std::time::Duration;
+
+use mio::net::TcpStream;
+use mio::{Interest, Token};
+
+#[macro_use]
+mod util;
+#[cfg(not(target_os = "windows"))]
+use util::init;
+use util::{
+ any_local_address, any_local_ipv6_address, assert_send, assert_socket_close_on_exec,
+ assert_socket_non_blocking, assert_sync, assert_would_block, expect_events, expect_no_events,
+ init_with_poll, set_linger_zero, ExpectEvent, Readiness,
+};
+
+const DATA1: &[u8] = b"Hello world!";
+const DATA2: &[u8] = b"Hello mars!";
+// TODO: replace with `DATA1.len()` once `const_slice_len` is stable.
+const DATA1_LEN: usize = 12;
+const DATA2_LEN: usize = 11;
+
+const ID1: Token = Token(0);
+const ID2: Token = Token(1);
+
+#[test]
+fn is_send_and_sync() {
+ assert_send::<TcpStream>();
+ assert_sync::<TcpStream>();
+}
+
+#[test]
+fn tcp_stream_ipv4() {
+ smoke_test_tcp_stream(any_local_address(), TcpStream::connect);
+}
+
+#[test]
+fn tcp_stream_ipv6() {
+ smoke_test_tcp_stream(any_local_ipv6_address(), TcpStream::connect);
+}
+
+#[test]
+fn tcp_stream_std() {
+ smoke_test_tcp_stream(any_local_address(), |addr| {
+ let stream = net::TcpStream::connect(addr).unwrap();
+ // A `std::net::TcpStream` is blocking by default, so make sure it is in
+ // non-blocking mode before wrapping it in the Mio equivalent.
+ stream.set_nonblocking(true).unwrap();
+ Ok(TcpStream::from_std(stream))
+ });
+}
+
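+/// Generic smoke test for a `TcpStream` created by `make_stream`: checks the
+/// non-blocking and close-on-exec flags, registers the stream with the echo
+/// listener and exercises both plain and vectored reads and writes.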
+fn smoke_test_tcp_stream<F>(addr: SocketAddr, make_stream: F)
+where
+ F: FnOnce(SocketAddr) -> io::Result<TcpStream>,
+{
+ let (mut poll, mut events) = init_with_poll();
+
+ let (handle, addr) = echo_listener(addr, 1);
+ let mut stream = make_stream(addr).unwrap();
+
+ assert_socket_non_blocking(&stream);
+ assert_socket_close_on_exec(&stream);
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::WRITABLE.add(Interest::READABLE))
+ .expect("unable to register TCP stream");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ let mut buf = [0; 16];
+ assert_would_block(stream.peek(&mut buf));
+ assert_would_block(stream.read(&mut buf));
+
+ // NOTE: the call to `peer_addr` must happen after we have received a
+ // writable event, as the stream might not be connected yet.
+ assert_eq!(stream.peer_addr().unwrap(), addr);
+ assert!(stream.local_addr().unwrap().ip().is_loopback());
+
+ checked_write!(stream.write(DATA1));
+
+ stream.flush().unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ expect_read!(stream.peek(&mut buf), DATA1);
+ expect_read!(stream.read(&mut buf), DATA1);
+
+ assert!(stream.take_error().unwrap().is_none());
+
+ assert_would_block(stream.read(&mut buf));
+
+ let bufs = [IoSlice::new(DATA1), IoSlice::new(DATA2)];
+ let n = stream
+ .write_vectored(&bufs)
+ .expect("unable to write vectored to stream");
+ assert_eq!(n, DATA1.len() + DATA2.len());
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ let mut buf1 = [1; DATA1_LEN];
+ let mut buf2 = [2; DATA2_LEN + 1];
+ let mut bufs = [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)];
+ let n = stream
+ .read_vectored(&mut bufs)
+ .expect("unable to read vectored from stream");
+ assert_eq!(n, DATA1.len() + DATA2.len());
+ assert_eq!(&buf1, DATA1);
+ assert_eq!(&buf2[..DATA2.len()], DATA2);
+ assert_eq!(buf2[DATA2.len()], 2); // Last byte should be unchanged.
+
+ // Close the connection to allow the listener to shutdown.
+ drop(stream);
+ handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn set_get_ttl() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let (thread_handle, address) = start_listener(1, Some(barrier.clone()), false);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ // On Windows the stream must be connected before setting the TTL, otherwise
+ // the behavior is unspecified. Register and expect a WRITABLE event here to
+ // make sure the stream is connected.
+ poll.registry()
+ .register(&mut stream, ID1, Interest::WRITABLE)
+ .expect("unable to register TCP stream");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ // set TTL, get TTL, make sure it has the expected value
+ const TTL: u32 = 10;
+ stream.set_ttl(TTL).unwrap();
+ assert_eq!(stream.ttl().unwrap(), TTL);
+ assert!(stream.take_error().unwrap().is_none());
+
+ barrier.wait();
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn get_ttl_without_previous_set() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let (thread_handle, address) = start_listener(1, Some(barrier.clone()), false);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ // On Windows the stream must be connected before getting the TTL, otherwise
+ // the behavior is unspecified. Register and expect a WRITABLE event here to
+ // make sure the stream is connected.
+ poll.registry()
+ .register(&mut stream, ID1, Interest::WRITABLE)
+ .expect("unable to register TCP stream");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ // Getting the TTL should work without a previous `set_ttl` call.
+ stream.ttl().expect("unable to get TTL for TCP stream");
+ assert!(stream.take_error().unwrap().is_none());
+
+ barrier.wait();
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn set_get_nodelay() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let (thread_handle, address) = start_listener(1, Some(barrier.clone()), false);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ // On Windows the stream must be connected before setting nodelay, otherwise
+ // the behavior is unspecified. Register and expect a WRITABLE event here to
+ // make sure the stream is connected.
+ poll.registry()
+ .register(&mut stream, ID1, Interest::WRITABLE)
+ .expect("unable to register TCP stream");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ // set nodelay, get nodelay, make sure it has the expected value
+ const NO_DELAY: bool = true;
+ stream.set_nodelay(NO_DELAY).unwrap();
+ assert_eq!(stream.nodelay().unwrap(), NO_DELAY);
+ assert!(stream.take_error().unwrap().is_none());
+
+ barrier.wait();
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn get_nodelay_without_previous_set() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let (thread_handle, address) = start_listener(1, Some(barrier.clone()), false);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ // On Windows the stream must be connected before getting nodelay, otherwise
+ // the behavior is unspecified. Register and expect a WRITABLE event here to
+ // make sure the stream is connected.
+ poll.registry()
+ .register(&mut stream, ID1, Interest::WRITABLE)
+ .expect("unable to register TCP stream");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ // Getting nodelay should work without a previous `set_nodelay` call.
+ stream
+ .nodelay()
+ .expect("Unable to get nodelay for TCP stream");
+ assert!(stream.take_error().unwrap().is_none());
+
+ barrier.wait();
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn shutdown_read() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let (thread_handle, address) = echo_listener(any_local_address(), 1);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::WRITABLE.add(Interest::READABLE))
+ .expect("unable to register TCP stream");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ checked_write!(stream.write(DATA2));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ stream.shutdown(Shutdown::Read).unwrap();
+
+ // Shutting down the reading side is different on each platform. For example
+ // on Linux-based systems we can still read.
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ {
+ let mut buf = [0; 20];
+ expect_read!(stream.read(&mut buf), &[]);
+ }
+
+ drop(stream);
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+#[ignore = "This test is flaky, it doesn't always receive an event after shutting down the write side"]
+fn shutdown_write() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let (thread_handle, address) = echo_listener(any_local_address(), 1);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::WRITABLE.add(Interest::READABLE))
+ .expect("unable to register TCP stream");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ checked_write!(stream.write(DATA1));
+
+ stream.shutdown(Shutdown::Write).unwrap();
+
+ let err = stream.write(DATA2).unwrap_err();
+ assert_eq!(err.kind(), io::ErrorKind::BrokenPipe);
+
+ // FIXME: we don't always receive the following event.
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ // Read should be ok.
+ let mut buf = [0; 20];
+ expect_read!(stream.read(&mut buf), DATA1);
+
+ drop(stream);
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn shutdown_both() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let (thread_handle, address) = echo_listener(any_local_address(), 1);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::WRITABLE.add(Interest::READABLE))
+ .expect("unable to register TCP stream");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ checked_write!(stream.write(DATA1));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ stream.shutdown(Shutdown::Both).unwrap();
+
+ // Shutting down the reading side is different on each platform. For example
+ // on Linux-based systems we can still read.
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ {
+ let mut buf = [0; 20];
+ expect_read!(stream.read(&mut buf), &[]);
+ }
+
+ let err = stream.write(DATA2).unwrap_err();
+ #[cfg(unix)]
+ assert_eq!(err.kind(), io::ErrorKind::BrokenPipe);
+ #[cfg(windows)]
+ assert_eq!(err.kind(), io::ErrorKind::ConnectionAborted);
+
+ drop(stream);
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[cfg(unix)]
+#[test]
+fn raw_fd() {
+ init();
+
+ let (thread_handle, address) = start_listener(1, None, false);
+
+ let stream = TcpStream::connect(address).unwrap();
+ let address = stream.local_addr().unwrap();
+
+ let raw_fd1 = stream.as_raw_fd();
+ let raw_fd2 = stream.into_raw_fd();
+ assert_eq!(raw_fd1, raw_fd2);
+
+ let stream = unsafe { TcpStream::from_raw_fd(raw_fd2) };
+ assert_eq!(stream.as_raw_fd(), raw_fd1);
+ assert_eq!(stream.local_addr().unwrap(), address);
+
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn registering() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let (thread_handle, address) = echo_listener(any_local_address(), 1);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::READABLE)
+ .expect("unable to register TCP stream");
+
+ expect_no_events(&mut poll, &mut events);
+
+ // NOTE: more tests are done in the smoke tests above.
+
+ drop(stream);
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn reregistering() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let (thread_handle, address) = echo_listener(any_local_address(), 1);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::READABLE)
+ .expect("unable to register TCP stream");
+
+ poll.registry()
+ .reregister(&mut stream, ID2, Interest::WRITABLE)
+ .expect("unable to reregister TCP stream");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::WRITABLE)],
+ );
+
+ assert_eq!(stream.peer_addr().unwrap(), address);
+
+ drop(stream);
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn no_events_after_deregister() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let (thread_handle, address) = echo_listener(any_local_address(), 1);
+
+ let mut stream = TcpStream::connect(address).unwrap();
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::WRITABLE.add(Interest::READABLE))
+ .expect("unable to register TCP stream");
+
+ poll.registry()
+ .deregister(&mut stream)
+ .expect("unable to deregister TCP stream");
+
+ expect_no_events(&mut poll, &mut events);
+
+ // We do expect to be connected.
+ assert_eq!(stream.peer_addr().unwrap(), address);
+
+ // Also, write should work
+ let mut buf = [0; 16];
+ assert_would_block(stream.peek(&mut buf));
+ assert_would_block(stream.read(&mut buf));
+
+ checked_write!(stream.write(DATA1));
+ stream.flush().unwrap();
+
+ expect_no_events(&mut poll, &mut events);
+
+ drop(stream);
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+#[cfg_attr(
+ windows,
+ ignore = "fails on Windows; client read closed events are not triggered"
+)]
+fn tcp_shutdown_client_read_close_event() {
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+
+ let (handle, sockaddr) = start_listener(1, Some(barrier.clone()), false);
+ let mut stream = TcpStream::connect(sockaddr).unwrap();
+
+ let interests = Interest::READABLE | Interest::WRITABLE;
+
+ poll.registry()
+ .register(&mut stream, ID1, interests)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ stream.shutdown(Shutdown::Read).unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Readiness::READ_CLOSED)],
+ );
+
+ barrier.wait();
+ handle.join().expect("failed to join thread");
+}
+
+#[test]
+#[cfg_attr(windows, ignore = "fails; client write_closed events are not found")]
+#[cfg_attr(
+ any(target_os = "android", target_os = "illumos", target_os = "linux"),
+ ignore = "fails; client write_closed events are not found"
+)]
+fn tcp_shutdown_client_write_close_event() {
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+
+ let (handle, sockaddr) = start_listener(1, Some(barrier.clone()), false);
+ let mut stream = TcpStream::connect(sockaddr).unwrap();
+
+ let interests = Interest::READABLE | Interest::WRITABLE;
+
+ poll.registry()
+ .register(&mut stream, ID1, interests)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ stream.shutdown(Shutdown::Write).unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Readiness::WRITE_CLOSED)],
+ );
+
+ barrier.wait();
+ handle.join().expect("failed to join thread");
+}
+
+#[test]
+fn tcp_shutdown_server_write_close_event() {
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+
+ let (handle, sockaddr) = start_listener(1, Some(barrier.clone()), true);
+ let mut stream = TcpStream::connect(sockaddr).unwrap();
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::READABLE.add(Interest::WRITABLE))
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
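+ // After this barrier the listener thread shuts down its write half, which
+ // shows up on our side as a read-closed event.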
+ barrier.wait();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Readiness::READ_CLOSED)],
+ );
+
+ barrier.wait();
+ handle.join().expect("failed to join thread");
+}
+
+#[test]
+fn tcp_reset_close_event() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let listener = net::TcpListener::bind(any_local_address()).unwrap();
+ let sockaddr = listener.local_addr().unwrap();
+ let mut stream = TcpStream::connect(sockaddr).unwrap();
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::READABLE.add(Interest::WRITABLE))
+ .unwrap();
+
+ let server_stream = listener.accept().unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+ checked_write!(stream.write(DATA1));
+
+ // Try to read something.
+ assert_would_block(stream.read(&mut [0]));
+
+ // Server goes away.
+ drop(server_stream);
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Readiness::READ_CLOSED)],
+ );
+
+ // Make sure we quiesce. `expect_no_events` seems to flake sometimes on mac/freebsd.
+ loop {
+ poll.poll(&mut events, Some(Duration::from_millis(100)))
+ .expect("poll failed");
+ if events.iter().count() == 0 {
+ break;
+ }
+ }
+}
+
+#[test]
+#[cfg_attr(
+ windows,
+ ignore = "fails on Windows; client close events are not found"
+)]
+#[cfg_attr(
+ any(target_os = "illumos"),
+ ignore = "fails; client write_closed events are not found"
+)]
+fn tcp_shutdown_client_both_close_event() {
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+
+ let (handle, sockaddr) = start_listener(1, Some(barrier.clone()), false);
+ let mut stream = TcpStream::connect(sockaddr).unwrap();
+
+ poll.registry()
+ .register(&mut stream, ID1, Interest::READABLE.add(Interest::WRITABLE))
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ stream.shutdown(Shutdown::Both).unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Readiness::WRITE_CLOSED)],
+ );
+
+ barrier.wait();
+ handle.join().expect("failed to join thread");
+}
+
+/// Start a listener that accepts `n_connections` connections on the returned
+/// address. It echoes back any data it reads from the connection before
+/// accepting another one.
+fn echo_listener(addr: SocketAddr, n_connections: usize) -> (thread::JoinHandle<()>, SocketAddr) {
+ let (sender, receiver) = channel();
+ let thread_handle = thread::spawn(move || {
+ let listener = net::TcpListener::bind(addr).unwrap();
+ let local_address = listener.local_addr().unwrap();
+ sender.send(local_address).unwrap();
+
+ let mut buf = [0; 128];
+ for _ in 0..n_connections {
+ let (mut stream, _) = listener.accept().unwrap();
+
+ loop {
+ let n = stream
+ .read(&mut buf)
+ // On Linux-based systems this will cause a connection reset
+ // error when the reading side of the peer connection is
+ // shut down; we don't consider that an actual error here.
+ .or_else(|err| match err {
+ ref err if err.kind() == io::ErrorKind::ConnectionReset => Ok(0),
+ err => Err(err),
+ })
+ .expect("error reading");
+ if n == 0 {
+ break;
+ }
+ checked_write!(stream.write(&buf[..n]));
+ }
+ }
+ });
+ (thread_handle, receiver.recv().unwrap())
+}
+
+/// Start a listener that accepts `n_connections` connections on the returned
+/// address. If a barrier is provided it will wait on it before closing the
+/// connection.
+fn start_listener(
+ n_connections: usize,
+ barrier: Option<Arc<Barrier>>,
+ shutdown_write: bool,
+) -> (thread::JoinHandle<()>, SocketAddr) {
+ let (sender, receiver) = channel();
+ let thread_handle = thread::spawn(move || {
+ let listener = net::TcpListener::bind(any_local_address()).unwrap();
+ let local_address = listener.local_addr().unwrap();
+ sender.send(local_address).unwrap();
+
+ for _ in 0..n_connections {
+ let (stream, _) = listener.accept().unwrap();
+ if let Some(ref barrier) = barrier {
+ barrier.wait();
+
+ if shutdown_write {
+ stream.shutdown(Shutdown::Write).unwrap();
+ barrier.wait();
+ }
+ }
+ drop(stream);
+ }
+ });
+ (thread_handle, receiver.recv().unwrap())
+}
+
+#[test]
+fn hup_event_on_disconnect() {
+ use mio::net::TcpListener;
+
+ let (mut poll, mut events) = init_with_poll();
+ let addr = "127.0.0.1:0".parse().unwrap();
+
+ let mut listener = TcpListener::bind(addr).unwrap();
+ let addr = listener.local_addr().unwrap();
+ poll.registry()
+ .register(&mut listener, Token(0), Interest::READABLE)
+ .unwrap();
+
+ let mut stream = TcpStream::connect(addr).unwrap();
+ poll.registry()
+ .register(
+ &mut stream,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(Token(0), Interest::READABLE),
+ ExpectEvent::new(Token(1), Interest::WRITABLE),
+ ],
+ );
+
+ let (sock, _) = listener.accept().unwrap();
+ // Prevent the OS from performing a graceful shutdown
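+ // (with a zero linger the drop below sends an RST instead of a FIN, so the
+ // peer sees the connection go away immediately)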
+ set_linger_zero(&sock);
+ drop(sock);
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(Token(1), Interest::READABLE)],
+ );
+}
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use log::{debug, info};
+use mio::net::UdpSocket;
+use mio::{Events, Interest, Poll, Registry, Token};
+use std::net::{self, IpAddr, SocketAddr};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
+use std::str;
+use std::sync::{Arc, Barrier};
+use std::thread;
+use std::time::Duration;
+
+#[macro_use]
+mod util;
+use util::{
+ any_local_address, any_local_ipv6_address, assert_error, assert_send,
+ assert_socket_close_on_exec, assert_socket_non_blocking, assert_sync, assert_would_block,
+ expect_events, expect_no_events, init, init_with_poll, ExpectEvent,
+};
+
+const DATA1: &[u8] = b"Hello world!";
+const DATA2: &[u8] = b"Hello mars!";
+
+const LISTENER: Token = Token(0);
+const SENDER: Token = Token(1);
+const ID1: Token = Token(2);
+const ID2: Token = Token(3);
+const ID3: Token = Token(4);
+
+#[test]
+#[cfg(all(unix, not(debug_assertions)))]
+fn assert_size() {
+ use mio::net::*;
+ use std::mem::size_of;
+
+ // Without debug assertions enabled `UdpSocket` should have the same size
+ // as the system-specific socket, i.e. just a file descriptor on Unix
+ // platforms.
+ assert_eq!(size_of::<UdpSocket>(), size_of::<std::net::UdpSocket>());
+}
+
+#[test]
+fn empty_datagram() {
+ const EMPTY: &[u8] = b"";
+
+ let (mut poll, mut events) = init_with_poll();
+ let mut s1 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut s2 = UdpSocket::bind(any_local_address()).unwrap();
+
+ poll.registry()
+ .register(&mut s1, ID1, Interest::WRITABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut s2, ID2, Interest::READABLE)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ checked_write!(s1.send_to(EMPTY, s2.local_addr().unwrap()));
+
+ let mut buf = [0; 10];
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+ expect_read!(s2.recv_from(&mut buf), EMPTY, s1.local_addr().unwrap());
+}
+
+#[test]
+fn is_send_and_sync() {
+ assert_send::<UdpSocket>();
+ assert_sync::<UdpSocket>();
+}
+
+#[test]
+fn unconnected_udp_socket_ipv4() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+ let socket2 = UdpSocket::bind(any_local_address()).unwrap();
+ smoke_test_unconnected_udp_socket(socket1, socket2);
+}
+
+#[test]
+fn unconnected_udp_socket_ipv6() {
+ let socket1 = UdpSocket::bind(any_local_ipv6_address()).unwrap();
+ let socket2 = UdpSocket::bind(any_local_ipv6_address()).unwrap();
+ smoke_test_unconnected_udp_socket(socket1, socket2);
+}
+
+#[test]
+fn unconnected_udp_socket_std() {
+ let socket1 = net::UdpSocket::bind(any_local_address()).unwrap();
+ let socket2 = net::UdpSocket::bind(any_local_address()).unwrap();
+
+ // `std::net::UdpSocket`s are blocking by default, so make sure they are
+ // in non-blocking mode before wrapping in a Mio equivalent.
+ socket1.set_nonblocking(true).unwrap();
+ socket2.set_nonblocking(true).unwrap();
+
+ let socket1 = UdpSocket::from_std(socket1);
+ let socket2 = UdpSocket::from_std(socket2);
+ smoke_test_unconnected_udp_socket(socket1, socket2);
+}
+
+fn smoke_test_unconnected_udp_socket(mut socket1: UdpSocket, mut socket2: UdpSocket) {
+ let (mut poll, mut events) = init_with_poll();
+
+ assert_socket_non_blocking(&socket1);
+ assert_socket_close_on_exec(&socket1);
+ assert_socket_non_blocking(&socket2);
+ assert_socket_close_on_exec(&socket2);
+
+ let address1 = socket1.local_addr().unwrap();
+ let address2 = socket2.local_addr().unwrap();
+
+ poll.registry()
+ .register(
+ &mut socket1,
+ ID1,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .expect("unable to register UDP socket");
+ poll.registry()
+ .register(
+ &mut socket2,
+ ID2,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .expect("unable to register UDP socket");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(ID1, Interest::WRITABLE),
+ ExpectEvent::new(ID2, Interest::WRITABLE),
+ ],
+ );
+
+ let mut buf = [0; 20];
+ assert_would_block(socket1.peek_from(&mut buf));
+ assert_would_block(socket1.recv_from(&mut buf));
+
+ checked_write!(socket1.send_to(DATA1, address2));
+ checked_write!(socket2.send_to(DATA2, address1));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(ID1, Interest::READABLE),
+ ExpectEvent::new(ID2, Interest::READABLE),
+ ],
+ );
+
+ expect_read!(socket1.peek_from(&mut buf), DATA2, address2);
+ expect_read!(socket2.peek_from(&mut buf), DATA1, address1);
+
+ expect_read!(socket1.recv_from(&mut buf), DATA2, address2);
+ expect_read!(socket2.recv_from(&mut buf), DATA1, address1);
+
+ assert!(socket1.take_error().unwrap().is_none());
+ assert!(socket2.take_error().unwrap().is_none());
+}
+
+#[test]
+fn set_get_ttl() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+
+ // set TTL, get TTL, make sure it has the expected value
+ const TTL: u32 = 10;
+ socket1.set_ttl(TTL).unwrap();
+ assert_eq!(socket1.ttl().unwrap(), TTL);
+ assert!(socket1.take_error().unwrap().is_none());
+}
+
+#[test]
+fn get_ttl_without_previous_set() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+
+ // expect a get TTL to work w/o any previous set_ttl
+ socket1.ttl().expect("unable to get TTL for UDP socket");
+}
+
+#[test]
+fn set_get_broadcast() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+
+ socket1.set_broadcast(true).unwrap();
+ assert!(socket1.broadcast().unwrap());
+
+ socket1.set_broadcast(false).unwrap();
+ assert!(!socket1.broadcast().unwrap());
+
+ assert!(socket1.take_error().unwrap().is_none());
+}
+
+#[test]
+fn get_broadcast_without_previous_set() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+
+ socket1
+ .broadcast()
+ .expect("unable to get broadcast for UDP socket");
+}
+
+#[test]
+fn set_get_multicast_loop_v4() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+
+ socket1.set_multicast_loop_v4(true).unwrap();
+ assert!(socket1.multicast_loop_v4().unwrap());
+
+ socket1.set_multicast_loop_v4(false).unwrap();
+ assert!(!socket1.multicast_loop_v4().unwrap());
+
+ assert!(socket1.take_error().unwrap().is_none());
+}
+
+#[test]
+fn get_multicast_loop_v4_without_previous_set() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+
+ socket1
+ .multicast_loop_v4()
+ .expect("unable to get multicast_loop_v4 for UDP socket");
+}
+
+#[test]
+fn set_get_multicast_ttl_v4() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+
+ const TTL: u32 = 10;
+ socket1.set_multicast_ttl_v4(TTL).unwrap();
+ assert_eq!(socket1.multicast_ttl_v4().unwrap(), TTL);
+
+ assert!(socket1.take_error().unwrap().is_none());
+}
+
+#[test]
+fn get_multicast_ttl_v4_without_previous_set() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+
+ socket1
+ .multicast_ttl_v4()
+ .expect("unable to get multicast_ttl_v4 for UDP socket");
+}
+
+#[test]
+fn set_get_multicast_loop_v6() {
+ let socket1 = UdpSocket::bind(any_local_ipv6_address()).unwrap();
+
+ socket1.set_multicast_loop_v6(true).unwrap();
+ assert!(socket1.multicast_loop_v6().unwrap());
+
+ socket1.set_multicast_loop_v6(false).unwrap();
+ assert!(!socket1.multicast_loop_v6().unwrap());
+
+ assert!(socket1.take_error().unwrap().is_none());
+}
+
+#[test]
+fn get_multicast_loop_v6_without_previous_set() {
+ let socket1 = UdpSocket::bind(any_local_ipv6_address()).unwrap();
+
+ socket1
+ .multicast_loop_v6()
+ .expect("unable to get multicast_loop_v6 for UDP socket");
+}
+
+#[test]
+fn connected_udp_socket_ipv4() {
+ let socket1 = UdpSocket::bind(any_local_address()).unwrap();
+ let address1 = socket1.local_addr().unwrap();
+
+ let socket2 = UdpSocket::bind(any_local_address()).unwrap();
+ let address2 = socket2.local_addr().unwrap();
+
+ socket1.connect(address2).unwrap();
+ socket2.connect(address1).unwrap();
+
+ smoke_test_connected_udp_socket(socket1, socket2);
+}
+
+#[test]
+fn connected_udp_socket_ipv6() {
+ let socket1 = UdpSocket::bind(any_local_ipv6_address()).unwrap();
+ let address1 = socket1.local_addr().unwrap();
+
+ let socket2 = UdpSocket::bind(any_local_ipv6_address()).unwrap();
+ let address2 = socket2.local_addr().unwrap();
+
+ socket1.connect(address2).unwrap();
+ socket2.connect(address1).unwrap();
+
+ smoke_test_connected_udp_socket(socket1, socket2);
+}
+
+#[test]
+fn connected_udp_socket_std() {
+ let socket1 = net::UdpSocket::bind(any_local_address()).unwrap();
+ let address1 = socket1.local_addr().unwrap();
+
+ let socket2 = net::UdpSocket::bind(any_local_address()).unwrap();
+ let address2 = socket2.local_addr().unwrap();
+
+ socket1.connect(address2).unwrap();
+ socket2.connect(address1).unwrap();
+
+ // `std::net::UdpSocket`s are blocking by default, so make sure they are
+ // in non-blocking mode before wrapping in a Mio equivalent.
+ socket1.set_nonblocking(true).unwrap();
+ socket2.set_nonblocking(true).unwrap();
+
+ let socket1 = UdpSocket::from_std(socket1);
+ let socket2 = UdpSocket::from_std(socket2);
+
+ smoke_test_connected_udp_socket(socket1, socket2);
+}
+
+fn smoke_test_connected_udp_socket(mut socket1: UdpSocket, mut socket2: UdpSocket) {
+ let (mut poll, mut events) = init_with_poll();
+
+ assert_socket_non_blocking(&socket1);
+ assert_socket_close_on_exec(&socket1);
+ assert_socket_non_blocking(&socket2);
+ assert_socket_close_on_exec(&socket2);
+
+ poll.registry()
+ .register(
+ &mut socket1,
+ ID1,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .expect("unable to register UDP socket");
+ poll.registry()
+ .register(
+ &mut socket2,
+ ID2,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .expect("unable to register UDP socket");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(ID1, Interest::WRITABLE),
+ ExpectEvent::new(ID2, Interest::WRITABLE),
+ ],
+ );
+
+ let mut buf = [0; 20];
+ assert_would_block(socket1.peek(&mut buf));
+ assert_would_block(socket1.recv(&mut buf));
+
+ checked_write!(socket1.send(DATA1));
+ checked_write!(socket2.send(DATA2));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(ID1, Interest::READABLE),
+ ExpectEvent::new(ID2, Interest::READABLE),
+ ],
+ );
+
+ let mut buf = [0; 20];
+ expect_read!(socket1.peek(&mut buf), DATA2);
+ expect_read!(socket2.peek(&mut buf), DATA1);
+
+ expect_read!(socket1.recv(&mut buf), DATA2);
+ expect_read!(socket2.recv(&mut buf), DATA1);
+
+ assert!(socket1.take_error().unwrap().is_none());
+ assert!(socket2.take_error().unwrap().is_none());
+}
+
+#[test]
+fn reconnect_udp_socket_sending() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut socket1 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut socket2 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut socket3 = UdpSocket::bind(any_local_address()).unwrap();
+
+ let address1 = socket1.local_addr().unwrap();
+ let address2 = socket2.local_addr().unwrap();
+ let address3 = socket3.local_addr().unwrap();
+
+ socket1.connect(address2).unwrap();
+ socket2.connect(address1).unwrap();
+ socket3.connect(address1).unwrap();
+
+ poll.registry()
+ .register(
+ &mut socket1,
+ ID1,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .unwrap();
+ poll.registry()
+ .register(&mut socket2, ID2, Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut socket3, ID3, Interest::READABLE)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ checked_write!(socket1.send(DATA1));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+
+ let mut buf = [0; 20];
+ expect_read!(socket2.recv(&mut buf), DATA1);
+
+ socket1.connect(address3).unwrap();
+ checked_write!(socket1.send(DATA2));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID3, Interest::READABLE)],
+ );
+
+ expect_read!(socket3.recv(&mut buf), DATA2);
+
+ assert!(socket1.take_error().unwrap().is_none());
+ assert!(socket2.take_error().unwrap().is_none());
+ assert!(socket3.take_error().unwrap().is_none());
+}
+
+#[test]
+fn reconnect_udp_socket_receiving() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut socket1 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut socket2 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut socket3 = UdpSocket::bind(any_local_address()).unwrap();
+
+ let address1 = socket1.local_addr().unwrap();
+ let address2 = socket2.local_addr().unwrap();
+ let address3 = socket3.local_addr().unwrap();
+
+ socket1.connect(address2).unwrap();
+ socket2.connect(address1).unwrap();
+ socket3.connect(address1).unwrap();
+
+ poll.registry()
+ .register(&mut socket1, ID1, Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut socket2, ID2, Interest::WRITABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut socket3, ID3, Interest::WRITABLE)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(ID2, Interest::WRITABLE),
+ ExpectEvent::new(ID3, Interest::WRITABLE),
+ ],
+ );
+
+ checked_write!(socket2.send(DATA1));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ let mut buf = [0; 20];
+ expect_read!(socket1.recv(&mut buf), DATA1);
+
+ // This will reregister socket1, resetting the interests.
+ assert_would_block(socket1.recv(&mut buf));
+
+ socket1.connect(address3).unwrap();
+
+ checked_write!(socket3.send(DATA2));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ // Read all data.
+ // On Windows, reading part of data returns error WSAEMSGSIZE (10040).
+ expect_read!(socket1.recv(&mut buf), DATA2);
+
+ // This will reregister socket1, resetting the interests.
+ assert_would_block(socket1.recv(&mut buf));
+
+ // Now connect back to socket 2.
+ socket1.connect(address2).unwrap();
+
+ checked_write!(socket2.send(DATA2));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::READABLE)],
+ );
+
+ expect_read!(socket1.recv(&mut buf), DATA2);
+
+ assert!(socket1.take_error().unwrap().is_none());
+ assert!(socket2.take_error().unwrap().is_none());
+ assert!(socket3.take_error().unwrap().is_none());
+}
+
+#[test]
+fn unconnected_udp_socket_connected_methods() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut socket1 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut socket2 = UdpSocket::bind(any_local_address()).unwrap();
+ let address2 = socket2.local_addr().unwrap();
+
+ poll.registry()
+ .register(&mut socket1, ID1, Interest::WRITABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut socket2, ID2, Interest::READABLE)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)],
+ );
+
+ // Socket is unconnected, but we're using a connected method.
+ if cfg!(not(target_os = "windows")) {
+ assert_error(socket1.send(DATA1), "address required");
+ }
+ if cfg!(target_os = "windows") {
+ assert_error(
+ socket1.send(DATA1),
+ "no address was supplied. (os error 10057)",
+ );
+ }
+
+ // Now send some actual data.
+ checked_write!(socket1.send_to(DATA1, address2));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+
+ // Receive methods don't require the socket to be connected; you just won't
+ // know the sender.
+ let mut buf = [0; 20];
+ expect_read!(socket2.peek(&mut buf), DATA1);
+ expect_read!(socket2.recv(&mut buf), DATA1);
+
+ assert!(socket1.take_error().unwrap().is_none());
+ assert!(socket2.take_error().unwrap().is_none());
+}
+
+#[test]
+fn connected_udp_socket_unconnected_methods() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut socket1 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut socket2 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut socket3 = UdpSocket::bind(any_local_address()).unwrap();
+
+ let address2 = socket2.local_addr().unwrap();
+ let address3 = socket3.local_addr().unwrap();
+
+ socket1.connect(address3).unwrap();
+ socket3.connect(address2).unwrap();
+
+ poll.registry()
+ .register(&mut socket1, ID1, Interest::WRITABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut socket2, ID2, Interest::WRITABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut socket3, ID3, Interest::READABLE)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(ID1, Interest::WRITABLE),
+ ExpectEvent::new(ID2, Interest::WRITABLE),
+ ],
+ );
+
+ // Can't use `send_to`.
+ // Linux (and Android) and Windows actually allow `send_to` even if the
+ // socket is connected.
+ #[cfg(not(any(target_os = "android", target_os = "linux", target_os = "windows")))]
+ assert_error(socket1.send_to(DATA1, address2), "already connected");
+ // Even if the address is the same.
+ #[cfg(not(any(target_os = "android", target_os = "linux", target_os = "windows")))]
+ assert_error(socket1.send_to(DATA1, address3), "already connected");
+
+ checked_write!(socket2.send_to(DATA2, address3));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID3, Interest::READABLE)],
+ );
+
+ let mut buf = [0; 20];
+ expect_read!(socket3.peek_from(&mut buf), DATA2, address2);
+ expect_read!(socket3.recv_from(&mut buf), DATA2, address2);
+
+ assert!(socket1.take_error().unwrap().is_none());
+ assert!(socket2.take_error().unwrap().is_none());
+ assert!(socket3.take_error().unwrap().is_none());
+}
+
+#[cfg(unix)]
+#[test]
+fn udp_socket_raw_fd() {
+ init();
+
+ let socket = UdpSocket::bind(any_local_address()).unwrap();
+ let address = socket.local_addr().unwrap();
+
+ let raw_fd1 = socket.as_raw_fd();
+ let raw_fd2 = socket.into_raw_fd();
+ assert_eq!(raw_fd1, raw_fd2);
+
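+ // `into_raw_fd` gives up ownership of the descriptor, so rebuild a
+ // `UdpSocket` from it to ensure the socket is still closed on drop.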
+ let socket = unsafe { UdpSocket::from_raw_fd(raw_fd2) };
+ assert_eq!(socket.as_raw_fd(), raw_fd1);
+ assert_eq!(socket.local_addr().unwrap(), address);
+}
+
+#[test]
+fn udp_socket_register() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut socket = UdpSocket::bind(any_local_address()).unwrap();
+ poll.registry()
+ .register(&mut socket, ID1, Interest::READABLE)
+ .expect("unable to register UDP socket");
+
+ expect_no_events(&mut poll, &mut events);
+
+ // NOTE: more tests are done in the smoke tests above.
+}
+
+#[test]
+fn udp_socket_reregister() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut socket = UdpSocket::bind(any_local_address()).unwrap();
+ let address = socket.local_addr().unwrap();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let thread_handle = send_packets(address, 1, barrier.clone());
+
+ poll.registry()
+ .register(&mut socket, ID1, Interest::WRITABLE)
+ .unwrap();
+ // Let the first packet be sent.
+ barrier.wait();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID1, Interest::WRITABLE)], // Not readable!
+ );
+
+ poll.registry()
+ .reregister(&mut socket, ID2, Interest::READABLE)
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+
+ let mut buf = [0; 20];
+ expect_read!(socket.recv_from(&mut buf), DATA1, __anywhere);
+
+ thread_handle.join().expect("unable to join thread");
+}
+
+#[test]
+fn udp_socket_no_events_after_deregister() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut socket = UdpSocket::bind(any_local_address()).unwrap();
+ let address = socket.local_addr().unwrap();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let thread_handle = send_packets(address, 1, barrier.clone());
+
+ poll.registry()
+ .register(&mut socket, ID1, Interest::READABLE)
+ .unwrap();
+
+ // Let the packet be sent.
+ barrier.wait();
+
+ poll.registry().deregister(&mut socket).unwrap();
+
+ expect_no_events(&mut poll, &mut events);
+
+ // But we do expect the packet to have been sent.
+ let mut buf = [0; 20];
+ expect_read!(socket.recv_from(&mut buf), DATA1, __anywhere);
+
+ thread_handle.join().expect("unable to join thread");
+}
+
+/// Spawns a thread that sends `n_packets` packets to `address` over UDP,
+/// waiting on the `barrier` before each send.
+fn send_packets(
+ address: SocketAddr,
+ n_packets: usize,
+ barrier: Arc<Barrier>,
+) -> thread::JoinHandle<()> {
+ thread::spawn(move || {
+ let socket = net::UdpSocket::bind(any_local_address()).unwrap();
+ for _ in 0..n_packets {
+ barrier.wait();
+ checked_write!(socket.send_to(DATA1, address));
+ }
+ })
+}
+
+pub struct UdpHandlerSendRecv {
+ tx: UdpSocket,
+ rx: UdpSocket,
+ msg: &'static str,
+ buf: Vec<u8>,
+ rx_buf: Vec<u8>,
+ connected: bool,
+ shutdown: bool,
+}
+
+impl UdpHandlerSendRecv {
+ fn new(tx: UdpSocket, rx: UdpSocket, connected: bool, msg: &'static str) -> UdpHandlerSendRecv {
+ UdpHandlerSendRecv {
+ tx,
+ rx,
+ msg,
+ buf: msg.as_bytes().to_vec(),
+ rx_buf: vec![0; 1024],
+ connected,
+ shutdown: false,
+ }
+ }
+}
+
+fn send_recv_udp(mut tx: UdpSocket, mut rx: UdpSocket, connected: bool) {
+ init();
+
+ debug!("Starting TEST_UDP_SOCKETS");
+ let mut poll = Poll::new().unwrap();
+
+ // ensure that the sockets are non-blocking
+ let mut buf = [0; 128];
+ assert_would_block(rx.recv_from(&mut buf));
+
+ info!("Registering SENDER");
+ poll.registry()
+ .register(&mut tx, SENDER, Interest::WRITABLE)
+ .unwrap();
+
+ info!("Registering LISTENER");
+ poll.registry()
+ .register(&mut rx, LISTENER, Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+
+ info!("Starting event loop to test with...");
+ let mut handler = UdpHandlerSendRecv::new(tx, rx, connected, "hello world");
+
+ while !handler.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ if event.is_readable() {
+ if let LISTENER = event.token() {
+ debug!("We are receiving a datagram now...");
+ let cnt = if !handler.connected {
+ handler.rx.recv_from(&mut handler.rx_buf).unwrap().0
+ } else {
+ handler.rx.recv(&mut handler.rx_buf).unwrap()
+ };
+
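+ // Shrink the buffer to the number of bytes actually received before
+ // comparing it against the expected message.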
+ unsafe { handler.rx_buf.set_len(cnt) };
+ assert_eq!(
+ str::from_utf8(handler.rx_buf.as_ref()).unwrap(),
+ handler.msg
+ );
+ handler.shutdown = true;
+ }
+ }
+
+ if event.is_writable() {
+ if let SENDER = event.token() {
+ let cnt = if !handler.connected {
+ let addr = handler.rx.local_addr().unwrap();
+ handler.tx.send_to(&handler.buf, addr).unwrap()
+ } else {
+ handler.tx.send(&handler.buf).unwrap()
+ };
+
+ // Advance the buffer.
+ drop(handler.buf.drain(..cnt));
+ }
+ }
+ }
+ }
+}
+
+/// Returns the sender and the receiver
+fn connected_sockets() -> (UdpSocket, UdpSocket) {
+ let tx = UdpSocket::bind(any_local_address()).unwrap();
+ let rx = UdpSocket::bind(any_local_address()).unwrap();
+
+ let tx_addr = tx.local_addr().unwrap();
+ let rx_addr = rx.local_addr().unwrap();
+
+ assert!(tx.connect(rx_addr).is_ok());
+ assert!(rx.connect(tx_addr).is_ok());
+
+ (tx, rx)
+}
+
+#[test]
+pub fn udp_socket() {
+ init();
+
+ let tx = UdpSocket::bind(any_local_address()).unwrap();
+ let rx = UdpSocket::bind(any_local_address()).unwrap();
+
+ send_recv_udp(tx, rx, false);
+}
+
+#[test]
+pub fn udp_socket_send_recv() {
+ init();
+
+ let (tx, rx) = connected_sockets();
+
+ send_recv_udp(tx, rx, true);
+}
+
+#[test]
+pub fn udp_socket_discard() {
+ init();
+
+ let mut tx = UdpSocket::bind(any_local_address()).unwrap();
+ let mut rx = UdpSocket::bind(any_local_address()).unwrap();
+ let udp_outside = UdpSocket::bind(any_local_address()).unwrap();
+
+ let tx_addr = tx.local_addr().unwrap();
+ let rx_addr = rx.local_addr().unwrap();
+
+ assert!(tx.connect(rx_addr).is_ok());
+ assert!(udp_outside.connect(rx_addr).is_ok());
+ assert!(rx.connect(tx_addr).is_ok());
+
+ let mut poll = Poll::new().unwrap();
+
+ checked_write!(udp_outside.send(b"hello world"));
+
+ poll.registry()
+ .register(&mut rx, LISTENER, Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut tx, SENDER, Interest::WRITABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+
+ poll.poll(&mut events, Some(Duration::from_secs(5)))
+ .unwrap();
+
+ for event in &events {
+ if event.is_readable() {
+ if let LISTENER = event.token() {
+ panic!("Expected to no receive a packet but got something")
+ }
+ }
+ }
+}
+
+pub struct UdpHandler {
+ tx: UdpSocket,
+ rx: UdpSocket,
+ msg: &'static str,
+ buf: Vec<u8>,
+ rx_buf: Vec<u8>,
+ localhost: IpAddr,
+ shutdown: bool,
+}
+
+impl UdpHandler {
+ fn new(tx: UdpSocket, rx: UdpSocket, msg: &'static str) -> UdpHandler {
+ let sock = UdpSocket::bind(any_local_address()).unwrap();
+ UdpHandler {
+ tx,
+ rx,
+ msg,
+ buf: msg.as_bytes().to_vec(),
+ rx_buf: Vec::with_capacity(1024),
+ localhost: sock.local_addr().unwrap().ip(),
+ shutdown: false,
+ }
+ }
+
+ fn handle_read(&mut self, _: &Registry, token: Token) {
+ if let LISTENER = token {
+ debug!("We are receiving a datagram now...");
+ unsafe { self.rx_buf.set_len(self.rx_buf.capacity()) };
+ match self.rx.recv_from(&mut self.rx_buf) {
+ Ok((cnt, addr)) => {
+ unsafe { self.rx_buf.set_len(cnt) };
+ assert_eq!(addr.ip(), self.localhost);
+ }
+ res => panic!("unexpected result: {:?}", res),
+ }
+ assert_eq!(str::from_utf8(&self.rx_buf).unwrap(), self.msg);
+ self.shutdown = true;
+ }
+ }
+
+ fn handle_write(&mut self, _: &Registry, token: Token) {
+ if let SENDER = token {
+ let addr = self.rx.local_addr().unwrap();
+ let cnt = self.tx.send_to(self.buf.as_ref(), addr).unwrap();
+ self.buf.drain(..cnt);
+ }
+ }
+}
+
+// TODO: This doesn't pass on android 64bit CI...
+// Figure out why!
+#[cfg_attr(
+ target_os = "android",
+ ignore = "Multicast doesn't work on Android 64bit"
+)]
+#[test]
+pub fn multicast() {
+ init();
+
+ debug!("Starting TEST_UDP_CONNECTIONLESS");
+ let mut poll = Poll::new().unwrap();
+
+ let mut tx = UdpSocket::bind(any_local_address()).unwrap();
+ let mut rx = UdpSocket::bind(any_local_address()).unwrap();
+
+ info!("Joining group 227.1.1.100");
+ let any = &"0.0.0.0".parse().unwrap();
+ rx.join_multicast_v4(&"227.1.1.100".parse().unwrap(), any)
+ .unwrap();
+
+ info!("Joining group 227.1.1.101");
+ rx.join_multicast_v4(&"227.1.1.101".parse().unwrap(), any)
+ .unwrap();
+
+ info!("Registering SENDER");
+ poll.registry()
+ .register(&mut tx, SENDER, Interest::WRITABLE)
+ .unwrap();
+
+ info!("Registering LISTENER");
+ poll.registry()
+ .register(&mut rx, LISTENER, Interest::READABLE)
+ .unwrap();
+
+ let mut events = Events::with_capacity(1024);
+
+ let mut handler = UdpHandler::new(tx, rx, "hello world");
+
+ info!("Starting event loop to test with...");
+
+ while !handler.shutdown {
+ poll.poll(&mut events, None).unwrap();
+
+ for event in &events {
+ if event.is_readable() {
+ handler.handle_read(poll.registry(), event.token());
+ }
+
+ if event.is_writable() {
+ handler.handle_write(poll.registry(), event.token());
+ }
+ }
+ }
+}
+
+#[test]
+fn et_behavior_recv() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut socket1 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut socket2 = UdpSocket::bind(any_local_address()).unwrap();
+
+ let address2 = socket2.local_addr().unwrap();
+
+ poll.registry()
+ .register(&mut socket1, ID1, Interest::WRITABLE)
+ .expect("unable to register UDP socket");
+ poll.registry()
+ .register(
+ &mut socket2,
+ ID2,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .expect("unable to register UDP socket");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(ID1, Interest::WRITABLE),
+ ExpectEvent::new(ID2, Interest::WRITABLE),
+ ],
+ );
+
+ socket1.connect(address2).unwrap();
+
+ let mut buf = [0; 20];
+ checked_write!(socket1.send(DATA1));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+
+ expect_read!(socket2.recv(&mut buf), DATA1);
+
+ // This will reregister socket2, resetting the interests.
+ assert_would_block(socket2.recv(&mut buf));
+ checked_write!(socket1.send(DATA1));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+
+ let mut buf = [0; 20];
+ expect_read!(socket2.recv(&mut buf), DATA1);
+}
+
+#[test]
+fn et_behavior_recv_from() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let mut socket1 = UdpSocket::bind(any_local_address()).unwrap();
+ let mut socket2 = UdpSocket::bind(any_local_address()).unwrap();
+
+ let address1 = socket1.local_addr().unwrap();
+ let address2 = socket2.local_addr().unwrap();
+
+ poll.registry()
+ .register(
+ &mut socket1,
+ ID1,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .expect("unable to register UDP socket");
+ poll.registry()
+ .register(
+ &mut socket2,
+ ID2,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .expect("unable to register UDP socket");
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(ID1, Interest::WRITABLE),
+ ExpectEvent::new(ID2, Interest::WRITABLE),
+ ],
+ );
+
+ checked_write!(socket1.send_to(DATA1, address2));
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+
+ let mut buf = [0; 20];
+ expect_read!(socket2.recv_from(&mut buf), DATA1, address1);
+
+ // This will reregister socket2, resetting the interests.
+ assert_would_block(socket2.recv_from(&mut buf));
+ checked_write!(socket1.send_to(DATA1, address2));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(ID2, Interest::READABLE)],
+ );
+
+ expect_read!(socket2.recv_from(&mut buf), DATA1, address1);
+
+ assert!(socket1.take_error().unwrap().is_none());
+ assert!(socket2.take_error().unwrap().is_none());
+}
--- /dev/null
+#![cfg(all(unix, feature = "os-poll", feature = "net"))]
+
+use mio::net::UnixDatagram;
+use mio::{Interest, Token};
+use std::io;
+use std::net::Shutdown;
+use std::os::unix::net;
+
+#[macro_use]
+mod util;
+use util::{
+ assert_send, assert_socket_close_on_exec, assert_socket_non_blocking, assert_sync,
+ assert_would_block, expect_events, expect_no_events, init, init_with_poll, temp_file,
+ ExpectEvent, Readiness,
+};
+
+const DATA1: &[u8] = b"Hello same host!";
+const DATA2: &[u8] = b"Why hello mio!";
+const DEFAULT_BUF_SIZE: usize = 64;
+const TOKEN_1: Token = Token(0);
+const TOKEN_2: Token = Token(1);
+
+#[test]
+fn is_send_and_sync() {
+ assert_send::<UnixDatagram>();
+ assert_sync::<UnixDatagram>();
+}
+
+#[test]
+fn unix_datagram_smoke_unconnected() {
+ init();
+ let path1 = temp_file("unix_datagram_smoke_unconnected1");
+ let path2 = temp_file("unix_datagram_smoke_unconnected2");
+
+ let datagram1 = UnixDatagram::bind(&path1).unwrap();
+ let datagram2 = UnixDatagram::bind(&path2).unwrap();
+ smoke_test_unconnected(datagram1, datagram2);
+}
+
+#[test]
+fn unix_datagram_smoke_connected() {
+ init();
+ let path1 = temp_file("unix_datagram_smoke_connected1");
+ let path2 = temp_file("unix_datagram_smoke_connected2");
+
+ let datagram1 = UnixDatagram::bind(&path1).unwrap();
+ let datagram2 = UnixDatagram::bind(&path2).unwrap();
+
+ datagram1.connect(&path2).unwrap();
+ datagram2.connect(&path1).unwrap();
+ smoke_test_connected(datagram1, datagram2);
+}
+
+#[test]
+fn unix_datagram_smoke_unconnected_from_std() {
+ init();
+ let path1 = temp_file("unix_datagram_smoke_unconnected_from_std1");
+ let path2 = temp_file("unix_datagram_smoke_unconnected_from_std2");
+
+ let datagram1 = net::UnixDatagram::bind(&path1).unwrap();
+ let datagram2 = net::UnixDatagram::bind(&path2).unwrap();
+
+ datagram1.set_nonblocking(true).unwrap();
+ datagram2.set_nonblocking(true).unwrap();
+
+ let datagram1 = UnixDatagram::from_std(datagram1);
+ let datagram2 = UnixDatagram::from_std(datagram2);
+ smoke_test_unconnected(datagram1, datagram2);
+}
+
+#[test]
+fn unix_datagram_smoke_connected_from_std() {
+ init();
+ let path1 = temp_file("unix_datagram_smoke_connected_from_std1");
+ let path2 = temp_file("unix_datagram_smoke_connected_from_std2");
+
+ let datagram1 = net::UnixDatagram::bind(&path1).unwrap();
+ let datagram2 = net::UnixDatagram::bind(&path2).unwrap();
+
+ datagram1.connect(&path2).unwrap();
+ datagram2.connect(&path1).unwrap();
+
+ datagram1.set_nonblocking(true).unwrap();
+ datagram2.set_nonblocking(true).unwrap();
+
+ let datagram1 = UnixDatagram::from_std(datagram1);
+ let datagram2 = UnixDatagram::from_std(datagram2);
+ smoke_test_connected(datagram1, datagram2);
+}
+
+#[test]
+fn unix_datagram_connect() {
+ init();
+ let path1 = temp_file("unix_datagram_connect1");
+ let path2 = temp_file("unix_datagram_connect2");
+
+ let datagram1 = UnixDatagram::bind(&path1).unwrap();
+ let datagram1_local = datagram1.local_addr().unwrap();
+ let datagram2 = UnixDatagram::bind(&path2).unwrap();
+ let datagram2_local = datagram2.local_addr().unwrap();
+
+ datagram1
+ .connect(
+ datagram1_local
+ .as_pathname()
+ .expect("failed to get pathname"),
+ )
+ .unwrap();
+ datagram2
+ .connect(
+ datagram2_local
+ .as_pathname()
+ .expect("failed to get pathname"),
+ )
+ .unwrap();
+}
+
+#[test]
+fn unix_datagram_pair() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let (mut datagram1, mut datagram2) = UnixDatagram::pair().unwrap();
+ poll.registry()
+ .register(
+ &mut datagram1,
+ TOKEN_1,
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+ poll.registry()
+ .register(
+ &mut datagram2,
+ TOKEN_2,
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(TOKEN_1, Interest::WRITABLE),
+ ExpectEvent::new(TOKEN_2, Interest::WRITABLE),
+ ],
+ );
+
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ assert_would_block(datagram1.recv(&mut buf));
+ assert_would_block(datagram2.recv(&mut buf));
+
+ checked_write!(datagram1.send(DATA1));
+ checked_write!(datagram2.send(DATA2));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(TOKEN_1, Interest::READABLE),
+ ExpectEvent::new(TOKEN_2, Interest::READABLE),
+ ],
+ );
+
+ expect_read!(datagram1.recv(&mut buf), DATA2);
+ expect_read!(datagram2.recv(&mut buf), DATA1);
+
+ assert!(datagram1.take_error().unwrap().is_none());
+ assert!(datagram2.take_error().unwrap().is_none());
+}
+
+#[test]
+fn unix_datagram_shutdown() {
+ let (mut poll, mut events) = init_with_poll();
+ let path1 = temp_file("unix_datagram_shutdown1");
+ let path2 = temp_file("unix_datagram_shutdown2");
+
+ let mut datagram1 = UnixDatagram::bind(&path1).unwrap();
+ let mut datagram2 = UnixDatagram::bind(&path2).unwrap();
+
+ poll.registry()
+ .register(
+ &mut datagram1,
+ TOKEN_1,
+ Interest::WRITABLE.add(Interest::READABLE),
+ )
+ .unwrap();
+ poll.registry()
+ .register(
+ &mut datagram2,
+ TOKEN_2,
+ Interest::WRITABLE.add(Interest::READABLE),
+ )
+ .unwrap();
+
+ datagram1.connect(&path2).unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+
+ checked_write!(datagram1.send(DATA1));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_2, Interest::READABLE)],
+ );
+
+ datagram1.shutdown(Shutdown::Read).unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Readiness::READ_CLOSED)],
+ );
+
+ datagram1.shutdown(Shutdown::Write).unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Readiness::WRITE_CLOSED)],
+ );
+
+ let err = datagram1.send(DATA2).unwrap_err();
+ assert_eq!(err.kind(), io::ErrorKind::BrokenPipe);
+
+ assert!(datagram1.take_error().unwrap().is_none());
+}
+
+#[test]
+fn unix_datagram_register() {
+ let (mut poll, mut events) = init_with_poll();
+ let path = temp_file("unix_datagram_register");
+
+ let mut datagram = UnixDatagram::bind(path).unwrap();
+ poll.registry()
+ .register(&mut datagram, TOKEN_1, Interest::READABLE)
+ .unwrap();
+ expect_no_events(&mut poll, &mut events);
+}
+
+#[test]
+fn unix_datagram_reregister() {
+ let (mut poll, mut events) = init_with_poll();
+ let path1 = temp_file("unix_datagram_reregister1");
+ let path2 = temp_file("unix_datagram_reregister2");
+
+ let mut datagram1 = UnixDatagram::bind(&path1).unwrap();
+ poll.registry()
+ .register(&mut datagram1, TOKEN_1, Interest::READABLE)
+ .unwrap();
+
+ let datagram2 = UnixDatagram::bind(&path2).unwrap();
+ datagram2.connect(&path1).unwrap();
+ poll.registry()
+ .reregister(&mut datagram1, TOKEN_1, Interest::WRITABLE)
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+}
+
+#[test]
+fn unix_datagram_deregister() {
+ let (mut poll, mut events) = init_with_poll();
+ let path1 = temp_file("unix_datagram_deregister1");
+ let path2 = temp_file("unix_datagram_deregister2");
+
+ let mut datagram1 = UnixDatagram::bind(&path1).unwrap();
+ poll.registry()
+ .register(&mut datagram1, TOKEN_1, Interest::WRITABLE)
+ .unwrap();
+
+ let datagram2 = UnixDatagram::bind(&path2).unwrap();
+ datagram2.connect(&path1).unwrap();
+ poll.registry().deregister(&mut datagram1).unwrap();
+ expect_no_events(&mut poll, &mut events);
+}
+
+fn smoke_test_unconnected(mut datagram1: UnixDatagram, mut datagram2: UnixDatagram) {
+ let (mut poll, mut events) = init_with_poll();
+
+ assert_socket_non_blocking(&datagram1);
+ assert_socket_close_on_exec(&datagram1);
+ assert_socket_non_blocking(&datagram2);
+ assert_socket_close_on_exec(&datagram2);
+
+ let addr1 = datagram1.local_addr().unwrap();
+ let addr2 = datagram2.local_addr().unwrap();
+ let path1 = addr1.as_pathname().expect("failed to get pathname");
+ let path2 = addr2.as_pathname().expect("failed to get pathname");
+
+ poll.registry()
+ .register(
+ &mut datagram1,
+ TOKEN_1,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .unwrap();
+ poll.registry()
+ .register(
+ &mut datagram2,
+ TOKEN_2,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(TOKEN_1, Interest::WRITABLE),
+ ExpectEvent::new(TOKEN_2, Interest::WRITABLE),
+ ],
+ );
+
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ assert_would_block(datagram1.recv_from(&mut buf));
+ assert_would_block(datagram2.recv_from(&mut buf));
+
+ checked_write!(datagram1.send_to(DATA1, path2));
+ checked_write!(datagram2.send_to(DATA2, path1));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(TOKEN_1, Interest::READABLE),
+ ExpectEvent::new(TOKEN_2, Interest::READABLE),
+ ],
+ );
+
+ expect_read!(datagram1.recv_from(&mut buf), DATA2, path: path2);
+ expect_read!(datagram2.recv_from(&mut buf), DATA1, path: path1);
+
+ assert!(datagram1.take_error().unwrap().is_none());
+ assert!(datagram2.take_error().unwrap().is_none());
+}
+
+fn smoke_test_connected(mut datagram1: UnixDatagram, mut datagram2: UnixDatagram) {
+ let (mut poll, mut events) = init_with_poll();
+
+ assert_socket_non_blocking(&datagram1);
+ assert_socket_close_on_exec(&datagram1);
+ assert_socket_non_blocking(&datagram2);
+ assert_socket_close_on_exec(&datagram2);
+
+ let local_addr1 = datagram1.local_addr().unwrap();
+ let peer_addr1 = datagram1.peer_addr().unwrap();
+ let local_addr2 = datagram2.local_addr().unwrap();
+ let peer_addr2 = datagram2.peer_addr().unwrap();
+ assert_eq!(
+ local_addr1.as_pathname().expect("failed to get pathname"),
+ peer_addr2.as_pathname().expect("failed to get pathname")
+ );
+ assert_eq!(
+ local_addr2.as_pathname().expect("failed to get pathname"),
+ peer_addr1.as_pathname().expect("failed to get pathname")
+ );
+
+ poll.registry()
+ .register(
+ &mut datagram1,
+ TOKEN_1,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .unwrap();
+ poll.registry()
+ .register(
+ &mut datagram2,
+ TOKEN_2,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(TOKEN_1, Interest::WRITABLE),
+ ExpectEvent::new(TOKEN_2, Interest::WRITABLE),
+ ],
+ );
+
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ assert_would_block(datagram1.recv(&mut buf));
+ assert_would_block(datagram2.recv(&mut buf));
+
+ checked_write!(datagram1.send(DATA1));
+ checked_write!(datagram2.send(DATA2));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![
+ ExpectEvent::new(TOKEN_1, Interest::READABLE),
+ ExpectEvent::new(TOKEN_2, Interest::READABLE),
+ ],
+ );
+
+ expect_read!(datagram1.recv(&mut buf), DATA2);
+ expect_read!(datagram2.recv(&mut buf), DATA1);
+
+ assert!(datagram1.take_error().unwrap().is_none());
+ assert!(datagram2.take_error().unwrap().is_none());
+}
--- /dev/null
+#![cfg(all(unix, feature = "os-poll", feature = "net"))]
+
+use mio::net::UnixListener;
+use mio::{Interest, Token};
+use std::io::{self, Read};
+use std::os::unix::net;
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Barrier};
+use std::thread;
+
+#[macro_use]
+mod util;
+use util::{
+ assert_send, assert_socket_close_on_exec, assert_socket_non_blocking, assert_sync,
+ assert_would_block, expect_events, expect_no_events, init_with_poll, temp_file, ExpectEvent,
+};
+
+const DEFAULT_BUF_SIZE: usize = 64;
+const TOKEN_1: Token = Token(0);
+
+#[test]
+fn unix_listener_send_and_sync() {
+ assert_send::<UnixListener>();
+ assert_sync::<UnixListener>();
+}
+
+#[test]
+fn unix_listener_smoke() {
+ #[allow(clippy::redundant_closure)]
+ smoke_test(|path| UnixListener::bind(path), "unix_listener_smoke");
+}
+
+#[test]
+fn unix_listener_from_std() {
+ smoke_test(
+ |path| {
+ let listener = net::UnixListener::bind(path).unwrap();
+ // `std::os::unix::net::UnixListener`s are blocking by default, so make sure
+ // it is in non-blocking mode before wrapping in a Mio equivalent.
+ listener.set_nonblocking(true).unwrap();
+ Ok(UnixListener::from_std(listener))
+ },
+ "unix_listener_from_std",
+ )
+}
+
+#[test]
+fn unix_listener_local_addr() {
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+
+ let path = temp_file("unix_listener_local_addr");
+ let mut listener = UnixListener::bind(&path).unwrap();
+ poll.registry()
+ .register(
+ &mut listener,
+ TOKEN_1,
+ Interest::WRITABLE.add(Interest::READABLE),
+ )
+ .unwrap();
+
+ let handle = open_connections(path.clone(), 1, barrier.clone());
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::READABLE)],
+ );
+
+ let (stream, expected_addr) = listener.accept().unwrap();
+ assert_eq!(stream.local_addr().unwrap().as_pathname().unwrap(), &path);
+ assert!(expected_addr.as_pathname().is_none());
+
+ barrier.wait();
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_listener_register() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let path = temp_file("unix_listener_register");
+ let mut listener = UnixListener::bind(path).unwrap();
+ poll.registry()
+ .register(&mut listener, TOKEN_1, Interest::READABLE)
+ .unwrap();
+ expect_no_events(&mut poll, &mut events)
+}
+
+#[test]
+fn unix_listener_reregister() {
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+
+ let path = temp_file("unix_listener_reregister");
+ let mut listener = UnixListener::bind(&path).unwrap();
+ poll.registry()
+ .register(&mut listener, TOKEN_1, Interest::WRITABLE)
+ .unwrap();
+
+ let handle = open_connections(path, 1, barrier.clone());
+ expect_no_events(&mut poll, &mut events);
+
+ poll.registry()
+ .reregister(&mut listener, TOKEN_1, Interest::READABLE)
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::READABLE)],
+ );
+
+ barrier.wait();
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_listener_deregister() {
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+
+ let path = temp_file("unix_listener_deregister");
+ let mut listener = UnixListener::bind(&path).unwrap();
+ poll.registry()
+ .register(&mut listener, TOKEN_1, Interest::READABLE)
+ .unwrap();
+
+ let handle = open_connections(path, 1, barrier.clone());
+
+ poll.registry().deregister(&mut listener).unwrap();
+ expect_no_events(&mut poll, &mut events);
+
+ barrier.wait();
+ handle.join().unwrap();
+}
+
+#[cfg(target_os = "linux")]
+#[test]
+fn unix_listener_abstract_namespace() {
+ use rand::Rng;
+ let num: u64 = rand::thread_rng().gen();
+ let name = format!("\u{0000}-mio-abstract-uds-{}", num);
+ let listener = UnixListener::bind(&name).unwrap();
+ assert_eq!(
+ listener.local_addr().unwrap().as_abstract_namespace(),
+ Some(&name.as_bytes()[1..]),
+ );
+}
+
+fn smoke_test<F>(new_listener: F, test_name: &'static str)
+where
+ F: FnOnce(&Path) -> io::Result<UnixListener>,
+{
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+ let path = temp_file(test_name);
+
+ let mut listener = new_listener(&path).unwrap();
+
+ assert_socket_non_blocking(&listener);
+ assert_socket_close_on_exec(&listener);
+
+ poll.registry()
+ .register(
+ &mut listener,
+ TOKEN_1,
+ Interest::WRITABLE.add(Interest::READABLE),
+ )
+ .unwrap();
+ expect_no_events(&mut poll, &mut events);
+
+ let handle = open_connections(path, 1, barrier.clone());
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::READABLE)],
+ );
+
+ let (mut stream, _) = listener.accept().unwrap();
+
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ assert_would_block(stream.read(&mut buf));
+
+ assert_would_block(listener.accept());
+ assert!(listener.take_error().unwrap().is_none());
+
+ barrier.wait();
+ handle.join().unwrap();
+}
+
+fn open_connections(
+ path: PathBuf,
+ n_connections: usize,
+ barrier: Arc<Barrier>,
+) -> thread::JoinHandle<()> {
+ thread::spawn(move || {
+ for _ in 0..n_connections {
+ let conn = net::UnixStream::connect(path.clone()).unwrap();
+ barrier.wait();
+ drop(conn);
+ }
+ })
+}
--- /dev/null
+#![cfg(all(unix, feature = "os-poll", feature = "os-ext", feature = "net"))]
+
+use std::io::{Read, Write};
+use std::process::{Command, Stdio};
+use std::sync::{Arc, Barrier};
+use std::thread;
+use std::time::Duration;
+
+use mio::unix::pipe::{self, Receiver, Sender};
+use mio::{Events, Interest, Poll, Token};
+
+mod util;
+use util::{assert_would_block, expect_events, ExpectEvent};
+
+const RECEIVER: Token = Token(0);
+const SENDER: Token = Token(1);
+
+const DATA1: &[u8; 11] = b"Hello world";
+
+#[test]
+fn smoke() {
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(8);
+
+ let (mut sender, mut receiver) = pipe::new().unwrap();
+
+ let mut buf = [0; 20];
+ assert_would_block(receiver.read(&mut buf));
+
+ poll.registry()
+ .register(&mut receiver, RECEIVER, Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut sender, SENDER, Interest::WRITABLE)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(SENDER, Interest::WRITABLE)],
+ );
+ let n = sender.write(DATA1).unwrap();
+ assert_eq!(n, DATA1.len());
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(RECEIVER, Interest::READABLE)],
+ );
+ let n = receiver.read(&mut buf).unwrap();
+ assert_eq!(n, DATA1.len());
+ assert_eq!(&buf[..n], &*DATA1);
+}
+
+#[test]
+fn event_when_sender_is_dropped() {
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(8);
+
+ let (mut sender, mut receiver) = pipe::new().unwrap();
+ poll.registry()
+ .register(&mut receiver, RECEIVER, Interest::READABLE)
+ .unwrap();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let thread_barrier = barrier.clone();
+
+ let handle = thread::spawn(move || {
+ let n = sender.write(DATA1).unwrap();
+ assert_eq!(n, DATA1.len());
+ thread_barrier.wait();
+
+ thread_barrier.wait();
+ drop(sender);
+ thread_barrier.wait();
+ });
+
+ barrier.wait(); // Wait for the write to complete.
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(RECEIVER, Interest::READABLE)],
+ );
+
+ barrier.wait(); // Unblock the thread.
+ barrier.wait(); // Wait until the sending end is dropped.
+
+ expect_one_closed_event(&mut poll, &mut events, RECEIVER, true);
+
+ handle.join().unwrap();
+}
+
+#[test]
+fn event_when_receiver_is_dropped() {
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(8);
+
+ let (mut sender, receiver) = pipe::new().unwrap();
+ poll.registry()
+ .register(&mut sender, SENDER, Interest::WRITABLE)
+ .unwrap();
+
+ let barrier = Arc::new(Barrier::new(2));
+ let thread_barrier = barrier.clone();
+
+ let handle = thread::spawn(move || {
+ thread_barrier.wait();
+ drop(receiver);
+ thread_barrier.wait();
+ });
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(SENDER, Interest::WRITABLE)],
+ );
+
+ barrier.wait(); // Unblock the thread.
+ barrier.wait(); // Wait until the receiving end is dropped.
+
+ expect_one_closed_event(&mut poll, &mut events, SENDER, false);
+
+ handle.join().unwrap();
+}
+
+#[test]
+fn from_child_process_io() {
+ // `cat` simply echoes everything that we write to its standard in.
+ let mut child = Command::new("cat")
+ .env_clear()
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .spawn()
+ .expect("failed to start `cat` command");
+
+ let mut sender = Sender::from(child.stdin.take().unwrap());
+ let mut receiver = Receiver::from(child.stdout.take().unwrap());
+
+ let mut poll = Poll::new().unwrap();
+ let mut events = Events::with_capacity(8);
+
+ poll.registry()
+ .register(&mut receiver, RECEIVER, Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut sender, SENDER, Interest::WRITABLE)
+ .unwrap();
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(SENDER, Interest::WRITABLE)],
+ );
+ let n = sender.write(DATA1).unwrap();
+ assert_eq!(n, DATA1.len());
+
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(RECEIVER, Interest::READABLE)],
+ );
+ let mut buf = [0; 20];
+ let n = receiver.read(&mut buf).unwrap();
+ assert_eq!(n, DATA1.len());
+ assert_eq!(&buf[..n], &*DATA1);
+
+ drop(sender);
+
+ expect_one_closed_event(&mut poll, &mut events, RECEIVER, true);
+
+ child.wait().unwrap();
+}
+
+#[test]
+fn nonblocking_child_process_io() {
+ // `cat` simply echoes back everything we write to it via standard in.
+ let mut child = Command::new("cat")
+ .env_clear()
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .spawn()
+ .expect("failed to start `cat` command");
+
+ let sender = Sender::from(child.stdin.take().unwrap());
+ let mut receiver = Receiver::from(child.stdout.take().unwrap());
+
+ receiver.set_nonblocking(true).unwrap();
+
+ let mut buf = [0; 20];
+ assert_would_block(receiver.read(&mut buf));
+
+ drop(sender);
+ child.wait().unwrap();
+}
+
+ /// Expects a single closed event. If `read` is true it checks for `is_read_closed`,
+ /// otherwise for `is_write_closed`.
+pub fn expect_one_closed_event(poll: &mut Poll, events: &mut Events, token: Token, read: bool) {
+ poll.poll(events, Some(Duration::from_secs(1))).unwrap();
+ let mut iter = events.iter();
+ let event = iter.next().unwrap();
+ assert_eq!(event.token(), token, "invalid token, event: {:#?}", event);
+ if read {
+ assert!(
+ event.is_read_closed(),
+ "expected closed or error, event: {:#?}",
+ event
+ );
+ } else {
+ assert!(
+ event.is_write_closed(),
+ "expected closed or error, event: {:#?}",
+ event
+ );
+ }
+ assert!(iter.next().is_none());
+}
--- /dev/null
+#![cfg(all(unix, feature = "os-poll", feature = "net"))]
+
+use mio::net::UnixStream;
+use mio::{Interest, Token};
+use std::io::{self, IoSlice, IoSliceMut, Read, Write};
+use std::net::Shutdown;
+use std::os::unix::net;
+use std::path::Path;
+use std::sync::mpsc::channel;
+use std::sync::{Arc, Barrier};
+use std::thread;
+
+#[macro_use]
+mod util;
+use util::{
+ assert_send, assert_socket_close_on_exec, assert_socket_non_blocking, assert_sync,
+ assert_would_block, expect_events, expect_no_events, init, init_with_poll, temp_file,
+ ExpectEvent, Readiness,
+};
+
+const DATA1: &[u8] = b"Hello same host!";
+const DATA2: &[u8] = b"Why hello mio!";
+const DATA1_LEN: usize = 16;
+const DATA2_LEN: usize = 14;
+const DEFAULT_BUF_SIZE: usize = 64;
+const TOKEN_1: Token = Token(0);
+const TOKEN_2: Token = Token(1);
+
+#[test]
+fn unix_stream_send_and_sync() {
+ assert_send::<UnixStream>();
+ assert_sync::<UnixStream>();
+}
+
+#[test]
+fn unix_stream_smoke() {
+ #[allow(clippy::redundant_closure)]
+ smoke_test(|path| UnixStream::connect(path), "unix_stream_smoke");
+}
+
+#[test]
+fn unix_stream_connect() {
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+ let path = temp_file("unix_stream_connect");
+
+ let listener = net::UnixListener::bind(path.clone()).unwrap();
+ let mut stream = UnixStream::connect(path).unwrap();
+
+ let barrier_clone = barrier.clone();
+ let handle = thread::spawn(move || {
+ let (stream, _) = listener.accept().unwrap();
+ barrier_clone.wait();
+ drop(stream);
+ });
+
+ poll.registry()
+ .register(
+ &mut stream,
+ TOKEN_1,
+ Interest::READABLE | Interest::WRITABLE,
+ )
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+
+ barrier.wait();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::READABLE)],
+ );
+
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_stream_from_std() {
+ smoke_test(
+ |path| {
+ let local = net::UnixStream::connect(path).unwrap();
+ // A `std::os::unix::net::UnixStream` is blocking by default, so make sure
+ // it is in non-blocking mode before wrapping it in the Mio equivalent.
+ local.set_nonblocking(true).unwrap();
+ Ok(UnixStream::from_std(local))
+ },
+ "unix_stream_from_std",
+ )
+}
+
+#[test]
+fn unix_stream_pair() {
+ let (mut poll, mut events) = init_with_poll();
+
+ let (mut s1, mut s2) = UnixStream::pair().unwrap();
+ poll.registry()
+ .register(&mut s1, TOKEN_1, Interest::READABLE | Interest::WRITABLE)
+ .unwrap();
+ poll.registry()
+ .register(&mut s2, TOKEN_2, Interest::READABLE | Interest::WRITABLE)
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ assert_would_block(s1.read(&mut buf));
+
+ checked_write!(s1.write(DATA1));
+ s1.flush().unwrap();
+
+ expect_read!(s2.read(&mut buf), DATA1);
+ assert_would_block(s2.read(&mut buf));
+
+ checked_write!(s2.write(DATA2));
+ s2.flush().unwrap();
+
+ expect_read!(s1.read(&mut buf), DATA2);
+ assert_would_block(s2.read(&mut buf));
+}
+
+#[test]
+fn unix_stream_peer_addr() {
+ init();
+ let (handle, expected_addr) = new_echo_listener(1, "unix_stream_peer_addr");
+ let expected_path = expected_addr.as_pathname().expect("failed to get pathname");
+
+ let stream = UnixStream::connect(expected_path).unwrap();
+
+ assert_eq!(
+ stream.peer_addr().unwrap().as_pathname().unwrap(),
+ expected_path
+ );
+ assert!(stream.local_addr().unwrap().as_pathname().is_none());
+
+ // Close the connection to allow the remote to shutdown
+ drop(stream);
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_stream_shutdown_read() {
+ let (mut poll, mut events) = init_with_poll();
+ let (handle, remote_addr) = new_echo_listener(1, "unix_stream_shutdown_read");
+ let path = remote_addr.as_pathname().expect("failed to get pathname");
+
+ let mut stream = UnixStream::connect(path).unwrap();
+ poll.registry()
+ .register(
+ &mut stream,
+ TOKEN_1,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+
+ checked_write!(stream.write(DATA1));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::READABLE)],
+ );
+
+ stream.shutdown(Shutdown::Read).unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Readiness::READ_CLOSED)],
+ );
+
+ // Shutting down the reading side behaves differently on each platform. For
+ // example, on Linux-based systems we can still read.
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ {
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ expect_read!(stream.read(&mut buf), &[]);
+ }
+
+ // Close the connection to allow the remote to shutdown
+ drop(stream);
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_stream_shutdown_write() {
+ let (mut poll, mut events) = init_with_poll();
+ let (handle, remote_addr) = new_echo_listener(1, "unix_stream_shutdown_write");
+ let path = remote_addr.as_pathname().expect("failed to get pathname");
+
+ let mut stream = UnixStream::connect(path).unwrap();
+ poll.registry()
+ .register(
+ &mut stream,
+ TOKEN_1,
+ Interest::WRITABLE.add(Interest::READABLE),
+ )
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+
+ checked_write!(stream.write(DATA1));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::READABLE)],
+ );
+
+ stream.shutdown(Shutdown::Write).unwrap();
+
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Readiness::WRITE_CLOSED)],
+ );
+
+ let err = stream.write(DATA2).unwrap_err();
+ assert_eq!(err.kind(), io::ErrorKind::BrokenPipe);
+
+ // Read should be ok
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ expect_read!(stream.read(&mut buf), DATA1);
+
+ // Close the connection to allow the remote to shutdown
+ drop(stream);
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_stream_shutdown_both() {
+ let (mut poll, mut events) = init_with_poll();
+ let (handle, remote_addr) = new_echo_listener(1, "unix_stream_shutdown_both");
+ let path = remote_addr.as_pathname().expect("failed to get pathname");
+
+ let mut stream = UnixStream::connect(path).unwrap();
+ poll.registry()
+ .register(
+ &mut stream,
+ TOKEN_1,
+ Interest::WRITABLE.add(Interest::READABLE),
+ )
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+
+ checked_write!(stream.write(DATA1));
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::READABLE)],
+ );
+
+ stream.shutdown(Shutdown::Both).unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Readiness::WRITE_CLOSED)],
+ );
+
+ // Shutting down the reading side behaves differently on each platform. For
+ // example, on Linux-based systems we can still read.
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ {
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ expect_read!(stream.read(&mut buf), &[]);
+ }
+
+ let err = stream.write(DATA2).unwrap_err();
+ #[cfg(unix)]
+ assert_eq!(err.kind(), io::ErrorKind::BrokenPipe);
+ #[cfg(windows)]
+ assert_eq!(err.kind(), io::ErrorKind::ConnectionAborted);
+
+ // Close the connection to allow the remote to shutdown
+ drop(stream);
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_stream_shutdown_listener_write() {
+ let (mut poll, mut events) = init_with_poll();
+ let barrier = Arc::new(Barrier::new(2));
+ let (handle, remote_addr) =
+ new_noop_listener(1, barrier.clone(), "unix_stream_shutdown_listener_write");
+ let path = remote_addr.as_pathname().expect("failed to get pathname");
+
+ let mut stream = UnixStream::connect(path).unwrap();
+ poll.registry()
+ .register(
+ &mut stream,
+ TOKEN_1,
+ Interest::READABLE.add(Interest::WRITABLE),
+ )
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+
+ barrier.wait();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Readiness::READ_CLOSED)],
+ );
+
+ barrier.wait();
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_stream_register() {
+ let (mut poll, mut events) = init_with_poll();
+ let (handle, remote_addr) = new_echo_listener(1, "unix_stream_register");
+ let path = remote_addr.as_pathname().expect("failed to get pathname");
+
+ let mut stream = UnixStream::connect(path).unwrap();
+ poll.registry()
+ .register(&mut stream, TOKEN_1, Interest::READABLE)
+ .unwrap();
+ expect_no_events(&mut poll, &mut events);
+
+ // Close the connection to allow the remote to shutdown
+ drop(stream);
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_stream_reregister() {
+ let (mut poll, mut events) = init_with_poll();
+ let (handle, remote_addr) = new_echo_listener(1, "unix_stream_reregister");
+ let path = remote_addr.as_pathname().expect("failed to get pathname");
+
+ let mut stream = UnixStream::connect(path).unwrap();
+ poll.registry()
+ .register(&mut stream, TOKEN_1, Interest::READABLE)
+ .unwrap();
+ poll.registry()
+ .reregister(&mut stream, TOKEN_1, Interest::WRITABLE)
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+
+ // Close the connection to allow the remote to shutdown
+ drop(stream);
+ handle.join().unwrap();
+}
+
+#[test]
+fn unix_stream_deregister() {
+ let (mut poll, mut events) = init_with_poll();
+ let (handle, remote_addr) = new_echo_listener(1, "unix_stream_deregister");
+ let path = remote_addr.as_pathname().expect("failed to get pathname");
+
+ let mut stream = UnixStream::connect(path).unwrap();
+ poll.registry()
+ .register(&mut stream, TOKEN_1, Interest::WRITABLE)
+ .unwrap();
+ poll.registry().deregister(&mut stream).unwrap();
+ expect_no_events(&mut poll, &mut events);
+
+ // Close the connection to allow the remote to shutdown
+ drop(stream);
+ handle.join().unwrap();
+}
+
+fn smoke_test<F>(connect_stream: F, test_name: &'static str)
+where
+ F: FnOnce(&Path) -> io::Result<UnixStream>,
+{
+ let (mut poll, mut events) = init_with_poll();
+ let (handle, remote_addr) = new_echo_listener(1, test_name);
+ let path = remote_addr.as_pathname().expect("failed to get pathname");
+
+ let mut stream = connect_stream(path).unwrap();
+
+ assert_socket_non_blocking(&stream);
+ assert_socket_close_on_exec(&stream);
+
+ poll.registry()
+ .register(
+ &mut stream,
+ TOKEN_1,
+ Interest::WRITABLE.add(Interest::READABLE),
+ )
+ .unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::WRITABLE)],
+ );
+
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ assert_would_block(stream.read(&mut buf));
+
+ checked_write!(stream.write(DATA1));
+ stream.flush().unwrap();
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::READABLE)],
+ );
+
+ expect_read!(stream.read(&mut buf), DATA1);
+
+ assert!(stream.take_error().unwrap().is_none());
+
+ let bufs = [IoSlice::new(DATA1), IoSlice::new(DATA2)];
+ let wrote = stream.write_vectored(&bufs).unwrap();
+ assert_eq!(wrote, DATA1_LEN + DATA2_LEN);
+ expect_events(
+ &mut poll,
+ &mut events,
+ vec![ExpectEvent::new(TOKEN_1, Interest::READABLE)],
+ );
+
+ let mut buf1 = [1; DATA1_LEN];
+ let mut buf2 = [2; DATA2_LEN + 1];
+ let mut bufs = [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)];
+ let read = stream.read_vectored(&mut bufs).unwrap();
+ assert_eq!(read, DATA1_LEN + DATA2_LEN);
+ assert_eq!(&buf1, DATA1);
+ assert_eq!(&buf2[..DATA2.len()], DATA2);
+
+ // Last byte should be unchanged
+ assert_eq!(buf2[DATA2.len()], 2);
+
+ // Close the connection to allow the remote to shutdown
+ drop(stream);
+ handle.join().unwrap();
+}
+
+fn new_echo_listener(
+ connections: usize,
+ test_name: &'static str,
+) -> (thread::JoinHandle<()>, net::SocketAddr) {
+ let (addr_sender, addr_receiver) = channel();
+ let handle = thread::spawn(move || {
+ let path = temp_file(test_name);
+ let listener = net::UnixListener::bind(path).unwrap();
+ let local_addr = listener.local_addr().unwrap();
+ addr_sender.send(local_addr).unwrap();
+
+ for _ in 0..connections {
+ let (mut stream, _) = listener.accept().unwrap();
+
+ // On Linux-based systems a connection reset error is returned when the
+ // reading side of the peer connection is shut down; we don't consider
+ // that an actual error here.
+ let (mut read, mut written) = (0, 0);
+ let mut buf = [0; DEFAULT_BUF_SIZE];
+ loop {
+ let n = match stream.read(&mut buf) {
+ Ok(amount) => {
+ read += amount;
+ amount
+ }
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(ref err) if err.kind() == io::ErrorKind::ConnectionReset => break,
+ Err(err) => panic!("{}", err),
+ };
+ if n == 0 {
+ break;
+ }
+ match stream.write(&buf[..n]) {
+ Ok(amount) => written += amount,
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(ref err) if err.kind() == io::ErrorKind::BrokenPipe => break,
+ Err(err) => panic!("{}", err),
+ };
+ }
+ assert_eq!(read, written, "unequal reads and writes");
+ }
+ });
+ (handle, addr_receiver.recv().unwrap())
+}
+
+fn new_noop_listener(
+ connections: usize,
+ barrier: Arc<Barrier>,
+ test_name: &'static str,
+) -> (thread::JoinHandle<()>, net::SocketAddr) {
+ let (sender, receiver) = channel();
+ let handle = thread::spawn(move || {
+ let path = temp_file(test_name);
+ let listener = net::UnixListener::bind(path).unwrap();
+ let local_addr = listener.local_addr().unwrap();
+ sender.send(local_addr).unwrap();
+
+ for _ in 0..connections {
+ let (stream, _) = listener.accept().unwrap();
+ barrier.wait();
+ stream.shutdown(Shutdown::Write).unwrap();
+ barrier.wait();
+ drop(stream);
+ }
+ });
+ (handle, receiver.recv().unwrap())
+}
--- /dev/null
+// Not all functions are used by all tests.
+#![allow(dead_code, unused_macros)]
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use std::mem::size_of;
+use std::net::SocketAddr;
+use std::ops::BitOr;
+#[cfg(unix)]
+use std::os::unix::io::AsRawFd;
+use std::path::PathBuf;
+use std::sync::Once;
+use std::time::Duration;
+use std::{env, fmt, fs, io};
+
+use log::{error, warn};
+use mio::event::Event;
+use mio::net::TcpStream;
+use mio::{Events, Interest, Poll, Token};
+
+pub fn init() {
+ static INIT: Once = Once::new();
+
+ INIT.call_once(|| {
+ env_logger::try_init().expect("unable to initialise logger");
+
+ // Remove all temporary files from previous test runs.
+ let dir = temp_dir();
+ let _ = fs::remove_dir_all(&dir);
+ fs::create_dir_all(&dir).expect("unable to create temporary directory");
+ })
+}
+
+pub fn init_with_poll() -> (Poll, Events) {
+ init();
+
+ let poll = Poll::new().expect("unable to create Poll instance");
+ let events = Events::with_capacity(16);
+ (poll, events)
+}
+
+pub fn assert_sync<T: Sync>() {}
+pub fn assert_send<T: Send>() {}
+
+/// An event that is expected to show up when `Poll` is polled, see
+/// `expect_events`.
+#[derive(Debug)]
+pub struct ExpectEvent {
+ token: Token,
+ readiness: Readiness,
+}
+
+impl ExpectEvent {
+ pub fn new<R>(token: Token, readiness: R) -> ExpectEvent
+ where
+ R: Into<Readiness>,
+ {
+ ExpectEvent {
+ token,
+ readiness: readiness.into(),
+ }
+ }
+
+ fn matches(&self, event: &Event) -> bool {
+ event.token() == self.token && self.readiness.matches(event)
+ }
+}
+
+#[derive(Debug)]
+pub struct Readiness(usize);
+
+const READABLE: usize = 0b0000_0001;
+const WRITABLE: usize = 0b0000_0010;
+const AIO: usize = 0b0000_0100;
+const LIO: usize = 0b0000_1000;
+ const ERROR: usize = 0b0001_0000;
+const READ_CLOSED: usize = 0b0010_0000;
+const WRITE_CLOSED: usize = 0b0100_0000;
+const PRIORITY: usize = 0b1000_0000;
+
+impl Readiness {
+ pub const READABLE: Readiness = Readiness(READABLE);
+ pub const WRITABLE: Readiness = Readiness(WRITABLE);
+ pub const AIO: Readiness = Readiness(AIO);
+ pub const LIO: Readiness = Readiness(LIO);
+ pub const ERROR: Readiness = Readiness(ERROR);
+ pub const READ_CLOSED: Readiness = Readiness(READ_CLOSED);
+ pub const WRITE_CLOSED: Readiness = Readiness(WRITE_CLOSED);
+ pub const PRIORITY: Readiness = Readiness(PRIORITY);
+
+ fn matches(&self, event: &Event) -> bool {
+ // If we expect a readiness flag then the event must also have it set.
+ // In maths terms that is p -> q, which is the same as !p || q.
+ (!self.is(READABLE) || event.is_readable())
+ && (!self.is(WRITABLE) || event.is_writable())
+ && (!self.is(AIO) || event.is_aio())
+ && (!self.is(LIO) || event.is_lio())
+ && (!self.is(ERROR) || event.is_error())
+ && (!self.is(READ_CLOSED) || event.is_read_closed())
+ && (!self.is(WRITE_CLOSED) || event.is_write_closed())
+ && (!self.is(PRIORITY) || event.is_priority())
+ }
+
+ /// Usage: `self.is(READABLE)`.
+ fn is(&self, value: usize) -> bool {
+ self.0 & value != 0
+ }
+}
+
+impl BitOr for Readiness {
+ type Output = Self;
+
+ fn bitor(self, other: Self) -> Self {
+ Readiness(self.0 | other.0)
+ }
+}
+
+impl From<Interest> for Readiness {
+ fn from(interests: Interest) -> Readiness {
+ let mut readiness = Readiness(0);
+ if interests.is_readable() {
+ readiness.0 |= READABLE;
+ }
+ if interests.is_writable() {
+ readiness.0 |= WRITABLE;
+ }
+ if interests.is_aio() {
+ readiness.0 |= AIO;
+ }
+ if interests.is_lio() {
+ readiness.0 |= LIO;
+ }
+ readiness
+ }
+}
+
+pub fn expect_events(poll: &mut Poll, events: &mut Events, mut expected: Vec<ExpectEvent>) {
+ // In a lot of calls we expect more than one event, but poll may return only
+ // the first event in a single call. To be a bit more lenient we'll poll a
+ // couple of times.
+ for _ in 0..3 {
+ poll.poll(events, Some(Duration::from_millis(500)))
+ .expect("unable to poll");
+
+ for event in events.iter() {
+ let index = expected.iter().position(|expected| expected.matches(event));
+
+ if let Some(index) = index {
+ expected.swap_remove(index);
+ } else {
+ // Must accept sporadic events.
+ warn!("got unexpected event: {:?}", event);
+ }
+ }
+
+ if expected.is_empty() {
+ return;
+ }
+ }
+
+ assert!(
+ expected.is_empty(),
+ "the following expected events were not found: {:?}",
+ expected
+ );
+}
+
+pub fn expect_no_events(poll: &mut Poll, events: &mut Events) {
+ poll.poll(events, Some(Duration::from_millis(50)))
+ .expect("unable to poll");
+ if !events.is_empty() {
+ for event in events.iter() {
+ error!("unexpected event: {:?}", event);
+ }
+ panic!("received events, but didn't expect any, see above");
+ }
+}
+
+ /// Assert that `result` is an error and that the formatted error (via
+ /// `fmt::Display`) contains `expected_msg`.
+pub fn assert_error<T, E: fmt::Display>(result: Result<T, E>, expected_msg: &str) {
+ match result {
+ Ok(_) => panic!("unexpected OK result"),
+ Err(err) => assert!(
+ err.to_string().contains(expected_msg),
+ "wanted: {}, got: {}",
+ expected_msg,
+ err,
+ ),
+ }
+}
+
+/// Assert that the provided result is an `io::Error` with kind `WouldBlock`.
+pub fn assert_would_block<T>(result: io::Result<T>) {
+ match result {
+ Ok(_) => panic!("unexpected OK result, expected a `WouldBlock` error"),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {}
+ Err(err) => panic!("unexpected error result: {}", err),
+ }
+}
+
+/// Assert that `NONBLOCK` is set on `socket`.
+#[cfg(unix)]
+pub fn assert_socket_non_blocking<S>(socket: &S)
+where
+ S: AsRawFd,
+{
+ let flags = unsafe { libc::fcntl(socket.as_raw_fd(), libc::F_GETFL) };
+ assert!(flags & libc::O_NONBLOCK != 0, "socket not non-blocking");
+}
+
+#[cfg(windows)]
+pub fn assert_socket_non_blocking<S>(_: &S) {
+ // No way to get this information...
+}
+
+/// Assert that `CLOEXEC` is set on `socket`.
+#[cfg(unix)]
+pub fn assert_socket_close_on_exec<S>(socket: &S)
+where
+ S: AsRawFd,
+{
+ let flags = unsafe { libc::fcntl(socket.as_raw_fd(), libc::F_GETFD) };
+ assert!(flags & libc::FD_CLOEXEC != 0, "socket flag CLOEXEC not set");
+}
+
+#[cfg(windows)]
+pub fn assert_socket_close_on_exec<S>(_: &S) {
+ // Windows doesn't have this concept.
+}
+
+/// Bind to any port on localhost.
+pub fn any_local_address() -> SocketAddr {
+ "127.0.0.1:0".parse().unwrap()
+}
+
+ /// Bind to any port on localhost, using an IPv6 address.
+pub fn any_local_ipv6_address() -> SocketAddr {
+ "[::1]:0".parse().unwrap()
+}
+
+#[cfg(unix)]
+pub fn set_linger_zero(socket: &TcpStream) {
+ let val = libc::linger {
+ l_onoff: 1,
+ l_linger: 0,
+ };
+ let res = unsafe {
+ libc::setsockopt(
+ socket.as_raw_fd(),
+ libc::SOL_SOCKET,
+ #[cfg(target_vendor = "apple")]
+ libc::SO_LINGER_SEC,
+ #[cfg(not(target_vendor = "apple"))]
+ libc::SO_LINGER,
+ &val as *const libc::linger as *const libc::c_void,
+ size_of::<libc::linger>() as libc::socklen_t,
+ )
+ };
+ assert_eq!(res, 0);
+}
+
+#[cfg(windows)]
+pub fn set_linger_zero(socket: &TcpStream) {
+ use std::os::windows::io::AsRawSocket;
+ use winapi::um::winsock2::{linger, setsockopt, SOCKET_ERROR, SOL_SOCKET, SO_LINGER};
+
+ let val = linger {
+ l_onoff: 1,
+ l_linger: 0,
+ };
+
+ let res = unsafe {
+ setsockopt(
+ socket.as_raw_socket() as _,
+ SOL_SOCKET,
+ SO_LINGER,
+ &val as *const _ as *const _,
+ size_of::<linger>() as _,
+ )
+ };
+ assert!(
+ res != SOCKET_ERROR,
+ "error setting linger: {}",
+ io::Error::last_os_error()
+ );
+}
+
+/// Returns a path to a temporary file using `name` as filename.
+pub fn temp_file(name: &'static str) -> PathBuf {
+ let mut path = temp_dir();
+ path.push(name);
+ path
+}
+
+/// Returns the temporary directory for Mio test files.
+fn temp_dir() -> PathBuf {
+ let mut path = env::temp_dir();
+ path.push("mio_tests");
+ path
+}
+
+/// A checked {write, send, send_to} macro that ensures the entire buffer is
+/// written.
+///
+/// Usage: `checked_write!(stream.write(&DATA));`
+/// Also works for send(_to): `checked_write!(socket.send_to(DATA, address))`.
+macro_rules! checked_write {
+ ($socket: ident . $method: ident ( $data: expr $(, $arg: expr)* ) ) => {{
+ let data = $data;
+ let n = $socket.$method($data $(, $arg)*)
+ .expect("unable to write to socket");
+ assert_eq!(n, data.len(), "short write");
+ }};
+}
+
+ /// A checked {read, recv, recv_from, peek, peek_from} macro that ensures the
+ /// expected data is read.
+///
+/// Usage: `expect_read!(stream.read(&mut buf), DATA);` reads into `buf` and
+/// compares it to `DATA`.
+/// Also works for recv(_from): `expect_read!(socket.recv_from(&mut buf), DATA, address)`.
+macro_rules! expect_read {
+ ($socket: ident . $method: ident ( $buf: expr $(, $arg: expr)* ), $expected: expr) => {{
+ let n = $socket.$method($buf $(, $arg)*)
+ .expect("unable to read from socket");
+ let expected = $expected;
+ assert_eq!(n, expected.len());
+ assert_eq!(&$buf[..n], expected);
+ }};
+ // TODO: change the call sites to check the source address.
+ // Support for recv_from and peek_from, without checking the address.
+ ($socket: ident . $method: ident ( $buf: expr $(, $arg: expr)* ), $expected: expr, __anywhere) => {{
+ let (n, _address) = $socket.$method($buf $(, $arg)*)
+ .expect("unable to read from socket");
+ let expected = $expected;
+ assert_eq!(n, expected.len());
+ assert_eq!(&$buf[..n], expected);
+ }};
+ // Support for recv_from and peek_from for `UnixDatagram`s.
+ ($socket: ident . $method: ident ( $buf: expr $(, $arg: expr)* ), $expected: expr, path: $source: expr) => {{
+ let (n, path) = $socket.$method($buf $(, $arg)*)
+ .expect("unable to read from socket");
+ let expected = $expected;
+ let source = $source;
+ assert_eq!(n, expected.len());
+ assert_eq!(&$buf[..n], expected);
+ assert_eq!(
+ path.as_pathname().expect("failed to get path name"),
+ source
+ );
+ }};
+ // Support for recv_from and peek_from for `UdpSocket`s.
+ ($socket: ident . $method: ident ( $buf: expr $(, $arg: expr)* ), $expected: expr, $source: expr) => {{
+ let (n, address) = $socket.$method($buf $(, $arg)*)
+ .expect("unable to read from socket");
+ let expected = $expected;
+ let source = $source;
+ assert_eq!(n, expected.len());
+ assert_eq!(&$buf[..n], expected);
+ assert_eq!(address, source);
+ }};
+}
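+
+ // Minimal usage sketch (illustrative only, not taken from the test suite),
+ // assuming a connected pair `client`/`server` that is ready for I/O and a
+ // buffer `buf` large enough for `DATA1`:
+ //
+ //     checked_write!(client.write(DATA1));
+ //     expect_read!(server.read(&mut buf), DATA1);
+ //
+ // `checked_write!` asserts the whole buffer was written; `expect_read!` asserts
+ // both the number of bytes read and the bytes themselves match `DATA1`.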
--- /dev/null
+#![cfg(all(feature = "os-poll", feature = "net"))]
+
+use mio::{Events, Poll, Token, Waker};
+use std::sync::{Arc, Barrier};
+use std::thread;
+use std::time::Duration;
+
+mod util;
+use util::{assert_send, assert_sync, expect_no_events, init};
+
+#[test]
+fn is_send_and_sync() {
+ assert_send::<Waker>();
+ assert_sync::<Waker>();
+}
+
+#[test]
+fn waker() {
+ init();
+
+ let mut poll = Poll::new().expect("unable to create new Poll instance");
+ let mut events = Events::with_capacity(10);
+
+ let token = Token(10);
+ let waker = Waker::new(poll.registry(), token).expect("unable to create waker");
+
+ waker.wake().expect("unable to wake");
+ expect_waker_event(&mut poll, &mut events, token);
+}
+
+#[test]
+fn waker_multiple_wakeups_same_thread() {
+ init();
+
+ let mut poll = Poll::new().expect("unable to create new Poll instance");
+ let mut events = Events::with_capacity(10);
+
+ let token = Token(10);
+ let waker = Waker::new(poll.registry(), token).expect("unable to create waker");
+
+ for _ in 0..3 {
+ waker.wake().expect("unable to wake");
+ }
+ expect_waker_event(&mut poll, &mut events, token);
+}
+
+#[test]
+fn waker_wakeup_different_thread() {
+ init();
+
+ let mut poll = Poll::new().expect("unable to create new Poll instance");
+ let mut events = Events::with_capacity(10);
+
+ let token = Token(10);
+ let waker = Waker::new(poll.registry(), token).expect("unable to create waker");
+
+ let waker = Arc::new(waker);
+ let waker1 = Arc::clone(&waker);
+ let handle = thread::spawn(move || {
+ waker1.wake().expect("unable to wake");
+ });
+
+ expect_waker_event(&mut poll, &mut events, token);
+
+ expect_no_events(&mut poll, &mut events);
+
+ handle.join().unwrap();
+}
+
+#[test]
+fn waker_multiple_wakeups_different_thread() {
+ init();
+
+ let mut poll = Poll::new().expect("unable to create new Poll instance");
+ let mut events = Events::with_capacity(10);
+
+ let token = Token(10);
+ let waker = Waker::new(poll.registry(), token).expect("unable to create waker");
+ let waker = Arc::new(waker);
+ let waker1 = Arc::clone(&waker);
+ let waker2 = Arc::clone(&waker1);
+
+ let handle1 = thread::spawn(move || {
+ waker1.wake().expect("unable to wake");
+ });
+
+ let barrier = Arc::new(Barrier::new(2));
+ let barrier2 = barrier.clone();
+ let handle2 = thread::spawn(move || {
+ barrier2.wait();
+ waker2.wake().expect("unable to wake");
+ });
+
+ // Receive the event from thread 1.
+ expect_waker_event(&mut poll, &mut events, token);
+
+ // Unblock thread 2.
+ barrier.wait();
+
+ // Now we need to receive another event from thread 2.
+ expect_waker_event(&mut poll, &mut events, token);
+
+ expect_no_events(&mut poll, &mut events);
+
+ handle1.join().unwrap();
+ handle2.join().unwrap();
+}
+
+#[test]
+#[cfg_attr(
+ not(debug_assertions),
+ ignore = "only works with debug_assertions enabled"
+)]
+#[should_panic = "Only a single `Waker` can be active per `Poll` instance"]
+fn using_multiple_wakers_panics() {
+ init();
+
+ let poll = Poll::new().expect("unable to create new Poll instance");
+ let token1 = Token(10);
+ let token2 = Token(11);
+
+ let waker1 = Waker::new(poll.registry(), token1).expect("unable to create first waker");
+ // This should panic.
+ let waker2 = Waker::new(poll.registry(), token2).unwrap();
+
+ drop(waker1);
+ drop(waker2);
+}
+
+fn expect_waker_event(poll: &mut Poll, events: &mut Events, token: Token) {
+ poll.poll(events, Some(Duration::from_millis(100))).unwrap();
+ assert!(!events.is_empty());
+ for event in events.iter() {
+ assert_eq!(event.token(), token);
+ assert!(event.is_readable());
+ }
+}
--- /dev/null
+#![cfg(all(windows, feature = "os-poll", feature = "os-ext"))]
+
+use std::fs::OpenOptions;
+use std::io::{self, Read, Write};
+use std::os::windows::fs::OpenOptionsExt;
+use std::os::windows::io::{FromRawHandle, IntoRawHandle};
+use std::time::Duration;
+
+use mio::windows::NamedPipe;
+use mio::{Events, Interest, Poll, Token};
+use rand::Rng;
+use winapi::shared::winerror::*;
+use winapi::um::winbase::FILE_FLAG_OVERLAPPED;
+
+fn _assert_kinds() {
+ fn _assert_send<T: Send>() {}
+ fn _assert_sync<T: Sync>() {}
+ _assert_send::<NamedPipe>();
+ _assert_sync::<NamedPipe>();
+}
+
+macro_rules! t {
+ ($e:expr) => {
+ match $e {
+ Ok(e) => e,
+ Err(e) => panic!("{} failed with {}", stringify!($e), e),
+ }
+ };
+}
+
+fn server() -> (NamedPipe, String) {
+ let num: u64 = rand::thread_rng().gen();
+ let name = format!(r"\\.\pipe\my-pipe-{}", num);
+ let pipe = t!(NamedPipe::new(&name));
+ (pipe, name)
+}
+
+fn client(name: &str) -> NamedPipe {
+ let mut opts = OpenOptions::new();
+ opts.read(true)
+ .write(true)
+ .custom_flags(FILE_FLAG_OVERLAPPED);
+ let file = t!(opts.open(name));
+ unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) }
+}
+
+fn pipe() -> (NamedPipe, NamedPipe) {
+ let (pipe, name) = server();
+ (pipe, client(&name))
+}
+
+#[test]
+fn writable_after_register() {
+ let (mut server, mut client) = pipe();
+ let mut poll = t!(Poll::new());
+ t!(poll.registry().register(
+ &mut server,
+ Token(0),
+ Interest::WRITABLE | Interest::READABLE,
+ ));
+ t!(poll
+ .registry()
+ .register(&mut client, Token(1), Interest::WRITABLE));
+
+ let mut events = Events::with_capacity(128);
+ t!(poll.poll(&mut events, None));
+
+ let events = events.iter().collect::<Vec<_>>();
+ assert!(events
+ .iter()
+ .any(|e| { e.token() == Token(0) && e.is_writable() }));
+ assert!(events
+ .iter()
+ .any(|e| { e.token() == Token(1) && e.is_writable() }));
+}
+
+#[test]
+fn write_then_read() {
+ let (mut server, mut client) = pipe();
+ let mut poll = t!(Poll::new());
+ t!(poll.registry().register(
+ &mut server,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+ t!(poll.registry().register(
+ &mut client,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+
+ let mut events = Events::with_capacity(128);
+ t!(poll.poll(&mut events, None));
+
+ assert_eq!(t!(client.write(b"1234")), 4);
+
+ loop {
+ t!(poll.poll(&mut events, None));
+ let events = events.iter().collect::<Vec<_>>();
+ if let Some(event) = events.iter().find(|e| e.token() == Token(0)) {
+ if event.is_readable() {
+ break;
+ }
+ }
+ }
+
+ let mut buf = [0; 10];
+ assert_eq!(t!(server.read(&mut buf)), 4);
+ assert_eq!(&buf[..4], b"1234");
+}
+
+#[test]
+fn connect_before_client() {
+ let (mut server, name) = server();
+ let mut poll = t!(Poll::new());
+ t!(poll.registry().register(
+ &mut server,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+
+ let mut events = Events::with_capacity(128);
+ t!(poll.poll(&mut events, Some(Duration::new(0, 0))));
+ assert_eq!(events.iter().count(), 0);
+ assert_eq!(
+ server.connect().err().unwrap().kind(),
+ io::ErrorKind::WouldBlock
+ );
+
+ let mut client = client(&name);
+ t!(poll.registry().register(
+ &mut client,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+ loop {
+ t!(poll.poll(&mut events, None));
+ let e = events.iter().collect::<Vec<_>>();
+ if let Some(event) = e.iter().find(|e| e.token() == Token(0)) {
+ if event.is_writable() {
+ break;
+ }
+ }
+ }
+}
+
+#[test]
+fn connect_after_client() {
+ let (mut server, name) = server();
+ let mut poll = t!(Poll::new());
+ t!(poll.registry().register(
+ &mut server,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+
+ let mut events = Events::with_capacity(128);
+ t!(poll.poll(&mut events, Some(Duration::new(0, 0))));
+ assert_eq!(events.iter().count(), 0);
+
+ let mut client = client(&name);
+ t!(poll.registry().register(
+ &mut client,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+ t!(server.connect());
+ loop {
+ t!(poll.poll(&mut events, None));
+ let e = events.iter().collect::<Vec<_>>();
+ if let Some(event) = e.iter().find(|e| e.token() == Token(0)) {
+ if event.is_writable() {
+ break;
+ }
+ }
+ }
+}
+
+#[test]
+fn write_disconnected() {
+ let mut poll = t!(Poll::new());
+ let (mut server, mut client) = pipe();
+ t!(poll.registry().register(
+ &mut server,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+ t!(poll.registry().register(
+ &mut client,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+
+ drop(client);
+
+ let mut events = Events::with_capacity(128);
+ t!(poll.poll(&mut events, None));
+ assert!(events.iter().count() > 0);
+
+ // This should not hang.
+ let mut i = 0;
+ loop {
+ i += 1;
+ assert!(i < 16, "too many iterations");
+
+ match server.write(&[0]) {
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ t!(poll.poll(&mut events, None));
+ assert!(events.iter().count() > 0);
+ }
+ Err(e) if e.raw_os_error() == Some(ERROR_NO_DATA as i32) => break,
+ e => panic!("{:?}", e),
+ }
+ }
+}
+
+#[test]
+fn write_then_drop() {
+ let (mut server, mut client) = pipe();
+ let mut poll = t!(Poll::new());
+ t!(poll.registry().register(
+ &mut server,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+ t!(poll.registry().register(
+ &mut client,
+ Token(1),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+ assert_eq!(t!(client.write(b"1234")), 4);
+ drop(client);
+
+ let mut events = Events::with_capacity(128);
+
+ 'outer: loop {
+ t!(poll.poll(&mut events, None));
+ let events = events.iter().collect::<Vec<_>>();
+
+ for event in &events {
+ if event.is_readable() && event.token() == Token(0) {
+ break 'outer;
+ }
+ }
+ }
+
+ let mut buf = [0; 10];
+ assert_eq!(t!(server.read(&mut buf)), 4);
+ assert_eq!(&buf[..4], b"1234");
+}
+
+#[test]
+fn connect_twice() {
+ let (mut server, name) = server();
+ let mut c1 = client(&name);
+ let mut poll = t!(Poll::new());
+ t!(poll.registry().register(
+ &mut server,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ ));
+ t!(poll
+ .registry()
+ .register(&mut c1, Token(1), Interest::READABLE | Interest::WRITABLE,));
+ drop(c1);
+
+ let mut events = Events::with_capacity(128);
+
+ loop {
+ t!(poll.poll(&mut events, None));
+ let events = events.iter().collect::<Vec<_>>();
+ if let Some(event) = events.iter().find(|e| e.token() == Token(0)) {
+ if event.is_readable() {
+ let mut buf = [0; 10];
+
+ match server.read(&mut buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Ok(0) => break,
+ res => panic!("{:?}", res),
+ }
+ }
+ }
+ }
+
+ t!(server.disconnect());
+ assert_eq!(
+ server.connect().err().unwrap().kind(),
+ io::ErrorKind::WouldBlock
+ );
+
+ let mut c2 = client(&name);
+ t!(poll
+ .registry()
+ .register(&mut c2, Token(2), Interest::READABLE | Interest::WRITABLE,));
+
+ 'outer: loop {
+ t!(poll.poll(&mut events, None));
+ let events = events.iter().collect::<Vec<_>>();
+
+ for event in &events {
+ if event.is_writable() && event.token() == Token(0) {
+ break 'outer;
+ }
+ }
+ }
+}
+
+#[test]
+fn reregister_deregister_before_register() {
+ let (mut pipe, _) = server();
+ let poll = t!(Poll::new());
+
+ assert_eq!(
+ poll.registry()
+ .reregister(&mut pipe, Token(0), Interest::READABLE,)
+ .unwrap_err()
+ .kind(),
+ io::ErrorKind::NotFound,
+ );
+
+ assert_eq!(
+ poll.registry().deregister(&mut pipe).unwrap_err().kind(),
+ io::ErrorKind::NotFound,
+ );
+}
+
+#[test]
+fn reregister_deregister_different_poll() {
+ let (mut pipe, _) = server();
+ let poll1 = t!(Poll::new());
+ let poll2 = t!(Poll::new());
+
+ // Register with 1
+ t!(poll1
+ .registry()
+ .register(&mut pipe, Token(0), Interest::READABLE));
+
+ assert_eq!(
+ poll2
+ .registry()
+ .reregister(&mut pipe, Token(0), Interest::READABLE,)
+ .unwrap_err()
+ .kind(),
+ io::ErrorKind::AlreadyExists,
+ );
+
+ assert_eq!(
+ poll2.registry().deregister(&mut pipe).unwrap_err().kind(),
+ io::ErrorKind::AlreadyExists,
+ );
+}