io_uring: carve io_poll_check_events fast path
authorPavel Begunkov <asml.silence@gmail.com>
Wed, 30 Nov 2022 15:21:52 +0000 (15:21 +0000)
committerJens Axboe <axboe@kernel.dk>
Wed, 30 Nov 2022 17:26:57 +0000 (10:26 -0700)
The fast path in io_poll_check_events() is when we have only one
(i.e. master) reference. Move all verification, cancellation
checks, edge case handling and so on under a common if.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8c21c5d5e027e32dc553705e88796dec79ff6f93.1669821213.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/poll.c

index 599ba28..8987e13 100644 (file)
@@ -247,27 +247,30 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
        do {
                v = atomic_read(&req->poll_refs);
 
-               /* tw handler should be the owner, and so have some references */
-               if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
-                       return IOU_POLL_DONE;
-               if (v & IO_POLL_CANCEL_FLAG)
-                       return -ECANCELED;
-               /*
-                * cqe.res contains only events of the first wake up
-                * and all others are be lost. Redo vfs_poll() to get
-                * up to date state.
-                */
-               if ((v & IO_POLL_REF_MASK) != 1)
-                       req->cqe.res = 0;
-               if (v & IO_POLL_RETRY_FLAG) {
-                       req->cqe.res = 0;
+               if (unlikely(v != 1)) {
+                       /* tw should be the owner and so have some refs */
+                       if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
+                               return IOU_POLL_DONE;
+                       if (v & IO_POLL_CANCEL_FLAG)
+                               return -ECANCELED;
                        /*
-                        * We won't find new events that came in between
-                        * vfs_poll and the ref put unless we clear the flag
-                        * in advance.
+                        * cqe.res contains only events of the first wake up
+                        * and all others are to be lost. Redo vfs_poll() to get
+                        * up to date state.
                         */
-                       atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
-                       v &= ~IO_POLL_RETRY_FLAG;
+                       if ((v & IO_POLL_REF_MASK) != 1)
+                               req->cqe.res = 0;
+
+                       if (v & IO_POLL_RETRY_FLAG) {
+                               req->cqe.res = 0;
+                               /*
+                                * We won't find new events that came in between
+                                * vfs_poll and the ref put unless we clear the
+                                * flag in advance.
+                                */
+                               atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
+                               v &= ~IO_POLL_RETRY_FLAG;
+                       }
                }
 
                /* the mask was stashed in __io_poll_execute */