io_uring: remove check_cq checking from hot paths
author	Pavel Begunkov <asml.silence@gmail.com>
Wed, 15 Jun 2022 16:33:55 +0000 (17:33 +0100)
committer	Jens Axboe <axboe@kernel.dk>
Mon, 25 Jul 2022 00:39:13 +0000 (18:39 -0600)
All ctx->check_cq events are slow path, so don't test every single flag one
by one in the hot path; instead, add a common guarding if.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/dff026585cea7ff3a172a7c83894a3b0111bbf6a.1655310733.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c

index 8a8d8b3..a4c1746 100644
@@ -1259,24 +1259,25 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
        int ret = 0;
        unsigned long check_cq;
 
+       check_cq = READ_ONCE(ctx->check_cq);
+       if (unlikely(check_cq)) {
+               if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
+                       __io_cqring_overflow_flush(ctx, false);
+               /*
+                * Similarly do not spin if we have not informed the user of any
+                * dropped CQE.
+                */
+               if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+                       return -EBADR;
+       }
        /*
         * Don't enter poll loop if we already have events pending.
         * If we do, we can potentially be spinning for commands that
         * already triggered a CQE (eg in error).
         */
-       check_cq = READ_ONCE(ctx->check_cq);
-       if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-               __io_cqring_overflow_flush(ctx, false);
        if (io_cqring_events(ctx))
                return 0;
 
-       /*
-        * Similarly do not spin if we have not informed the user of any
-        * dropped CQE.
-        */
-       if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
-               return -EBADR;
-
        do {
                /*
                 * If a submit got punted to a workqueue, we can have the
@@ -2203,12 +2204,15 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
        ret = io_run_task_work_sig();
        if (ret || io_should_wake(iowq))
                return ret;
+
        check_cq = READ_ONCE(ctx->check_cq);
-       /* let the caller flush overflows, retry */
-       if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-               return 1;
-       if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
-               return -EBADR;
+       if (unlikely(check_cq)) {
+               /* let the caller flush overflows, retry */
+               if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
+                       return 1;
+               if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+                       return -EBADR;
+       }
        if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
                return -ETIME;
        return 1;
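
For readers outside the kernel tree, a minimal standalone C sketch of the same
pattern follows: several rarely-set conditions share one flags word, the hot
path tests that word once, and the individual bits are only examined inside the
single unlikely() branch. The flag names and the poll_check() helper are made
up for illustration and are not io_uring API.

	/* Sketch of "one guarding if over slow-path flags"; not kernel code. */
	#include <stdio.h>

	#define unlikely(x)	__builtin_expect(!!(x), 0)
	#define BIT(nr)		(1UL << (nr))

	enum {
		CHECK_OVERFLOW_BIT = 0,	/* hypothetical slow-path condition */
		CHECK_DROPPED_BIT  = 1,	/* hypothetical slow-path condition */
	};

	/* Hot path: one test for "any slow-path work?", bit tests only inside. */
	static int poll_check(unsigned long check_flags)
	{
		if (unlikely(check_flags)) {
			if (check_flags & BIT(CHECK_OVERFLOW_BIT))
				printf("flush overflow\n");
			if (check_flags & BIT(CHECK_DROPPED_BIT))
				return -1;	/* report dropped events to the caller */
		}
		/* ... fast-path polling would continue here ... */
		return 0;
	}

	int main(void)
	{
		printf("clean: %d\n", poll_check(0));
		printf("dropped: %d\n", poll_check(BIT(CHECK_DROPPED_BIT)));
		return 0;
	}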