io_uring/iopoll: unify tw breaking logic
author: Pavel Begunkov <asml.silence@gmail.com>
Thu, 8 Sep 2022 15:56:55 +0000 (16:56 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 21 Sep 2022 19:15:01 +0000 (13:15 -0600)
Let's keep the checks for whether or not to break out of the iopoll loop
the same for normal and deferred task_work; this includes the
->cached_cq_tail checks guarding against polling more than asked for.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d2fa8a44f8114f55a4807528da438cde93815360.1662652536.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c

index c6c32aa..12e8acd 100644 (file)
@@ -1428,21 +1428,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                 */
                if (wq_list_empty(&ctx->iopoll_list) ||
                    io_task_work_pending(ctx)) {
+                       u32 tail = ctx->cached_cq_tail;
+
                        if (!llist_empty(&ctx->work_llist))
                                __io_run_local_work(ctx, true);
+
                        if (task_work_pending(current) ||
                            wq_list_empty(&ctx->iopoll_list)) {
-                               u32 tail = ctx->cached_cq_tail;
-
                                mutex_unlock(&ctx->uring_lock);
                                io_run_task_work();
                                mutex_lock(&ctx->uring_lock);
-
-                               /* some requests don't go through iopoll_list */
-                               if (tail != ctx->cached_cq_tail ||
-                                   wq_list_empty(&ctx->iopoll_list))
-                                       break;
                        }
+                       /* some requests don't go through iopoll_list */
+                       if (tail != ctx->cached_cq_tail ||
+                           wq_list_empty(&ctx->iopoll_list))
+                               break;
                }
                ret = io_do_iopoll(ctx, !min);
                if (ret < 0)