io_uring: ensure iopoll runs local task work as well
author    Jens Axboe <axboe@kernel.dk>    Sat, 3 Sep 2022 15:52:01 +0000 (09:52 -0600)
committer Jens Axboe <axboe@kernel.dk>    Wed, 21 Sep 2022 16:30:43 +0000 (10:30 -0600)
Combine the two checks we have, one for pending task_work and one for
whether we need to shuffle the mutex, into a single check, so that we
unify how task_work is run in the iopoll loop. This helps ensure that
local task_work is run when needed, and also optimizes that path by
avoiding the mutex shuffle when it isn't needed.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
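
For context, the path touched here is the IOPOLL completion loop on rings
that also use deferred ("local") task work, i.e. IORING_SETUP_DEFER_TASKRUN
(which feeds ctx->work_llist) on top of IORING_SETUP_IOPOLL. A minimal
liburing sketch of such a ring follows. It is illustrative only: the device
path and read size are placeholder assumptions, DEFER_TASKRUN also requires
IORING_SETUP_SINGLE_ISSUER, and it needs a kernel from this series.

/*
 * Illustrative only: an IOPOLL ring that also uses deferred task work.
 * Assumes a poll-capable block device readable with O_DIRECT;
 * "/dev/nvme0n1" is a placeholder. Build with -luring.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd, ret;

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL |
					    IORING_SETUP_SINGLE_ISSUER |
					    IORING_SETUP_DEFER_TASKRUN);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/*
	 * The wait below polls for the completion; with this patch, any
	 * pending local task work is run from the iopoll loop as well.
	 */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret)
		io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}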
io_uring/io_uring.c
io_uring/io_uring.h

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a1692da..0482087 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1428,25 +1428,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                 * forever, while the workqueue is stuck trying to acquire the
                 * very same mutex.
                 */
-               if (wq_list_empty(&ctx->iopoll_list)) {
-                       u32 tail = ctx->cached_cq_tail;
-
-                       mutex_unlock(&ctx->uring_lock);
-                       ret = io_run_task_work_ctx(ctx);
-                       mutex_lock(&ctx->uring_lock);
-                       if (ret < 0)
-                               break;
-
-                       /* some requests don't go through iopoll_list */
-                       if (tail != ctx->cached_cq_tail ||
-                           wq_list_empty(&ctx->iopoll_list))
-                               break;
-               }
-
-               if (task_work_pending(current)) {
-                       mutex_unlock(&ctx->uring_lock);
-                       io_run_task_work();
-                       mutex_lock(&ctx->uring_lock);
+               if (wq_list_empty(&ctx->iopoll_list) ||
+                   io_task_work_pending(ctx)) {
+                       if (!llist_empty(&ctx->work_llist))
+                               __io_run_local_work(ctx, true);
+                       if (task_work_pending(current) ||
+                           wq_list_empty(&ctx->iopoll_list)) {
+                               u32 tail = ctx->cached_cq_tail;
+
+                               mutex_unlock(&ctx->uring_lock);
+                               ret = io_run_task_work();
+                               mutex_lock(&ctx->uring_lock);
+
+                               if (ret < 0)
+                                       break;
+
+                               /* some requests don't go through iopoll_list */
+                               if (tail != ctx->cached_cq_tail ||
+                                   wq_list_empty(&ctx->iopoll_list))
+                                       break;
+                       }
                }
                ret = io_do_iopoll(ctx, !min);
                if (ret < 0)
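
The reshaped loop, pulled out as a stand-alone sketch (all helpers here
are stubs with hypothetical names; only the ordering mirrors the patch):
local task work is drained first, inline and with the lock still held,
and the mutex round-trip plus the cq tail check only happen when generic
task_work is pending or nothing is queued for polling.

/* Stand-alone sketch; stub predicates, hypothetical names, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool iopoll_list_empty(void)    { return true; }   /* stub */
static bool local_work_pending(void)   { return true; }   /* stub */
static bool generic_work_pending(void) { return false; }  /* stub */

int main(void)
{
	if (iopoll_list_empty() || local_work_pending() || generic_work_pending()) {
		/* 1) local task work never needs the mutex shuffle */
		if (local_work_pending())
			puts("run local task_work inline, lock held");
		/* 2) only shuffle the mutex if there is still a reason to */
		if (generic_work_pending() || iopoll_list_empty())
			puts("unlock -> run task_work -> relock, then recheck cq tail");
	}
	puts("io_do_iopoll()");
	return 0;
}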
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 0f90d1d..9d89425 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -236,6 +236,12 @@ static inline int io_run_task_work(void)
        return 0;
 }
 
+static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+{
+       return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
+               !wq_list_empty(&ctx->work_llist);
+}
+
 static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
 {
        int ret = 0;
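
One note on the new helper: ctx->work_llist is a struct llist_head, while
wq_list_empty() is written against io_uring's own io_wq_work_list. This
compiles and behaves correctly here because wq_list_empty() is, at this
point in the tree, a macro that only reads the ->first member, which both
list types provide; llist_empty() would be the type-exact check. Roughly
(paraphrased from io_uring/slist.h):

/* Paraphrased; a macro rather than a typed inline, so it also accepts
 * a struct llist_head, whose first member is likewise named ->first.
 */
#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)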