io_uring: simplify fallback execution
author Pavel Begunkov <asml.silence@gmail.com>
Mon, 16 Jan 2023 16:48:59 +0000 (16:48 +0000)
committer Jens Axboe <axboe@kernel.dk>
Sun, 29 Jan 2023 22:17:41 +0000 (15:17 -0700)
Lock the ring with uring_lock in io_fallback_req_func(), which should
make it a bit safer and simpler. With that we also don't need to pin
the ctx refs, as io_ring_exit_work() will wait until uring_lock is
released.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/56170e6a0cbfc8edee2794c6613e8f6f1d76d276.1673887636.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c

index f49d003..c314dc1 100644
@@ -245,17 +245,15 @@ static __cold void io_fallback_req_func(struct work_struct *work)
                                                fallback_work.work);
        struct llist_node *node = llist_del_all(&ctx->fallback_llist);
        struct io_kiocb *req, *tmp;
-       bool locked = false;
+       bool locked = true;
 
-       percpu_ref_get(&ctx->refs);
+       mutex_lock(&ctx->uring_lock);
        llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
                req->io_task_work.func(req, &locked);
-
-       if (locked) {
-               io_submit_flush_completions(ctx);
-               mutex_unlock(&ctx->uring_lock);
-       }
-       percpu_ref_put(&ctx->refs);
+       if (WARN_ON_ONCE(!locked))
+               return;
+       io_submit_flush_completions(ctx);
+       mutex_unlock(&ctx->uring_lock);
 }
 
 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
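
For reference, a sketch of how io_fallback_req_func() reads after this
change, reconstructed from the hunk above (the container_of()
initializer sits above the hunk context and is an assumption):

static __cold void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;
	bool locked = true;

	/* run all fallback task_work items under the ring's uring_lock */
	mutex_lock(&ctx->uring_lock);
	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
		req->io_task_work.func(req, &locked);
	/* the callbacks are not expected to drop the lock behind our back */
	if (WARN_ON_ONCE(!locked))
		return;
	io_submit_flush_completions(ctx);
	mutex_unlock(&ctx->uring_lock);
}

Holding uring_lock for the whole walk means the function can no longer
race with io_ring_exit_work() freeing the ctx, which is why the
percpu_ref_get()/put() pair on ctx->refs could be dropped.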