io_uring: don't return from io_drain_req()
author		Pavel Begunkov <asml.silence@gmail.com>
		Fri, 1 Oct 2021 17:07:01 +0000 (18:07 +0100)
committer	Jens Axboe <axboe@kernel.dk>
		Tue, 19 Oct 2021 11:49:54 +0000 (05:49 -0600)
Never return from io_drain_req(); if we get there but it turns out to be
a false positive and we shouldn't actually drain, punt the request to
task_work instead.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/93583cee51b8783706b76c73196c155b28d9e762.1633107393.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
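
Note: "punt to task_work" here means that rather than telling the caller "no
drain needed", io_drain_req() now queues the request for resubmission from
task context via io_req_task_queue(). For reference, that helper looks roughly
like this in this era of the tree (reconstructed for context, not part of this
patch, so treat it as a sketch):

        /* arrange for the request to be (re)submitted from task_work */
        static void io_req_task_queue(struct io_kiocb *req)
        {
                req->io_task_work.func = io_req_task_submit;
                io_req_task_work_add(req);
        }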
fs/io_uring.c

index fb07391..b0b4c5b 100644
@@ -6428,46 +6428,39 @@ static u32 io_get_sequence(struct io_kiocb *req)
        return seq;
 }
 
-static bool io_drain_req(struct io_kiocb *req)
+static void io_drain_req(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_defer_entry *de;
        int ret;
-       u32 seq;
+       u32 seq = io_get_sequence(req);
 
        /* Still need defer if there is pending req in defer list. */
-       if (likely(list_empty_careful(&ctx->defer_list) &&
-               !(req->flags & REQ_F_IO_DRAIN))) {
-               ctx->drain_active = false;
-               return false;
-       }
-
-       seq = io_get_sequence(req);
-       /* Still a chance to pass the sequence check */
        if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
+queue:
                ctx->drain_active = false;
-               return false;
+               io_req_task_queue(req);
+               return;
        }
 
        ret = io_req_prep_async(req);
-       if (ret)
-               goto fail;
+       if (ret) {
+fail:
+               io_req_complete_failed(req, ret);
+               return;
+       }
        io_prep_async_link(req);
        de = kmalloc(sizeof(*de), GFP_KERNEL);
        if (!de) {
                ret = -ENOMEM;
-fail:
-               io_req_complete_failed(req, ret);
-               return true;
+               goto fail;
        }
 
        spin_lock(&ctx->completion_lock);
        if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
                spin_unlock(&ctx->completion_lock);
                kfree(de);
-               io_queue_async_work(req, NULL);
-               ctx->drain_active = false;
-               return true;
+               goto queue;
        }
 
        trace_io_uring_defer(ctx, req, req->user_data);
@@ -6475,7 +6468,6 @@ fail:
        de->seq = seq;
        list_add_tail(&de->list, &ctx->defer_list);
        spin_unlock(&ctx->completion_lock);
-       return true;
 }
 
 static void io_clean_op(struct io_kiocb *req)
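
Stitched together from the two hunks above, the post-patch io_drain_req()
reads as follows; note the backwards gotos into the queue: and fail: labels,
which replace the old boolean return paths. The de->req = req; line falls in
the gap between the hunks and is assumed from the surrounding tree:

        static void io_drain_req(struct io_kiocb *req)
        {
                struct io_ring_ctx *ctx = req->ctx;
                struct io_defer_entry *de;
                int ret;
                u32 seq = io_get_sequence(req);

                /* Still need defer if there is pending req in defer list. */
                if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
        queue:
                        ctx->drain_active = false;
                        io_req_task_queue(req);
                        return;
                }

                ret = io_req_prep_async(req);
                if (ret) {
        fail:
                        io_req_complete_failed(req, ret);
                        return;
                }
                io_prep_async_link(req);
                de = kmalloc(sizeof(*de), GFP_KERNEL);
                if (!de) {
                        ret = -ENOMEM;
                        goto fail;
                }

                spin_lock(&ctx->completion_lock);
                if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
                        spin_unlock(&ctx->completion_lock);
                        kfree(de);
                        goto queue;
                }

                trace_io_uring_defer(ctx, req, req->user_data);
                de->req = req;  /* assumed: sits between the two hunks above */
                de->seq = seq;
                list_add_tail(&de->list, &ctx->defer_list);
                spin_unlock(&ctx->completion_lock);
        }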
@@ -6931,8 +6923,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 {
        if (req->flags & REQ_F_FAIL) {
                io_req_complete_fail_submit(req);
-       } else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) {
-               return;
+       } else if (unlikely(req->ctx->drain_active)) {
+               io_drain_req(req);
        } else {
                int ret = io_req_prep_async(req);
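
For completeness, a sketch of io_queue_sqe_fallback() after the patch. The
tail of the else branch is cut off by the hunk above, so the ret handling
shown here is assumed from the surrounding tree rather than visible in the
diff:

        static void io_queue_sqe_fallback(struct io_kiocb *req)
        {
                if (req->flags & REQ_F_FAIL) {
                        io_req_complete_fail_submit(req);
                } else if (unlikely(req->ctx->drain_active)) {
                        /* now always consumes the request: queues, defers, or fails it */
                        io_drain_req(req);
                } else {
                        int ret = io_req_prep_async(req);

                        /* assumed tail, not visible in the hunk above */
                        if (unlikely(ret))
                                io_req_complete_failed(req, ret);
                        else
                                io_queue_async_work(req, NULL);
                }
        }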