io_uring: inline __io_req_complete_put()
Author:     Pavel Begunkov <asml.silence@gmail.com>
AuthorDate: Wed, 23 Nov 2022 11:33:40 +0000 (11:33 +0000)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 1 Feb 2023 07:34:41 +0000 (08:34 +0100)
[ Upstream commit fa18fa2272c7469e470dcb7bf838ea50a25494ca ]

Inline __io_req_complete_put() into io_req_complete_post(); there are no
other users.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1923a4dfe80fa877f859a22ed3df2d5fc8ecf02b.1669203009.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: ef5c600adb1d ("io_uring: always prep_async for drain requests")
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 50f959f..13a60f5 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -823,15 +823,19 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
        return filled;
 }
 
-static void __io_req_complete_put(struct io_kiocb *req)
+void io_req_complete_post(struct io_kiocb *req)
 {
+       struct io_ring_ctx *ctx = req->ctx;
+
+       io_cq_lock(ctx);
+       if (!(req->flags & REQ_F_CQE_SKIP))
+               __io_fill_cqe_req(ctx, req);
+
        /*
         * If we're the last reference to this request, add to our locked
         * free_list cache.
         */
        if (req_ref_put_and_test(req)) {
-               struct io_ring_ctx *ctx = req->ctx;
-
                if (req->flags & IO_REQ_LINK_FLAGS) {
                        if (req->flags & IO_DISARM_MASK)
                                io_disarm_next(req);
@@ -852,16 +856,6 @@ static void __io_req_complete_put(struct io_kiocb *req)
                wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
                ctx->locked_free_nr++;
        }
-}
-
-void io_req_complete_post(struct io_kiocb *req)
-{
-       struct io_ring_ctx *ctx = req->ctx;
-
-       io_cq_lock(ctx);
-       if (!(req->flags & REQ_F_CQE_SKIP))
-               __io_fill_cqe_req(ctx, req);
-       __io_req_complete_put(req);
        io_cq_unlock_post(ctx);
 }
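
For reference, below is a sketch of how io_req_complete_post() reads once the
patch is applied, pieced together from the two hunks above. The unchanged
context that falls between the hunks (the rest of the link handling and the
per-request cleanup before the request is added to the locked free cache) is
not part of this patch and is marked as elided rather than reproduced.

void io_req_complete_post(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;

        io_cq_lock(ctx);
        if (!(req->flags & REQ_F_CQE_SKIP))
                __io_fill_cqe_req(ctx, req);

        /*
         * If we're the last reference to this request, add to our locked
         * free_list cache.
         */
        if (req_ref_put_and_test(req)) {
                if (req->flags & IO_REQ_LINK_FLAGS) {
                        if (req->flags & IO_DISARM_MASK)
                                io_disarm_next(req);
                        /* ... remaining link handling elided (unchanged context) ... */
                }
                /* ... per-request cleanup elided (unchanged context between the hunks) ... */
                wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
                ctx->locked_free_nr++;
        }
        io_cq_unlock_post(ctx);
}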