io_uring: kill REQ_F_COMPLETE_INLINE
author     Pavel Begunkov <asml.silence@gmail.com>
           Thu, 16 Jun 2022 09:21:58 +0000 (10:21 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Mon, 25 Jul 2022 00:39:13 +0000 (18:39 -0600)
REQ_F_COMPLETE_INLINE is only needed to delay queueing into the
completion list until io_queue_sqe(), as __io_req_complete() is inlined
and we don't want to bloat the kernel.

Now that we complete in a more centralised fashion in io_issue_sqe(),
we can get rid of the flag and queue to the list directly.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/600ba20a9338b8a39b249b23d3d177803613dde4.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/io_uring_types.h
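
For readers outside the tree, the control-flow change can be modelled in a
few lines of userspace C. The sketch below is illustrative only: struct
fake_req, post_cqe() and add_compl_list() are hypothetical stand-ins for
io_uring internals. Before this patch, __io_req_complete() set
REQ_F_COMPLETE_INLINE and io_queue_sqe() tested the flag afterwards; after
it, io_issue_sqe() queues to the completion list directly whenever
IO_URING_F_COMPLETE_DEFER is set.

/* Hypothetical userspace model of the change; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define F_COMPLETE_DEFER  (1u << 0)   /* stands in for IO_URING_F_COMPLETE_DEFER */

struct fake_req {
	int res;
	bool on_compl_list;               /* models membership in compl_reqs */
};

static void post_cqe(struct fake_req *req)       /* models io_req_complete_post() */
{
	printf("post CQE, res=%d\n", req->res);
}

static void add_compl_list(struct fake_req *req) /* models io_req_add_compl_list() */
{
	req->on_compl_list = true;
	printf("queued for deferred completion\n");
}

/* After the patch: the issue path decides directly, no intermediate flag. */
static void issue(struct fake_req *req, unsigned issue_flags)
{
	if (issue_flags & F_COMPLETE_DEFER)
		add_compl_list(req);      /* batched, flushed later */
	else
		post_cqe(req);            /* immediate completion */
}

int main(void)
{
	struct fake_req a = { 0, false }, b = { 0, false };

	issue(&a, F_COMPLETE_DEFER);      /* inline submission path */
	issue(&b, 0);                     /* non-deferred path */
	return 0;
}

Folding the decision into the issue path removes both the flag write and
the later flag test on the submission hot path.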

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 72640aa..541c109 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -742,10 +742,7 @@ void io_req_complete_post(struct io_kiocb *req)
 
 inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
 {
-       if (issue_flags & IO_URING_F_COMPLETE_DEFER)
-               req->flags |= REQ_F_COMPLETE_INLINE;
-       else
-               io_req_complete_post(req);
+       io_req_complete_post(req);
 }
 
 void io_req_complete_failed(struct io_kiocb *req, s32 res)
@@ -1581,9 +1578,12 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
        if (creds)
                revert_creds(creds);
 
-       if (ret == IOU_OK)
-               __io_req_complete(req, issue_flags);
-       else if (ret != IOU_ISSUE_SKIP_COMPLETE)
+       if (ret == IOU_OK) {
+               if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+                       io_req_add_compl_list(req);
+               else
+                       io_req_complete_post(req);
+       } else if (ret != IOU_ISSUE_SKIP_COMPLETE)
                return ret;
 
        /* If the op doesn't have a file, we're not polling for it */
@@ -1749,10 +1749,6 @@ static inline void io_queue_sqe(struct io_kiocb *req)
 
        ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
 
-       if (req->flags & REQ_F_COMPLETE_INLINE) {
-               io_req_add_compl_list(req);
-               return;
-       }
        /*
         * We async punt it if the file wasn't marked NOWAIT, or if the file
         * doesn't support non-blocking read/write attempts
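
Why defer at all? Requests queued this way are completed later in one
batch (in the kernel this is the job of __io_submit_flush_completions()),
which amortises completion-side locking across many CQEs. Below is a
minimal userspace model of that batching; struct node and the helpers are
invented stand-ins for the kernel's singly-linked list machinery.

/* Illustrative model of batched completion flushing; not kernel code. */
#include <stdio.h>

struct node {
	int res;
	struct node *next;
};

/* Models ctx->submit_state.compl_reqs with a plain tail-linked list. */
static struct node *compl_head;
static struct node **compl_tail = &compl_head;

static void add_compl(struct node *n)  /* models io_req_add_compl_list() */
{
	n->next = NULL;
	*compl_tail = n;
	compl_tail = &n->next;
}

/* Models the flush step: one pass posts every queued CQE, so
 * completion-side locking is paid once per batch, not once per request. */
static void flush_completions(void)
{
	for (struct node *n = compl_head; n; n = n->next)
		printf("post CQE, res=%d\n", n->res);
	compl_head = NULL;
	compl_tail = &compl_head;
}

int main(void)
{
	struct node a = { 0, NULL }, b = { 0, NULL };

	add_compl(&a);
	add_compl(&b);
	flush_completions();           /* both CQEs posted in one batch */
	return 0;
}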
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 6744ce1..3f06fba 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -217,11 +217,6 @@ static inline bool io_run_task_work(void)
        return false;
 }
 
-static inline void io_req_complete_state(struct io_kiocb *req)
-{
-       req->flags |= REQ_F_COMPLETE_INLINE;
-}
-
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
 {
        if (!*locked) {
diff --git a/io_uring/io_uring_types.h b/io_uring/io_uring_types.h
index ef1cf86..4576ea8 100644
--- a/io_uring/io_uring_types.h
+++ b/io_uring/io_uring_types.h
@@ -301,7 +301,6 @@ enum {
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_BUFFER_RING_BIT,
-       REQ_F_COMPLETE_INLINE_BIT,
        REQ_F_REISSUE_BIT,
        REQ_F_CREDS_BIT,
        REQ_F_REFCOUNT_BIT,
@@ -356,8 +355,6 @@ enum {
        REQ_F_BUFFER_SELECTED   = BIT(REQ_F_BUFFER_SELECTED_BIT),
        /* buffer selected from ring, needs commit */
        REQ_F_BUFFER_RING       = BIT(REQ_F_BUFFER_RING_BIT),
-       /* completion is deferred through io_comp_state */
-       REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
        /* caller should reissue async */
        REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
        /* supports async reads/writes */
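
The REQ_F_* definitions above follow a common kernel idiom: a first enum
numbers the bits, and a second derives the masks via BIT(), which is why
removing a flag touches both enums in this hunk. A standalone
illustration of the idiom follows; BIT() is re-defined here for userspace
(the kernel provides it in linux/bits.h) and the EXAMPLE_F_* names are
made up for the sketch.

/* Standalone illustration of the two-enum flag idiom; not the real list. */
#include <stdio.h>

#define BIT(n) (1u << (n))

enum {
	EXAMPLE_F_POLLED_BIT,   /* each enumerator is a bit index: 0, 1, ... */
	EXAMPLE_F_CREDS_BIT,
};

enum {
	EXAMPLE_F_POLLED = BIT(EXAMPLE_F_POLLED_BIT), /* ...each mask is 1 << index */
	EXAMPLE_F_CREDS  = BIT(EXAMPLE_F_CREDS_BIT),
};

int main(void)
{
	unsigned flags = 0;

	flags |= EXAMPLE_F_CREDS;                 /* set */
	if (flags & EXAMPLE_F_CREDS)              /* test */
		printf("creds flag set, mask 0x%x\n", EXAMPLE_F_CREDS);
	flags &= ~EXAMPLE_F_CREDS;                /* clear */
	return 0;
}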