io_uring: remove struct io_completion
author Pavel Begunkov <asml.silence@gmail.com>
Mon, 4 Oct 2021 19:02:57 +0000 (20:02 +0100)
committer Jens Axboe <axboe@kernel.dk>
Tue, 19 Oct 2021 11:49:54 +0000 (05:49 -0600)
We keep struct io_completion only as temporary storage for cflags, so
place the field directly in io_kiocb: it's cleaner, removes extra bits
and might even be used for future optimisations.
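
As a minimal sketch of the pattern (simplified, made-up field names below,
not the real io_kiocb layout): the completion flags stop living in a union
member that aliases per-op data and become a plain field on the request,
so they can be written without first cleaning the per-op state.

    /*
     * Sketch only: stand-in structs, not the kernel code.  Before the
     * change, cflags sat in a union member aliasing per-op data and could
     * only be written after that data was cleaned.  After the change it is
     * an ordinary field and can be set at any time.
     */
    #include <stdio.h>

    struct req_before {
            union {
                    struct { int per_op_data; } op;         /* per-op state */
                    struct { unsigned int cflags; } compl;  /* aliases op   */
            };
    };

    struct req_after {
            int             per_op_data;    /* stays valid                  */
            unsigned int    result;
            unsigned int    cflags;         /* no aliasing with per-op data */
    };

    int main(void)
    {
            struct req_after req = { .per_op_data = 42 };

            /* completion state is filled in without touching per-op data */
            req.result = 0;
            req.cflags = 1U << 0;
            printf("result=%u cflags=%u per_op=%d\n",
                   req.result, req.cflags, req.per_op_data);
            return 0;
    }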

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/5299bd5c223204065464bd87a515d0e405316086.1633373302.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index aee8ecc09168b8c7f6a37d3abcd93df47e498183..c7dea155f43afc87f0bcdf26db21f76da260b0cb 100644
@@ -684,11 +684,6 @@ struct io_hardlink {
        int                             flags;
 };
 
-struct io_completion {
-       struct file                     *file;
-       u32                             cflags;
-};
-
 struct io_async_connect {
        struct sockaddr_storage         address;
 };
@@ -847,22 +842,20 @@ struct io_kiocb {
                struct io_mkdir         mkdir;
                struct io_symlink       symlink;
                struct io_hardlink      hardlink;
-               /* use only after cleaning per-op data, see io_clean_op() */
-               struct io_completion    compl;
        };
 
        u8                              opcode;
        /* polled IO has completed */
        u8                              iopoll_completed;
-
        u16                             buf_index;
+       unsigned int                    flags;
+
+       u64                             user_data;
        u32                             result;
+       u32                             cflags;
 
        struct io_ring_ctx              *ctx;
-       unsigned int                    flags;
-       atomic_t                        refs;
        struct task_struct              *task;
-       u64                             user_data;
 
        struct percpu_ref               *fixed_rsrc_refs;
        /* store used ubuf, so we can prevent reloading */
@@ -870,13 +863,13 @@ struct io_kiocb {
 
        /* used by request caches, completion batching and iopoll */
        struct io_wq_work_node          comp_list;
+       atomic_t                        refs;
        struct io_kiocb                 *link;
        struct io_task_work             io_task_work;
        /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
        struct hlist_node               hash_node;
        /* internal polling, see IORING_FEAT_FAST_POLL */
        struct async_poll               *apoll;
-
        /* opcode allocated if it needs to store data for async defer */
        void                            *async_data;
        struct io_wq_work               work;
@@ -1831,11 +1824,8 @@ static inline bool io_req_needs_clean(struct io_kiocb *req)
 static inline void io_req_complete_state(struct io_kiocb *req, long res,
                                         unsigned int cflags)
 {
-       /* clean per-opcode space, because req->compl is aliased with it */
-       if (io_req_needs_clean(req))
-               io_clean_op(req);
        req->result = res;
-       req->compl.cflags = cflags;
+       req->cflags = cflags;
        req->flags |= REQ_F_COMPLETE_INLINE;
 }
 
@@ -2321,7 +2311,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
                                                    comp_list);
 
                __io_cqring_fill_event(ctx, req->user_data, req->result,
-                                       req->compl.cflags);
+                                       req->cflags);
        }
        io_commit_cqring(ctx);
        spin_unlock(&ctx->completion_lock);