io_uring: move non aligned field to the end
author: Pavel Begunkov <asml.silence@gmail.com>
Thu, 24 Aug 2023 22:53:33 +0000 (23:53 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Thu, 24 Aug 2023 23:16:19 +0000 (17:16 -0600)
Move the non-cache-aligned fields down in io_ring_ctx. This shouldn't change
anything, but it makes further refactoring easier.
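
For illustration only, a minimal userspace sketch of the layout idea behind
this change, assuming a 64-byte cache line and using a plain
__attribute__((aligned(64))) in place of the kernel's
____cacheline_aligned_in_smp. The struct and field names below are made up
and are not the real io_ring_ctx; the sketch just shows that fields kept
outside the aligned groups land after them, so the hot sections keep their
cache-line alignment while the cold fields remain easy to reshuffle.

/* Userspace sketch, not kernel code: hot fields are grouped into
 * cache-line-aligned sections, cold fields are parked at the end.
 * CACHELINE and all field names are assumptions for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define CACHELINE 64

struct example_ctx {
        /* hot submission-side fields, own cache line(s) */
        struct {
                unsigned int    cached_sq_head;
                unsigned int    sq_entries;
        } __attribute__((aligned(CACHELINE))) submit;

        /* hot completion-side fields, own cache line(s) */
        struct {
                unsigned int    cached_cq_tail;
                unsigned int    cq_entries;
        } __attribute__((aligned(CACHELINE))) complete;

        /* cold fields moved to the end: they need no special alignment,
         * and keeping them out of the aligned groups makes it simpler to
         * move members around without disturbing the hot sections */
        unsigned int            locked_free_nr;
        unsigned long           check_cq;
        unsigned int            file_alloc_start;
        unsigned int            file_alloc_end;
};

int main(void)
{
        /* the aligned sections still start on cache-line boundaries,
         * with the cold fields simply following them */
        printf("submit   @ %zu\n", offsetof(struct example_ctx, submit));
        printf("complete @ %zu\n", offsetof(struct example_ctx, complete));
        printf("cold     @ %zu\n", offsetof(struct example_ctx, locked_free_nr));
        printf("size     = %zu\n", sizeof(struct example_ctx));
        return 0;
}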

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/518e95d7888e9d481b2c5968dcf3f23db9ea47a5.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/linux/io_uring_types.h

index 608a8e8..ad87d60 100644
@@ -270,24 +270,6 @@ struct io_ring_ctx {
                struct io_alloc_cache   netmsg_cache;
        } ____cacheline_aligned_in_smp;
 
-       /* IRQ completion list, under ->completion_lock */
-       struct io_wq_work_list  locked_free_list;
-       unsigned int            locked_free_nr;
-
-       const struct cred       *sq_creds;      /* cred used for __io_sq_thread() */
-       struct io_sq_data       *sq_data;       /* if using sq thread polling */
-
-       struct wait_queue_head  sqo_sq_wait;
-       struct list_head        sqd_list;
-
-       unsigned long           check_cq;
-
-       unsigned int            file_alloc_start;
-       unsigned int            file_alloc_end;
-
-       struct xarray           personalities;
-       u32                     pers_next;
-
        struct {
                /*
                 * We cache a range of free CQEs we can use, once exhausted it
@@ -332,6 +314,24 @@ struct io_ring_ctx {
                unsigned                cq_last_tm_flush;
        } ____cacheline_aligned_in_smp;
 
+       /* IRQ completion list, under ->completion_lock */
+       struct io_wq_work_list  locked_free_list;
+       unsigned int            locked_free_nr;
+
+       const struct cred       *sq_creds;      /* cred used for __io_sq_thread() */
+       struct io_sq_data       *sq_data;       /* if using sq thread polling */
+
+       struct wait_queue_head  sqo_sq_wait;
+       struct list_head        sqd_list;
+
+       unsigned long           check_cq;
+
+       unsigned int            file_alloc_start;
+       unsigned int            file_alloc_end;
+
+       struct xarray           personalities;
+       u32                     pers_next;
+
        /* Keep this last, we don't need it for the fast path */
        struct wait_queue_head          poll_wq;
        struct io_restriction           restrictions;