From: Pavel Begunkov
Date: Thu, 24 Aug 2023 22:53:37 +0000 (+0100)
Subject: io_uring: move iopoll ctx fields around
X-Git-Tag: v6.6.17~4088^2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=644c4a7a721fb90356cdd42219c9928a3c386230;p=platform%2Fkernel%2Flinux-rpi.git

io_uring: move iopoll ctx fields around

Move poll_multi_queue and iopoll_list to the submission cache line; it
doesn't make much sense to keep them separate, and it is a better place
for them in general.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/5b03cf7e6652e350e6e70a917eec72ba9f33b97b.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 01bdbc2..13d19b9 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -256,6 +256,15 @@ struct io_ring_ctx {
 		struct io_hash_table	cancel_table_locked;
 		struct io_alloc_cache	apoll_cache;
 		struct io_alloc_cache	netmsg_cache;
+
+		/*
+		 * ->iopoll_list is protected by the ctx->uring_lock for
+		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
+		 * For SQPOLL, only the single threaded io_sq_thread() will
+		 * manipulate the list, hence no extra locking is needed there.
+		 */
+		struct io_wq_work_list	iopoll_list;
+		bool			poll_multi_queue;
 	} ____cacheline_aligned_in_smp;
 
 	struct {
@@ -284,20 +293,6 @@ struct io_ring_ctx {
 		struct wait_queue_head	cq_wait;
 	} ____cacheline_aligned_in_smp;
 
-	struct {
-		spinlock_t		completion_lock;
-
-		bool			poll_multi_queue;
-
-		/*
-		 * ->iopoll_list is protected by the ctx->uring_lock for
-		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
-		 * For SQPOLL, only the single threaded io_sq_thread() will
-		 * manipulate the list, hence no extra locking is needed there.
-		 */
-		struct io_wq_work_list	iopoll_list;
-	} ____cacheline_aligned_in_smp;
-
 	/* timeouts */
 	struct {
 		spinlock_t		timeout_lock;
@@ -308,6 +303,8 @@ struct io_ring_ctx {
 
 	struct io_uring_cqe	completion_cqes[16];
 
+	spinlock_t		completion_lock;
+
 	/* IRQ completion list, under ->completion_lock */
 	struct io_wq_work_list	locked_free_list;
 	unsigned int		locked_free_nr;
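
For context on the layout technique the patch relies on, below is a minimal,
self-contained C sketch of the ____cacheline_aligned_in_smp pattern: fields
that are touched together (here, under one lock) are grouped into an anonymous
cacheline-aligned block, so state written by different paths lives on
different cache lines. This is userspace illustration code, not part of the
patch; the names (example_ctx, work_list, cacheline_aligned_example, the field
names) and the 64-byte line size are assumptions made for the example.

/*
 * Illustrative sketch only: a userspace stand-in for the kernel's
 * ____cacheline_aligned_in_smp grouping used in io_ring_ctx. All names
 * here are invented for the example; 64 bytes is an assumed cache line
 * size, where the kernel uses SMP_CACHE_BYTES.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define cacheline_aligned_example __attribute__((aligned(64)))

struct work_list {			/* stand-in for struct io_wq_work_list */
	void *first;
	void *last;
};

struct example_ctx {
	/* submission-side state: fields used together under one lock
	 * are grouped into one anonymous, cacheline-aligned block */
	struct {
		unsigned		cached_sq_head;
		struct work_list	iopoll_list;
		bool			poll_multi_queue;
	} cacheline_aligned_example;

	/* completion-side state: starts on its own cache line, so
	 * dirtying it does not bounce the submission-side line */
	struct {
		unsigned		cached_cq_tail;
	} cacheline_aligned_example;
};

int main(void)
{
	/* With a 64-byte line the two groups land on different lines,
	 * e.g. offsets 8 and 64 on a typical LP64 target. */
	printf("iopoll_list offset:    %zu\n",
	       offsetof(struct example_ctx, iopoll_list));
	printf("cached_cq_tail offset: %zu\n",
	       offsetof(struct example_ctx, cached_cq_tail));
	return 0;
}

Built with gcc in its default GNU C dialect, this prints offsets that fall in
separate 64-byte blocks. The patch applies the same grouping idea by moving
iopoll_list and poll_multi_queue into the submission-side block of io_ring_ctx
and keeping completion_lock next to the completion-side fields it protects.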