From 7b235dd82ad32c1626e51303d94ec5ef4d7bc994 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Mon, 9 Jan 2023 14:46:08 +0000
Subject: [PATCH] io_uring: separate wq for ring polling

Don't use ->cq_wait for ring polling; add a separate wait queue for it
instead. We will need it for the following patches.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/dea0be0bf990503443c5c6c337fc66824af7d590.1673274244.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/linux/io_uring_types.h | 2 +-
 io_uring/io_uring.c            | 3 ++-
 io_uring/io_uring.h            | 9 +++++++++
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 8dfb6c4..0d94ee1 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -319,7 +319,7 @@ struct io_ring_ctx {
 	} ____cacheline_aligned_in_smp;
 
 	/* Keep this last, we don't need it for the fast path */
-
+	struct wait_queue_head		poll_wq;
 	struct io_restriction		restrictions;
 	/* slow path rsrc auxilary data, used by update/register */
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 1e79f37..2ee2bca 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -316,6 +316,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
 	mutex_init(&ctx->uring_lock);
 	init_waitqueue_head(&ctx->cq_wait);
+	init_waitqueue_head(&ctx->poll_wq);
 	spin_lock_init(&ctx->completion_lock);
 	spin_lock_init(&ctx->timeout_lock);
 	INIT_WQ_LIST(&ctx->iopoll_list);
@@ -2786,7 +2787,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	struct io_ring_ctx *ctx = file->private_data;
 	__poll_t mask = 0;
 
-	poll_wait(file, &ctx->cq_wait, wait);
+	poll_wait(file, &ctx->poll_wq, wait);
 	/*
 	 * synchronizes with barrier from wq_has_sleeper call in
 	 * io_commit_cqring
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index b5975e3..c75bbb94 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -220,9 +220,18 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
 
+static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
+{
+	if (waitqueue_active(&ctx->poll_wq))
+		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
+				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+}
+
 /* requires smb_mb() prior, see wq_has_sleeper() */
 static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
 {
+	io_poll_wq_wake(ctx);
+
 	/*
 	 * Trigger waitqueue handler on all waiters on our waitqueue. This
 	 * won't necessarily wake up all the tasks, io_should_wake() will make
-- 
2.7.4
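
Editor's note (not part of the patch): the wait queue split matters to
userspace that epoll-waits on the ring fd. Below is a minimal sketch of
that usage built on liburing; it assumes liburing is installed, error
handling is mostly omitted, and it is illustrative rather than part of
this series. epoll_wait() returns once a CQE is posted and the
kernel-side io_poll_wq_wake() added above signals EPOLLIN on
ctx->poll_wq.

#include <liburing.h>
#include <sys/epoll.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct epoll_event ev = { .events = EPOLLIN };
	int epfd;

	/* small ring; a single nop completion is enough to raise EPOLLIN */
	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	epfd = epoll_create1(0);
	ev.data.fd = ring.ring_fd;
	/* this ends up in io_uring_poll(), which now sleeps on poll_wq */
	epoll_ctl(epfd, EPOLL_CTL_ADD, ring.ring_fd, &ev);

	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	/* wakes once a CQE is posted and io_poll_wq_wake() fires */
	epoll_wait(epfd, &ev, 1, -1);
	printf("CQ has entries to reap\n");

	io_uring_queue_exit(&ring);
	return 0;
}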
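
Editor's note (also not part of the patch): io_poll_wq_wake() checks
waitqueue_active() without taking the waitqueue lock. That is only safe
when a full barrier orders the CQ-tail publish against the sleeper
check; here the callers of __io_cqring_wake() supply it, per the
"requires smb_mb() prior" comment above (wq_has_sleeper() is the same
check with the smp_mb() folded in). A generic kernel-style sketch of
the pattern, with invented names (some_ctx, publish_and_wake):

#include <linux/wait.h>

struct some_ctx {
	u32 tail, cached_tail;
	struct wait_queue_head wq;
};

static void publish_and_wake(struct some_ctx *ctx)
{
	/* publish the data the waiter is looking for */
	smp_store_release(&ctx->tail, ctx->cached_tail);
	/*
	 * Order the publish against the waitqueue_active() read below;
	 * pairs with the barrier implied on the sleeper's side by
	 * prepare_to_wait()/poll_wait(). Without it the producer can
	 * miss a sleeper and the sleeper can miss the new tail.
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wq))
		wake_up(&ctx->wq);
}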