From: Pavel Begunkov
Date: Thu, 24 Aug 2023 22:53:28 +0000 (+0100)
Subject: io_uring: reorder cqring_flush and wakeups
X-Git-Tag: v6.6.17~4088^2~9
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=54927baf6c195fb512ac38b26a041ca44edb2e29;p=platform%2Fkernel%2Flinux-rpi.git

io_uring: reorder cqring_flush and wakeups

Unlike in the past, io_commit_cqring_flush() doesn't do anything that
may need io_cqring_wake() to be issued after, all requests it completes
will go via task_work.

Do io_commit_cqring_flush() after io_cqring_wake() to clean up
__io_cq_unlock_post().

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/ed32dcfeec47e6c97bd6b18c152ddce5b218403f.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cfc2dc8..7c1ef5b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -629,19 +629,11 @@ static inline void io_cq_lock(struct io_ring_ctx *ctx)
 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
 {
 	io_commit_cqring(ctx);
-
-	if (ctx->task_complete) {
-		/*
-		 * ->task_complete implies that only current might be waiting
-		 * for CQEs, and obviously, we currently don't. No one is
-		 * waiting, wakeups are futile, skip them.
-		 */
-		io_commit_cqring_flush(ctx);
-	} else {
+	if (!ctx->task_complete) {
 		spin_unlock(&ctx->completion_lock);
-		io_commit_cqring_flush(ctx);
 		io_cqring_wake(ctx);
 	}
+	io_commit_cqring_flush(ctx);
 }
 
 static void io_cq_unlock_post(struct io_ring_ctx *ctx)
@@ -649,8 +641,8 @@ static void io_cq_unlock_post(struct io_ring_ctx *ctx)
 {
 	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
-	io_commit_cqring_flush(ctx);
 	io_cqring_wake(ctx);
+	io_commit_cqring_flush(ctx);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 9b51afd..20140d3 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -985,9 +985,9 @@ copy_iov:
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 {
-	io_commit_cqring_flush(ctx);
 	if (ctx->flags & IORING_SETUP_SQPOLL)
 		io_cqring_wake(ctx);
+	io_commit_cqring_flush(ctx);
 }
 
 void io_rw_fail(struct io_kiocb *req)
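
As a side note, below is a minimal, self-contained userspace C sketch of the ordering this patch establishes in __io_cq_unlock_post(), reconstructed from the first hunk: commit, then (when completions are not task_work-only) unlock and wake, and only then the flush. The struct fields and stub helpers are illustrative stand-ins invented for this sketch, not the kernel implementations; only the call order mirrors the change.

/*
 * Userspace sketch only: models the post-patch ordering of
 * __io_cq_unlock_post().  All helpers below are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct io_ring_ctx {
	bool task_complete;	/* completions delivered via task_work only */
	int completion_lock;	/* stand-in for the completion spinlock */
};

/* Stand-in helpers that just log what the real kernel functions would do. */
static void io_commit_cqring(struct io_ring_ctx *ctx)		{ puts("commit CQ ring"); }
static void spin_unlock(int *lock)				{ puts("unlock completion_lock"); }
static void io_cqring_wake(struct io_ring_ctx *ctx)		{ puts("wake CQ waiters"); }
static void io_commit_cqring_flush(struct io_ring_ctx *ctx)	{ puts("flush (deferred work, eventfd)"); }

/* Post-patch shape: the flush runs last, after unlock + wakeup. */
static void __io_cq_unlock_post(struct io_ring_ctx *ctx)
{
	io_commit_cqring(ctx);
	if (!ctx->task_complete) {
		spin_unlock(&ctx->completion_lock);
		io_cqring_wake(ctx);
	}
	io_commit_cqring_flush(ctx);
}

int main(void)
{
	struct io_ring_ctx ctx = { .task_complete = false };

	__io_cq_unlock_post(&ctx);
	return 0;
}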