From: Pavel Begunkov Date: Tue, 19 Jan 2021 13:32:39 +0000 (+0000) Subject: io_uring: further deduplicate #CQ events calc X-Git-Tag: v5.15~1617^2~110 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=888aae2eeddfe1d6c9731cf4af1a1b2605af6470;p=platform%2Fkernel%2Flinux-starfive.git io_uring: further deduplicate #CQ events calc Apparently, there is one more place with a hand-coded calculation of the number of CQ events in the ring. Use the __io_cqring_events() helper in io_get_cqring() as well. Naturally, the assembly stays identical. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- diff --git a/fs/io_uring.c b/fs/io_uring.c index 347bdcd..0a578c4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1700,21 +1700,25 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx) return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries; } +static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx) +{ + return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); +} + static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) { struct io_rings *rings = ctx->rings; unsigned tail; - tail = ctx->cached_cq_tail; /* * writes to the cq entry need to come after reading head; the * control dependency is enough as we're using WRITE_ONCE to * fill the cq entry */ - if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries) + if (__io_cqring_events(ctx) == rings->cq_ring_entries) return NULL; - ctx->cached_cq_tail++; + tail = ctx->cached_cq_tail++; return &rings->cqes[tail & ctx->cq_mask]; } @@ -1729,11 +1733,6 @@ static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx) return io_wq_current_is_worker(); } -static inline unsigned __io_cqring_events(struct io_ring_ctx *ctx) -{ - return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); -} - static void io_cqring_ev_posted(struct io_ring_ctx *ctx) { /* see waitqueue_active() comment */