From: Pavel Begunkov
Date: Wed, 20 Apr 2022 12:40:54 +0000 (+0100)
Subject: io_uring: move timeout locking in io_timeout_cancel()
X-Git-Tag: v6.1-rc5~1365^2~19
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3645c2000a7694022c39c545676c12fb9190855a;p=platform%2Fkernel%2Flinux-starfive.git

io_uring: move timeout locking in io_timeout_cancel()

Move ->timeout_lock grabbing inside of io_timeout_cancel(), so we can do
io_req_task_queue_fail() outside of the lock. It's much nicer than relying
on triple nested locking.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/cde758c2897930d31e205ed8f476d4ec879a8849.1650458197.git.asml.silence@gmail.com
[axboe: drop now wrong timeout_lock annotation]
Signed-off-by: Jens Axboe
---

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 4c8932d..bf06e30 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6509,9 +6509,12 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 
 static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
         __must_hold(&ctx->completion_lock)
-        __must_hold(&ctx->timeout_lock)
 {
-        struct io_kiocb *req = io_timeout_extract(ctx, user_data);
+        struct io_kiocb *req;
+
+        spin_lock_irq(&ctx->timeout_lock);
+        req = io_timeout_extract(ctx, user_data);
+        spin_unlock_irq(&ctx->timeout_lock);
 
         if (IS_ERR(req))
                 return PTR_ERR(req);
@@ -6630,9 +6633,7 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 
         if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
                 spin_lock(&ctx->completion_lock);
-                spin_lock_irq(&ctx->timeout_lock);
                 ret = io_timeout_cancel(ctx, tr->addr);
-                spin_unlock_irq(&ctx->timeout_lock);
                 spin_unlock(&ctx->completion_lock);
         } else {
                 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
@@ -6818,10 +6819,7 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
         ret = io_poll_cancel(ctx, sqe_addr, false);
         if (ret != -ENOENT)
                 goto out;
-
-        spin_lock_irq(&ctx->timeout_lock);
         ret = io_timeout_cancel(ctx, sqe_addr);
-        spin_unlock_irq(&ctx->timeout_lock);
 out:
         spin_unlock(&ctx->completion_lock);
         return ret;
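
For readers outside the kernel tree, the sketch below illustrates the locking
pattern the patch adopts: the caller no longer nests ->timeout_lock under its
own locks; instead the cancel helper takes the inner lock only around the
extract step, so the failure path runs with the lock already dropped. This is
a minimal userspace illustration, not the kernel code: pthread mutexes stand
in for the kernel spinlocks, and the names (pending_lock, extract_entry,
fail_entry, cancel_entry) are invented for the example.

/*
 * Simplified userspace sketch of the post-patch locking pattern.
 * pending_lock plays the role of ->timeout_lock; fail_entry() plays the
 * role of io_req_task_queue_fail() and runs outside the lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        unsigned long long user_data;
        struct entry *next;
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *pending;           /* protected by pending_lock */

/* Unlink the matching entry; caller must hold pending_lock. */
static struct entry *extract_entry(unsigned long long user_data)
{
        struct entry **pp;

        for (pp = &pending; *pp; pp = &(*pp)->next) {
                if ((*pp)->user_data == user_data) {
                        struct entry *e = *pp;

                        *pp = e->next;
                        return e;
                }
        }
        return NULL;
}

/* Complete the entry with an error; kept outside pending_lock on purpose. */
static void fail_entry(struct entry *e)
{
        printf("cancelled %llu\n", e->user_data);
        free(e);
}

/*
 * After the patch: the inner lock is taken inside the cancel helper and
 * only around the extract step, so callers need no nested locking and the
 * failure path runs lock-free.
 */
static int cancel_entry(unsigned long long user_data)
{
        struct entry *e;

        pthread_mutex_lock(&pending_lock);
        e = extract_entry(user_data);
        pthread_mutex_unlock(&pending_lock);

        if (!e)
                return -1;      /* stands in for -ENOENT */
        fail_entry(e);
        return 0;
}

int main(void)
{
        struct entry *e = malloc(sizeof(*e));

        e->user_data = 42;
        e->next = NULL;
        pending = e;
        return cancel_entry(42) ? 1 : 0;
}

The shape mirrors the diff above: before the patch, io_timeout_remove() and
io_try_cancel_userdata() had to take completion_lock, then timeout_lock, and
only then cancel; afterwards io_timeout_cancel() scopes timeout_lock to the
io_timeout_extract() call, which is why the __must_hold(&ctx->timeout_lock)
annotation is dropped.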