io_uring: hold locks for io_req_complete_failed
Author:     Pavel Begunkov <asml.silence@gmail.com>
AuthorDate: Wed, 23 Nov 2022 11:33:37 +0000
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 1 Feb 2023 07:34:41 +0000
[ Upstream commit e276ae344a770f91912a81c6a338d92efd319be2 ]

A preparation patch: make sure we always hold uring_lock around
io_req_complete_failed(). The only place deviating from the rule
is io_cancel_defer_files(), which now queues task_work instead.
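
To illustrate the contract being enforced, here is a minimal
userspace sketch with a pthread mutex standing in for uring_lock;
ring_ctx, complete_failed() and the "held" flag are hypothetical
names for illustration, not kernel API:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct ring_ctx {
            pthread_mutex_t lock;   /* stands in for ctx->uring_lock */
            int held;               /* poor man's lockdep state */
    };

    struct request {
            struct ring_ctx *ctx;
            int res;
    };

    static void ctx_lock(struct ring_ctx *ctx)
    {
            pthread_mutex_lock(&ctx->lock);
            ctx->held = 1;
    }

    static void ctx_unlock(struct ring_ctx *ctx)
    {
            ctx->held = 0;
            pthread_mutex_unlock(&ctx->lock);
    }

    /* Must be called with ctx->lock held; the assert plays the role
     * of lockdep_assert_held(&req->ctx->uring_lock) in the patch. */
    static void complete_failed(struct request *req, int res)
    {
            assert(req->ctx->held);
            req->res = res;
            printf("request failed: %d\n", res);
    }

    int main(void)
    {
            struct ring_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };
            struct request req = { .ctx = &ctx };

            ctx_lock(&ctx);
            complete_failed(&req, -125);    /* -ECANCELED */
            ctx_unlock(&ctx);
            return 0;
    }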

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/70760344eadaecf2939287084b9d4ba5c05a6984.1669203009.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: ef5c600adb1d ("io_uring: always prep_async for drain requests")
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9b1c917..6b81a0d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -871,9 +871,12 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
 }
 
 void io_req_complete_failed(struct io_kiocb *req, s32 res)
+       __must_hold(&ctx->uring_lock)
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
 
+       lockdep_assert_held(&req->ctx->uring_lock);
+
        req_set_fail(req);
        io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
        if (def->fail)
@@ -1631,6 +1634,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
 }
 
 static __cold void io_drain_req(struct io_kiocb *req)
+       __must_hold(&ctx->uring_lock)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_defer_entry *de;
@@ -2867,7 +2871,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
        while (!list_empty(&list)) {
                de = list_first_entry(&list, struct io_defer_entry, list);
                list_del_init(&de->list);
-               io_req_complete_failed(de->req, -ECANCELED);
+               io_req_task_queue_fail(de->req, -ECANCELED);
                kfree(de);
        }
        return true;
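
The replacement in io_cancel_defer_files() works because
io_req_task_queue_fail() does not complete the request inline: it
queues task_work, and the handler later fails the request in a
context where uring_lock is held, so the new rule stays intact
without taking the lock in the cancellation path. A minimal sketch of
that defer-then-complete shape, assuming a simplified single-producer
list (queue_fail() and run_task_work() are illustrative names, not
the kernel implementation):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;

    struct fail_work {
            struct fail_work *next;
            int res;
    };

    static struct fail_work *fail_list;

    /* Cancellation path: only link the request, don't complete it.
     * The kernel pushes onto an atomic llist; a plain pointer is
     * enough for this single-threaded sketch. */
    static void queue_fail(struct fail_work *w, int res)
    {
            w->res = res;
            w->next = fail_list;
            fail_list = w;
    }

    /* Task-work path: runs with uring_lock held and fails the
     * queued requests there. */
    static void run_task_work(void)
    {
            pthread_mutex_lock(&uring_lock);
            while (fail_list) {
                    struct fail_work *w = fail_list;

                    fail_list = w->next;
                    printf("deferred completion: %d\n", w->res);
                    free(w);
            }
            pthread_mutex_unlock(&uring_lock);
    }

    int main(void)
    {
            struct fail_work *w = malloc(sizeof(*w));

            if (!w)
                    return 1;
            queue_fail(w, -125);    /* -ECANCELED */
            run_task_work();
            return 0;
    }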