io_uring: allow IORING_OP_ASYNC_CANCEL with 'fd' key
Author:    Jens Axboe <axboe@kernel.dk>
Date:      Mon, 18 Apr 2022 16:44:01 +0000 (10:44 -0600)
Committer: Jens Axboe <axboe@kernel.dk>
Committed: Mon, 25 Apr 2022 00:18:18 +0000 (18:18 -0600)
Currently sqe->addr must contain the user_data of the request being
canceled. Introduce the IORING_ASYNC_CANCEL_FD flag, which tells the
kernel to key off the file descriptor passed in sqe->fd for cancelation
instead. This allows canceling any request that a) uses a file, and
b) was assigned the file based on the fd value passed in.
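
A minimal userspace sketch of the new interface (not part of this
patch): it assumes an initialized liburing ring 'ring' and an open
descriptor 'fd', sets the raw sqe fields directly rather than relying
on any prep helper, and elides error handling.

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    struct io_uring_cqe *cqe;

    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = IORING_OP_ASYNC_CANCEL;
    sqe->fd = fd;	/* cancelation key is the fd, not user_data */
    sqe->cancel_flags = IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL;

    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    /* without CANCEL_ALL, res is 0 on success or -ENOENT if no
       request matched the fd */
    io_uring_cqe_seen(&ring, cqe);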

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Link: https://lore.kernel.org/r/20220418164402.75259-5-axboe@kernel.dk
fs/io_uring.c
include/uapi/linux/io_uring.h

diff --git a/fs/io_uring.c b/fs/io_uring.c
index b43cdf1a455537bb1acee416db2300050284da0a..cf0d5437b77d3eb4bdbdb150575d3a346d52ea77 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -587,6 +587,7 @@ struct io_cancel {
        struct file                     *file;
        u64                             addr;
        u32                             flags;
+       s32                             fd;
 };
 
 struct io_timeout {
@@ -992,7 +993,10 @@ struct io_defer_entry {
 
 struct io_cancel_data {
        struct io_ring_ctx *ctx;
-       u64 data;
+       union {
+               u64 data;
+               struct file *file;
+       };
        u32 flags;
        int seq;
 };
@@ -6332,6 +6336,29 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
        return NULL;
 }
 
+static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
+                                         struct io_cancel_data *cd)
+       __must_hold(&ctx->completion_lock)
+{
+       struct io_kiocb *req;
+       int i;
+
+       for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
+               struct hlist_head *list;
+
+               list = &ctx->cancel_hash[i];
+               hlist_for_each_entry(req, list, hash_node) {
+                       if (req->file != cd->file)
+                               continue;
+                       if (cd->seq == req->work.cancel_seq)
+                               continue;
+                       req->work.cancel_seq = cd->seq;
+                       return req;
+               }
+       }
+       return NULL;
+}
+
 static bool io_poll_disarm(struct io_kiocb *req)
        __must_hold(&ctx->completion_lock)
 {
@@ -6345,8 +6372,12 @@ static bool io_poll_disarm(struct io_kiocb *req)
 static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
        __must_hold(&ctx->completion_lock)
 {
-       struct io_kiocb *req = io_poll_find(ctx, false, cd);
+       struct io_kiocb *req;
 
+       if (cd->flags & IORING_ASYNC_CANCEL_FD)
+               req = io_poll_file_find(ctx, cd);
+       else
+               req = io_poll_find(ctx, false, cd);
        if (!req)
                return -ENOENT;
        io_poll_cancel_req(req);
@@ -6796,8 +6827,13 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
 
        if (req->ctx != cd->ctx)
                return false;
-       if (req->cqe.user_data != cd->data)
-               return false;
+       if (cd->flags & IORING_ASYNC_CANCEL_FD) {
+               if (req->file != cd->file)
+                       return false;
+       } else {
+               if (req->cqe.user_data != cd->data)
+                       return false;
+       }
        if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
                if (cd->seq == req->work.cancel_seq)
                        return false;
@@ -6851,7 +6887,8 @@ static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
        ret = io_poll_cancel(ctx, cd);
        if (ret != -ENOENT)
                goto out;
-       ret = io_timeout_cancel(ctx, cd);
+       if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
+               ret = io_timeout_cancel(ctx, cd);
 out:
        spin_unlock(&ctx->completion_lock);
        return ret;
@@ -6862,15 +6899,17 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 {
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+       if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;
        if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
                return -EINVAL;
 
        req->cancel.addr = READ_ONCE(sqe->addr);
        req->cancel.flags = READ_ONCE(sqe->cancel_flags);
-       if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL)
+       if (req->cancel.flags & ~(IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_FD))
                return -EINVAL;
+       if (req->cancel.flags & IORING_ASYNC_CANCEL_FD)
+               req->cancel.fd = READ_ONCE(sqe->fd);
 
        return 0;
 }
@@ -6919,7 +6958,21 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
        };
        int ret;
 
+       if (cd.flags & IORING_ASYNC_CANCEL_FD) {
+               if (req->flags & REQ_F_FIXED_FILE)
+                       req->file = io_file_get_fixed(req, req->cancel.fd,
+                                                       issue_flags);
+               else
+                       req->file = io_file_get_normal(req, req->cancel.fd);
+               if (!req->file) {
+                       ret = -EBADF;
+                       goto done;
+               }
+               cd.file = req->file;
+       }
+
        ret = __io_async_cancel(&cd, req, issue_flags);
+done:
        if (ret < 0)
                req_set_fail(req);
        io_req_complete_post(req, ret, 0);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 476e58a2837f4e5ef77c685bdf26ea0859add388..cc7fe82a179888eed72919f969cd1b08e0cce958 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -191,8 +191,11 @@ enum {
  * ASYNC_CANCEL flags.
  *
  * IORING_ASYNC_CANCEL_ALL     Cancel all requests that match the given key
+ * IORING_ASYNC_CANCEL_FD      Key off 'fd' for cancelation rather than the
+ *                             request 'user_data'
  */
 #define IORING_ASYNC_CANCEL_ALL        (1U << 0)
+#define IORING_ASYNC_CANCEL_FD (1U << 1)
 
 /*
  * IO completion data structure (Completion Queue Entry)
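
How the two flags combine, as a hedged illustration (behavior per this
patch and the CANCEL_ALL patch earlier in the same series; not text
from the header itself):

    /* cancel only the first pending request found matching 'fd' */
    sqe->cancel_flags = IORING_ASYNC_CANCEL_FD;

    /* cancel every pending request matching 'fd'; cqe->res then holds
       the number of requests canceled */
    sqe->cancel_flags = IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL;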