// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

struct io_cancel {
        struct file                     *file;
        u64                             addr;
        u32                             flags;
        s32                             fd;
};

#define CANCEL_FLAGS    (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
                         IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED)

/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
        if (req->ctx != cd->ctx)
                return false;
        if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
                /* any request on this ring is a match */
                ;
        } else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
                if (req->file != cd->file)
                        return false;
        } else {
                if (req->cqe.user_data != cd->data)
                        return false;
        }
        if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
                if (cd->seq == req->work.cancel_seq)
                        return false;
                req->work.cancel_seq = cd->seq;
        }
        return true;
}
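
/* Matching callback handed to io_wq_cancel_cb(): does this work item match 'cd'? */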
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_cancel_data *cd = data;

        return io_cancel_req_match(req, cd);
}
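
/*
 * Try to cancel a matching work item in the given task's io-wq. Returns 0 if
 * the work was cancelled, -EALREADY if it is already running, -ENOENT if no
 * match was found.
 */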
static int io_async_cancel_one(struct io_uring_task *tctx,
                               struct io_cancel_data *cd)
{
        enum io_wq_cancel cancel_ret;
        int ret = 0;
        bool all;

        if (!tctx || !tctx->io_wq)
                return -ENOENT;

        all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
        cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
        switch (cancel_ret) {
        case IO_WQ_CANCEL_OK:
                ret = 0;
                break;
        case IO_WQ_CANCEL_RUNNING:
                ret = -EALREADY;
                break;
        case IO_WQ_CANCEL_NOTFOUND:
                ret = -ENOENT;
                break;
        }

        return ret;
}
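
/*
 * Attempt cancelation in order: io-wq work, then armed poll requests, then
 * (unless this is a file descriptor match) pending timeouts.
 */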
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
                  unsigned int issue_flags)
{
        struct io_ring_ctx *ctx = cd->ctx;
        int ret;

        WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

        ret = io_async_cancel_one(tctx, cd);
        /*
         * Fall through even for -EALREADY, as we may have a poll request
         * armed that needs unarming.
         */
        if (!ret)
                return 0;

        ret = io_poll_cancel(ctx, cd, issue_flags);
        if (ret != -ENOENT)
                return ret;

        spin_lock(&ctx->completion_lock);
        if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
                ret = io_timeout_cancel(ctx, cd);
        spin_unlock(&ctx->completion_lock);
        return ret;
}
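
/* Prepare an IORING_OP_ASYNC_CANCEL request: validate and copy the SQE fields */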
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

        if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;
        if (sqe->off || sqe->len || sqe->splice_fd_in)
                return -EINVAL;

        cancel->addr = READ_ONCE(sqe->addr);
        cancel->flags = READ_ONCE(sqe->cancel_flags);
        if (cancel->flags & ~CANCEL_FLAGS)
                return -EINVAL;
        if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
                if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
                        return -EINVAL;
                cancel->fd = READ_ONCE(sqe->fd);
        }

        return 0;
}
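
/*
 * Core cancel loop: try the issuing task's context first, then fall back to
 * every io-wq attached to the ring. With ALL/ANY set, returns the number of
 * requests cancelled; otherwise returns the first result that isn't -ENOENT.
 */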
static int __io_async_cancel(struct io_cancel_data *cd,
                             struct io_uring_task *tctx,
                             unsigned int issue_flags)
{
        bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
        struct io_ring_ctx *ctx = cd->ctx;
        struct io_tctx_node *node;
        int ret, nr = 0;

        do {
                ret = io_try_cancel(tctx, cd, issue_flags);
                if (ret == -ENOENT)
                        break;
                if (!all)
                        return ret;
                nr++;
        } while (1);

        /* slow path, try all io-wq's */
        io_ring_submit_lock(ctx, issue_flags);
        ret = -ENOENT;
        list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
                struct io_uring_task *tctx = node->task->io_uring;

                ret = io_async_cancel_one(tctx, cd);
                if (ret != -ENOENT) {
                        if (!all)
                                break;
                        nr++;
                }
        }
        io_ring_submit_unlock(ctx, issue_flags);
        return all ? nr : ret;
}
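
/* Issue handler for IORING_OP_ASYNC_CANCEL */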
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
        struct io_cancel_data cd = {
                .ctx    = req->ctx,
                .data   = cancel->addr,
                .flags  = cancel->flags,
                .seq    = atomic_inc_return(&req->ctx->cancel_seq),
        };
        struct io_uring_task *tctx = req->task->io_uring;
        int ret;

        if (cd.flags & IORING_ASYNC_CANCEL_FD) {
                if (req->flags & REQ_F_FIXED_FILE ||
                    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
                        req->flags |= REQ_F_FIXED_FILE;
                        req->file = io_file_get_fixed(req, cancel->fd,
                                                        issue_flags);
                } else {
                        req->file = io_file_get_normal(req, cancel->fd);
                }
                if (!req->file) {
                        ret = -EBADF;
                        goto done;
                }
                cd.file = req->file;
        }

        ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}
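
/* Initialize every bucket lock and list head in the hash table */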
void init_hash_table(struct io_hash_table *table, unsigned size)
{
        unsigned int i;

        for (i = 0; i < size; i++) {
                spin_lock_init(&table->hbs[i].lock);
                INIT_HLIST_HEAD(&table->hbs[i].list);
        }
}
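
/*
 * One pass of synchronous cancelation. A fixed file must be looked up on
 * every pass, because the uring_lock is dropped between attempts.
 */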
static int __io_sync_cancel(struct io_uring_task *tctx,
                            struct io_cancel_data *cd, int fd)
{
        struct io_ring_ctx *ctx = cd->ctx;

        /* fixed must be grabbed every time since we drop the uring_lock */
        if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
            (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
                if (unlikely(fd >= ctx->nr_user_files))
                        return -EBADF;
                fd = array_index_nospec(fd, ctx->nr_user_files);
                cd->file = io_file_from_index(&ctx->file_table, fd);
                if (!cd->file)
                        return -EBADF;
        }

        return __io_async_cancel(cd, tctx, 0);
}
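
/*
 * Synchronous cancelation, invoked through io_uring_register() with
 * IORING_REGISTER_SYNC_CANCEL. Retries until nothing is left to cancel, a
 * signal arrives, or the user-supplied timeout expires.
 */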
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
        __must_hold(&ctx->uring_lock)
{
        struct io_cancel_data cd = {
                .ctx    = ctx,
                .seq    = atomic_inc_return(&ctx->cancel_seq),
        };
        ktime_t timeout = KTIME_MAX;
        struct io_uring_sync_cancel_reg sc;
        struct fd f = { };
        DEFINE_WAIT(wait);
        int ret;

        if (copy_from_user(&sc, arg, sizeof(sc)))
                return -EFAULT;
        if (sc.flags & ~CANCEL_FLAGS)
                return -EINVAL;
        if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
                return -EINVAL;

        cd.data = sc.addr;
        cd.flags = sc.flags;

        /* we can grab a normal file descriptor upfront */
        if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
           !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
                f = fdget(sc.fd);
                if (!f.file)
                        return -EBADF;
                cd.file = f.file;
        }

        ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

        /* found something, done! */
        if (ret != -EALREADY)
                goto out;

        if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
                struct timespec64 ts = {
                        .tv_sec         = sc.timeout.tv_sec,
                        .tv_nsec        = sc.timeout.tv_nsec
                };

                timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
        }

        /*
         * Keep looking until we get -ENOENT. We'll get woken every time a
         * request completes and will retry the cancelation.
         */
        do {
                cd.seq = atomic_inc_return(&ctx->cancel_seq);

                prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

                ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

                mutex_unlock(&ctx->uring_lock);
                if (ret != -EALREADY)
                        break;

                ret = io_run_task_work_sig(ctx);
                if (ret < 0)
                        break;
                ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
                if (!ret) {
                        ret = -ETIME;
                        break;
                }
                mutex_lock(&ctx->uring_lock);
        } while (1);

        finish_wait(&ctx->cq_wait, &wait);
        mutex_lock(&ctx->uring_lock);

        if (ret == -ENOENT || ret > 0)
                ret = 0;
out:
        fdput(f);
        return ret;
}