// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"
struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};
static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req);

	return !timeout->off;
}
static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}
static void io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req);

		if (status)
			req_set_fail(req);
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&timeout->list);
		io_req_tw_post_queue(req, status, 0);
	}
}
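
/*
 * Worked example of the wraparound-safe comparison used below: say
 * cq_last_tm_flush is 0xfffffffe and a timeout was armed with
 * target_seq == 1 (the sequence wrapped past 0).  Then events_needed =
 * 1 - 0xfffffffe = 3.  Once seq reaches 2, events_got = 2 - 0xfffffffe
 * = 4 >= 3, and the timeout fires.  Both differences stay small as
 * long as fewer than 2^31 - 1 events have elapsed since the last flush.
 */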
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
	__must_hold(&ctx->completion_lock)
{
	u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
	struct io_timeout *timeout, *tmp;

	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
	spin_unlock_irq(&ctx->timeout_lock);
}
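
/*
 * Cancel the whole link chain hanging off @req: each linked request is
 * completed with -ECANCELED unless it already failed with its own
 * result.  CQE emission honours REQ_F_SKIP_LINK_CQES set on the head.
 */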
static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *nxt, *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	req->link = NULL;
	while (link) {
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;

		nxt = link->link;
		link->link = NULL;

		trace_io_uring_fail_link(req->ctx, req, req->cqe.user_data,
					req->opcode, link);

		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		io_req_set_res(link, res, 0);
		__io_req_complete_post(link);
		link = nxt;
	}
}
static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}
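
/*
 * Two disarm cases: REQ_F_ARM_LTIMEOUT means the linked timeout was
 * prepped but its hrtimer never started, so it can be cancelled
 * directly; REQ_F_LINK_TIMEOUT means the timer is live and has to be
 * stopped under ->timeout_lock first.
 */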
bool io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;
	bool posted = false;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_tw_post_queue(link, -ECANCELED, 0);
			posted = true;
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		link = io_disarm_linked_timeout(req);
		spin_unlock_irq(&ctx->timeout_lock);
		if (link) {
			posted = true;
			io_req_tw_post_queue(link, -ECANCELED, 0);
		}
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK))) {
		posted |= (req->link != NULL);
		io_fail_links(req);
	}
	return posted;
}
struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
					    struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}
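
/*
 * hrtimer callback for a plain timeout.  This runs in (soft)irq
 * context, so only the list/accounting bookkeeping happens here; the
 * actual CQE posting is punted to task work.
 */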
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}
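
/*
 * Find a matching timeout and detach it from ->timeout_list with its
 * timer stopped.  Returns -ENOENT if nothing matched, or -EALREADY if
 * the timer callback is already running and the timeout can't be
 * grabbed.
 */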
static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
		    cd->data != tmp->cqe.user_data)
			continue;
		if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
			if (cd->seq == tmp->work.cancel_seq)
				continue;
			tmp->work.cancel_seq = cd->seq;
		}
		req = tmp;
		break;
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req);
	list_del_init(&timeout->list);
	return req;
}
int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}
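
/*
 * Task-work half of a fired linked timeout: try to cancel the request
 * the timeout was linked to.  ->prev carries the extra reference taken
 * in io_link_timeout_fn(), dropped here via io_put_req().
 */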
static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req);
	struct io_kiocb *prev = timeout->prev;
	int ret = -ENOENT;

	if (prev) {
		if (!(req->task->flags & PF_EXITING)) {
			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
			};

			ret = io_try_cancel(req, &cd);
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_complete_post(req);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_complete_post(req);
	}
}
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}
static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}
static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
	io->timer.function = io_link_timeout_fn;
	hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}
static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
		if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
			return -EINVAL;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}
static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}
/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
		spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
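
/*
 * Common prep for IORING_OP_TIMEOUT and IORING_OP_LINK_TIMEOUT.  The
 * SQE carries a pointer to a user timespec in ->addr (read via
 * get_timespec64(), with ->len required to be 1), optional flags in
 * ->timeout_flags, and, for plain timeouts only, a completion-event
 * count in ->off.
 */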
static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;
	data->flags = flags;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
		return -EINVAL;

	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
	}
	return 0;
}
int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}
void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req);
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to set up the timer.
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}
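
/*
 * With cancel_all set, every request owned by the task matches;
 * otherwise only chains containing a request marked REQ_F_INFLIGHT
 * (i.e. holding a file reference) are torn down.
 */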
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
	__must_hold(&head->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}
/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	int canceled = 0;

	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tsk, cancel_all)) {
			io_kill_timeout(req, -ECANCELED);
			canceled++;
		}
	}
	spin_unlock_irq(&ctx->timeout_lock);
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	if (canceled != 0)
		io_cqring_ev_posted(ctx);
	return canceled != 0;
}