#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that multishot should be removed and the
	 * result set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
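
/*
 * Illustrative only (not part of this header): a multishot opcode handler
 * that hits a terminal error would typically set the result and ask the
 * poll machinery to drop the multishot arming, e.g.:
 *
 *	if (err) {
 *		io_req_set_res(req, err, 0);
 *		return IOU_STOP_MULTISHOT;
 *	}
 *	return IOU_ISSUE_SKIP_COMPLETE;
 */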

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
		bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_lockdep_assert_cq_locked(ctx)				\
	do {								\
		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
			lockdep_assert_held(&ctx->uring_lock);		\
		} else if (!ctx->task_complete) {			\
			lockdep_assert_held(&ctx->completion_lock);	\
		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
			lockdep_assert(current_work());			\
		} else {						\
			lockdep_assert(current == ctx->submitter_task);	\
		}							\
	} while (0)

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, true);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)
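
/*
 * Illustrative use (not part of this header): walking a submission link
 * chain, e.g. to find a request matching some cancelation criteria:
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, head) {
 *		if (cur->cqe.user_data == target)
 *			break;
 *	}
 */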

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
							bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return false;

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
				   struct io_kiocb *req)
{
	if (likely(__io_fill_cqe_req(ctx, req)))
		return true;
	return io_req_cqe_overflow(req);
}
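
/*
 * Rough sketch of the posting flow (illustrative; real callers go through
 * the CQ locking helpers in io_uring.c): fill the CQE while the CQ side is
 * locked, then publish the new tail and wake waiters:
 *
 *	io_fill_cqe_req(ctx, req);
 *	io_commit_cqring(ctx);
 *	io_cqring_wake(ctx);
 */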

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock in that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
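
/*
 * Typical usage (illustrative): opcode handlers that touch ctx state bracket
 * the access with this conditional lock/unlock pair, passing the same
 * issue_flags to both sides:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... manipulate fixed files, buffers, etc ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */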

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}
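
/*
 * Note: the smp_store_release() above pairs with the acquire load that
 * userspace (e.g. liburing) performs on cq.tail, so CQE contents are visible
 * before the updated tail is observed.
 */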

/* requires smp_mb() prior, see wq_has_sleeper() */
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (waitqueue_active(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	smp_mb();
	__io_cqring_wake(ctx);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
}

static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
	int ret = 0;
	int ret2;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		ret = io_run_local_work(ctx);

	/* want to run this after in case more is added */
	ret2 = io_run_task_work();

	/*
	 * Propagate an error in preference to the count of items run,
	 * but still make sure to run the task work if requested.
	 */
	if (ret >= 0)
		ret += ret2;
	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
{
	bool locked;
	int ret;

	if (llist_empty(&ctx->work_llist))
		return 0;

	locked = true;
	ret = __io_run_local_work(ctx, &locked);
	/* shouldn't happen! */
	if (WARN_ON_ONCE(!locked))
		mutex_lock(&ctx->uring_lock);
	return ret;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}
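
/*
 * Note: task references are handed out from a per-task cache and only
 * replenished in batches by io_task_refs_refill(), keeping atomic ops off
 * the per-request fast path.
 */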

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}
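
/*
 * Illustrative allocation pattern (not part of this header): callers refill
 * the cache first and only then extract a request, since io_alloc_req()
 * assumes the free list is non-empty:
 *
 *	if (unlikely(!io_alloc_req_refill(ctx)))
 *		return NULL;
 *	req = io_alloc_req(ctx);
 */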

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}
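
/*
 * Illustrative use (not part of this header): when a request must be failed
 * from a context that cannot post CQEs directly, e.g. a cancelation path,
 * set the result and punt the completion to task_work:
 *
 *	io_req_queue_tw_complete(req, -ECANCELED);
 */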