1 // SPDX-License-Identifier: GPL-2.0
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqe (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid CQ entries.
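 *
 * As an illustrative sketch (not code from this file; liburing-style
 * helper names are assumed), a userspace consumer following the rules
 * above might drain the CQ ring like this:
 *
 *	unsigned head = *cq_ring_head;
 *	unsigned tail = io_uring_smp_load_acquire(cq_ring_tail);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_ring_mask];
 *		consume_cqe(cqe);
 *		head++;
 *	}
 *	io_uring_smp_store_release(cq_ring_head, head);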
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read the head will do).
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed between.
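 *
 * A minimal sketch of the SQPOLL submission side (again with assumed,
 * liburing-style names; the seq_cst fence is the userspace equivalent of
 * smp_mb()): publish the new tail first, then check the wakeup flag:
 *
 *	io_uring_smp_store_release(sq_ring_tail, new_tail);
 *	atomic_thread_fence(memory_order_seq_cst);
 *	if (IO_URING_READ_ONCE(*sq_ring_flags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);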
30 * Also see the examples in the liburing library:
32 * git://git.kernel.dk/liburing
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes and to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <linux/compat.h>
47 #include <net/compat.h>
48 #include <linux/refcount.h>
49 #include <linux/uio.h>
50 #include <linux/bits.h>
52 #include <linux/sched/signal.h>
54 #include <linux/file.h>
55 #include <linux/fdtable.h>
57 #include <linux/mman.h>
58 #include <linux/percpu.h>
59 #include <linux/slab.h>
60 #include <linux/blk-mq.h>
61 #include <linux/bvec.h>
62 #include <linux/net.h>
64 #include <net/af_unix.h>
66 #include <linux/anon_inodes.h>
67 #include <linux/sched/mm.h>
68 #include <linux/uaccess.h>
69 #include <linux/nospec.h>
70 #include <linux/sizes.h>
71 #include <linux/hugetlb.h>
72 #include <linux/highmem.h>
73 #include <linux/namei.h>
74 #include <linux/fsnotify.h>
75 #include <linux/fadvise.h>
76 #include <linux/eventpoll.h>
77 #include <linux/splice.h>
78 #include <linux/task_work.h>
79 #include <linux/pagemap.h>
80 #include <linux/io_uring.h>
81 #include <linux/audit.h>
82 #include <linux/security.h>
83 #include <linux/xattr.h>
85 #define CREATE_TRACE_POINTS
86 #include <trace/events/io_uring.h>
88 #include <uapi/linux/io_uring.h>
93 #define IORING_MAX_ENTRIES 32768
94 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
95 #define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
98 #define IORING_MAX_FIXED_FILES (1U << 20)
99 #define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
100 IORING_REGISTER_LAST + IORING_OP_LAST)
102 #define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
103 #define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT)
104 #define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
106 #define IORING_MAX_REG_BUFFERS (1U << 14)
108 #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
109 IOSQE_IO_HARDLINK | IOSQE_ASYNC)
111 #define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
112 IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
114 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
115 REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
118 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
121 #define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
123 #define IO_TCTX_REFS_CACHE_NR (1U << 10)
126 u32 head ____cacheline_aligned_in_smp;
127 u32 tail ____cacheline_aligned_in_smp;
131 * This data is shared with the application through the mmap at offsets
132 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
134 * The offsets to the member fields are published through struct
135 * io_sqring_offsets when calling io_uring_setup.
139 * Head and tail offsets into the ring; the offsets need to be
140 * masked to get valid indices.
142 * The kernel controls head of the sq ring and the tail of the cq ring,
143 * and the application controls tail of the sq ring and the head of the
146 struct io_uring sq, cq;
148 * Bitmasks to apply to head and tail offsets (constant, equals
151 u32 sq_ring_mask, cq_ring_mask;
152 /* Ring sizes (constant, power of 2) */
153 u32 sq_ring_entries, cq_ring_entries;
155 * Number of invalid entries dropped by the kernel due to
156 * invalid index stored in array
158 * Written by the kernel, shouldn't be modified by the
159 * application (i.e. get number of "new events" by comparing to cached value).
162 * After a new SQ head value was read by the application this
163 * counter includes all submissions that were dropped reaching
164 * the new SQ head (and possibly more).
170 * Written by the kernel, shouldn't be modified by the
173 * The application needs a full memory barrier before checking
174 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
180 * Written by the application, shouldn't be modified by the
185 * Number of completion events lost because the queue was full;
186 * this should be avoided by the application by making sure
187 * there are not more requests pending than there is space in
188 * the completion queue.
190 * Written by the kernel, shouldn't be modified by the
191 * application (i.e. get number of "new events" by comparing to cached value).
194 * As completion events come in out of order this counter is not
195 * ordered with any other data.
199 * Ring buffer of completion events.
201 * The kernel writes completion events fresh every time they are
202 * produced, so the application is allowed to modify pending entries.
205 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
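/*
 * Illustrative use of the masking rule described above (a sketch, not a
 * helper defined in this file): indices into the SQ/CQ arrays are always
 * the raw head/tail values masked with the constant ring mask, e.g.
 *
 *	struct io_uring_cqe *cqe = &rings->cqes[tail & rings->cq_ring_mask];
 */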
208 struct io_mapped_ubuf {
211 unsigned int nr_bvecs;
212 unsigned long acct_pages;
213 struct bio_vec bvec[];
218 struct io_overflow_cqe {
219 struct list_head list;
220 struct io_uring_cqe cqe;
224 * FFS_SCM is only available on 64-bit archs, for 32-bit we just define it as 0
225 * and define IO_URING_SCM_ALL. For this case, we use SCM for all files as we
226 * can't safely always dereference the file when the task has exited and ring
227 * cleanup is done. If a file is tracked and part of SCM, then unix gc on
228 * process exit may reap it before __io_sqe_files_unregister() is run.
230 #define FFS_NOWAIT 0x1UL
231 #define FFS_ISREG 0x2UL
232 #if defined(CONFIG_64BIT)
233 #define FFS_SCM 0x4UL
235 #define IO_URING_SCM_ALL
236 #define FFS_SCM 0x0UL
238 #define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG|FFS_SCM)
240 struct io_fixed_file {
241 /* file * with additional FFS_* flags */
242 unsigned long file_ptr;
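/*
 * Sketch of the ->file_ptr encoding (not a new helper, just the packing
 * spelled out): the low, alignment-guaranteed-zero bits of the file
 * pointer carry the FFS_* flags above, and FFS_MASK recovers the pointer:
 *
 *	file_slot->file_ptr = (unsigned long)file | FFS_ISREG | FFS_NOWAIT;
 *	struct file *file = (struct file *)(file_slot->file_ptr & FFS_MASK);
 */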
246 struct list_head list;
251 struct io_mapped_ubuf *buf;
255 struct io_file_table {
256 struct io_fixed_file *files;
257 unsigned long *bitmap;
258 unsigned int alloc_hint;
261 struct io_rsrc_node {
262 struct percpu_ref refs;
263 struct list_head node;
264 struct list_head rsrc_list;
265 struct io_rsrc_data *rsrc_data;
266 struct llist_node llist;
270 typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
272 struct io_rsrc_data {
273 struct io_ring_ctx *ctx;
279 struct completion done;
283 #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
284 struct io_buffer_list {
286 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
287 * then these are classic provided buffers and ->buf_list is used.
290 struct list_head buf_list;
292 struct page **buf_pages;
293 struct io_uring_buf_ring *buf_ring;
298 /* below is for ring provided buffers */
306 struct list_head list;
313 struct io_restriction {
314 DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
315 DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
316 u8 sqe_flags_allowed;
317 u8 sqe_flags_required;
322 IO_SQ_THREAD_SHOULD_STOP = 0,
323 IO_SQ_THREAD_SHOULD_PARK,
328 atomic_t park_pending;
331 /* ctx's that are using this sqd */
332 struct list_head ctx_list;
334 struct task_struct *thread;
335 struct wait_queue_head wait;
337 unsigned sq_thread_idle;
343 struct completion exited;
346 #define IO_COMPL_BATCH 32
347 #define IO_REQ_CACHE_SIZE 32
348 #define IO_REQ_ALLOC_BATCH 8
350 struct io_submit_link {
351 struct io_kiocb *head;
352 struct io_kiocb *last;
355 struct io_submit_state {
356 /* inline/task_work completion list, under ->uring_lock */
357 struct io_wq_work_node free_list;
358 /* batch completion logic */
359 struct io_wq_work_list compl_reqs;
360 struct io_submit_link link;
365 unsigned short submit_nr;
366 struct blk_plug plug;
370 struct eventfd_ctx *cq_ev_fd;
371 unsigned int eventfd_async: 1;
375 #define BGID_ARRAY 64
378 /* const or read-mostly hot data */
380 struct percpu_ref refs;
382 struct io_rings *rings;
384 enum task_work_notify_mode notify_method;
385 unsigned int compat: 1;
386 unsigned int drain_next: 1;
387 unsigned int restricted: 1;
388 unsigned int off_timeout_used: 1;
389 unsigned int drain_active: 1;
390 unsigned int drain_disabled: 1;
391 unsigned int has_evfd: 1;
392 unsigned int syscall_iopoll: 1;
393 } ____cacheline_aligned_in_smp;
395 /* submission data */
397 struct mutex uring_lock;
400 * Ring buffer of indices into array of io_uring_sqe, which is
401 * mmapped by the application using the IORING_OFF_SQES offset.
403 * This indirection could e.g. be used to assign fixed
404 * io_uring_sqe entries to operations and only submit them to
405 * the queue when needed.
407 * The kernel modifies neither the indices array nor the entries
411 struct io_uring_sqe *sq_sqes;
412 unsigned cached_sq_head;
414 struct list_head defer_list;
417 * Fixed resources fast path, should be accessed only under
418 * uring_lock, and updated through io_uring_register(2)
420 struct io_rsrc_node *rsrc_node;
421 int rsrc_cached_refs;
423 struct io_file_table file_table;
424 unsigned nr_user_files;
425 unsigned nr_user_bufs;
426 struct io_mapped_ubuf **user_bufs;
428 struct io_submit_state submit_state;
430 struct io_buffer_list *io_bl;
431 struct xarray io_bl_xa;
432 struct list_head io_buffers_cache;
434 struct list_head timeout_list;
435 struct list_head ltimeout_list;
436 struct list_head cq_overflow_list;
437 struct list_head apoll_cache;
438 struct xarray personalities;
440 unsigned sq_thread_idle;
441 } ____cacheline_aligned_in_smp;
443 /* IRQ completion list, under ->completion_lock */
444 struct io_wq_work_list locked_free_list;
445 unsigned int locked_free_nr;
447 const struct cred *sq_creds; /* cred used for __io_sq_thread() */
448 struct io_sq_data *sq_data; /* if using sq thread polling */
450 struct wait_queue_head sqo_sq_wait;
451 struct list_head sqd_list;
453 unsigned long check_cq;
457 * We cache a range of free CQEs we can use, once exhausted it
458 * should go through a slower range setup, see __io_get_cqe()
460 struct io_uring_cqe *cqe_cached;
461 struct io_uring_cqe *cqe_sentinel;
463 unsigned cached_cq_tail;
465 struct io_ev_fd __rcu *io_ev_fd;
466 struct wait_queue_head cq_wait;
468 atomic_t cq_timeouts;
469 unsigned cq_last_tm_flush;
470 } ____cacheline_aligned_in_smp;
473 spinlock_t completion_lock;
475 spinlock_t timeout_lock;
478 * ->iopoll_list is protected by the ctx->uring_lock for
479 * io_uring instances that don't use IORING_SETUP_SQPOLL.
480 * For SQPOLL, only the single threaded io_sq_thread() will
481 * manipulate the list, hence no extra locking is needed there.
483 struct io_wq_work_list iopoll_list;
484 struct hlist_head *cancel_hash;
485 unsigned cancel_hash_bits;
486 bool poll_multi_queue;
488 struct list_head io_buffers_comp;
489 } ____cacheline_aligned_in_smp;
491 struct io_restriction restrictions;
493 /* slow path rsrc auxiliary data, used by update/register */
495 struct io_rsrc_node *rsrc_backup_node;
496 struct io_mapped_ubuf *dummy_ubuf;
497 struct io_rsrc_data *file_data;
498 struct io_rsrc_data *buf_data;
500 struct delayed_work rsrc_put_work;
501 struct llist_head rsrc_put_llist;
502 struct list_head rsrc_ref_list;
503 spinlock_t rsrc_ref_lock;
505 struct list_head io_buffers_pages;
508 /* Keep this last, we don't need it for the fast path */
510 #if defined(CONFIG_UNIX)
511 struct socket *ring_sock;
513 /* hashed buffered write serialization */
514 struct io_wq_hash *hash_map;
516 /* Only used for accounting purposes */
517 struct user_struct *user;
518 struct mm_struct *mm_account;
520 /* ctx exit and cancelation */
521 struct llist_head fallback_llist;
522 struct delayed_work fallback_work;
523 struct work_struct exit_work;
524 struct list_head tctx_list;
525 struct completion ref_comp;
527 bool iowq_limits_set;
532 * Arbitrary limit, can be raised if need be
534 #define IO_RINGFD_REG_MAX 16
536 struct io_uring_task {
537 /* submission side */
540 struct wait_queue_head wait;
541 const struct io_ring_ctx *last;
543 struct percpu_counter inflight;
544 atomic_t inflight_tracked;
547 spinlock_t task_lock;
548 struct io_wq_work_list task_list;
549 struct io_wq_work_list prio_task_list;
550 struct callback_head task_work;
551 struct file **registered_rings;
556 * First field must be the file pointer in all the
557 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
559 struct io_poll_iocb {
561 struct wait_queue_head *head;
563 struct wait_queue_entry wait;
566 struct io_poll_update {
572 bool update_user_data;
582 struct io_timeout_data {
583 struct io_kiocb *req;
584 struct hrtimer timer;
585 struct timespec64 ts;
586 enum hrtimer_mode mode;
592 struct sockaddr __user *addr;
593 int __user *addr_len;
596 unsigned long nofile;
606 unsigned long nofile;
628 struct list_head list;
629 /* head of the link, used by linked timeouts only */
630 struct io_kiocb *head;
631 /* for linked completions */
632 struct io_kiocb *prev;
635 struct io_timeout_rem {
640 struct timespec64 ts;
646 /* NOTE: kiocb has the file as the first member, so don't do it here */
655 struct sockaddr __user *addr;
662 struct compat_msghdr __user *umsg_compat;
663 struct user_msghdr __user *umsg;
676 struct filename *filename;
678 unsigned long nofile;
681 struct io_rsrc_update {
707 struct epoll_event event;
711 struct file *file_out;
719 struct io_provide_buf {
733 struct filename *filename;
734 struct statx __user *buffer;
746 struct filename *oldpath;
747 struct filename *newpath;
755 struct filename *filename;
762 struct filename *filename;
768 struct filename *oldpath;
769 struct filename *newpath;
776 struct filename *oldpath;
777 struct filename *newpath;
793 struct io_async_connect {
794 struct sockaddr_storage address;
797 struct io_async_msghdr {
798 struct iovec fast_iov[UIO_FASTIOV];
799 /* points to an allocated iov, if NULL we use fast_iov instead */
800 struct iovec *free_iov;
801 struct sockaddr __user *uaddr;
803 struct sockaddr_storage addr;
807 struct iov_iter iter;
808 struct iov_iter_state iter_state;
809 struct iovec fast_iov[UIO_FASTIOV];
813 struct io_rw_state s;
814 const struct iovec *free_iovec;
816 struct wait_page_queue wpq;
821 struct xattr_ctx ctx;
822 struct filename *filename;
826 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
827 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
828 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
829 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
830 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
831 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
832 REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
834 /* first byte is taken by user flags, shift it to not overlap */
839 REQ_F_LINK_TIMEOUT_BIT,
840 REQ_F_NEED_CLEANUP_BIT,
842 REQ_F_BUFFER_SELECTED_BIT,
843 REQ_F_BUFFER_RING_BIT,
844 REQ_F_COMPLETE_INLINE_BIT,
848 REQ_F_ARM_LTIMEOUT_BIT,
849 REQ_F_ASYNC_DATA_BIT,
850 REQ_F_SKIP_LINK_CQES_BIT,
851 REQ_F_SINGLE_POLL_BIT,
852 REQ_F_DOUBLE_POLL_BIT,
853 REQ_F_PARTIAL_IO_BIT,
854 REQ_F_APOLL_MULTISHOT_BIT,
855 /* keep async read/write and isreg together and in order */
856 REQ_F_SUPPORT_NOWAIT_BIT,
859 /* not a real bit, just to check we're not overflowing the space */
865 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
866 /* drain existing IO first */
867 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
869 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
870 /* doesn't sever on completion < 0 */
871 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
873 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
874 /* IOSQE_BUFFER_SELECT */
875 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
876 /* IOSQE_CQE_SKIP_SUCCESS */
877 REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT),
879 /* fail rest of links */
880 REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
881 /* on inflight list, should be cancelled and waited on exit reliably */
882 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
883 /* read/write uses file position */
884 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
885 /* must not punt to workers */
886 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
887 /* has or had linked timeout */
888 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
890 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
891 /* already went through poll handler */
892 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
893 /* buffer already selected */
894 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
895 /* buffer selected from ring, needs commit */
896 REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT),
897 /* completion is deferred through io_comp_state */
898 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
899 /* caller should reissue async */
900 REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
901 /* supports async reads/writes */
902 REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
904 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
905 /* has creds assigned */
906 REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
907 /* skip refcounting if not set */
908 REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
909 /* there is a linked timeout that has to be armed */
910 REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
911 /* ->async_data allocated */
912 REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
913 /* don't post CQEs while failing linked requests */
914 REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT),
915 /* single poll may be active */
916 REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT),
917 /* double poll may be active */
918 REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT),
919 /* request has already done partial IO */
920 REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
921 /* fast poll multishot mode */
922 REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
926 struct io_poll_iocb poll;
927 struct io_poll_iocb *double_poll;
930 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
932 struct io_task_work {
934 struct io_wq_work_node node;
935 struct llist_node fallback_node;
937 io_req_tw_func_t func;
941 IORING_RSRC_FILE = 0,
942 IORING_RSRC_BUFFER = 1,
948 /* fd initially, then cflags for completion */
956 IO_CHECK_CQ_OVERFLOW_BIT,
957 IO_CHECK_CQ_DROPPED_BIT,
961 * NOTE! Each of the iocb union members has the file pointer
962 * as the first entry in their struct definition. So you can
963 * access the file pointer through any of the sub-structs,
964 * or directly as just 'file' in this struct.
970 struct io_poll_iocb poll;
971 struct io_poll_update poll_update;
972 struct io_accept accept;
974 struct io_cancel cancel;
975 struct io_timeout timeout;
976 struct io_timeout_rem timeout_rem;
977 struct io_connect connect;
978 struct io_sr_msg sr_msg;
980 struct io_close close;
981 struct io_rsrc_update rsrc_update;
982 struct io_fadvise fadvise;
983 struct io_madvise madvise;
984 struct io_epoll epoll;
985 struct io_splice splice;
986 struct io_provide_buf pbuf;
987 struct io_statx statx;
988 struct io_shutdown shutdown;
989 struct io_rename rename;
990 struct io_unlink unlink;
991 struct io_mkdir mkdir;
992 struct io_symlink symlink;
993 struct io_hardlink hardlink;
995 struct io_xattr xattr;
996 struct io_socket sock;
998 struct io_uring_cmd uring_cmd;
1002 /* polled IO has completed */
1003 u8 iopoll_completed;
1005 * Can be either a fixed buffer index, or used with provided buffers.
1006 * For the latter, before issue it points to the buffer group ID,
1007 * and after selection it points to the buffer ID itself.
1014 struct io_ring_ctx *ctx;
1015 struct task_struct *task;
1017 struct io_rsrc_node *rsrc_node;
1020 /* store used ubuf, so we can prevent reloading */
1021 struct io_mapped_ubuf *imu;
1023 /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
1024 struct io_buffer *kbuf;
1027 * stores buffer ID for ring provided buffers, valid IFF
1028 * REQ_F_BUFFER_RING is set.
1030 struct io_buffer_list *buf_list;
1034 /* used by request caches, completion batching and iopoll */
1035 struct io_wq_work_node comp_list;
1036 /* cache ->apoll->events */
1037 __poll_t apoll_events;
1041 struct io_task_work io_task_work;
1042 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
1044 struct hlist_node hash_node;
1050 /* internal polling, see IORING_FEAT_FAST_POLL */
1051 struct async_poll *apoll;
1052 /* opcode allocated if it needs to store data for async defer */
1054 /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
1055 struct io_kiocb *link;
1056 /* custom credentials, valid IFF REQ_F_CREDS is set */
1057 const struct cred *creds;
1058 struct io_wq_work work;
1061 struct io_tctx_node {
1062 struct list_head ctx_node;
1063 struct task_struct *task;
1064 struct io_ring_ctx *ctx;
1067 struct io_defer_entry {
1068 struct list_head list;
1069 struct io_kiocb *req;
1073 struct io_cancel_data {
1074 struct io_ring_ctx *ctx;
1084 * The URING_CMD payload starts at 'cmd' in the first sqe, and continues into
1085 * the following sqe if SQE128 is used.
1087 #define uring_cmd_pdu_size(is_sqe128) \
1088 ((1 + !!(is_sqe128)) * sizeof(struct io_uring_sqe) - \
1089 offsetof(struct io_uring_sqe, cmd))
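/*
 * Illustrative arithmetic for uring_cmd_pdu_size(), assuming the uapi
 * layout where 'cmd' starts at offset 48 of a 64-byte SQE: a normal SQE
 * leaves 64 - 48 = 16 bytes of inline command payload, while SQE128
 * leaves (2 * 64) - 48 = 80 bytes.
 */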
1092 /* needs req->file assigned */
1093 unsigned needs_file : 1;
1094 /* should block plug */
1096 /* hash wq insertion if file is a regular file */
1097 unsigned hash_reg_file : 1;
1098 /* unbound wq insertion if file is a non-regular file */
1099 unsigned unbound_nonreg_file : 1;
1100 /* set if opcode supports polled "wait" */
1101 unsigned pollin : 1;
1102 unsigned pollout : 1;
1103 unsigned poll_exclusive : 1;
1104 /* op supports buffer selection */
1105 unsigned buffer_select : 1;
1106 /* do prep async if the request is going to be punted */
1107 unsigned needs_async_setup : 1;
1108 /* opcode is not supported by this kernel */
1109 unsigned not_supported : 1;
1111 unsigned audit_skip : 1;
1112 /* supports ioprio */
1113 unsigned ioprio : 1;
1114 /* supports iopoll */
1115 unsigned iopoll : 1;
1116 /* size of async data needed, if any */
1117 unsigned short async_size;
1120 static const struct io_op_def io_op_defs[] = {
1126 [IORING_OP_READV] = {
1128 .unbound_nonreg_file = 1,
1131 .needs_async_setup = 1,
1136 .async_size = sizeof(struct io_async_rw),
1138 [IORING_OP_WRITEV] = {
1141 .unbound_nonreg_file = 1,
1143 .needs_async_setup = 1,
1148 .async_size = sizeof(struct io_async_rw),
1150 [IORING_OP_FSYNC] = {
1154 [IORING_OP_READ_FIXED] = {
1156 .unbound_nonreg_file = 1,
1162 .async_size = sizeof(struct io_async_rw),
1164 [IORING_OP_WRITE_FIXED] = {
1167 .unbound_nonreg_file = 1,
1173 .async_size = sizeof(struct io_async_rw),
1175 [IORING_OP_POLL_ADD] = {
1177 .unbound_nonreg_file = 1,
1180 [IORING_OP_POLL_REMOVE] = {
1183 [IORING_OP_SYNC_FILE_RANGE] = {
1187 [IORING_OP_SENDMSG] = {
1189 .unbound_nonreg_file = 1,
1191 .needs_async_setup = 1,
1192 .async_size = sizeof(struct io_async_msghdr),
1194 [IORING_OP_RECVMSG] = {
1196 .unbound_nonreg_file = 1,
1199 .needs_async_setup = 1,
1200 .async_size = sizeof(struct io_async_msghdr),
1202 [IORING_OP_TIMEOUT] = {
1204 .async_size = sizeof(struct io_timeout_data),
1206 [IORING_OP_TIMEOUT_REMOVE] = {
1207 /* used by timeout updates' prep() */
1210 [IORING_OP_ACCEPT] = {
1212 .unbound_nonreg_file = 1,
1214 .poll_exclusive = 1,
1215 .ioprio = 1, /* used for flags */
1217 [IORING_OP_ASYNC_CANCEL] = {
1220 [IORING_OP_LINK_TIMEOUT] = {
1222 .async_size = sizeof(struct io_timeout_data),
1224 [IORING_OP_CONNECT] = {
1226 .unbound_nonreg_file = 1,
1228 .needs_async_setup = 1,
1229 .async_size = sizeof(struct io_async_connect),
1231 [IORING_OP_FALLOCATE] = {
1234 [IORING_OP_OPENAT] = {},
1235 [IORING_OP_CLOSE] = {},
1236 [IORING_OP_FILES_UPDATE] = {
1240 [IORING_OP_STATX] = {
1243 [IORING_OP_READ] = {
1245 .unbound_nonreg_file = 1,
1252 .async_size = sizeof(struct io_async_rw),
1254 [IORING_OP_WRITE] = {
1257 .unbound_nonreg_file = 1,
1263 .async_size = sizeof(struct io_async_rw),
1265 [IORING_OP_FADVISE] = {
1269 [IORING_OP_MADVISE] = {},
1270 [IORING_OP_SEND] = {
1272 .unbound_nonreg_file = 1,
1276 [IORING_OP_RECV] = {
1278 .unbound_nonreg_file = 1,
1283 [IORING_OP_OPENAT2] = {
1285 [IORING_OP_EPOLL_CTL] = {
1286 .unbound_nonreg_file = 1,
1289 [IORING_OP_SPLICE] = {
1292 .unbound_nonreg_file = 1,
1295 [IORING_OP_PROVIDE_BUFFERS] = {
1299 [IORING_OP_REMOVE_BUFFERS] = {
1306 .unbound_nonreg_file = 1,
1309 [IORING_OP_SHUTDOWN] = {
1312 [IORING_OP_RENAMEAT] = {},
1313 [IORING_OP_UNLINKAT] = {},
1314 [IORING_OP_MKDIRAT] = {},
1315 [IORING_OP_SYMLINKAT] = {},
1316 [IORING_OP_LINKAT] = {},
1317 [IORING_OP_MSG_RING] = {
1321 [IORING_OP_FSETXATTR] = {
1324 [IORING_OP_SETXATTR] = {},
1325 [IORING_OP_FGETXATTR] = {
1328 [IORING_OP_GETXATTR] = {},
1329 [IORING_OP_SOCKET] = {
1332 [IORING_OP_URING_CMD] = {
1335 .needs_async_setup = 1,
1336 .async_size = uring_cmd_pdu_size(1),
1340 /* requests with any of those set should undergo io_disarm_next() */
1341 #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
1342 #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
1344 static bool io_disarm_next(struct io_kiocb *req);
1345 static void io_uring_del_tctx_node(unsigned long index);
1346 static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1347 struct task_struct *task,
1349 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
1351 static void __io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags);
1352 static void io_dismantle_req(struct io_kiocb *req);
1353 static void io_queue_linked_timeout(struct io_kiocb *req);
1354 static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
1355 struct io_uring_rsrc_update2 *up,
1357 static void io_clean_op(struct io_kiocb *req);
1358 static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1359 unsigned issue_flags);
1360 static struct file *io_file_get_normal(struct io_kiocb *req, int fd);
1361 static void io_queue_sqe(struct io_kiocb *req);
1362 static void io_rsrc_put_work(struct work_struct *work);
1364 static void io_req_task_queue(struct io_kiocb *req);
1365 static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
1366 static int io_req_prep_async(struct io_kiocb *req);
1368 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1369 unsigned int issue_flags, u32 slot_index);
1370 static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags,
1371 unsigned int offset);
1372 static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1374 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
1375 static void io_eventfd_signal(struct io_ring_ctx *ctx);
1376 static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
1378 static struct kmem_cache *req_cachep;
1380 static const struct file_operations io_uring_fops;
1382 const char *io_uring_get_opcode(u8 opcode)
1384 switch ((enum io_uring_op)opcode) {
1387 case IORING_OP_READV:
1389 case IORING_OP_WRITEV:
1391 case IORING_OP_FSYNC:
1393 case IORING_OP_READ_FIXED:
1394 return "READ_FIXED";
1395 case IORING_OP_WRITE_FIXED:
1396 return "WRITE_FIXED";
1397 case IORING_OP_POLL_ADD:
1399 case IORING_OP_POLL_REMOVE:
1400 return "POLL_REMOVE";
1401 case IORING_OP_SYNC_FILE_RANGE:
1402 return "SYNC_FILE_RANGE";
1403 case IORING_OP_SENDMSG:
1405 case IORING_OP_RECVMSG:
1407 case IORING_OP_TIMEOUT:
1409 case IORING_OP_TIMEOUT_REMOVE:
1410 return "TIMEOUT_REMOVE";
1411 case IORING_OP_ACCEPT:
1413 case IORING_OP_ASYNC_CANCEL:
1414 return "ASYNC_CANCEL";
1415 case IORING_OP_LINK_TIMEOUT:
1416 return "LINK_TIMEOUT";
1417 case IORING_OP_CONNECT:
1419 case IORING_OP_FALLOCATE:
1421 case IORING_OP_OPENAT:
1423 case IORING_OP_CLOSE:
1425 case IORING_OP_FILES_UPDATE:
1426 return "FILES_UPDATE";
1427 case IORING_OP_STATX:
1429 case IORING_OP_READ:
1431 case IORING_OP_WRITE:
1433 case IORING_OP_FADVISE:
1435 case IORING_OP_MADVISE:
1437 case IORING_OP_SEND:
1439 case IORING_OP_RECV:
1441 case IORING_OP_OPENAT2:
1443 case IORING_OP_EPOLL_CTL:
1445 case IORING_OP_SPLICE:
1447 case IORING_OP_PROVIDE_BUFFERS:
1448 return "PROVIDE_BUFFERS";
1449 case IORING_OP_REMOVE_BUFFERS:
1450 return "REMOVE_BUFFERS";
1453 case IORING_OP_SHUTDOWN:
1455 case IORING_OP_RENAMEAT:
1457 case IORING_OP_UNLINKAT:
1459 case IORING_OP_MKDIRAT:
1461 case IORING_OP_SYMLINKAT:
1463 case IORING_OP_LINKAT:
1465 case IORING_OP_MSG_RING:
1467 case IORING_OP_FSETXATTR:
1469 case IORING_OP_SETXATTR:
1471 case IORING_OP_FGETXATTR:
1473 case IORING_OP_GETXATTR:
1475 case IORING_OP_SOCKET:
1477 case IORING_OP_URING_CMD:
1479 case IORING_OP_LAST:
1485 struct sock *io_uring_get_socket(struct file *file)
1487 #if defined(CONFIG_UNIX)
1488 if (file->f_op == &io_uring_fops) {
1489 struct io_ring_ctx *ctx = file->private_data;
1491 return ctx->ring_sock->sk;
1496 EXPORT_SYMBOL(io_uring_get_socket);
1498 #if defined(CONFIG_UNIX)
1499 static inline bool io_file_need_scm(struct file *filp)
1501 #if defined(IO_URING_SCM_ALL)
1504 return !!unix_get_socket(filp);
1508 static inline bool io_file_need_scm(struct file *filp)
1514 static void io_ring_submit_unlock(struct io_ring_ctx *ctx, unsigned issue_flags)
1516 lockdep_assert_held(&ctx->uring_lock);
1517 if (issue_flags & IO_URING_F_UNLOCKED)
1518 mutex_unlock(&ctx->uring_lock);
1521 static void io_ring_submit_lock(struct io_ring_ctx *ctx, unsigned issue_flags)
1524 * "Normal" inline submissions always hold the uring_lock, since we
1525 * grab it from the system call. Same is true for the SQPOLL offload.
1526 * The only exception is when we've detached the request and issue it
1527 * from an async worker thread; grab the lock for that case.
1529 if (issue_flags & IO_URING_F_UNLOCKED)
1530 mutex_lock(&ctx->uring_lock);
1531 lockdep_assert_held(&ctx->uring_lock);
1534 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
1537 mutex_lock(&ctx->uring_lock);
1542 #define io_for_each_link(pos, head) \
1543 for (pos = (head); pos; pos = pos->link)
1546 * Shamelessly stolen from the mm implementation of page reference checking,
1547 * see commit f958d7b528b1 for details.
1549 #define req_ref_zero_or_close_to_overflow(req) \
1550 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
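/*
 * Sketch of why the macro above works: the unsigned "+ 127u" maps
 * refs == 0 to 127 and wraps refs in [UINT_MAX - 126, UINT_MAX] into
 * [0, 126], so a single compare against 127 catches both the
 * already-freed and the about-to-overflow cases.
 */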
1552 static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1554 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
1555 return atomic_inc_not_zero(&req->refs);
1558 static inline bool req_ref_put_and_test(struct io_kiocb *req)
1560 if (likely(!(req->flags & REQ_F_REFCOUNT)))
1563 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1564 return atomic_dec_and_test(&req->refs);
1567 static inline void req_ref_get(struct io_kiocb *req)
1569 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
1570 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1571 atomic_inc(&req->refs);
1574 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
1576 if (!wq_list_empty(&ctx->submit_state.compl_reqs))
1577 __io_submit_flush_completions(ctx);
1580 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
1582 if (!(req->flags & REQ_F_REFCOUNT)) {
1583 req->flags |= REQ_F_REFCOUNT;
1584 atomic_set(&req->refs, nr);
1588 static inline void io_req_set_refcount(struct io_kiocb *req)
1590 __io_req_set_refcount(req, 1);
1593 #define IO_RSRC_REF_BATCH 100
1595 static void io_rsrc_put_node(struct io_rsrc_node *node, int nr)
1597 percpu_ref_put_many(&node->refs, nr);
1600 static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
1601 struct io_ring_ctx *ctx)
1602 __must_hold(&ctx->uring_lock)
1604 struct io_rsrc_node *node = req->rsrc_node;
1607 if (node == ctx->rsrc_node)
1608 ctx->rsrc_cached_refs++;
1610 io_rsrc_put_node(node, 1);
1614 static inline void io_req_put_rsrc(struct io_kiocb *req)
1617 io_rsrc_put_node(req->rsrc_node, 1);
1620 static __cold void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
1621 __must_hold(&ctx->uring_lock)
1623 if (ctx->rsrc_cached_refs) {
1624 io_rsrc_put_node(ctx->rsrc_node, ctx->rsrc_cached_refs);
1625 ctx->rsrc_cached_refs = 0;
1629 static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
1630 __must_hold(&ctx->uring_lock)
1632 ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
1633 percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
1636 static inline void io_req_set_rsrc_node(struct io_kiocb *req,
1637 struct io_ring_ctx *ctx,
1638 unsigned int issue_flags)
1640 if (!req->rsrc_node) {
1641 req->rsrc_node = ctx->rsrc_node;
1643 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1644 lockdep_assert_held(&ctx->uring_lock);
1645 ctx->rsrc_cached_refs--;
1646 if (unlikely(ctx->rsrc_cached_refs < 0))
1647 io_rsrc_refs_refill(ctx);
1649 percpu_ref_get(&req->rsrc_node->refs);
1654 static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
1656 if (req->flags & REQ_F_BUFFER_RING) {
1658 req->buf_list->head++;
1659 req->flags &= ~REQ_F_BUFFER_RING;
1661 list_add(&req->kbuf->list, list);
1662 req->flags &= ~REQ_F_BUFFER_SELECTED;
1665 return IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
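/*
 * Illustrative userspace side of the cflags encoding returned above (a
 * sketch, not code from this file): when IORING_CQE_F_BUFFER is set, the
 * consumed buffer ID sits in the upper bits of cqe->flags:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		buf_id = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */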
1668 static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
1670 lockdep_assert_held(&req->ctx->completion_lock);
1672 if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
1674 return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
1677 static inline unsigned int io_put_kbuf(struct io_kiocb *req,
1678 unsigned issue_flags)
1680 unsigned int cflags;
1682 if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
1686 * We can add this buffer back to two lists:
1688 * 1) The io_buffers_cache list. This one is protected by the
1689 * ctx->uring_lock. If we already hold this lock, add back to this
1690 * list as we can grab it from issue as well.
1691 * 2) The io_buffers_comp list. This one is protected by the
1692 * ctx->completion_lock.
1694 * We migrate buffers from the comp_list to the issue cache list when we need one.
1697 if (req->flags & REQ_F_BUFFER_RING) {
1698 /* no buffers to recycle for this case */
1699 cflags = __io_put_kbuf(req, NULL);
1700 } else if (issue_flags & IO_URING_F_UNLOCKED) {
1701 struct io_ring_ctx *ctx = req->ctx;
1703 spin_lock(&ctx->completion_lock);
1704 cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
1705 spin_unlock(&ctx->completion_lock);
1707 lockdep_assert_held(&req->ctx->uring_lock);
1709 cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
1715 static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
1718 if (ctx->io_bl && bgid < BGID_ARRAY)
1719 return &ctx->io_bl[bgid];
1721 return xa_load(&ctx->io_bl_xa, bgid);
1724 static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
1726 struct io_ring_ctx *ctx = req->ctx;
1727 struct io_buffer_list *bl;
1728 struct io_buffer *buf;
1730 if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
1733 * For legacy provided buffer mode, don't recycle if we already did
1734 * IO to this buffer. For ring-mapped provided buffer mode, we should
1735 * increment ring->head to explicitly monopolize the buffer to avoid multiple use.
1738 if ((req->flags & REQ_F_BUFFER_SELECTED) &&
1739 (req->flags & REQ_F_PARTIAL_IO))
1743 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
1744 * the flag and hence ensure that bl->head doesn't get incremented.
1745 * If the tail has already been incremented, hang on to it.
1747 if (req->flags & REQ_F_BUFFER_RING) {
1748 if (req->buf_list) {
1749 if (req->flags & REQ_F_PARTIAL_IO) {
1750 req->buf_list->head++;
1751 req->buf_list = NULL;
1753 req->buf_index = req->buf_list->bgid;
1754 req->flags &= ~REQ_F_BUFFER_RING;
1760 io_ring_submit_lock(ctx, issue_flags);
1763 bl = io_buffer_get_list(ctx, buf->bgid);
1764 list_add(&buf->list, &bl->buf_list);
1765 req->flags &= ~REQ_F_BUFFER_SELECTED;
1766 req->buf_index = buf->bgid;
1768 io_ring_submit_unlock(ctx, issue_flags);
1771 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1773 __must_hold(&req->ctx->timeout_lock)
1775 struct io_kiocb *req;
1777 if (task && head->task != task)
1782 io_for_each_link(req, head) {
1783 if (req->flags & REQ_F_INFLIGHT)
1789 static bool io_match_linked(struct io_kiocb *head)
1791 struct io_kiocb *req;
1793 io_for_each_link(req, head) {
1794 if (req->flags & REQ_F_INFLIGHT)
1801 * As io_match_task() but protected against racing with linked timeouts.
1802 * User must not hold timeout_lock.
1804 static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1809 if (task && head->task != task)
1814 if (head->flags & REQ_F_LINK_TIMEOUT) {
1815 struct io_ring_ctx *ctx = head->ctx;
1817 /* protect against races with linked timeouts */
1818 spin_lock_irq(&ctx->timeout_lock);
1819 matched = io_match_linked(head);
1820 spin_unlock_irq(&ctx->timeout_lock);
1822 matched = io_match_linked(head);
1827 static inline bool req_has_async_data(struct io_kiocb *req)
1829 return req->flags & REQ_F_ASYNC_DATA;
1832 static inline void req_set_fail(struct io_kiocb *req)
1834 req->flags |= REQ_F_FAIL;
1835 if (req->flags & REQ_F_CQE_SKIP) {
1836 req->flags &= ~REQ_F_CQE_SKIP;
1837 req->flags |= REQ_F_SKIP_LINK_CQES;
1841 static inline void req_fail_link_node(struct io_kiocb *req, int res)
1847 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
1849 wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
1852 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
1854 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1856 complete(&ctx->ref_comp);
1859 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1861 return !req->timeout.off;
1864 static __cold void io_fallback_req_func(struct work_struct *work)
1866 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1867 fallback_work.work);
1868 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1869 struct io_kiocb *req, *tmp;
1870 bool locked = false;
1872 percpu_ref_get(&ctx->refs);
1873 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
1874 req->io_task_work.func(req, &locked);
1877 io_submit_flush_completions(ctx);
1878 mutex_unlock(&ctx->uring_lock);
1880 percpu_ref_put(&ctx->refs);
1883 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1885 struct io_ring_ctx *ctx;
1888 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1892 xa_init(&ctx->io_bl_xa);
1895 * Use 5 bits less than the max cq entries; that should give us around
1896 * 32 entries per hash list if totally full and uniformly spread.
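 *
 * Worked example with assumed numbers: for p->cq_entries == 4096,
 * ilog2() yields 12; dropping 5 bits leaves 7, i.e. 128 hash buckets,
 * or 4096 / 128 = 32 entries per list when completely full.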
1898 hash_bits = ilog2(p->cq_entries);
1902 ctx->cancel_hash_bits = hash_bits;
1903 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1905 if (!ctx->cancel_hash)
1907 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1909 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1910 if (!ctx->dummy_ubuf)
1912 /* set invalid range, so io_import_fixed() fails meeting it */
1913 ctx->dummy_ubuf->ubuf = -1UL;
1915 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
1916 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1919 ctx->flags = p->flags;
1920 init_waitqueue_head(&ctx->sqo_sq_wait);
1921 INIT_LIST_HEAD(&ctx->sqd_list);
1922 INIT_LIST_HEAD(&ctx->cq_overflow_list);
1923 INIT_LIST_HEAD(&ctx->io_buffers_cache);
1924 INIT_LIST_HEAD(&ctx->apoll_cache);
1925 init_completion(&ctx->ref_comp);
1926 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
1927 mutex_init(&ctx->uring_lock);
1928 init_waitqueue_head(&ctx->cq_wait);
1929 spin_lock_init(&ctx->completion_lock);
1930 spin_lock_init(&ctx->timeout_lock);
1931 INIT_WQ_LIST(&ctx->iopoll_list);
1932 INIT_LIST_HEAD(&ctx->io_buffers_pages);
1933 INIT_LIST_HEAD(&ctx->io_buffers_comp);
1934 INIT_LIST_HEAD(&ctx->defer_list);
1935 INIT_LIST_HEAD(&ctx->timeout_list);
1936 INIT_LIST_HEAD(&ctx->ltimeout_list);
1937 spin_lock_init(&ctx->rsrc_ref_lock);
1938 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
1939 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1940 init_llist_head(&ctx->rsrc_put_llist);
1941 INIT_LIST_HEAD(&ctx->tctx_list);
1942 ctx->submit_state.free_list.next = NULL;
1943 INIT_WQ_LIST(&ctx->locked_free_list);
1944 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
1945 INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
1948 kfree(ctx->dummy_ubuf);
1949 kfree(ctx->cancel_hash);
1951 xa_destroy(&ctx->io_bl_xa);
1956 static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1958 struct io_rings *r = ctx->rings;
1960 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1964 static bool req_need_defer(struct io_kiocb *req, u32 seq)
1966 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1967 struct io_ring_ctx *ctx = req->ctx;
1969 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
1975 static inline bool io_req_ffs_set(struct io_kiocb *req)
1977 return req->flags & REQ_F_FIXED_FILE;
1980 static inline void io_req_track_inflight(struct io_kiocb *req)
1982 if (!(req->flags & REQ_F_INFLIGHT)) {
1983 req->flags |= REQ_F_INFLIGHT;
1984 atomic_inc(&current->io_uring->inflight_tracked);
1988 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1990 if (WARN_ON_ONCE(!req->link))
1993 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1994 req->flags |= REQ_F_LINK_TIMEOUT;
1996 /* linked timeouts should have two refs once prep'ed */
1997 io_req_set_refcount(req);
1998 __io_req_set_refcount(req->link, 2);
2002 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2004 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
2006 return __io_prep_linked_timeout(req);
2009 static noinline void __io_arm_ltimeout(struct io_kiocb *req)
2011 io_queue_linked_timeout(__io_prep_linked_timeout(req));
2014 static inline void io_arm_ltimeout(struct io_kiocb *req)
2016 if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
2017 __io_arm_ltimeout(req);
2020 static void io_prep_async_work(struct io_kiocb *req)
2022 const struct io_op_def *def = &io_op_defs[req->opcode];
2023 struct io_ring_ctx *ctx = req->ctx;
2025 if (!(req->flags & REQ_F_CREDS)) {
2026 req->flags |= REQ_F_CREDS;
2027 req->creds = get_current_cred();
2030 req->work.list.next = NULL;
2031 req->work.flags = 0;
2032 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
2033 if (req->flags & REQ_F_FORCE_ASYNC)
2034 req->work.flags |= IO_WQ_WORK_CONCURRENT;
2036 if (req->flags & REQ_F_ISREG) {
2037 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
2038 io_wq_hash_work(&req->work, file_inode(req->file));
2039 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
2040 if (def->unbound_nonreg_file)
2041 req->work.flags |= IO_WQ_WORK_UNBOUND;
2045 static void io_prep_async_link(struct io_kiocb *req)
2047 struct io_kiocb *cur;
2049 if (req->flags & REQ_F_LINK_TIMEOUT) {
2050 struct io_ring_ctx *ctx = req->ctx;
2052 spin_lock_irq(&ctx->timeout_lock);
2053 io_for_each_link(cur, req)
2054 io_prep_async_work(cur);
2055 spin_unlock_irq(&ctx->timeout_lock);
2057 io_for_each_link(cur, req)
2058 io_prep_async_work(cur);
2062 static inline void io_req_add_compl_list(struct io_kiocb *req)
2064 struct io_submit_state *state = &req->ctx->submit_state;
2066 if (!(req->flags & REQ_F_CQE_SKIP))
2067 state->flush_cqes = true;
2068 wq_list_add_tail(&req->comp_list, &state->compl_reqs);
2071 static void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
2073 struct io_kiocb *link = io_prep_linked_timeout(req);
2074 struct io_uring_task *tctx = req->task->io_uring;
2077 BUG_ON(!tctx->io_wq);
2079 /* init ->work of the whole link before punting */
2080 io_prep_async_link(req);
2083 * Not expected to happen, but if we do have a bug where this _can_
2084 * happen, catch it here and ensure the request is marked as
2085 * canceled. That will make io-wq go through the usual work cancel
2086 * procedure rather than attempt to run this request (or create a new worker for it).
2089 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
2090 req->work.flags |= IO_WQ_WORK_CANCEL;
2092 trace_io_uring_queue_async_work(req->ctx, req, req->cqe.user_data,
2093 req->opcode, req->flags, &req->work,
2094 io_wq_is_hashed(&req->work));
2095 io_wq_enqueue(tctx->io_wq, &req->work);
2097 io_queue_linked_timeout(link);
2100 static void io_kill_timeout(struct io_kiocb *req, int status)
2101 __must_hold(&req->ctx->completion_lock)
2102 __must_hold(&req->ctx->timeout_lock)
2104 struct io_timeout_data *io = req->async_data;
2106 if (hrtimer_try_to_cancel(&io->timer) != -1) {
2109 atomic_set(&req->ctx->cq_timeouts,
2110 atomic_read(&req->ctx->cq_timeouts) + 1);
2111 list_del_init(&req->timeout.list);
2112 io_req_tw_post_queue(req, status, 0);
2116 static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
2118 while (!list_empty(&ctx->defer_list)) {
2119 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
2120 struct io_defer_entry, list);
2122 if (req_need_defer(de->req, de->seq))
2124 list_del_init(&de->list);
2125 io_req_task_queue(de->req);
2130 static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
2131 __must_hold(&ctx->completion_lock)
2133 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
2134 struct io_kiocb *req, *tmp;
2136 spin_lock_irq(&ctx->timeout_lock);
2137 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
2138 u32 events_needed, events_got;
2140 if (io_is_timeout_noseq(req))
2144 * Since seq can easily wrap around over time, subtract
2145 * the last seq at which timeouts were flushed before comparing.
2146 * Assuming not more than 2^31-1 events have happened since,
2147 * these subtractions won't have wrapped, so we can check if
2148 * target is in [last_seq, current_seq] by comparing the two.
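 *
 * Worked example with assumed values: if cq_last_tm_flush is
 * 0xfffffff0, seq is 0x10 and a request's target_seq is 0x5, then
 * events_needed == 0x15 and events_got == 0x20, so the timeout still
 * fires correctly even though the raw sequence numbers have wrapped.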
2150 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
2151 events_got = seq - ctx->cq_last_tm_flush;
2152 if (events_got < events_needed)
2155 io_kill_timeout(req, 0);
2157 ctx->cq_last_tm_flush = seq;
2158 spin_unlock_irq(&ctx->timeout_lock);
2161 static inline void io_commit_cqring(struct io_ring_ctx *ctx)
2163 /* order cqe stores with ring update */
2164 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
2167 static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
2169 if (ctx->off_timeout_used || ctx->drain_active) {
2170 spin_lock(&ctx->completion_lock);
2171 if (ctx->off_timeout_used)
2172 io_flush_timeouts(ctx);
2173 if (ctx->drain_active)
2174 io_queue_deferred(ctx);
2175 io_commit_cqring(ctx);
2176 spin_unlock(&ctx->completion_lock);
2179 io_eventfd_signal(ctx);
2182 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
2184 struct io_rings *r = ctx->rings;
2186 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
2189 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
2191 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
2195 * writes to the cq entry need to come after reading head; the
2196 * control dependency is enough as we're using WRITE_ONCE to write the tail.
2199 static noinline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
2201 struct io_rings *rings = ctx->rings;
2202 unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
2203 unsigned int shift = 0;
2204 unsigned int free, queued, len;
2206 if (ctx->flags & IORING_SETUP_CQE32)
2209 /* userspace may cheat modifying the tail, be safe and do min */
2210 queued = min(__io_cqring_events(ctx), ctx->cq_entries);
2211 free = ctx->cq_entries - queued;
2212 /* we need a contiguous range, limit based on the current array offset */
2213 len = min(free, ctx->cq_entries - off);
2217 ctx->cached_cq_tail++;
2218 ctx->cqe_cached = &rings->cqes[off];
2219 ctx->cqe_sentinel = ctx->cqe_cached + len;
2221 return &rings->cqes[off << shift];
2224 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
2226 if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
2227 struct io_uring_cqe *cqe = ctx->cqe_cached;
2229 if (ctx->flags & IORING_SETUP_CQE32) {
2230 unsigned int off = ctx->cqe_cached - ctx->rings->cqes;
2235 ctx->cached_cq_tail++;
2240 return __io_get_cqe(ctx);
2243 static void io_eventfd_signal(struct io_ring_ctx *ctx)
2245 struct io_ev_fd *ev_fd;
2249 * rcu_dereference ctx->io_ev_fd once and use it both for checking
2250 * and eventfd_signal
2252 ev_fd = rcu_dereference(ctx->io_ev_fd);
2255 * Check again if ev_fd exists in case an io_eventfd_unregister call
2256 * completed between the NULL check of ctx->io_ev_fd at the start of
2257 * the function and rcu_read_lock.
2259 if (unlikely(!ev_fd))
2261 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
2264 if (!ev_fd->eventfd_async || io_wq_current_is_worker())
2265 eventfd_signal(ev_fd->cq_ev_fd, 1);
2270 static inline void io_cqring_wake(struct io_ring_ctx *ctx)
2273 * wake_up_all() may seem excessive, but io_wake_function() and
2274 * io_should_wake() handle the termination of the loop and only
2275 * wake as many waiters as we need to.
2277 if (wq_has_sleeper(&ctx->cq_wait))
2278 wake_up_all(&ctx->cq_wait);
2282 * This should only get called when at least one event has been posted.
2283 * Some applications rely on the eventfd notification count only changing
2284 * IFF a new CQE has been added to the CQ ring. There's no dependency on a
2285 * 1:1 relationship between how many times this function is called (and
2286 * hence the eventfd count) and number of CQEs posted to the CQ ring.
2288 static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
2290 if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
2292 __io_commit_cqring_flush(ctx);
2294 io_cqring_wake(ctx);
2297 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
2299 if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
2301 __io_commit_cqring_flush(ctx);
2303 if (ctx->flags & IORING_SETUP_SQPOLL)
2304 io_cqring_wake(ctx);
2307 /* Returns true if there are no backlogged entries after the flush */
2308 static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
2310 bool all_flushed, posted;
2311 size_t cqe_size = sizeof(struct io_uring_cqe);
2313 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
2316 if (ctx->flags & IORING_SETUP_CQE32)
2320 spin_lock(&ctx->completion_lock);
2321 while (!list_empty(&ctx->cq_overflow_list)) {
2322 struct io_uring_cqe *cqe = io_get_cqe(ctx);
2323 struct io_overflow_cqe *ocqe;
2327 ocqe = list_first_entry(&ctx->cq_overflow_list,
2328 struct io_overflow_cqe, list);
2330 memcpy(cqe, &ocqe->cqe, cqe_size);
2332 io_account_cq_overflow(ctx);
2335 list_del(&ocqe->list);
2339 all_flushed = list_empty(&ctx->cq_overflow_list);
2341 clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
2342 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
2345 io_commit_cqring(ctx);
2346 spin_unlock(&ctx->completion_lock);
2348 io_cqring_ev_posted(ctx);
2352 static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
2356 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
2357 /* iopoll syncs against uring_lock, not completion_lock */
2358 if (ctx->flags & IORING_SETUP_IOPOLL)
2359 mutex_lock(&ctx->uring_lock);
2360 ret = __io_cqring_overflow_flush(ctx, false);
2361 if (ctx->flags & IORING_SETUP_IOPOLL)
2362 mutex_unlock(&ctx->uring_lock);
2368 static void __io_put_task(struct task_struct *task, int nr)
2370 struct io_uring_task *tctx = task->io_uring;
2372 percpu_counter_sub(&tctx->inflight, nr);
2373 if (unlikely(atomic_read(&tctx->in_idle)))
2374 wake_up(&tctx->wait);
2375 put_task_struct_many(task, nr);
2378 /* must be called somewhat shortly after putting a request */
2379 static inline void io_put_task(struct task_struct *task, int nr)
2381 if (likely(task == current))
2382 task->io_uring->cached_refs += nr;
2384 __io_put_task(task, nr);
2387 static void io_task_refs_refill(struct io_uring_task *tctx)
2389 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
2391 percpu_counter_add(&tctx->inflight, refill);
2392 refcount_add(refill, &current->usage);
2393 tctx->cached_refs += refill;
2396 static inline void io_get_task_refs(int nr)
2398 struct io_uring_task *tctx = current->io_uring;
2400 tctx->cached_refs -= nr;
2401 if (unlikely(tctx->cached_refs < 0))
2402 io_task_refs_refill(tctx);
2405 static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
2407 struct io_uring_task *tctx = task->io_uring;
2408 unsigned int refs = tctx->cached_refs;
2411 tctx->cached_refs = 0;
2412 percpu_counter_sub(&tctx->inflight, refs);
2413 put_task_struct_many(task, refs);
2417 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
2418 s32 res, u32 cflags, u64 extra1,
2421 struct io_overflow_cqe *ocqe;
2422 size_t ocq_size = sizeof(struct io_overflow_cqe);
2423 bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
2426 ocq_size += sizeof(struct io_uring_cqe);
2428 ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
2429 trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
2432 * If we're in ring overflow flush mode, or in task cancel mode,
2433 * or cannot allocate an overflow entry, then we need to drop it on the floor.
2436 io_account_cq_overflow(ctx);
2437 set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
2440 if (list_empty(&ctx->cq_overflow_list)) {
2441 set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
2442 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
2445 ocqe->cqe.user_data = user_data;
2446 ocqe->cqe.res = res;
2447 ocqe->cqe.flags = cflags;
2449 ocqe->cqe.big_cqe[0] = extra1;
2450 ocqe->cqe.big_cqe[1] = extra2;
2452 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
2456 static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
2457 s32 res, u32 cflags)
2459 struct io_uring_cqe *cqe;
2462 * If we can't get a cq entry, userspace overflowed the
2463 * submission (by quite a lot). Increment the overflow count in the ring.
2466 cqe = io_get_cqe(ctx);
2468 WRITE_ONCE(cqe->user_data, user_data);
2469 WRITE_ONCE(cqe->res, res);
2470 WRITE_ONCE(cqe->flags, cflags);
2473 return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
2476 static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
2477 struct io_kiocb *req)
2479 struct io_uring_cqe *cqe;
2481 trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
2482 req->cqe.res, req->cqe.flags, 0, 0);
2485 * If we can't get a cq entry, userspace overflowed the
2486 * submission (by quite a lot). Increment the overflow count in the ring.
2489 cqe = io_get_cqe(ctx);
2491 memcpy(cqe, &req->cqe, sizeof(*cqe));
2494 return io_cqring_event_overflow(ctx, req->cqe.user_data,
2495 req->cqe.res, req->cqe.flags, 0, 0);
2498 static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
2499 struct io_kiocb *req)
2501 struct io_uring_cqe *cqe;
2502 u64 extra1 = req->extra1;
2503 u64 extra2 = req->extra2;
2505 trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
2506 req->cqe.res, req->cqe.flags, extra1, extra2);
2509 * If we can't get a cq entry, userspace overflowed the
2510 * submission (by quite a lot). Increment the overflow count in the ring.
2513 cqe = io_get_cqe(ctx);
2515 memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
2516 cqe->big_cqe[0] = extra1;
2517 cqe->big_cqe[1] = extra2;
2521 return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res,
2522 req->cqe.flags, extra1, extra2);
2525 static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
2527 trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0);
2528 return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
2531 static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags,
2532 u64 extra1, u64 extra2)
2534 struct io_ring_ctx *ctx = req->ctx;
2535 struct io_uring_cqe *cqe;
2537 if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32)))
2539 if (req->flags & REQ_F_CQE_SKIP)
2542 trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags,
2546 * If we can't get a cq entry, userspace overflowed the
2547 * submission (by quite a lot). Increment the overflow count in the ring.
2550 cqe = io_get_cqe(ctx);
2552 WRITE_ONCE(cqe->user_data, req->cqe.user_data);
2553 WRITE_ONCE(cqe->res, res);
2554 WRITE_ONCE(cqe->flags, cflags);
2555 WRITE_ONCE(cqe->big_cqe[0], extra1);
2556 WRITE_ONCE(cqe->big_cqe[1], extra2);
2560 io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2);
2563 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
2564 s32 res, u32 cflags)
2567 trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
2568 return __io_fill_cqe(ctx, user_data, res, cflags);
2571 static void __io_req_complete_put(struct io_kiocb *req)
2574 * If we're the last reference to this request, add to our locked free_list cache.
2577 if (req_ref_put_and_test(req)) {
2578 struct io_ring_ctx *ctx = req->ctx;
2580 if (req->flags & IO_REQ_LINK_FLAGS) {
2581 if (req->flags & IO_DISARM_MASK)
2582 io_disarm_next(req);
2584 io_req_task_queue(req->link);
2588 io_req_put_rsrc(req);
2590 * Selected buffer deallocation in io_clean_op() assumes that
2591 * we don't hold ->completion_lock. Clean them here to avoid deadlocks.
2594 io_put_kbuf_comp(req);
2595 io_dismantle_req(req);
2596 io_put_task(req->task, 1);
2597 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
2598 ctx->locked_free_nr++;
2602 static void __io_req_complete_post(struct io_kiocb *req, s32 res,
2605 if (!(req->flags & REQ_F_CQE_SKIP))
2606 __io_fill_cqe_req(req, res, cflags);
2607 __io_req_complete_put(req);
2610 static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
2611 u32 cflags, u64 extra1, u64 extra2)
2613 if (!(req->flags & REQ_F_CQE_SKIP))
2614 __io_fill_cqe32_req(req, res, cflags, extra1, extra2);
2615 __io_req_complete_put(req);
2618 static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
2620 struct io_ring_ctx *ctx = req->ctx;
2622 spin_lock(&ctx->completion_lock);
2623 __io_req_complete_post(req, res, cflags);
2624 io_commit_cqring(ctx);
2625 spin_unlock(&ctx->completion_lock);
2626 io_cqring_ev_posted(ctx);
2629 static void io_req_complete_post32(struct io_kiocb *req, s32 res,
2630 u32 cflags, u64 extra1, u64 extra2)
2632 struct io_ring_ctx *ctx = req->ctx;
2634 spin_lock(&ctx->completion_lock);
2635 __io_req_complete_post32(req, res, cflags, extra1, extra2);
2636 io_commit_cqring(ctx);
2637 spin_unlock(&ctx->completion_lock);
2638 io_cqring_ev_posted(ctx);
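/*
 * Completions are either posted immediately (io_req_complete_post*(),
 * which takes ->completion_lock, commits the CQ ring and wakes waiters)
 * or deferred: io_req_complete_state() just records res/cflags in the
 * request and marks it REQ_F_COMPLETE_INLINE, so the submission path can
 * batch-flush it later under ->uring_lock.
 */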
2641 static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
2645 req->cqe.flags = cflags;
2646 req->flags |= REQ_F_COMPLETE_INLINE;
2649 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
2650 s32 res, u32 cflags)
2652 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
2653 io_req_complete_state(req, res, cflags);
2655 io_req_complete_post(req, res, cflags);
2658 static inline void __io_req_complete32(struct io_kiocb *req,
2659 unsigned int issue_flags, s32 res,
2660 u32 cflags, u64 extra1, u64 extra2)
2662 if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
2663 io_req_complete_state(req, res, cflags);
2664 req->extra1 = extra1;
2665 req->extra2 = extra2;
2667 io_req_complete_post32(req, res, cflags, extra1, extra2);
2671 static inline void io_req_complete(struct io_kiocb *req, s32 res)
2675 __io_req_complete(req, 0, res, 0);
2678 static void io_req_complete_failed(struct io_kiocb *req, s32 res)
2681 io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
2685 * Don't initialise the fields below on every allocation, but do that in
2686 * advance and keep them valid across allocations.
2688 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
2692 req->async_data = NULL;
2693 /* not necessary, but safer to zero */
2697 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
2698 struct io_submit_state *state)
2700 spin_lock(&ctx->completion_lock);
2701 wq_list_splice(&ctx->locked_free_list, &state->free_list);
2702 ctx->locked_free_nr = 0;
2703 spin_unlock(&ctx->completion_lock);
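/*
 * Requests are recycled through two caches: the completion side parks
 * freed requests on ->locked_free_list under ->completion_lock, while the
 * submission side (under ->uring_lock) pulls from submit_state.free_list,
 * splicing the locked list over once it has grown large enough.
 */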
2706 static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
2708 return !ctx->submit_state.free_list.next;
2712 * A request might get retired back into the request caches even before opcode
2713 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
2714 * Because of that, io_alloc_req() should be called only under ->uring_lock
2715 * and with extra caution to not get a request that is still worked on.
2717 static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
2718 __must_hold(&ctx->uring_lock)
2720 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
2721 void *reqs[IO_REQ_ALLOC_BATCH];
2725 * If we have more than a batch's worth of requests in our IRQ side
2726 * locked cache, grab the lock and move them over to our submission side cache.
2729 if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
2730 io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
2731 if (!io_req_cache_empty(ctx))
2735 ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
2738 * Bulk alloc is all-or-nothing. If we fail to get a batch,
2739 * retry single alloc to be on the safe side.
2741 if (unlikely(ret <= 0)) {
2742 reqs[0] = kmem_cache_alloc(req_cachep, gfp);
2748 percpu_ref_get_many(&ctx->refs, ret);
2749 for (i = 0; i < ret; i++) {
2750 struct io_kiocb *req = reqs[i];
2752 io_preinit_req(req, ctx);
2753 io_req_add_to_cache(req, ctx);
2758 static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
2760 if (unlikely(io_req_cache_empty(ctx)))
2761 return __io_alloc_req_refill(ctx);
2765 static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
2767 struct io_wq_work_node *node;
2769 node = wq_stack_extract(&ctx->submit_state.free_list);
2770 return container_of(node, struct io_kiocb, comp_list);
2773 static inline void io_put_file(struct file *file)
2779 static inline void io_dismantle_req(struct io_kiocb *req)
2781 unsigned int flags = req->flags;
2783 if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
2785 if (!(flags & REQ_F_FIXED_FILE))
2786 io_put_file(req->file);
2789 static __cold void io_free_req(struct io_kiocb *req)
2791 struct io_ring_ctx *ctx = req->ctx;
2793 io_req_put_rsrc(req);
2794 io_dismantle_req(req);
2795 io_put_task(req->task, 1);
2797 spin_lock(&ctx->completion_lock);
2798 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
2799 ctx->locked_free_nr++;
2800 spin_unlock(&ctx->completion_lock);
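/*
 * The helpers below deal with linked requests: unlinking the next entry,
 * disarming a pending IORING_OP_LINK_TIMEOUT, and failing the remainder
 * of a chain with -ECANCELED once an earlier link has failed.
 */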
2803 static inline void io_remove_next_linked(struct io_kiocb *req)
2805 struct io_kiocb *nxt = req->link;
2807 req->link = nxt->link;
2811 static struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req)
2812 __must_hold(&req->ctx->completion_lock)
2813 __must_hold(&req->ctx->timeout_lock)
2815 struct io_kiocb *link = req->link;
2817 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2818 struct io_timeout_data *io = link->async_data;
2820 io_remove_next_linked(req);
2821 link->timeout.head = NULL;
2822 if (hrtimer_try_to_cancel(&io->timer) != -1) {
2823 list_del(&link->timeout.list);
2830 static void io_fail_links(struct io_kiocb *req)
2831 __must_hold(&req->ctx->completion_lock)
2833 struct io_kiocb *nxt, *link = req->link;
2834 bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
2838 long res = -ECANCELED;
2840 if (link->flags & REQ_F_FAIL)
2841 res = link->cqe.res;
2846 trace_io_uring_fail_link(req->ctx, req, req->cqe.user_data,
2850 link->flags |= REQ_F_CQE_SKIP;
2852 link->flags &= ~REQ_F_CQE_SKIP;
2853 __io_req_complete_post(link, res, 0);
2858 static bool io_disarm_next(struct io_kiocb *req)
2859 __must_hold(&req->ctx->completion_lock)
2861 struct io_kiocb *link = NULL;
2862 bool posted = false;
2864 if (req->flags & REQ_F_ARM_LTIMEOUT) {
2866 req->flags &= ~REQ_F_ARM_LTIMEOUT;
2867 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2868 io_remove_next_linked(req);
2869 io_req_tw_post_queue(link, -ECANCELED, 0);
2872 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
2873 struct io_ring_ctx *ctx = req->ctx;
2875 spin_lock_irq(&ctx->timeout_lock);
2876 link = io_disarm_linked_timeout(req);
2877 spin_unlock_irq(&ctx->timeout_lock);
2880 io_req_tw_post_queue(link, -ECANCELED, 0);
2883 if (unlikely((req->flags & REQ_F_FAIL) &&
2884 !(req->flags & REQ_F_HARDLINK))) {
2885 posted |= (req->link != NULL);
2891 static void __io_req_find_next_prep(struct io_kiocb *req)
2893 struct io_ring_ctx *ctx = req->ctx;
2896 spin_lock(&ctx->completion_lock);
2897 posted = io_disarm_next(req);
2898 io_commit_cqring(ctx);
2899 spin_unlock(&ctx->completion_lock);
2901 io_cqring_ev_posted(ctx);
2904 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
2906 struct io_kiocb *nxt;
2909 * If LINK is set, we have dependent requests in this chain. If we
2910 * didn't fail this request, queue the first one up, moving any other
2911 * dependencies to the next request. In case of failure, fail the rest of the chain.
2914 if (unlikely(req->flags & IO_DISARM_MASK))
2915 __io_req_find_next_prep(req);
2921 static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
2925 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
2926 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
2928 io_submit_flush_completions(ctx);
2929 mutex_unlock(&ctx->uring_lock);
2932 percpu_ref_put(&ctx->refs);
2935 static inline void ctx_commit_and_unlock(struct io_ring_ctx *ctx)
2937 io_commit_cqring(ctx);
2938 spin_unlock(&ctx->completion_lock);
2939 io_cqring_ev_posted(ctx);
2942 static void handle_prev_tw_list(struct io_wq_work_node *node,
2943 struct io_ring_ctx **ctx, bool *uring_locked)
2945 if (*ctx && !*uring_locked)
2946 spin_lock(&(*ctx)->completion_lock);
2949 struct io_wq_work_node *next = node->next;
2950 struct io_kiocb *req = container_of(node, struct io_kiocb,
2953 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
2955 if (req->ctx != *ctx) {
2956 if (unlikely(!*uring_locked && *ctx))
2957 ctx_commit_and_unlock(*ctx);
2959 ctx_flush_and_put(*ctx, uring_locked);
2961 /* if not contended, grab and improve batching */
2962 *uring_locked = mutex_trylock(&(*ctx)->uring_lock);
2963 percpu_ref_get(&(*ctx)->refs);
2964 if (unlikely(!*uring_locked))
2965 spin_lock(&(*ctx)->completion_lock);
2967 if (likely(*uring_locked))
2968 req->io_task_work.func(req, uring_locked);
2970 __io_req_complete_post(req, req->cqe.res,
2971 io_put_kbuf_comp(req));
2975 if (unlikely(!*uring_locked))
2976 ctx_commit_and_unlock(*ctx);
2979 static void handle_tw_list(struct io_wq_work_node *node,
2980 struct io_ring_ctx **ctx, bool *locked)
2983 struct io_wq_work_node *next = node->next;
2984 struct io_kiocb *req = container_of(node, struct io_kiocb,
2987 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
2989 if (req->ctx != *ctx) {
2990 ctx_flush_and_put(*ctx, locked);
2992 /* if not contended, grab and improve batching */
2993 *locked = mutex_trylock(&(*ctx)->uring_lock);
2994 percpu_ref_get(&(*ctx)->refs);
2996 req->io_task_work.func(req, locked);
3001 static void tctx_task_work(struct callback_head *cb)
3003 bool uring_locked = false;
3004 struct io_ring_ctx *ctx = NULL;
3005 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
3009 struct io_wq_work_node *node1, *node2;
3011 spin_lock_irq(&tctx->task_lock);
3012 node1 = tctx->prio_task_list.first;
3013 node2 = tctx->task_list.first;
3014 INIT_WQ_LIST(&tctx->task_list);
3015 INIT_WQ_LIST(&tctx->prio_task_list);
3016 if (!node2 && !node1)
3017 tctx->task_running = false;
3018 spin_unlock_irq(&tctx->task_lock);
3019 if (!node2 && !node1)
3023 handle_prev_tw_list(node1, &ctx, &uring_locked);
3025 handle_tw_list(node2, &ctx, &uring_locked);
3028 if (data_race(!tctx->task_list.first) &&
3029 data_race(!tctx->prio_task_list.first) && uring_locked)
3030 io_submit_flush_completions(ctx);
3033 ctx_flush_and_put(ctx, &uring_locked);
3035 /* relaxed read is enough as only the task itself sets ->in_idle */
3036 if (unlikely(atomic_read(&tctx->in_idle)))
3037 io_uring_drop_tctx_refs(current);
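/*
 * Queue task_work against the request's originating task. Work is staged
 * on per-task lists under ->task_lock (a separate priority list is used
 * for SQPOLL completions); if task_work_add() fails (e.g. the task is
 * exiting), the pending entries are drained through the ring's fallback
 * delayed work instead.
 */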
3040 static void __io_req_task_work_add(struct io_kiocb *req,
3041 struct io_uring_task *tctx,
3042 struct io_wq_work_list *list)
3044 struct io_ring_ctx *ctx = req->ctx;
3045 struct io_wq_work_node *node;
3046 unsigned long flags;
3049 spin_lock_irqsave(&tctx->task_lock, flags);
3050 wq_list_add_tail(&req->io_task_work.node, list);
3051 running = tctx->task_running;
3053 tctx->task_running = true;
3054 spin_unlock_irqrestore(&tctx->task_lock, flags);
3056 /* task_work already pending, we're done */
3060 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
3061 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
3063 if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
3066 spin_lock_irqsave(&tctx->task_lock, flags);
3067 tctx->task_running = false;
3068 node = wq_list_merge(&tctx->prio_task_list, &tctx->task_list);
3069 spin_unlock_irqrestore(&tctx->task_lock, flags);
3072 req = container_of(node, struct io_kiocb, io_task_work.node);
3074 if (llist_add(&req->io_task_work.fallback_node,
3075 &req->ctx->fallback_llist))
3076 schedule_delayed_work(&req->ctx->fallback_work, 1);
3080 static void io_req_task_work_add(struct io_kiocb *req)
3082 struct io_uring_task *tctx = req->task->io_uring;
3084 __io_req_task_work_add(req, tctx, &tctx->task_list);
3087 static void io_req_task_prio_work_add(struct io_kiocb *req)
3089 struct io_uring_task *tctx = req->task->io_uring;
3091 if (req->ctx->flags & IORING_SETUP_SQPOLL)
3092 __io_req_task_work_add(req, tctx, &tctx->prio_task_list);
3094 __io_req_task_work_add(req, tctx, &tctx->task_list);
3097 static void io_req_tw_post(struct io_kiocb *req, bool *locked)
3099 io_req_complete_post(req, req->cqe.res, req->cqe.flags);
3102 static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
3105 req->cqe.flags = cflags;
3106 req->io_task_work.func = io_req_tw_post;
3107 io_req_task_work_add(req);
3110 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
3112 /* not needed for normal modes, but SQPOLL depends on it */
3113 io_tw_lock(req->ctx, locked);
3114 io_req_complete_failed(req, req->cqe.res);
3117 static void io_req_task_submit(struct io_kiocb *req, bool *locked)
3119 io_tw_lock(req->ctx, locked);
3120 /* req->task == current here, checking PF_EXITING is safe */
3121 if (likely(!(req->task->flags & PF_EXITING)))
3124 io_req_complete_failed(req, -EFAULT);
3127 static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
3130 req->io_task_work.func = io_req_task_cancel;
3131 io_req_task_work_add(req);
3134 static void io_req_task_queue(struct io_kiocb *req)
3136 req->io_task_work.func = io_req_task_submit;
3137 io_req_task_work_add(req);
3140 static void io_req_task_queue_reissue(struct io_kiocb *req)
3142 req->io_task_work.func = io_queue_iowq;
3143 io_req_task_work_add(req);
3146 static void io_queue_next(struct io_kiocb *req)
3148 struct io_kiocb *nxt = io_req_find_next(req);
3151 io_req_task_queue(nxt);
3154 static void io_free_batch_list(struct io_ring_ctx *ctx,
3155 struct io_wq_work_node *node)
3156 __must_hold(&ctx->uring_lock)
3158 struct task_struct *task = NULL;
3162 struct io_kiocb *req = container_of(node, struct io_kiocb,
3165 if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
3166 if (req->flags & REQ_F_REFCOUNT) {
3167 node = req->comp_list.next;
3168 if (!req_ref_put_and_test(req))
3171 if ((req->flags & REQ_F_POLLED) && req->apoll) {
3172 struct async_poll *apoll = req->apoll;
3174 if (apoll->double_poll)
3175 kfree(apoll->double_poll);
3176 list_add(&apoll->poll.wait.entry,
3178 req->flags &= ~REQ_F_POLLED;
3180 if (req->flags & IO_REQ_LINK_FLAGS)
3182 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
3185 if (!(req->flags & REQ_F_FIXED_FILE))
3186 io_put_file(req->file);
3188 io_req_put_rsrc_locked(req, ctx);
3190 if (req->task != task) {
3192 io_put_task(task, task_refs);
3197 node = req->comp_list.next;
3198 io_req_add_to_cache(req, ctx);
3202 io_put_task(task, task_refs);
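/*
 * Flush a batch of deferred completions: post CQEs for every request on
 * submit_state.compl_reqs under ->completion_lock (skipping REQ_F_CQE_SKIP
 * ones), commit the ring, wake waiters, then recycle the requests through
 * io_free_batch_list() above.
 */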
3205 static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
3206 __must_hold(&ctx->uring_lock)
3208 struct io_wq_work_node *node, *prev;
3209 struct io_submit_state *state = &ctx->submit_state;
3211 if (state->flush_cqes) {
3212 spin_lock(&ctx->completion_lock);
3213 wq_list_for_each(node, prev, &state->compl_reqs) {
3214 struct io_kiocb *req = container_of(node, struct io_kiocb,
3217 if (!(req->flags & REQ_F_CQE_SKIP)) {
3218 if (!(ctx->flags & IORING_SETUP_CQE32))
3219 __io_fill_cqe_req_filled(ctx, req);
3221 __io_fill_cqe32_req_filled(ctx, req);
3225 io_commit_cqring(ctx);
3226 spin_unlock(&ctx->completion_lock);
3227 io_cqring_ev_posted(ctx);
3228 state->flush_cqes = false;
3231 io_free_batch_list(ctx, state->compl_reqs.first);
3232 INIT_WQ_LIST(&state->compl_reqs);
3236 * Drop reference to request, return next in chain (if there is one) if this
3237 * was the last reference to this request.
3239 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
3241 struct io_kiocb *nxt = NULL;
3243 if (req_ref_put_and_test(req)) {
3244 if (unlikely(req->flags & IO_REQ_LINK_FLAGS))
3245 nxt = io_req_find_next(req);
3251 static inline void io_put_req(struct io_kiocb *req)
3253 if (req_ref_put_and_test(req)) {
3259 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
3261 /* See comment at the top of this file */
3263 return __io_cqring_events(ctx);
3266 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
3268 struct io_rings *rings = ctx->rings;
3270 /* make sure SQ entry isn't read before tail */
3271 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
3274 static inline bool io_run_task_work(void)
3276 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
3277 __set_current_state(TASK_RUNNING);
3278 clear_notify_signal();
3279 if (task_work_pending(current))
3287 static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
3289 struct io_wq_work_node *pos, *start, *prev;
3290 unsigned int poll_flags = BLK_POLL_NOSLEEP;
3291 DEFINE_IO_COMP_BATCH(iob);
3295 * Only spin for completions if we don't have multiple devices hanging
3296 * off our complete list.
3298 if (ctx->poll_multi_queue || force_nonspin)
3299 poll_flags |= BLK_POLL_ONESHOT;
3301 wq_list_for_each(pos, start, &ctx->iopoll_list) {
3302 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
3303 struct kiocb *kiocb = &req->rw.kiocb;
3307 * Move completed and retryable entries to our local lists.
3308 * If we find a request that requires polling, break out
3309 * and complete those lists first, if we have entries there.
3311 if (READ_ONCE(req->iopoll_completed))
3314 ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
3315 if (unlikely(ret < 0))
3318 poll_flags |= BLK_POLL_ONESHOT;
3320 /* iopoll may have completed current req */
3321 if (!rq_list_empty(iob.req_list) ||
3322 READ_ONCE(req->iopoll_completed))
3326 if (!rq_list_empty(iob.req_list))
3332 wq_list_for_each_resume(pos, prev) {
3333 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
3335 /* order with io_complete_rw_iopoll(), e.g. ->result updates */
3336 if (!smp_load_acquire(&req->iopoll_completed))
3339 if (unlikely(req->flags & REQ_F_CQE_SKIP))
3341 __io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0));
3344 if (unlikely(!nr_events))
3347 io_commit_cqring(ctx);
3348 io_cqring_ev_posted_iopoll(ctx);
3349 pos = start ? start->next : ctx->iopoll_list.first;
3350 wq_list_cut(&ctx->iopoll_list, prev, start);
3351 io_free_batch_list(ctx, pos);
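/*
 * io_do_iopoll() above makes two passes over ->iopoll_list: first it kicks
 * ->iopoll() on entries that have not completed yet (optionally spinning),
 * then it reaps everything marked iopoll_completed into CQEs, commits the
 * ring and frees the reaped requests as a batch.
 */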
3356 * We can't just wait for polled events to come to us, we have to actively
3357 * find and complete them.
3359 static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
3361 if (!(ctx->flags & IORING_SETUP_IOPOLL))
3364 mutex_lock(&ctx->uring_lock);
3365 while (!wq_list_empty(&ctx->iopoll_list)) {
3367 /* let it sleep and repeat later if we can't complete a request */
3367 if (io_do_iopoll(ctx, true) == 0)
3370 * Ensure we allow local-to-the-cpu processing to take place;
3371 * in this case we need to ensure that we reap all events.
3372 * Also let task_work, etc. progress by releasing the mutex for a bit.
3374 if (need_resched()) {
3375 mutex_unlock(&ctx->uring_lock);
3377 mutex_lock(&ctx->uring_lock);
3380 mutex_unlock(&ctx->uring_lock);
3383 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
3385 unsigned int nr_events = 0;
3387 unsigned long check_cq;
3390 * Don't enter poll loop if we already have events pending.
3391 * If we do, we can potentially be spinning for commands that
3392 * already triggered a CQE (eg in error).
3394 check_cq = READ_ONCE(ctx->check_cq);
3395 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
3396 __io_cqring_overflow_flush(ctx, false);
3397 if (io_cqring_events(ctx))
3401 * Similarly do not spin if we have not informed the user of any dropped CQE.
3404 if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
3409 * If a submit got punted to a workqueue, we can have the
3410 * application entering polling for a command before it gets
3411 * issued. That app will hold the uring_lock for the duration
3412 * of the poll right here, so we need to take a breather every
3413 * now and then to ensure that the issue has a chance to add
3414 * the poll to the issued list. Otherwise we can spin here
3415 * forever, while the workqueue is stuck trying to acquire the lock.
3418 if (wq_list_empty(&ctx->iopoll_list)) {
3419 u32 tail = ctx->cached_cq_tail;
3421 mutex_unlock(&ctx->uring_lock);
3423 mutex_lock(&ctx->uring_lock);
3425 /* some requests don't go through iopoll_list */
3426 if (tail != ctx->cached_cq_tail ||
3427 wq_list_empty(&ctx->iopoll_list))
3430 ret = io_do_iopoll(ctx, !min);
3435 } while (nr_events < min && !need_resched());
3440 static void kiocb_end_write(struct io_kiocb *req)
3443 * Tell lockdep we inherited freeze protection from the submission thread.
3446 if (req->flags & REQ_F_ISREG) {
3447 struct super_block *sb = file_inode(req->file)->i_sb;
3449 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
3455 static bool io_resubmit_prep(struct io_kiocb *req)
3457 struct io_async_rw *rw = req->async_data;
3459 if (!req_has_async_data(req))
3460 return !io_req_prep_async(req);
3461 iov_iter_restore(&rw->s.iter, &rw->s.iter_state);
3465 static bool io_rw_should_reissue(struct io_kiocb *req)
3467 umode_t mode = file_inode(req->file)->i_mode;
3468 struct io_ring_ctx *ctx = req->ctx;
3470 if (!S_ISBLK(mode) && !S_ISREG(mode))
3472 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
3473 !(ctx->flags & IORING_SETUP_IOPOLL)))
3476 * If ref is dying, we might be running poll reap from the exit work.
3477 * Don't attempt to reissue from that path, just let it fail with -EAGAIN.
3480 if (percpu_ref_is_dying(&ctx->refs))
3483 * Play it safe and assume not safe to re-import and reissue if we're
3484 * not in the original thread group (or in task context).
3486 if (!same_thread_group(req->task, current) || !in_task())
3491 static bool io_resubmit_prep(struct io_kiocb *req)
3495 static bool io_rw_should_reissue(struct io_kiocb *req)
3501 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
3503 if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
3504 kiocb_end_write(req);
3505 fsnotify_modify(req->file);
3507 fsnotify_access(req->file);
3509 if (unlikely(res != req->cqe.res)) {
3510 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
3511 io_rw_should_reissue(req)) {
3512 req->flags |= REQ_F_REISSUE;
3521 static inline void io_req_task_complete(struct io_kiocb *req, bool *locked)
3523 int res = req->cqe.res;
3526 io_req_complete_state(req, res, io_put_kbuf(req, 0));
3527 io_req_add_compl_list(req);
3529 io_req_complete_post(req, res,
3530 io_put_kbuf(req, IO_URING_F_UNLOCKED));
3534 static void __io_complete_rw(struct io_kiocb *req, long res,
3535 unsigned int issue_flags)
3537 if (__io_complete_rw_common(req, res))
3539 __io_req_complete(req, issue_flags, req->cqe.res,
3540 io_put_kbuf(req, issue_flags));
3543 static void io_complete_rw(struct kiocb *kiocb, long res)
3545 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
3547 if (__io_complete_rw_common(req, res))
3550 req->io_task_work.func = io_req_task_complete;
3551 io_req_task_prio_work_add(req);
3554 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
3556 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
3558 if (kiocb->ki_flags & IOCB_WRITE)
3559 kiocb_end_write(req);
3560 if (unlikely(res != req->cqe.res)) {
3561 if (res == -EAGAIN && io_rw_should_reissue(req)) {
3562 req->flags |= REQ_F_REISSUE;
3568 /* order with io_iopoll_complete() checking ->iopoll_completed */
3569 smp_store_release(&req->iopoll_completed, 1);
3573 * After the iocb has been issued, it's safe to be found on the poll list.
3574 * Adding the kiocb to the list AFTER submission ensures that we don't
3575 * find it from an io_do_iopoll() thread before the issuer is done
3576 * accessing the kiocb cookie.
3578 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
3580 struct io_ring_ctx *ctx = req->ctx;
3581 const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
3583 /* workqueue context doesn't hold uring_lock, grab it now */
3584 if (unlikely(needs_lock))
3585 mutex_lock(&ctx->uring_lock);
3588 * Track whether we have multiple files in our lists. This will impact
3589 * how we do polling eventually, not spinning if we're on potentially
3590 * different devices.
3592 if (wq_list_empty(&ctx->iopoll_list)) {
3593 ctx->poll_multi_queue = false;
3594 } else if (!ctx->poll_multi_queue) {
3595 struct io_kiocb *list_req;
3597 list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
3599 if (list_req->file != req->file)
3600 ctx->poll_multi_queue = true;
3604 * For fast devices, IO may have already completed. If it has, add
3605 * it to the front so we find it first.
3607 if (READ_ONCE(req->iopoll_completed))
3608 wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
3610 wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
3612 if (unlikely(needs_lock)) {
3614 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
3615 * in sq thread task context or in io worker task context. If the
3616 * current task context is the sq thread, we don't need to check
3617 * whether we should wake up the sq thread.
3619 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
3620 wq_has_sleeper(&ctx->sq_data->wait))
3621 wake_up(&ctx->sq_data->wait);
3623 mutex_unlock(&ctx->uring_lock);
3627 static bool io_bdev_nowait(struct block_device *bdev)
3629 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
3633 * If we tracked the file through the SCM inflight mechanism, we could support
3634 * any file. For now, just ensure that anything potentially problematic is done inline.
3637 static bool __io_file_supports_nowait(struct file *file, umode_t mode)
3639 if (S_ISBLK(mode)) {
3640 if (IS_ENABLED(CONFIG_BLOCK) &&
3641 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
3647 if (S_ISREG(mode)) {
3648 if (IS_ENABLED(CONFIG_BLOCK) &&
3649 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
3650 file->f_op != &io_uring_fops)
3655 /* any ->read/write should understand O_NONBLOCK */
3656 if (file->f_flags & O_NONBLOCK)
3658 return file->f_mode & FMODE_NOWAIT;
3662 * If we tracked the file through the SCM inflight mechanism, we could support
3663 * any file. For now, just ensure that anything potentially problematic is done inline.
3666 static unsigned int io_file_get_flags(struct file *file)
3668 umode_t mode = file_inode(file)->i_mode;
3669 unsigned int res = 0;
3673 if (__io_file_supports_nowait(file, mode))
3675 if (io_file_need_scm(file))
3680 static inline bool io_file_supports_nowait(struct io_kiocb *req)
3682 return req->flags & REQ_F_SUPPORT_NOWAIT;
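/*
 * Parse the read/write related SQE fields. Every field is fetched with
 * READ_ONCE() since the SQ ring is shared with userspace; fixed-buffer
 * opcodes resolve their registered buffer here, with array_index_nospec()
 * clamping the untrusted buffer index.
 */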
3685 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3687 struct kiocb *kiocb = &req->rw.kiocb;
3691 kiocb->ki_pos = READ_ONCE(sqe->off);
3692 /* used for fixed read/write too - just read unconditionally */
3693 req->buf_index = READ_ONCE(sqe->buf_index);
3695 if (req->opcode == IORING_OP_READ_FIXED ||
3696 req->opcode == IORING_OP_WRITE_FIXED) {
3697 struct io_ring_ctx *ctx = req->ctx;
3700 if (unlikely(req->buf_index >= ctx->nr_user_bufs))
3702 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
3703 req->imu = ctx->user_bufs[index];
3704 io_req_set_rsrc_node(req, ctx, 0);
3707 ioprio = READ_ONCE(sqe->ioprio);
3709 ret = ioprio_check_cap(ioprio);
3713 kiocb->ki_ioprio = ioprio;
3715 kiocb->ki_ioprio = get_current_ioprio();
3718 req->rw.addr = READ_ONCE(sqe->addr);
3719 req->rw.len = READ_ONCE(sqe->len);
3720 req->rw.flags = READ_ONCE(sqe->rw_flags);
3724 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
3730 case -ERESTARTNOINTR:
3731 case -ERESTARTNOHAND:
3732 case -ERESTART_RESTARTBLOCK:
3734 * We can't just restart the syscall, since previously
3735 * submitted sqes may already be in progress. Just fail this IO with EINTR.
3741 kiocb->ki_complete(kiocb, ret);
3745 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
3747 struct kiocb *kiocb = &req->rw.kiocb;
3749 if (kiocb->ki_pos != -1)
3750 return &kiocb->ki_pos;
3752 if (!(req->file->f_mode & FMODE_STREAM)) {
3753 req->flags |= REQ_F_CUR_POS;
3754 kiocb->ki_pos = req->file->f_pos;
3755 return &kiocb->ki_pos;
3762 static void kiocb_done(struct io_kiocb *req, ssize_t ret,
3763 unsigned int issue_flags)
3765 struct io_async_rw *io = req->async_data;
3767 /* add previously done IO, if any */
3768 if (req_has_async_data(req) && io->bytes_done > 0) {
3770 ret = io->bytes_done;
3772 ret += io->bytes_done;
3775 if (req->flags & REQ_F_CUR_POS)
3776 req->file->f_pos = req->rw.kiocb.ki_pos;
3777 if (ret >= 0 && (req->rw.kiocb.ki_complete == io_complete_rw))
3778 __io_complete_rw(req, ret, issue_flags);
3780 io_rw_done(&req->rw.kiocb, ret);
3782 if (req->flags & REQ_F_REISSUE) {
3783 req->flags &= ~REQ_F_REISSUE;
3784 if (io_resubmit_prep(req))
3785 io_req_task_queue_reissue(req);
3787 io_req_task_queue_fail(req, ret);
3791 static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3792 struct io_mapped_ubuf *imu)
3794 size_t len = req->rw.len;
3795 u64 buf_end, buf_addr = req->rw.addr;
3798 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
3800 /* not inside the mapped region */
3801 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
3805 * May not be a start of buffer, set size appropriately
3806 * and advance us to the beginning.
3808 offset = buf_addr - imu->ubuf;
3809 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
3813 * Don't use iov_iter_advance() here, as it's really slow for
3814 * using the latter parts of a big fixed buffer - it iterates
3815 * over each segment manually. We can cheat a bit here, because we know that:
3818 * 1) it's a BVEC iter, we set it up
3819 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3820 * first and last bvec
3822 * So just find our index, and adjust the iterator afterwards.
3823 * If the offset is within the first bvec (or the whole first
3824 * bvec), just use iov_iter_advance(). This makes it easier
3825 * since we can just skip the first segment, which may not
3826 * be PAGE_SIZE aligned.
3828 const struct bio_vec *bvec = imu->bvec;
3830 if (offset <= bvec->bv_len) {
3831 iov_iter_advance(iter, offset);
3833 unsigned long seg_skip;
3835 /* skip first vec */
3836 offset -= bvec->bv_len;
3837 seg_skip = 1 + (offset >> PAGE_SHIFT);
3839 iter->bvec = bvec + seg_skip;
3840 iter->nr_segs -= seg_skip;
3841 iter->count -= bvec->bv_len + offset;
3842 iter->iov_offset = offset & ~PAGE_MASK;
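/*
 * A worked example of the skip math above, assuming 4 KiB pages: with a
 * first bvec of 3072 bytes and a requested offset of 10000 bytes into the
 * registered buffer, we take the else branch with offset = 10000 - 3072 =
 * 6928, giving seg_skip = 1 + (6928 >> 12) = 2 and iov_offset = 6928 &
 * 4095 = 2832. That is, the iterator starts 2832 bytes into the third
 * bvec, which is 3072 + 4096 + 2832 = 10000 bytes into the buffer, as
 * expected.
 */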
3849 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3850 unsigned int issue_flags)
3852 if (WARN_ON_ONCE(!req->imu))
3854 return __io_import_fixed(req, rw, iter, req->imu);
3857 static int io_buffer_add_list(struct io_ring_ctx *ctx,
3858 struct io_buffer_list *bl, unsigned int bgid)
3861 if (bgid < BGID_ARRAY)
3864 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
3867 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
3868 struct io_buffer_list *bl)
3870 if (!list_empty(&bl->buf_list)) {
3871 struct io_buffer *kbuf;
3873 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
3874 list_del(&kbuf->list);
3875 if (*len > kbuf->len)
3877 req->flags |= REQ_F_BUFFER_SELECTED;
3879 req->buf_index = kbuf->bid;
3880 return u64_to_user_ptr(kbuf->addr);
3885 static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
3886 struct io_buffer_list *bl,
3887 unsigned int issue_flags)
3889 struct io_uring_buf_ring *br = bl->buf_ring;
3890 struct io_uring_buf *buf;
3891 __u16 head = bl->head;
3893 if (unlikely(smp_load_acquire(&br->tail) == head))
3897 if (head < IO_BUFFER_LIST_BUF_PER_PAGE) {
3898 buf = &br->bufs[head];
3900 int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
3901 int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
3902 buf = page_address(bl->buf_pages[index]);
3905 if (*len > buf->len)
3907 req->flags |= REQ_F_BUFFER_RING;
3909 req->buf_index = buf->bid;
3911 if (issue_flags & IO_URING_F_UNLOCKED) {
3913 * If we came in unlocked, we have no choice but to consume the
3914 * buffer here. This does mean it'll be pinned until the IO
3915 * completes. But coming in unlocked means we're in io-wq
3916 * context, hence there should be no further retry. For the
3917 * locked case, the caller must ensure to call the commit when
3918 * the transfer completes (or if we get -EAGAIN and must poll or retry).
3921 req->buf_list = NULL;
3924 return u64_to_user_ptr(buf->addr);
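/*
 * Ring-mapped provided buffers: userspace owns the tail of the buf_ring
 * and the kernel consumes from the head, so the smp_load_acquire() on
 * ->tail above pairs with the application's release store when it
 * publishes new entries. A minimal userspace-side sketch (illustrative
 * only, not part of this file; liburing provides proper helpers), for a
 * power-of-two ring of ring_entries entries mapped at br:
 *
 *	struct io_uring_buf *e = &br->bufs[tail & (ring_entries - 1)];
 *	e->addr = (unsigned long) mybuf;
 *	e->len  = mybuf_len;
 *	e->bid  = mybuf_id;
 *	store_release(&br->tail, tail + 1);  // pairs with the acquire above
 *
 * where store_release() stands in for whatever release-ordered store the
 * application uses, and mybuf/mybuf_len/mybuf_id are placeholders.
 */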
3927 static void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
3928 unsigned int issue_flags)
3930 struct io_ring_ctx *ctx = req->ctx;
3931 struct io_buffer_list *bl;
3932 void __user *ret = NULL;
3934 io_ring_submit_lock(req->ctx, issue_flags);
3936 bl = io_buffer_get_list(ctx, req->buf_index);
3938 if (bl->buf_nr_pages)
3939 ret = io_ring_buffer_select(req, len, bl, issue_flags);
3941 ret = io_provided_buffer_select(req, len, bl);
3943 io_ring_submit_unlock(req->ctx, issue_flags);
3947 #ifdef CONFIG_COMPAT
3948 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3949 unsigned int issue_flags)
3951 struct compat_iovec __user *uiov;
3952 compat_ssize_t clen;
3956 uiov = u64_to_user_ptr(req->rw.addr);
3957 if (!access_ok(uiov, sizeof(*uiov)))
3959 if (__get_user(clen, &uiov->iov_len))
3965 buf = io_buffer_select(req, &len, issue_flags);
3968 req->rw.addr = (unsigned long) buf;
3969 iov[0].iov_base = buf;
3970 req->rw.len = iov[0].iov_len = (compat_size_t) len;
3975 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3976 unsigned int issue_flags)
3978 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3982 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3985 len = iov[0].iov_len;
3988 buf = io_buffer_select(req, &len, issue_flags);
3991 req->rw.addr = (unsigned long) buf;
3992 iov[0].iov_base = buf;
3993 req->rw.len = iov[0].iov_len = len;
3997 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3998 unsigned int issue_flags)
4000 if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
4001 iov[0].iov_base = u64_to_user_ptr(req->rw.addr);
4002 iov[0].iov_len = req->rw.len;
4005 if (req->rw.len != 1)
4008 #ifdef CONFIG_COMPAT
4009 if (req->ctx->compat)
4010 return io_compat_import(req, iov, issue_flags);
4013 return __io_iov_buffer_select(req, iov, issue_flags);
4016 static inline bool io_do_buffer_select(struct io_kiocb *req)
4018 if (!(req->flags & REQ_F_BUFFER_SELECT))
4020 return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
4023 static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
4024 struct io_rw_state *s,
4025 unsigned int issue_flags)
4027 struct iov_iter *iter = &s->iter;
4028 u8 opcode = req->opcode;
4029 struct iovec *iovec;
4034 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
4035 ret = io_import_fixed(req, rw, iter, issue_flags);
4037 return ERR_PTR(ret);
4041 buf = u64_to_user_ptr(req->rw.addr);
4042 sqe_len = req->rw.len;
4044 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
4045 if (io_do_buffer_select(req)) {
4046 buf = io_buffer_select(req, &sqe_len, issue_flags);
4048 return ERR_PTR(-ENOBUFS);
4049 req->rw.addr = (unsigned long) buf;
4050 req->rw.len = sqe_len;
4053 ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter);
4055 return ERR_PTR(ret);
4059 iovec = s->fast_iov;
4060 if (req->flags & REQ_F_BUFFER_SELECT) {
4061 ret = io_iov_buffer_select(req, iovec, issue_flags);
4063 return ERR_PTR(ret);
4064 iov_iter_init(iter, rw, iovec, 1, iovec->iov_len);
4068 ret = __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
4070 if (unlikely(ret < 0))
4071 return ERR_PTR(ret);
4075 static inline int io_import_iovec(int rw, struct io_kiocb *req,
4076 struct iovec **iovec, struct io_rw_state *s,
4077 unsigned int issue_flags)
4079 *iovec = __io_import_iovec(rw, req, s, issue_flags);
4080 if (unlikely(IS_ERR(*iovec)))
4081 return PTR_ERR(*iovec);
4083 iov_iter_save_state(&s->iter, &s->iter_state);
4087 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
4089 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
4093 * For files that don't have ->read_iter() and ->write_iter(), handle them
4094 * by looping over ->read() or ->write() manually.
4096 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
4098 struct kiocb *kiocb = &req->rw.kiocb;
4099 struct file *file = req->file;
4104 * Don't support polled IO through this interface, and we can't
4105 * support non-blocking either. For the latter, this just causes
4106 * the kiocb to be handled from an async context.
4108 if (kiocb->ki_flags & IOCB_HIPRI)
4110 if ((kiocb->ki_flags & IOCB_NOWAIT) &&
4111 !(kiocb->ki_filp->f_flags & O_NONBLOCK))
4114 ppos = io_kiocb_ppos(kiocb);
4116 while (iov_iter_count(iter)) {
4120 if (!iov_iter_is_bvec(iter)) {
4121 iovec = iov_iter_iovec(iter);
4123 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
4124 iovec.iov_len = req->rw.len;
4128 nr = file->f_op->read(file, iovec.iov_base,
4129 iovec.iov_len, ppos);
4131 nr = file->f_op->write(file, iovec.iov_base,
4132 iovec.iov_len, ppos);
4141 if (!iov_iter_is_bvec(iter)) {
4142 iov_iter_advance(iter, nr);
4149 if (nr != iovec.iov_len)
4156 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
4157 const struct iovec *fast_iov, struct iov_iter *iter)
4159 struct io_async_rw *rw = req->async_data;
4161 memcpy(&rw->s.iter, iter, sizeof(*iter));
4162 rw->free_iovec = iovec;
4164 /* can only be fixed buffers, no need to do anything */
4165 if (iov_iter_is_bvec(iter))
4168 unsigned iov_off = 0;
4170 rw->s.iter.iov = rw->s.fast_iov;
4171 if (iter->iov != fast_iov) {
4172 iov_off = iter->iov - fast_iov;
4173 rw->s.iter.iov += iov_off;
4175 if (rw->s.fast_iov != fast_iov)
4176 memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off,
4177 sizeof(struct iovec) * iter->nr_segs);
4179 req->flags |= REQ_F_NEED_CLEANUP;
4183 static inline bool io_alloc_async_data(struct io_kiocb *req)
4185 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
4186 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
4187 if (req->async_data) {
4188 req->flags |= REQ_F_ASYNC_DATA;
4194 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
4195 struct io_rw_state *s, bool force)
4197 if (!force && !io_op_defs[req->opcode].needs_async_setup)
4199 if (!req_has_async_data(req)) {
4200 struct io_async_rw *iorw;
4202 if (io_alloc_async_data(req)) {
4207 io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
4208 iorw = req->async_data;
4209 /* we've copied and mapped the iter, ensure state is saved */
4210 iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
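/*
 * For opcodes that may need to be retried from a different context, the
 * iovec and iov_iter (plus its saved state) are copied into ->async_data
 * by io_setup_async_rw()/io_req_map_rw() above, so a later attempt can
 * restore exactly where the previous one left off.
 */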
4215 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
4217 struct io_async_rw *iorw = req->async_data;
4221 /* submission path, ->uring_lock should already be taken */
4222 ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
4223 if (unlikely(ret < 0))
4226 iorw->bytes_done = 0;
4227 iorw->free_iovec = iov;
4229 req->flags |= REQ_F_NEED_CLEANUP;
4233 static int io_readv_prep_async(struct io_kiocb *req)
4235 return io_rw_prep_async(req, READ);
4238 static int io_writev_prep_async(struct io_kiocb *req)
4240 return io_rw_prep_async(req, WRITE);
4244 * This is our waitqueue callback handler, registered through __folio_lock_async()
4245 * when we initially tried to do the IO with the iocb armed our waitqueue.
4246 * This gets called when the page is unlocked, and we generally expect that to
4247 * happen when the page IO is completed and the page is now uptodate. This will
4248 * queue a task_work based retry of the operation, attempting to copy the data
4249 * again. If the latter fails because the page was NOT uptodate, then we will
4250 * do a thread based blocking retry of the operation. That's the unexpected slow path.
4253 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
4254 int sync, void *arg)
4256 struct wait_page_queue *wpq;
4257 struct io_kiocb *req = wait->private;
4258 struct wait_page_key *key = arg;
4260 wpq = container_of(wait, struct wait_page_queue, wait);
4262 if (!wake_page_match(wpq, key))
4265 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
4266 list_del_init(&wait->entry);
4267 io_req_task_queue(req);
4272 * This controls whether a given IO request should be armed for async page
4273 * based retry. If we return false here, the request is handed to the async
4274 * worker threads for retry. If we're doing buffered reads on a regular file,
4275 * we prepare a private wait_page_queue entry and retry the operation. This
4276 * will either succeed because the page is now uptodate and unlocked, or it
4277 * will register a callback when the page is unlocked at IO completion. Through
4278 * that callback, io_uring uses task_work to setup a retry of the operation.
4279 * That retry will attempt the buffered read again. The retry will generally
4280 * succeed, or in rare cases where it fails, we then fall back to using the
4281 * async worker threads for a blocking retry.
4283 static bool io_rw_should_retry(struct io_kiocb *req)
4285 struct io_async_rw *rw = req->async_data;
4286 struct wait_page_queue *wait = &rw->wpq;
4287 struct kiocb *kiocb = &req->rw.kiocb;
4289 /* never retry for NOWAIT, we just complete with -EAGAIN */
4290 if (req->flags & REQ_F_NOWAIT)
4293 /* Only for buffered IO */
4294 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
4298 * just use poll if we can, and don't attempt if the fs doesn't
4299 * support callback based unlocks
4301 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
4304 wait->wait.func = io_async_buf_func;
4305 wait->wait.private = req;
4306 wait->wait.flags = 0;
4307 INIT_LIST_HEAD(&wait->wait.entry);
4308 kiocb->ki_flags |= IOCB_WAITQ;
4309 kiocb->ki_flags &= ~IOCB_NOWAIT;
4310 kiocb->ki_waitq = wait;
4314 static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
4316 if (likely(req->file->f_op->read_iter))
4317 return call_read_iter(req->file, &req->rw.kiocb, iter);
4318 else if (req->file->f_op->read)
4319 return loop_rw_iter(READ, req, iter);
4324 static bool need_read_all(struct io_kiocb *req)
4326 return req->flags & REQ_F_ISREG ||
4327 S_ISBLK(file_inode(req->file)->i_mode);
4330 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
4332 struct kiocb *kiocb = &req->rw.kiocb;
4333 struct io_ring_ctx *ctx = req->ctx;
4334 struct file *file = req->file;
4337 if (unlikely(!file || !(file->f_mode & mode)))
4340 if (!io_req_ffs_set(req))
4341 req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
4343 kiocb->ki_flags = iocb_flags(file);
4344 ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
4349 * If the file is marked O_NONBLOCK, still allow retry for it if it
4350 * supports async. Otherwise it's impossible to use O_NONBLOCK files
4351 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
4353 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
4354 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
4355 req->flags |= REQ_F_NOWAIT;
4357 if (ctx->flags & IORING_SETUP_IOPOLL) {
4358 if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
4361 kiocb->private = NULL;
4362 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
4363 kiocb->ki_complete = io_complete_rw_iopoll;
4364 req->iopoll_completed = 0;
4366 if (kiocb->ki_flags & IOCB_HIPRI)
4368 kiocb->ki_complete = io_complete_rw;
4374 static int io_read(struct io_kiocb *req, unsigned int issue_flags)
4376 struct io_rw_state __s, *s = &__s;
4377 struct iovec *iovec;
4378 struct kiocb *kiocb = &req->rw.kiocb;
4379 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4380 struct io_async_rw *rw;
4384 if (!req_has_async_data(req)) {
4385 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
4386 if (unlikely(ret < 0))
4390 * Safe and required to re-import if we're using provided
4391 * buffers, as we dropped the selected one before retry.
4393 if (req->flags & REQ_F_BUFFER_SELECT) {
4394 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
4395 if (unlikely(ret < 0))
4399 rw = req->async_data;
4402 * We come here from an earlier attempt, restore our state to
4403 * match in case it doesn't. It's cheap enough that we don't
4404 * need to make this conditional.
4406 iov_iter_restore(&s->iter, &s->iter_state);
4409 ret = io_rw_init_file(req, FMODE_READ);
4410 if (unlikely(ret)) {
4414 req->cqe.res = iov_iter_count(&s->iter);
4416 if (force_nonblock) {
4417 /* If the file doesn't support async, just async punt */
4418 if (unlikely(!io_file_supports_nowait(req))) {
4419 ret = io_setup_async_rw(req, iovec, s, true);
4420 return ret ?: -EAGAIN;
4422 kiocb->ki_flags |= IOCB_NOWAIT;
4424 /* Ensure we clear previously set non-block flag */
4425 kiocb->ki_flags &= ~IOCB_NOWAIT;
4428 ppos = io_kiocb_update_pos(req);
4430 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
4431 if (unlikely(ret)) {
4436 ret = io_iter_do_read(req, &s->iter);
4438 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
4439 req->flags &= ~REQ_F_REISSUE;
4440 /* if we can poll, just do that */
4441 if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
4443 /* IOPOLL retry should happen for io-wq threads */
4444 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
4446 /* no retry on NONBLOCK nor RWF_NOWAIT */
4447 if (req->flags & REQ_F_NOWAIT)
4450 } else if (ret == -EIOCBQUEUED) {
4452 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
4453 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
4454 /* read all, failed, already did sync or don't want to retry */
4459 * Don't depend on the iter state matching what was consumed, or being
4460 * untouched in case of error. Restore it and we'll advance it
4461 * manually if we need to.
4463 iov_iter_restore(&s->iter, &s->iter_state);
4465 ret2 = io_setup_async_rw(req, iovec, s, true);
4470 rw = req->async_data;
4473 * Now use our persistent iterator and state, if we aren't already.
4474 * We've restored and mapped the iter to match.
4479 * We end up here because of a partial read, either from
4480 * above or inside this loop. Advance the iter by the bytes
4481 * that were consumed.
4483 iov_iter_advance(&s->iter, ret);
4484 if (!iov_iter_count(&s->iter))
4486 rw->bytes_done += ret;
4487 iov_iter_save_state(&s->iter, &s->iter_state);
4489 /* if we can retry, do so with the callbacks armed */
4490 if (!io_rw_should_retry(req)) {
4491 kiocb->ki_flags &= ~IOCB_WAITQ;
4496 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
4497 * we get -EIOCBQUEUED, then we'll get a notification when the
4498 * desired page gets unlocked. We can also get a partial read
4499 * here, and if we do, then just retry at the new offset.
4501 ret = io_iter_do_read(req, &s->iter);
4502 if (ret == -EIOCBQUEUED)
4504 /* we got some bytes, but not all. retry. */
4505 kiocb->ki_flags &= ~IOCB_WAITQ;
4506 iov_iter_restore(&s->iter, &s->iter_state);
4509 kiocb_done(req, ret, issue_flags);
4511 /* it's faster to check here than to delegate to kfree */
4517 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
4519 struct io_rw_state __s, *s = &__s;
4520 struct iovec *iovec;
4521 struct kiocb *kiocb = &req->rw.kiocb;
4522 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4526 if (!req_has_async_data(req)) {
4527 ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
4528 if (unlikely(ret < 0))
4531 struct io_async_rw *rw = req->async_data;
4534 iov_iter_restore(&s->iter, &s->iter_state);
4537 ret = io_rw_init_file(req, FMODE_WRITE);
4538 if (unlikely(ret)) {
4542 req->cqe.res = iov_iter_count(&s->iter);
4544 if (force_nonblock) {
4545 /* If the file doesn't support async, just async punt */
4546 if (unlikely(!io_file_supports_nowait(req)))
4549 /* file path doesn't support NOWAIT for non-direct_IO */
4550 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
4551 (req->flags & REQ_F_ISREG))
4554 kiocb->ki_flags |= IOCB_NOWAIT;
4556 /* Ensure we clear previously set non-block flag */
4557 kiocb->ki_flags &= ~IOCB_NOWAIT;
4560 ppos = io_kiocb_update_pos(req);
4562 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
4567 * Open-code file_start_write here to grab freeze protection,
4568 * which will be released by another thread in
4569 * io_complete_rw(). Fool lockdep by telling it the lock got
4570 * released so that it doesn't complain about the held lock when
4571 * we return to userspace.
4573 if (req->flags & REQ_F_ISREG) {
4574 sb_start_write(file_inode(req->file)->i_sb);
4575 __sb_writers_release(file_inode(req->file)->i_sb,
4578 kiocb->ki_flags |= IOCB_WRITE;
4580 if (likely(req->file->f_op->write_iter))
4581 ret2 = call_write_iter(req->file, kiocb, &s->iter);
4582 else if (req->file->f_op->write)
4583 ret2 = loop_rw_iter(WRITE, req, &s->iter);
4587 if (req->flags & REQ_F_REISSUE) {
4588 req->flags &= ~REQ_F_REISSUE;
4593 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
4594 * retry them without IOCB_NOWAIT.
4596 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
4598 /* no retry on NONBLOCK nor RWF_NOWAIT */
4599 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
4601 if (!force_nonblock || ret2 != -EAGAIN) {
4602 /* IOPOLL retry should happen for io-wq threads */
4603 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
4606 kiocb_done(req, ret2, issue_flags);
4609 iov_iter_restore(&s->iter, &s->iter_state);
4610 ret = io_setup_async_rw(req, iovec, s, false);
4611 return ret ?: -EAGAIN;
4614 /* it's reportedly faster than delegating the null check to kfree() */
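/*
 * Both io_read() and io_write() above follow the same pattern: attempt the
 * IO with IOCB_NOWAIT when called from the non-blocking submission path,
 * and on -EAGAIN preserve the iterator state in ->async_data and punt the
 * request to be retried from a context that may block (io-wq, or the
 * buffered-read waitqueue retry for reads).
 */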
4620 static int io_renameat_prep(struct io_kiocb *req,
4621 const struct io_uring_sqe *sqe)
4623 struct io_rename *ren = &req->rename;
4624 const char __user *oldf, *newf;
4626 if (sqe->buf_index || sqe->splice_fd_in)
4628 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4631 ren->old_dfd = READ_ONCE(sqe->fd);
4632 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
4633 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4634 ren->new_dfd = READ_ONCE(sqe->len);
4635 ren->flags = READ_ONCE(sqe->rename_flags);
4637 ren->oldpath = getname(oldf);
4638 if (IS_ERR(ren->oldpath))
4639 return PTR_ERR(ren->oldpath);
4641 ren->newpath = getname(newf);
4642 if (IS_ERR(ren->newpath)) {
4643 putname(ren->oldpath);
4644 return PTR_ERR(ren->newpath);
4647 req->flags |= REQ_F_NEED_CLEANUP;
4651 static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
4653 struct io_rename *ren = &req->rename;
4656 if (issue_flags & IO_URING_F_NONBLOCK)
4659 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
4660 ren->newpath, ren->flags);
4662 req->flags &= ~REQ_F_NEED_CLEANUP;
4663 io_req_complete(req, ret);
4667 static inline void __io_xattr_finish(struct io_kiocb *req)
4669 struct io_xattr *ix = &req->xattr;
4672 putname(ix->filename);
4674 kfree(ix->ctx.kname);
4675 kvfree(ix->ctx.kvalue);
4678 static void io_xattr_finish(struct io_kiocb *req, int ret)
4680 req->flags &= ~REQ_F_NEED_CLEANUP;
4682 __io_xattr_finish(req);
4683 io_req_complete(req, ret);
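/*
 * The xattr prep helpers below copy the attribute name (and, for setxattr,
 * the value) from userspace at prep time and mark the request with
 * REQ_F_NEED_CLEANUP; io_xattr_finish() above releases the filename, kname
 * and kvalue allocations once the operation completes.
 */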
4686 static int __io_getxattr_prep(struct io_kiocb *req,
4687 const struct io_uring_sqe *sqe)
4689 struct io_xattr *ix = &req->xattr;
4690 const char __user *name;
4693 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4696 ix->filename = NULL;
4697 ix->ctx.kvalue = NULL;
4698 name = u64_to_user_ptr(READ_ONCE(sqe->addr));
4699 ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4700 ix->ctx.size = READ_ONCE(sqe->len);
4701 ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
4706 ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL);
4710 ret = strncpy_from_user(ix->ctx.kname->name, name,
4711 sizeof(ix->ctx.kname->name));
4712 if (!ret || ret == sizeof(ix->ctx.kname->name))
4715 kfree(ix->ctx.kname);
4719 req->flags |= REQ_F_NEED_CLEANUP;
4723 static int io_fgetxattr_prep(struct io_kiocb *req,
4724 const struct io_uring_sqe *sqe)
4726 return __io_getxattr_prep(req, sqe);
4729 static int io_getxattr_prep(struct io_kiocb *req,
4730 const struct io_uring_sqe *sqe)
4732 struct io_xattr *ix = &req->xattr;
4733 const char __user *path;
4736 ret = __io_getxattr_prep(req, sqe);
4740 path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
4742 ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
4743 if (IS_ERR(ix->filename)) {
4744 ret = PTR_ERR(ix->filename);
4745 ix->filename = NULL;
4751 static int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
4753 struct io_xattr *ix = &req->xattr;
4756 if (issue_flags & IO_URING_F_NONBLOCK)
4759 ret = do_getxattr(mnt_user_ns(req->file->f_path.mnt),
4760 req->file->f_path.dentry,
4763 io_xattr_finish(req, ret);
4767 static int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
4769 struct io_xattr *ix = &req->xattr;
4770 unsigned int lookup_flags = LOOKUP_FOLLOW;
4774 if (issue_flags & IO_URING_F_NONBLOCK)
4778 ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
4780 ret = do_getxattr(mnt_user_ns(path.mnt),
4785 if (retry_estale(ret, lookup_flags)) {
4786 lookup_flags |= LOOKUP_REVAL;
4791 io_xattr_finish(req, ret);
4795 static int __io_setxattr_prep(struct io_kiocb *req,
4796 const struct io_uring_sqe *sqe)
4798 struct io_xattr *ix = &req->xattr;
4799 const char __user *name;
4802 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4805 ix->filename = NULL;
4806 name = u64_to_user_ptr(READ_ONCE(sqe->addr));
4807 ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4808 ix->ctx.kvalue = NULL;
4809 ix->ctx.size = READ_ONCE(sqe->len);
4810 ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
4812 ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL);
4816 ret = setxattr_copy(name, &ix->ctx);
4818 kfree(ix->ctx.kname);
4822 req->flags |= REQ_F_NEED_CLEANUP;
4826 static int io_setxattr_prep(struct io_kiocb *req,
4827 const struct io_uring_sqe *sqe)
4829 struct io_xattr *ix = &req->xattr;
4830 const char __user *path;
4833 ret = __io_setxattr_prep(req, sqe);
4837 path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
4839 ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
4840 if (IS_ERR(ix->filename)) {
4841 ret = PTR_ERR(ix->filename);
4842 ix->filename = NULL;
4848 static int io_fsetxattr_prep(struct io_kiocb *req,
4849 const struct io_uring_sqe *sqe)
4851 return __io_setxattr_prep(req, sqe);
4854 static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags,
4857 struct io_xattr *ix = &req->xattr;
4860 ret = mnt_want_write(path->mnt);
4862 ret = do_setxattr(mnt_user_ns(path->mnt), path->dentry, &ix->ctx);
4863 mnt_drop_write(path->mnt);
4869 static int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
4873 if (issue_flags & IO_URING_F_NONBLOCK)
4876 ret = __io_setxattr(req, issue_flags, &req->file->f_path);
4877 io_xattr_finish(req, ret);
4882 static int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
4884 struct io_xattr *ix = &req->xattr;
4885 unsigned int lookup_flags = LOOKUP_FOLLOW;
4889 if (issue_flags & IO_URING_F_NONBLOCK)
4893 ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
4895 ret = __io_setxattr(req, issue_flags, &path);
4897 if (retry_estale(ret, lookup_flags)) {
4898 lookup_flags |= LOOKUP_REVAL;
4903 io_xattr_finish(req, ret);
4907 static int io_unlinkat_prep(struct io_kiocb *req,
4908 const struct io_uring_sqe *sqe)
4910 struct io_unlink *un = &req->unlink;
4911 const char __user *fname;
4913 if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
4915 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4918 un->dfd = READ_ONCE(sqe->fd);
4920 un->flags = READ_ONCE(sqe->unlink_flags);
4921 if (un->flags & ~AT_REMOVEDIR)
4924 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4925 un->filename = getname(fname);
4926 if (IS_ERR(un->filename))
4927 return PTR_ERR(un->filename);
4929 req->flags |= REQ_F_NEED_CLEANUP;
4933 static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
4935 struct io_unlink *un = &req->unlink;
4938 if (issue_flags & IO_URING_F_NONBLOCK)
4941 if (un->flags & AT_REMOVEDIR)
4942 ret = do_rmdir(un->dfd, un->filename);
4944 ret = do_unlinkat(un->dfd, un->filename);
4946 req->flags &= ~REQ_F_NEED_CLEANUP;
4947 io_req_complete(req, ret);
4951 static int io_mkdirat_prep(struct io_kiocb *req,
4952 const struct io_uring_sqe *sqe)
4954 struct io_mkdir *mkd = &req->mkdir;
4955 const char __user *fname;
4957 if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
4959 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4962 mkd->dfd = READ_ONCE(sqe->fd);
4963 mkd->mode = READ_ONCE(sqe->len);
4965 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4966 mkd->filename = getname(fname);
4967 if (IS_ERR(mkd->filename))
4968 return PTR_ERR(mkd->filename);
4970 req->flags |= REQ_F_NEED_CLEANUP;
4974 static int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
4976 struct io_mkdir *mkd = &req->mkdir;
4979 if (issue_flags & IO_URING_F_NONBLOCK)
4982 ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
4984 req->flags &= ~REQ_F_NEED_CLEANUP;
4985 io_req_complete(req, ret);
4989 static int io_symlinkat_prep(struct io_kiocb *req,
4990 const struct io_uring_sqe *sqe)
4992 struct io_symlink *sl = &req->symlink;
4993 const char __user *oldpath, *newpath;
4995 if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
4997 if (unlikely(req->flags & REQ_F_FIXED_FILE))
5000 sl->new_dfd = READ_ONCE(sqe->fd);
5001 oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
5002 newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5004 sl->oldpath = getname(oldpath);
5005 if (IS_ERR(sl->oldpath))
5006 return PTR_ERR(sl->oldpath);
5008 sl->newpath = getname(newpath);
5009 if (IS_ERR(sl->newpath)) {
5010 putname(sl->oldpath);
5011 return PTR_ERR(sl->newpath);
5014 req->flags |= REQ_F_NEED_CLEANUP;
5018 static int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
5020 struct io_symlink *sl = &req->symlink;
5023 if (issue_flags & IO_URING_F_NONBLOCK)
5026 ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
5028 req->flags &= ~REQ_F_NEED_CLEANUP;
5029 io_req_complete(req, ret);
5033 static int io_linkat_prep(struct io_kiocb *req,
5034 const struct io_uring_sqe *sqe)
5036 struct io_hardlink *lnk = &req->hardlink;
5037 const char __user *oldf, *newf;
5039 if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
5041 if (unlikely(req->flags & REQ_F_FIXED_FILE))
5044 lnk->old_dfd = READ_ONCE(sqe->fd);
5045 lnk->new_dfd = READ_ONCE(sqe->len);
5046 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
5047 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5048 lnk->flags = READ_ONCE(sqe->hardlink_flags);
5050 lnk->oldpath = getname(oldf);
5051 if (IS_ERR(lnk->oldpath))
5052 return PTR_ERR(lnk->oldpath);
5054 lnk->newpath = getname(newf);
5055 if (IS_ERR(lnk->newpath)) {
5056 putname(lnk->oldpath);
5057 return PTR_ERR(lnk->newpath);
5060 req->flags |= REQ_F_NEED_CLEANUP;
5064 static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
5066 struct io_hardlink *lnk = &req->hardlink;
5069 if (issue_flags & IO_URING_F_NONBLOCK)
5072 ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
5073 lnk->newpath, lnk->flags);
5075 req->flags &= ~REQ_F_NEED_CLEANUP;
5076 io_req_complete(req, ret);
5080 static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
5082 req->uring_cmd.task_work_cb(&req->uring_cmd);
5085 void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
5086 void (*task_work_cb)(struct io_uring_cmd *))
5088 struct io_kiocb *req = container_of(ioucmd, struct io_kiocb, uring_cmd);
5090 req->uring_cmd.task_work_cb = task_work_cb;
5091 req->io_task_work.func = io_uring_cmd_work;
5092 io_req_task_prio_work_add(req);
5094 EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
5097 * Called by consumers of io_uring_cmd, if they originally returned
5098 * -EIOCBQUEUED upon receiving the command.
5100 void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
5102 struct io_kiocb *req = container_of(ioucmd, struct io_kiocb, uring_cmd);
5106 if (req->ctx->flags & IORING_SETUP_CQE32)
5107 __io_req_complete32(req, 0, ret, 0, res2, 0);
5109 io_req_complete(req, ret);
5111 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
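/*
 * Illustrative sketch, not part of the original file: one way an external
 * consumer (a driver's ->uring_cmd() handler) might use the two exported
 * helpers above. The my_* names and the command execution details are
 * hypothetical; only io_uring_cmd_complete_in_task() and io_uring_cmd_done()
 * are real APIs from this file. Kept under #if 0 since it is an example only.
 */
#if 0
/* final completion; runs in the submitting task's context */
static void my_cmd_finish(struct io_uring_cmd *ioucmd)
{
	io_uring_cmd_done(ioucmd, 0, 0);
}

/* hypothetical hardware-completion path, e.g. an IRQ handler */
static void my_cmd_hw_done(struct io_uring_cmd *ioucmd)
{
	/* punt posting the CQE back to task context */
	io_uring_cmd_complete_in_task(ioucmd, my_cmd_finish);
}

/* a driver's ->uring_cmd() file operation */
static int my_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	/* start the command; my_cmd_hw_done() runs once it finishes */
	return -EIOCBQUEUED;
}
#endif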
5113 static int io_uring_cmd_prep_async(struct io_kiocb *req)
5117 cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);
5119 memcpy(req->async_data, req->uring_cmd.cmd, cmd_size);
5123 static int io_uring_cmd_prep(struct io_kiocb *req,
5124 const struct io_uring_sqe *sqe)
5126 struct io_uring_cmd *ioucmd = &req->uring_cmd;
5130 ioucmd->cmd = sqe->cmd;
5131 ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
5135 static int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
5137 struct io_uring_cmd *ioucmd = &req->uring_cmd;
5138 struct io_ring_ctx *ctx = req->ctx;
5139 struct file *file = req->file;
5142 if (!req->file->f_op->uring_cmd)
5145 if (ctx->flags & IORING_SETUP_SQE128)
5146 issue_flags |= IO_URING_F_SQE128;
5147 if (ctx->flags & IORING_SETUP_CQE32)
5148 issue_flags |= IO_URING_F_CQE32;
5149 if (ctx->flags & IORING_SETUP_IOPOLL)
5150 issue_flags |= IO_URING_F_IOPOLL;
5152 if (req_has_async_data(req))
5153 ioucmd->cmd = req->async_data;
5155 ret = file->f_op->uring_cmd(ioucmd, issue_flags);
5156 if (ret == -EAGAIN) {
5157 if (!req_has_async_data(req)) {
5158 if (io_alloc_async_data(req))
5160 io_uring_cmd_prep_async(req);
5165 if (ret != -EIOCBQUEUED)
5166 io_uring_cmd_done(ioucmd, ret, 0);
5170 static int __io_splice_prep(struct io_kiocb *req,
5171 const struct io_uring_sqe *sqe)
5173 struct io_splice *sp = &req->splice;
5174 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
5176 sp->len = READ_ONCE(sqe->len);
5177 sp->flags = READ_ONCE(sqe->splice_flags);
5178 if (unlikely(sp->flags & ~valid_flags))
5180 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
5184 static int io_tee_prep(struct io_kiocb *req,
5185 const struct io_uring_sqe *sqe)
5187 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
5189 return __io_splice_prep(req, sqe);
5192 static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
5194 struct io_splice *sp = &req->splice;
5195 struct file *out = sp->file_out;
5196 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
5200 if (issue_flags & IO_URING_F_NONBLOCK)
5203 if (sp->flags & SPLICE_F_FD_IN_FIXED)
5204 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
5206 in = io_file_get_normal(req, sp->splice_fd_in);
5213 ret = do_tee(in, out, sp->len, flags);
5215 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
5220 __io_req_complete(req, 0, ret, 0);
5224 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5226 struct io_splice *sp = &req->splice;
5228 sp->off_in = READ_ONCE(sqe->splice_off_in);
5229 sp->off_out = READ_ONCE(sqe->off);
5230 return __io_splice_prep(req, sqe);
5233 static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
5235 struct io_splice *sp = &req->splice;
5236 struct file *out = sp->file_out;
5237 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
5238 loff_t *poff_in, *poff_out;
5242 if (issue_flags & IO_URING_F_NONBLOCK)
5245 if (sp->flags & SPLICE_F_FD_IN_FIXED)
5246 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
5248 in = io_file_get_normal(req, sp->splice_fd_in);
5254 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
5255 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
5258 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
5260 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
5265 __io_req_complete(req, 0, ret, 0);
5269 static int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5272 * If the ring is set up with CQE32, relay back addr/addr2
5274 if (req->ctx->flags & IORING_SETUP_CQE32) {
5275 req->nop.extra1 = READ_ONCE(sqe->addr);
5276 req->nop.extra2 = READ_ONCE(sqe->addr2);
5283 * IORING_OP_NOP just posts a completion event, nothing else.
5285 static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
5287 unsigned int cflags;
5290 if (req->flags & REQ_F_BUFFER_SELECT) {
5293 buf = io_buffer_select(req, &len, issue_flags);
5298 cflags = io_put_kbuf(req, issue_flags);
5299 if (!(req->ctx->flags & IORING_SETUP_CQE32))
5300 __io_req_complete(req, issue_flags, 0, cflags);
5302 __io_req_complete32(req, issue_flags, 0, cflags,
5303 req->nop.extra1, req->nop.extra2);
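
/*
 * Illustrative userspace sketch using liburing, not part of the original
 * file: on a ring created with IORING_SETUP_CQE32, a NOP relays
 * sqe->addr/addr2 back in the two extra completion words (cqe->big_cqe[0]
 * and [1] in the 32-byte CQE layout). Assumes an already initialised
 * 'ring'; error handling is omitted.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_prep_nop(sqe);
 *	sqe->addr  = 0x1111;		// relayed as cqe->big_cqe[0]
 *	sqe->addr2 = 0x2222;		// relayed as cqe->big_cqe[1]
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 */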
5307 static int io_msg_ring_prep(struct io_kiocb *req,
5308 const struct io_uring_sqe *sqe)
5310 if (unlikely(sqe->addr || sqe->rw_flags || sqe->splice_fd_in ||
5311 sqe->buf_index || sqe->personality))
5314 req->msg.user_data = READ_ONCE(sqe->off);
5315 req->msg.len = READ_ONCE(sqe->len);
5319 static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
5321 struct io_ring_ctx *target_ctx;
5322 struct io_msg *msg = &req->msg;
5327 if (req->file->f_op != &io_uring_fops)
5331 target_ctx = req->file->private_data;
5333 spin_lock(&target_ctx->completion_lock);
5334 filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
5335 io_commit_cqring(target_ctx);
5336 spin_unlock(&target_ctx->completion_lock);
5339 io_cqring_ev_posted(target_ctx);
5346 __io_req_complete(req, issue_flags, ret, 0);
5347 /* put file to avoid an attempt to IOPOLL the req */
5348 io_put_file(req->file);
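
/*
 * Illustrative userspace sketch, not part of the original file: MSG_RING
 * posts a CQE into another ring, identified by the io_uring file descriptor
 * in sqe->fd. Per the prep/issue code above, sqe->off becomes the target
 * CQE's user_data and sqe->len its res value. Assumes an initialised 'ring'
 * and a second ring's fd in 'target_fd'; error handling is omitted.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_MSG_RING;
 *	sqe->fd = target_fd;		// ring that receives the CQE
 *	sqe->off = 0x10;		// target CQE user_data
 *	sqe->len = 0x20;		// target CQE res
 *	io_uring_submit(&ring);
 */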
5353 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5355 if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
5358 req->sync.flags = READ_ONCE(sqe->fsync_flags);
5359 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
5362 req->sync.off = READ_ONCE(sqe->off);
5363 req->sync.len = READ_ONCE(sqe->len);
5367 static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
5369 loff_t end = req->sync.off + req->sync.len;
5372 /* fsync always requires a blocking context */
5373 if (issue_flags & IO_URING_F_NONBLOCK)
5376 ret = vfs_fsync_range(req->file, req->sync.off,
5377 end > 0 ? end : LLONG_MAX,
5378 req->sync.flags & IORING_FSYNC_DATASYNC);
5379 io_req_complete(req, ret);
5383 static int io_fallocate_prep(struct io_kiocb *req,
5384 const struct io_uring_sqe *sqe)
5386 if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
5389 req->sync.off = READ_ONCE(sqe->off);
5390 req->sync.len = READ_ONCE(sqe->addr);
5391 req->sync.mode = READ_ONCE(sqe->len);
5395 static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
5399 /* fallocate always requires a blocking context */
5400 if (issue_flags & IO_URING_F_NONBLOCK)
5402 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
5405 fsnotify_modify(req->file);
5406 io_req_complete(req, ret);
5410 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5412 const char __user *fname;
5415 if (unlikely(sqe->buf_index))
5417 if (unlikely(req->flags & REQ_F_FIXED_FILE))
5420 /* open.how should be already initialised */
5421 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
5422 req->open.how.flags |= O_LARGEFILE;
5424 req->open.dfd = READ_ONCE(sqe->fd);
5425 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
5426 req->open.filename = getname(fname);
5427 if (IS_ERR(req->open.filename)) {
5428 ret = PTR_ERR(req->open.filename);
5429 req->open.filename = NULL;
5433 req->open.file_slot = READ_ONCE(sqe->file_index);
5434 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
5437 req->open.nofile = rlimit(RLIMIT_NOFILE);
5438 req->flags |= REQ_F_NEED_CLEANUP;
5442 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5444 u64 mode = READ_ONCE(sqe->len);
5445 u64 flags = READ_ONCE(sqe->open_flags);
5447 req->open.how = build_open_how(flags, mode);
5448 return __io_openat_prep(req, sqe);
5451 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5453 struct open_how __user *how;
5457 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5458 len = READ_ONCE(sqe->len);
5459 if (len < OPEN_HOW_SIZE_VER0)
5462 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
5467 return __io_openat_prep(req, sqe);
5470 static int io_file_bitmap_get(struct io_ring_ctx *ctx)
5472 struct io_file_table *table = &ctx->file_table;
5473 unsigned long nr = ctx->nr_user_files;
5477 ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
5481 if (!table->alloc_hint)
5484 nr = table->alloc_hint;
5485 table->alloc_hint = 0;
5492 * Note that when io_fixed_fd_install() returns an error, it has already
5493 * called fput() on the file.
5495 static int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
5496 struct file *file, unsigned int file_slot)
5498 bool alloc_slot = file_slot == IORING_FILE_INDEX_ALLOC;
5499 struct io_ring_ctx *ctx = req->ctx;
5502 io_ring_submit_lock(ctx, issue_flags);
5505 ret = io_file_bitmap_get(ctx);
5506 if (unlikely(ret < 0))
5513 ret = io_install_fixed_file(req, file, issue_flags, file_slot);
5514 if (!ret && alloc_slot)
5517 io_ring_submit_unlock(ctx, issue_flags);
5518 if (unlikely(ret < 0))
5523 static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
5525 struct open_flags op;
5527 bool resolve_nonblock, nonblock_set;
5528 bool fixed = !!req->open.file_slot;
5531 ret = build_open_flags(&req->open.how, &op);
5534 nonblock_set = op.open_flag & O_NONBLOCK;
5535 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
5536 if (issue_flags & IO_URING_F_NONBLOCK) {
5538 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
5539 * it will always return -EAGAIN
5541 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
5543 op.lookup_flags |= LOOKUP_CACHED;
5544 op.open_flag |= O_NONBLOCK;
5548 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
5553 file = do_filp_open(req->open.dfd, req->open.filename, &op);
5556 * We could hang on to this 'fd' on retrying, but it seems like a
5557 * marginal gain for something that is now known to be a slower
5558 * path. So just put it, and we'll get a new one when we retry.
5563 ret = PTR_ERR(file);
5564 /* only retry if RESOLVE_CACHED wasn't already set by the application */
5565 if (ret == -EAGAIN &&
5566 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
5571 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
5572 file->f_flags &= ~O_NONBLOCK;
5573 fsnotify_open(file);
5576 fd_install(ret, file);
5578 ret = io_fixed_fd_install(req, issue_flags, file,
5579 req->open.file_slot);
5581 putname(req->open.filename);
5582 req->flags &= ~REQ_F_NEED_CLEANUP;
5585 __io_req_complete(req, issue_flags, ret, 0);
5589 static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
5591 return io_openat2(req, issue_flags);
5594 static int io_remove_buffers_prep(struct io_kiocb *req,
5595 const struct io_uring_sqe *sqe)
5597 struct io_provide_buf *p = &req->pbuf;
5600 if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
5604 tmp = READ_ONCE(sqe->fd);
5605 if (!tmp || tmp > USHRT_MAX)
5608 memset(p, 0, sizeof(*p));
5610 p->bgid = READ_ONCE(sqe->buf_group);
5614 static int __io_remove_buffers(struct io_ring_ctx *ctx,
5615 struct io_buffer_list *bl, unsigned nbufs)
5619 /* shouldn't happen */
5623 if (bl->buf_nr_pages) {
5626 i = bl->buf_ring->tail - bl->head;
5627 for (j = 0; j < bl->buf_nr_pages; j++)
5628 unpin_user_page(bl->buf_pages[j]);
5629 kvfree(bl->buf_pages);
5630 bl->buf_pages = NULL;
5631 bl->buf_nr_pages = 0;
5632 /* make sure it's seen as empty */
5633 INIT_LIST_HEAD(&bl->buf_list);
5637 /* the head kbuf is the list itself */
5638 while (!list_empty(&bl->buf_list)) {
5639 struct io_buffer *nxt;
5641 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
5642 list_del(&nxt->list);
5652 static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
5654 struct io_provide_buf *p = &req->pbuf;
5655 struct io_ring_ctx *ctx = req->ctx;
5656 struct io_buffer_list *bl;
5659 io_ring_submit_lock(ctx, issue_flags);
5662 bl = io_buffer_get_list(ctx, p->bgid);
5665 /* can't use provide/remove buffers command on mapped buffers */
5666 if (!bl->buf_nr_pages)
5667 ret = __io_remove_buffers(ctx, bl, p->nbufs);
5672 /* complete before unlock, IOPOLL may need the lock */
5673 __io_req_complete(req, issue_flags, ret, 0);
5674 io_ring_submit_unlock(ctx, issue_flags);
5678 static int io_provide_buffers_prep(struct io_kiocb *req,
5679 const struct io_uring_sqe *sqe)
5681 unsigned long size, tmp_check;
5682 struct io_provide_buf *p = &req->pbuf;
5685 if (sqe->rw_flags || sqe->splice_fd_in)
5688 tmp = READ_ONCE(sqe->fd);
5689 if (!tmp || tmp > USHRT_MAX)
5692 p->addr = READ_ONCE(sqe->addr);
5693 p->len = READ_ONCE(sqe->len);
5695 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
5698 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
5701 size = (unsigned long)p->len * p->nbufs;
5702 if (!access_ok(u64_to_user_ptr(p->addr), size))
5705 p->bgid = READ_ONCE(sqe->buf_group);
5706 tmp = READ_ONCE(sqe->off);
5707 if (tmp > USHRT_MAX)
5713 static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
5715 struct io_buffer *buf;
5720 * Completions that don't happen inline (eg not under uring_lock) will
5721 * add to ->io_buffers_comp. If we don't have any free buffers, check
5722 * the completion list and splice those entries first.
5724 if (!list_empty_careful(&ctx->io_buffers_comp)) {
5725 spin_lock(&ctx->completion_lock);
5726 if (!list_empty(&ctx->io_buffers_comp)) {
5727 list_splice_init(&ctx->io_buffers_comp,
5728 &ctx->io_buffers_cache);
5729 spin_unlock(&ctx->completion_lock);
5732 spin_unlock(&ctx->completion_lock);
5736 * No free buffers and no completion entries either. Allocate a new
5737 * page worth of buffer entries and add those to our freelist.
5739 page = alloc_page(GFP_KERNEL_ACCOUNT);
5743 list_add(&page->lru, &ctx->io_buffers_pages);
5745 buf = page_address(page);
5746 bufs_in_page = PAGE_SIZE / sizeof(*buf);
5747 while (bufs_in_page) {
5748 list_add_tail(&buf->list, &ctx->io_buffers_cache);
5756 static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
5757 struct io_buffer_list *bl)
5759 struct io_buffer *buf;
5760 u64 addr = pbuf->addr;
5761 int i, bid = pbuf->bid;
5763 for (i = 0; i < pbuf->nbufs; i++) {
5764 if (list_empty(&ctx->io_buffers_cache) &&
5765 io_refill_buffer_cache(ctx))
5767 buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
5769 list_move_tail(&buf->list, &bl->buf_list);
5771 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
5773 buf->bgid = pbuf->bgid;
5779 return i ? 0 : -ENOMEM;
5782 static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
5786 ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
5791 for (i = 0; i < BGID_ARRAY; i++) {
5792 INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
5793 ctx->io_bl[i].bgid = i;
5799 static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
5801 struct io_provide_buf *p = &req->pbuf;
5802 struct io_ring_ctx *ctx = req->ctx;
5803 struct io_buffer_list *bl;
5806 io_ring_submit_lock(ctx, issue_flags);
5808 if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
5809 ret = io_init_bl_list(ctx);
5814 bl = io_buffer_get_list(ctx, p->bgid);
5815 if (unlikely(!bl)) {
5816 bl = kzalloc(sizeof(*bl), GFP_KERNEL);
5821 INIT_LIST_HEAD(&bl->buf_list);
5822 ret = io_buffer_add_list(ctx, bl, p->bgid);
5828 /* can't add buffers via this command for a mapped buffer ring */
5829 if (bl->buf_nr_pages) {
5834 ret = io_add_buffers(ctx, p, bl);
5838 /* complete before unlock, IOPOLL may need the lock */
5839 __io_req_complete(req, issue_flags, ret, 0);
5840 io_ring_submit_unlock(ctx, issue_flags);
5844 static int io_epoll_ctl_prep(struct io_kiocb *req,
5845 const struct io_uring_sqe *sqe)
5847 #if defined(CONFIG_EPOLL)
5848 if (sqe->buf_index || sqe->splice_fd_in)
5851 req->epoll.epfd = READ_ONCE(sqe->fd);
5852 req->epoll.op = READ_ONCE(sqe->len);
5853 req->epoll.fd = READ_ONCE(sqe->off);
5855 if (ep_op_has_event(req->epoll.op)) {
5856 struct epoll_event __user *ev;
5858 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
5859 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
5869 static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
5871 #if defined(CONFIG_EPOLL)
5872 struct io_epoll *ie = &req->epoll;
5874 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
5876 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
5877 if (force_nonblock && ret == -EAGAIN)
5882 __io_req_complete(req, issue_flags, ret, 0);
5889 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5891 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
5892 if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
5895 req->madvise.addr = READ_ONCE(sqe->addr);
5896 req->madvise.len = READ_ONCE(sqe->len);
5897 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
5904 static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
5906 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
5907 struct io_madvise *ma = &req->madvise;
5910 if (issue_flags & IO_URING_F_NONBLOCK)
5913 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
5914 io_req_complete(req, ret);
5921 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5923 if (sqe->buf_index || sqe->addr || sqe->splice_fd_in)
5926 req->fadvise.offset = READ_ONCE(sqe->off);
5927 req->fadvise.len = READ_ONCE(sqe->len);
5928 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
5932 static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
5934 struct io_fadvise *fa = &req->fadvise;
5937 if (issue_flags & IO_URING_F_NONBLOCK) {
5938 switch (fa->advice) {
5939 case POSIX_FADV_NORMAL:
5940 case POSIX_FADV_RANDOM:
5941 case POSIX_FADV_SEQUENTIAL:
5948 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
5951 __io_req_complete(req, issue_flags, ret, 0);
5955 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5957 const char __user *path;
5959 if (sqe->buf_index || sqe->splice_fd_in)
5961 if (req->flags & REQ_F_FIXED_FILE)
5964 req->statx.dfd = READ_ONCE(sqe->fd);
5965 req->statx.mask = READ_ONCE(sqe->len);
5966 path = u64_to_user_ptr(READ_ONCE(sqe->addr));
5967 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5968 req->statx.flags = READ_ONCE(sqe->statx_flags);
5970 req->statx.filename = getname_flags(path,
5971 getname_statx_lookup_flags(req->statx.flags),
5974 if (IS_ERR(req->statx.filename)) {
5975 int ret = PTR_ERR(req->statx.filename);
5977 req->statx.filename = NULL;
5981 req->flags |= REQ_F_NEED_CLEANUP;
5985 static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
5987 struct io_statx *ctx = &req->statx;
5990 if (issue_flags & IO_URING_F_NONBLOCK)
5993 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
5995 io_req_complete(req, ret);
5999 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6001 if (sqe->off || sqe->addr || sqe->len || sqe->buf_index)
6003 if (req->flags & REQ_F_FIXED_FILE)
6006 req->close.fd = READ_ONCE(sqe->fd);
6007 req->close.file_slot = READ_ONCE(sqe->file_index);
6008 req->close.flags = READ_ONCE(sqe->close_flags);
6009 if (req->close.flags & ~IORING_CLOSE_FD_AND_FILE_SLOT)
6011 if (!(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT) &&
6012 req->close.file_slot && req->close.fd)
6018 static int io_close(struct io_kiocb *req, unsigned int issue_flags)
6020 struct files_struct *files = current->files;
6021 struct io_close *close = &req->close;
6022 struct fdtable *fdt;
6026 if (req->close.file_slot) {
6027 ret = io_close_fixed(req, issue_flags);
6028 if (ret || !(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT))
6032 spin_lock(&files->file_lock);
6033 fdt = files_fdtable(files);
6034 if (close->fd >= fdt->max_fds) {
6035 spin_unlock(&files->file_lock);
6038 file = rcu_dereference_protected(fdt->fd[close->fd],
6039 lockdep_is_held(&files->file_lock));
6040 if (!file || file->f_op == &io_uring_fops) {
6041 spin_unlock(&files->file_lock);
6045 /* if the file has a flush method, be safe and punt to async */
6046 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
6047 spin_unlock(&files->file_lock);
6051 file = __close_fd_get_file(close->fd);
6052 spin_unlock(&files->file_lock);
6056 /* No ->flush() or already async, safely close from here */
6057 ret = filp_close(file, current->files);
6061 __io_req_complete(req, issue_flags, ret, 0);
6065 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6067 if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
6070 req->sync.off = READ_ONCE(sqe->off);
6071 req->sync.len = READ_ONCE(sqe->len);
6072 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
6076 static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
6080 /* sync_file_range always requires a blocking context */
6081 if (issue_flags & IO_URING_F_NONBLOCK)
6084 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
6086 io_req_complete(req, ret);
6090 #if defined(CONFIG_NET)
6091 static int io_shutdown_prep(struct io_kiocb *req,
6092 const struct io_uring_sqe *sqe)
6094 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
6095 sqe->buf_index || sqe->splice_fd_in))
6098 req->shutdown.how = READ_ONCE(sqe->len);
6102 static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
6104 struct socket *sock;
6107 if (issue_flags & IO_URING_F_NONBLOCK)
6110 sock = sock_from_file(req->file);
6111 if (unlikely(!sock))
6114 ret = __sys_shutdown_sock(sock, req->shutdown.how);
6115 io_req_complete(req, ret);
6119 static bool io_net_retry(struct socket *sock, int flags)
6121 if (!(flags & MSG_WAITALL))
6123 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
6126 static int io_setup_async_msg(struct io_kiocb *req,
6127 struct io_async_msghdr *kmsg)
6129 struct io_async_msghdr *async_msg = req->async_data;
6133 if (io_alloc_async_data(req)) {
6134 kfree(kmsg->free_iov);
6137 async_msg = req->async_data;
6138 req->flags |= REQ_F_NEED_CLEANUP;
6139 memcpy(async_msg, kmsg, sizeof(*kmsg));
6140 async_msg->msg.msg_name = &async_msg->addr;
6141 /* if we were using fast_iov, set it to the new one */
6142 if (!async_msg->free_iov)
6143 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
6148 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
6149 struct io_async_msghdr *iomsg)
6151 iomsg->msg.msg_name = &iomsg->addr;
6152 iomsg->free_iov = iomsg->fast_iov;
6153 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
6154 req->sr_msg.msg_flags, &iomsg->free_iov);
6157 static int io_sendmsg_prep_async(struct io_kiocb *req)
6161 ret = io_sendmsg_copy_hdr(req, req->async_data);
6163 req->flags |= REQ_F_NEED_CLEANUP;
6167 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6169 struct io_sr_msg *sr = &req->sr_msg;
6171 if (unlikely(sqe->file_index))
6173 if (unlikely(sqe->addr2 || sqe->file_index))
6176 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
6177 sr->len = READ_ONCE(sqe->len);
6178 sr->flags = READ_ONCE(sqe->addr2);
6179 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
6181 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
6182 if (sr->msg_flags & MSG_DONTWAIT)
6183 req->flags |= REQ_F_NOWAIT;
6185 #ifdef CONFIG_COMPAT
6186 if (req->ctx->compat)
6187 sr->msg_flags |= MSG_CMSG_COMPAT;
6193 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
6195 struct io_async_msghdr iomsg, *kmsg;
6196 struct io_sr_msg *sr = &req->sr_msg;
6197 struct socket *sock;
6202 sock = sock_from_file(req->file);
6203 if (unlikely(!sock))
6206 if (req_has_async_data(req)) {
6207 kmsg = req->async_data;
6209 ret = io_sendmsg_copy_hdr(req, &iomsg);
6215 if (!(req->flags & REQ_F_POLLED) &&
6216 (sr->flags & IORING_RECVSEND_POLL_FIRST))
6217 return io_setup_async_msg(req, kmsg);
6219 flags = sr->msg_flags;
6220 if (issue_flags & IO_URING_F_NONBLOCK)
6221 flags |= MSG_DONTWAIT;
6222 if (flags & MSG_WAITALL)
6223 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
6225 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
6227 if (ret < min_ret) {
6228 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
6229 return io_setup_async_msg(req, kmsg);
6230 if (ret == -ERESTARTSYS)
6232 if (ret > 0 && io_net_retry(sock, flags)) {
6234 req->flags |= REQ_F_PARTIAL_IO;
6235 return io_setup_async_msg(req, kmsg);
6239 /* fast path, check for non-NULL to avoid function call */
6241 kfree(kmsg->free_iov);
6242 req->flags &= ~REQ_F_NEED_CLEANUP;
6245 else if (sr->done_io)
6247 __io_req_complete(req, issue_flags, ret, 0);
6251 static int io_send(struct io_kiocb *req, unsigned int issue_flags)
6253 struct io_sr_msg *sr = &req->sr_msg;
6256 struct socket *sock;
6261 if (!(req->flags & REQ_F_POLLED) &&
6262 (sr->flags & IORING_RECVSEND_POLL_FIRST))
6265 sock = sock_from_file(req->file);
6266 if (unlikely(!sock))
6269 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
6273 msg.msg_name = NULL;
6274 msg.msg_control = NULL;
6275 msg.msg_controllen = 0;
6276 msg.msg_namelen = 0;
6278 flags = sr->msg_flags;
6279 if (issue_flags & IO_URING_F_NONBLOCK)
6280 flags |= MSG_DONTWAIT;
6281 if (flags & MSG_WAITALL)
6282 min_ret = iov_iter_count(&msg.msg_iter);
6284 msg.msg_flags = flags;
6285 ret = sock_sendmsg(sock, &msg);
6286 if (ret < min_ret) {
6287 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
6289 if (ret == -ERESTARTSYS)
6291 if (ret > 0 && io_net_retry(sock, flags)) {
6295 req->flags |= REQ_F_PARTIAL_IO;
6302 else if (sr->done_io)
6304 __io_req_complete(req, issue_flags, ret, 0);
6308 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
6309 struct io_async_msghdr *iomsg)
6311 struct io_sr_msg *sr = &req->sr_msg;
6312 struct iovec __user *uiov;
6316 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
6317 &iomsg->uaddr, &uiov, &iov_len);
6321 if (req->flags & REQ_F_BUFFER_SELECT) {
6324 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
6326 sr->len = iomsg->fast_iov[0].iov_len;
6327 iomsg->free_iov = NULL;
6329 iomsg->free_iov = iomsg->fast_iov;
6330 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
6331 &iomsg->free_iov, &iomsg->msg.msg_iter,
6340 #ifdef CONFIG_COMPAT
6341 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
6342 struct io_async_msghdr *iomsg)
6344 struct io_sr_msg *sr = &req->sr_msg;
6345 struct compat_iovec __user *uiov;
6350 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
6355 uiov = compat_ptr(ptr);
6356 if (req->flags & REQ_F_BUFFER_SELECT) {
6357 compat_ssize_t clen;
6361 if (!access_ok(uiov, sizeof(*uiov)))
6363 if (__get_user(clen, &uiov->iov_len))
6368 iomsg->free_iov = NULL;
6370 iomsg->free_iov = iomsg->fast_iov;
6371 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
6372 UIO_FASTIOV, &iomsg->free_iov,
6373 &iomsg->msg.msg_iter, true);
6382 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
6383 struct io_async_msghdr *iomsg)
6385 iomsg->msg.msg_name = &iomsg->addr;
6387 #ifdef CONFIG_COMPAT
6388 if (req->ctx->compat)
6389 return __io_compat_recvmsg_copy_hdr(req, iomsg);
6392 return __io_recvmsg_copy_hdr(req, iomsg);
6395 static int io_recvmsg_prep_async(struct io_kiocb *req)
6399 ret = io_recvmsg_copy_hdr(req, req->async_data);
6401 req->flags |= REQ_F_NEED_CLEANUP;
6405 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6407 struct io_sr_msg *sr = &req->sr_msg;
6409 if (unlikely(sqe->file_index))
6411 if (unlikely(sqe->addr2 || sqe->file_index))
6414 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
6415 sr->len = READ_ONCE(sqe->len);
6416 sr->flags = READ_ONCE(sqe->addr2);
6417 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
6419 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
6420 if (sr->msg_flags & MSG_DONTWAIT)
6421 req->flags |= REQ_F_NOWAIT;
6423 #ifdef CONFIG_COMPAT
6424 if (req->ctx->compat)
6425 sr->msg_flags |= MSG_CMSG_COMPAT;
6431 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
6433 struct io_async_msghdr iomsg, *kmsg;
6434 struct io_sr_msg *sr = &req->sr_msg;
6435 struct socket *sock;
6436 unsigned int cflags;
6438 int ret, min_ret = 0;
6439 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
6441 sock = sock_from_file(req->file);
6442 if (unlikely(!sock))
6445 if (req_has_async_data(req)) {
6446 kmsg = req->async_data;
6448 ret = io_recvmsg_copy_hdr(req, &iomsg);
6454 if (!(req->flags & REQ_F_POLLED) &&
6455 (sr->flags & IORING_RECVSEND_POLL_FIRST))
6456 return io_setup_async_msg(req, kmsg);
6458 if (io_do_buffer_select(req)) {
6461 buf = io_buffer_select(req, &sr->len, issue_flags);
6464 kmsg->fast_iov[0].iov_base = buf;
6465 kmsg->fast_iov[0].iov_len = sr->len;
6466 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
6470 flags = sr->msg_flags;
6472 flags |= MSG_DONTWAIT;
6473 if (flags & MSG_WAITALL)
6474 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
6476 kmsg->msg.msg_get_inq = 1;
6477 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg, kmsg->uaddr, flags);
6478 if (ret < min_ret) {
6479 if (ret == -EAGAIN && force_nonblock)
6480 return io_setup_async_msg(req, kmsg);
6481 if (ret == -ERESTARTSYS)
6483 if (ret > 0 && io_net_retry(sock, flags)) {
6485 req->flags |= REQ_F_PARTIAL_IO;
6486 return io_setup_async_msg(req, kmsg);
6489 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
6493 /* fast path, check for non-NULL to avoid function call */
6495 kfree(kmsg->free_iov);
6496 req->flags &= ~REQ_F_NEED_CLEANUP;
6499 else if (sr->done_io)
6501 cflags = io_put_kbuf(req, issue_flags);
6502 if (kmsg->msg.msg_inq)
6503 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
6504 __io_req_complete(req, issue_flags, ret, cflags);
6508 static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
6510 struct io_sr_msg *sr = &req->sr_msg;
6512 struct socket *sock;
6514 unsigned int cflags;
6516 int ret, min_ret = 0;
6517 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
6519 if (!(req->flags & REQ_F_POLLED) &&
6520 (sr->flags & IORING_RECVSEND_POLL_FIRST))
6523 sock = sock_from_file(req->file);
6524 if (unlikely(!sock))
6527 if (io_do_buffer_select(req)) {
6530 buf = io_buffer_select(req, &sr->len, issue_flags);
6536 ret = import_single_range(READ, sr->buf, sr->len, &iov, &msg.msg_iter);
6540 msg.msg_name = NULL;
6541 msg.msg_namelen = 0;
6542 msg.msg_control = NULL;
6543 msg.msg_get_inq = 1;
6545 msg.msg_controllen = 0;
6546 msg.msg_iocb = NULL;
6548 flags = sr->msg_flags;
6550 flags |= MSG_DONTWAIT;
6551 if (flags & MSG_WAITALL)
6552 min_ret = iov_iter_count(&msg.msg_iter);
6554 ret = sock_recvmsg(sock, &msg, flags);
6555 if (ret < min_ret) {
6556 if (ret == -EAGAIN && force_nonblock)
6558 if (ret == -ERESTARTSYS)
6560 if (ret > 0 && io_net_retry(sock, flags)) {
6564 req->flags |= REQ_F_PARTIAL_IO;
6568 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
6575 else if (sr->done_io)
6577 cflags = io_put_kbuf(req, issue_flags);
6579 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
6580 __io_req_complete(req, issue_flags, ret, cflags);
6584 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6586 struct io_accept *accept = &req->accept;
6589 if (sqe->len || sqe->buf_index)
6592 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
6593 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
6594 accept->flags = READ_ONCE(sqe->accept_flags);
6595 accept->nofile = rlimit(RLIMIT_NOFILE);
6596 flags = READ_ONCE(sqe->ioprio);
6597 if (flags & ~IORING_ACCEPT_MULTISHOT)
6600 accept->file_slot = READ_ONCE(sqe->file_index);
6601 if (accept->file_slot) {
6602 if (accept->flags & SOCK_CLOEXEC)
6604 if (flags & IORING_ACCEPT_MULTISHOT &&
6605 accept->file_slot != IORING_FILE_INDEX_ALLOC)
6608 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
6610 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
6611 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
6612 if (flags & IORING_ACCEPT_MULTISHOT)
6613 req->flags |= REQ_F_APOLL_MULTISHOT;
6617 static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
6619 struct io_ring_ctx *ctx = req->ctx;
6620 struct io_accept *accept = &req->accept;
6621 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
6622 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
6623 bool fixed = !!accept->file_slot;
6629 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
6630 if (unlikely(fd < 0))
6633 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
6638 ret = PTR_ERR(file);
6639 if (ret == -EAGAIN && force_nonblock) {
6641 * if it's multishot and polled, we don't need to
6642 * return EAGAIN to arm the poll infra since it
6643 * has already been done
6645 if ((req->flags & IO_APOLL_MULTI_POLLED) ==
6646 IO_APOLL_MULTI_POLLED)
6650 if (ret == -ERESTARTSYS)
6653 } else if (!fixed) {
6654 fd_install(fd, file);
6657 ret = io_fixed_fd_install(req, issue_flags, file,
6661 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
6662 __io_req_complete(req, issue_flags, ret, 0);
6668 spin_lock(&ctx->completion_lock);
6669 filled = io_fill_cqe_aux(ctx, req->cqe.user_data, ret,
6671 io_commit_cqring(ctx);
6672 spin_unlock(&ctx->completion_lock);
6674 io_cqring_ev_posted(ctx);
6683 static int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6685 struct io_socket *sock = &req->sock;
6687 if (sqe->addr || sqe->rw_flags || sqe->buf_index)
6690 sock->domain = READ_ONCE(sqe->fd);
6691 sock->type = READ_ONCE(sqe->off);
6692 sock->protocol = READ_ONCE(sqe->len);
6693 sock->file_slot = READ_ONCE(sqe->file_index);
6694 sock->nofile = rlimit(RLIMIT_NOFILE);
6696 sock->flags = sock->type & ~SOCK_TYPE_MASK;
6697 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
6699 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
6704 static int io_socket(struct io_kiocb *req, unsigned int issue_flags)
6706 struct io_socket *sock = &req->sock;
6707 bool fixed = !!sock->file_slot;
6712 fd = __get_unused_fd_flags(sock->flags, sock->nofile);
6713 if (unlikely(fd < 0))
6716 file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
6720 ret = PTR_ERR(file);
6721 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
6723 if (ret == -ERESTARTSYS)
6726 } else if (!fixed) {
6727 fd_install(fd, file);
6730 ret = io_fixed_fd_install(req, issue_flags, file,
6733 __io_req_complete(req, issue_flags, ret, 0);
6737 static int io_connect_prep_async(struct io_kiocb *req)
6739 struct io_async_connect *io = req->async_data;
6740 struct io_connect *conn = &req->connect;
6742 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
6745 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6747 struct io_connect *conn = &req->connect;
6749 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
6752 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
6753 conn->addr_len = READ_ONCE(sqe->addr2);
6757 static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
6759 struct io_async_connect __io, *io;
6760 unsigned file_flags;
6762 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
6764 if (req_has_async_data(req)) {
6765 io = req->async_data;
6767 ret = move_addr_to_kernel(req->connect.addr,
6768 req->connect.addr_len,
6775 file_flags = force_nonblock ? O_NONBLOCK : 0;
6777 ret = __sys_connect_file(req->file, &io->address,
6778 req->connect.addr_len, file_flags);
6779 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
6780 if (req_has_async_data(req))
6782 if (io_alloc_async_data(req)) {
6786 memcpy(req->async_data, &__io, sizeof(__io));
6789 if (ret == -ERESTARTSYS)
6794 __io_req_complete(req, issue_flags, ret, 0);
6797 #else /* !CONFIG_NET */
6798 #define IO_NETOP_FN(op) \
6799 static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
6801 return -EOPNOTSUPP; \
6804 #define IO_NETOP_PREP(op) \
6806 static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
6808 return -EOPNOTSUPP; \
6811 #define IO_NETOP_PREP_ASYNC(op) \
6813 static int io_##op##_prep_async(struct io_kiocb *req) \
6815 return -EOPNOTSUPP; \
6818 IO_NETOP_PREP_ASYNC(sendmsg);
6819 IO_NETOP_PREP_ASYNC(recvmsg);
6820 IO_NETOP_PREP_ASYNC(connect);
6821 IO_NETOP_PREP(accept);
6822 IO_NETOP_PREP(socket);
6823 IO_NETOP_PREP(shutdown);
6826 #endif /* CONFIG_NET */
6828 struct io_poll_table {
6829 struct poll_table_struct pt;
6830 struct io_kiocb *req;
6835 #define IO_POLL_CANCEL_FLAG BIT(31)
6836 #define IO_POLL_REF_MASK GENMASK(30, 0)
6839 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
6840 * bump it and acquire ownership. It's disallowed to modify requests while not
6841 * owning it, which prevents races when enqueueing task_work and between
6842 * arming poll and wakeups.
6844 static inline bool io_poll_get_ownership(struct io_kiocb *req)
6846 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
6849 static void io_poll_mark_cancelled(struct io_kiocb *req)
6851 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
6854 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
6856 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
6857 if (req->opcode == IORING_OP_POLL_ADD)
6858 return req->async_data;
6859 return req->apoll->double_poll;
6862 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
6864 if (req->opcode == IORING_OP_POLL_ADD)
6866 return &req->apoll->poll;
6869 static void io_poll_req_insert(struct io_kiocb *req)
6871 struct io_ring_ctx *ctx = req->ctx;
6872 struct hlist_head *list;
6874 list = &ctx->cancel_hash[hash_long(req->cqe.user_data, ctx->cancel_hash_bits)];
6875 hlist_add_head(&req->hash_node, list);
6878 static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
6879 wait_queue_func_t wake_func)
6882 #define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
6883 /* mask in events that we always want/need */
6884 poll->events = events | IO_POLL_UNMASK;
6885 INIT_LIST_HEAD(&poll->wait.entry);
6886 init_waitqueue_func_entry(&poll->wait, wake_func);
6889 static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
6891 struct wait_queue_head *head = smp_load_acquire(&poll->head);
6894 spin_lock_irq(&head->lock);
6895 list_del_init(&poll->wait.entry);
6897 spin_unlock_irq(&head->lock);
6901 static void io_poll_remove_entries(struct io_kiocb *req)
6904 * Nothing to do if neither of those flags are set. Avoid dipping
6905 * into the poll/apoll/double cachelines if we can.
6907 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
6911 * While we hold the waitqueue lock and the waitqueue is nonempty,
6912 * wake_up_pollfree() will wait for us. However, taking the waitqueue
6913 * lock in the first place can race with the waitqueue being freed.
6915 * We solve this as eventpoll does: by taking advantage of the fact that
6916 * all users of wake_up_pollfree() will RCU-delay the actual free. If
6917 * we enter rcu_read_lock() and see that the pointer to the queue is
6918 * non-NULL, we can then lock it without the memory being freed out from
6921 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
6922 * case the caller deletes the entry from the queue, leaving it empty.
6923 * In that case, only RCU prevents the queue memory from being freed.
6926 if (req->flags & REQ_F_SINGLE_POLL)
6927 io_poll_remove_entry(io_poll_get_single(req));
6928 if (req->flags & REQ_F_DOUBLE_POLL)
6929 io_poll_remove_entry(io_poll_get_double(req));
6933 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags);
6935 * All poll tw should go through this. Checks for poll events, manages
6936 * references, does rewait, etc.
6938 * Returns a negative error on failure. >0 when no action is required, which
6939 * means either a spurious wakeup or a served multishot CQE. 0 when it's done with
6940 * the request, then the mask is stored in req->cqe.res.
6942 static int io_poll_check_events(struct io_kiocb *req, bool *locked)
6944 struct io_ring_ctx *ctx = req->ctx;
6947 /* req->task == current here, checking PF_EXITING is safe */
6948 if (unlikely(req->task->flags & PF_EXITING))
6952 v = atomic_read(&req->poll_refs);
6954 /* tw handler should be the owner, and so have some references */
6955 if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
6957 if (v & IO_POLL_CANCEL_FLAG)
6960 if (!req->cqe.res) {
6961 struct poll_table_struct pt = { ._key = req->apoll_events };
6962 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
6965 if ((unlikely(!req->cqe.res)))
6967 if (req->apoll_events & EPOLLONESHOT)
6970 /* multishot, just fill a CQE and proceed */
6971 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
6972 __poll_t mask = mangle_poll(req->cqe.res &
6976 spin_lock(&ctx->completion_lock);
6977 filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
6978 mask, IORING_CQE_F_MORE);
6979 io_commit_cqring(ctx);
6980 spin_unlock(&ctx->completion_lock);
6982 io_cqring_ev_posted(ctx);
6988 io_tw_lock(req->ctx, locked);
6989 if (unlikely(req->task->flags & PF_EXITING))
6991 ret = io_issue_sqe(req,
6992 IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
6997 * Release all references, retry if someone tried to restart
6998 * task_work while we were executing it.
7000 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
7005 static void io_poll_task_func(struct io_kiocb *req, bool *locked)
7007 struct io_ring_ctx *ctx = req->ctx;
7010 ret = io_poll_check_events(req, locked);
7015 req->cqe.res = mangle_poll(req->cqe.res & req->poll.events);
7021 io_poll_remove_entries(req);
7022 spin_lock(&ctx->completion_lock);
7023 hash_del(&req->hash_node);
7024 __io_req_complete_post(req, req->cqe.res, 0);
7025 io_commit_cqring(ctx);
7026 spin_unlock(&ctx->completion_lock);
7027 io_cqring_ev_posted(ctx);
7030 static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
7032 struct io_ring_ctx *ctx = req->ctx;
7035 ret = io_poll_check_events(req, locked);
7039 io_poll_remove_entries(req);
7040 spin_lock(&ctx->completion_lock);
7041 hash_del(&req->hash_node);
7042 spin_unlock(&ctx->completion_lock);
7045 io_req_task_submit(req, locked);
7047 io_req_complete_failed(req, ret);
7050 static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
7052 req->cqe.res = mask;
7054 * This is useful for poll that is armed on behalf of another
7055 * request, and where the wakeup path could be on a different
7056 * CPU. We want to avoid pulling in req->apoll->events for that
7059 req->apoll_events = events;
7060 if (req->opcode == IORING_OP_POLL_ADD)
7061 req->io_task_work.func = io_poll_task_func;
7063 req->io_task_work.func = io_apoll_task_func;
7065 trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
7066 io_req_task_work_add(req);
7069 static inline void io_poll_execute(struct io_kiocb *req, int res,
7072 if (io_poll_get_ownership(req))
7073 __io_poll_execute(req, res, events);
7076 static void io_poll_cancel_req(struct io_kiocb *req)
7078 io_poll_mark_cancelled(req);
7079 /* kick tw, which should complete the request */
7080 io_poll_execute(req, 0, 0);
7083 #define wqe_to_req(wait) ((void *)((unsigned long) (wait)->private & ~1))
7084 #define wqe_is_double(wait) ((unsigned long) (wait)->private & 1)
7085 #define IO_ASYNC_POLL_COMMON (EPOLLONESHOT | EPOLLPRI)
7087 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
7090 struct io_kiocb *req = wqe_to_req(wait);
7091 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
7093 __poll_t mask = key_to_poll(key);
7095 if (unlikely(mask & POLLFREE)) {
7096 io_poll_mark_cancelled(req);
7097 /* we have to kick tw in case it's not already */
7098 io_poll_execute(req, 0, poll->events);
7101 * If the waitqueue is being freed early but someone already
7102 * holds ownership over it, we have to tear down the request as
7103 * best we can. That means immediately removing the request from
7104 * its waitqueue and preventing all further accesses to the
7105 * waitqueue via the request.
7107 list_del_init(&poll->wait.entry);
7110 * Careful: this *must* be the last step, since as soon
7111 * as req->head is NULL'ed out, the request can be
7112 * completed and freed, since aio_poll_complete_work()
7113 * will no longer need to take the waitqueue lock.
7115 smp_store_release(&poll->head, NULL);
7119 /* for instances that support it, check for an event match first */
7120 if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
7123 if (io_poll_get_ownership(req)) {
7124 /* optional, saves extra locking for removal in tw handler */
7125 if (mask && poll->events & EPOLLONESHOT) {
7126 list_del_init(&poll->wait.entry);
7128 if (wqe_is_double(wait))
7129 req->flags &= ~REQ_F_DOUBLE_POLL;
7131 req->flags &= ~REQ_F_SINGLE_POLL;
7133 __io_poll_execute(req, mask, poll->events);
7138 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
7139 struct wait_queue_head *head,
7140 struct io_poll_iocb **poll_ptr)
7142 struct io_kiocb *req = pt->req;
7143 unsigned long wqe_private = (unsigned long) req;
7146 * The file being polled uses multiple waitqueues for poll handling
7147 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
7150 if (unlikely(pt->nr_entries)) {
7151 struct io_poll_iocb *first = poll;
7153 /* double add on the same waitqueue head, ignore */
7154 if (first->head == head)
7156 /* already have a 2nd entry, fail a third attempt */
7158 if ((*poll_ptr)->head == head)
7160 pt->error = -EINVAL;
7164 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
7166 pt->error = -ENOMEM;
7169 /* mark as double wq entry */
7171 req->flags |= REQ_F_DOUBLE_POLL;
7172 io_init_poll_iocb(poll, first->events, first->wait.func);
7174 if (req->opcode == IORING_OP_POLL_ADD)
7175 req->flags |= REQ_F_ASYNC_DATA;
7178 req->flags |= REQ_F_SINGLE_POLL;
7181 poll->wait.private = (void *) wqe_private;
7183 if (poll->events & EPOLLEXCLUSIVE)
7184 add_wait_queue_exclusive(head, &poll->wait);
7186 add_wait_queue(head, &poll->wait);
7189 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
7190 struct poll_table_struct *p)
7192 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
7194 __io_queue_proc(&pt->req->poll, pt, head,
7195 (struct io_poll_iocb **) &pt->req->async_data);
7198 static int __io_arm_poll_handler(struct io_kiocb *req,
7199 struct io_poll_iocb *poll,
7200 struct io_poll_table *ipt, __poll_t mask)
7202 struct io_ring_ctx *ctx = req->ctx;
7205 INIT_HLIST_NODE(&req->hash_node);
7206 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
7207 io_init_poll_iocb(poll, mask, io_poll_wake);
7208 poll->file = req->file;
7210 ipt->pt._key = mask;
7213 ipt->nr_entries = 0;
7216 * Take ownership to delay any tw execution until we're done
7217 * with poll arming; see io_poll_get_ownership().
7219 atomic_set(&req->poll_refs, 1);
7220 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
7222 if (mask && (poll->events & EPOLLONESHOT)) {
7223 io_poll_remove_entries(req);
7224 /* no one else has access to the req, forget about the ref */
7227 if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
7228 io_poll_remove_entries(req);
7230 ipt->error = -EINVAL;
7234 spin_lock(&ctx->completion_lock);
7235 io_poll_req_insert(req);
7236 spin_unlock(&ctx->completion_lock);
7239 /* can't multishot if failed, just queue the event we've got */
7240 if (unlikely(ipt->error || !ipt->nr_entries))
7241 poll->events |= EPOLLONESHOT;
7242 __io_poll_execute(req, mask, poll->events);
7247 * Release ownership. If someone tried to queue a tw while it was
7248 * locked, kick it off for them.
7250 v = atomic_dec_return(&req->poll_refs);
7251 if (unlikely(v & IO_POLL_REF_MASK))
7252 __io_poll_execute(req, 0, poll->events);
7256 static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
7257 struct poll_table_struct *p)
7259 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
7260 struct async_poll *apoll = pt->req->apoll;
7262 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
7271 static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
7273 const struct io_op_def *def = &io_op_defs[req->opcode];
7274 struct io_ring_ctx *ctx = req->ctx;
7275 struct async_poll *apoll;
7276 struct io_poll_table ipt;
7277 __poll_t mask = POLLPRI | POLLERR;
7280 if (!def->pollin && !def->pollout)
7281 return IO_APOLL_ABORTED;
7282 if (!file_can_poll(req->file))
7283 return IO_APOLL_ABORTED;
7284 if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
7285 return IO_APOLL_ABORTED;
7286 if (!(req->flags & REQ_F_APOLL_MULTISHOT))
7287 mask |= EPOLLONESHOT;
7290 mask |= EPOLLIN | EPOLLRDNORM;
7292 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
7293 if ((req->opcode == IORING_OP_RECVMSG) &&
7294 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
7297 mask |= EPOLLOUT | EPOLLWRNORM;
7299 if (def->poll_exclusive)
7300 mask |= EPOLLEXCLUSIVE;
7301 if (req->flags & REQ_F_POLLED) {
7303 } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
7304 !list_empty(&ctx->apoll_cache)) {
7305 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
7307 list_del_init(&apoll->poll.wait.entry);
7309 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
7310 if (unlikely(!apoll))
7311 return IO_APOLL_ABORTED;
7313 apoll->double_poll = NULL;
7315 req->flags |= REQ_F_POLLED;
7316 ipt.pt._qproc = io_async_queue_proc;
7318 io_kbuf_recycle(req, issue_flags);
7320 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
7321 if (ret || ipt.error)
7322 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
7324 trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode,
7325 mask, apoll->poll.events);
7330 * Returns true if we found and killed one or more poll requests
7332 static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
7333 struct task_struct *tsk, bool cancel_all)
7335 struct hlist_node *tmp;
7336 struct io_kiocb *req;
7340 spin_lock(&ctx->completion_lock);
7341 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
7342 struct hlist_head *list;
7344 list = &ctx->cancel_hash[i];
7345 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
7346 if (io_match_task_safe(req, tsk, cancel_all)) {
7347 hlist_del_init(&req->hash_node);
7348 io_poll_cancel_req(req);
7353 spin_unlock(&ctx->completion_lock);
7357 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
7358 struct io_cancel_data *cd)
7359 __must_hold(&ctx->completion_lock)
7361 struct hlist_head *list;
7362 struct io_kiocb *req;
7364 list = &ctx->cancel_hash[hash_long(cd->data, ctx->cancel_hash_bits)];
7365 hlist_for_each_entry(req, list, hash_node) {
7366 if (cd->data != req->cqe.user_data)
7368 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
7370 if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
7371 if (cd->seq == req->work.cancel_seq)
7373 req->work.cancel_seq = cd->seq;
7380 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
7381 struct io_cancel_data *cd)
7382 __must_hold(&ctx->completion_lock)
7384 struct io_kiocb *req;
7387 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
7388 struct hlist_head *list;
7390 list = &ctx->cancel_hash[i];
7391 hlist_for_each_entry(req, list, hash_node) {
7392 if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
7393 req->file != cd->file)
7395 if (cd->seq == req->work.cancel_seq)
7397 req->work.cancel_seq = cd->seq;
7404 static bool io_poll_disarm(struct io_kiocb *req)
7405 __must_hold(&ctx->completion_lock)
7407 if (!io_poll_get_ownership(req))
7409 io_poll_remove_entries(req);
7410 hash_del(&req->hash_node);
7414 static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
7415 __must_hold(&ctx->completion_lock)
7417 struct io_kiocb *req;
7419 if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
7420 req = io_poll_file_find(ctx, cd);
7422 req = io_poll_find(ctx, false, cd);
7425 io_poll_cancel_req(req);
7429 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
7434 events = READ_ONCE(sqe->poll32_events);
7436 events = swahw32(events);
7438 if (!(flags & IORING_POLL_ADD_MULTI))
7439 events |= EPOLLONESHOT;
7440 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
7443 static int io_poll_remove_prep(struct io_kiocb *req,
7444 const struct io_uring_sqe *sqe)
7446 struct io_poll_update *upd = &req->poll_update;
7449 if (sqe->buf_index || sqe->splice_fd_in)
7451 flags = READ_ONCE(sqe->len);
7452 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
7453 IORING_POLL_ADD_MULTI))
7455 /* meaningless without update */
7456 if (flags == IORING_POLL_ADD_MULTI)
7459 upd->old_user_data = READ_ONCE(sqe->addr);
7460 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
7461 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
7463 upd->new_user_data = READ_ONCE(sqe->off);
7464 if (!upd->update_user_data && upd->new_user_data)
7466 if (upd->update_events)
7467 upd->events = io_poll_parse_events(sqe, flags);
7468 else if (sqe->poll32_events)
7474 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
7476 struct io_poll_iocb *poll = &req->poll;
7479 if (sqe->buf_index || sqe->off || sqe->addr)
7481 flags = READ_ONCE(sqe->len);
7482 if (flags & ~IORING_POLL_ADD_MULTI)
7484 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
7487 io_req_set_refcount(req);
7488 req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
7492 static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
7494 struct io_poll_iocb *poll = &req->poll;
7495 struct io_poll_table ipt;
7498 ipt.pt._qproc = io_poll_queue_proc;
7500 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
7501 ret = ret ?: ipt.error;
7503 __io_req_complete(req, issue_flags, ret, 0);
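/*
 * Illustrative userspace sketch (assumes liburing's poll helpers, e.g.
 * io_uring_prep_poll_multishot()): arming a multishot poll on a socket so a
 * single SQE keeps producing CQEs until the poll is cancelled or terminates
 * with an error:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_multishot(sqe, sockfd, POLLIN);
 *	sqe->user_data = 0x1234;
 *	io_uring_submit(&ring);
 *
 * CQEs posted while the poll stays armed carry IORING_CQE_F_MORE.
 */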
7507 static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
7509 struct io_cancel_data cd = { .data = req->poll_update.old_user_data, };
7510 struct io_ring_ctx *ctx = req->ctx;
7511 struct io_kiocb *preq;
7515 spin_lock(&ctx->completion_lock);
7516 preq = io_poll_find(ctx, true, &cd);
7517 if (!preq || !io_poll_disarm(preq)) {
7518 spin_unlock(&ctx->completion_lock);
7519 ret = preq ? -EALREADY : -ENOENT;
7522 spin_unlock(&ctx->completion_lock);
7524 if (req->poll_update.update_events || req->poll_update.update_user_data) {
7525 /* only mask one event flags, keep behavior flags */
7526 if (req->poll_update.update_events) {
7527 preq->poll.events &= ~0xffff;
7528 preq->poll.events |= req->poll_update.events & 0xffff;
7529 preq->poll.events |= IO_POLL_UNMASK;
7531 if (req->poll_update.update_user_data)
7532 preq->cqe.user_data = req->poll_update.new_user_data;
7534 ret2 = io_poll_add(preq, issue_flags);
7535 /* successfully updated, don't complete poll request */
7541 preq->cqe.res = -ECANCELED;
7542 locked = !(issue_flags & IO_URING_F_UNLOCKED);
7543 io_req_task_complete(preq, &locked);
7547 /* complete update request, we're done with it */
7548 __io_req_complete(req, issue_flags, ret, 0);
7552 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
7554 struct io_timeout_data *data = container_of(timer,
7555 struct io_timeout_data, timer);
7556 struct io_kiocb *req = data->req;
7557 struct io_ring_ctx *ctx = req->ctx;
7558 unsigned long flags;
7560 spin_lock_irqsave(&ctx->timeout_lock, flags);
7561 list_del_init(&req->timeout.list);
7562 atomic_set(&req->ctx->cq_timeouts,
7563 atomic_read(&req->ctx->cq_timeouts) + 1);
7564 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
7566 if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
7569 req->cqe.res = -ETIME;
7570 req->io_task_work.func = io_req_task_complete;
7571 io_req_task_work_add(req);
7572 return HRTIMER_NORESTART;
7575 static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
7576 struct io_cancel_data *cd)
7577 __must_hold(&ctx->timeout_lock)
7579 struct io_timeout_data *io;
7580 struct io_kiocb *req;
7583 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
7584 if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
7585 cd->data != req->cqe.user_data)
7587 if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
7588 if (cd->seq == req->work.cancel_seq)
7590 req->work.cancel_seq = cd->seq;
7596 return ERR_PTR(-ENOENT);
7598 io = req->async_data;
7599 if (hrtimer_try_to_cancel(&io->timer) == -1)
7600 return ERR_PTR(-EALREADY);
7601 list_del_init(&req->timeout.list);
7605 static int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
7606 __must_hold(&ctx->completion_lock)
7608 struct io_kiocb *req;
7610 spin_lock_irq(&ctx->timeout_lock);
7611 req = io_timeout_extract(ctx, cd);
7612 spin_unlock_irq(&ctx->timeout_lock);
7615 return PTR_ERR(req);
7616 io_req_task_queue_fail(req, -ECANCELED);
static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}
7636 static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
7637 struct timespec64 *ts, enum hrtimer_mode mode)
7638 __must_hold(&ctx->timeout_lock)
7640 struct io_timeout_data *io;
7641 struct io_kiocb *req;
7644 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
7645 found = user_data == req->cqe.user_data;
7652 io = req->async_data;
7653 if (hrtimer_try_to_cancel(&io->timer) == -1)
7655 hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
7656 io->timer.function = io_link_timeout_fn;
7657 hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
7661 static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
7662 struct timespec64 *ts, enum hrtimer_mode mode)
7663 __must_hold(&ctx->timeout_lock)
7665 struct io_cancel_data cd = { .data = user_data, };
7666 struct io_kiocb *req = io_timeout_extract(ctx, &cd);
7667 struct io_timeout_data *data;
7670 return PTR_ERR(req);
7672 req->timeout.off = 0; /* noseq */
7673 data = req->async_data;
7674 list_add_tail(&req->timeout.list, &ctx->timeout_list);
7675 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
7676 data->timer.function = io_timeout_fn;
7677 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
7681 static int io_timeout_remove_prep(struct io_kiocb *req,
7682 const struct io_uring_sqe *sqe)
7684 struct io_timeout_rem *tr = &req->timeout_rem;
7686 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
7688 if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
7691 tr->ltimeout = false;
7692 tr->addr = READ_ONCE(sqe->addr);
7693 tr->flags = READ_ONCE(sqe->timeout_flags);
7694 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
7695 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
7697 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
7698 tr->ltimeout = true;
7699 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
7701 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
7703 if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
7705 } else if (tr->flags) {
7706 /* timeout removal doesn't support flags */
static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
7722 static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
7724 struct io_timeout_rem *tr = &req->timeout_rem;
7725 struct io_ring_ctx *ctx = req->ctx;
7728 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
7729 struct io_cancel_data cd = { .data = tr->addr, };
7731 spin_lock(&ctx->completion_lock);
7732 ret = io_timeout_cancel(ctx, &cd);
7733 spin_unlock(&ctx->completion_lock);
7735 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
7737 spin_lock_irq(&ctx->timeout_lock);
7739 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
7741 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
7742 spin_unlock_irq(&ctx->timeout_lock);
7747 io_req_complete_post(req, ret, 0);
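/*
 * Illustrative userspace sketch (assumes the liburing helper named below):
 * updating a previously submitted timeout, identified by its user_data, to a
 * new 5 second expiry instead of cancelling and re-arming it:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 5 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout_update(sqe, &ts, orig_timeout_user_data, 0);
 *	io_uring_submit(&ring);
 *
 * Per the extract/update paths above, the update CQE reports 0 on success,
 * -ENOENT if no matching timeout was found, or -EALREADY if it was already
 * firing.
 */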
7751 static int __io_timeout_prep(struct io_kiocb *req,
7752 const struct io_uring_sqe *sqe,
7753 bool is_timeout_link)
7755 struct io_timeout_data *data;
7757 u32 off = READ_ONCE(sqe->off);
7759 if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
7761 if (off && is_timeout_link)
7763 flags = READ_ONCE(sqe->timeout_flags);
7764 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
7765 IORING_TIMEOUT_ETIME_SUCCESS))
7767 /* more than one clock specified is invalid, obviously */
7768 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
7771 INIT_LIST_HEAD(&req->timeout.list);
7772 req->timeout.off = off;
7773 if (unlikely(off && !req->ctx->off_timeout_used))
7774 req->ctx->off_timeout_used = true;
7776 if (WARN_ON_ONCE(req_has_async_data(req)))
7778 if (io_alloc_async_data(req))
7781 data = req->async_data;
7783 data->flags = flags;
7785 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
7788 if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
7791 INIT_LIST_HEAD(&req->timeout.list);
7792 data->mode = io_translate_timeout_mode(flags);
7793 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
7795 if (is_timeout_link) {
7796 struct io_submit_link *link = &req->ctx->submit_state.link;
7800 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
7802 req->timeout.head = link->last;
7803 link->last->flags |= REQ_F_ARM_LTIMEOUT;
7808 static int io_timeout_prep(struct io_kiocb *req,
7809 const struct io_uring_sqe *sqe)
7811 return __io_timeout_prep(req, sqe, false);
7814 static int io_link_timeout_prep(struct io_kiocb *req,
7815 const struct io_uring_sqe *sqe)
7817 return __io_timeout_prep(req, sqe, true);
7820 static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
7822 struct io_ring_ctx *ctx = req->ctx;
7823 struct io_timeout_data *data = req->async_data;
7824 struct list_head *entry;
7825 u32 tail, off = req->timeout.off;
7827 spin_lock_irq(&ctx->timeout_lock);
7830 * sqe->off holds how many events that need to occur for this
7831 * timeout event to be satisfied. If it isn't set, then this is
7832 * a pure timeout request, sequence isn't used.
7834 if (io_is_timeout_noseq(req)) {
7835 entry = ctx->timeout_list.prev;
7839 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
7840 req->timeout.target_seq = tail + off;
7842 /* Update the last seq here in case io_flush_timeouts() hasn't.
7843 * This is safe because ->completion_lock is held, and submissions
7844 * and completions are never mixed in the same ->completion_lock section.
7846 ctx->cq_last_tm_flush = tail;
7849 * Insertion sort, ensuring the first entry in the list is always
7850 * the one we need first.
7852 list_for_each_prev(entry, &ctx->timeout_list) {
7853 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
7856 if (io_is_timeout_noseq(nxt))
7858 /* nxt.seq is behind @tail, otherwise would've been completed */
7859 if (off >= nxt->timeout.target_seq - tail)
7863 list_add(&req->timeout.list, entry);
7864 data->timer.function = io_timeout_fn;
7865 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
7866 spin_unlock_irq(&ctx->timeout_lock);
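/*
 * Illustrative userspace sketch (assumes liburing's io_uring_prep_timeout()):
 * a timeout that completes either when 8 other completions have been posted
 * or when 1 second has elapsed, whichever happens first. The count is what
 * ends up in sqe->off above:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout(sqe, &ts, 8, 0);
 *	io_uring_submit(&ring);
 *
 * The timeout CQE typically carries -ETIME if the timer expired first, or 0
 * if the completion count was reached.
 */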
7870 static bool io_cancel_cb(struct io_wq_work *work, void *data)
7872 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7873 struct io_cancel_data *cd = data;
7875 if (req->ctx != cd->ctx)
7877 if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
7879 } else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
7880 if (req->file != cd->file)
7883 if (req->cqe.user_data != cd->data)
7886 if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
7887 if (cd->seq == req->work.cancel_seq)
7889 req->work.cancel_seq = cd->seq;
7894 static int io_async_cancel_one(struct io_uring_task *tctx,
7895 struct io_cancel_data *cd)
7897 enum io_wq_cancel cancel_ret;
7901 if (!tctx || !tctx->io_wq)
7904 all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
7905 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
7906 switch (cancel_ret) {
7907 case IO_WQ_CANCEL_OK:
7910 case IO_WQ_CANCEL_RUNNING:
7913 case IO_WQ_CANCEL_NOTFOUND:
7921 static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
7923 struct io_ring_ctx *ctx = req->ctx;
7926 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
7928 ret = io_async_cancel_one(req->task->io_uring, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll armed
	 * that needs unarming.
	 */
7936 spin_lock(&ctx->completion_lock);
7937 ret = io_poll_cancel(ctx, cd);
7940 if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
7941 ret = io_timeout_cancel(ctx, cd);
7943 spin_unlock(&ctx->completion_lock);
7947 #define CANCEL_FLAGS (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
7948 IORING_ASYNC_CANCEL_ANY)
7950 static int io_async_cancel_prep(struct io_kiocb *req,
7951 const struct io_uring_sqe *sqe)
7953 if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
7955 if (sqe->off || sqe->len || sqe->splice_fd_in)
7958 req->cancel.addr = READ_ONCE(sqe->addr);
7959 req->cancel.flags = READ_ONCE(sqe->cancel_flags);
7960 if (req->cancel.flags & ~CANCEL_FLAGS)
7962 if (req->cancel.flags & IORING_ASYNC_CANCEL_FD) {
7963 if (req->cancel.flags & IORING_ASYNC_CANCEL_ANY)
7965 req->cancel.fd = READ_ONCE(sqe->fd);
7971 static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
7972 unsigned int issue_flags)
7974 bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
7975 struct io_ring_ctx *ctx = cd->ctx;
7976 struct io_tctx_node *node;
7980 ret = io_try_cancel(req, cd);
7988 /* slow path, try all io-wq's */
7989 io_ring_submit_lock(ctx, issue_flags);
7991 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
7992 struct io_uring_task *tctx = node->task->io_uring;
7994 ret = io_async_cancel_one(tctx, cd);
7995 if (ret != -ENOENT) {
8001 io_ring_submit_unlock(ctx, issue_flags);
8002 return all ? nr : ret;
8005 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
8007 struct io_cancel_data cd = {
8009 .data = req->cancel.addr,
8010 .flags = req->cancel.flags,
8011 .seq = atomic_inc_return(&req->ctx->cancel_seq),
8015 if (cd.flags & IORING_ASYNC_CANCEL_FD) {
8016 if (req->flags & REQ_F_FIXED_FILE)
8017 req->file = io_file_get_fixed(req, req->cancel.fd,
8020 req->file = io_file_get_normal(req, req->cancel.fd);
8025 cd.file = req->file;
8028 ret = __io_async_cancel(&cd, req, issue_flags);
8032 io_req_complete_post(req, ret, 0);
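/*
 * Illustrative userspace sketch (assumes liburing; io_uring_prep_cancel64()
 * exists in recent liburing releases): cancelling every pending request that
 * was submitted with a given user_data, not just the first match:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cancel64(sqe, target_user_data, IORING_ASYNC_CANCEL_ALL);
 *	io_uring_submit(&ring);
 *
 * With IORING_ASYNC_CANCEL_ALL the cancel CQE reports how many requests were
 * cancelled (see __io_async_cancel() above); without it, 0 on success or
 * -ENOENT/-EALREADY.
 */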
8036 static int io_files_update_prep(struct io_kiocb *req,
8037 const struct io_uring_sqe *sqe)
8039 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
8041 if (sqe->rw_flags || sqe->splice_fd_in)
8044 req->rsrc_update.offset = READ_ONCE(sqe->off);
8045 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
8046 if (!req->rsrc_update.nr_args)
8048 req->rsrc_update.arg = READ_ONCE(sqe->addr);
8052 static int io_files_update_with_index_alloc(struct io_kiocb *req,
8053 unsigned int issue_flags)
8055 __s32 __user *fds = u64_to_user_ptr(req->rsrc_update.arg);
8060 for (done = 0; done < req->rsrc_update.nr_args; done++) {
8061 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
8071 ret = io_fixed_fd_install(req, issue_flags, file,
8072 IORING_FILE_INDEX_ALLOC);
8075 if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
8076 __io_close_fixed(req, issue_flags, ret);
8087 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
8089 struct io_ring_ctx *ctx = req->ctx;
8090 struct io_uring_rsrc_update2 up;
8093 up.offset = req->rsrc_update.offset;
8094 up.data = req->rsrc_update.arg;
8100 if (req->rsrc_update.offset == IORING_FILE_INDEX_ALLOC) {
8101 ret = io_files_update_with_index_alloc(req, issue_flags);
8103 io_ring_submit_lock(ctx, issue_flags);
8104 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
8105 &up, req->rsrc_update.nr_args);
8106 io_ring_submit_unlock(ctx, issue_flags);
8111 __io_req_complete(req, issue_flags, ret, 0);
8115 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
8117 switch (req->opcode) {
8119 return io_nop_prep(req, sqe);
8120 case IORING_OP_READV:
8121 case IORING_OP_READ_FIXED:
8122 case IORING_OP_READ:
8123 case IORING_OP_WRITEV:
8124 case IORING_OP_WRITE_FIXED:
8125 case IORING_OP_WRITE:
8126 return io_prep_rw(req, sqe);
8127 case IORING_OP_POLL_ADD:
8128 return io_poll_add_prep(req, sqe);
8129 case IORING_OP_POLL_REMOVE:
8130 return io_poll_remove_prep(req, sqe);
8131 case IORING_OP_FSYNC:
8132 return io_fsync_prep(req, sqe);
8133 case IORING_OP_SYNC_FILE_RANGE:
8134 return io_sfr_prep(req, sqe);
8135 case IORING_OP_SENDMSG:
8136 case IORING_OP_SEND:
8137 return io_sendmsg_prep(req, sqe);
8138 case IORING_OP_RECVMSG:
8139 case IORING_OP_RECV:
8140 return io_recvmsg_prep(req, sqe);
8141 case IORING_OP_CONNECT:
8142 return io_connect_prep(req, sqe);
8143 case IORING_OP_TIMEOUT:
8144 return io_timeout_prep(req, sqe);
8145 case IORING_OP_TIMEOUT_REMOVE:
8146 return io_timeout_remove_prep(req, sqe);
8147 case IORING_OP_ASYNC_CANCEL:
8148 return io_async_cancel_prep(req, sqe);
8149 case IORING_OP_LINK_TIMEOUT:
8150 return io_link_timeout_prep(req, sqe);
8151 case IORING_OP_ACCEPT:
8152 return io_accept_prep(req, sqe);
8153 case IORING_OP_FALLOCATE:
8154 return io_fallocate_prep(req, sqe);
8155 case IORING_OP_OPENAT:
8156 return io_openat_prep(req, sqe);
8157 case IORING_OP_CLOSE:
8158 return io_close_prep(req, sqe);
8159 case IORING_OP_FILES_UPDATE:
8160 return io_files_update_prep(req, sqe);
8161 case IORING_OP_STATX:
8162 return io_statx_prep(req, sqe);
8163 case IORING_OP_FADVISE:
8164 return io_fadvise_prep(req, sqe);
8165 case IORING_OP_MADVISE:
8166 return io_madvise_prep(req, sqe);
8167 case IORING_OP_OPENAT2:
8168 return io_openat2_prep(req, sqe);
8169 case IORING_OP_EPOLL_CTL:
8170 return io_epoll_ctl_prep(req, sqe);
8171 case IORING_OP_SPLICE:
8172 return io_splice_prep(req, sqe);
8173 case IORING_OP_PROVIDE_BUFFERS:
8174 return io_provide_buffers_prep(req, sqe);
8175 case IORING_OP_REMOVE_BUFFERS:
8176 return io_remove_buffers_prep(req, sqe);
8178 return io_tee_prep(req, sqe);
8179 case IORING_OP_SHUTDOWN:
8180 return io_shutdown_prep(req, sqe);
8181 case IORING_OP_RENAMEAT:
8182 return io_renameat_prep(req, sqe);
8183 case IORING_OP_UNLINKAT:
8184 return io_unlinkat_prep(req, sqe);
8185 case IORING_OP_MKDIRAT:
8186 return io_mkdirat_prep(req, sqe);
8187 case IORING_OP_SYMLINKAT:
8188 return io_symlinkat_prep(req, sqe);
8189 case IORING_OP_LINKAT:
8190 return io_linkat_prep(req, sqe);
8191 case IORING_OP_MSG_RING:
8192 return io_msg_ring_prep(req, sqe);
8193 case IORING_OP_FSETXATTR:
8194 return io_fsetxattr_prep(req, sqe);
8195 case IORING_OP_SETXATTR:
8196 return io_setxattr_prep(req, sqe);
8197 case IORING_OP_FGETXATTR:
8198 return io_fgetxattr_prep(req, sqe);
8199 case IORING_OP_GETXATTR:
8200 return io_getxattr_prep(req, sqe);
8201 case IORING_OP_SOCKET:
8202 return io_socket_prep(req, sqe);
8203 case IORING_OP_URING_CMD:
8204 return io_uring_cmd_prep(req, sqe);
8207 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
8212 static int io_req_prep_async(struct io_kiocb *req)
8214 const struct io_op_def *def = &io_op_defs[req->opcode];
8216 /* assign early for deferred execution for non-fixed file */
8217 if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
8218 req->file = io_file_get_normal(req, req->cqe.fd);
8219 if (!def->needs_async_setup)
8221 if (WARN_ON_ONCE(req_has_async_data(req)))
8223 if (io_alloc_async_data(req))
8226 switch (req->opcode) {
8227 case IORING_OP_READV:
8228 return io_readv_prep_async(req);
8229 case IORING_OP_WRITEV:
8230 return io_writev_prep_async(req);
8231 case IORING_OP_SENDMSG:
8232 return io_sendmsg_prep_async(req);
8233 case IORING_OP_RECVMSG:
8234 return io_recvmsg_prep_async(req);
8235 case IORING_OP_CONNECT:
8236 return io_connect_prep_async(req);
8237 case IORING_OP_URING_CMD:
8238 return io_uring_cmd_prep_async(req);
8240 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
static u32 io_get_sequence(struct io_kiocb *req)
{
	u32 seq = req->ctx->cached_sq_head;
	struct io_kiocb *cur;

	/* need original cached_sq_head, but it was increased for each req */
	io_for_each_link(cur, req)
		seq--;
	return seq;
}
8256 static __cold void io_drain_req(struct io_kiocb *req)
8258 struct io_ring_ctx *ctx = req->ctx;
8259 struct io_defer_entry *de;
8261 u32 seq = io_get_sequence(req);
8263 /* Still need defer if there is pending req in defer list. */
8264 spin_lock(&ctx->completion_lock);
8265 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
8266 spin_unlock(&ctx->completion_lock);
8268 ctx->drain_active = false;
8269 io_req_task_queue(req);
8272 spin_unlock(&ctx->completion_lock);
8274 ret = io_req_prep_async(req);
8277 io_req_complete_failed(req, ret);
8280 io_prep_async_link(req);
8281 de = kmalloc(sizeof(*de), GFP_KERNEL);
8287 spin_lock(&ctx->completion_lock);
8288 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
8289 spin_unlock(&ctx->completion_lock);
8294 trace_io_uring_defer(ctx, req, req->cqe.user_data, req->opcode);
8297 list_add_tail(&de->list, &ctx->defer_list);
8298 spin_unlock(&ctx->completion_lock);
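/*
 * Illustrative userspace sketch (assumes liburing): IOSQE_IO_DRAIN makes this
 * fsync wait for every previously submitted request to complete before it is
 * issued, which is what the deferral path above implements:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fsync(sqe, fd, 0);
 *	io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);
 *	io_uring_submit(&ring);
 */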
8301 static void io_clean_op(struct io_kiocb *req)
8303 if (req->flags & REQ_F_BUFFER_SELECTED) {
8304 spin_lock(&req->ctx->completion_lock);
8305 io_put_kbuf_comp(req);
8306 spin_unlock(&req->ctx->completion_lock);
8309 if (req->flags & REQ_F_NEED_CLEANUP) {
8310 switch (req->opcode) {
8311 case IORING_OP_READV:
8312 case IORING_OP_READ_FIXED:
8313 case IORING_OP_READ:
8314 case IORING_OP_WRITEV:
8315 case IORING_OP_WRITE_FIXED:
8316 case IORING_OP_WRITE: {
8317 struct io_async_rw *io = req->async_data;
8319 kfree(io->free_iovec);
8322 case IORING_OP_RECVMSG:
8323 case IORING_OP_SENDMSG: {
8324 struct io_async_msghdr *io = req->async_data;
8326 kfree(io->free_iov);
8329 case IORING_OP_OPENAT:
8330 case IORING_OP_OPENAT2:
8331 if (req->open.filename)
8332 putname(req->open.filename);
8334 case IORING_OP_RENAMEAT:
8335 putname(req->rename.oldpath);
8336 putname(req->rename.newpath);
8338 case IORING_OP_UNLINKAT:
8339 putname(req->unlink.filename);
8341 case IORING_OP_MKDIRAT:
8342 putname(req->mkdir.filename);
8344 case IORING_OP_SYMLINKAT:
8345 putname(req->symlink.oldpath);
8346 putname(req->symlink.newpath);
8348 case IORING_OP_LINKAT:
8349 putname(req->hardlink.oldpath);
8350 putname(req->hardlink.newpath);
8352 case IORING_OP_STATX:
8353 if (req->statx.filename)
8354 putname(req->statx.filename);
8356 case IORING_OP_SETXATTR:
8357 case IORING_OP_FSETXATTR:
8358 case IORING_OP_GETXATTR:
8359 case IORING_OP_FGETXATTR:
8360 __io_xattr_finish(req);
8364 if ((req->flags & REQ_F_POLLED) && req->apoll) {
8365 kfree(req->apoll->double_poll);
8369 if (req->flags & REQ_F_INFLIGHT) {
8370 struct io_uring_task *tctx = req->task->io_uring;
8372 atomic_dec(&tctx->inflight_tracked);
8374 if (req->flags & REQ_F_CREDS)
8375 put_cred(req->creds);
8376 if (req->flags & REQ_F_ASYNC_DATA) {
8377 kfree(req->async_data);
8378 req->async_data = NULL;
8380 req->flags &= ~IO_REQ_CLEAN_FLAGS;
static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
{
	if (req->file || !io_op_defs[req->opcode].needs_file)
		return true;

	if (req->flags & REQ_F_FIXED_FILE)
		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
	else
		req->file = io_file_get_normal(req, req->cqe.fd);

	return !!req->file;
}
8396 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
8398 const struct io_op_def *def = &io_op_defs[req->opcode];
8399 const struct cred *creds = NULL;
8402 if (unlikely(!io_assign_file(req, issue_flags)))
8405 if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
8406 creds = override_creds(req->creds);
8408 if (!def->audit_skip)
8409 audit_uring_entry(req->opcode);
8411 switch (req->opcode) {
8413 ret = io_nop(req, issue_flags);
8415 case IORING_OP_READV:
8416 case IORING_OP_READ_FIXED:
8417 case IORING_OP_READ:
8418 ret = io_read(req, issue_flags);
8420 case IORING_OP_WRITEV:
8421 case IORING_OP_WRITE_FIXED:
8422 case IORING_OP_WRITE:
8423 ret = io_write(req, issue_flags);
8425 case IORING_OP_FSYNC:
8426 ret = io_fsync(req, issue_flags);
8428 case IORING_OP_POLL_ADD:
8429 ret = io_poll_add(req, issue_flags);
8431 case IORING_OP_POLL_REMOVE:
8432 ret = io_poll_remove(req, issue_flags);
8434 case IORING_OP_SYNC_FILE_RANGE:
8435 ret = io_sync_file_range(req, issue_flags);
8437 case IORING_OP_SENDMSG:
8438 ret = io_sendmsg(req, issue_flags);
8440 case IORING_OP_SEND:
8441 ret = io_send(req, issue_flags);
8443 case IORING_OP_RECVMSG:
8444 ret = io_recvmsg(req, issue_flags);
8446 case IORING_OP_RECV:
8447 ret = io_recv(req, issue_flags);
8449 case IORING_OP_TIMEOUT:
8450 ret = io_timeout(req, issue_flags);
8452 case IORING_OP_TIMEOUT_REMOVE:
8453 ret = io_timeout_remove(req, issue_flags);
8455 case IORING_OP_ACCEPT:
8456 ret = io_accept(req, issue_flags);
8458 case IORING_OP_CONNECT:
8459 ret = io_connect(req, issue_flags);
8461 case IORING_OP_ASYNC_CANCEL:
8462 ret = io_async_cancel(req, issue_flags);
8464 case IORING_OP_FALLOCATE:
8465 ret = io_fallocate(req, issue_flags);
8467 case IORING_OP_OPENAT:
8468 ret = io_openat(req, issue_flags);
8470 case IORING_OP_CLOSE:
8471 ret = io_close(req, issue_flags);
8473 case IORING_OP_FILES_UPDATE:
8474 ret = io_files_update(req, issue_flags);
8476 case IORING_OP_STATX:
8477 ret = io_statx(req, issue_flags);
8479 case IORING_OP_FADVISE:
8480 ret = io_fadvise(req, issue_flags);
8482 case IORING_OP_MADVISE:
8483 ret = io_madvise(req, issue_flags);
8485 case IORING_OP_OPENAT2:
8486 ret = io_openat2(req, issue_flags);
8488 case IORING_OP_EPOLL_CTL:
8489 ret = io_epoll_ctl(req, issue_flags);
8491 case IORING_OP_SPLICE:
8492 ret = io_splice(req, issue_flags);
8494 case IORING_OP_PROVIDE_BUFFERS:
8495 ret = io_provide_buffers(req, issue_flags);
8497 case IORING_OP_REMOVE_BUFFERS:
8498 ret = io_remove_buffers(req, issue_flags);
8501 ret = io_tee(req, issue_flags);
8503 case IORING_OP_SHUTDOWN:
8504 ret = io_shutdown(req, issue_flags);
8506 case IORING_OP_RENAMEAT:
8507 ret = io_renameat(req, issue_flags);
8509 case IORING_OP_UNLINKAT:
8510 ret = io_unlinkat(req, issue_flags);
8512 case IORING_OP_MKDIRAT:
8513 ret = io_mkdirat(req, issue_flags);
8515 case IORING_OP_SYMLINKAT:
8516 ret = io_symlinkat(req, issue_flags);
8518 case IORING_OP_LINKAT:
8519 ret = io_linkat(req, issue_flags);
8521 case IORING_OP_MSG_RING:
8522 ret = io_msg_ring(req, issue_flags);
8524 case IORING_OP_FSETXATTR:
8525 ret = io_fsetxattr(req, issue_flags);
8527 case IORING_OP_SETXATTR:
8528 ret = io_setxattr(req, issue_flags);
8530 case IORING_OP_FGETXATTR:
8531 ret = io_fgetxattr(req, issue_flags);
8533 case IORING_OP_GETXATTR:
8534 ret = io_getxattr(req, issue_flags);
8536 case IORING_OP_SOCKET:
8537 ret = io_socket(req, issue_flags);
8539 case IORING_OP_URING_CMD:
8540 ret = io_uring_cmd(req, issue_flags);
8547 if (!def->audit_skip)
8548 audit_uring_exit(!ret, ret);
8551 revert_creds(creds);
8554 /* If the op doesn't have a file, we're not polling for it */
8555 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
8556 io_iopoll_req_issued(req, issue_flags);
static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	req = io_put_req_find_next(req);
	return req ? &req->work : NULL;
}
8569 static void io_wq_submit_work(struct io_wq_work *work)
8571 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8572 const struct io_op_def *def = &io_op_defs[req->opcode];
8573 unsigned int issue_flags = IO_URING_F_UNLOCKED;
8574 bool needs_poll = false;
8575 int ret = 0, err = -ECANCELED;
8577 /* one will be dropped by ->io_free_work() after returning to io-wq */
8578 if (!(req->flags & REQ_F_REFCOUNT))
8579 __io_req_set_refcount(req, 2);
8583 io_arm_ltimeout(req);
8585 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
8586 if (work->flags & IO_WQ_WORK_CANCEL) {
8588 io_req_task_queue_fail(req, err);
8591 if (!io_assign_file(req, issue_flags)) {
8593 work->flags |= IO_WQ_WORK_CANCEL;
8597 if (req->flags & REQ_F_FORCE_ASYNC) {
8598 bool opcode_poll = def->pollin || def->pollout;
8600 if (opcode_poll && file_can_poll(req->file)) {
8602 issue_flags |= IO_URING_F_NONBLOCK;
8607 ret = io_issue_sqe(req, issue_flags);
8611 * We can get EAGAIN for iopolled IO even though we're
8612 * forcing a sync submission from here, since we can't
8613 * wait for request slots on the block side.
8616 if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
8622 if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
8624 /* aborted or ready, in either case retry blocking */
8626 issue_flags &= ~IO_URING_F_NONBLOCK;
8629 /* avoid locking problems by failing it from a clean context */
8631 io_req_task_queue_fail(req, ret);
static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
						       unsigned i)
{
	return &table->files[i];
}

static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
					      int index)
{
	struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);

	return (struct file *) (slot->file_ptr & FFS_MASK);
}

static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
{
	unsigned long file_ptr = (unsigned long) file;

	file_ptr |= io_file_get_flags(file);
	file_slot->file_ptr = file_ptr;
}
8656 static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
8657 unsigned int issue_flags)
8659 struct io_ring_ctx *ctx = req->ctx;
8660 struct file *file = NULL;
8661 unsigned long file_ptr;
8663 io_ring_submit_lock(ctx, issue_flags);
8665 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
8667 fd = array_index_nospec(fd, ctx->nr_user_files);
8668 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
8669 file = (struct file *) (file_ptr & FFS_MASK);
8670 file_ptr &= ~FFS_MASK;
8671 /* mask in overlapping REQ_F and FFS bits */
8672 req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
8673 io_req_set_rsrc_node(req, ctx, 0);
8674 WARN_ON_ONCE(file && !test_bit(fd, ctx->file_table.bitmap));
8676 io_ring_submit_unlock(ctx, issue_flags);
static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
{
	struct file *file = fget(fd);

	trace_io_uring_file_get(req->ctx, req, req->cqe.user_data, fd);

	/* we don't allow fixed io_uring files */
	if (file && file->f_op == &io_uring_fops)
		io_req_track_inflight(req);
	return file;
}
8692 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
8694 struct io_kiocb *prev = req->timeout.prev;
8698 if (!(req->task->flags & PF_EXITING)) {
8699 struct io_cancel_data cd = {
8701 .data = prev->cqe.user_data,
8704 ret = io_try_cancel(req, &cd);
8706 io_req_complete_post(req, ret ?: -ETIME, 0);
8709 io_req_complete_post(req, -ETIME, 0);
8713 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
8715 struct io_timeout_data *data = container_of(timer,
8716 struct io_timeout_data, timer);
8717 struct io_kiocb *prev, *req = data->req;
8718 struct io_ring_ctx *ctx = req->ctx;
8719 unsigned long flags;
8721 spin_lock_irqsave(&ctx->timeout_lock, flags);
8722 prev = req->timeout.head;
8723 req->timeout.head = NULL;
	/*
	 * We don't expect the list to be empty; that will only happen if we
	 * race with the completion of the linked work.
	 */
8730 io_remove_next_linked(prev);
8731 if (!req_ref_inc_not_zero(prev))
8734 list_del(&req->timeout.list);
8735 req->timeout.prev = prev;
8736 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
8738 req->io_task_work.func = io_req_task_link_timeout;
8739 io_req_task_work_add(req);
8740 return HRTIMER_NORESTART;
8743 static void io_queue_linked_timeout(struct io_kiocb *req)
8745 struct io_ring_ctx *ctx = req->ctx;
8747 spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to set up the timer.
	 */
8752 if (req->timeout.head) {
8753 struct io_timeout_data *data = req->async_data;
8755 data->timer.function = io_link_timeout_fn;
8756 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
8758 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
8760 spin_unlock_irq(&ctx->timeout_lock);
8761 /* drop submission reference */
8765 static void io_queue_async(struct io_kiocb *req, int ret)
8766 __must_hold(&req->ctx->uring_lock)
8768 struct io_kiocb *linked_timeout;
8770 if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
8771 io_req_complete_failed(req, ret);
8775 linked_timeout = io_prep_linked_timeout(req);
8777 switch (io_arm_poll_handler(req, 0)) {
8778 case IO_APOLL_READY:
8779 io_req_task_queue(req);
8781 case IO_APOLL_ABORTED:
8783 * Queued up for async execution, worker will release
8784 * submit reference when the iocb is actually submitted.
8786 io_queue_iowq(req, NULL);
8793 io_queue_linked_timeout(linked_timeout);
8796 static inline void io_queue_sqe(struct io_kiocb *req)
8797 __must_hold(&req->ctx->uring_lock)
8801 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
8803 if (req->flags & REQ_F_COMPLETE_INLINE) {
8804 io_req_add_compl_list(req);
8808 * We async punt it if the file wasn't marked NOWAIT, or if the file
8809 * doesn't support non-blocking read/write attempts
8812 io_arm_ltimeout(req);
8814 io_queue_async(req, ret);
8817 static void io_queue_sqe_fallback(struct io_kiocb *req)
8818 __must_hold(&req->ctx->uring_lock)
8820 if (unlikely(req->flags & REQ_F_FAIL)) {
8822 * We don't submit, fail them all, for that replace hardlinks
8823 * with normal links. Extra REQ_F_LINK is tolerated.
8825 req->flags &= ~REQ_F_HARDLINK;
8826 req->flags |= REQ_F_LINK;
8827 io_req_complete_failed(req, req->cqe.res);
8828 } else if (unlikely(req->ctx->drain_active)) {
8831 int ret = io_req_prep_async(req);
8834 io_req_complete_failed(req, ret);
8836 io_queue_iowq(req, NULL);
8841 * Check SQE restrictions (opcode and flags).
8843 * Returns 'true' if SQE is allowed, 'false' otherwise.
8845 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
8846 struct io_kiocb *req,
8847 unsigned int sqe_flags)
8849 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
8852 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
8853 ctx->restrictions.sqe_flags_required)
8856 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
8857 ctx->restrictions.sqe_flags_required))
8863 static void io_init_req_drain(struct io_kiocb *req)
8865 struct io_ring_ctx *ctx = req->ctx;
8866 struct io_kiocb *head = ctx->submit_state.link.head;
8868 ctx->drain_active = true;
8871 * If we need to drain a request in the middle of a link, drain
8872 * the head request and the next request/link after the current
8873 * link. Considering sequential execution of links,
8874 * REQ_F_IO_DRAIN will be maintained for every request of our
8877 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
8878 ctx->drain_next = true;
8882 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
8883 const struct io_uring_sqe *sqe)
8884 __must_hold(&ctx->uring_lock)
8886 const struct io_op_def *def;
8887 unsigned int sqe_flags;
8891 /* req is partially pre-initialised, see io_preinit_req() */
8892 req->opcode = opcode = READ_ONCE(sqe->opcode);
8893 /* same numerical values with corresponding REQ_F_*, safe to copy */
8894 req->flags = sqe_flags = READ_ONCE(sqe->flags);
8895 req->cqe.user_data = READ_ONCE(sqe->user_data);
8897 req->rsrc_node = NULL;
8898 req->task = current;
8900 if (unlikely(opcode >= IORING_OP_LAST)) {
8904 def = &io_op_defs[opcode];
8905 if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
8906 /* enforce forwards compatibility on users */
8907 if (sqe_flags & ~SQE_VALID_FLAGS)
8909 if (sqe_flags & IOSQE_BUFFER_SELECT) {
8910 if (!def->buffer_select)
8912 req->buf_index = READ_ONCE(sqe->buf_group);
8914 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
8915 ctx->drain_disabled = true;
8916 if (sqe_flags & IOSQE_IO_DRAIN) {
8917 if (ctx->drain_disabled)
8919 io_init_req_drain(req);
8922 if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
8923 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
8925 /* knock it to the slow queue path, will be drained there */
8926 if (ctx->drain_active)
8927 req->flags |= REQ_F_FORCE_ASYNC;
8928 /* if there is no link, we're at "next" request and need to drain */
8929 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
8930 ctx->drain_next = false;
8931 ctx->drain_active = true;
8932 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
8936 if (!def->ioprio && sqe->ioprio)
8938 if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
8941 if (def->needs_file) {
8942 struct io_submit_state *state = &ctx->submit_state;
8944 req->cqe.fd = READ_ONCE(sqe->fd);
8947 * Plug now if we have more than 2 IO left after this, and the
8948 * target is potentially a read/write to block based storage.
8950 if (state->need_plug && def->plug) {
8951 state->plug_started = true;
8952 state->need_plug = false;
8953 blk_start_plug_nr_ios(&state->plug, state->submit_nr);
8957 personality = READ_ONCE(sqe->personality);
8961 req->creds = xa_load(&ctx->personalities, personality);
8964 get_cred(req->creds);
8965 ret = security_uring_override_creds(req->creds);
8967 put_cred(req->creds);
8970 req->flags |= REQ_F_CREDS;
8973 return io_req_prep(req, sqe);
8976 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
8977 struct io_kiocb *req, int ret)
8979 struct io_ring_ctx *ctx = req->ctx;
8980 struct io_submit_link *link = &ctx->submit_state.link;
8981 struct io_kiocb *head = link->head;
8983 trace_io_uring_req_failed(sqe, ctx, req, ret);
8986 * Avoid breaking links in the middle as it renders links with SQPOLL
8987 * unusable. Instead of failing eagerly, continue assembling the link if
8988 * applicable and mark the head with REQ_F_FAIL. The link flushing code
8989 * should find the flag and handle the rest.
8991 req_fail_link_node(req, ret);
8992 if (head && !(head->flags & REQ_F_FAIL))
8993 req_fail_link_node(head, -ECANCELED);
8995 if (!(req->flags & IO_REQ_LINK_FLAGS)) {
8997 link->last->link = req;
9001 io_queue_sqe_fallback(req);
9006 link->last->link = req;
9013 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
9014 const struct io_uring_sqe *sqe)
9015 __must_hold(&ctx->uring_lock)
9017 struct io_submit_link *link = &ctx->submit_state.link;
9020 ret = io_init_req(ctx, req, sqe);
9022 return io_submit_fail_init(sqe, req, ret);
9024 /* don't need @sqe from now on */
9025 trace_io_uring_submit_sqe(ctx, req, req->cqe.user_data, req->opcode,
9027 ctx->flags & IORING_SETUP_SQPOLL);
9030 * If we already have a head request, queue this one for async
9031 * submittal once the head completes. If we don't have a head but
9032 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
9033 * submitted sync once the chain is complete. If none of those
9034 * conditions are true (normal request), then just queue it.
9036 if (unlikely(link->head)) {
9037 ret = io_req_prep_async(req);
9039 return io_submit_fail_init(sqe, req, ret);
9041 trace_io_uring_link(ctx, req, link->head);
9042 link->last->link = req;
9045 if (req->flags & IO_REQ_LINK_FLAGS)
9047 /* last request of the link, flush it */
9050 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
9053 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
9054 REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
9055 if (req->flags & IO_REQ_LINK_FLAGS) {
9060 io_queue_sqe_fallback(req);
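/*
 * Illustrative userspace sketch (assumes liburing): a two-request link where
 * the fsync only runs after the write completes; a failed write cancels the
 * remainder of the chain with -ECANCELED, as handled by the link code above:
 *
 *	struct io_uring_sqe *sqe;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, 0);
 *
 *	io_uring_submit(&ring);
 */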
9070 * Batched submission is done, ensure local IO is flushed out.
9072 static void io_submit_state_end(struct io_ring_ctx *ctx)
9074 struct io_submit_state *state = &ctx->submit_state;
9076 if (unlikely(state->link.head))
9077 io_queue_sqe_fallback(state->link.head);
9078 /* flush only after queuing links as they can generate completions */
9079 io_submit_flush_completions(ctx);
9080 if (state->plug_started)
9081 blk_finish_plug(&state->plug);
9085 * Start submission side cache.
9087 static void io_submit_state_start(struct io_submit_state *state,
9088 unsigned int max_ios)
9090 state->plug_started = false;
9091 state->need_plug = max_ios > 2;
9092 state->submit_nr = max_ios;
9093 /* set only head, no need to init link_last in advance */
9094 state->link.head = NULL;
9097 static void io_commit_sqring(struct io_ring_ctx *ctx)
9099 struct io_rings *rings = ctx->rings;
9102 * Ensure any loads from the SQEs are done at this point,
9103 * since once we write the new head, the application could
9104 * write new data to them.
9106 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
9110 * Fetch an sqe, if one is available. Note this returns a pointer to memory
9111 * that is mapped by userspace. This means that care needs to be taken to
9112 * ensure that reads are stable, as we cannot rely on userspace always
9113 * being a good citizen. If members of the sqe are validated and then later
9114 * used, it's important that those reads are done through READ_ONCE() to
9115 * prevent a re-load down the line.
9117 static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
9119 unsigned head, mask = ctx->sq_entries - 1;
9120 unsigned sq_idx = ctx->cached_sq_head++ & mask;
9123 * The cached sq head (or cq tail) serves two purposes:
9125 * 1) allows us to batch the cost of updating the user visible
9127 * 2) allows the kernel side to track the head on its own, even
9128 * though the application is the one updating it.
9130 head = READ_ONCE(ctx->sq_array[sq_idx]);
9131 if (likely(head < ctx->sq_entries)) {
9132 /* double index for 128-byte SQEs, twice as long */
9133 if (ctx->flags & IORING_SETUP_SQE128)
9135 return &ctx->sq_sqes[head];
9138 /* drop invalid entries */
9140 WRITE_ONCE(ctx->rings->sq_dropped,
9141 READ_ONCE(ctx->rings->sq_dropped) + 1);
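/*
 * For reference, a sketch of the userspace side of this indexing (field
 * names are illustrative, modelled on the io_uring(7) man page; liburing
 * hides this behind io_uring_get_sqe()/io_uring_submit()):
 *
 *	unsigned tail = *sring_tail;
 *	unsigned index = tail & *sring_mask;
 *
 *	sring_array[index] = sqe_index;	// slot in the SQE array to submit
 *	// publish the new tail with a release store, then io_uring_enter()
 *
 * An out-of-range index read back here is simply dropped and accounted in
 * sq_dropped, as above.
 */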
9145 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
9146 __must_hold(&ctx->uring_lock)
9148 unsigned int entries = io_sqring_entries(ctx);
9152 if (unlikely(!entries))
9154 /* make sure SQ entry isn't read before tail */
9155 ret = left = min3(nr, ctx->sq_entries, entries);
9156 io_get_task_refs(left);
9157 io_submit_state_start(&ctx->submit_state, left);
9160 const struct io_uring_sqe *sqe;
9161 struct io_kiocb *req;
9163 if (unlikely(!io_alloc_req_refill(ctx)))
9165 req = io_alloc_req(ctx);
9166 sqe = io_get_sqe(ctx);
9167 if (unlikely(!sqe)) {
9168 io_req_add_to_cache(req, ctx);
9173 * Continue submitting even for sqe failure if the
9174 * ring was setup with IORING_SETUP_SUBMIT_ALL
9176 if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
9177 !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
9183 if (unlikely(left)) {
9185 /* try again if it submitted nothing and can't allocate a req */
9186 if (!ret && io_req_cache_empty(ctx))
9188 current->io_uring->cached_refs += left;
9191 io_submit_state_end(ctx);
9192 /* Commit SQ ring head once we've consumed and submitted all SQEs */
9193 io_commit_sqring(ctx);
static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}
9202 static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
9204 unsigned int to_submit;
9207 to_submit = io_sqring_entries(ctx);
9208 /* if we're handling multiple rings, cap submit size for fairness */
9209 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
9210 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
9212 if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
9213 const struct cred *creds = NULL;
9215 if (ctx->sq_creds != current_cred())
9216 creds = override_creds(ctx->sq_creds);
9218 mutex_lock(&ctx->uring_lock);
9219 if (!wq_list_empty(&ctx->iopoll_list))
9220 io_do_iopoll(ctx, true);
9223 * Don't submit if refs are dying, good for io_uring_register(),
9224 * but also it is relied upon by io_ring_exit_work()
9226 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
9227 !(ctx->flags & IORING_SETUP_R_DISABLED))
9228 ret = io_submit_sqes(ctx, to_submit);
9229 mutex_unlock(&ctx->uring_lock);
9231 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
9232 wake_up(&ctx->sqo_sq_wait);
9234 revert_creds(creds);
9240 static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
9242 struct io_ring_ctx *ctx;
9243 unsigned sq_thread_idle = 0;
9245 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9246 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
9247 sqd->sq_thread_idle = sq_thread_idle;
9250 static bool io_sqd_handle_event(struct io_sq_data *sqd)
9252 bool did_sig = false;
9253 struct ksignal ksig;
9255 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
9256 signal_pending(current)) {
9257 mutex_unlock(&sqd->lock);
9258 if (signal_pending(current))
9259 did_sig = get_signal(&ksig);
9261 mutex_lock(&sqd->lock);
9263 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
9266 static int io_sq_thread(void *data)
9268 struct io_sq_data *sqd = data;
9269 struct io_ring_ctx *ctx;
9270 unsigned long timeout = 0;
9271 char buf[TASK_COMM_LEN];
9274 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
9275 set_task_comm(current, buf);
9277 if (sqd->sq_cpu != -1)
9278 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
9280 set_cpus_allowed_ptr(current, cpu_online_mask);
9281 current->flags |= PF_NO_SETAFFINITY;
9283 audit_alloc_kernel(current);
9285 mutex_lock(&sqd->lock);
9287 bool cap_entries, sqt_spin = false;
9289 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
9290 if (io_sqd_handle_event(sqd))
9292 timeout = jiffies + sqd->sq_thread_idle;
9295 cap_entries = !list_is_singular(&sqd->ctx_list);
9296 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
9297 int ret = __io_sq_thread(ctx, cap_entries);
9299 if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
9302 if (io_run_task_work())
9305 if (sqt_spin || !time_after(jiffies, timeout)) {
9308 timeout = jiffies + sqd->sq_thread_idle;
9312 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
9313 if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
9314 bool needs_sched = true;
9316 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
9317 atomic_or(IORING_SQ_NEED_WAKEUP,
9318 &ctx->rings->sq_flags);
9319 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
9320 !wq_list_empty(&ctx->iopoll_list)) {
9321 needs_sched = false;
9326 * Ensure the store of the wakeup flag is not
9327 * reordered with the load of the SQ tail
9329 smp_mb__after_atomic();
9331 if (io_sqring_entries(ctx)) {
9332 needs_sched = false;
9338 mutex_unlock(&sqd->lock);
9340 mutex_lock(&sqd->lock);
9342 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9343 atomic_andnot(IORING_SQ_NEED_WAKEUP,
9344 &ctx->rings->sq_flags);
9347 finish_wait(&sqd->wait, &wait);
9348 timeout = jiffies + sqd->sq_thread_idle;
9351 io_uring_cancel_generic(true, sqd);
9353 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9354 atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
9356 mutex_unlock(&sqd->lock);
9358 audit_free(current);
9360 complete(&sqd->exited);
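/*
 * Illustrative userspace sketch of the IORING_SQ_NEED_WAKEUP handshake the
 * thread above participates in (raw io_uring_enter(2) shown; liburing does
 * the same check inside io_uring_submit()). sq_flags is the mmap'ed SQ flags
 * word and an atomic/ONCE-style load of it is assumed:
 *
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, to_submit, 0,
 *			       IORING_ENTER_SQ_WAKEUP, NULL);
 *
 * If the flag is clear, the SQPOLL thread is still running and no syscall is
 * needed for new SQEs to be picked up.
 */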
struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned nr_timeouts;
};
static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
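/*
 * Worked example (illustrative numbers): iowq->cq_tail is the CQ head
 * observed at wait start plus min_events. With head == 0xfffffffe and
 * min_events == 4, cq_tail wraps to 0x00000002. While cached_cq_tail is
 * still 0xfffffffe, dist == (int)0xfffffffc == -4 and we keep sleeping;
 * once four more CQEs are posted, cached_cq_tail also wraps to 0x00000002,
 * dist == 0 and we wake. The signed difference is what keeps this check
 * correct across u32 wraparound.
 */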
9384 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
9385 int wake_flags, void *key)
9387 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
9391 * Cannot safely flush overflowed CQEs from here, ensure we wake up
9392 * the task, and the next invocation will do it.
9394 if (io_should_wake(iowq) ||
9395 test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &iowq->ctx->check_cq))
9396 return autoremove_wake_function(curr, mode, wake_flags, key);
static int io_run_task_work_sig(void)
{
	if (io_run_task_work())
		return 1;
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		return -ERESTARTSYS;
	if (task_sigpending(current))
		return -EINTR;
	return 0;
}
9411 /* when returns >0, the caller should retry */
9412 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
9413 struct io_wait_queue *iowq,
9417 unsigned long check_cq;
9419 /* make sure we run task_work before checking for signals */
9420 ret = io_run_task_work_sig();
9421 if (ret || io_should_wake(iowq))
9423 check_cq = READ_ONCE(ctx->check_cq);
9424 /* let the caller flush overflows, retry */
9425 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
9427 if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
9429 if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
9435 * Wait until events become available, if we don't already have some. The
9436 * application must reap them itself, as they reside on the shared cq ring.
9438 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
9439 const sigset_t __user *sig, size_t sigsz,
9440 struct __kernel_timespec __user *uts)
9442 struct io_wait_queue iowq;
9443 struct io_rings *rings = ctx->rings;
9444 ktime_t timeout = KTIME_MAX;
9448 io_cqring_overflow_flush(ctx);
9449 if (io_cqring_events(ctx) >= min_events)
9451 if (!io_run_task_work())
9456 #ifdef CONFIG_COMPAT
9457 if (in_compat_syscall())
9458 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
9462 ret = set_user_sigmask(sig, sigsz);
9469 struct timespec64 ts;
9471 if (get_timespec64(&ts, uts))
9473 timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
9476 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
9477 iowq.wq.private = current;
9478 INIT_LIST_HEAD(&iowq.wq.entry);
9480 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
9481 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
9483 trace_io_uring_cqring_wait(ctx, min_events);
9485 /* if we can't even flush overflow, don't wait for more */
9486 if (!io_cqring_overflow_flush(ctx)) {
9490 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
9491 TASK_INTERRUPTIBLE);
9492 ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
9496 finish_wait(&ctx->cq_wait, &iowq.wq);
9497 restore_saved_sigmask_unless(ret == -EINTR);
9499 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
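/*
 * Illustrative userspace sketch (assumes liburing): waiting for at least 4
 * completions or a 100ms timeout, mirroring min_events/uts above:
 *
 *	struct __kernel_timespec ts = { .tv_nsec = 100 * 1000 * 1000 };
 *	struct io_uring_cqe *cqe;
 *	int ret;
 *
 *	ret = io_uring_wait_cqes(&ring, &cqe, 4, &ts, NULL);
 *	if (ret == 0)
 *		io_uring_cqe_seen(&ring, cqe);
 */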
static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}
9511 static __cold void **io_alloc_page_table(size_t size)
9513 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
9514 size_t init_size = size;
9517 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
9521 for (i = 0; i < nr_tables; i++) {
9522 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
9524 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
9526 io_free_page_table(table, init_size);
static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
{
	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
}
9540 static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
9542 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
9543 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
9544 unsigned long flags;
9545 bool first_add = false;
9546 unsigned long delay = HZ;
9548 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
9551 /* if we are mid-quiesce then do not delay */
9552 if (node->rsrc_data->quiesce)
9555 while (!list_empty(&ctx->rsrc_ref_list)) {
9556 node = list_first_entry(&ctx->rsrc_ref_list,
9557 struct io_rsrc_node, node);
9558 /* recycle ref nodes in order */
9561 list_del(&node->node);
9562 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
9564 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
9567 mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
9570 static struct io_rsrc_node *io_rsrc_node_alloc(void)
9572 struct io_rsrc_node *ref_node;
9574 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
9578 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
9583 INIT_LIST_HEAD(&ref_node->node);
9584 INIT_LIST_HEAD(&ref_node->rsrc_list);
9585 ref_node->done = false;
9589 static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
9590 struct io_rsrc_data *data_to_kill)
9591 __must_hold(&ctx->uring_lock)
9593 WARN_ON_ONCE(!ctx->rsrc_backup_node);
9594 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
9596 io_rsrc_refs_drop(ctx);
9599 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
9601 rsrc_node->rsrc_data = data_to_kill;
9602 spin_lock_irq(&ctx->rsrc_ref_lock);
9603 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
9604 spin_unlock_irq(&ctx->rsrc_ref_lock);
9606 atomic_inc(&data_to_kill->refs);
9607 percpu_ref_kill(&rsrc_node->refs);
9608 ctx->rsrc_node = NULL;
9611 if (!ctx->rsrc_node) {
9612 ctx->rsrc_node = ctx->rsrc_backup_node;
9613 ctx->rsrc_backup_node = NULL;
static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (ctx->rsrc_backup_node)
		return 0;
	ctx->rsrc_backup_node = io_rsrc_node_alloc();
	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
}
9625 static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
9626 struct io_ring_ctx *ctx)
9630 /* As we may drop ->uring_lock, other task may have started quiesce */
9634 data->quiesce = true;
9636 ret = io_rsrc_node_switch_start(ctx);
9639 io_rsrc_node_switch(ctx, data);
9641 /* kill initial ref, already quiesced if zero */
9642 if (atomic_dec_and_test(&data->refs))
9644 mutex_unlock(&ctx->uring_lock);
9645 flush_delayed_work(&ctx->rsrc_put_work);
9646 ret = wait_for_completion_interruptible(&data->done);
9648 mutex_lock(&ctx->uring_lock);
9649 if (atomic_read(&data->refs) > 0) {
9651 * it has been revived by another thread while
9654 mutex_unlock(&ctx->uring_lock);
9660 atomic_inc(&data->refs);
9661 /* wait for all works potentially completing data->done */
9662 flush_delayed_work(&ctx->rsrc_put_work);
9663 reinit_completion(&data->done);
9665 ret = io_run_task_work_sig();
9666 mutex_lock(&ctx->uring_lock);
9668 data->quiesce = false;
static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
{
	unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
	unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;

	return &data->tags[table_idx][off];
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}
9690 static __cold int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
9691 u64 __user *utags, unsigned nr,
9692 struct io_rsrc_data **pdata)
9694 struct io_rsrc_data *data;
9698 data = kzalloc(sizeof(*data), GFP_KERNEL);
9701 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
9709 data->do_put = do_put;
9712 for (i = 0; i < nr; i++) {
9713 u64 *tag_slot = io_get_tag_slot(data, i);
9715 if (copy_from_user(tag_slot, &utags[i],
9721 atomic_set(&data->refs, 1);
9722 init_completion(&data->done);
9726 io_rsrc_data_free(data);
static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
{
	table->files = kvcalloc(nr_files, sizeof(table->files[0]),
				GFP_KERNEL_ACCOUNT);
	if (unlikely(!table->files))
		return false;

	table->bitmap = bitmap_zalloc(nr_files, GFP_KERNEL_ACCOUNT);
	if (unlikely(!table->bitmap)) {
		kvfree(table->files);
		table->files = NULL;
		return false;
	}

	return true;
}

static void io_free_file_tables(struct io_file_table *table)
{
	kvfree(table->files);
	bitmap_free(table->bitmap);
	table->files = NULL;
	table->bitmap = NULL;
}

static inline void io_file_bitmap_set(struct io_file_table *table, int bit)
{
	WARN_ON_ONCE(test_bit(bit, table->bitmap));
	__set_bit(bit, table->bitmap);
	table->alloc_hint = bit + 1;
}

static inline void io_file_bitmap_clear(struct io_file_table *table, int bit)
{
	__clear_bit(bit, table->bitmap);
	table->alloc_hint = bit;
}
9767 static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
9769 #if !defined(IO_URING_SCM_ALL)
9772 for (i = 0; i < ctx->nr_user_files; i++) {
9773 struct file *file = io_file_from_index(ctx, i);
9777 if (io_fixed_file_slot(&ctx->file_table, i)->file_ptr & FFS_SCM)
9779 io_file_bitmap_clear(&ctx->file_table, i);
9784 #if defined(CONFIG_UNIX)
9785 if (ctx->ring_sock) {
9786 struct sock *sock = ctx->ring_sock->sk;
9787 struct sk_buff *skb;
9789 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
9793 io_free_file_tables(&ctx->file_table);
9794 io_rsrc_data_free(ctx->file_data);
9795 ctx->file_data = NULL;
9796 ctx->nr_user_files = 0;
9799 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
9801 unsigned nr = ctx->nr_user_files;
9804 if (!ctx->file_data)
9808 * Quiesce may unlock ->uring_lock, and while it's not held
9809 * prevent new requests using the table.
9811 ctx->nr_user_files = 0;
9812 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
9813 ctx->nr_user_files = nr;
9815 __io_sqe_files_unregister(ctx);
9819 static void io_sq_thread_unpark(struct io_sq_data *sqd)
9820 __releases(&sqd->lock)
9822 WARN_ON_ONCE(sqd->thread == current);
9825 * Do the dance but not conditional clear_bit() because it'd race with
9826 * other threads incrementing park_pending and setting the bit.
9828 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
9829 if (atomic_dec_return(&sqd->park_pending))
9830 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
9831 mutex_unlock(&sqd->lock);
9834 static void io_sq_thread_park(struct io_sq_data *sqd)
9835 __acquires(&sqd->lock)
9837 WARN_ON_ONCE(sqd->thread == current);
9839 atomic_inc(&sqd->park_pending);
9840 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
9841 mutex_lock(&sqd->lock);
9843 wake_up_process(sqd->thread);
9846 static void io_sq_thread_stop(struct io_sq_data *sqd)
9848 WARN_ON_ONCE(sqd->thread == current);
9849 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
9851 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
9852 mutex_lock(&sqd->lock);
9854 wake_up_process(sqd->thread);
9855 mutex_unlock(&sqd->lock);
9856 wait_for_completion(&sqd->exited);
9859 static void io_put_sq_data(struct io_sq_data *sqd)
9861 if (refcount_dec_and_test(&sqd->refs)) {
9862 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
9864 io_sq_thread_stop(sqd);
9869 static void io_sq_thread_finish(struct io_ring_ctx *ctx)
9871 struct io_sq_data *sqd = ctx->sq_data;
9874 io_sq_thread_park(sqd);
9875 list_del_init(&ctx->sqd_list);
9876 io_sqd_update_thread_idle(sqd);
9877 io_sq_thread_unpark(sqd);
9879 io_put_sq_data(sqd);
9880 ctx->sq_data = NULL;
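/*
 * IORING_SETUP_ATTACH_WQ: share the SQPOLL backend of an existing ring,
 * identified by p->wq_fd. Only rings created by the same thread group may
 * be attached to; the -EPERM case falls back to creating a new sqd below.
 */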
9884 static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
9886 struct io_ring_ctx *ctx_attach;
9887 struct io_sq_data *sqd;
9890 f = fdget(p->wq_fd);
9892 return ERR_PTR(-ENXIO);
9893 if (f.file->f_op != &io_uring_fops) {
9895 return ERR_PTR(-EINVAL);
9898 ctx_attach = f.file->private_data;
9899 sqd = ctx_attach->sq_data;
9902 return ERR_PTR(-EINVAL);
9904 if (sqd->task_tgid != current->tgid) {
9906 return ERR_PTR(-EPERM);
9909 refcount_inc(&sqd->refs);
9914 static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
9917 struct io_sq_data *sqd;
9920 if (p->flags & IORING_SETUP_ATTACH_WQ) {
9921 sqd = io_attach_sq_data(p);
9926 /* fall through for EPERM case, setup new sqd/task */
9927 if (PTR_ERR(sqd) != -EPERM)
9931 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
9933 return ERR_PTR(-ENOMEM);
9935 atomic_set(&sqd->park_pending, 0);
9936 refcount_set(&sqd->refs, 1);
9937 INIT_LIST_HEAD(&sqd->ctx_list);
9938 mutex_init(&sqd->lock);
9939 init_waitqueue_head(&sqd->wait);
9940 init_completion(&sqd->exited);
9945 * Ensure the UNIX gc is aware of our file set, so we are certain that
9946 * the io_uring can be safely unregistered on process exit, even if we have
9947 * loops in the file referencing. We account only files that can hold other
9948 * files because otherwise they can't form a loop and so are not interesting for the garbage collection.
9951 static int io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
9953 #if defined(CONFIG_UNIX)
9954 struct sock *sk = ctx->ring_sock->sk;
9955 struct sk_buff_head *head = &sk->sk_receive_queue;
9956 struct scm_fp_list *fpl;
9957 struct sk_buff *skb;
9959 if (likely(!io_file_need_scm(file)))
9963 * See if we can merge this file into an existing skb SCM_RIGHTS
9964 * file set. If there's no room, fall back to allocating a new skb
9965 * and filling it in.
9967 spin_lock_irq(&head->lock);
9968 skb = skb_peek(head);
9969 if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
9970 __skb_unlink(skb, head);
9973 spin_unlock_irq(&head->lock);
9976 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
9980 skb = alloc_skb(0, GFP_KERNEL);
9986 fpl->user = get_uid(current_user());
9987 fpl->max = SCM_MAX_FD;
9990 UNIXCB(skb).fp = fpl;
9992 skb->destructor = unix_destruct_scm;
9993 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
9996 fpl = UNIXCB(skb).fp;
9997 fpl->fp[fpl->count++] = get_file(file);
9998 unix_inflight(fpl->user, file);
9999 skb_queue_head(head, skb);
10005 static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
10007 struct file *file = prsrc->file;
10008 #if defined(CONFIG_UNIX)
10009 struct sock *sock = ctx->ring_sock->sk;
10010 struct sk_buff_head list, *head = &sock->sk_receive_queue;
10011 struct sk_buff *skb;
10014 if (!io_file_need_scm(file)) {
10019 __skb_queue_head_init(&list);
10022 * Find the skb that holds this file in its SCM_RIGHTS. When found,
10023 * remove this entry and rearrange the file array.
10025 skb = skb_dequeue(head);
10027 struct scm_fp_list *fp;
10029 fp = UNIXCB(skb).fp;
10030 for (i = 0; i < fp->count; i++) {
10033 if (fp->fp[i] != file)
10036 unix_notinflight(fp->user, fp->fp[i]);
10037 left = fp->count - 1 - i;
10039 memmove(&fp->fp[i], &fp->fp[i + 1],
10040 left * sizeof(struct file *));
10047 __skb_queue_tail(&list, skb);
10057 __skb_queue_tail(&list, skb);
10059 skb = skb_dequeue(head);
10062 if (skb_peek(&list)) {
10063 spin_lock_irq(&head->lock);
10064 while ((skb = __skb_dequeue(&list)) != NULL)
10065 __skb_queue_tail(head, skb);
10066 spin_unlock_irq(&head->lock);
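/*
 * Deferred resource teardown: __io_rsrc_put_work() runs off the rsrc_put
 * work item, posts a CQE for every tagged resource, hands each one to the
 * data's ->do_put() callback, and completes ->done when the last reference
 * on the rsrc data goes away.
 */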
10073 static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
10075 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
10076 struct io_ring_ctx *ctx = rsrc_data->ctx;
10077 struct io_rsrc_put *prsrc, *tmp;
10079 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
10080 list_del(&prsrc->list);
10083 if (ctx->flags & IORING_SETUP_IOPOLL)
10084 mutex_lock(&ctx->uring_lock);
10086 spin_lock(&ctx->completion_lock);
10087 io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
10088 io_commit_cqring(ctx);
10089 spin_unlock(&ctx->completion_lock);
10090 io_cqring_ev_posted(ctx);
10092 if (ctx->flags & IORING_SETUP_IOPOLL)
10093 mutex_unlock(&ctx->uring_lock);
10096 rsrc_data->do_put(ctx, prsrc);
10100 io_rsrc_node_destroy(ref_node);
10101 if (atomic_dec_and_test(&rsrc_data->refs))
10102 complete(&rsrc_data->done);
10105 static void io_rsrc_put_work(struct work_struct *work)
10107 struct io_ring_ctx *ctx;
10108 struct llist_node *node;
10110 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
10111 node = llist_del_all(&ctx->rsrc_put_llist);
10114 struct io_rsrc_node *ref_node;
10115 struct llist_node *next = node->next;
10117 ref_node = llist_entry(node, struct io_rsrc_node, llist);
10118 __io_rsrc_put_work(ref_node);
10123 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
10124 unsigned nr_args, u64 __user *tags)
10126 __s32 __user *fds = (__s32 __user *) arg;
10131 if (ctx->file_data)
10135 if (nr_args > IORING_MAX_FIXED_FILES)
10137 if (nr_args > rlimit(RLIMIT_NOFILE))
10139 ret = io_rsrc_node_switch_start(ctx);
10142 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
10147 if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
10148 io_rsrc_data_free(ctx->file_data);
10149 ctx->file_data = NULL;
10153 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
10154 struct io_fixed_file *file_slot;
10156 if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
10160 /* allow sparse sets */
10161 if (!fds || fd == -1) {
10163 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
10170 if (unlikely(!file))
10174 * Don't allow io_uring instances to be registered. If UNIX
10175 * isn't enabled, then this causes a reference cycle and this
10176 * instance can never get freed. If UNIX is enabled we'll
10177 * handle it just fine, but there's still no point in allowing
10178 * a ring fd as it doesn't support regular read/write anyway.
10180 if (file->f_op == &io_uring_fops) {
10184 ret = io_scm_file_account(ctx, file);
10189 file_slot = io_fixed_file_slot(&ctx->file_table, i);
10190 io_fixed_file_set(file_slot, file);
10191 io_file_bitmap_set(&ctx->file_table, i);
10194 io_rsrc_node_switch(ctx, NULL);
10197 __io_sqe_files_unregister(ctx);
10201 static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
10202 struct io_rsrc_node *node, void *rsrc)
10204 u64 *tag_slot = io_get_tag_slot(data, idx);
10205 struct io_rsrc_put *prsrc;
10207 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
10211 prsrc->tag = *tag_slot;
10213 prsrc->rsrc = rsrc;
10214 list_add(&prsrc->list, &node->rsrc_list);
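/*
 * Install a file into a fixed table slot at issue time (direct descriptor
 * install). If the slot is occupied, the old file is queued for removal on
 * the current rsrc node and the node is switched afterwards, so the old
 * file is only put once requests that may still reference it have drained.
 */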
10218 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
10219 unsigned int issue_flags, u32 slot_index)
10220 __must_hold(&req->ctx->uring_lock)
10222 struct io_ring_ctx *ctx = req->ctx;
10223 bool needs_switch = false;
10224 struct io_fixed_file *file_slot;
10227 if (file->f_op == &io_uring_fops)
10229 if (!ctx->file_data)
10231 if (slot_index >= ctx->nr_user_files)
10234 slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
10235 file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
10237 if (file_slot->file_ptr) {
10238 struct file *old_file;
10240 ret = io_rsrc_node_switch_start(ctx);
10244 old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
10245 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
10246 ctx->rsrc_node, old_file);
10249 file_slot->file_ptr = 0;
10250 io_file_bitmap_clear(&ctx->file_table, slot_index);
10251 needs_switch = true;
10254 ret = io_scm_file_account(ctx, file);
10256 *io_get_tag_slot(ctx->file_data, slot_index) = 0;
10257 io_fixed_file_set(file_slot, file);
10258 io_file_bitmap_set(&ctx->file_table, slot_index);
10262 io_rsrc_node_switch(ctx, ctx->file_data);
10268 static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags,
10269 unsigned int offset)
10271 struct io_ring_ctx *ctx = req->ctx;
10272 struct io_fixed_file *file_slot;
10276 io_ring_submit_lock(ctx, issue_flags);
10278 if (unlikely(!ctx->file_data))
10281 if (offset >= ctx->nr_user_files)
10283 ret = io_rsrc_node_switch_start(ctx);
10287 offset = array_index_nospec(offset, ctx->nr_user_files);
10288 file_slot = io_fixed_file_slot(&ctx->file_table, offset);
10290 if (!file_slot->file_ptr)
10293 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
10294 ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
10298 file_slot->file_ptr = 0;
10299 io_file_bitmap_clear(&ctx->file_table, offset);
10300 io_rsrc_node_switch(ctx, ctx->file_data);
10303 io_ring_submit_unlock(ctx, issue_flags);
10307 static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
10309 return __io_close_fixed(req, issue_flags, req->close.file_slot - 1);
10312 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
10313 struct io_uring_rsrc_update2 *up,
10316 u64 __user *tags = u64_to_user_ptr(up->tags);
10317 __s32 __user *fds = u64_to_user_ptr(up->data);
10318 struct io_rsrc_data *data = ctx->file_data;
10319 struct io_fixed_file *file_slot;
10321 int fd, i, err = 0;
10323 bool needs_switch = false;
10325 if (!ctx->file_data)
10327 if (up->offset + nr_args > ctx->nr_user_files)
10330 for (done = 0; done < nr_args; done++) {
10333 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
10334 copy_from_user(&fd, &fds[done], sizeof(fd))) {
10338 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
10342 if (fd == IORING_REGISTER_FILES_SKIP)
10345 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
10346 file_slot = io_fixed_file_slot(&ctx->file_table, i);
10348 if (file_slot->file_ptr) {
10349 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
10350 err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
10353 file_slot->file_ptr = 0;
10354 io_file_bitmap_clear(&ctx->file_table, i);
10355 needs_switch = true;
10364 * Don't allow io_uring instances to be registered. If
10365 * UNIX isn't enabled, then this causes a reference
10366 * cycle and this instance can never get freed. If UNIX
10367 * is enabled we'll handle it just fine, but there's
10368 * still no point in allowing a ring fd as it doesn't
10369 * support regular read/write anyway.
10371 if (file->f_op == &io_uring_fops) {
10376 err = io_scm_file_account(ctx, file);
10381 *io_get_tag_slot(data, i) = tag;
10382 io_fixed_file_set(file_slot, file);
10383 io_file_bitmap_set(&ctx->file_table, i);
10388 io_rsrc_node_switch(ctx, data);
10389 return done ? done : err;
10392 static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
10393 struct task_struct *task)
10395 struct io_wq_hash *hash;
10396 struct io_wq_data data;
10397 unsigned int concurrency;
10399 mutex_lock(&ctx->uring_lock);
10400 hash = ctx->hash_map;
10402 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
10404 mutex_unlock(&ctx->uring_lock);
10405 return ERR_PTR(-ENOMEM);
10407 refcount_set(&hash->refs, 1);
10408 init_waitqueue_head(&hash->wait);
10409 ctx->hash_map = hash;
10411 mutex_unlock(&ctx->uring_lock);
10415 data.free_work = io_wq_free_work;
10416 data.do_work = io_wq_submit_work;
10418 /* Do QD, or 4 * CPUS, whichever is smaller */
10419 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
10421 return io_wq_create(concurrency, &data);
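/*
 * Allocate the per-task io_uring state: the registered-ring array, the
 * inflight counter, the io-wq used for punted work and the task_work
 * machinery. Torn down again in __io_uring_free() when the task exits.
 */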
10424 static __cold int io_uring_alloc_task_context(struct task_struct *task,
10425 struct io_ring_ctx *ctx)
10427 struct io_uring_task *tctx;
10430 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
10431 if (unlikely(!tctx))
10434 tctx->registered_rings = kcalloc(IO_RINGFD_REG_MAX,
10435 sizeof(struct file *), GFP_KERNEL);
10436 if (unlikely(!tctx->registered_rings)) {
10441 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
10442 if (unlikely(ret)) {
10443 kfree(tctx->registered_rings);
10448 tctx->io_wq = io_init_wq_offload(ctx, task);
10449 if (IS_ERR(tctx->io_wq)) {
10450 ret = PTR_ERR(tctx->io_wq);
10451 percpu_counter_destroy(&tctx->inflight);
10452 kfree(tctx->registered_rings);
10457 xa_init(&tctx->xa);
10458 init_waitqueue_head(&tctx->wait);
10459 atomic_set(&tctx->in_idle, 0);
10460 atomic_set(&tctx->inflight_tracked, 0);
10461 task->io_uring = tctx;
10462 spin_lock_init(&tctx->task_lock);
10463 INIT_WQ_LIST(&tctx->task_list);
10464 INIT_WQ_LIST(&tctx->prio_task_list);
10465 init_task_work(&tctx->task_work, tctx_task_work);
10469 void __io_uring_free(struct task_struct *tsk)
10471 struct io_uring_task *tctx = tsk->io_uring;
10473 WARN_ON_ONCE(!xa_empty(&tctx->xa));
10474 WARN_ON_ONCE(tctx->io_wq);
10475 WARN_ON_ONCE(tctx->cached_refs);
10477 kfree(tctx->registered_rings);
10478 percpu_counter_destroy(&tctx->inflight);
10480 tsk->io_uring = NULL;
10483 static __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
10484 struct io_uring_params *p)
10488 /* Retain compatibility with failing for an invalid attach attempt */
10489 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
10490 IORING_SETUP_ATTACH_WQ) {
10493 f = fdget(p->wq_fd);
10496 if (f.file->f_op != &io_uring_fops) {
10502 if (ctx->flags & IORING_SETUP_SQPOLL) {
10503 struct task_struct *tsk;
10504 struct io_sq_data *sqd;
10507 ret = security_uring_sqpoll();
10511 sqd = io_get_sq_data(p, &attached);
10513 ret = PTR_ERR(sqd);
10517 ctx->sq_creds = get_current_cred();
10518 ctx->sq_data = sqd;
10519 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
10520 if (!ctx->sq_thread_idle)
10521 ctx->sq_thread_idle = HZ;
10523 io_sq_thread_park(sqd);
10524 list_add(&ctx->sqd_list, &sqd->ctx_list);
10525 io_sqd_update_thread_idle(sqd);
10526 /* don't attach to a dying SQPOLL thread, would be racy */
10527 ret = (attached && !sqd->thread) ? -ENXIO : 0;
10528 io_sq_thread_unpark(sqd);
10535 if (p->flags & IORING_SETUP_SQ_AFF) {
10536 int cpu = p->sq_thread_cpu;
10539 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
10546 sqd->task_pid = current->pid;
10547 sqd->task_tgid = current->tgid;
10548 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
10550 ret = PTR_ERR(tsk);
10555 ret = io_uring_alloc_task_context(tsk, ctx);
10556 wake_up_new_task(tsk);
10559 } else if (p->flags & IORING_SETUP_SQ_AFF) {
10560 /* Can't have SQ_AFF without SQPOLL */
10567 complete(&ctx->sq_data->exited);
10569 io_sq_thread_finish(ctx);
10573 static inline void __io_unaccount_mem(struct user_struct *user,
10574 unsigned long nr_pages)
10576 atomic_long_sub(nr_pages, &user->locked_vm);
10579 static inline int __io_account_mem(struct user_struct *user,
10580 unsigned long nr_pages)
10582 unsigned long page_limit, cur_pages, new_pages;
10584 /* Don't allow more pages than we can safely lock */
10585 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
10588 cur_pages = atomic_long_read(&user->locked_vm);
10589 new_pages = cur_pages + nr_pages;
10590 if (new_pages > page_limit)
10592 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
10593 new_pages) != cur_pages);
10598 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
10601 __io_unaccount_mem(ctx->user, nr_pages);
10603 if (ctx->mm_account)
10604 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
10607 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
10612 ret = __io_account_mem(ctx->user, nr_pages);
10617 if (ctx->mm_account)
10618 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
10623 static void io_mem_free(void *ptr)
10630 page = virt_to_head_page(ptr);
10631 if (put_page_testzero(page))
10632 free_compound_page(page);
10635 static void *io_mem_alloc(size_t size)
10637 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
10639 return (void *) __get_free_pages(gfp, get_order(size));
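/*
 * Layout of the shared ring mapping: the io_rings header and the CQE array
 * (doubled in size for IORING_SETUP_CQE32), aligned to a cacheline, followed
 * by the SQ index array whose offset is returned through *sq_offset.
 */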
10642 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
10643 unsigned int cq_entries, size_t *sq_offset)
10645 struct io_rings *rings;
10646 size_t off, sq_array_size;
10648 off = struct_size(rings, cqes, cq_entries);
10649 if (off == SIZE_MAX)
10651 if (ctx->flags & IORING_SETUP_CQE32) {
10652 if (check_shl_overflow(off, 1, &off))
10657 off = ALIGN(off, SMP_CACHE_BYTES);
10665 sq_array_size = array_size(sizeof(u32), sq_entries);
10666 if (sq_array_size == SIZE_MAX)
10669 if (check_add_overflow(off, sq_array_size, &off))
10675 static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
10677 struct io_mapped_ubuf *imu = *slot;
10680 if (imu != ctx->dummy_ubuf) {
10681 for (i = 0; i < imu->nr_bvecs; i++)
10682 unpin_user_page(imu->bvec[i].bv_page);
10683 if (imu->acct_pages)
10684 io_unaccount_mem(ctx, imu->acct_pages);
10690 static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
10692 io_buffer_unmap(ctx, &prsrc->buf);
10696 static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
10700 for (i = 0; i < ctx->nr_user_bufs; i++)
10701 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
10702 kfree(ctx->user_bufs);
10703 io_rsrc_data_free(ctx->buf_data);
10704 ctx->user_bufs = NULL;
10705 ctx->buf_data = NULL;
10706 ctx->nr_user_bufs = 0;
10709 static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
10711 unsigned nr = ctx->nr_user_bufs;
10714 if (!ctx->buf_data)
10718 * Quiesce may unlock ->uring_lock; while it's not held, prevent
10719 * new requests from using the table.
10721 ctx->nr_user_bufs = 0;
10722 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
10723 ctx->nr_user_bufs = nr;
10725 __io_sqe_buffers_unregister(ctx);
10729 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
10730 void __user *arg, unsigned index)
10732 struct iovec __user *src;
10734 #ifdef CONFIG_COMPAT
10736 struct compat_iovec __user *ciovs;
10737 struct compat_iovec ciov;
10739 ciovs = (struct compat_iovec __user *) arg;
10740 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
10743 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
10744 dst->iov_len = ciov.iov_len;
10748 src = (struct iovec __user *) arg;
10749 if (copy_from_user(dst, &src[index], sizeof(*dst)))
10755 * Not super efficient, but this only happens at registration time. And we do
10756 * cache the last compound head, so generally we'll only do a full search if we don't match it.
10759 * We check if the given compound head page has already been accounted, to
10760 * avoid double accounting it. This allows us to account the full size of the
10761 * page, not just the constituent pages of a huge page.
10763 static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
10764 int nr_pages, struct page *hpage)
10768 /* check current page array */
10769 for (i = 0; i < nr_pages; i++) {
10770 if (!PageCompound(pages[i]))
10772 if (compound_head(pages[i]) == hpage)
10776 /* check previously registered pages */
10777 for (i = 0; i < ctx->nr_user_bufs; i++) {
10778 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
10780 for (j = 0; j < imu->nr_bvecs; j++) {
10781 if (!PageCompound(imu->bvec[j].bv_page))
10783 if (compound_head(imu->bvec[j].bv_page) == hpage)
10791 static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
10792 int nr_pages, struct io_mapped_ubuf *imu,
10793 struct page **last_hpage)
10797 imu->acct_pages = 0;
10798 for (i = 0; i < nr_pages; i++) {
10799 if (!PageCompound(pages[i])) {
10802 struct page *hpage;
10804 hpage = compound_head(pages[i]);
10805 if (hpage == *last_hpage)
10807 *last_hpage = hpage;
10808 if (headpage_already_acct(ctx, pages, i, hpage))
10810 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
10814 if (!imu->acct_pages)
10817 ret = io_account_mem(ctx, imu->acct_pages);
10819 imu->acct_pages = 0;
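/*
 * Pin the user pages backing a registered buffer with FOLL_LONGTERM. File
 * backed mappings other than shmem and hugetlb are rejected, and on failure
 * any pages pinned so far are released again.
 */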
10823 static struct page **io_pin_pages(unsigned long ubuf, unsigned long len,
10826 unsigned long start, end, nr_pages;
10827 struct vm_area_struct **vmas = NULL;
10828 struct page **pages = NULL;
10829 int i, pret, ret = -ENOMEM;
10831 end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
10832 start = ubuf >> PAGE_SHIFT;
10833 nr_pages = end - start;
10835 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
10839 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
10845 mmap_read_lock(current->mm);
10846 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
10848 if (pret == nr_pages) {
10849 /* don't support file backed memory */
10850 for (i = 0; i < nr_pages; i++) {
10851 struct vm_area_struct *vma = vmas[i];
10853 if (vma_is_shmem(vma))
10855 if (vma->vm_file &&
10856 !is_file_hugepages(vma->vm_file)) {
10861 *npages = nr_pages;
10863 ret = pret < 0 ? pret : -EFAULT;
10865 mmap_read_unlock(current->mm);
10868 * if we did partial map, or found file backed vmas,
10869 * release any pages we did get
10872 unpin_user_pages(pages, pret);
10880 pages = ERR_PTR(ret);
10885 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
10886 struct io_mapped_ubuf **pimu,
10887 struct page **last_hpage)
10889 struct io_mapped_ubuf *imu = NULL;
10890 struct page **pages = NULL;
10893 int ret, nr_pages, i;
10895 if (!iov->iov_base) {
10896 *pimu = ctx->dummy_ubuf;
10903 pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
10905 if (IS_ERR(pages)) {
10906 ret = PTR_ERR(pages);
10911 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
10915 ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
10917 unpin_user_pages(pages, nr_pages);
10921 off = (unsigned long) iov->iov_base & ~PAGE_MASK;
10922 size = iov->iov_len;
10923 for (i = 0; i < nr_pages; i++) {
10926 vec_len = min_t(size_t, size, PAGE_SIZE - off);
10927 imu->bvec[i].bv_page = pages[i];
10928 imu->bvec[i].bv_len = vec_len;
10929 imu->bvec[i].bv_offset = off;
10933 /* store original address for later verification */
10934 imu->ubuf = (unsigned long) iov->iov_base;
10935 imu->ubuf_end = imu->ubuf + iov->iov_len;
10936 imu->nr_bvecs = nr_pages;
10946 static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
10948 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
10949 return ctx->user_bufs ? 0 : -ENOMEM;
10952 static int io_buffer_validate(struct iovec *iov)
10954 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
10957 * Don't impose further limits on the size and buffer
10958 * constraints here; we'll return -EINVAL later when IO is
10959 * submitted if they are wrong.
10961 if (!iov->iov_base)
10962 return iov->iov_len ? -EFAULT : 0;
10966 /* arbitrary limit, but we need something */
10967 if (iov->iov_len > SZ_1G)
10970 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
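/*
 * Register an array of user buffers. Every iovec is validated, pinned and
 * accounted; a NULL iov_base registers a sparse (dummy) slot, which may only
 * carry a zero tag.
 */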
10976 static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
10977 unsigned int nr_args, u64 __user *tags)
10979 struct page *last_hpage = NULL;
10980 struct io_rsrc_data *data;
10984 if (ctx->user_bufs)
10986 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
10988 ret = io_rsrc_node_switch_start(ctx);
10991 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
10994 ret = io_buffers_map_alloc(ctx, nr_args);
10996 io_rsrc_data_free(data);
11000 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
11002 ret = io_copy_iov(ctx, &iov, arg, i);
11005 ret = io_buffer_validate(&iov);
11009 memset(&iov, 0, sizeof(iov));
11012 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
11017 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
11023 WARN_ON_ONCE(ctx->buf_data);
11025 ctx->buf_data = data;
11027 __io_sqe_buffers_unregister(ctx);
11029 io_rsrc_node_switch(ctx, NULL);
11033 static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
11034 struct io_uring_rsrc_update2 *up,
11035 unsigned int nr_args)
11037 u64 __user *tags = u64_to_user_ptr(up->tags);
11038 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
11039 struct page *last_hpage = NULL;
11040 bool needs_switch = false;
11044 if (!ctx->buf_data)
11046 if (up->offset + nr_args > ctx->nr_user_bufs)
11049 for (done = 0; done < nr_args; done++) {
11050 struct io_mapped_ubuf *imu;
11051 int offset = up->offset + done;
11054 err = io_copy_iov(ctx, &iov, iovs, done);
11057 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
11061 err = io_buffer_validate(&iov);
11064 if (!iov.iov_base && tag) {
11068 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
11072 i = array_index_nospec(offset, ctx->nr_user_bufs);
11073 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
11074 err = io_queue_rsrc_removal(ctx->buf_data, i,
11075 ctx->rsrc_node, ctx->user_bufs[i]);
11076 if (unlikely(err)) {
11077 io_buffer_unmap(ctx, &imu);
11080 ctx->user_bufs[i] = NULL;
11081 needs_switch = true;
11084 ctx->user_bufs[i] = imu;
11085 *io_get_tag_slot(ctx->buf_data, offset) = tag;
11089 io_rsrc_node_switch(ctx, ctx->buf_data);
11090 return done ? done : err;
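/*
 * Register an eventfd to be signalled when completions are posted. With
 * eventfd_async set (IORING_REGISTER_EVENTFD_ASYNC), only completions
 * posted from async context trigger the notification.
 */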
11093 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
11094 unsigned int eventfd_async)
11096 struct io_ev_fd *ev_fd;
11097 __s32 __user *fds = arg;
11100 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
11101 lockdep_is_held(&ctx->uring_lock));
11105 if (copy_from_user(&fd, fds, sizeof(*fds)))
11108 ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
11112 ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
11113 if (IS_ERR(ev_fd->cq_ev_fd)) {
11114 int ret = PTR_ERR(ev_fd->cq_ev_fd);
11118 ev_fd->eventfd_async = eventfd_async;
11119 ctx->has_evfd = true;
11120 rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
11124 static void io_eventfd_put(struct rcu_head *rcu)
11126 struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
11128 eventfd_ctx_put(ev_fd->cq_ev_fd);
11132 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
11134 struct io_ev_fd *ev_fd;
11136 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
11137 lockdep_is_held(&ctx->uring_lock));
11139 ctx->has_evfd = false;
11140 rcu_assign_pointer(ctx->io_ev_fd, NULL);
11141 call_rcu(&ev_fd->rcu, io_eventfd_put);
11148 static void io_destroy_buffers(struct io_ring_ctx *ctx)
11150 struct io_buffer_list *bl;
11151 unsigned long index;
11154 for (i = 0; i < BGID_ARRAY; i++) {
11157 __io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
11160 xa_for_each(&ctx->io_bl_xa, index, bl) {
11161 xa_erase(&ctx->io_bl_xa, bl->bgid);
11162 __io_remove_buffers(ctx, bl, -1U);
11166 while (!list_empty(&ctx->io_buffers_pages)) {
11169 page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
11170 list_del_init(&page->lru);
11175 static void io_req_caches_free(struct io_ring_ctx *ctx)
11177 struct io_submit_state *state = &ctx->submit_state;
11180 mutex_lock(&ctx->uring_lock);
11181 io_flush_cached_locked_reqs(ctx, state);
11183 while (!io_req_cache_empty(ctx)) {
11184 struct io_wq_work_node *node;
11185 struct io_kiocb *req;
11187 node = wq_stack_extract(&state->free_list);
11188 req = container_of(node, struct io_kiocb, comp_list);
11189 kmem_cache_free(req_cachep, req);
11193 percpu_ref_put_many(&ctx->refs, nr);
11194 mutex_unlock(&ctx->uring_lock);
11197 static void io_wait_rsrc_data(struct io_rsrc_data *data)
11199 if (data && !atomic_dec_and_test(&data->refs))
11200 wait_for_completion(&data->done);
11203 static void io_flush_apoll_cache(struct io_ring_ctx *ctx)
11205 struct async_poll *apoll;
11207 while (!list_empty(&ctx->apoll_cache)) {
11208 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
11210 list_del(&apoll->poll.wait.entry);
11215 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
11217 io_sq_thread_finish(ctx);
11219 if (ctx->mm_account) {
11220 mmdrop(ctx->mm_account);
11221 ctx->mm_account = NULL;
11224 io_rsrc_refs_drop(ctx);
11225 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
11226 io_wait_rsrc_data(ctx->buf_data);
11227 io_wait_rsrc_data(ctx->file_data);
11229 mutex_lock(&ctx->uring_lock);
11231 __io_sqe_buffers_unregister(ctx);
11232 if (ctx->file_data)
11233 __io_sqe_files_unregister(ctx);
11235 __io_cqring_overflow_flush(ctx, true);
11236 io_eventfd_unregister(ctx);
11237 io_flush_apoll_cache(ctx);
11238 mutex_unlock(&ctx->uring_lock);
11239 io_destroy_buffers(ctx);
11241 put_cred(ctx->sq_creds);
11243 /* there are no registered resources left, nobody uses it */
11244 if (ctx->rsrc_node)
11245 io_rsrc_node_destroy(ctx->rsrc_node);
11246 if (ctx->rsrc_backup_node)
11247 io_rsrc_node_destroy(ctx->rsrc_backup_node);
11248 flush_delayed_work(&ctx->rsrc_put_work);
11249 flush_delayed_work(&ctx->fallback_work);
11251 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
11252 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
11254 #if defined(CONFIG_UNIX)
11255 if (ctx->ring_sock) {
11256 ctx->ring_sock->file = NULL; /* so that iput() is called */
11257 sock_release(ctx->ring_sock);
11260 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
11262 io_mem_free(ctx->rings);
11263 io_mem_free(ctx->sq_sqes);
11265 percpu_ref_exit(&ctx->refs);
11266 free_uid(ctx->user);
11267 io_req_caches_free(ctx);
11269 io_wq_put_hash(ctx->hash_map);
11270 kfree(ctx->cancel_hash);
11271 kfree(ctx->dummy_ubuf);
11273 xa_destroy(&ctx->io_bl_xa);
11277 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
11279 struct io_ring_ctx *ctx = file->private_data;
11282 poll_wait(file, &ctx->cq_wait, wait);
11284 * synchronizes with barrier from wq_has_sleeper call in
11288 if (!io_sqring_full(ctx))
11289 mask |= EPOLLOUT | EPOLLWRNORM;
11292 * Don't flush cqring overflow list here, just do a simple check.
11293 * Otherwise there could possibly be an ABBA deadlock:
11296 * lock(&ctx->uring_lock);
11298 * lock(&ctx->uring_lock);
11301 * Users may get EPOLLIN while seeing nothing in the cqring; this
11302 * pushes them to do the flush.
11304 if (io_cqring_events(ctx) ||
11305 test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
11306 mask |= EPOLLIN | EPOLLRDNORM;
11311 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
11313 const struct cred *creds;
11315 creds = xa_erase(&ctx->personalities, id);
11324 struct io_tctx_exit {
11325 struct callback_head task_work;
11326 struct completion completion;
11327 struct io_ring_ctx *ctx;
11330 static __cold void io_tctx_exit_cb(struct callback_head *cb)
11332 struct io_uring_task *tctx = current->io_uring;
11333 struct io_tctx_exit *work;
11335 work = container_of(cb, struct io_tctx_exit, task_work);
11337 * When @in_idle, we're in cancellation and it's racy to remove the
11338 * node. It'll be removed by the end of cancellation; just ignore it.
11340 if (!atomic_read(&tctx->in_idle))
11341 io_uring_del_tctx_node((unsigned long)work->ctx);
11342 complete(&work->completion);
11345 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
11347 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
11349 return req->ctx == data;
11352 static __cold void io_ring_exit_work(struct work_struct *work)
11354 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
11355 unsigned long timeout = jiffies + HZ * 60 * 5;
11356 unsigned long interval = HZ / 20;
11357 struct io_tctx_exit exit;
11358 struct io_tctx_node *node;
11362 * If we're doing polled IO and end up having requests being
11363 * submitted async (out-of-line), then completions can come in while
11364 * we're waiting for refs to drop. We need to reap these manually,
11365 * as nobody else will be looking for them.
11368 io_uring_try_cancel_requests(ctx, NULL, true);
11369 if (ctx->sq_data) {
11370 struct io_sq_data *sqd = ctx->sq_data;
11371 struct task_struct *tsk;
11373 io_sq_thread_park(sqd);
11375 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
11376 io_wq_cancel_cb(tsk->io_uring->io_wq,
11377 io_cancel_ctx_cb, ctx, true);
11378 io_sq_thread_unpark(sqd);
11381 io_req_caches_free(ctx);
11383 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
11384 /* there is little hope left, don't run it too often */
11385 interval = HZ * 60;
11387 } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
11389 init_completion(&exit.completion);
11390 init_task_work(&exit.task_work, io_tctx_exit_cb);
11393 * Some may use context even when all refs and requests have been put,
11394 * and they are free to do so while still holding uring_lock or
11395 * completion_lock; see io_req_task_submit(). Apart from other work,
11396 * this lock/unlock section also waits for them to finish.
11398 mutex_lock(&ctx->uring_lock);
11399 while (!list_empty(&ctx->tctx_list)) {
11400 WARN_ON_ONCE(time_after(jiffies, timeout));
11402 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
11404 /* don't spin on a single task if cancellation failed */
11405 list_rotate_left(&ctx->tctx_list);
11406 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
11407 if (WARN_ON_ONCE(ret))
11410 mutex_unlock(&ctx->uring_lock);
11411 wait_for_completion(&exit.completion);
11412 mutex_lock(&ctx->uring_lock);
11414 mutex_unlock(&ctx->uring_lock);
11415 spin_lock(&ctx->completion_lock);
11416 spin_unlock(&ctx->completion_lock);
11418 io_ring_ctx_free(ctx);
11421 /* Returns true if we found and killed one or more timeouts */
11422 static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx,
11423 struct task_struct *tsk, bool cancel_all)
11425 struct io_kiocb *req, *tmp;
11428 spin_lock(&ctx->completion_lock);
11429 spin_lock_irq(&ctx->timeout_lock);
11430 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
11431 if (io_match_task(req, tsk, cancel_all)) {
11432 io_kill_timeout(req, -ECANCELED);
11436 spin_unlock_irq(&ctx->timeout_lock);
11437 io_commit_cqring(ctx);
11438 spin_unlock(&ctx->completion_lock);
11440 io_cqring_ev_posted(ctx);
11441 return canceled != 0;
11444 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
11446 unsigned long index;
11447 struct creds *creds;
11449 mutex_lock(&ctx->uring_lock);
11450 percpu_ref_kill(&ctx->refs);
11452 __io_cqring_overflow_flush(ctx, true);
11453 xa_for_each(&ctx->personalities, index, creds)
11454 io_unregister_personality(ctx, index);
11455 mutex_unlock(&ctx->uring_lock);
11457 /* failed during ring init, it couldn't have issued any requests */
11459 io_kill_timeouts(ctx, NULL, true);
11460 io_poll_remove_all(ctx, NULL, true);
11461 /* if we failed setting up the ctx, we might not have any rings */
11462 io_iopoll_try_reap_events(ctx);
11465 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
11467 * Use system_unbound_wq to avoid spawning tons of event kworkers
11468 * if we're exiting a ton of rings at the same time. It just adds
11469 * noise and overhead; there's no discernible change in runtime
11470 * over using system_wq.
11472 queue_work(system_unbound_wq, &ctx->exit_work);
11475 static int io_uring_release(struct inode *inode, struct file *file)
11477 struct io_ring_ctx *ctx = file->private_data;
11479 file->private_data = NULL;
11480 io_ring_ctx_wait_and_kill(ctx);
11484 struct io_task_cancel {
11485 struct task_struct *task;
11489 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
11491 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
11492 struct io_task_cancel *cancel = data;
11494 return io_match_task_safe(req, cancel->task, cancel->all);
11497 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
11498 struct task_struct *task,
11501 struct io_defer_entry *de;
11504 spin_lock(&ctx->completion_lock);
11505 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
11506 if (io_match_task_safe(de->req, task, cancel_all)) {
11507 list_cut_position(&list, &ctx->defer_list, &de->list);
11511 spin_unlock(&ctx->completion_lock);
11512 if (list_empty(&list))
11515 while (!list_empty(&list)) {
11516 de = list_first_entry(&list, struct io_defer_entry, list);
11517 list_del_init(&de->list);
11518 io_req_complete_failed(de->req, -ECANCELED);
11524 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
11526 struct io_tctx_node *node;
11527 enum io_wq_cancel cret;
11530 mutex_lock(&ctx->uring_lock);
11531 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
11532 struct io_uring_task *tctx = node->task->io_uring;
11535 * io_wq will stay alive while we hold uring_lock, because it's
11536 * killed after the ctx nodes, which requires taking the lock.
11538 if (!tctx || !tctx->io_wq)
11540 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
11541 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
11543 mutex_unlock(&ctx->uring_lock);
11548 static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
11549 struct task_struct *task,
11552 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
11553 struct io_uring_task *tctx = task ? task->io_uring : NULL;
11555 /* failed during ring init, it couldn't have issued any requests */
11560 enum io_wq_cancel cret;
11564 ret |= io_uring_try_cancel_iowq(ctx);
11565 } else if (tctx && tctx->io_wq) {
11567 * Cancels requests of all rings, not only @ctx, but
11568 * it's fine as the task is in exit/exec.
11570 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
11572 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
11575 /* SQPOLL thread does its own polling */
11576 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
11577 (ctx->sq_data && ctx->sq_data->thread == current)) {
11578 while (!wq_list_empty(&ctx->iopoll_list)) {
11579 io_iopoll_try_reap_events(ctx);
11584 ret |= io_cancel_defer_files(ctx, task, cancel_all);
11585 ret |= io_poll_remove_all(ctx, task, cancel_all);
11586 ret |= io_kill_timeouts(ctx, task, cancel_all);
11588 ret |= io_run_task_work();
11595 static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
11597 struct io_uring_task *tctx = current->io_uring;
11598 struct io_tctx_node *node;
11601 if (unlikely(!tctx)) {
11602 ret = io_uring_alloc_task_context(current, ctx);
11606 tctx = current->io_uring;
11607 if (ctx->iowq_limits_set) {
11608 unsigned int limits[2] = { ctx->iowq_limits[0],
11609 ctx->iowq_limits[1], };
11611 ret = io_wq_max_workers(tctx->io_wq, limits);
11616 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
11617 node = kmalloc(sizeof(*node), GFP_KERNEL);
11621 node->task = current;
11623 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
11624 node, GFP_KERNEL));
11630 mutex_lock(&ctx->uring_lock);
11631 list_add(&node->ctx_node, &ctx->tctx_list);
11632 mutex_unlock(&ctx->uring_lock);
11639 * Note that this task has used io_uring. We use it for cancelation purposes.
11641 static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
11643 struct io_uring_task *tctx = current->io_uring;
11645 if (likely(tctx && tctx->last == ctx))
11647 return __io_uring_add_tctx_node(ctx);
11651 * Remove this io_uring_file -> task mapping.
11653 static __cold void io_uring_del_tctx_node(unsigned long index)
11655 struct io_uring_task *tctx = current->io_uring;
11656 struct io_tctx_node *node;
11660 node = xa_erase(&tctx->xa, index);
11664 WARN_ON_ONCE(current != node->task);
11665 WARN_ON_ONCE(list_empty(&node->ctx_node));
11667 mutex_lock(&node->ctx->uring_lock);
11668 list_del(&node->ctx_node);
11669 mutex_unlock(&node->ctx->uring_lock);
11671 if (tctx->last == node->ctx)
11676 static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
11678 struct io_wq *wq = tctx->io_wq;
11679 struct io_tctx_node *node;
11680 unsigned long index;
11682 xa_for_each(&tctx->xa, index, node) {
11683 io_uring_del_tctx_node(index);
11688 * Must be after io_uring_del_tctx_node() (removes nodes under
11689 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
11691 io_wq_put_and_exit(wq);
11692 tctx->io_wq = NULL;
11696 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
11699 return atomic_read(&tctx->inflight_tracked);
11700 return percpu_counter_sum(&tctx->inflight);
11704 * Find any io_uring ctx that this task has registered or done IO on, and cancel
11705 * requests. @sqd should be non-NULL IFF it's an SQPOLL thread cancellation.
11707 static __cold void io_uring_cancel_generic(bool cancel_all,
11708 struct io_sq_data *sqd)
11710 struct io_uring_task *tctx = current->io_uring;
11711 struct io_ring_ctx *ctx;
11715 WARN_ON_ONCE(sqd && sqd->thread != current);
11717 if (!current->io_uring)
11720 io_wq_exit_start(tctx->io_wq);
11722 atomic_inc(&tctx->in_idle);
11724 io_uring_drop_tctx_refs(current);
11725 /* read completions before cancelations */
11726 inflight = tctx_inflight(tctx, !cancel_all);
11731 struct io_tctx_node *node;
11732 unsigned long index;
11734 xa_for_each(&tctx->xa, index, node) {
11735 /* sqpoll task will cancel all its requests */
11736 if (node->ctx->sq_data)
11738 io_uring_try_cancel_requests(node->ctx, current,
11742 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
11743 io_uring_try_cancel_requests(ctx, current,
11747 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
11748 io_run_task_work();
11749 io_uring_drop_tctx_refs(current);
11752 * If we've seen completions, retry without waiting. This
11753 * avoids a race where a completion comes in before we did
11754 * prepare_to_wait().
11756 if (inflight == tctx_inflight(tctx, !cancel_all))
11758 finish_wait(&tctx->wait, &wait);
11761 io_uring_clean_tctx(tctx);
11764 * We shouldn't run task_works after cancel, so just leave
11765 * ->in_idle set for normal exit.
11767 atomic_dec(&tctx->in_idle);
11768 /* for exec all current's requests should be gone, kill tctx */
11769 __io_uring_free(current);
11773 void __io_uring_cancel(bool cancel_all)
11775 io_uring_cancel_generic(cancel_all, NULL);
11778 void io_uring_unreg_ringfd(void)
11780 struct io_uring_task *tctx = current->io_uring;
11783 for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
11784 if (tctx->registered_rings[i]) {
11785 fput(tctx->registered_rings[i]);
11786 tctx->registered_rings[i] = NULL;
11791 static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
11792 int start, int end)
11797 for (offset = start; offset < end; offset++) {
11798 offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
11799 if (tctx->registered_rings[offset])
11805 } else if (file->f_op != &io_uring_fops) {
11807 return -EOPNOTSUPP;
11809 tctx->registered_rings[offset] = file;
11817 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
11818 * invocation. User passes in an array of struct io_uring_rsrc_update
11819 * with ->data set to the ring_fd, and ->offset given for the desired
11820 * index. If no index is desired, application may set ->offset == -1U
11821 * and we'll find an available index. Returns number of entries
11822 * successfully processed, or < 0 on error if none were processed.
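/*
 * Purely illustrative userspace sketch (variable names are made up): one
 * entry with ->offset == -1U asks the kernel to pick a free slot and write
 * the chosen index back:
 *
 *	struct io_uring_rsrc_update reg = {
 *		.offset	= -1U,
 *		.data	= (__u64)(uintptr_t)ring_fd,
 *	};
 *	ret = io_uring_register(uring_fd, IORING_REGISTER_RING_FDS, &reg, 1);
 *
 * The returned reg.offset can then be passed as the fd argument of
 * io_uring_enter() together with IORING_ENTER_REGISTERED_RING.
 */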
11824 static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
11827 struct io_uring_rsrc_update __user *arg = __arg;
11828 struct io_uring_rsrc_update reg;
11829 struct io_uring_task *tctx;
11832 if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
11835 mutex_unlock(&ctx->uring_lock);
11836 ret = io_uring_add_tctx_node(ctx);
11837 mutex_lock(&ctx->uring_lock);
11841 tctx = current->io_uring;
11842 for (i = 0; i < nr_args; i++) {
11845 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
11855 if (reg.offset == -1U) {
11857 end = IO_RINGFD_REG_MAX;
11859 if (reg.offset >= IO_RINGFD_REG_MAX) {
11863 start = reg.offset;
11867 ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
11872 if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
11873 fput(tctx->registered_rings[reg.offset]);
11874 tctx->registered_rings[reg.offset] = NULL;
11880 return i ? i : ret;
11883 static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
11886 struct io_uring_rsrc_update __user *arg = __arg;
11887 struct io_uring_task *tctx = current->io_uring;
11888 struct io_uring_rsrc_update reg;
11891 if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
11896 for (i = 0; i < nr_args; i++) {
11897 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
11901 if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
11906 reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
11907 if (tctx->registered_rings[reg.offset]) {
11908 fput(tctx->registered_rings[reg.offset]);
11909 tctx->registered_rings[reg.offset] = NULL;
11913 return i ? i : ret;
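/*
 * Translate an mmap() offset into the kernel buffer backing it: the rings
 * structure for IORING_OFF_SQ_RING/IORING_OFF_CQ_RING, or the SQE array for
 * IORING_OFF_SQES. The requested length must fit within that allocation.
 */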
11916 static void *io_uring_validate_mmap_request(struct file *file,
11917 loff_t pgoff, size_t sz)
11919 struct io_ring_ctx *ctx = file->private_data;
11920 loff_t offset = pgoff << PAGE_SHIFT;
11925 case IORING_OFF_SQ_RING:
11926 case IORING_OFF_CQ_RING:
11929 case IORING_OFF_SQES:
11930 ptr = ctx->sq_sqes;
11933 return ERR_PTR(-EINVAL);
11936 page = virt_to_head_page(ptr);
11937 if (sz > page_size(page))
11938 return ERR_PTR(-EINVAL);
11945 static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
11947 size_t sz = vma->vm_end - vma->vm_start;
11951 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
11953 return PTR_ERR(ptr);
11955 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
11956 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
11959 #else /* !CONFIG_MMU */
11961 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
11963 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
11966 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
11968 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
11971 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
11972 unsigned long addr, unsigned long len,
11973 unsigned long pgoff, unsigned long flags)
11977 ptr = io_uring_validate_mmap_request(file, pgoff, len);
11979 return PTR_ERR(ptr);
11981 return (unsigned long) ptr;
11984 #endif /* !CONFIG_MMU */
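/*
 * IORING_ENTER_SQ_WAIT: block until the SQPOLL thread has consumed enough
 * entries that the SQ ring is no longer full, or until a signal is pending.
 */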
11986 static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
11991 if (!io_sqring_full(ctx))
11993 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
11995 if (!io_sqring_full(ctx))
11998 } while (!signal_pending(current));
12000 finish_wait(&ctx->sqo_sq_wait, &wait);
12004 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
12006 if (flags & IORING_ENTER_EXT_ARG) {
12007 struct io_uring_getevents_arg arg;
12009 if (argsz != sizeof(arg))
12011 if (copy_from_user(&arg, argp, sizeof(arg)))
12017 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
12018 struct __kernel_timespec __user **ts,
12019 const sigset_t __user **sig)
12021 struct io_uring_getevents_arg arg;
12024 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
12025 * is just a pointer to the sigset_t.
12027 if (!(flags & IORING_ENTER_EXT_ARG)) {
12028 *sig = (const sigset_t __user *) argp;
12034 * EXT_ARG is set - ensure we agree on the size of it and copy in our
12035 * timespec and sigset_t pointers if good.
12037 if (*argsz != sizeof(arg))
12039 if (copy_from_user(&arg, argp, sizeof(arg)))
12043 *sig = u64_to_user_ptr(arg.sigmask);
12044 *argsz = arg.sigmask_sz;
12045 *ts = u64_to_user_ptr(arg.ts);
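/*
 * io_uring_enter(2): submit SQEs and/or wait for completions. With SQPOLL
 * the kernel thread does the actual submission, so this path only wakes it
 * and/or waits for SQ space; otherwise SQEs are submitted here under
 * uring_lock. With IORING_ENTER_GETEVENTS, completions are reaped either by
 * polling (IOPOLL rings) or by sleeping in io_cqring_wait().
 */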
12049 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
12050 u32, min_complete, u32, flags, const void __user *, argp,
12053 struct io_ring_ctx *ctx;
12057 io_run_task_work();
12059 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
12060 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
12061 IORING_ENTER_REGISTERED_RING)))
12065 * Ring fd has been registered via IORING_REGISTER_RING_FDS; we
12066 * need only dereference our task private array to find it.
12068 if (flags & IORING_ENTER_REGISTERED_RING) {
12069 struct io_uring_task *tctx = current->io_uring;
12071 if (!tctx || fd >= IO_RINGFD_REG_MAX)
12073 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
12074 f.file = tctx->registered_rings[fd];
12080 if (unlikely(!f.file))
12084 if (unlikely(f.file->f_op != &io_uring_fops))
12088 ctx = f.file->private_data;
12089 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
12093 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
12097 * For SQ polling, the thread will do all submissions and completions.
12098 * Just return the requested submit count, and wake the thread if
12099 * we were asked to.
12102 if (ctx->flags & IORING_SETUP_SQPOLL) {
12103 io_cqring_overflow_flush(ctx);
12105 if (unlikely(ctx->sq_data->thread == NULL)) {
12109 if (flags & IORING_ENTER_SQ_WAKEUP)
12110 wake_up(&ctx->sq_data->wait);
12111 if (flags & IORING_ENTER_SQ_WAIT) {
12112 ret = io_sqpoll_wait_sq(ctx);
12117 } else if (to_submit) {
12118 ret = io_uring_add_tctx_node(ctx);
12122 mutex_lock(&ctx->uring_lock);
12123 ret = io_submit_sqes(ctx, to_submit);
12124 if (ret != to_submit) {
12125 mutex_unlock(&ctx->uring_lock);
12128 if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
12129 goto iopoll_locked;
12130 mutex_unlock(&ctx->uring_lock);
12132 if (flags & IORING_ENTER_GETEVENTS) {
12134 if (ctx->syscall_iopoll) {
12136 * We disallow the app entering submit/complete with
12137 * polling, but we still need to lock the ring to
12138 * prevent racing with polled issue that got punted to a workqueue.
12141 mutex_lock(&ctx->uring_lock);
12143 ret2 = io_validate_ext_arg(flags, argp, argsz);
12144 if (likely(!ret2)) {
12145 min_complete = min(min_complete,
12147 ret2 = io_iopoll_check(ctx, min_complete);
12149 mutex_unlock(&ctx->uring_lock);
12151 const sigset_t __user *sig;
12152 struct __kernel_timespec __user *ts;
12154 ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
12155 if (likely(!ret2)) {
12156 min_complete = min(min_complete,
12158 ret2 = io_cqring_wait(ctx, min_complete, sig,
12167 * EBADR indicates that one or more CQEs were dropped.
12168 * Once the user has been informed we can clear the bit
12169 * as they are obviously ok with those drops.
12171 if (unlikely(ret2 == -EBADR))
12172 clear_bit(IO_CHECK_CQ_DROPPED_BIT,
12178 percpu_ref_put(&ctx->refs);
12184 #ifdef CONFIG_PROC_FS
12185 static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
12186 const struct cred *cred)
12188 struct user_namespace *uns = seq_user_ns(m);
12189 struct group_info *gi;
12194 seq_printf(m, "%5d\n", id);
12195 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
12196 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
12197 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
12198 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
12199 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
12200 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
12201 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
12202 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
12203 seq_puts(m, "\n\tGroups:\t");
12204 gi = cred->group_info;
12205 for (g = 0; g < gi->ngroups; g++) {
12206 seq_put_decimal_ull(m, g ? " " : "",
12207 from_kgid_munged(uns, gi->gid[g]));
12209 seq_puts(m, "\n\tCapEff:\t");
12210 cap = cred->cap_effective;
12211 CAP_FOR_EACH_U32(__capi)
12212 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
12217 static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
12218 struct seq_file *m)
12220 struct io_sq_data *sq = NULL;
12221 struct io_overflow_cqe *ocqe;
12222 struct io_rings *r = ctx->rings;
12223 unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
12224 unsigned int sq_head = READ_ONCE(r->sq.head);
12225 unsigned int sq_tail = READ_ONCE(r->sq.tail);
12226 unsigned int cq_head = READ_ONCE(r->cq.head);
12227 unsigned int cq_tail = READ_ONCE(r->cq.tail);
12228 unsigned int cq_shift = 0;
12229 unsigned int sq_entries, cq_entries;
12231 bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
12238 * We may get imprecise sqe and cqe info if the ring is actively running,
12239 * since we read cached_sq_head and cached_cq_tail without uring_lock
12240 * and sq_tail and cq_head are changed by userspace. But that's OK, since
12241 * we usually only use this info when the ring is stuck.
12243 seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
12244 seq_printf(m, "SqHead:\t%u\n", sq_head);
12245 seq_printf(m, "SqTail:\t%u\n", sq_tail);
12246 seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
12247 seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
12248 seq_printf(m, "CqHead:\t%u\n", cq_head);
12249 seq_printf(m, "CqTail:\t%u\n", cq_tail);
12250 seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
12251 seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
12252 sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
12253 for (i = 0; i < sq_entries; i++) {
12254 unsigned int entry = i + sq_head;
12255 unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
12256 struct io_uring_sqe *sqe;
12258 if (sq_idx > sq_mask)
12260 sqe = &ctx->sq_sqes[sq_idx];
12261 seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
12262 sq_idx, sqe->opcode, sqe->fd, sqe->flags,
12265 seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
12266 cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
12267 for (i = 0; i < cq_entries; i++) {
12268 unsigned int entry = i + cq_head;
12269 struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
12272 seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
12273 entry & cq_mask, cqe->user_data, cqe->res,
12276 seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x, "
12277 "extra1:%llu, extra2:%llu\n",
12278 entry & cq_mask, cqe->user_data, cqe->res,
12279 cqe->flags, cqe->big_cqe[0], cqe->big_cqe[1]);
12284 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
12285 * since fdinfo case grabs it in the opposite direction of normal use
12286 * cases. If we fail to get the lock, we just don't iterate any
12287 * structures that could be going away outside the io_uring mutex.
12289 has_lock = mutex_trylock(&ctx->uring_lock);
12291 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
12297 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
12298 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
12299 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
12300 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
12301 struct file *f = io_file_from_index(ctx, i);
12304 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
12306 seq_printf(m, "%5u: <none>\n", i);
12308 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
12309 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
12310 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
12311 unsigned int len = buf->ubuf_end - buf->ubuf;
12313 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
12315 if (has_lock && !xa_empty(&ctx->personalities)) {
12316 unsigned long index;
12317 const struct cred *cred;
12319 seq_printf(m, "Personalities:\n");
12320 xa_for_each(&ctx->personalities, index, cred)
12321 io_uring_show_cred(m, index, cred);
12324 mutex_unlock(&ctx->uring_lock);
12326 seq_puts(m, "PollList:\n");
12327 spin_lock(&ctx->completion_lock);
12328 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
12329 struct hlist_head *list = &ctx->cancel_hash[i];
12330 struct io_kiocb *req;
12332 hlist_for_each_entry(req, list, hash_node)
12333 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
12334 task_work_pending(req->task));
12337 seq_puts(m, "CqOverflowList:\n");
12338 list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
12339 struct io_uring_cqe *cqe = &ocqe->cqe;
12341 seq_printf(m, " user_data=%llu, res=%d, flags=%x\n",
12342 cqe->user_data, cqe->res, cqe->flags);
12346 spin_unlock(&ctx->completion_lock);
12349 static __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
12351 struct io_ring_ctx *ctx = f->private_data;
12353 if (percpu_ref_tryget(&ctx->refs)) {
12354 __io_uring_show_fdinfo(ctx, m);
12355 percpu_ref_put(&ctx->refs);
12360 static const struct file_operations io_uring_fops = {
12361 .release = io_uring_release,
12362 .mmap = io_uring_mmap,
12364 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
12365 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
12367 .poll = io_uring_poll,
12368 #ifdef CONFIG_PROC_FS
12369 .show_fdinfo = io_uring_show_fdinfo,
12373 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
12374 struct io_uring_params *p)
12376 struct io_rings *rings;
12377 size_t size, sq_array_offset;
12379 /* make sure these are sane, as we already accounted them */
12380 ctx->sq_entries = p->sq_entries;
12381 ctx->cq_entries = p->cq_entries;
12383 size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
12384 if (size == SIZE_MAX)
12387 rings = io_mem_alloc(size);
12391 ctx->rings = rings;
12392 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
12393 rings->sq_ring_mask = p->sq_entries - 1;
12394 rings->cq_ring_mask = p->cq_entries - 1;
12395 rings->sq_ring_entries = p->sq_entries;
12396 rings->cq_ring_entries = p->cq_entries;
12398 if (p->flags & IORING_SETUP_SQE128)
12399 size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
12401 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
12402 if (size == SIZE_MAX) {
12403 io_mem_free(ctx->rings);
12408 ctx->sq_sqes = io_mem_alloc(size);
12409 if (!ctx->sq_sqes) {
12410 io_mem_free(ctx->rings);
12418 static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
12422 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
12426 ret = io_uring_add_tctx_node(ctx);
12431 fd_install(fd, file);
12436 * Allocate an anonymous fd: this is what constitutes the application-visible
12437 * backing of an io_uring instance. The application mmaps this fd to gain
12438 * access to the SQ/CQ ring details. If UNIX sockets are enabled, we have to
12439 * tie this fd to a socket for file garbage collection purposes.
12441 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
12444 #if defined(CONFIG_UNIX)
12447 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
12450 return ERR_PTR(ret);
12453 file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
12454 O_RDWR | O_CLOEXEC, NULL);
12455 #if defined(CONFIG_UNIX)
12456 if (IS_ERR(file)) {
12457 sock_release(ctx->ring_sock);
12458 ctx->ring_sock = NULL;
12460 ctx->ring_sock->file = file;
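/*
 * Illustrative userspace sketch (not kernel code): once io_uring_setup()
 * returns this fd, the application maps the rings through it. A minimal
 * sketch, assuming "fd" and a struct io_uring_params "p" filled in by a
 * successful setup call:
 *
 *        #include <sys/mman.h>
 *        #include <linux/io_uring.h>
 *
 *        size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *        size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *
 *        void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *                             MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *        void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *                          fd, IORING_OFF_SQES);
 *
 *        // With IORING_FEAT_SINGLE_MMAP the CQ ring shares the SQ ring
 *        // mapping; otherwise mmap IORING_OFF_CQ_RING with cq_sz separately.
 */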
12466 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
12467 struct io_uring_params __user *params)
12469 struct io_ring_ctx *ctx;
12475 if (entries > IORING_MAX_ENTRIES) {
12476 if (!(p->flags & IORING_SETUP_CLAMP))
12478 entries = IORING_MAX_ENTRIES;
12482 * Use twice as many entries for the CQ ring. It's possible for the
12483 * application to drive a higher depth than the size of the SQ ring,
12484 * since the sqes are only used at submission time. This allows some
12485 * room to overcommit. If the application has set IORING_SETUP_CQSIZE,
12486 * it will have passed in the desired number of CQ ring entries
12487 * manually.
12489 p->sq_entries = roundup_pow_of_two(entries);
12490 if (p->flags & IORING_SETUP_CQSIZE) {
12492 * If IORING_SETUP_CQSIZE is set, we do the same roundup
12493 * to a power-of-two, if it isn't already. We do NOT impose
12494 * any cq vs sq ring sizing.
12496 if (!p->cq_entries)
12498 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
12499 if (!(p->flags & IORING_SETUP_CLAMP))
12501 p->cq_entries = IORING_MAX_CQ_ENTRIES;
12503 p->cq_entries = roundup_pow_of_two(p->cq_entries);
12504 if (p->cq_entries < p->sq_entries)
12507 p->cq_entries = 2 * p->sq_entries;
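/*
 * For example, a setup request of entries == 100 with IORING_SETUP_CQSIZE not
 * set ends up here with sq_entries == 128 and cq_entries == 256.
 */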
12510 ctx = io_ring_ctx_alloc(p);
12515 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space
12516 * applications don't need to poll for completion events themselves;
12517 * they can rely on io_sq_thread to do the polling, which reduces cpu
12518 * usage and uring_lock contention.
12520 if (ctx->flags & IORING_SETUP_IOPOLL &&
12521 !(ctx->flags & IORING_SETUP_SQPOLL))
12522 ctx->syscall_iopoll = 1;
12524 ctx->compat = in_compat_syscall();
12525 if (!capable(CAP_IPC_LOCK))
12526 ctx->user = get_uid(current_user());
12529 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
12530 * COOP_TASKRUN is set, then IPIs are never needed by the app.
12533 if (ctx->flags & IORING_SETUP_SQPOLL) {
12534 /* IPI related flags don't make sense with SQPOLL */
12535 if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
12536 IORING_SETUP_TASKRUN_FLAG))
12538 ctx->notify_method = TWA_SIGNAL_NO_IPI;
12539 } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
12540 ctx->notify_method = TWA_SIGNAL_NO_IPI;
12542 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
12544 ctx->notify_method = TWA_SIGNAL;
12548 * This is just grabbed for accounting purposes. When a process exits,
12549 * the mm is exited and dropped before the files, hence we need to hang
12550 * on to this mm purely for the purposes of being able to unaccount
12551 * memory (locked/pinned vm). It's not used for anything else.
12553 mmgrab(current->mm);
12554 ctx->mm_account = current->mm;
12556 ret = io_allocate_scq_urings(ctx, p);
12560 ret = io_sq_offload_create(ctx, p);
12563 /* always set a rsrc node */
12564 ret = io_rsrc_node_switch_start(ctx);
12567 io_rsrc_node_switch(ctx, NULL);
12569 memset(&p->sq_off, 0, sizeof(p->sq_off));
12570 p->sq_off.head = offsetof(struct io_rings, sq.head);
12571 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
12572 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
12573 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
12574 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
12575 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
12576 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
12578 memset(&p->cq_off, 0, sizeof(p->cq_off));
12579 p->cq_off.head = offsetof(struct io_rings, cq.head);
12580 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
12581 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
12582 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
12583 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
12584 p->cq_off.cqes = offsetof(struct io_rings, cqes);
12585 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
12587 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
12588 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
12589 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
12590 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
12591 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
12592 IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
12593 IORING_FEAT_LINKED_FILE;
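/*
 * Illustrative userspace sketch (not kernel code): the offsets published
 * above are added to the corresponding mmap() base to locate the shared ring
 * fields. A minimal sketch, assuming "p" is the returned io_uring_params and
 * "sq_ring"/"cq_ring" are the mapped ring areas:
 *
 *        unsigned *sq_tail = (unsigned *)((char *)sq_ring + p.sq_off.tail);
 *        unsigned *sq_mask = (unsigned *)((char *)sq_ring + p.sq_off.ring_mask);
 *        unsigned *sq_array = (unsigned *)((char *)sq_ring + p.sq_off.array);
 *        unsigned *cq_head = (unsigned *)((char *)cq_ring + p.cq_off.head);
 *        struct io_uring_cqe *cqes =
 *                (struct io_uring_cqe *)((char *)cq_ring + p.cq_off.cqes);
 *
 * The application should also consult p.features (e.g. IORING_FEAT_SINGLE_MMAP)
 * when deciding how many mappings it needs; the liburing setup code is the
 * reference example.
 */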
12595 if (copy_to_user(params, p, sizeof(*p))) {
12600 file = io_uring_get_file(ctx);
12601 if (IS_ERR(file)) {
12602 ret = PTR_ERR(file);
12607 * Install ring fd as the very last thing, so we don't risk someone
12608 * having closed it before we finish setup
12610 ret = io_uring_install_fd(ctx, file);
12612 /* fput will clean it up */
12617 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
12620 io_ring_ctx_wait_and_kill(ctx);
12625 * Sets up an io_uring context and returns the fd. The application asks for a
12626 * ring size; we return the actual sq/cq ring sizes (among other things) in
12627 * the params structure passed in.
12629 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
12631 struct io_uring_params p;
12634 if (copy_from_user(&p, params, sizeof(p)))
12636 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
12641 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
12642 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
12643 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
12644 IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
12645 IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
12646 IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
12649 return io_uring_create(entries, &p, params);
12652 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
12653 struct io_uring_params __user *, params)
12655 return io_uring_setup(entries, params);
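/*
 * Illustrative userspace sketch (not kernel code): without liburing, the
 * setup syscall can be invoked directly. A minimal sketch:
 *
 *        #include <string.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/io_uring.h>
 *
 *        struct io_uring_params p;
 *
 *        memset(&p, 0, sizeof(p));
 *        p.flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
 *        p.cq_entries = 4096;
 *        int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 *        // On success, p.sq_entries and p.cq_entries hold the sizes actually
 *        // allocated and p.features reports what this kernel supports.
 */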
12658 static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
12661 struct io_uring_probe *p;
12665 size = struct_size(p, ops, nr_args);
12666 if (size == SIZE_MAX)
12668 p = kzalloc(size, GFP_KERNEL);
12673 if (copy_from_user(p, arg, size))
12676 if (memchr_inv(p, 0, size))
12679 p->last_op = IORING_OP_LAST - 1;
12680 if (nr_args > IORING_OP_LAST)
12681 nr_args = IORING_OP_LAST;
12683 for (i = 0; i < nr_args; i++) {
12685 if (!io_op_defs[i].not_supported)
12686 p->ops[i].flags = IO_URING_OP_SUPPORTED;
12691 if (copy_to_user(arg, p, size))
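/*
 * Illustrative userspace sketch (not kernel code): probing opcode support
 * with IORING_REGISTER_PROBE. A minimal sketch, assuming "ring_fd" is a valid
 * ring fd:
 *
 *        #include <stdlib.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/io_uring.h>
 *
 *        size_t len = sizeof(struct io_uring_probe) +
 *                     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *        struct io_uring_probe *probe = calloc(1, len);
 *        int have_openat2 = 0;
 *
 *        if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *                    probe, IORING_OP_LAST) == 0)
 *                have_openat2 = probe->last_op >= IORING_OP_OPENAT2 &&
 *                               (probe->ops[IORING_OP_OPENAT2].flags &
 *                                IO_URING_OP_SUPPORTED);
 *        free(probe);
 */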
12698 static int io_register_personality(struct io_ring_ctx *ctx)
12700 const struct cred *creds;
12704 creds = get_current_cred();
12706 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
12707 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
12715 static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
12716 void __user *arg, unsigned int nr_args)
12718 struct io_uring_restriction *res;
12722 /* Restrictions allowed only if rings started disabled */
12723 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
12726 /* We allow only a single restrictions registration */
12727 if (ctx->restrictions.registered)
12730 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
12733 size = array_size(nr_args, sizeof(*res));
12734 if (size == SIZE_MAX)
12737 res = memdup_user(arg, size);
12739 return PTR_ERR(res);
12743 for (i = 0; i < nr_args; i++) {
12744 switch (res[i].opcode) {
12745 case IORING_RESTRICTION_REGISTER_OP:
12746 if (res[i].register_op >= IORING_REGISTER_LAST) {
12751 __set_bit(res[i].register_op,
12752 ctx->restrictions.register_op);
12754 case IORING_RESTRICTION_SQE_OP:
12755 if (res[i].sqe_op >= IORING_OP_LAST) {
12760 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
12762 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
12763 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
12765 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
12766 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
12775 /* Reset all restrictions if an error happened */
12777 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
12779 ctx->restrictions.registered = true;
12785 static int io_register_enable_rings(struct io_ring_ctx *ctx)
12787 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
12790 if (ctx->restrictions.registered)
12791 ctx->restricted = 1;
12793 ctx->flags &= ~IORING_SETUP_R_DISABLED;
12794 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
12795 wake_up(&ctx->sq_data->wait);
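/*
 * Illustrative userspace sketch (not kernel code): the intended flow for a
 * restricted ring is to create it with IORING_SETUP_R_DISABLED, register the
 * restrictions, then enable it. A minimal sketch, assuming "ring_fd" was set
 * up with IORING_SETUP_R_DISABLED:
 *
 *        #include <string.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/io_uring.h>
 *
 *        struct io_uring_restriction res[2];
 *
 *        memset(res, 0, sizeof(res));
 *        res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *        res[0].sqe_op = IORING_OP_READV;
 *        res[1].opcode = IORING_RESTRICTION_SQE_OP;
 *        res[1].sqe_op = IORING_OP_WRITEV;
 *
 *        syscall(__NR_io_uring_register, ring_fd,
 *                IORING_REGISTER_RESTRICTIONS, res, 2);
 *        syscall(__NR_io_uring_register, ring_fd,
 *                IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */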
12799 static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
12800 struct io_uring_rsrc_update2 *up,
12806 if (check_add_overflow(up->offset, nr_args, &tmp))
12808 err = io_rsrc_node_switch_start(ctx);
12813 case IORING_RSRC_FILE:
12814 return __io_sqe_files_update(ctx, up, nr_args);
12815 case IORING_RSRC_BUFFER:
12816 return __io_sqe_buffers_update(ctx, up, nr_args);
12821 static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
12824 struct io_uring_rsrc_update2 up;
12828 memset(&up, 0, sizeof(up));
12829 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
12831 if (up.resv || up.resv2)
12833 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
12836 static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
12837 unsigned size, unsigned type)
12839 struct io_uring_rsrc_update2 up;
12841 if (size != sizeof(up))
12843 if (copy_from_user(&up, arg, sizeof(up)))
12845 if (!up.nr || up.resv || up.resv2)
12847 return __io_register_rsrc_update(ctx, type, &up, up.nr);
12850 static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
12851 unsigned int size, unsigned int type)
12853 struct io_uring_rsrc_register rr;
12855 /* keep it extendible */
12856 if (size != sizeof(rr))
12859 memset(&rr, 0, sizeof(rr));
12860 if (copy_from_user(&rr, arg, size))
12862 if (!rr.nr || rr.resv2)
12864 if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
12868 case IORING_RSRC_FILE:
12869 if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
12871 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
12872 rr.nr, u64_to_user_ptr(rr.tags));
12873 case IORING_RSRC_BUFFER:
12874 if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
12876 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
12877 rr.nr, u64_to_user_ptr(rr.tags));
12882 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
12883 void __user *arg, unsigned len)
12885 struct io_uring_task *tctx = current->io_uring;
12886 cpumask_var_t new_mask;
12889 if (!tctx || !tctx->io_wq)
12892 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
12895 cpumask_clear(new_mask);
12896 if (len > cpumask_size())
12897 len = cpumask_size();
12899 if (in_compat_syscall()) {
12900 ret = compat_get_bitmap(cpumask_bits(new_mask),
12901 (const compat_ulong_t __user *)arg,
12902 len * 8 /* CHAR_BIT */);
12904 ret = copy_from_user(new_mask, arg, len);
12908 free_cpumask_var(new_mask);
12912 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
12913 free_cpumask_var(new_mask);
12917 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
12919 struct io_uring_task *tctx = current->io_uring;
12921 if (!tctx || !tctx->io_wq)
12924 return io_wq_cpu_affinity(tctx->io_wq, NULL);
12927 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
12929 __must_hold(&ctx->uring_lock)
12931 struct io_tctx_node *node;
12932 struct io_uring_task *tctx = NULL;
12933 struct io_sq_data *sqd = NULL;
12934 __u32 new_count[2];
12937 if (copy_from_user(new_count, arg, sizeof(new_count)))
12939 for (i = 0; i < ARRAY_SIZE(new_count); i++)
12940 if (new_count[i] > INT_MAX)
12943 if (ctx->flags & IORING_SETUP_SQPOLL) {
12944 sqd = ctx->sq_data;
12947 * Observe the correct sqd->lock -> ctx->uring_lock
12948 * ordering. It is fine to drop uring_lock here; we
12949 * hold a ref to the ctx.
12951 refcount_inc(&sqd->refs);
12952 mutex_unlock(&ctx->uring_lock);
12953 mutex_lock(&sqd->lock);
12954 mutex_lock(&ctx->uring_lock);
12956 tctx = sqd->thread->io_uring;
12959 tctx = current->io_uring;
12962 BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
12964 for (i = 0; i < ARRAY_SIZE(new_count); i++)
12966 ctx->iowq_limits[i] = new_count[i];
12967 ctx->iowq_limits_set = true;
12969 if (tctx && tctx->io_wq) {
12970 ret = io_wq_max_workers(tctx->io_wq, new_count);
12974 memset(new_count, 0, sizeof(new_count));
12978 mutex_unlock(&sqd->lock);
12979 io_put_sq_data(sqd);
12982 if (copy_to_user(arg, new_count, sizeof(new_count)))
12985 /* that's it for SQPOLL, only the SQPOLL task creates requests */
12989 /* now propagate the restriction to all registered users */
12990 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
12991 struct io_uring_task *tctx = node->task->io_uring;
12993 if (WARN_ON_ONCE(!tctx->io_wq))
12996 for (i = 0; i < ARRAY_SIZE(new_count); i++)
12997 new_count[i] = ctx->iowq_limits[i];
12998 /* ignore errors, it always returns zero anyway */
12999 (void)io_wq_max_workers(tctx->io_wq, new_count);
13004 mutex_unlock(&sqd->lock);
13005 io_put_sq_data(sqd);
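/*
 * Illustrative userspace sketch (not kernel code): the argument is an array
 * of two __u32 values, [0] for bounded and [1] for unbounded io-wq workers; a
 * zero leaves that limit unchanged, and the previous limits are copied back.
 * A minimal sketch, assuming a valid "ring_fd":
 *
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/io_uring.h>
 *
 *        unsigned int counts[2] = { 8, 0 };  // cap bounded workers at 8
 *
 *        syscall(__NR_io_uring_register, ring_fd,
 *                IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
 *
 *        // On success, counts[] now holds the previous limits.
 */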
13010 static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
13012 struct io_uring_buf_ring *br;
13013 struct io_uring_buf_reg reg;
13014 struct io_buffer_list *bl;
13015 struct page **pages;
13018 if (copy_from_user(&reg, arg, sizeof(reg)))
13021 if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
13023 if (!reg.ring_addr)
13025 if (reg.ring_addr & ~PAGE_MASK)
13027 if (!is_power_of_2(reg.ring_entries))
13030 /* cannot disambiguate full vs empty due to head/tail size */
13031 if (reg.ring_entries >= 65536)
13034 if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
13035 int ret = io_init_bl_list(ctx);
13040 bl = io_buffer_get_list(ctx, reg.bgid);
13042 /* if mapped buffer ring OR classic exists, don't allow */
13043 if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
13046 bl = kzalloc(sizeof(*bl), GFP_KERNEL);
13051 pages = io_pin_pages(reg.ring_addr,
13052 struct_size(br, bufs, reg.ring_entries),
13054 if (IS_ERR(pages)) {
13056 return PTR_ERR(pages);
13059 br = page_address(pages[0]);
13060 bl->buf_pages = pages;
13061 bl->buf_nr_pages = nr_pages;
13062 bl->nr_entries = reg.ring_entries;
13064 bl->mask = reg.ring_entries - 1;
13065 io_buffer_add_list(ctx, bl, reg.bgid);
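/*
 * Illustrative userspace sketch (not kernel code): registering a provided
 * buffer ring directly via io_uring_register(2). A minimal sketch, assuming
 * "ring_fd" is valid and 128 entries are wanted for buffer group 0:
 *
 *        #include <string.h>
 *        #include <unistd.h>
 *        #include <sys/mman.h>
 *        #include <sys/syscall.h>
 *        #include <linux/io_uring.h>
 *
 *        struct io_uring_buf_reg breg;
 *        struct io_uring_buf_ring *br = mmap(NULL,
 *                        128 * sizeof(struct io_uring_buf),
 *                        PROT_READ | PROT_WRITE,
 *                        MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 *
 *        memset(&breg, 0, sizeof(breg));
 *        breg.ring_addr = (unsigned long)br;  // must be page aligned
 *        breg.ring_entries = 128;             // must be a power of two
 *        breg.bgid = 0;
 *        syscall(__NR_io_uring_register, ring_fd,
 *                IORING_REGISTER_PBUF_RING, &breg, 1);
 *
 *        // The application then fills br->bufs[] and advances br->tail to
 *        // publish buffers to the kernel.
 */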
13069 static int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
13071 struct io_uring_buf_reg reg;
13072 struct io_buffer_list *bl;
13074 if (copy_from_user(&reg, arg, sizeof(reg)))
13076 if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
13079 bl = io_buffer_get_list(ctx, reg.bgid);
13082 if (!bl->buf_nr_pages)
13085 __io_remove_buffers(ctx, bl, -1U);
13086 if (bl->bgid >= BGID_ARRAY) {
13087 xa_erase(&ctx->io_bl_xa, bl->bgid);
13093 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
13094 void __user *arg, unsigned nr_args)
13095 __releases(ctx->uring_lock)
13096 __acquires(ctx->uring_lock)
13101 * We're inside the ring mutex; if the ref is already dying, then
13102 * someone else killed the ctx or is already going through
13103 * io_uring_register().
13105 if (percpu_ref_is_dying(&ctx->refs))
13108 if (ctx->restricted) {
13109 if (opcode >= IORING_REGISTER_LAST)
13111 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
13112 if (!test_bit(opcode, ctx->restrictions.register_op))
13117 case IORING_REGISTER_BUFFERS:
13121 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
13123 case IORING_UNREGISTER_BUFFERS:
13125 if (arg || nr_args)
13127 ret = io_sqe_buffers_unregister(ctx);
13129 case IORING_REGISTER_FILES:
13133 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
13135 case IORING_UNREGISTER_FILES:
13137 if (arg || nr_args)
13139 ret = io_sqe_files_unregister(ctx);
13141 case IORING_REGISTER_FILES_UPDATE:
13142 ret = io_register_files_update(ctx, arg, nr_args);
13144 case IORING_REGISTER_EVENTFD:
13148 ret = io_eventfd_register(ctx, arg, 0);
13150 case IORING_REGISTER_EVENTFD_ASYNC:
13154 ret = io_eventfd_register(ctx, arg, 1);
13156 case IORING_UNREGISTER_EVENTFD:
13158 if (arg || nr_args)
13160 ret = io_eventfd_unregister(ctx);
13162 case IORING_REGISTER_PROBE:
13164 if (!arg || nr_args > 256)
13166 ret = io_probe(ctx, arg, nr_args);
13168 case IORING_REGISTER_PERSONALITY:
13170 if (arg || nr_args)
13172 ret = io_register_personality(ctx);
13174 case IORING_UNREGISTER_PERSONALITY:
13178 ret = io_unregister_personality(ctx, nr_args);
13180 case IORING_REGISTER_ENABLE_RINGS:
13182 if (arg || nr_args)
13184 ret = io_register_enable_rings(ctx);
13186 case IORING_REGISTER_RESTRICTIONS:
13187 ret = io_register_restrictions(ctx, arg, nr_args);
13189 case IORING_REGISTER_FILES2:
13190 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
13192 case IORING_REGISTER_FILES_UPDATE2:
13193 ret = io_register_rsrc_update(ctx, arg, nr_args,
13196 case IORING_REGISTER_BUFFERS2:
13197 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
13199 case IORING_REGISTER_BUFFERS_UPDATE:
13200 ret = io_register_rsrc_update(ctx, arg, nr_args,
13201 IORING_RSRC_BUFFER);
13203 case IORING_REGISTER_IOWQ_AFF:
13205 if (!arg || !nr_args)
13207 ret = io_register_iowq_aff(ctx, arg, nr_args);
13209 case IORING_UNREGISTER_IOWQ_AFF:
13211 if (arg || nr_args)
13213 ret = io_unregister_iowq_aff(ctx);
13215 case IORING_REGISTER_IOWQ_MAX_WORKERS:
13217 if (!arg || nr_args != 2)
13219 ret = io_register_iowq_max_workers(ctx, arg);
13221 case IORING_REGISTER_RING_FDS:
13222 ret = io_ringfd_register(ctx, arg, nr_args);
13224 case IORING_UNREGISTER_RING_FDS:
13225 ret = io_ringfd_unregister(ctx, arg, nr_args);
13227 case IORING_REGISTER_PBUF_RING:
13229 if (!arg || nr_args != 1)
13231 ret = io_register_pbuf_ring(ctx, arg);
13233 case IORING_UNREGISTER_PBUF_RING:
13235 if (!arg || nr_args != 1)
13237 ret = io_unregister_pbuf_ring(ctx, arg);
13247 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
13248 void __user *, arg, unsigned int, nr_args)
13250 struct io_ring_ctx *ctx;
13259 if (f.file->f_op != &io_uring_fops)
13262 ctx = f.file->private_data;
13264 io_run_task_work();
13266 mutex_lock(&ctx->uring_lock);
13267 ret = __io_uring_register(ctx, opcode, arg, nr_args);
13268 mutex_unlock(&ctx->uring_lock);
13269 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
13275 static int __init io_uring_init(void)
13277 #define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
13278 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
13279 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
13282 #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
13283 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
13284 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
13285 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
13286 BUILD_BUG_SQE_ELEM(1, __u8, flags);
13287 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
13288 BUILD_BUG_SQE_ELEM(4, __s32, fd);
13289 BUILD_BUG_SQE_ELEM(8, __u64, off);
13290 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
13291 BUILD_BUG_SQE_ELEM(16, __u64, addr);
13292 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
13293 BUILD_BUG_SQE_ELEM(24, __u32, len);
13294 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
13295 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
13296 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
13297 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
13298 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
13299 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
13300 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
13301 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
13302 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
13303 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
13304 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
13305 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
13306 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
13307 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
13308 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
13309 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
13310 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
13311 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
13312 BUILD_BUG_SQE_ELEM(42, __u16, personality);
13313 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
13314 BUILD_BUG_SQE_ELEM(44, __u32, file_index);
13315 BUILD_BUG_SQE_ELEM(48, __u64, addr3);
13317 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
13318 sizeof(struct io_uring_rsrc_update));
13319 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
13320 sizeof(struct io_uring_rsrc_update2));
13322 /* ->buf_index is u16 */
13323 BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
13324 BUILD_BUG_ON(BGID_ARRAY * sizeof(struct io_buffer_list) > PAGE_SIZE);
13325 BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
13326 BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
13327 offsetof(struct io_uring_buf_ring, tail));
13329 /* should fit into one byte */
13330 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
13331 BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
13332 BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
13334 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
13335 BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
13337 BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
13339 BUILD_BUG_ON(sizeof(struct io_uring_cmd) > 64);
13341 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
13345 __initcall(io_uring_init);