// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"
struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

#define IO_RSRC_REF_BATCH	100

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)
void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_rsrc_node *node = ctx->rsrc_node;

	if (node && node->cached_refs) {
		io_rsrc_put_node(node, node->cached_refs);
		node->cached_refs = 0;
	}
}
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}
static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}
static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}
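/*
 * Quick reference for the validation above (editorial note, illustrative
 * values only): a NULL base with zero length is accepted so userspace can
 * leave sparse slots, any other NULL-base or zero-length iovec fails, and
 * oversized buffers are rejected outright.
 *
 *	{ .iov_base = NULL, .iov_len = 0 }          -> 0 (sparse slot)
 *	{ .iov_base = NULL, .iov_len = 4096 }       -> -EFAULT
 *	{ .iov_base = buf,  .iov_len = 0 }          -> -EFAULT
 *	{ .iov_base = buf,  .iov_len = 2UL << 30 }  -> -EFAULT (> SZ_1G)
 */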
static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}
void io_rsrc_refs_refill(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
	__must_hold(&ctx->uring_lock)
{
	node->cached_refs += IO_RSRC_REF_BATCH;
	refcount_add(IO_RSRC_REF_BATCH, &node->refs);
}
static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_ring_ctx *ctx = rsrc_data->ctx;
	struct io_rsrc_put *prsrc, *tmp;

	list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
		list_del(&prsrc->list);

		if (prsrc->tag) {
			if (ctx->flags & IORING_SETUP_IOPOLL) {
				mutex_lock(&ctx->uring_lock);
				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
				mutex_unlock(&ctx->uring_lock);
			} else {
				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
			}
		}

		rsrc_data->do_put(ctx, prsrc);
		kfree(prsrc);
	}

	io_rsrc_node_destroy(ref_node);
	if (atomic_dec_and_test(&rsrc_data->refs))
		complete(&rsrc_data->done);
}
void io_rsrc_put_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx;
	struct llist_node *node;

	ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
	node = llist_del_all(&ctx->rsrc_put_llist);

	while (node) {
		struct io_rsrc_node *ref_node;
		struct llist_node *next = node->next;

		ref_node = llist_entry(node, struct io_rsrc_node, llist);
		__io_rsrc_put_work(ref_node);
		node = next;
	}
}
void io_rsrc_put_tw(struct callback_head *cb)
{
	struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
					       rsrc_put_tw);

	io_rsrc_put_work(&ctx->rsrc_put_work.work);
}

void io_wait_rsrc_data(struct io_rsrc_data *data)
{
	if (data && !atomic_dec_and_test(&data->refs))
		wait_for_completion(&data->done);
}
void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
{
	kfree(ref_node);
}
__cold void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
{
	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
	unsigned long flags;
	bool first_add = false;
	unsigned long delay = HZ;

	spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
	node->done = true;

	/* if we are mid-quiesce then do not delay */
	if (node->rsrc_data->quiesce)
		delay = 0;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (!node->done)
			break;
		list_del(&node->node);
		first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
	}
	spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);

	if (!first_add)
		return;

	if (ctx->submitter_task) {
		if (!task_work_add(ctx->submitter_task, &ctx->rsrc_put_tw,
				   ctx->notify_method))
			return;
	}
	mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}
static struct io_rsrc_node *io_rsrc_node_alloc(void)
{
	struct io_rsrc_node *ref_node;

	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
	if (!ref_node)
		return NULL;

	refcount_set(&ref_node->refs, 1);
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->rsrc_list);
	ref_node->done = false;
	return ref_node;
}
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill)
	__must_hold(&ctx->uring_lock)
{
	WARN_ON_ONCE(!ctx->rsrc_backup_node);
	WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);

	io_rsrc_refs_drop(ctx);

	if (data_to_kill) {
		struct io_rsrc_node *rsrc_node = ctx->rsrc_node;

		rsrc_node->rsrc_data = data_to_kill;
		spin_lock_irq(&ctx->rsrc_ref_lock);
		list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
		spin_unlock_irq(&ctx->rsrc_ref_lock);

		atomic_inc(&data_to_kill->refs);
		/* put master ref */
		io_rsrc_put_node(rsrc_node, 1);
		ctx->rsrc_node = NULL;
	}

	if (!ctx->rsrc_node) {
		ctx->rsrc_node = ctx->rsrc_backup_node;
		ctx->rsrc_backup_node = NULL;
		ctx->rsrc_node->cached_refs = 0;
	}
}
int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (ctx->rsrc_backup_node)
		return 0;
	ctx->rsrc_backup_node = io_rsrc_node_alloc();
	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
}
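/*
 * Sketch of how the two helpers above are used on the registration paths
 * (an editorial summary of the callers in this file, not new mechanism):
 *
 *	ret = io_rsrc_node_switch_start(ctx);	// pre-allocate the backup node
 *	if (ret)
 *		return ret;
 *	// ...update table slots, queueing stale entries on ctx->rsrc_node
 *	// via io_queue_rsrc_removal()...
 *	io_rsrc_node_switch(ctx, data);		// retire current node, install backup
 *
 * Once the retired node's refs drop to zero, io_rsrc_node_ref_zero() moves it
 * to the put list and the queued resources are released asynchronously.
 */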
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	int ret;

	/* As we may drop ->uring_lock, other task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	io_rsrc_node_switch(ctx, data);

	/* kill initial ref, already quiesced if zero */
	if (atomic_dec_and_test(&data->refs))
		return 0;

	data->quiesce = true;
	mutex_unlock(&ctx->uring_lock);
	do {
		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			atomic_inc(&data->refs);
			/* wait for all works potentially completing data->done */
			flush_delayed_work(&ctx->rsrc_put_work);
			reinit_completion(&data->done);
			mutex_lock(&ctx->uring_lock);
			break;
		}

		flush_delayed_work(&ctx->rsrc_put_work);
		ret = wait_for_completion_interruptible(&data->done);
		if (!ret) {
			mutex_lock(&ctx->uring_lock);
			if (atomic_read(&data->refs) <= 0)
				break;
			/*
			 * it has been revived by another thread while
			 * we were unlocked
			 */
			mutex_unlock(&ctx->uring_lock);
		}
	} while (ret >= 0);
	data->quiesce = false;

	return ret;
}
static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}
static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}
__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
				     rsrc_put_fn *do_put, u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = -ENOMEM;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->do_put = do_put;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}

	atomic_set(&data->refs, 1);
	init_completion(&data->done);
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;
	bool needs_switch = false;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			err = io_scm_file_account(ctx, file);
			if (err) {
				fput(file);
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, data);
	return done ? done : err;
}
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = ctx->dummy_ubuf;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, offset) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}
int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}
__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}
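/*
 * Userspace view of the extended registration handled above (hedged sketch,
 * hypothetical fd and counts; in practice this goes through liburing or a
 * raw syscall(2) wrapper for io_uring_register(2)):
 *
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= 64,
 *		.flags	= IORING_RSRC_REGISTER_SPARSE,
 *		.data	= 0,	// no fd array for a sparse set
 *		.tags	= 0,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES2, &rr, sizeof(rr));
 */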
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}
static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}
int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
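/*
 * Userspace view of the request handled above (hedged sketch, hypothetical
 * values): IORING_OP_FILES_UPDATE takes the fd array in ->addr, the count in
 * ->len and the destination slot in ->off; passing IORING_FILE_INDEX_ALLOC as
 * the offset asks the kernel to pick free slots and write them back into the
 * array, as implemented by io_files_update_with_index_alloc().
 *
 *	__s32 fds[2] = { sock_fd, file_fd };
 *	sqe->opcode	= IORING_OP_FILES_UPDATE;
 *	sqe->addr	= (unsigned long) fds;
 *	sqe->len	= 2;
 *	sqe->off	= IORING_FILE_INDEX_ALLOC;
 */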
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc)
{
	u64 *tag_slot = io_get_tag_slot(data, idx);
	struct io_rsrc_put *prsrc;

	prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
	if (!prsrc)
		return -ENOMEM;

	prsrc->tag = *tag_slot;
	*tag_slot = 0;
	prsrc->rsrc = rsrc;
	list_add(&prsrc->list, &node->rsrc_list);
	return 0;
}
void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		/* skip scm accounted files, they'll be freed by ->ring_sock */
		if (!file || io_file_need_scm(file))
			continue;

		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#endif
	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}
int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}
/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing. We account only files that can hold other
 * files because otherwise they can't form a loop and so are not interesting
 * for GC.
 */
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sk = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sk->sk_receive_queue;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;

	if (likely(!io_file_need_scm(file)))
		return 0;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
		__skb_unlink(skb, head);
	else
		skb = NULL;
	spin_unlock_irq(&head->lock);

	if (!skb) {
		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
		if (!fpl)
			return -ENOMEM;

		skb = alloc_skb(0, GFP_KERNEL);
		if (!skb) {
			kfree(fpl);
			return -ENOMEM;
		}

		fpl->user = get_uid(current_user());
		fpl->max = SCM_MAX_FD;
		fpl->count = 0;

		UNIXCB(skb).fp = fpl;
		skb->sk = sk;
		skb->scm_io_uring = 1;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
	}

	fpl = UNIXCB(skb).fp;
	fpl->fp[fpl->count++] = get_file(file);
	unix_inflight(fpl->user, file);
	skb_queue_head(head, skb);
	fput(file);
#endif
	return 0;
}
static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	if (!io_file_need_scm(file)) {
		fput(file);
		return;
	}

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = io_scm_file_account(ctx, file);
		if (ret) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	io_rsrc_node_switch(ctx, NULL);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}
static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}
/*
 * Not super efficient, but this is just a registration time. And we do cache
 * the last compound head, so generally we'll only do a full search if we don't
 * match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}
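/*
 * Worked example for the accounting below (editorial note, illustrative
 * numbers assuming 4K base pages and a 2MB compound page): a 64KB buffer
 * pinned from one THP yields nr_pages = 16, but the first compound head
 * encountered accounts page_size(hpage) >> PAGE_SHIFT = 512 pages; a second
 * buffer registered from the same huge page is caught by
 * headpage_already_acct() and adds nothing.
 */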
static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		struct file *file = vmas[0]->vm_file;

		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			if (vmas[i]->vm_file != file) {
				ret = -EINVAL;
				break;
			}
			if (!file)
				continue;
			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
		*npages = nr_pages;
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	kvfree(vmas);
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
				&nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			if (page_folio(pages[i]) != folio) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, it doesn't
			 * actually unpin them but drops all but one reference,
			 * which is usually put down by io_buffer_unmap().
			 * Note, needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}
static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	else
		io_rsrc_node_switch(ctx, NULL);
	return ret;
}
int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec, just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			/*
			 * Note, huge pages buffers consists of one large
			 * bvec entry and should always go this way. The other
			 * branch doesn't expect non PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}
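/*
 * Worked example of the fast-forward above (editorial note, illustrative
 * numbers: 4K pages, page-aligned buffer mapped as one bvec per page). For a
 * buf_addr that sits 3 pages + 100 bytes past imu->ubuf, offset = 12388 is
 * larger than the first bvec's 4096 bytes, so offset becomes 8292,
 * seg_skip = 1 + (8292 >> 12) = 3 and iov_offset = 8292 & ~PAGE_MASK = 100:
 * the iterator starts 100 bytes into the fourth bvec without walking the
 * first three segments.
 */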