// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;
	data.free_work = io_wq_free_work;
	data.do_work = io_wq_submit_work;

	/* Do QD, or 4 * CPUS, whatever is smallest */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

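/*
 * Final teardown of a task's io_uring state. By this point all ring
 * registrations and the io-wq are expected to be gone already; only the
 * inflight counter and the structure itself remain to be freed.
 */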
void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;

	WARN_ON_ONCE(!xa_empty(&tctx->xa));
	WARN_ON_ONCE(tctx->io_wq);
	WARN_ON_ONCE(tctx->cached_refs);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

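/*
 * Allocate and initialise the per-task io_uring context: inflight counter,
 * io-wq offload queue, xarray of ring registrations and task_work plumbing.
 * Attached to @task on success.
 */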
__cold int io_uring_alloc_task_context(struct task_struct *task,
				       struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_cancel, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	init_llist_head(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

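/*
 * Make sure the calling task has a task context and is linked to @ctx via
 * an io_tctx_node; both are allocated lazily on first use of this ring.
 */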
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;

		tctx = current->io_uring;
		if (ctx->iowq_limits_set) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				return ret;
		}
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
					node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	return 0;
}

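/*
 * Submission-path variant of the above: with IORING_SETUP_SINGLE_ISSUER,
 * only the original submitter task is allowed to attach; anybody else
 * gets -EEXIST.
 */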
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
{
	int ret;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && ctx->submitter_task != current)
		return -EEXIST;

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		return ret;

	current->io_uring->last = ctx;
	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}

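/*
 * Detach @tctx from every ring it is registered with and, if present,
 * shut down its io-wq.
 */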
__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

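/* Drop every ring fd the current task has registered. */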
void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}

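/*
 * Find a free slot in [start, end) and stash @file there. Returns the
 * chosen index, or -EBUSY if the whole range is occupied.
 */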
int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end)
{
	int offset;
	for (offset = start; offset < end; offset++) {
		offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[offset])
			continue;

		tctx->registered_rings[offset] = file;
		return offset;
	}
	return -EBUSY;
}

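/*
 * Take a reference to the fd, verify that it really is an io_uring file,
 * and park it in a free registered_rings slot; the reference is dropped
 * again if no slot could be found.
 */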
static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	file = fget(fd);
	if (!file) {
		return -EBADF;
	} else if (!io_is_uring_fops(file)) {
		fput(file);
		return -EOPNOTSUPP;
	}
	offset = io_ring_add_registered_file(tctx, file, start, end);
	if (offset < 0)
		fput(file);
	return offset;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

	mutex_unlock(&ctx->uring_lock);
	ret = __io_uring_add_tctx_node(ctx);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}

		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	return i ? i : ret;
}

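/*
 * Illustrative userspace sketch (not part of this file): registration is
 * driven through the io_uring_register(2) syscall with
 * IORING_REGISTER_RING_FDS, roughly along these lines; "ring_fd" is an
 * assumed, already set up io_uring fd.
 *
 *	struct io_uring_rsrc_update upd = {
 *		.offset	= -1U,		// let the kernel pick a free slot
 *		.data	= ring_fd,	// the ring fd to register
 *	};
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_RING_FDS, &upd, 1);
 *
 * On success, ret is the number of entries processed and upd.offset holds
 * the registered index, which can then be passed to io_uring_enter(2)
 * together with IORING_ENTER_REGISTERED_RING instead of the raw fd.
 */
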
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}