return 0;
}
-static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
+static int io_uring_install_fd(struct file *file)
{
- int ret, fd;
+ int fd;
fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
if (fd < 0)
return fd;
-
- ret = __io_uring_add_tctx_node(ctx);
- if (ret) {
- put_unused_fd(fd);
- return ret;
- }
fd_install(fd, file);
return fd;
}
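
After this refactor, io_uring_install_fd() is a thin wrapper around get_unused_fd_flags(O_RDWR | O_CLOEXEC) plus fd_install(); the __io_uring_add_tctx_node() call it used to make moves into io_uring_create() below, where both install paths can share it. A small userspace sketch (raw syscalls, minimal error handling) confirming the close-on-exec behavior on the default, non-registered path:

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            struct io_uring_params p;
            int fd;

            memset(&p, 0, sizeof(p));
            fd = syscall(__NR_io_uring_setup, 8, &p);
            if (fd < 0)
                    return 1;
            /* installed via get_unused_fd_flags(O_RDWR | O_CLOEXEC),
             * so FD_CLOEXEC is already set on the returned fd */
            printf("cloexec set: %d\n", !!(fcntl(fd, F_GETFD) & FD_CLOEXEC));
            close(fd);
            return 0;
    }
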
struct io_uring_params __user *params)
{
struct io_ring_ctx *ctx;
+ struct io_uring_task *tctx;
struct file *file;
int ret;
entries = IORING_MAX_ENTRIES;
}
+ if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY) &&
+     !(p->flags & IORING_SETUP_NO_MMAP))
+ return -EINVAL;
+
/*
* Use twice as many entries for the CQ ring. It's possible for the
* application to drive a higher depth than the size of the SQ ring,
goto err;
}
+ ret = __io_uring_add_tctx_node(ctx);
+ if (ret)
+ goto err_fput;
+ tctx = current->io_uring;
+
/*
* Install ring fd as the very last thing, so we don't risk someone
* having closed it before we finish setup
*/
- ret = io_uring_install_fd(ctx, file);
- if (ret < 0) {
- /* fput will clean it up */
- fput(file);
- return ret;
- }
+ if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
+ ret = io_ring_add_registered_file(tctx, file, 0,
+ 				   IO_RINGFD_REG_MAX);
+ else
+ ret = io_uring_install_fd(file);
+ if (ret < 0)
+ goto err_fput;
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
io_ring_ctx_wait_and_kill(ctx);
return ret;
+err_fput:
+ fput(file);
+ return ret;
}
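
Taken together, io_uring_create() now either installs a normal fd or parks the ring file straight into the caller's registered-ring table, in which case the syscall's return value is an index into that table rather than a file descriptor. A minimal userspace sketch, assuming liburing 2.4+ (which knows both flags and hides the registered-index bookkeeping; how the ring memory for NO_MMAP is allocated can vary by liburing version):

    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
            struct io_uring ring;
            int ret;

            /* NO_MMAP: liburing allocates the ring memory and hands it
             * to the kernel; REGISTERED_FD_ONLY: io_uring_setup()
             * returns a registered ring index, no fd is installed. */
            ret = io_uring_queue_init(8, &ring,
                                      IORING_SETUP_NO_MMAP |
                                      IORING_SETUP_REGISTERED_FD_ONLY);
            if (ret < 0) {
                    fprintf(stderr, "queue_init: %d\n", ret);
                    return 1;
            }
            /* enters are expected to go through the registered index
             * (IORING_ENTER_REGISTERED_RING); there is no real fd. */
            io_uring_queue_exit(&ring);
            return 0;
    }
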
/*
IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
- IORING_SETUP_NO_MMAP))
+ IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY))
return -EINVAL;
return io_uring_create(entries, &p, params);
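
The new flag is only accepted together with IORING_SETUP_NO_MMAP, per the -EINVAL check added to io_uring_create() above. A raw-syscall probe of that constraint, assuming a uapi header new enough to define the flag (a kernel without this patch also returns EINVAL for the unknown flag, so this demonstrates the rule rather than detecting the feature):

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            struct io_uring_params p;

            memset(&p, 0, sizeof(p));
            p.flags = IORING_SETUP_REGISTERED_FD_ONLY; /* no NO_MMAP */
            if (syscall(__NR_io_uring_setup, 8, &p) < 0 && errno == EINVAL)
                    puts("REGISTERED_FD_ONLY rejected without NO_MMAP");
            return 0;
    }
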
int io_uring_alloc_task_context(struct task_struct *task,
struct io_ring_ctx *ctx);
+int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
+ int start, int end);
+
int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
}
}
-static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
+int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
int start, int end)
{
- struct file *file;
int offset;
-
for (offset = start; offset < end; offset++) {
offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
if (tctx->registered_rings[offset])
continue;
- file = fget(fd);
- if (!file) {
- return -EBADF;
- } else if (!io_is_uring_fops(file)) {
- fput(file);
- return -EOPNOTSUPP;
- }
tctx->registered_rings[offset] = file;
return offset;
}
-
return -EBUSY;
}
+static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
+ int start, int end)
+{
+ struct file *file;
+ int offset;
+
+ file = fget(fd);
+ if (!file) {
+ return -EBADF;
+ } else if (!io_is_uring_fops(file)) {
+ fput(file);
+ return -EOPNOTSUPP;
+ }
+ offset = io_ring_add_registered_file(tctx, file, start, end);
+ if (offset < 0)
+ fput(file);
+ return offset;
+}
+
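
The fget()-based wrapper above keeps the existing IORING_REGISTER_RING_FDS path working on top of the new helper. From userspace that path is typically reached through liburing's io_uring_register_ring_fd() (a sketch; the helper below and its name are illustrative, only the liburing call is real):

    #include <liburing.h>

    /* Pin an already-open ring into the per-task registered_rings[]
     * table so each io_uring_enter() can skip the fdget()/fdput(). */
    static int use_registered_ring(struct io_uring *ring)
    {
            int ret = io_uring_register_ring_fd(ring);

            /* liburing returns the count of fds registered (1) */
            return ret == 1 ? 0 : ret;
    }
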
/*
* Register a ring fd to avoid fdget/fdput for each io_uring_enter()
* invocation. User passes in an array of struct io_uring_rsrc_update