// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"
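/*
 * Request state for IORING_OP_MSG_RING, mirroring the SQE fields that
 * io_msg_ring_prep() reads below: user_data/len describe the CQE posted
 * to the target ring, src_fd/dst_fd describe a fixed file transfer, and
 * cmd selects between the two operations.
 */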
struct io_msg {
	struct file			*file;
	struct file			*src_file;
	u64 user_data;
	u32 len;
	u32 cmd;
	u32 src_fd;
	u32 dst_fd;
	u32 flags;
};
void io_msg_ring_cleanup(struct io_kiocb *req)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (WARN_ON_ONCE(!msg->src_file))
		return;

	fput(msg->src_file);
	msg->src_file = NULL;
}
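/*
 * IORING_MSG_DATA: post a CQE with msg->user_data and msg->len to the
 * target ring. The file transfer fields must be clear, and a ring that
 * is still IORING_SETUP_R_DISABLED cannot be signalled.
 */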
static int io_msg_ring_data(struct io_kiocb *req)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (msg->src_fd || msg->dst_fd || msg->flags)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;

	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
		return 0;

	return -EOVERFLOW;
}
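/* Pairs with io_double_lock_ctx(); only the target ring's lock is dropped. */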
static void io_double_unlock_ctx(struct io_ring_ctx *octx,
				 unsigned int issue_flags)
{
	mutex_unlock(&octx->uring_lock);
}
static int io_double_lock_ctx(struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}
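/*
 * Look up the sender's fixed file at index msg->src_fd and grab a
 * reference to it. array_index_nospec() clamps the index against
 * speculative out-of-bounds access, and FFS_MASK strips the flag bits
 * stowed in the low bits of file_ptr before it is used as a pointer.
 */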
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = NULL;
	unsigned long file_ptr;
	int idx = msg->src_fd;

	io_ring_submit_lock(ctx, issue_flags);
	if (likely(idx < ctx->nr_user_files)) {
		idx = array_index_nospec(idx, ctx->nr_user_files);
		file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr;
		file = (struct file *) (file_ptr & FFS_MASK);
		if (file)
			get_file(file);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return file;
}
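/*
 * IORING_MSG_SEND_FD: install one of the sender's fixed files into the
 * target ring's fixed file table at slot msg->dst_fd, then post a CQE so
 * the target learns about the new descriptor.
 */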
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *src_file = msg->src_file;
	int ret;

	if (msg->len)
		return -EINVAL;
	if (target_ctx == ctx)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;
	if (!src_file) {
		src_file = io_msg_grab_file(req, issue_flags);
		if (!src_file)
			return -EBADF;
		msg->src_file = src_file;
		req->flags |= REQ_F_NEED_CLEANUP;
	}

	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;
	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;
	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0, true))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx, issue_flags);
	return ret;
}
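/*
 * An illustrative userspace sketch (assuming liburing's
 * io_uring_prep_msg_ring() helper) of the SQE layout that the prep
 * handler below consumes:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// IORING_MSG_DATA: wake the ring behind target_fd with a CQE
 *	// carrying user_data in cqe->user_data and len in cqe->res.
 *	io_uring_prep_msg_ring(sqe, target_fd, len, user_data, 0);
 */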
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
		return -EINVAL;

	return 0;
}
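/*
 * Common entry point for IORING_OP_MSG_RING. The fd in the SQE must be
 * another io_uring instance; -EAGAIN from the target-lock trylock path
 * is passed back up so the request is retried from io-wq, where it is
 * safe to block on the target ring's mutex.
 */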
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret == -EAGAIN)
		return -EAGAIN;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}