// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"

/* Per-request command state, decoded from the SQE in io_msg_ring_prep() */
struct io_msg {
	struct file			*file;
	struct file			*src_file;
	u64 user_data;
	u32 len;
	u32 cmd;
	u32 src_fd;
	u32 dst_fd;
	u32 flags;
};

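/*
 * Drop the reference taken by io_msg_grab_file() if the request is torn
 * down before the file was installed into the target ring.
 */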
void io_msg_ring_cleanup(struct io_kiocb *req)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (WARN_ON_ONCE(!msg->src_file))
		return;

	fput(msg->src_file);
	msg->src_file = NULL;
}

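/*
 * IORING_MSG_DATA: post a CQE carrying the caller-chosen user_data and
 * len into the target ring's CQ. No file transfer is involved, so the
 * fd and flags fields must be unset.
 */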
static int io_msg_ring_data(struct io_kiocb *req)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (msg->src_fd || msg->dst_fd || msg->flags)
		return -EINVAL;

	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
		return 0;

	return -EOVERFLOW;
}

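/* Release the target ctx lock taken by io_double_lock_ctx(). */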
static void io_double_unlock_ctx(struct io_ring_ctx *octx,
				 unsigned int issue_flags)
{
	mutex_unlock(&octx->uring_lock);
}

static int io_double_lock_ctx(struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}

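/*
 * Look up src_fd in the source ring's fixed file table and take a
 * reference so the file stays alive until it is installed in the target
 * (or cleanup runs). Returns NULL if the slot is out of range or empty.
 */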
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = NULL;
	unsigned long file_ptr;
	int idx = msg->src_fd;

	io_ring_submit_lock(ctx, issue_flags);
	if (likely(idx < ctx->nr_user_files)) {
		idx = array_index_nospec(idx, ctx->nr_user_files);
		file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr;
		file = (struct file *) (file_ptr & FFS_MASK);
		if (file)
			get_file(file);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return file;
}

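/*
 * IORING_MSG_SEND_FD: install a fixed file from the source ring into the
 * target ring's fixed file table at dst_fd, then notify the target with
 * a CQE unless IORING_MSG_RING_CQE_SKIP was requested. Needs the target
 * ctx lock; see io_double_lock_ctx() for how ordering against the source
 * ctx lock is handled.
 */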
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *src_file = msg->src_file;
	int ret;

	if (target_ctx == ctx)
		return -EINVAL;

	if (!src_file) {
		src_file = io_msg_grab_file(req, issue_flags);
		if (!src_file)
			return -EBADF;
		msg->src_file = src_file;
		req->flags |= REQ_F_NEED_CLEANUP;
	}

	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;

	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;

	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx, issue_flags);
	return ret;
}

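/* Decode the IORING_OP_MSG_RING SQE into the per-request io_msg. */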
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
		return -EINVAL;

	return 0;
}

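/*
 * Entry point for IORING_OP_MSG_RING: req->file must refer to another
 * io_uring instance; dispatch on the sub-command stored in sqe->addr.
 */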
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
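
/*
 * Userspace usage sketch (not part of this file), assuming liburing's
 * io_uring_prep_msg_ring() helper is available and target_ring_fd is a
 * hypothetical fd referring to the receiving ring:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// IORING_MSG_DATA: deliver user_data 0xcafe / len 42 to the target
 *	io_uring_prep_msg_ring(sqe, target_ring_fd, 42, 0xcafe, 0);
 *	io_uring_submit(&ring);
 *
 * The receiver then observes a CQE on the target ring with
 * cqe->user_data == 0xcafe and cqe->res == 42.
 */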