// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

/* Trampoline: run the driver's deferred callback in task-work context */
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	io_req_task_work_add(req);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
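
/*
 * Illustrative sketch, not part of this file: a driver that gets its
 * completion signal in a context where it cannot finish the request
 * directly (e.g. hard IRQ) can punt to task work.  The mydrv_* names
 * are hypothetical.
 *
 *	static void mydrv_cmd_tw_cb(struct io_uring_cmd *ioucmd,
 *				    unsigned issue_flags)
 *	{
 *		// runs in the submitting task's context
 *		io_uring_cmd_done(ioucmd, 0, 0, issue_flags);
 *	}
 *
 *	static void mydrv_on_hw_irq(struct io_uring_cmd *ioucmd)
 *	{
 *		io_uring_cmd_complete_in_task(ioucmd, mydrv_cmd_tw_cb);
 *	}
 */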

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->extra1 = extra1;
	req->extra2 = extra2;
	req->flags |= REQ_F_CQE32_INIT;
}
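
/*
 * Note: with IORING_SETUP_CQE32 every CQE is 32 bytes and carries two
 * extra u64 values (big_cqe[0]/big_cqe[1] in the uapi); extra1/extra2
 * above are what eventually land there.
 */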

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
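
/*
 * Note for callers of io_uring_cmd_done(): 'ret' becomes cqe->res and,
 * on CQE32 rings, 'res2' is surfaced through the first big_cqe word.
 * 'issue_flags' must be the flags the surrounding callback was invoked
 * with, since they tell the completion path whether the ring's
 * uring_lock is already held.
 */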

int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	size_t cmd_size;

	BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
	BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);

	cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);

	memcpy(req->async_data, ioucmd->cmd, cmd_size);
	ioucmd->cmd = req->async_data;
	return 0;
}
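
/*
 * The copy in io_uring_cmd_prep_async() is what makes -EAGAIN retries
 * safe: sqe->cmd (16 bytes, or 80 with IORING_SETUP_SQE128) lives in
 * the SQ ring, which userspace may reuse as soon as submission
 * returns, so a command that has to be reissued needs a stable copy
 * in ->async_data.
 */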

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->cmd = sqe->cmd;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}
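
/*
 * For illustration, the matching userspace submission via raw SQE
 * fields (liburing's io_uring_get_sqe(); MYDRV_CMD_ECHO and the
 * payload are hypothetical, and the ring is assumed to be created
 * with IORING_SETUP_SQE128 if the payload needs the large cmd area):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = dev_fd;
 *	sqe->cmd_op = MYDRV_CMD_ECHO;
 *	memcpy(sqe->cmd, &payload, sizeof(payload));
 *	// optional: sqe->uring_cmd_flags = IORING_URING_CMD_FIXED,
 *	// plus sqe->buf_index, to use a registered buffer
 */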

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		WRITE_ONCE(ioucmd->cookie, NULL);
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}
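
/*
 * For reference, a minimal ->uring_cmd() handler could look like the
 * sketch below (mydrv_* names hypothetical).  Returning -EIOCBQUEUED
 * tells io_uring_cmd() above that the request will be completed later
 * via io_uring_cmd_done(); any other return value completes it inline.
 *
 *	static int mydrv_uring_cmd(struct io_uring_cmd *ioucmd,
 *				   unsigned int issue_flags)
 *	{
 *		switch (ioucmd->cmd_op) {
 *		case MYDRV_CMD_ECHO:
 *			mydrv_queue_hw_work(ioucmd);	// completes later
 *			return -EIOCBQUEUED;
 *		default:
 *			return -ENOTTY;
 *		}
 *	}
 */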

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
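
/*
 * Illustrative use from a driver's ->uring_cmd() handler (hypothetical
 * names; 'ubuf' and 'len' would come out of the driver-private command
 * payload, and IORING_URING_CMD_FIXED must have been set at prep time
 * so that req->imu points at a registered buffer):
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(ubuf, len, WRITE, &iter, ioucmd);
 *	if (ret)
 *		return ret;
 *	// 'iter' now spans the registered buffer's pages
 */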