diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index e78b6f9..e50de0b 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -4,10 +4,12 @@
 #include <linux/file.h>
 #include <linux/io_uring.h>
 #include <linux/security.h>
+#include <linux/nospec.h>
 
 #include <uapi/linux/io_uring.h>
 
 #include "io_uring.h"
+#include "rsrc.h"
 #include "uring_cmd.h"
 
 static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
@@ -50,7 +52,11 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
        io_req_set_res(req, ret, 0);
        if (req->ctx->flags & IORING_SETUP_CQE32)
                io_req_set_cqe32_extra(req, res2, 0);
-       __io_req_complete(req, 0);
+       if (req->ctx->flags & IORING_SETUP_IOPOLL)
+               /* order with io_iopoll_req_issued() checking ->iopoll_completed */
+               smp_store_release(&req->iopoll_completed, 1);
+       else
+               __io_req_complete(req, 0);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
@@ -72,8 +78,24 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 
-       if (sqe->rw_flags || sqe->__pad1)
+       if (sqe->__pad1)
                return -EINVAL;
+
+       ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
+       if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
+               return -EINVAL;
+
+       if (ioucmd->flags & IORING_URING_CMD_FIXED) {
+               struct io_ring_ctx *ctx = req->ctx;
+               u16 index;
+
+               req->buf_index = READ_ONCE(sqe->buf_index);
+               if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+                       return -EFAULT;
+               index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+               req->imu = ctx->user_bufs[index];
+               io_req_set_rsrc_node(req, ctx, 0);
+       }
        ioucmd->cmd = sqe->cmd;
        ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
        return 0;
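
Not part of the patch, but for context on the submission side: a command that wants the fixed-buffer path sets the new uring_cmd_flags field and reuses buf_index from a prior buffer registration, which is exactly what io_uring_cmd_prep() above parses. A rough liburing-based sketch follows; the device fd, the cmd_op value and the payload layout are driver-specific placeholders, not anything defined by this patch.

/* Illustrative userspace sketch; cmd_op 0 and the payload layout are
 * placeholders, real drivers (e.g. nvme passthrough) define their own. */
#include <liburing.h>
#include <string.h>
#include <errno.h>
#include <sys/uio.h>

static int submit_fixed_cmd(struct io_uring *ring, int dev_fd,
                            void *buf, size_t len)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct io_uring_sqe *sqe;
        int ret;

        /* registration is what populates ctx->user_bufs[] on the kernel side */
        ret = io_uring_register_buffers(ring, &iov, 1);
        if (ret)
                return ret;

        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -EBUSY;
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_URING_CMD;
        sqe->fd = dev_fd;
        sqe->cmd_op = 0;                        /* driver-defined, placeholder */
        sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
        sqe->buf_index = 0;                     /* slot chosen at registration */
        /* driver-specific payload would go into sqe->cmd[] (SQE128 if large) */

        return io_uring_submit(ring);
}

An unregistered buf_index fails prep with -EFAULT, as in the hunk above.
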
@@ -97,8 +119,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
                issue_flags |= IO_URING_F_SQE128;
        if (ctx->flags & IORING_SETUP_CQE32)
                issue_flags |= IO_URING_F_CQE32;
-       if (ctx->flags & IORING_SETUP_IOPOLL)
+       if (ctx->flags & IORING_SETUP_IOPOLL) {
                issue_flags |= IO_URING_F_IOPOLL;
+               req->iopoll_completed = 0;
+               WRITE_ONCE(ioucmd->cookie, NULL);
+       }
 
        if (req_has_async_data(req))
                ioucmd->cmd = req->async_data;
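
The barrier pairing matters here: io_req_set_res() fills in the CQE fields, the smp_store_release() in io_uring_cmd_done() publishes them, and the iopoll reaper only consumes the request after an acquire load of ->iopoll_completed, which is why io_uring_cmd() clears the flag before issue. Below is a stand-alone sketch of that publish/consume pattern, using hypothetical demo_* names rather than the actual io_uring structures.

#include <asm/barrier.h>
#include <linux/errno.h>

struct demo_req {
        int     result;         /* payload filled in before publication */
        int     done;           /* publication flag, 0 = still in flight */
};

/* completion side: write the payload, then publish it */
static void demo_complete(struct demo_req *req, int res)
{
        req->result = res;
        /* pairs with the smp_load_acquire() in demo_reap() */
        smp_store_release(&req->done, 1);
}

/* polling side: only look at the payload after observing the flag */
static int demo_reap(struct demo_req *req)
{
        if (!smp_load_acquire(&req->done))
                return -EAGAIN;         /* not complete yet, keep polling */
        return req->result;             /* ordered after the store above */
}
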
@@ -122,3 +147,12 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 
        return IOU_ISSUE_SKIP_COMPLETE;
 }
+
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+                             struct iov_iter *iter, void *ioucmd)
+{
+       struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+
+       return io_import_fixed(rw, iter, req->imu, ubuf, len);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
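
On the driver side (not part of this file), a ->uring_cmd() handler would pick between the new fixed-buffer import and a plain user-pointer import based on the flag parsed in io_uring_cmd_prep(); nvme passthrough is the in-tree user this export was written for. A hedged sketch with hypothetical my_dev_* naming, where import_single_range() merely stands in for however the driver normally builds its iov_iter:

#include <linux/io_uring.h>
#include <linux/uio.h>
#include <linux/kernel.h>

/* Hypothetical driver helper; only io_uring_cmd_import_fixed() is the
 * interface exported above, the rest is illustrative. */
static int my_dev_import(struct io_uring_cmd *ioucmd, u64 ubuf,
                         unsigned long len, int rw, struct iov_iter *iter,
                         struct iovec *fast_iov)
{
        if (ioucmd->flags & IORING_URING_CMD_FIXED)
                /* uses req->imu, resolved from buf_index at prep time */
                return io_uring_cmd_import_fixed(ubuf, len, rw, iter, ioucmd);

        /* regular copy-in path for commands without the flag */
        return import_single_range(rw, u64_to_user_ptr(ubuf), len,
                                   fast_iov, iter);
}
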