nvme: wire up fixed buffer support for nvme passthrough
Author:     Kanchan Joshi <joshi.k@samsung.com>
AuthorDate: Fri, 30 Sep 2022 06:27:49 +0000 (11:57 +0530)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Fri, 30 Sep 2022 13:51:13 +0000 (07:51 -0600)
If io_uring sends a passthrough command with the IORING_URING_CMD_FIXED flag,
use the pre-registered buffer for the I/O (non-vectored variant only). Pass the
buffer address/length to io_uring and get a bvec iterator for that range. Next,
hand this bvec iterator to the block layer and obtain a bio/request for
subsequent processing.

Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
Link: https://lore.kernel.org/r/20220930062749.152261-13-anuj20.g@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
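
For context (not part of the patch), below is a minimal userspace sketch of the
path this enables: register a buffer with io_uring, then issue an NVMe read
passthrough command with IORING_URING_CMD_FIXED so the driver imports the
pre-registered buffer instead of mapping user pages per I/O. It assumes liburing
and a kernel with this series applied; the device path, nsid, LBA and block size
are placeholders and error handling is abbreviated.

/*
 * Hypothetical sketch: NVMe read passthrough over io_uring using a
 * pre-registered (fixed) buffer. /dev/ng0n1, nsid=1 and the 512B LBA
 * size are placeholders for illustration only.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

#define BUF_LEN			4096
#define NVME_OPCODE_READ	0x02	/* NVM command set: Read */

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	struct iovec iov;
	void *buf;
	int fd, ret;

	/* NVMe passthrough needs the big SQE/CQE rings */
	ret = io_uring_queue_init(8, &ring,
				  IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
	if (ret)
		return 1;

	fd = open("/dev/ng0n1", O_RDONLY);
	if (fd < 0)
		return 1;

	if (posix_memalign(&buf, 4096, BUF_LEN))
		return 1;

	/* register the buffer once; the kernel pins it up front */
	iov.iov_base = buf;
	iov.iov_len = BUF_LEN;
	ret = io_uring_register_buffers(&ring, &iov, 1);
	if (ret)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, 2 * sizeof(*sqe));	/* SQE128 ring: each slot is 128 bytes */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;
	/* use registered buffer 0 instead of mapping user pages for this I/O */
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
	sqe->buf_index = 0;

	/* the NVMe passthrough command lives in the SQE's extended cmd area */
	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode = NVME_OPCODE_READ;
	cmd->nsid = 1;				/* placeholder namespace id */
	cmd->addr = (__u64)(uintptr_t)buf;	/* must fall inside the registered buffer */
	cmd->data_len = BUF_LEN;
	cmd->cdw10 = 0;				/* starting LBA (low 32 bits) */
	cmd->cdw12 = (BUF_LEN / 512) - 1;	/* 0-based LBA count, assumes 512B LBA format */

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret || cqe->res)
		fprintf(stderr, "passthrough failed: %d\n", ret ? ret : cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	free(buf);
	close(fd);
	return 0;
}

Because the buffer is registered up front, the driver resolves it to a bvec
iterator via io_uring_cmd_import_fixed() and skips the per-I/O get_user_pages
cost that the blk_rq_map_user_io() path pays.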
drivers/nvme/host/ioctl.c

index 7a41caa..81f5550 100644
@@ -95,8 +95,22 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
        void *meta = NULL;
        int ret;
 
-       ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer), bufflen,
-                       GFP_KERNEL, vec, 0, 0, rq_data_dir(req));
+       if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+               struct iov_iter iter;
+
+               /* fixedbufs is only for non-vectored io */
+               if (WARN_ON_ONCE(vec))
+                       return -EINVAL;
+               ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+                               rq_data_dir(req), &iter, ioucmd);
+               if (ret < 0)
+                       goto out;
+               ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+       } else {
+               ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
+                               bufflen, GFP_KERNEL, vec, 0, 0,
+                               rq_data_dir(req));
+       }
 
        if (ret)
                goto out;