nvme: allow 64-bit results in passthru commands
author    Marta Rybczynska <mrybczyn@kalray.eu>
          Tue, 24 Sep 2019 13:14:52 +0000 (15:14 +0200)
committer Sagi Grimberg <sagi@grimberg.me>
          Wed, 25 Sep 2019 19:53:27 +0000 (12:53 -0700)
It is not possible to get 64-bit results from the passthru commands:
the result field of struct nvme_passthru_cmd is only 32 bits wide, which
prevents reading the 64-bit Capabilities (CAP) property value.

As a result, it is not possible to implement IOL's NVMe Conformance
Test 4.3, Case 1, for Fabrics targets [1] (page 123).

This issue has already been discussed [2], but without a solution.

This patch solves the problem by adding two new ioctls,
NVME_IOCTL_ADMIN64_CMD and NVME_IOCTL_IO64_CMD, which take a new
passthru structure, struct nvme_passthru_cmd64, with a 64-bit result
field. The older ioctls stay unchanged.

[1] https://www.iol.unh.edu/sites/default/files/testsuites/nvme/UNH-IOL_NVMe_Conformance_Test_Suite_v11.0.pdf
[2] http://lists.infradead.org/pipermail/linux-nvme/2018-June/018791.html
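
For illustration, a minimal userspace sketch of the new interface,
reading the CAP property from a Fabrics controller (CAP_SYS_ADMIN is
required). The fabrics encoding used here (opcode 0x7f, Property Get
fctype 0x04 carried in the nsid dword, attribute in cdw10, property
offset in cdw11) follows nvme-cli's get-property and is an assumption
for illustration, not part of this patch:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/nvme_ioctl.h>

	/* Sketch: read the 64-bit CAP property (offset 0x0) through the
	 * new NVME_IOCTL_ADMIN64_CMD; fd is an open controller character
	 * device such as /dev/nvme0 (path assumed). */
	static int read_cap(int fd, uint64_t *cap)
	{
		struct nvme_passthru_cmd64 cmd;
		int err;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opcode = 0x7f;	/* fabrics command set */
		cmd.nsid   = 0x04;	/* fctype: Property Get (overlays nsid) */
		cmd.cdw10  = 1;		/* attrib: 8-byte (64-bit) property */
		cmd.cdw11  = 0x0;	/* property offset: CAP */

		err = ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd);
		if (err)	/* <0: ioctl error, >0: NVMe status */
			return err;

		*cap = cmd.result;	/* full 64 bits, no truncation */
		return 0;
	}

With the older NVME_IOCTL_ADMIN_CMD, the same command could return only
the low 32 bits of CAP.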

Signed-off-by: Marta Rybczynska <marta.rybczynska@kalray.eu>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
drivers/nvme/host/core.c
include/uapi/linux/nvme_ioctl.h

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index adff57ea149edee696ebb13027e7064c6729264b..87ed437a46b84ac4c47ee9adbc26a945c797b711 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -850,7 +850,7 @@ out:
 static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, void __user *ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-               u32 meta_seed, u32 *result, unsigned timeout)
+               u32 meta_seed, u64 *result, unsigned timeout)
 {
        bool write = nvme_is_write(cmd);
        struct nvme_ns *ns = q->queuedata;
@@ -891,7 +891,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
        else
                ret = nvme_req(req)->status;
        if (result)
-               *result = le32_to_cpu(nvme_req(req)->result.u32);
+               *result = le64_to_cpu(nvme_req(req)->result.u64);
        if (meta && !ret && !write) {
                if (copy_to_user(meta_buffer, meta, meta_len))
                        ret = -EFAULT;
@@ -1338,6 +1338,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        struct nvme_command c;
        unsigned timeout = 0;
        u32 effects;
+       u64 result;
+       int status;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+               return -EFAULT;
+       if (cmd.flags)
+               return -EINVAL;
+
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = cmd.opcode;
+       c.common.flags = cmd.flags;
+       c.common.nsid = cpu_to_le32(cmd.nsid);
+       c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+       c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+       c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+       c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+       c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+       c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+       c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+       c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+
+       if (cmd.timeout_ms)
+               timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+       effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
+       status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+                       (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
+                       (void __user *)(uintptr_t)cmd.metadata,
+                       cmd.metadata_len, 0, &result, timeout);
+       nvme_passthru_end(ctrl, effects);
+
+       if (status >= 0) {
+               if (put_user(result, &ucmd->result))
+                       return -EFAULT;
+       }
+
+       return status;
+}
+
+static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+                       struct nvme_passthru_cmd64 __user *ucmd)
+{
+       struct nvme_passthru_cmd64 cmd;
+       struct nvme_command c;
+       unsigned timeout = 0;
+       u32 effects;
        int status;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -1408,6 +1456,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
                srcu_read_unlock(&head->srcu, idx);
 }
 
+static bool is_ctrl_ioctl(unsigned int cmd)
+{
+       if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
+               return true;
+       if (is_sed_ioctl(cmd))
+               return true;
+       return false;
+}
+
+static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
+                                 void __user *argp,
+                                 struct nvme_ns_head *head,
+                                 int srcu_idx)
+{
+       struct nvme_ctrl *ctrl = ns->ctrl;
+       int ret;
+
+       nvme_get_ctrl(ns->ctrl);
+       nvme_put_ns_from_disk(head, srcu_idx);
+
+       switch (cmd) {
+       case NVME_IOCTL_ADMIN_CMD:
+               ret = nvme_user_cmd(ctrl, NULL, argp);
+               break;
+       case NVME_IOCTL_ADMIN64_CMD:
+               ret = nvme_user_cmd64(ctrl, NULL, argp);
+               break;
+       default:
+               ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
+               break;
+       }
+       nvme_put_ctrl(ctrl);
+       return ret;
+}
+
 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
 {
@@ -1425,20 +1508,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
         * seperately and drop the ns SRCU reference early.  This avoids a
         * deadlock when deleting namespaces using the passthrough interface.
         */
-       if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
-               struct nvme_ctrl *ctrl = ns->ctrl;
-
-               nvme_get_ctrl(ns->ctrl);
-               nvme_put_ns_from_disk(head, srcu_idx);
-
-               if (cmd == NVME_IOCTL_ADMIN_CMD)
-                       ret = nvme_user_cmd(ctrl, NULL, argp);
-               else
-                       ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
-
-               nvme_put_ctrl(ctrl);
-               return ret;
-       }
+       if (is_ctrl_ioctl(cmd))
+               return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
 
        switch (cmd) {
        case NVME_IOCTL_ID:
@@ -1451,6 +1522,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
        case NVME_IOCTL_SUBMIT_IO:
                ret = nvme_submit_io(ns, argp);
                break;
+       case NVME_IOCTL_IO64_CMD:
+               ret = nvme_user_cmd64(ns->ctrl, ns, argp);
+               break;
        default:
                if (ns->ndev)
                        ret = nvme_nvm_ioctl(ns, cmd, arg);
@@ -2852,6 +2926,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp);
+       case NVME_IOCTL_ADMIN64_CMD:
+               return nvme_user_cmd64(ctrl, NULL, argp);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index 1c215ea1798e6039f3c202faf80e8d98f82ed155..e168dc59e9a0d6c4b8b79242848c7db0a0b93337 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -45,6 +45,27 @@ struct nvme_passthru_cmd {
        __u32   result;
 };
 
+struct nvme_passthru_cmd64 {
+       __u8    opcode;
+       __u8    flags;
+       __u16   rsvd1;
+       __u32   nsid;
+       __u32   cdw2;
+       __u32   cdw3;
+       __u64   metadata;
+       __u64   addr;
+       __u32   metadata_len;
+       __u32   data_len;
+       __u32   cdw10;
+       __u32   cdw11;
+       __u32   cdw12;
+       __u32   cdw13;
+       __u32   cdw14;
+       __u32   cdw15;
+       __u32   timeout_ms;
+       __u64   result;
+};
+
 #define nvme_admin_cmd nvme_passthru_cmd
 
 #define NVME_IOCTL_ID          _IO('N', 0x40)
@@ -54,5 +75,7 @@ struct nvme_passthru_cmd {
 #define NVME_IOCTL_RESET       _IO('N', 0x44)
 #define NVME_IOCTL_SUBSYS_RESET        _IO('N', 0x45)
 #define NVME_IOCTL_RESCAN      _IO('N', 0x46)
+#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
+#define NVME_IOCTL_IO64_CMD    _IOWR('N', 0x48, struct nvme_passthru_cmd64)
 
 #endif /* _UAPI_LINUX_NVME_IOCTL_H */
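
Similarly, the new I/O variant can be exercised on a namespace block
device. A hedged sketch (the device path /dev/nvme0n1, namespace ID 1,
and a 512-byte LBA format are assumptions for illustration; CAP_SYS_ADMIN
is required):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		struct nvme_passthru_cmd64 cmd;
		uint8_t buf[512];	/* one block, assuming 512-byte LBAs */
		int fd, err;

		fd = open("/dev/nvme0n1", O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&cmd, 0, sizeof(cmd));
		cmd.opcode   = 0x02;		/* NVMe Read */
		cmd.nsid     = 1;		/* assumed namespace ID */
		cmd.addr     = (uintptr_t)buf;
		cmd.data_len = sizeof(buf);
		cmd.cdw10    = 0;		/* starting LBA, low dword */
		cmd.cdw11    = 0;		/* starting LBA, high dword */
		cmd.cdw12    = 0;		/* NLB: 0 means one block */

		err = ioctl(fd, NVME_IOCTL_IO64_CMD, &cmd);
		if (err) {	/* <0: ioctl error, >0: NVMe status */
			fprintf(stderr, "read failed: %d\n", err);
			close(fd);
			return 1;
		}

		printf("completion result: 0x%llx\n",
		       (unsigned long long)cmd.result);
		close(fd);
		return 0;
	}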