// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring.h>

#include "nvme.h"

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

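/*
 * Bounce buffer for user metadata: allocate a kernel copy, fill it from
 * userspace for writes, and attach it to the bio as a single integrity
 * segment.  The buffer is returned so the caller can copy it back out and
 * free it once the command has completed.
 */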
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
		void *meta, unsigned len, int ret)
{
	if (!ret && req_op(req) == REQ_OP_DRV_IN &&
	    copy_to_user(ubuf, meta, len))
		ret = -EFAULT;
	kfree(meta);
	return ret;
}

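/*
 * Build a passthrough request: allocate it, map the user data buffer (flat
 * or iovec-based for the vectored variants), and optionally attach a user
 * metadata buffer.  On success *metap points to the kernel metadata bounce
 * buffer, if one was set up.
 */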
static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, void **metap, unsigned timeout, bool vec,
		blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);

	if (timeout)
		req->timeout = timeout;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		if (!vec)
			ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		else {
			struct iovec fast_iov[UIO_FASTIOV];
			struct iovec *iov = fast_iov;
			struct iov_iter iter;

			ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
					UIO_FASTIOV, &iov, &iter);
			if (ret < 0)
				goto out;
			ret = blk_rq_map_user_iov(q, req, NULL, &iter,
					GFP_KERNEL);
			kfree(iov);
		}
		if (ret)
			goto out;
		bio = req->bio;
		if (bdev)
			bio_set_dev(bio, bdev);
		if (bdev && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
			*metap = meta;
		}
	}

	return req;

out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ERR_PTR(ret);
}

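/*
 * Synchronous passthrough path shared by the ioctl handlers: allocate and
 * execute the request, copy back the completion result and any metadata,
 * then unmap the user buffer and free the request.
 */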
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
	struct request *req;
	void *meta = NULL;
	struct bio *bio;
	int ret;

	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
			meta_len, meta_seed, &meta, timeout, vec, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	bio = req->bio;

	ret = nvme_execute_passthru_rq(req);

	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta)
		ret = nvme_finish_user_metadata(req, meta_buffer, meta,
						meta_len, ret);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);
	return ret;
}

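/*
 * NVME_IOCTL_SUBMIT_IO: translate a struct nvme_user_io into a
 * read/write/compare command and submit it synchronously.
 */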
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    ns->ms == sizeof(struct t10_pi_tuple)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			nvme_to_user_ptr(io.addr), length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
			false);
}

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
					struct nvme_ns *ns, __u32 nsid)
{
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u)"
			" of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}

	return true;
}

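/*
 * Generic passthrough for a struct nvme_passthru_cmd.  The result is
 * reported through the 32-bit result field; see nvme_user_cmd64() for the
 * 64-bit variant.
 */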
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout, false);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &cmd.result, timeout, vec);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

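/*
 * io_uring command (uring_cmd) passthrough support.  SQE fields are
 * snapshotted into struct nvme_uring_data with READ_ONCE() before they are
 * used to build the command.
 */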
struct nvme_uring_data {
	__u64	metadata;
	__u64	addr;
	__u32	data_len;
	__u32	metadata_len;
	__u32	timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
	u32 meta_len;
	union {
		struct bio *bio;
		struct request *req;
	};
	void *meta; /* kernel-resident buffer */
	void __user *meta_buffer;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

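/*
 * Task-work callback: runs in the submitter's task context to copy out
 * metadata, unmap the user buffer, free the request and post the CQE via
 * io_uring_cmd_done().
 */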
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = pdu->req;
	struct bio *bio = req->bio;
	int status;
	u64 result;

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		status = -EINTR;
	else
		status = nvme_req(req)->status;

	result = le64_to_cpu(nvme_req(req)->result.u64);

	if (pdu->meta)
		status = nvme_finish_user_metadata(req, pdu->meta_buffer,
					pdu->meta, pdu->meta_len, status);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);

	io_uring_cmd_done(ioucmd, status, result);
}

static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	/* extract bio before reusing the same field for request */
	struct bio *bio = pdu->bio;

	pdu->req = req;
	req->bio = bio;
	/* this takes care of moving rest of completion-work to task context */
	io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
}

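/*
 * Build and queue an asynchronous passthrough command for io_uring.  The
 * request is issued with blk_execute_rq_nowait() and completed from
 * nvme_uring_cmd_end_io() / nvme_uring_task_cb().
 */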
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	const struct nvme_uring_cmd *cmd = ioucmd->cmd;
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	struct nvme_uring_data d;
	struct nvme_command c;
	struct request *req;
	blk_opf_t rq_flags = 0;
	blk_mq_req_flags_t blk_flags = 0;
	void *meta = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;

	c.common.command_id = 0;
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	c.common.metadata = 0;
	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags = REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}

	req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
			d.data_len, nvme_to_user_ptr(d.metadata),
			d.metadata_len, 0, &meta, d.timeout_ms ?
			msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
			blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->end_io = nvme_uring_cmd_end_io;
	req->end_io_data = ioucmd;

	/* to free bio on completion, as req->bio will be null at that time */
	pdu->bio = req->bio;
	pdu->meta = meta;
	pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
	pdu->meta_len = d.metadata_len;

	blk_execute_rq_nowait(req, false);
	return -EIOCBQUEUED;
}

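/*
 * Admin passthrough and SED-OPAL ioctls target the controller rather than
 * the namespace the file descriptor refers to.
 */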
static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, false);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
/* Packed layout of struct nvme_user_io as seen by 32-bit compat callers. */
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp, false);
	case NVME_IOCTL_IO64_CMD_VEC:
		return nvme_user_cmd64(ns->ctrl, ns, argp, true);
	default:
		return -ENOTTY;
	}
}

static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
{
	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
	return nvme_ns_ioctl(ns, cmd, arg);
}

int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

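/* Issue-flag checks shared by the namespace and controller uring_cmd paths. */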
static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
	/* IOPOLL not supported yet */
	if (issue_flags & IO_URING_F_IOPOLL)
		return -EOPNOTSUPP;

	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_IO:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_IO_VEC:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);

	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

#ifdef CONFIG_NVME_MULTIPATH
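/*
 * Controller ioctls issued through an ns_head node: grab a controller
 * reference and drop the head SRCU lock before dispatching, as explained
 * in nvme_ns_head_ioctl() below.
 */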
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);

	nvme_put_ctrl(ctrl);
	return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
	int ret;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_ADMIN:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_ADMIN_VEC:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

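/*
 * Legacy NVME_IOCTL_IO_CMD on the controller character device; only
 * supported when the controller has a single visible namespace.
 */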
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, false);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);