1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2011-2014, Intel Corporation.
4 * Copyright (c) 2017-2021 Christoph Hellwig.
6 #include <linux/ptrace.h> /* for force_successful_syscall_return */
7 #include <linux/nvme_ioctl.h>
8 #include <linux/io_uring.h>
12 NVME_IOCTL_VEC = (1 << 0),
13 NVME_IOCTL_PARTITION = (1 << 1),
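/*
 * Permission policy for passthrough commands: CAP_SYS_ADMIN may send
 * anything, while the checks below restrict what unprivileged callers can
 * issue based on command type and the open mode of the file.
 */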
16 static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
17 unsigned int flags, bool open_for_write)
21 if (capable(CAP_SYS_ADMIN))
25 * Do not allow unprivileged passthrough on partitions, as that allows an
26 * escape from the containment of the partition.
28 if (flags & NVME_IOCTL_PARTITION)
32 * Do not allow unprivileged processes to send vendor specific or fabrics
33 * commands as we can't be sure about their effects.
35 if (c->common.opcode >= nvme_cmd_vendor_start ||
36 c->common.opcode == nvme_fabrics_command)
40 * Do not allow unprivileged passthrough of admin commands except
41 * for a subset of identify commands that contain information required
42 * to form proper I/O commands in userspace and do not expose any
43 * potentially sensitive information.
46 if (c->common.opcode == nvme_admin_identify) {
47 switch (c->identify.cns) {
49 case NVME_ID_CNS_CS_NS:
50 case NVME_ID_CNS_NS_CS_INDEP:
51 case NVME_ID_CNS_CS_CTRL:
52 case NVME_ID_CNS_CTRL:
60 * Check if the controller provides a Commands Supported and Effects log
61 * and marks this command as supported. If not, reject unprivileged
62 * passthrough of admin commands.
64 effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
65 if (!(effects & NVME_CMD_EFFECTS_CSUPP))
69 * Don't allow passthrough for commands that have intrusive (or unknown)
70 * effects.
72 if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
73 NVME_CMD_EFFECTS_UUID_SEL |
74 NVME_CMD_EFFECTS_SCOPE_MASK))
78 * Only allow I/O commands that transfer data to the controller or that
79 * change the logical block contents if the file descriptor is open for
80 * writing.
82 if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
83 return open_for_write;
88 * Convert integer values from ioctl structures to user pointers, silently
89 * ignoring the upper bits in the compat case to match behaviour of 32-bit
90 * kernels.
92 static void __user *nvme_to_user_ptr(uintptr_t ptrval)
94 if (in_compat_syscall())
95 ptrval = (compat_uptr_t)ptrval;
96 return (void __user *)ptrval;
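/*
 * Allocate a kernel bounce buffer for user metadata (copying it in for
 * writes) and attach it to the request's bio as an integrity payload.
 */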
99 static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
100 unsigned len, u32 seed)
102 struct bio_integrity_payload *bip;
105 struct bio *bio = req->bio;
107 buf = kmalloc(len, GFP_KERNEL);
112 if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
115 bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
121 bip->bip_iter.bi_size = len;
122 bip->bip_iter.bi_sector = seed;
123 ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
124 offset_in_page(buf));
130 req->cmd_flags |= REQ_INTEGRITY;
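/*
 * Copy the metadata bounce buffer back to userspace after a successful
 * read (REQ_OP_DRV_IN), then release it.
 */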
138 static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
139 void *meta, unsigned len, int ret)
141 if (!ret && req_op(req) == REQ_OP_DRV_IN &&
142 copy_to_user(ubuf, meta, len))
148 static struct request *nvme_alloc_user_request(struct request_queue *q,
149 struct nvme_command *cmd, blk_opf_t rq_flags,
150 blk_mq_req_flags_t blk_flags)
154 req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
157 nvme_init_request(req, cmd);
158 nvme_req(req)->flags |= NVME_REQ_USERCMD;
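/*
 * Map the user data buffer into the request, either from a registered
 * io_uring fixed buffer or from a plain (optionally vectored) user
 * pointer, and attach the optional metadata buffer for block devices.
 */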
162 static int nvme_map_user_request(struct request *req, u64 ubuffer,
163 unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
164 u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
165 unsigned int flags)
167 struct request_queue *q = req->q;
168 struct nvme_ns *ns = q->queuedata;
169 struct block_device *bdev = ns ? ns->disk->part0 : NULL;
170 struct bio *bio = NULL;
174 if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
175 struct iov_iter iter;
177 /* fixedbufs is only for non-vectored io */
178 if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
180 ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
181 rq_data_dir(req), &iter, ioucmd);
184 ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
186 ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
187 bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
188 0, rq_data_dir(req));
195 bio_set_dev(bio, bdev);
197 if (bdev && meta_buffer && meta_len) {
198 meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
199 meta_seed);
211 blk_rq_unmap_user(bio);
213 blk_mq_free_request(req);
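/*
 * Synchronous passthrough path shared by the ioctl handlers: allocate and
 * map the request, execute it, return the completion result, and bracket
 * execution with nvme_passthru_start()/nvme_passthru_end() so commands
 * with side effects are accounted for.
 */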
217 static int nvme_submit_user_cmd(struct request_queue *q,
218 struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
219 void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
220 u64 *result, unsigned timeout, unsigned int flags)
222 struct nvme_ns *ns = q->queuedata;
223 struct nvme_ctrl *ctrl;
230 req = nvme_alloc_user_request(q, cmd, 0, 0);
234 req->timeout = timeout;
235 if (ubuffer && bufflen) {
236 ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
237 meta_len, meta_seed, &meta, NULL, flags);
243 ctrl = nvme_req(req)->ctrl;
245 effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
246 ret = nvme_execute_rq(req, false);
248 *result = le64_to_cpu(nvme_req(req)->result.u64);
250 ret = nvme_finish_user_metadata(req, meta_buffer, meta,
251 meta_len, ret);
253 blk_rq_unmap_user(bio);
254 blk_mq_free_request(req);
257 nvme_passthru_end(ctrl, ns, effects, cmd, ret);
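/*
 * NVME_IOCTL_SUBMIT_IO: translate the legacy struct nvme_user_io into an
 * NVM read/write/compare command and submit it through the common
 * passthrough path.
 */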
262 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
264 struct nvme_user_io io;
265 struct nvme_command c;
266 unsigned length, meta_len;
267 void __user *metadata;
269 if (copy_from_user(&io, uio, sizeof(io)))
277 case nvme_cmd_compare:
283 length = (io.nblocks + 1) << ns->lba_shift;
285 if ((io.control & NVME_RW_PRINFO_PRACT) &&
286 ns->ms == sizeof(struct t10_pi_tuple)) {
288 * Protection information is stripped/inserted by the
289 * controller.
291 if (nvme_to_user_ptr(io.metadata))
296 meta_len = (io.nblocks + 1) * ns->ms;
297 metadata = nvme_to_user_ptr(io.metadata);
300 if (ns->features & NVME_NS_EXT_LBAS) {
301 length += meta_len;
302 meta_len = 0;
303 } else if (meta_len) {
304 if ((io.metadata & 3) || !io.metadata)
308 memset(&c, 0, sizeof(c));
309 c.rw.opcode = io.opcode;
310 c.rw.flags = io.flags;
311 c.rw.nsid = cpu_to_le32(ns->head->ns_id);
312 c.rw.slba = cpu_to_le64(io.slba);
313 c.rw.length = cpu_to_le16(io.nblocks);
314 c.rw.control = cpu_to_le16(io.control);
315 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
316 c.rw.reftag = cpu_to_le32(io.reftag);
317 c.rw.apptag = cpu_to_le16(io.apptag);
318 c.rw.appmask = cpu_to_le16(io.appmask);
320 return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
321 meta_len, lower_32_bits(io.slba), NULL, 0, 0);
324 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
325 struct nvme_ns *ns, __u32 nsid)
327 if (ns && nsid != ns->head->ns_id) {
328 dev_err(ctrl->device,
329 "%s: nsid (%u) in cmd does not match nsid (%u)"
331 current->comm, nsid, ns->head->ns_id);
338 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
339 struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
342 struct nvme_passthru_cmd cmd;
343 struct nvme_command c;
344 unsigned timeout = 0;
348 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
352 if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
355 memset(&c, 0, sizeof(c));
356 c.common.opcode = cmd.opcode;
357 c.common.flags = cmd.flags;
358 c.common.nsid = cpu_to_le32(cmd.nsid);
359 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
360 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
361 c.common.cdw10 = cpu_to_le32(cmd.cdw10);
362 c.common.cdw11 = cpu_to_le32(cmd.cdw11);
363 c.common.cdw12 = cpu_to_le32(cmd.cdw12);
364 c.common.cdw13 = cpu_to_le32(cmd.cdw13);
365 c.common.cdw14 = cpu_to_le32(cmd.cdw14);
366 c.common.cdw15 = cpu_to_le32(cmd.cdw15);
368 if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
372 timeout = msecs_to_jiffies(cmd.timeout_ms);
374 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
375 cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
376 cmd.metadata_len, 0, &result, timeout, 0);
379 if (put_user(result, &ucmd->result))
386 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
387 struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
390 struct nvme_passthru_cmd64 cmd;
391 struct nvme_command c;
392 unsigned timeout = 0;
395 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
399 if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
402 memset(&c, 0, sizeof(c));
403 c.common.opcode = cmd.opcode;
404 c.common.flags = cmd.flags;
405 c.common.nsid = cpu_to_le32(cmd.nsid);
406 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
407 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
408 c.common.cdw10 = cpu_to_le32(cmd.cdw10);
409 c.common.cdw11 = cpu_to_le32(cmd.cdw11);
410 c.common.cdw12 = cpu_to_le32(cmd.cdw12);
411 c.common.cdw13 = cpu_to_le32(cmd.cdw13);
412 c.common.cdw14 = cpu_to_le32(cmd.cdw14);
413 c.common.cdw15 = cpu_to_le32(cmd.cdw15);
415 if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
419 timeout = msecs_to_jiffies(cmd.timeout_ms);
421 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
422 cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
423 cmd.metadata_len, 0, &cmd.result, timeout, flags);
426 if (put_user(cmd.result, &ucmd->result))
433 struct nvme_uring_data {
442 * This overlays struct io_uring_cmd pdu.
443 * Expect build errors if this grows larger than that.
445 struct nvme_uring_cmd_pdu {
454 void *meta; /* kernel-resident buffer */
455 void __user *meta_buffer;
461 static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
462 struct io_uring_cmd *ioucmd)
464 return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
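/*
 * Completion callbacks for io_uring passthrough, run from task work unless
 * the request was polled. The _meta variant also copies the metadata
 * bounce buffer back to userspace before posting the CQE.
 */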
467 static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
468 unsigned issue_flags)
470 struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
471 struct request *req = pdu->req;
475 if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
478 status = nvme_req(req)->status;
480 result = le64_to_cpu(nvme_req(req)->result.u64);
483 status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
484 pdu->u.meta, pdu->meta_len, status);
486 blk_rq_unmap_user(req->bio);
487 blk_mq_free_request(req);
489 io_uring_cmd_done(ioucmd, status, result, issue_flags);
492 static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
493 unsigned issue_flags)
495 struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
498 blk_rq_unmap_user(pdu->bio);
500 io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
503 static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
506 struct io_uring_cmd *ioucmd = req->end_io_data;
507 struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
510 if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
511 pdu->nvme_status = -EINTR;
513 pdu->nvme_status = nvme_req(req)->status;
514 pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
517 * For iopoll, complete it directly.
518 * Otherwise, move the completion to task work.
520 if (blk_rq_is_poll(req)) {
521 WRITE_ONCE(ioucmd->cookie, NULL);
522 nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
524 io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
527 return RQ_END_IO_FREE;
530 static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
533 struct io_uring_cmd *ioucmd = req->end_io_data;
534 struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
540 * For iopoll, complete it directly.
541 * Otherwise, move the completion to task work.
543 if (blk_rq_is_poll(req)) {
544 WRITE_ONCE(ioucmd->cookie, NULL);
545 nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
547 io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
550 return RQ_END_IO_NONE;
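/*
 * io_uring passthrough submission. Command fields are sampled from the big
 * SQE with READ_ONCE() so a concurrently modified SQE cannot be
 * double-fetched, and the same nvme_cmd_allowed() policy as the ioctl path
 * applies.
 */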
553 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
554 struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
556 struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
557 const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
558 struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
559 struct nvme_uring_data d;
560 struct nvme_command c;
562 blk_opf_t rq_flags = REQ_ALLOC_CACHE;
563 blk_mq_req_flags_t blk_flags = 0;
567 c.common.opcode = READ_ONCE(cmd->opcode);
568 c.common.flags = READ_ONCE(cmd->flags);
572 c.common.command_id = 0;
573 c.common.nsid = cpu_to_le32(cmd->nsid);
574 if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
577 c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
578 c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
579 c.common.metadata = 0;
580 c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
581 c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
582 c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
583 c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
584 c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
585 c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
586 c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
588 if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
591 d.metadata = READ_ONCE(cmd->metadata);
592 d.addr = READ_ONCE(cmd->addr);
593 d.data_len = READ_ONCE(cmd->data_len);
594 d.metadata_len = READ_ONCE(cmd->metadata_len);
595 d.timeout_ms = READ_ONCE(cmd->timeout_ms);
597 if (issue_flags & IO_URING_F_NONBLOCK) {
598 rq_flags |= REQ_NOWAIT;
599 blk_flags = BLK_MQ_REQ_NOWAIT;
601 if (issue_flags & IO_URING_F_IOPOLL)
602 rq_flags |= REQ_POLLED;
604 req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
607 req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
609 if (d.addr && d.data_len) {
610 ret = nvme_map_user_request(req, d.addr,
611 d.data_len, nvme_to_user_ptr(d.metadata),
612 d.metadata_len, 0, &meta, ioucmd, vec);
617 if (blk_rq_is_poll(req)) {
618 ioucmd->flags |= IORING_URING_CMD_POLLED;
619 WRITE_ONCE(ioucmd->cookie, req);
622 /* to free bio on completion, as req->bio will be null at that time */
623 pdu->bio = req->bio;
624 pdu->meta_len = d.metadata_len;
625 req->end_io_data = ioucmd;
628 pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
629 req->end_io = nvme_uring_cmd_end_io_meta;
631 req->end_io = nvme_uring_cmd_end_io;
633 blk_execute_rq_nowait(req, false);
637 static bool is_ctrl_ioctl(unsigned int cmd)
639 if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
641 if (is_sed_ioctl(cmd))
646 static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
647 void __user *argp, bool open_for_write)
650 case NVME_IOCTL_ADMIN_CMD:
651 return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
652 case NVME_IOCTL_ADMIN64_CMD:
653 return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
655 return sed_ioctl(ctrl->opal_dev, cmd, argp);
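/*
 * Illustrative sketch (not part of this driver) of how userspace reaches
 * the admin passthrough handler above, assuming an open /dev/nvme0 file
 * descriptor "fd" and a 4096-byte buffer "buf":
 *
 *	struct nvme_admin_cmd cmd = {
 *		.opcode = 0x06,			// Identify
 *		.addr = (__u64)(uintptr_t)buf,
 *		.data_len = 4096,
 *		.cdw10 = 1,			// CNS 0x01: Identify Controller
 *	};
 *	ret = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */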
659 #ifdef COMPAT_FOR_U64_ALIGNMENT
660 struct nvme_user_io32 {
673 } __attribute__((__packed__));
674 #define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
675 #endif /* COMPAT_FOR_U64_ALIGNMENT */
677 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
678 void __user *argp, unsigned int flags, bool open_for_write)
682 force_successful_syscall_return();
683 return ns->head->ns_id;
684 case NVME_IOCTL_IO_CMD:
685 return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write);
687 * struct nvme_user_io can have different padding on some 32-bit ABIs.
688 * Just accept the compat version as all fields that are used are the
689 * same size and at the same offset.
691 #ifdef COMPAT_FOR_U64_ALIGNMENT
692 case NVME_IOCTL_SUBMIT_IO32:
694 case NVME_IOCTL_SUBMIT_IO:
695 return nvme_submit_io(ns, argp);
696 case NVME_IOCTL_IO64_CMD_VEC:
697 flags |= NVME_IOCTL_VEC;
699 case NVME_IOCTL_IO64_CMD:
700 return nvme_user_cmd64(ns->ctrl, ns, argp, flags,
701 open_for_write);
707 int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
708 unsigned int cmd, unsigned long arg)
710 struct nvme_ns *ns = bdev->bd_disk->private_data;
711 bool open_for_write = mode & BLK_OPEN_WRITE;
712 void __user *argp = (void __user *)arg;
713 unsigned int flags = 0;
715 if (bdev_is_partition(bdev))
716 flags |= NVME_IOCTL_PARTITION;
718 if (is_ctrl_ioctl(cmd))
719 return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
720 return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
723 long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
726 container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
727 bool open_for_write = file->f_mode & FMODE_WRITE;
728 void __user *argp = (void __user *)arg;
730 if (is_ctrl_ioctl(cmd))
731 return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
732 return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
735 static int nvme_uring_cmd_checks(unsigned int issue_flags)
738 /* NVMe passthrough requires big SQE/CQE support */
739 if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
740 (IO_URING_F_SQE128|IO_URING_F_CQE32))
745 static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
746 unsigned int issue_flags)
748 struct nvme_ctrl *ctrl = ns->ctrl;
751 BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
753 ret = nvme_uring_cmd_checks(issue_flags);
757 switch (ioucmd->cmd_op) {
758 case NVME_URING_CMD_IO:
759 ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
761 case NVME_URING_CMD_IO_VEC:
762 ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
771 int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
773 struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
774 struct nvme_ns, cdev);
776 return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
779 int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
780 struct io_comp_batch *iob,
781 unsigned int poll_flags)
786 if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
789 req = READ_ONCE(ioucmd->cookie);
790 if (req && blk_rq_is_poll(req))
791 ret = blk_rq_poll(req, iob, poll_flags);
794 #ifdef CONFIG_NVME_MULTIPATH
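/*
 * Multipath variants: resolve a path under head->srcu and dispatch to it.
 * Controller ioctls drop the SRCU reference early (see below) to avoid
 * deadlocking against namespace removal.
 */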
795 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
796 void __user *argp, struct nvme_ns_head *head, int srcu_idx,
797 bool open_for_write)
798 __releases(&head->srcu)
800 struct nvme_ctrl *ctrl = ns->ctrl;
803 nvme_get_ctrl(ns->ctrl);
804 srcu_read_unlock(&head->srcu, srcu_idx);
805 ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
811 int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
812 unsigned int cmd, unsigned long arg)
814 struct nvme_ns_head *head = bdev->bd_disk->private_data;
815 bool open_for_write = mode & BLK_OPEN_WRITE;
816 void __user *argp = (void __user *)arg;
818 int srcu_idx, ret = -EWOULDBLOCK;
819 unsigned int flags = 0;
821 if (bdev_is_partition(bdev))
822 flags |= NVME_IOCTL_PARTITION;
824 srcu_idx = srcu_read_lock(&head->srcu);
825 ns = nvme_find_path(head);
830 * Handle ioctls that apply to the controller instead of the namespace
831 * separately and drop the ns SRCU reference early. This avoids a
832 * deadlock when deleting namespaces using the passthrough interface.
834 if (is_ctrl_ioctl(cmd))
835 return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
836 open_for_write);
838 ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
840 srcu_read_unlock(&head->srcu, srcu_idx);
844 long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
847 bool open_for_write = file->f_mode & FMODE_WRITE;
848 struct cdev *cdev = file_inode(file)->i_cdev;
849 struct nvme_ns_head *head =
850 container_of(cdev, struct nvme_ns_head, cdev);
851 void __user *argp = (void __user *)arg;
853 int srcu_idx, ret = -EWOULDBLOCK;
855 srcu_idx = srcu_read_lock(&head->srcu);
856 ns = nvme_find_path(head);
860 if (is_ctrl_ioctl(cmd))
861 return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
862 open_for_write);
864 ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
866 srcu_read_unlock(&head->srcu, srcu_idx);
870 int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
871 unsigned int issue_flags)
873 struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
874 struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
875 int srcu_idx = srcu_read_lock(&head->srcu);
876 struct nvme_ns *ns = nvme_find_path(head);
880 ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
881 srcu_read_unlock(&head->srcu, srcu_idx);
884 #endif /* CONFIG_NVME_MULTIPATH */
886 int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
888 struct nvme_ctrl *ctrl = ioucmd->file->private_data;
891 /* IOPOLL not supported yet */
892 if (issue_flags & IO_URING_F_IOPOLL)
895 ret = nvme_uring_cmd_checks(issue_flags);
899 switch (ioucmd->cmd_op) {
900 case NVME_URING_CMD_ADMIN:
901 ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
903 case NVME_URING_CMD_ADMIN_VEC:
904 ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
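/*
 * Legacy NVME_IOCTL_IO_CMD on the controller character device: only
 * usable while the controller has exactly one namespace, and flagged as
 * deprecated by the warning below.
 */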
913 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
919 down_read(&ctrl->namespaces_rwsem);
920 if (list_empty(&ctrl->namespaces)) {
925 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
926 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
927 dev_warn(ctrl->device,
928 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
933 dev_warn(ctrl->device,
934 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
936 up_read(&ctrl->namespaces_rwsem);
938 ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
943 up_read(&ctrl->namespaces_rwsem);
947 long nvme_dev_ioctl(struct file *file, unsigned int cmd,
950 bool open_for_write = file->f_mode & FMODE_WRITE;
951 struct nvme_ctrl *ctrl = file->private_data;
952 void __user *argp = (void __user *)arg;
955 case NVME_IOCTL_ADMIN_CMD:
956 return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
957 case NVME_IOCTL_ADMIN64_CMD:
958 return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
959 case NVME_IOCTL_IO_CMD:
960 return nvme_dev_user_cmd(ctrl, argp, open_for_write);
961 case NVME_IOCTL_RESET:
962 if (!capable(CAP_SYS_ADMIN))
964 dev_warn(ctrl->device, "resetting controller\n");
965 return nvme_reset_ctrl_sync(ctrl);
966 case NVME_IOCTL_SUBSYS_RESET:
967 if (!capable(CAP_SYS_ADMIN))
969 return nvme_reset_subsystem(ctrl);
970 case NVME_IOCTL_RESCAN:
971 if (!capable(CAP_SYS_ADMIN))
973 nvme_queue_scan(ctrl);