// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>       /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring.h>
#include "nvme.h"

enum {
        NVME_IOCTL_VEC          = (1 << 0),
        NVME_IOCTL_PARTITION    = (1 << 1),
};
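
/*
 * NVME_IOCTL_VEC marks a passthrough command whose user buffer is an
 * iovec array (the *_VEC ioctls below); NVME_IOCTL_PARTITION marks an
 * ioctl issued against a partition rather than the whole device, which
 * nvme_cmd_allowed() uses to reject unprivileged passthrough.
 */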

static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
                unsigned int flags, bool open_for_write)
{
        u32 effects;

        if (capable(CAP_SYS_ADMIN))
                return true;

        /*
         * Do not allow unprivileged passthrough on partitions, as that allows an
         * escape from the containment of the partition.
         */
        if (flags & NVME_IOCTL_PARTITION)
                return false;

        /*
         * Do not allow unprivileged processes to send vendor specific or fabrics
         * commands as we can't be sure about their effects.
         */
        if (c->common.opcode >= nvme_cmd_vendor_start ||
            c->common.opcode == nvme_fabrics_command)
                return false;

        /*
         * Do not allow unprivileged passthrough of admin commands except
         * for a subset of identify commands that contain information required
         * to form proper I/O commands in userspace and do not expose any
         * potentially sensitive information.
         */
        if (!ns) {
                if (c->common.opcode == nvme_admin_identify) {
                        switch (c->identify.cns) {
                        case NVME_ID_CNS_NS:
                        case NVME_ID_CNS_CS_NS:
                        case NVME_ID_CNS_NS_CS_INDEP:
                        case NVME_ID_CNS_CS_CTRL:
                        case NVME_ID_CNS_CTRL:
                                return true;
                        }
                }
                return false;
        }

        /*
         * Check if the controller provides a Commands Supported and Effects log
         * and marks this command as supported.  If not, reject unprivileged
         * passthrough.
         */
        effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
        if (!(effects & NVME_CMD_EFFECTS_CSUPP))
                return false;

        /*
         * Don't allow passthrough for commands that have intrusive (or unknown)
         * effects.
         */
        if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
                        NVME_CMD_EFFECTS_UUID_SEL |
                        NVME_CMD_EFFECTS_SCOPE_MASK))
                return false;

        /*
         * Only allow I/O commands that transfer data to the controller or that
         * change the logical block contents if the file descriptor is open for
         * writing.
         */
        if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
                return open_for_write;
        return true;
}

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
        if (in_compat_syscall())
                ptrval = (compat_uptr_t)ptrval;
        return (void __user *)ptrval;
}
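
/*
 * Worked example: under in_compat_syscall(), a 64-bit field holding
 * 0xffffffff12345678 is truncated to 0x12345678 by the compat_uptr_t
 * cast above, matching what a native 32-bit kernel would have seen.
 */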

static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
                unsigned len, u32 seed)
{
        struct bio_integrity_payload *bip;
        int ret = -ENOMEM;
        void *buf;
        struct bio *bio = req->bio;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                goto out;

        ret = -EFAULT;
        if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
                goto out_free_meta;

        bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
        if (IS_ERR(bip)) {
                ret = PTR_ERR(bip);
                goto out_free_meta;
        }

        bip->bip_iter.bi_size = len;
        bip->bip_iter.bi_sector = seed;
        ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
                        offset_in_page(buf));
        if (ret != len) {
                ret = -ENOMEM;
                goto out_free_meta;
        }

        req->cmd_flags |= REQ_INTEGRITY;
        return buf;
out_free_meta:
        kfree(buf);
out:
        return ERR_PTR(ret);
}

static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
                void *meta, unsigned len, int ret)
{
        if (!ret && req_op(req) == REQ_OP_DRV_IN &&
            copy_to_user(ubuf, meta, len))
                ret = -EFAULT;
        kfree(meta);
        return ret;
}

static struct request *nvme_alloc_user_request(struct request_queue *q,
                struct nvme_command *cmd, blk_opf_t rq_flags,
                blk_mq_req_flags_t blk_flags)
{
        struct request *req;

        req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
        if (IS_ERR(req))
                return req;
        nvme_init_request(req, cmd);
        nvme_req(req)->flags |= NVME_REQ_USERCMD;
        return req;
}

static int nvme_map_user_request(struct request *req, u64 ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
                unsigned int flags)
{
        struct request_queue *q = req->q;
        struct nvme_ns *ns = q->queuedata;
        struct block_device *bdev = ns ? ns->disk->part0 : NULL;
        struct bio *bio = NULL;
        void *meta = NULL;
        int ret;

        if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
                struct iov_iter iter;

                /* fixedbufs is only for non-vectored io */
                if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
                        return -EINVAL;
                ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
                                rq_data_dir(req), &iter, ioucmd);
                if (ret < 0)
                        goto out;
                ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
        } else {
                ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
                                bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
                                0, rq_data_dir(req));
        }

        if (ret)
                goto out;
        bio = req->bio;
        if (bdev)
                bio_set_dev(bio, bdev);

        if (bdev && meta_buffer && meta_len) {
                meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
                                meta_seed);
                if (IS_ERR(meta)) {
                        ret = PTR_ERR(meta);
                        goto out_unmap;
                }
                *metap = meta;
        }

        return ret;

out_unmap:
        if (bio)
                blk_rq_unmap_user(bio);
out:
        blk_mq_free_request(req);
        return ret;
}

static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
                void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
                u64 *result, unsigned timeout, unsigned int flags)
{
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl;
        struct request *req;
        void *meta = NULL;
        struct bio *bio;
        u32 effects;
        int ret;

        req = nvme_alloc_user_request(q, cmd, 0, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->timeout = timeout;
        if (ubuffer && bufflen) {
                ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
                                meta_len, meta_seed, &meta, NULL, flags);
                if (ret)
                        return ret;
        }

        bio = req->bio;
        ctrl = nvme_req(req)->ctrl;

        effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
        ret = nvme_execute_rq(req, false);
        if (result)
                *result = le64_to_cpu(nvme_req(req)->result.u64);
        if (meta)
                ret = nvme_finish_user_metadata(req, meta_buffer, meta,
                                                meta_len, ret);
        if (bio)
                blk_rq_unmap_user(bio);
        blk_mq_free_request(req);

        if (effects)
                nvme_passthru_end(ctrl, ns, effects, cmd, ret);

        return ret;
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
        struct nvme_user_io io;
        struct nvme_command c;
        unsigned length, meta_len;
        void __user *metadata;

        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
        if (io.flags)
                return -EINVAL;

        switch (io.opcode) {
        case nvme_cmd_write:
        case nvme_cmd_read:
        case nvme_cmd_compare:
                break;
        default:
                return -EINVAL;
        }

        length = (io.nblocks + 1) << ns->lba_shift;

        if ((io.control & NVME_RW_PRINFO_PRACT) &&
            ns->ms == sizeof(struct t10_pi_tuple)) {
                /*
                 * Protection information is stripped/inserted by the
                 * controller.
                 */
                if (nvme_to_user_ptr(io.metadata))
                        return -EINVAL;
                meta_len = 0;
                metadata = NULL;
        } else {
                meta_len = (io.nblocks + 1) * ns->ms;
                metadata = nvme_to_user_ptr(io.metadata);
        }

        if (ns->features & NVME_NS_EXT_LBAS) {
                length += meta_len;
                meta_len = 0;
        } else if (meta_len) {
                if ((io.metadata & 3) || !io.metadata)
                        return -EINVAL;
        }

        memset(&c, 0, sizeof(c));
        c.rw.opcode = io.opcode;
        c.rw.flags = io.flags;
        c.rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.rw.slba = cpu_to_le64(io.slba);
        c.rw.length = cpu_to_le16(io.nblocks);
        c.rw.control = cpu_to_le16(io.control);
        c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
        c.rw.reftag = cpu_to_le32(io.reftag);
        c.rw.apptag = cpu_to_le16(io.apptag);
        c.rw.appmask = cpu_to_le16(io.appmask);

        return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
                        meta_len, lower_32_bits(io.slba), NULL, 0, 0);
}
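
/*
 * Illustrative userspace sketch of the ioctl handled above (not part of
 * this driver; fd, buf and the LBA count are hypothetical, and buf must
 * be sized to (nblocks + 1) logical blocks):
 *
 *        struct nvme_user_io io = {
 *                .opcode  = 0x02,        // nvme_cmd_read
 *                .nblocks = 7,           // 0's based: 8 logical blocks
 *                .slba    = 0,
 *                .addr    = (__u64)(uintptr_t)buf,
 *        };
 *        if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io) < 0)
 *                perror("NVME_IOCTL_SUBMIT_IO");
 */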

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
                                        struct nvme_ns *ns, __u32 nsid)
{
        if (ns && nsid != ns->head->ns_id) {
                dev_err(ctrl->device,
329                         "%s: nsid (%u) in cmd does not match nsid (%u)"
330                         "of namespace\n",
                        current->comm, nsid, ns->head->ns_id);
                return false;
        }

        return true;
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
                bool open_for_write)
{
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        u64 result;
        int status;

        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
                return -EACCES;

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
                        cmd.metadata_len, 0, &result, timeout, 0);

        if (status >= 0) {
                if (put_user(result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
                bool open_for_write)
{
        struct nvme_passthru_cmd64 cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        int status;

        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
                return -EACCES;

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
                        cmd.metadata_len, 0, &cmd.result, timeout, flags);

        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}

struct nvme_uring_data {
        __u64   metadata;
        __u64   addr;
        __u32   data_len;
        __u32   metadata_len;
        __u32   timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
        union {
                struct bio *bio;
                struct request *req;
        };
        u32 meta_len;
        u32 nvme_status;
        union {
                struct {
                        void *meta; /* kernel-resident buffer */
                        void __user *meta_buffer;
                };
                u64 result;
        } u;
};
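
/*
 * The pdu must fit in the per-command space io_uring reserves inside
 * struct io_uring_cmd; nvme_ns_uring_cmd() enforces this with
 * BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu)).
 */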

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
                struct io_uring_cmd *ioucmd)
{
        return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
                                    unsigned issue_flags)
{
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        struct request *req = pdu->req;
        int status;
        u64 result;

        if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
                status = -EINTR;
        else
                status = nvme_req(req)->status;

        result = le64_to_cpu(nvme_req(req)->result.u64);

        if (pdu->meta_len)
                status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
                                        pdu->u.meta, pdu->meta_len, status);
        if (req->bio)
                blk_rq_unmap_user(req->bio);
        blk_mq_free_request(req);

        io_uring_cmd_done(ioucmd, status, result, issue_flags);
}

static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
                               unsigned issue_flags)
{
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

        if (pdu->bio)
                blk_rq_unmap_user(pdu->bio);

        io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
}

static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
                                                blk_status_t err)
{
        struct io_uring_cmd *ioucmd = req->end_io_data;
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

        req->bio = pdu->bio;
        if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
                pdu->nvme_status = -EINTR;
        else
                pdu->nvme_status = nvme_req(req)->status;
        pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);

        /*
         * For iopoll, complete it directly.
         * Otherwise, move the completion to task work.
         */
        if (blk_rq_is_poll(req)) {
                WRITE_ONCE(ioucmd->cookie, NULL);
                nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
        } else {
                io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
        }

        return RQ_END_IO_FREE;
}

static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
                                                     blk_status_t err)
{
        struct io_uring_cmd *ioucmd = req->end_io_data;
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

        req->bio = pdu->bio;
        pdu->req = req;

        /*
         * For iopoll, complete it directly.
         * Otherwise, move the completion to task work.
         */
        if (blk_rq_is_poll(req)) {
                WRITE_ONCE(ioucmd->cookie, NULL);
                nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
        } else {
                io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
        }

        return RQ_END_IO_NONE;
}

static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
        struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
        struct nvme_uring_data d;
        struct nvme_command c;
        struct request *req;
        blk_opf_t rq_flags = REQ_ALLOC_CACHE;
        blk_mq_req_flags_t blk_flags = 0;
        void *meta = NULL;
        int ret;

        c.common.opcode = READ_ONCE(cmd->opcode);
        c.common.flags = READ_ONCE(cmd->flags);
        if (c.common.flags)
                return -EINVAL;

        c.common.command_id = 0;
        c.common.nsid = cpu_to_le32(cmd->nsid);
        if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
                return -EINVAL;

        c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
        c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
        c.common.metadata = 0;
        c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
        c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
        c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
        c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
        c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
        c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
        c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

        if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
                return -EACCES;

        d.metadata = READ_ONCE(cmd->metadata);
        d.addr = READ_ONCE(cmd->addr);
        d.data_len = READ_ONCE(cmd->data_len);
        d.metadata_len = READ_ONCE(cmd->metadata_len);
        d.timeout_ms = READ_ONCE(cmd->timeout_ms);

        if (issue_flags & IO_URING_F_NONBLOCK) {
                rq_flags |= REQ_NOWAIT;
                blk_flags = BLK_MQ_REQ_NOWAIT;
        }
        if (issue_flags & IO_URING_F_IOPOLL)
                rq_flags |= REQ_POLLED;

        req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;

        if (d.addr && d.data_len) {
                ret = nvme_map_user_request(req, d.addr,
                        d.data_len, nvme_to_user_ptr(d.metadata),
                        d.metadata_len, 0, &meta, ioucmd, vec);
                if (ret)
                        return ret;
        }

        if (blk_rq_is_poll(req)) {
                ioucmd->flags |= IORING_URING_CMD_POLLED;
                WRITE_ONCE(ioucmd->cookie, req);
        }

        /* to free bio on completion, as req->bio will be null at that time */
        pdu->bio = req->bio;
        pdu->meta_len = d.metadata_len;
        req->end_io_data = ioucmd;
        if (pdu->meta_len) {
                pdu->u.meta = meta;
                pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
                req->end_io = nvme_uring_cmd_end_io_meta;
        } else {
                req->end_io = nvme_uring_cmd_end_io;
        }
        blk_execute_rq_nowait(req, false);
        return -EIOCBQUEUED;
}
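
/*
 * Illustrative userspace flow for the uring_cmd path above (a hedged
 * sketch using liburing; the ring must be created with
 * IORING_SETUP_SQE128 | IORING_SETUP_CQE32, and nsfd is a hypothetical
 * fd for a /dev/ngXnY char device):
 *
 *        struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *        struct nvme_uring_cmd *cmd = (struct nvme_uring_cmd *)sqe->cmd;
 *
 *        sqe->opcode = IORING_OP_URING_CMD;
 *        sqe->fd     = nsfd;
 *        sqe->cmd_op = NVME_URING_CMD_IO;
 *        cmd->opcode = 0x02;           // filled in as for the ioctl path
 *        io_uring_submit(&ring);       // completion arrives via the
 *                                      // end_io callbacks above
 */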

static bool is_ctrl_ioctl(unsigned int cmd)
{
        if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
                return true;
        if (is_sed_ioctl(cmd))
                return true;
        return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
                void __user *argp, bool open_for_write)
{
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
        default:
                return sed_ioctl(ctrl->opal_dev, cmd, argp);
        }
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
        __u8    opcode;
        __u8    flags;
        __u16   control;
        __u16   nblocks;
        __u16   rsvd;
        __u64   metadata;
        __u64   addr;
        __u64   slba;
        __u32   dsmgmt;
        __u32   reftag;
        __u16   apptag;
        __u16   appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32  _IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp, unsigned int flags, bool open_for_write)
{
        switch (cmd) {
        case NVME_IOCTL_ID:
                force_successful_syscall_return();
                return ns->head->ns_id;
        case NVME_IOCTL_IO_CMD:
                return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write);
        /*
         * struct nvme_user_io can have different padding on some 32-bit ABIs.
         * Just accept the compat version as all fields that are used are the
         * same size and at the same offset.
         */
#ifdef COMPAT_FOR_U64_ALIGNMENT
        case NVME_IOCTL_SUBMIT_IO32:
#endif
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, argp);
        case NVME_IOCTL_IO64_CMD_VEC:
                flags |= NVME_IOCTL_VEC;
                fallthrough;
        case NVME_IOCTL_IO64_CMD:
                return nvme_user_cmd64(ns->ctrl, ns, argp, flags,
                                       open_for_write);
        default:
                return -ENOTTY;
        }
}

int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns = bdev->bd_disk->private_data;
        bool open_for_write = mode & BLK_OPEN_WRITE;
        void __user *argp = (void __user *)arg;
        unsigned int flags = 0;

        if (bdev_is_partition(bdev))
                flags |= NVME_IOCTL_PARTITION;

        if (is_ctrl_ioctl(cmd))
                return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
        return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns =
                container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
        bool open_for_write = file->f_mode & FMODE_WRITE;
        void __user *argp = (void __user *)arg;

        if (is_ctrl_ioctl(cmd))
                return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
        return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
        /* NVMe passthrough requires big SQE/CQE support */
        if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
            (IO_URING_F_SQE128|IO_URING_F_CQE32))
                return -EOPNOTSUPP;
        return 0;
}
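
/*
 * The check above exists because struct nvme_uring_cmd does not fit in
 * the 16 bytes of command space in a regular 64-byte SQE, and the
 * 32-byte CQE is what carries the 64-bit NVMe result passed to
 * io_uring_cmd_done() alongside the status.
 */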

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
                             unsigned int issue_flags)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        int ret;

        BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

        ret = nvme_uring_cmd_checks(issue_flags);
        if (ret)
                return ret;

        switch (ioucmd->cmd_op) {
        case NVME_URING_CMD_IO:
                ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
                break;
        case NVME_URING_CMD_IO_VEC:
                ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
        struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
                        struct nvme_ns, cdev);

        return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
                                 struct io_comp_batch *iob,
                                 unsigned int poll_flags)
{
        struct request *req;
        int ret = 0;

        if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
                return 0;

        req = READ_ONCE(ioucmd->cookie);
        if (req && blk_rq_is_poll(req))
                ret = blk_rq_poll(req, iob, poll_flags);
        return ret;
}
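
/*
 * Polled-completion handshake: nvme_uring_cmd_io() stashes the request
 * in ioucmd->cookie when the command is polled and the end_io handlers
 * clear it again, so a NULL cookie in nvme_ns_chr_uring_cmd_iopoll()
 * simply means the command already completed.
 */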
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp, struct nvme_ns_head *head, int srcu_idx,
                bool open_for_write)
        __releases(&head->srcu)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        int ret;

        nvme_get_ctrl(ns->ctrl);
        srcu_read_unlock(&head->srcu, srcu_idx);
        ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);

        nvme_put_ctrl(ctrl);
        return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns_head *head = bdev->bd_disk->private_data;
        bool open_for_write = mode & BLK_OPEN_WRITE;
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;
        unsigned int flags = 0;

        if (bdev_is_partition(bdev))
                flags |= NVME_IOCTL_PARTITION;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (!ns)
                goto out_unlock;

830          * Handle ioctls that apply to the controller instead of the namespace
831          * seperately and drop the ns SRCU reference early.  This avoids a
832          * deadlock when deleting namespaces using the passthrough interface.
833          */
834         if (is_ctrl_ioctl(cmd))
835                 return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
836                                                open_for_write);
837
838         ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
839 out_unlock:
840         srcu_read_unlock(&head->srcu, srcu_idx);
841         return ret;
842 }
843
844 long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
845                 unsigned long arg)
846 {
847         bool open_for_write = file->f_mode & FMODE_WRITE;
848         struct cdev *cdev = file_inode(file)->i_cdev;
849         struct nvme_ns_head *head =
850                 container_of(cdev, struct nvme_ns_head, cdev);
851         void __user *argp = (void __user *)arg;
852         struct nvme_ns *ns;
853         int srcu_idx, ret = -EWOULDBLOCK;
854
855         srcu_idx = srcu_read_lock(&head->srcu);
856         ns = nvme_find_path(head);
857         if (!ns)
858                 goto out_unlock;
859
860         if (is_ctrl_ioctl(cmd))
861                 return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
862                                 open_for_write);
863
864         ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
865 out_unlock:
866         srcu_read_unlock(&head->srcu, srcu_idx);
867         return ret;
868 }
869
870 int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
871                 unsigned int issue_flags)
872 {
873         struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
874         struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
875         int srcu_idx = srcu_read_lock(&head->srcu);
876         struct nvme_ns *ns = nvme_find_path(head);
877         int ret = -EINVAL;
878
879         if (ns)
880                 ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
881         srcu_read_unlock(&head->srcu, srcu_idx);
882         return ret;
883 }
884 #endif /* CONFIG_NVME_MULTIPATH */

int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
        struct nvme_ctrl *ctrl = ioucmd->file->private_data;
        int ret;

        /* IOPOLL not supported yet */
        if (issue_flags & IO_URING_F_IOPOLL)
                return -EOPNOTSUPP;

        ret = nvme_uring_cmd_checks(issue_flags);
        if (ret)
                return ret;

        switch (ioucmd->cmd_op) {
        case NVME_URING_CMD_ADMIN:
                ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
                break;
        case NVME_URING_CMD_ADMIN_VEC:
                ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
                bool open_for_write)
{
        struct nvme_ns *ns;
        int ret;

        down_read(&ctrl->namespaces_rwsem);
        if (list_empty(&ctrl->namespaces)) {
                ret = -ENOTTY;
                goto out_unlock;
        }

        ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
        if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
                dev_warn(ctrl->device,
                        "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        dev_warn(ctrl->device,
                "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
        kref_get(&ns->kref);
        up_read(&ctrl->namespaces_rwsem);

        ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
        nvme_put_ns(ns);
        return ret;

out_unlock:
        up_read(&ctrl->namespaces_rwsem);
        return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
{
        bool open_for_write = file->f_mode & FMODE_WRITE;
        struct nvme_ctrl *ctrl = file->private_data;
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp, open_for_write);
        case NVME_IOCTL_RESET:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                dev_warn(ctrl->device, "resetting controller\n");
                return nvme_reset_ctrl_sync(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                return nvme_reset_subsystem(ctrl);
        case NVME_IOCTL_RESCAN:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                nvme_queue_scan(ctrl);
                return 0;
        default:
                return -ENOTTY;
        }
}