// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 */
#include <blk.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <dm/device-internal.h>
#include <linux/compat.h>
#include "nvme.h"
#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_CQ_ALLOCATION	ALIGN(NVME_CQ_SIZE(NVME_Q_DEPTH), \
				      ARCH_DMA_MINALIGN)
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512
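/*
 * This driver submits one command at a time and polls for its completion,
 * so a queue depth of 2 is enough for both the admin and the I/O queue.
 * MAX_PRP_POOL is the initial PRP pool size in bytes (64 eight-byte
 * entries); nvme_setup_prps() grows the pool on demand.
 */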
static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	ulong start;
	int timeout;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;
	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
			return 0;
	}

	return -ETIME;
}
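/*
 * Set up the PRP2 entry for a data transfer. PRP1 (the buffer address) is
 * filled in by the caller; this helper stores 0 in *prp2 when the data fits
 * in one device page, the address of the second page when it fits in two,
 * and otherwise the address of a PRP list describing the remaining pages.
 */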
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = page_size >> 3;
	u32 num_pages;

	length -= (page_size - offset);
	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	dma_addr += (page_size - offset);
	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps, prps_per_page);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages. It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool failed\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = prps_per_page * num_pages;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		/* The last slot of each pool page chains to the next page */
		if (i == ((page_size >> 3) - 1)) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   dev->prp_entry_num * sizeof(u64));

	return 0;
}
static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	/*
	 * Single CQ entries are always smaller than a cache line, so we
	 * can't invalidate them individually. However CQ entries are
	 * read only by the CPU, so it's safe to always invalidate all of
	 * them, as the cache lines should never become dirty.
	 */
	ulong start = (ulong)&nvmeq->cqes[0];
	ulong stop = start + NVME_CQ_ALLOCATION;

	invalidate_dcache_range(start, stop);

	return readw(&(nvmeq->cqes[index].status));
}
/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq:	The queue to use
 * @cmd:	The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	struct nvme_ops *ops;
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	/* Let the transport-specific driver submit the command if it can */
	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
	if (ops && ops->submit_cmd) {
		ops->submit_cmd(nvmeq, cmd);
		return;
	}

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}
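/*
 * Submit a command and busy-wait for its completion. A new completion entry
 * carries the expected phase tag in bit 0 of its status field, so the loop
 * polls until the phase matches (or the timeout expires), then checks the
 * NVMe status code and advances the CQ head, flipping the phase on wrap.
 */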
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	struct nvme_ops *ops;
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();
	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 &&
		    (timer_get_us() - start_time) >= timeout_us)
			return -ETIMEDOUT;
	}

	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
	if (ops && ops->complete_cmd)
		ops->complete_cmd(nvmeq, cmd);

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;
		return -EIO;
	}

	if (result)
		*result = readl(&(nvmeq->cqes[head].result));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}
static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}
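/*
 * Allocate a queue pair. The submission and completion rings are allocated
 * with 4 KiB alignment because the controller expects page-aligned queue
 * base addresses; the CQ allocation is additionally padded to a cache-line
 * multiple so that it can safely be invalidated as a whole.
 */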
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_ops *ops;
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));

	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_ALLOCATION);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_cqes;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	ops = (struct nvme_ops *)dev->udev->driver->ops;
	if (ops && ops->setup_queue)
		ops->setup_queue(nvmeq);

	return nvmeq;

free_cqes:
	free((void *)nvmeq->cqes);
free_nvmeq:
	free(nvmeq);
	return NULL;
}
static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}
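/* Set or clear CC.EN and wait until CSTS.RDY reflects the new state */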
static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];

		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_ALLOCATION);
	dev->online_queues++;
}
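/*
 * Bring up the admin queue: disable the controller, program the admin queue
 * attributes and base addresses (AQA/ASQ/ACQ), select the memory page size
 * and the I/O queue entry sizes in CC, then re-enable the controller and
 * wait for it to report ready.
 */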
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;
	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

free_nvmeq:
	nvme_free_queues(dev, 0);
	return result;
}
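/*
 * Create an I/O completion/submission queue on the controller. The qsize
 * field is zero-based (hence "q_depth - 1") and the submission queue is
 * bound to the completion queue with the same qid.
 */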
static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}
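/*
 * Issue an Identify admin command. The cns value selects the returned data
 * structure (0 = namespace, 1 = controller); the result is DMAed into
 * dma_addr, so the buffer is invalidated around the command to keep the
 * CPU's view of it coherent.
 */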
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	invalidate_dcache_range(dma_addr,
				dma_addr + sizeof(struct nvme_id_ctrl));

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = nvme_submit_admin_cmd(dev, &c, result);

	/*
	 * TODO: Add some cache invalidation when a DMA buffer is involved
	 * in the request, here and before the command gets submitted. The
	 * buffer size varies by feature, and some features use a different
	 * field in the command packet to hold the buffer address.
	 * Section 5.21.1 (Set Features command) in the NVMe specification
	 * details the buffer requirements for each feature.
	 *
	 * At the moment there is no user of this function.
	 */

	return ret;
}
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: Add a cache clean (aka flush) operation when a DMA buffer is
	 * involved in the request. The buffer size varies by feature, and
	 * some features use a different field in the command packet to hold
	 * the buffer address. Section 5.21.1 (Set Features command) in the
	 * NVMe specification details the buffer requirements for each
	 * feature.
	 *
	 * At the moment the only user of this function is not using
	 * any DMA buffer at all.
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);
	return result;

release_sq:
	nvme_delete_sq(dev, qid);
release_cq:
	nvme_delete_cq(dev, qid);
	return result;
}
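/*
 * The Number of Queues feature returns the allocated SQ and CQ counts
 * (both zero-based) in the lower and upper halves of the result, so the
 * number of usable I/O queues is the smaller of the two plus one.
 */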
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
				   q_count, 0, &result);
	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}
static void nvme_create_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues = 1;
	int result;

	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}
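/*
 * Identify the controller and cache the fields this driver needs: namespace
 * count, volatile write cache flag, serial/model/firmware strings and the
 * maximum transfer size derived from MDTS.
 */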
static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts) {
		dev->max_transfer_shift = (ctrl->mdts + shift);
	} else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the
		 * maximum data transfer size between the host and the
		 * controller. The host should not submit a command that
		 * exceeds this transfer size. The value is in units of the
		 * minimum memory page size and is reported as a power of
		 * two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we
		 * have the following algorithm for the maximum number of
		 * logical blocks per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15,
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20, which provides a 1MB transfer size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}
int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}
int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}
static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name);
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = le64_to_cpu(id->nsze);
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	memcpy(desc->vendor, ndev->vendor, sizeof(ndev->vendor));
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));

	free(id);
	return 0;
}
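/*
 * Read or write blkcnt blocks starting at blknr. The request is split into
 * chunks of at most (1 << (max_transfer_shift - lba_shift)) blocks; each
 * chunk gets its PRP entries set up before the command is submitted to the
 * I/O queue, and the data buffer is flushed/invalidated around the transfer
 * as required for DMA coherency.
 */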
static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;
	uintptr_t temp_buffer = (uintptr_t)buffer;
	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)buffer + total_len);

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, temp_buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64(temp_buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
					      &c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= (u32)lbas << ns->lba_shift;
		temp_buffer += lbas << ns->lba_shift;
	}

	if (read)
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + total_len);

	return (total_len - temp_len) >> desc->log2blksz;
}
static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}
static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};
U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto	= sizeof(struct nvme_ns),
};
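/*
 * Controller bring-up, called from the transport-specific probe (e.g. PCI):
 * read CAP, configure the admin queue, allocate the PRP pool once the page
 * size is known, create the I/O queue pair, identify the controller and
 * bind an "nvme-blk" block device for every active namespace.
 */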
int nvme_init(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev);
	struct nvme_id_ns *id;
	int ret;

	ndev->udev = udev;
	INIT_LIST_HEAD(&ndev->namespaces);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: NVMe controller not accessible\n",
		       udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);

	/* Create a blk device for each namespace */
	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id) {
		ret = -ENOMEM;
		goto free_queue;
	}

	for (int i = 1; i <= ndev->nn; i++) {
		struct udevice *ns_udev;
		char name[20];

		memset(id, 0, sizeof(*id));
		if (nvme_identify(ndev, i, 0, (dma_addr_t)(long)id)) {
			ret = -EIO;
			goto free_id;
		}

		/* skip inactive namespaces */
		if (!id->nsze)
			continue;

		/*
		 * Encode the namespace id in the device name so that
		 * we can extract it when doing the probe.
		 */
		sprintf(name, "blk#%d", i);

		/* The real blksz and size will be set by nvme_blk_probe() */
		ret = blk_create_devicef(udev, "nvme-blk", name, IF_TYPE_NVME,
					 -1, 512, 0, &ns_udev);
		if (ret)
			goto free_id;

		ret = blk_probe_or_unbind(ns_udev);
		if (ret)
			goto free_id;
	}

	free(id);
	return 0;

free_id:
	free(id);
free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}
int nvme_shutdown(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev);

	return nvme_disable_ctrl(ndev);
}