// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 */
#include <common.h>
#include <blk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <pci.h>
#include <time.h>
#include <dm/device-internal.h>
#include <linux/compat.h>
#include "nvme.h"
#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define NVME_CQ_ALLOCATION	ALIGN(NVME_CQ_SIZE(NVME_Q_DEPTH), \
				      ARCH_DMA_MINALIGN)
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512
/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 sq_tail;
	u16 cq_head;
	u8 cq_phase;
	s16 cq_vector;
	unsigned long cmdid_data[];
};
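/*
 * Controller enable/disable is acknowledged through the CSTS.RDY bit:
 * poll it until it matches the state requested via CC.EN, bounded by the
 * worst-case ready timeout the controller advertises in the CAP register.
 */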
static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	ulong start, timeout;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;
	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
			return 0;
	}

	return -ETIME;
}
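/*
 * PRP setup for a data transfer: PRP1 (filled in by the caller) covers the
 * buffer up to the first page boundary. If the remainder still spans more
 * than one page, a PRP list is written into dev->prp_pool and *prp2 points
 * at it, with the last slot of each pool page chaining to the next page;
 * otherwise *prp2 is simply the address of the second page.
 */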
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = page_size >> 3;
	u32 num_pages;

	length -= (page_size - offset);
	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps, prps_per_page);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages. It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool fail\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = prps_per_page * num_pages;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		if (i == ((page_size >> 3) - 1)) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
						      page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   dev->prp_entry_num * sizeof(u64));

	return 0;
}
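/*
 * Commands are issued one at a time and completed synchronously, so a
 * simple wrapping counter is sufficient for generating command IDs.
 */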
static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	/*
	 * Single CQ entries are always smaller than a cache line, so we
	 * can't invalidate them individually. However CQ entries are
	 * read only by the CPU, so it's safe to always invalidate all of
	 * them, as the cache line should never become dirty.
	 */
	ulong start = (ulong)&nvmeq->cqes[0];
	ulong stop = start + NVME_CQ_ALLOCATION;

	invalidate_dcache_range(start, stop);

	return readw(&(nvmeq->cqes[index].status));
}
/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq: The queue to use
 * @cmd: The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}
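/*
 * Submit a command and busy-wait for its completion: a CQ entry is new when
 * its phase bit matches the phase expected for the current pass over the
 * ring. The expected phase flips every time the head wraps, and the CQ head
 * doorbell is written once the entry has been consumed.
 */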
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;
		return -EIO;
	}

	if (result)
		*result = readl(&(nvmeq->cqes[head].result));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return 0;
}
static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}
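/*
 * Queue memory must be physically contiguous and page aligned, since
 * ASQ/ACQ and the Create I/O Queue commands take a plain base address
 * rather than a PRP list; hence the memalign(4096, ...) calls below.
 */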
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));

	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_ALLOCATION);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_cqes;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

free_cqes:
	free((void *)nvmeq->cqes);
free_nvmeq:
	free(nvmeq);
	return NULL;
}
static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}
static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];

		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_ALLOCATION);
	dev->online_queues++;
}
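/*
 * Admin queue bring-up: the controller has to be disabled before AQA, ASQ
 * and ACQ are programmed, and is only re-enabled once the page size,
 * arbitration scheme and queue entry sizes have been set in CC.
 */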
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}
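/*
 * An I/O completion queue must exist before the submission queue that posts
 * to it, so nvme_create_queue() issues Create I/O CQ first and then Create
 * I/O SQ with its cqid pointing at the new CQ.
 */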
static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}
static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}
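/*
 * Identify transfers a 4 KiB data structure. When the caller's buffer is
 * not page aligned the data crosses a page boundary, so PRP2 is pointed at
 * the start of the following page.
 */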
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	invalidate_dcache_range(dma_addr,
				dma_addr + sizeof(struct nvme_id_ctrl));

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = nvme_submit_admin_cmd(dev, &c, result);

	/*
	 * TODO: Add some cache invalidation when a DMA buffer is involved
	 * in the request, here and before the command gets submitted. The
	 * buffer size varies by feature, also some features use a different
	 * field in the command packet to hold the buffer address.
	 * Section 5.21.1 (Set Features command) in the NVMe specification
	 * details the buffer requirements for each feature.
	 *
	 * At the moment there is no user of this function.
	 */

	return ret;
}
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: Add a cache clean (aka flush) operation when a DMA buffer is
	 * involved in the request. The buffer size varies by feature, also
	 * some features use a different field in the command packet to hold
	 * the buffer address. Section 5.21.1 (Set Features command) in the
	 * NVMe specification details the buffer requirements for each
	 * feature.
	 *
	 * At the moment the only user of this function is not using
	 * any DMA buffer at all.
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

release_sq:
	nvme_delete_sq(dev, qid);
release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}
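/*
 * The Number of Queues feature takes zero-based queue counts, packed with
 * the requested submission queues in the low 16 bits and completion queues
 * in the high 16 bits; the controller returns what it actually allocated
 * in the same format, hence the min() + 1 below.
 */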
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
				   q_count, 0, &result);
	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}
static void nvme_create_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues = 1;
	int result;

	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}
static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts) {
		dev->max_transfer_shift = (ctrl->mdts + shift);
	} else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the
		 * maximum data transfer size between the host and the
		 * controller. The host should not submit a command that
		 * exceeds this transfer size. The value is in units of the
		 * minimum memory page size and is reported as a power of
		 * two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we have
		 * the following algorithm for maximum number of logic blocks
		 * per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}
int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}
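/*
 * Probe every controller in the NVMe uclass; nvme_probe() then binds one
 * "nvme-blk" child per active namespace, which is how the block devices
 * become visible to the rest of U-Boot.
 */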
int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}
static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct pci_child_plat *pplat;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name);
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = le64_to_cpu(id->nsze);
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	pplat = dev_get_parent_plat(udev->parent);
	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));

	free(id);
	return 0;
}
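/*
 * Read/write path: the request is split into chunks of at most
 * (1 << (max_transfer_shift - lba_shift)) logical blocks; each chunk gets
 * its own PRP setup and a synchronous submission on the I/O queue.
 */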
static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;
	/* advance a scratch pointer so the dcache ops see the original buffer */
	uintptr_t temp_buffer = (uintptr_t)buffer;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)buffer + total_len);

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, temp_buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64(temp_buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
					      &c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= (u32)lbas << ns->lba_shift;
		temp_buffer += lbas << ns->lba_shift;
	}

	invalidate_dcache_range((unsigned long)buffer,
				(unsigned long)buffer + total_len);

	return (total_len - temp_len) >> desc->log2blksz;
}
static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}
static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto	= sizeof(struct nvme_ns),
};
static int nvme_bind(struct udevice *udev)
{
	static int ndev_num;
	char name[20];

	sprintf(name, "nvme#%d", ndev_num++);

	return device_set_name(udev, name);
}
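/*
 * Controller probe sequence: map BAR0, sanity-check CSTS, read CAP, bring
 * up the admin queue, allocate the PRP pool (the page size is only known
 * after that), create the I/O queue pair, read the Identify Controller
 * data and finally bind an "nvme-blk" block device per active namespace.
 */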
static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);
	struct nvme_id_ns *id;

	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
				   PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);

	/* Create a blk device for each namespace */
	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id) {
		ret = -ENOMEM;
		goto free_queue;
	}

	for (int i = 1; i <= ndev->nn; i++) {
		struct udevice *ns_udev;
		char name[20];

		memset(id, 0, sizeof(*id));
		if (nvme_identify(ndev, i, 0, (dma_addr_t)(long)id)) {
			ret = -EIO;
			goto free_id;
		}

		/* skip inactive namespace */
		if (!id->nsze)
			continue;

		/*
		 * Encode the namespace id to the device name so that
		 * we can extract it when doing the probe.
		 */
		sprintf(name, "blk#%d", i);

		/* The real blksz and size will be set by nvme_blk_probe() */
		ret = blk_create_devicef(udev, "nvme-blk", name, IF_TYPE_NVME,
					 -1, 512, 0, &ns_udev);
		if (ret)
			goto free_id;
	}

	free(id);
	return 0;

free_id:
	free(id);
free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}
U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto	= sizeof(struct nvme_dev),
};

struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);
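/*
 * Rough usage sketch (not part of this driver; it assumes the standard
 * U-Boot block API): scan for controllers after PCI enumeration and read
 * a few sectors from the first namespace.
 *
 *	struct blk_desc *desc;
 *	char buf[4096];
 *
 *	if (!nvme_scan_namespace() &&
 *	    blk_get_device_by_str("nvme", "0", &desc) >= 0)
 *		blk_dread(desc, 0, sizeof(buf) / desc->blksz, buf);
 */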