nvme-pci: Pass the queue to SQ_SIZE/CQ_SIZE macros

author     Benjamin Herrenschmidt <benh@kernel.crashing.org>
           Wed, 7 Aug 2019 07:51:19 +0000 (17:51 +1000)
committer  Sagi Grimberg <sagi@grimberg.me>
           Thu, 29 Aug 2019 19:55:02 +0000 (12:55 -0700)
This will make it easier to handle variable queue entry sizes
later. No functional change.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
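
For context, the shape of the change: SQ_SIZE/CQ_SIZE stop taking a raw
depth and instead take the queue, so every call site already has whatever
per-queue sizing state later patches may add. The new form also
parenthesizes its argument, which the old depth-based macro did not. A
minimal sketch of the direction this enables; the _OLD/_NEW/_VARIABLE
suffixes are editorial (for side-by-side comparison), and the sqes
log2-entry-size field with the shifted macro is an assumption about a
follow-up, not part of this commit:

	/* Before: size is a pure function of the caller-supplied depth. */
	#define SQ_SIZE_OLD(depth)	(depth * sizeof(struct nvme_command))

	/* After this commit: size is derived from the queue itself. */
	#define SQ_SIZE_NEW(q)		((q)->q_depth * sizeof(struct nvme_command))

	/*
	 * Hypothetical follow-up (assumption, not in this commit): once the
	 * queue carries a per-queue log2 entry size, only the macro body
	 * changes; no call site needs to grow another argument.
	 */
	#define SQ_SIZE_VARIABLE(q)	((q)->q_depth << (q)->sqes)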
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index bf54b12..a09e6c4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,8 +28,8 @@
 #include "trace.h"
 #include "nvme.h"
 
-#define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
-#define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
+#define SQ_SIZE(q)     ((q)->q_depth * sizeof(struct nvme_command))
+#define CQ_SIZE(q)     ((q)->q_depth * sizeof(struct nvme_completion))
 
 #define SGES_PER_PAGE  (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
@@ -1344,16 +1344,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
-       dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
+       dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
                                (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
        if (!nvmeq->sq_cmds)
                return;
 
        if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
                pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
-                               nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+                               nvmeq->sq_cmds, SQ_SIZE(nvmeq));
        } else {
-               dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
+               dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
                                nvmeq->sq_cmds, nvmeq->sq_dma_addr);
        }
 }
@@ -1433,12 +1433,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 }
 
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
-                               int qid, int depth)
+                               int qid)
 {
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
        if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-               nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+               nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
                if (nvmeq->sq_cmds) {
                        nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
                                                        nvmeq->sq_cmds);
@@ -1447,11 +1447,11 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
                                return 0;
                        }
 
-                       pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+                       pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
                }
        }
 
-       nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+       nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
                                &nvmeq->sq_dma_addr, GFP_KERNEL);
        if (!nvmeq->sq_cmds)
                return -ENOMEM;
@@ -1465,12 +1465,13 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
        if (dev->ctrl.queue_count > qid)
                return 0;
 
-       nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+       nvmeq->q_depth = depth;
+       nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
                                         &nvmeq->cq_dma_addr, GFP_KERNEL);
        if (!nvmeq->cqes)
                goto free_nvmeq;
 
-       if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
+       if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
                goto free_cqdma;
 
        nvmeq->dev = dev;
@@ -1479,15 +1480,14 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-       nvmeq->q_depth = depth;
        nvmeq->qid = qid;
        dev->ctrl.queue_count++;
 
        return 0;
 
  free_cqdma:
-       dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
-                                                       nvmeq->cq_dma_addr);
+       dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
+                         nvmeq->cq_dma_addr);
  free_nvmeq:
        return -ENOMEM;
 }
@@ -1515,7 +1515,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-       memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+       memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
        nvme_dbbuf_init(dev, nvmeq, qid);
        dev->online_queues++;
        wmb(); /* ensure the first interrupt sees the initialization */
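
One ordering subtlety in nvme_alloc_queue() above: because CQ_SIZE(nvmeq)
now reads nvmeq->q_depth, the depth must be stored in the queue before the
first allocation, which is why the nvmeq->q_depth assignment moves from the
tail of the function to just before dma_alloc_coherent(). A reduced sketch
of the constraint (field names from the patch; surrounding code simplified
and error handling elided):

	/*
	 * Wrong with the new macros: q_depth is still zero (or stale)
	 * when CQ_SIZE(nvmeq) expands, so the allocation is mis-sized.
	 */
	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
	nvmeq->q_depth = depth;

	/*
	 * Right (what the patch does): populate the field the macro
	 * reads before any SQ_SIZE()/CQ_SIZE() user runs.
	 */
	nvmeq->q_depth = depth;
	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);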