dmaengine: IOATDMA: Convert pci_pool_* to dma_pool_*
author	Dave Jiang <dave.jiang@intel.com>
Wed, 10 Feb 2016 22:00:21 +0000 (15:00 -0700)
committer	Vinod Koul <vinod.koul@intel.com>
Mon, 15 Feb 2016 17:36:53 +0000 (23:06 +0530)
Converting old pci_pool_* calls to "new" dma_pool_* to make everything
uniform.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
drivers/dma/ioat/dma.c
drivers/dma/ioat/dma.h
drivers/dma/ioat/init.c

index 1d5df2e..7a04c16 100644 (file)
@@ -298,14 +298,14 @@ ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
        dma_addr_t phys;
 
        ioat_dma = to_ioatdma_device(chan->device);
-       hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
+       hw = dma_pool_alloc(ioat_dma->dma_pool, flags, &phys);
        if (!hw)
                return NULL;
        memset(hw, 0, sizeof(*hw));
 
        desc = kmem_cache_zalloc(ioat_cache, flags);
        if (!desc) {
-               pci_pool_free(ioat_dma->dma_pool, hw, phys);
+               dma_pool_free(ioat_dma->dma_pool, hw, phys);
                return NULL;
        }
 
@@ -321,7 +321,7 @@ void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
        struct ioatdma_device *ioat_dma;
 
        ioat_dma = to_ioatdma_device(chan->device);
-       pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
+       dma_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
        kmem_cache_free(ioat_cache, desc);
 }
 
index b8f4807..f471092 100644 (file)
@@ -76,8 +76,8 @@ enum ioat_irq_mode {
 struct ioatdma_device {
        struct pci_dev *pdev;
        void __iomem *reg_base;
-       struct pci_pool *dma_pool;
-       struct pci_pool *completion_pool;
+       struct dma_pool *dma_pool;
+       struct dma_pool *completion_pool;
 #define MAX_SED_POOLS  5
        struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
        struct dma_device dma_dev;
index 4ef0c5e..b02b63b 100644 (file)
@@ -505,7 +505,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
        struct device *dev = &pdev->dev;
 
        /* DMA coherent memory pool for DMA descriptor allocations */
-       ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+       ioat_dma->dma_pool = dma_pool_create("dma_desc_pool", dev,
                                             sizeof(struct ioat_dma_descriptor),
                                             64, 0);
        if (!ioat_dma->dma_pool) {
@@ -513,7 +513,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
                goto err_dma_pool;
        }
 
-       ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+       ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
                                                    sizeof(u64),
                                                    SMP_CACHE_BYTES,
                                                    SMP_CACHE_BYTES);
@@ -546,9 +546,9 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
 err_self_test:
        ioat_disable_interrupts(ioat_dma);
 err_setup_interrupts:
-       pci_pool_destroy(ioat_dma->completion_pool);
+       dma_pool_destroy(ioat_dma->completion_pool);
 err_completion_pool:
-       pci_pool_destroy(ioat_dma->dma_pool);
+       dma_pool_destroy(ioat_dma->dma_pool);
 err_dma_pool:
        return err;
 }
@@ -559,8 +559,8 @@ static int ioat_register(struct ioatdma_device *ioat_dma)
 
        if (err) {
                ioat_disable_interrupts(ioat_dma);
-               pci_pool_destroy(ioat_dma->completion_pool);
-               pci_pool_destroy(ioat_dma->dma_pool);
+               dma_pool_destroy(ioat_dma->completion_pool);
+               dma_pool_destroy(ioat_dma->dma_pool);
        }
 
        return err;
@@ -576,8 +576,8 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
 
        dma_async_device_unregister(dma);
 
-       pci_pool_destroy(ioat_dma->dma_pool);
-       pci_pool_destroy(ioat_dma->completion_pool);
+       dma_pool_destroy(ioat_dma->dma_pool);
+       dma_pool_destroy(ioat_dma->completion_pool);
 
        INIT_LIST_HEAD(&dma->channels);
 }
@@ -669,7 +669,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
        kfree(ioat_chan->ring);
        ioat_chan->ring = NULL;
        ioat_chan->alloc_order = 0;
-       pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+       dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
                      ioat_chan->completion_dma);
        spin_unlock_bh(&ioat_chan->prep_lock);
        spin_unlock_bh(&ioat_chan->cleanup_lock);
@@ -701,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion =
-               pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+               dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
                               GFP_KERNEL, &ioat_chan->completion_dma);
        if (!ioat_chan->completion)
                return -ENOMEM;