	dma_addr_t phys;
	ioat_dma = to_ioatdma_device(chan->device);
-	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
+	hw = dma_pool_alloc(ioat_dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));
	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc) {
-		pci_pool_free(ioat_dma->dma_pool, hw, phys);
+		dma_pool_free(ioat_dma->dma_pool, hw, phys);
		return NULL;
	}
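The call keeps its shape: dma_pool_alloc() takes the same pool/flags/handle arguments the PCI wrapper did. Since the buffer is zeroed immediately after allocation, the alloc-plus-memset pair could be collapsed with dma_pool_zalloc(); a possible follow-up cleanup, not part of this conversion:

	hw = dma_pool_zalloc(ioat_dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;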
	struct ioatdma_device *ioat_dma;
	ioat_dma = to_ioatdma_device(chan->device);
-	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
+	dma_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat_cache, desc);
}
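Both ends of a descriptor's lifetime now go through <linux/dmapool.h>. For reference, the prototypes involved:

	void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
			     dma_addr_t *handle);
	void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma);

dma_pool_free() must be handed back the exact vaddr/handle pair that dma_pool_alloc() returned, which is why the DMA address travels with the descriptor in desc->txd.phys alongside desc->hw.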
	struct device *dev = &pdev->dev;
	/* DMA coherent memory pool for DMA descriptor allocations */
-	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+	ioat_dma->dma_pool = dma_pool_create("dma_desc_pool", dev,
					     sizeof(struct ioat_dma_descriptor),
					     64, 0);
	if (!ioat_dma->dma_pool) {
		goto err_dma_pool;
	}
-	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);
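The creation sites are the only ones where an argument actually changes: dma_pool_create() takes the generic struct device embedded in the PCI device, hence the dev = &pdev->dev alias above. The trailing arguments are unchanged: block size, minimum alignment, and the boundary allocations must not cross (0 meaning no restriction). Everything else in this patch is a mechanical rename, because the pci_pool_* names were never a separate allocator; until their removal, <linux/pci.h> defined them as thin wrappers over the dma_pool_* functions, roughly:

	#define pci_pool dma_pool
	#define pci_pool_create(name, pdev, size, align, allocation) \
			dma_pool_create(name, &pdev->dev, size, align, allocation)
	#define pci_pool_destroy(pool) dma_pool_destroy(pool)
	#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
	#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)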
err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
-	pci_pool_destroy(ioat_dma->completion_pool);
+	dma_pool_destroy(ioat_dma->completion_pool);
err_completion_pool:
-	pci_pool_destroy(ioat_dma->dma_pool);
+	dma_pool_destroy(ioat_dma->dma_pool);
err_dma_pool:
	return err;
}
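The error labels follow the usual kernel goto-unwind idiom: each target releases only what was set up before the failure point, in reverse order, so a failed completion_pool creation lands at err_completion_pool and tears down just the descriptor pool. Note that since v4.3, dma_pool_destroy(), like kfree(), also tolerates a NULL pool, which keeps ladders like this forgiving.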
	if (err) {
		ioat_disable_interrupts(ioat_dma);
-		pci_pool_destroy(ioat_dma->completion_pool);
-		pci_pool_destroy(ioat_dma->dma_pool);
+		dma_pool_destroy(ioat_dma->completion_pool);
+		dma_pool_destroy(ioat_dma->dma_pool);
	}
	return err;
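The same interrupts-then-pools teardown reappears here, on the path taken when device registration fails, mirroring the probe error ladder above.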
	dma_async_device_unregister(dma);
-	pci_pool_destroy(ioat_dma->dma_pool);
-	pci_pool_destroy(ioat_dma->completion_pool);
+	dma_pool_destroy(ioat_dma->dma_pool);
+	dma_pool_destroy(ioat_dma->completion_pool);
	INIT_LIST_HEAD(&dma->channels);
}
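In the removal path the pools are destroyed only after dma_async_device_unregister(), so no channel can still be handing out descriptors. dma_pool_destroy() expects every block to have been returned via dma_pool_free() beforehand and will complain about any still outstanding:

	void dma_pool_destroy(struct dma_pool *pool);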
	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
-	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
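Returning the completion area with dma_pool_free() while cleanup_lock and prep_lock are still held is fine: the function only moves the block back onto the pool's free list under the pool's own spinlock and does not sleep.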
	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
-		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+		dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
			       GFP_KERNEL, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;
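The second comment refers to what the driver does next with completion_dma: the 64-bit DMA address is programmed into the channel's completion register as two 32-bit MMIO writes, roughly as follows (adapted from the surrounding ioatdma code; the register offsets come from drivers/dma/ioat/registers.h):

	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);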