	if (ep->dma) {
		struct net2280_dma	*td;
-		td = pci_pool_alloc(ep->dev->requests, gfp_flags,
+		td = dma_pool_alloc(ep->dev->requests, gfp_flags,
				&req->td_dma);
		if (!td) {
			kfree(req);
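
The hunk above is representative of most of the patch: a pure rename. That is because the pci_pool_* helpers were thin macro wrappers over the dma_pool API in include/linux/pci.h, along these lines (paraphrased from memory, so treat the exact text as an assumption; only the create wrapper did any real work, supplying &pdev->dev):

/* Approximation of the old wrappers in include/linux/pci.h; the
 * rename is safe because each one forwards directly to dma_pool_*. */
#define pci_pool dma_pool
#define pci_pool_create(name, pdev, size, align, allocation) \
		dma_pool_create(name, &pdev->dev, size, align, allocation)
#define pci_pool_destroy(pool)			dma_pool_destroy(pool)
#define pci_pool_alloc(pool, flags, handle)	dma_pool_alloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr)	dma_pool_free(pool, vaddr, addr)
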
	req = container_of(_req, struct net2280_request, req);
	WARN_ON(!list_empty(&req->queue));
	if (req->td)
-		pci_pool_free(ep->dev->requests, req->td, req->td_dma);
+		dma_pool_free(ep->dev->requests, req->td, req->td_dma);
	kfree(req);
}
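
One contract worth spelling out for the free path above: dma_pool_free() must be handed the same CPU pointer and DMA handle that dma_pool_alloc() returned, which is why the driver carries req->td and req->td_dma around as a pair. A minimal self-contained sketch (demo_roundtrip and its parameter names are illustrative, not from the driver):

#include <linux/dmapool.h>

/* Illustrative round trip: allocate one block from a pool, then
 * return it using the same (vaddr, handle) pair. */
static int demo_roundtrip(struct dma_pool *pool)
{
	dma_addr_t handle;
	void *vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);

	if (!vaddr)
		return -ENOMEM;
	/* ... the device would consume the block via "handle" here ... */
	dma_pool_free(pool, vaddr, handle);
	return 0;
}
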
		for (i = 1; i < 5; i++) {
			if (!dev->ep[i].dummy)
				continue;
-			pci_pool_free(dev->requests, dev->ep[i].dummy,
+			dma_pool_free(dev->requests, dev->ep[i].dummy,
					dev->ep[i].td_dma);
		}
-		pci_pool_destroy(dev->requests);
+		dma_pool_destroy(dev->requests);
	}
	if (dev->got_irq)
		free_irq(pdev->irq, dev);
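
The ordering in this remove-path hunk matters: every live block has to go back via dma_pool_free() before dma_pool_destroy(), since destroying a pool with outstanding blocks triggers a kernel warning. A condensed sketch of the same shutdown pattern, with hypothetical array names:

/* Hypothetical condensation of the cleanup above: return each
 * outstanding block, then destroy the now-empty pool. */
static void demo_teardown(struct dma_pool *pool, void *block[],
		dma_addr_t handle[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (block[i])
			dma_pool_free(pool, block[i], handle[i]);
	dma_pool_destroy(pool);
}
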
	/* DMA setup */
	/* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
-	dev->requests = pci_pool_create("requests", pdev,
+	dev->requests = dma_pool_create("requests", &pdev->dev,
		sizeof(struct net2280_dma),
		0 /* no alignment requirements */,
		0 /* or page-crossing issues */);
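
This is the one hunk in the conversion that is more than a rename: dma_pool_create() takes a generic struct device, so the caller now passes &pdev->dev explicitly instead of letting the old pci_pool_create() wrapper do it. For reference, the dma_pool_create() signature from <linux/dmapool.h>:

struct dma_pool *dma_pool_create(const char *name, struct device *dev,
		size_t size, size_t align, size_t boundary);

An align of 0 means no alignment constraint beyond the allocator's natural one, and a boundary of 0 means returned blocks may cross any boundary, matching the two zero arguments and their comments above.
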
	for (i = 1; i < 5; i++) {
		struct net2280_dma	*td;
-		td = pci_pool_alloc(dev->requests, GFP_KERNEL,
+		td = dma_pool_alloc(dev->requests, GFP_KERNEL,
				&dev->ep[i].td_dma);
		if (!td) {
			ep_dbg(dev, "can't get dummy %d\n", i);
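
The hunk breaks off at the debug message, so what follows is not in the patch context; as a hedged sketch, the usual continuation for a probe-time allocation failure like this would be recording -ENOMEM and jumping to the cleanup label, which unwinds through the remove-style teardown shown earlier (the retval and done names are assumptions):

		if (!td) {
			ep_dbg(dev, "can't get dummy %d\n", i);
			/* Assumed unwind, mirroring common probe style:
			 * record the error and release what probe built. */
			retval = -ENOMEM;
			goto done;
		}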