From fd1496a0fc77f955317d6ca511f66a9e788e7e02 Mon Sep 17 00:00:00 2001
From: Alexandre Courbot
Date: Thu, 31 Jul 2014 18:09:42 +0900
Subject: [PATCH] drm/nouveau: map pages using DMA API

The DMA API is the recommended way to map pages no matter what the
underlying bus is. Use the DMA functions for page mapping and remove
currently existing wrappers.

Signed-off-by: Alexandre Courbot
Acked-by: Daniel Vetter
Signed-off-by: Ben Skeggs
---
 drivers/gpu/drm/nouveau/core/engine/device/base.c  | 25 ----------------------
 drivers/gpu/drm/nouveau/core/include/core/device.h |  6 ------
 drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c      |  7 ++++--
 drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c      |  7 ++++--
 drivers/gpu/drm/nouveau/nouveau_bo.c               | 22 +++++++++++++------
 5 files changed, 26 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 466dda2..6c16dab 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -487,31 +487,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
         }
 }
 
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page)
-{
-        dma_addr_t ret;
-
-        if (nv_device_is_pci(device)) {
-                ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
-                                   PCI_DMA_BIDIRECTIONAL);
-                if (pci_dma_mapping_error(device->pdev, ret))
-                        ret = 0;
-        } else {
-                ret = page_to_phys(page);
-        }
-
-        return ret;
-}
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
-{
-        if (nv_device_is_pci(device))
-                pci_unmap_page(device->pdev, addr, PAGE_SIZE,
-                               PCI_DMA_BIDIRECTIONAL);
-}
-
 int
 nv_device_get_irq(struct nouveau_device *device, bool stall)
 {
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 9ce2ee9..03c039d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -174,12 +174,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
 resource_size_t
 nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
 
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page);
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);
-
 int
 nv_device_get_irq(struct nouveau_device *device, bool stall);
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 1fc55c1..7d88e17 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -250,7 +250,9 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
         priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
         if (priv->r100c08_page) {
-                priv->r100c08 = nv_device_map_page(device, priv->r100c08_page);
+                priv->r100c08 = dma_map_page(nv_device_base(device),
+                                             priv->r100c08_page, 0, PAGE_SIZE,
+                                             DMA_BIDIRECTIONAL);
                 if (!priv->r100c08)
                         nv_warn(priv, "failed 0x100c08 page map\n");
         } else {
@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object)
         struct nv50_fb_priv *priv = (void *)object;
 
         if (priv->r100c08_page) {
-                nv_device_unmap_page(device, priv->r100c08);
+                dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
+                               DMA_BIDIRECTIONAL);
                 __free_page(priv->r100c08_page);
         }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 0670ae3..9f5f3ac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object)
         struct nvc0_fb_priv *priv = (void *)object;
 
         if (priv->r100c10_page) {
-                nv_device_unmap_page(device, priv->r100c10);
+                dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
+                               DMA_BIDIRECTIONAL);
                 __free_page(priv->r100c10_page);
         }
 
@@ -93,7 +94,9 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
         priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
         if (priv->r100c10_page) {
-                priv->r100c10 = nv_device_map_page(device, priv->r100c10_page);
+                priv->r100c10 = dma_map_page(nv_device_base(device),
+                                             priv->r100c10_page, 0, PAGE_SIZE,
+                                             DMA_BIDIRECTIONAL);
                 if (!priv->r100c10)
                         return -EFAULT;
         }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d349078..a405b75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1340,6 +1340,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
         struct nouveau_drm *drm;
         struct nouveau_device *device;
         struct drm_device *dev;
+        struct device *pdev;
         unsigned i;
         int r;
         bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1358,6 +1359,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
         drm = nouveau_bdev(ttm->bdev);
         device = nv_device(drm->device);
         dev = drm->dev;
+        pdev = nv_device_base(device);
 
 #if __OS_HAS_AGP
         if (drm->agp.stat == ENABLED) {
@@ -1377,17 +1379,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
         }
 
         for (i = 0; i < ttm->num_pages; i++) {
-                ttm_dma->dma_address[i] = nv_device_map_page(device,
-                                                             ttm->pages[i]);
-                if (!ttm_dma->dma_address[i]) {
+                dma_addr_t addr;
+
+                addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+                                    DMA_BIDIRECTIONAL);
+
+                if (dma_mapping_error(pdev, addr)) {
                         while (--i) {
-                                nv_device_unmap_page(device,
-                                                     ttm_dma->dma_address[i]);
+                                dma_unmap_page(pdev, ttm_dma->dma_address[i],
+                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                                 ttm_dma->dma_address[i] = 0;
                         }
                         ttm_pool_unpopulate(ttm);
                         return -EFAULT;
                 }
+
+                ttm_dma->dma_address[i] = addr;
         }
         return 0;
 }
@@ -1399,6 +1406,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
         struct nouveau_drm *drm;
         struct nouveau_device *device;
         struct drm_device *dev;
+        struct device *pdev;
         unsigned i;
         bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
@@ -1408,6 +1416,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
         drm = nouveau_bdev(ttm->bdev);
         device = nv_device(drm->device);
         dev = drm->dev;
+        pdev = nv_device_base(device);
 
 #if __OS_HAS_AGP
         if (drm->agp.stat == ENABLED) {
@@ -1425,7 +1434,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 
         for (i = 0; i < ttm->num_pages; i++) {
                 if (ttm_dma->dma_address[i]) {
-                        nv_device_unmap_page(device, ttm_dma->dma_address[i]);
+                        dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+                                       DMA_BIDIRECTIONAL);
                 }
         }
 
-- 
2.7.4
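For reference, the mapping pattern the patch switches to looks roughly like the
sketch below. It is illustrative only and not part of the patch: the
example_map_pages()/example_unmap_pages() helpers and their parameters are
hypothetical stand-ins, assuming a generic struct device such as the one the
patch obtains via nv_device_base(). Note that failures are detected with
dma_mapping_error() rather than by comparing the returned handle against 0,
since 0 can be a valid DMA address on some platforms.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only: map an array of pages for bidirectional DMA
 * and roll the mappings back on failure, mirroring the pattern used in
 * nouveau_ttm_tt_populate() above.
 */
static int example_map_pages(struct device *dev, struct page **pages,
                             dma_addr_t *dma_addrs, unsigned int npages)
{
        unsigned int i;

        for (i = 0; i < npages; i++) {
                dma_addr_t addr;

                addr = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
                                    DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, addr)) {
                        /* Unmap everything mapped so far, then bail out. */
                        while (i--)
                                dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE,
                                               DMA_BIDIRECTIONAL);
                        return -EFAULT;
                }

                dma_addrs[i] = addr;
        }

        return 0;
}

static void example_unmap_pages(struct device *dev, dma_addr_t *dma_addrs,
                                unsigned int npages)
{
        unsigned int i;

        for (i = 0; i < npages; i++)
                dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
}

On PCI devices this takes the same mapping paths pci_map_page() did, while on
non-PCI (platform) devices it goes through the bus's DMA constraints instead of
handing back a raw page_to_phys() address, which is the rationale given in the
commit message for dropping the wrappers.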