drm: omapdrm: Map pages for DMA in DMA_TO_DEVICE direction
Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Thu, 20 Apr 2017 21:33:58 +0000 (00:33 +0300)
Committer: Tomi Valkeinen <tomi.valkeinen@ti.com>
Fri, 2 Jun 2017 07:57:08 +0000 (10:57 +0300)
The display engine only reads from memory, so there is no need to use
bidirectional DMA mappings. Use DMA_TO_DEVICE instead.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c

index 94aef52..461fbb5 100644 (file)
@@ -254,7 +254,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 
                for (i = 0; i < npages; i++) {
                        addrs[i] = dma_map_page(dev->dev, pages[i],
-                                       0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+                                       0, PAGE_SIZE, DMA_TO_DEVICE);
 
                        if (dma_mapping_error(dev->dev, addrs[i])) {
                                dev_warn(dev->dev,
@@ -262,7 +262,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 
                                for (i = i - 1; i >= 0; --i) {
                                        dma_unmap_page(dev->dev, addrs[i],
-                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
+                                               PAGE_SIZE, DMA_TO_DEVICE);
                                }
 
                                ret = -ENOMEM;
@@ -322,7 +322,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
        for (i = 0; i < npages; i++) {
                if (omap_obj->dma_addrs[i])
                        dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
-                                      PAGE_SIZE, DMA_BIDIRECTIONAL);
+                                      PAGE_SIZE, DMA_TO_DEVICE);
        }
 
        kfree(omap_obj->dma_addrs);
@@ -744,7 +744,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
 
        if (omap_obj->dma_addrs[pgoff]) {
                dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
-                               PAGE_SIZE, DMA_BIDIRECTIONAL);
+                               PAGE_SIZE, DMA_TO_DEVICE);
                omap_obj->dma_addrs[pgoff] = 0;
        }
 }
@@ -767,8 +767,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
                        dma_addr_t addr;
 
                        addr = dma_map_page(dev->dev, pages[i], 0,
-                                           PAGE_SIZE, DMA_BIDIRECTIONAL);
-
+                                           PAGE_SIZE, dir);
                        if (dma_mapping_error(dev->dev, addr)) {
                                dev_warn(dev->dev, "%s: failed to map page\n",
                                        __func__);
index a2b9136..6ab6f52 100644 (file)
@@ -210,7 +210,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
 
        get_dma_buf(dma_buf);
 
-       sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+       sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
@@ -227,7 +227,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
        return obj;
 
 fail_unmap:
-       dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+       dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);