iommu/dma: Don't remap CMA unnecessarily
Author: Robin Murphy <robin.murphy@arm.com>
Mon, 20 May 2019 07:29:38 +0000 (09:29 +0200)
Committer: Joerg Roedel <jroedel@suse.de>
Mon, 27 May 2019 15:31:11 +0000 (17:31 +0200)
Always remapping CMA allocations was largely a bodge to keep the freeing
logic manageable when it was split between here and an arch wrapper. Now
that it's all together and streamlined, we can relax that limitation.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/dma-iommu.c

index 4134f13..cffd308 100644 (file)
@@ -973,7 +973,6 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 {
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-       pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
        size_t iosize = size;
        struct page *page;
        void *addr;
@@ -1021,13 +1020,19 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
        if (*handle == DMA_MAPPING_ERROR)
                goto out_free_pages;
 
-       addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
-                       __builtin_return_address(0));
-       if (!addr)
-               goto out_unmap;
+       if (!coherent || PageHighMem(page)) {
+               pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 
-       if (!coherent)
-               arch_dma_prep_coherent(page, iosize);
+               addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
+                               __builtin_return_address(0));
+               if (!addr)
+                       goto out_unmap;
+
+               if (!coherent)
+                       arch_dma_prep_coherent(page, iosize);
+       } else {
+               addr = page_address(page);
+       }
        memset(addr, 0, size);
        return addr;
 out_unmap: