ARM/dma-mapping: merge IOMMU ops
author Robin Murphy <robin.murphy@arm.com>
Thu, 21 Apr 2022 11:36:59 +0000 (12:36 +0100)
committer Christoph Hellwig <hch@lst.de>
Thu, 7 Jul 2022 16:18:58 +0000 (18:18 +0200)
The dma_sync_* operations are now the only difference between the
coherent and non-coherent IOMMU ops. With some minor tweaks to make
those operations safe for coherent devices at minimal overhead, we
can condense everything down to a single set of DMA ops.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Marc Zyngier <maz@kernel.org>
arch/arm/mm/dma-mapping.c
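
A rough userspace model of the resulting pattern (names such as
fake_device and merged_ops are illustrative stand-ins, not kernel
symbols): a single ops table serves both coherent and non-coherent
devices, and each sync callback returns early when the device is
cache-coherent.

#include <stdbool.h>
#include <stdio.h>

struct fake_device {
	bool dma_coherent;	/* models dev->dma_coherent */
};

/* Sync callback shared by both device types. */
static void fake_sync_for_cpu(struct fake_device *dev)
{
	if (dev->dma_coherent)
		return;		/* coherent: no cache maintenance needed */
	printf("cache maintenance for non-coherent device\n");
}

struct fake_dma_ops {
	void (*sync_for_cpu)(struct fake_device *dev);
};

/* One merged table; the coherency branch lives in the callback. */
static const struct fake_dma_ops merged_ops = {
	.sync_for_cpu = fake_sync_for_cpu,
};

int main(void)
{
	struct fake_device coherent = { .dma_coherent = true };
	struct fake_device noncoherent = { .dma_coherent = false };

	merged_ops.sync_for_cpu(&coherent);	/* early return, no-op */
	merged_ops.sync_for_cpu(&noncoherent);	/* does the work */
	return 0;
}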

index e7ccf7c..e68d1d2 100644
@@ -1341,6 +1341,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
        struct scatterlist *s;
        int i;
 
+       if (dev->dma_coherent)
+               return;
+
        for_each_sg(sg, s, nents, i)
                __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 
@@ -1360,6 +1363,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
        struct scatterlist *s;
        int i;
 
+       if (dev->dma_coherent)
+               return;
+
        for_each_sg(sg, s, nents, i)
                __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
@@ -1493,12 +1499,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
-       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       struct page *page;
        unsigned int offset = handle & ~PAGE_MASK;
 
-       if (!iova)
+       if (dev->dma_coherent || !iova)
                return;
 
+       page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
        __dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -1507,12 +1514,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 {
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
-       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       struct page *page;
        unsigned int offset = handle & ~PAGE_MASK;
 
-       if (!iova)
+       if (dev->dma_coherent || !iova)
                return;
 
+       page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
        __dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
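The two hunks above also move the phys_to_page() lookup so it only
happens after the early-return checks. The handle split they rely on
is plain mask arithmetic; a small standalone example, assuming 4 KiB
pages and a made-up handle value:

#include <stdio.h>

int main(void)
{
	unsigned long page_mask = ~0xfffUL;	/* 4 KiB pages assumed */
	unsigned long handle = 0x12345678UL;	/* hypothetical DMA handle */

	unsigned long iova = handle & page_mask;	/* 0x12345000 */
	unsigned long offset = handle & ~page_mask;	/* 0x678 */

	printf("iova=%#lx offset=%#lx\n", iova, offset);
	return 0;
}
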
@@ -1536,22 +1544,6 @@ static const struct dma_map_ops iommu_ops = {
        .unmap_resource         = arm_iommu_unmap_resource,
 };
 
-static const struct dma_map_ops iommu_coherent_ops = {
-       .alloc          = arm_iommu_alloc_attrs,
-       .free           = arm_iommu_free_attrs,
-       .mmap           = arm_iommu_mmap_attrs,
-       .get_sgtable    = arm_iommu_get_sgtable,
-
-       .map_page       = arm_iommu_map_page,
-       .unmap_page     = arm_iommu_unmap_page,
-
-       .map_sg         = arm_iommu_map_sg,
-       .unmap_sg       = arm_iommu_unmap_sg,
-
-       .map_resource   = arm_iommu_map_resource,
-       .unmap_resource = arm_iommu_unmap_resource,
-};
-
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1750,10 +1742,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
                return;
        }
 
-       if (coherent)
-               set_dma_ops(dev, &iommu_coherent_ops);
-       else
-               set_dma_ops(dev, &iommu_ops);
+       set_dma_ops(dev, &iommu_ops);
 }
 
 static void arm_teardown_iommu_dma_ops(struct device *dev)
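
For contrast, a minimal sketch of the setup-time choice before and
after this commit (struct and function names below are stand-ins,
not the kernel's): previously coherency selected one of two static
tables, whereas now one table is installed unconditionally and the
per-call dev->dma_coherent test does the rest.

#include <stdbool.h>
#include <stdio.h>

struct ops_model { const char *name; };

static const struct ops_model iommu_ops_model = { "iommu_ops" };
static const struct ops_model iommu_coherent_ops_model = { "iommu_coherent_ops" };

/* Before: coherency picked the ops table at setup time. */
static const struct ops_model *setup_before(bool coherent)
{
	return coherent ? &iommu_coherent_ops_model : &iommu_ops_model;
}

/* After: one table regardless; sync callbacks branch internally. */
static const struct ops_model *setup_after(bool coherent)
{
	(void)coherent;
	return &iommu_ops_model;
}

int main(void)
{
	printf("before (coherent): %s\n", setup_before(true)->name);
	printf("after  (coherent): %s\n", setup_after(true)->name);
	return 0;
}

The trade-off is one predictable branch per sync call on coherent
devices, which is the minimal overhead the commit message accepts in
exchange for deleting an entire ops table.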