Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64...
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4a14b25..351f759 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -211,7 +211,8 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
        dma_addr_t dev_addr;
 
        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
-       if (!is_device_dma_coherent(dev))
+       if (!is_device_dma_coherent(dev) &&
+           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 
        return dev_addr;
@@ -222,7 +223,8 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-       if (!is_device_dma_coherent(dev))
+       if (!is_device_dma_coherent(dev) &&
+           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
 }
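
With both hooks honouring the attribute, a driver that recycles its DMA buffers can opt out of the automatic cache maintenance on map/unmap and issue targeted syncs itself. Below is a minimal sketch of that pattern using the generic DMA API; the function name and the dev/buf/buf_len/pkt_len parameters are hypothetical, not part of this patch:

        /* Sketch: recycle an RX buffer without full-buffer cache syncs.
         * All names here are illustrative driver state.
         */
        static int rx_buf_example(struct device *dev, void *buf,
                                  size_t buf_len, size_t pkt_len)
        {
                dma_addr_t handle;

                handle = dma_map_single_attrs(dev, buf, buf_len, DMA_FROM_DEVICE,
                                              DMA_ATTR_SKIP_CPU_SYNC);
                if (dma_mapping_error(dev, handle))
                        return -ENOMEM;

                /* ... hardware DMAs pkt_len bytes into buf ... */

                /* Sync only the region the CPU will actually read. */
                dma_sync_single_for_cpu(dev, handle, pkt_len, DMA_FROM_DEVICE);

                /* Skip the sync on unmap too; a real driver would call
                 * dma_sync_single_for_device() before handing the buffer
                 * back to the hardware for reuse.
                 */
                dma_unmap_single_attrs(dev, handle, buf_len, DMA_FROM_DEVICE,
                                       DMA_ATTR_SKIP_CPU_SYNC);
                return 0;
        }
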
@@ -235,7 +237,8 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
        int i, ret;
 
        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
-       if (!is_device_dma_coherent(dev))
+       if (!is_device_dma_coherent(dev) &&
+           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
@@ -251,7 +254,8 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
        struct scatterlist *sg;
        int i;
 
-       if (!is_device_dma_coherent(dev))
+       if (!is_device_dma_coherent(dev) &&
+           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
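
The scatter-gather paths honour the attribute the same way. A brief sketch with the sg variants of the API (sg_example() and its sgl/nents parameters are hypothetical):

        /* Sketch: map a scatterlist with cache maintenance deferred to
         * explicit sync calls. Error handling is abbreviated.
         */
        static int sg_example(struct device *dev, struct scatterlist *sgl,
                              int nents)
        {
                if (!dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE,
                                      DMA_ATTR_SKIP_CPU_SYNC))
                        return -ENOMEM;

                /* Make the dirtied segments visible to the device; sync
                 * calls take the original nents, not the mapped count.
                 */
                dma_sync_sg_for_device(dev, sgl, nents, DMA_TO_DEVICE);

                /* ... device reads the buffers ... */

                dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE,
                                   DMA_ATTR_SKIP_CPU_SYNC);
                return 0;
        }
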
@@ -352,6 +356,13 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
        return 1;
 }
 
+static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
+{
+       if (swiotlb)
+               return swiotlb_dma_mapping_error(hwdev, addr);
+       return 0;
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
@@ -366,7 +377,7 @@ static struct dma_map_ops swiotlb_dma_ops = {
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = __swiotlb_dma_supported,
-       .mapping_error = swiotlb_dma_mapping_error,
+       .mapping_error = __swiotlb_dma_mapping_error,
 };
 
 static int __init atomic_pool_init(void)
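
The new wrapper consults this file's swiotlb flag, which is set only when the SWIOTLB bounce buffer was actually initialized. Without the guard, swiotlb_dma_mapping_error() would compare the handle against a never-allocated overflow buffer and could presumably misreport a valid mapping as failed. Drivers reach it indirectly through dma_mapping_error(), as in this small sketch (map_check() and its parameters are hypothetical):

        /* Sketch: drivers test handles via dma_mapping_error(), which
         * dispatches to the ops' ->mapping_error, i.e. the wrapper above.
         */
        static int map_check(struct device *dev, void *buf, size_t len)
        {
                dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

                if (dma_mapping_error(dev, addr))
                        return -ENOMEM;
                dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
                return 0;
        }
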
@@ -830,14 +841,21 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
         * then the IOMMU core will have already configured a group for this
         * device, and allocated the default domain for that group.
         */
-       if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
-               pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
-                       dev_name(dev));
-               return false;
+       if (!domain)
+               goto out_err;
+
+       if (domain->type == IOMMU_DOMAIN_DMA) {
+               if (iommu_dma_init_domain(domain, dma_base, size, dev))
+                       goto out_err;
+
+               dev->archdata.dma_ops = &iommu_dma_ops;
        }
 
-       dev->archdata.dma_ops = &iommu_dma_ops;
        return true;
+out_err:
+       pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+                dev_name(dev));
+       return false;
 }
 
 static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
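
Note the behavioural change hidden in the restructuring: iommu_dma_ops are now installed only when the device's default domain has type IOMMU_DOMAIN_DMA, while a device behind, say, a passthrough domain keeps the platform SWIOTLB ops yet do_iommu_attach() still returns true. One hypothetical way to observe which path a device took, using the generic iommu_get_domain_for_dev() accessor (the function and messages below are illustrative, not from this patch):

        /* Sketch: report which DMA path a device ended up with. */
        static void report_dma_path(struct device *dev)
        {
                struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

                if (domain && domain->type == IOMMU_DOMAIN_DMA)
                        dev_info(dev, "using IOMMU-backed DMA ops\n");
                else
                        dev_info(dev, "using default (SWIOTLB) DMA ops\n");
        }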