 		memset(padding_start, 0, padding_size);
 	}
 
+	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(phys, org_size, dir);
+
 	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
 	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
 		swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
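
This hunk appears to land in __iommu_dma_map_swiotlb() itself (the org_size/aligned_size locals and the caller change below both point that way): the for-device cache maintenance now runs inside the helper, after any swiotlb bounce but before the IOVA mapping is created. That matters because at this point phys already names the bounce buffer when one is in use, so the flush reaches the memory the device will actually access. A condensed sketch of the resulting order of operations (my paraphrase, not verbatim kernel code):

	/*
	 * 1. bounce the buffer through swiotlb if required, which updates
	 *    the local phys to point at the bounce slot;
	 * 2. for non-coherent devices (unless DMA_ATTR_SKIP_CPU_SYNC is
	 *    set), arch_sync_dma_for_device(phys, org_size, dir);
	 * 3. __iommu_dma_map() to install the IOVA mapping, with swiotlb
	 *    cleanup if that fails.
	 */
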
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
-	dma_addr_t dma_handle;
 
-	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
+	return __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
 			coherent, dir, attrs);
-	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-	    dma_handle != DMA_MAPPING_ERROR)
-		arch_sync_dma_for_device(phys, size, dir);
-	return dma_handle;
 }
 
 static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
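
With the sync folded into the helper, iommu_dma_map_page() reduces to a tail call. The deleted lines also show the original bug, as far as I can tell: the caller synced its own phys, which still pointed at the original page even when the helper had bounced the transfer through swiotlb, so non-coherent DMA flushed the wrong address. The dma_handle != DMA_MAPPING_ERROR guard goes away as well; since the helper now syncs before attempting the mapping, a failed map at worst costs one redundant cache flush.
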
 			goto out;
 	}
 
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
-
 	if (dev_is_untrusted(dev))
 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
+
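
Same reordering for the scatterlist path: the sync used to run before the dev_is_untrusted() check, i.e. on the original sg pages even when the function then took the early return into iommu_dma_map_sg_swiotlb() and bounced every segment. After the move it covers only the direct (non-bounced) path; the swiotlb path presumably gets its maintenance per segment from the helper patched above.
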
 	/*
 	 * Work out how much IOVA space we need, and align the segments to
 	 * IOVA granules for the IOMMU driver to handle. With some clever