dma-direct: add an explicit dma_direct_get_required_mask
authorChristoph Hellwig <hch@lst.de>
Thu, 20 Sep 2018 11:26:13 +0000 (13:26 +0200)
committerChristoph Hellwig <hch@lst.de>
Mon, 1 Oct 2018 14:27:15 +0000 (07:27 -0700)
This is somewhat modelled after the powerpc version, and differs from
the legacy fallback in using fls64 instead of pointlessly splitting up the
address into low and high dwords, and in that it takes (__)phys_to_dma
into account.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
include/linux/dma-direct.h
kernel/dma/direct.c

index 86a59ba..b79496d 100644 (file)
@@ -55,6 +55,7 @@ static inline void dma_mark_clean(void *addr, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
 
+u64 dma_direct_get_required_mask(struct device *dev);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
index c954f0a..f32b33c 100644 (file)
@@ -4,6 +4,7 @@
  *
  * DMA operations that map physical memory directly without using an IOMMU.
  */
+#include <linux/bootmem.h> /* for max_pfn */
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/dma-direct.h>
@@ -53,11 +54,25 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
        return true;
 }
 
+static inline dma_addr_t phys_to_dma_direct(struct device *dev,
+               phys_addr_t phys)
+{
+       if (force_dma_unencrypted())
+               return __phys_to_dma(dev, phys);
+       return phys_to_dma(dev, phys);
+}
+
+u64 dma_direct_get_required_mask(struct device *dev)
+{
+       u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
+
+       return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
+}
+
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-       dma_addr_t addr = force_dma_unencrypted() ?
-               __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
-       return addr + size - 1 <= dev->coherent_dma_mask;
+       return phys_to_dma_direct(dev, phys) + size - 1 <=
+                       dev->coherent_dma_mask;
 }
 
 void *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -296,6 +311,7 @@ const struct dma_map_ops dma_direct_ops = {
        .unmap_page             = dma_direct_unmap_page,
        .unmap_sg               = dma_direct_unmap_sg,
 #endif
+       .get_required_mask      = dma_direct_get_required_mask,
        .dma_supported          = dma_direct_supported,
        .mapping_error          = dma_direct_mapping_error,
        .cache_sync             = arch_dma_cache_sync,