dma-mapping: consolidate the dma mmap implementations
authorChristoph Hellwig <hch@lst.de>
Tue, 11 Sep 2018 06:55:28 +0000 (08:55 +0200)
committerChristoph Hellwig <hch@lst.de>
Thu, 20 Sep 2018 07:01:16 +0000 (09:01 +0200)
The only functional difference (modulo a few missing fixes in the arch
code) is that architectures without coherent caches need a hook to
convert a virtual or dma address into a pfn, given that we don't have
the kernel linear mapping available for the otherwise easy virt_to_page
call.  As a side effect we can support mmap of the per-device coherent
area even on architectures not providing the callback, and we make
previously dangerous default method dma_common_mmap actually safe for
non-coherent architectures by rejecting it without the right helper.

In addition to that we need a hook so that some architectures can
override the protection bits when mmapping a dma coherent allocation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Paul Burton <paul.burton@mips.com> # MIPS parts
16 files changed:
arch/arc/Kconfig
arch/arc/mm/dma.c
arch/arm/mm/dma-mapping-nommu.c
arch/microblaze/Kconfig
arch/microblaze/include/asm/pgtable.h
arch/microblaze/kernel/dma.c
arch/microblaze/mm/consistent.c
arch/mips/Kconfig
arch/mips/jazz/jazzdma.c
arch/mips/mm/dma-noncoherent.c
drivers/xen/swiotlb-xen.c
include/linux/dma-mapping.h
include/linux/dma-noncoherent.h
kernel/dma/Kconfig
kernel/dma/direct.c
kernel/dma/mapping.c

index ca03694..3d9bdec 100644 (file)
@@ -9,6 +9,7 @@
 config ARC
        def_bool y
        select ARC_TIMERS
+       select ARCH_HAS_DMA_COHERENT_TO_PFN
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -18,7 +19,6 @@ config ARC
        select CLONE_BACKWARDS
        select COMMON_CLK
        select DMA_DIRECT_OPS
-       select DMA_NONCOHERENT_MMAP
        select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
        select GENERIC_CLOCKEVENTS
        select GENERIC_FIND_FIRST_BIT
index 535ed4a..db203ff 100644 (file)
@@ -84,29 +84,10 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
        __free_pages(page, get_order(size));
 }
 
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-               void *cpu_addr, dma_addr_t dma_addr, size_t size,
-               unsigned long attrs)
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+               dma_addr_t dma_addr)
 {
-       unsigned long user_count = vma_pages(vma);
-       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned long pfn = __phys_to_pfn(dma_addr);
-       unsigned long off = vma->vm_pgoff;
-       int ret = -ENXIO;
-
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-               return ret;
-
-       if (off < count && user_count <= (count - off)) {
-               ret = remap_pfn_range(vma, vma->vm_start,
-                                     pfn + off,
-                                     user_count << PAGE_SHIFT,
-                                     vma->vm_page_prot);
-       }
-
-       return ret;
+       return __phys_to_pfn(dma_addr);
 }
 
 /*
index 0ad156f..712416e 100644 (file)
@@ -91,7 +91,7 @@ static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                return ret;
 
-       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
 
index 0f48ab6..164a485 100644 (file)
@@ -1,6 +1,7 @@
 config MICROBLAZE
        def_bool y
        select ARCH_NO_SWAP
+       select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -12,7 +13,6 @@ config MICROBLAZE
        select CLONE_BACKWARDS3
        select COMMON_CLK
        select DMA_DIRECT_OPS
-       select DMA_NONCOHERENT_MMAP
        select GENERIC_ATOMIC64
        select GENERIC_CLOCKEVENTS
        select GENERIC_CPU_DEVICES
index 7b650ab..f64ebb9 100644 (file)
@@ -553,8 +553,6 @@ void __init *early_get_page(void);
 
 extern unsigned long ioremap_bot, ioremap_base;
 
-unsigned long consistent_virt_to_pfn(void *vaddr);
-
 void setup_memory(void);
 #endif /* __ASSEMBLY__ */
 
index 71032cf..a89c2d4 100644 (file)
@@ -42,25 +42,3 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 {
        __dma_sync(dev, paddr, size, dir);
 }
-
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-               void *cpu_addr, dma_addr_t handle, size_t size,
-               unsigned long attrs)
-{
-#ifdef CONFIG_MMU
-       unsigned long user_count = vma_pages(vma);
-       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned long off = vma->vm_pgoff;
-       unsigned long pfn;
-
-       if (off >= count || user_count > (count - off))
-               return -ENXIO;
-
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       pfn = consistent_virt_to_pfn(cpu_addr);
-       return remap_pfn_range(vma, vma->vm_start, pfn + off,
-                              vma->vm_end - vma->vm_start, vma->vm_page_prot);
-#else
-       return -ENXIO;
-#endif
-}
index c9a278a..d801cc5 100644 (file)
@@ -165,7 +165,8 @@ static pte_t *consistent_virt_to_pte(void *vaddr)
        return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
 }
 
-unsigned long consistent_virt_to_pfn(void *vaddr)
+long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
+               dma_addr_t dma_addr)
 {
        pte_t *ptep = consistent_virt_to_pte(vaddr);
 
index 96da6e3..77c022e 100644 (file)
@@ -1116,10 +1116,11 @@ config DMA_PERDEV_COHERENT
 
 config DMA_NONCOHERENT
        bool
+       select ARCH_HAS_DMA_MMAP_PGPROT
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select NEED_DMA_MAP_STATE
-       select DMA_NONCOHERENT_MMAP
+       select ARCH_HAS_DMA_COHERENT_TO_PFN
        select DMA_NONCOHERENT_CACHE_SYNC
 
 config SYS_HAS_EARLY_PRINTK
index bb49dfa..0a0aaf3 100644 (file)
@@ -682,7 +682,6 @@ static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 const struct dma_map_ops jazz_dma_ops = {
        .alloc                  = jazz_dma_alloc,
        .free                   = jazz_dma_free,
-       .mmap                   = arch_dma_mmap,
        .map_page               = jazz_dma_map_page,
        .unmap_page             = jazz_dma_unmap_page,
        .map_sg                 = jazz_dma_map_sg,
index b01b9a3..e6c9485 100644 (file)
@@ -66,33 +66,19 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
        dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
 }
 
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-               void *cpu_addr, dma_addr_t dma_addr, size_t size,
-               unsigned long attrs)
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+               dma_addr_t dma_addr)
 {
-       unsigned long user_count = vma_pages(vma);
-       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = CAC_ADDR((unsigned long)cpu_addr);
-       unsigned long off = vma->vm_pgoff;
-       unsigned long pfn = page_to_pfn(virt_to_page((void *)addr));
-       int ret = -ENXIO;
+       return page_to_pfn(virt_to_page((void *)addr));
+}
 
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+               unsigned long attrs)
+{
        if (attrs & DMA_ATTR_WRITE_COMBINE)
-               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       else
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-               return ret;
-
-       if (off < count && user_count <= (count - off)) {
-               ret = remap_pfn_range(vma, vma->vm_start,
-                                     pfn + off,
-                                     user_count << PAGE_SHIFT,
-                                     vma->vm_page_prot);
-       }
-
-       return ret;
+               return pgprot_writecombine(prot);
+       return pgprot_noncached(prot);
 }
 
 static inline void dma_sync_virt(void *addr, size_t size,
index a6f9ba8..470757d 100644 (file)
@@ -662,7 +662,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
                                                    dma_addr, size, attrs);
 #endif
-       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
 /*
index 8f20011..c3378d4 100644 (file)
@@ -444,7 +444,8 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 }
 
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs);
 
 void *dma_common_contiguous_remap(struct page *page, size_t size,
                        unsigned long vm_flags,
@@ -476,7 +477,7 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
        BUG_ON(!ops);
        if (ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
index 3f50302..9051b05 100644 (file)
@@ -24,9 +24,15 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+               dma_addr_t dma_addr);
+
+#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
                unsigned long attrs);
+#else
+# define arch_dma_mmap_pgprot(dev, prot, attrs)        pgprot_noncached(prot)
+#endif
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
index 5617c9a..645c7a2 100644 (file)
@@ -29,13 +29,15 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU
 config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
        bool
 
-config DMA_DIRECT_OPS
+config ARCH_HAS_DMA_COHERENT_TO_PFN
        bool
-       depends on HAS_DMA
 
-config DMA_NONCOHERENT_MMAP
+config ARCH_HAS_DMA_MMAP_PGPROT
        bool
-       depends on DMA_DIRECT_OPS
+
+config DMA_DIRECT_OPS
+       bool
+       depends on HAS_DMA
 
 config DMA_NONCOHERENT_CACHE_SYNC
        bool
index 09e85f6..c954f0a 100644 (file)
@@ -155,16 +155,6 @@ void dma_direct_free(struct device *dev, size_t size,
                dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
 }
 
-static int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
-               void *cpu_addr, dma_addr_t dma_addr, size_t size,
-               unsigned long attrs)
-{
-       if (!dev_is_dma_coherent(dev) &&
-           IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP))
-               return arch_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
 static void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
@@ -293,7 +283,6 @@ int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
 const struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc,
        .free                   = dma_direct_free,
-       .mmap                   = dma_direct_mmap,
        .map_page               = dma_direct_map_page,
        .map_sg                 = dma_direct_map_sg,
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
index 3540cb3..42fd73a 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <linux/acpi.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include <linux/of_device.h>
@@ -220,27 +220,37 @@ EXPORT_SYMBOL(dma_common_get_sgtable);
  * Create userspace mapping for the DMA-coherent memory.
  */
 int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs)
 {
-       int ret = -ENXIO;
 #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
+       unsigned long pfn;
+       int ret = -ENXIO;
 
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
 
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
-       if (off < count && user_count <= (count - off))
-               ret = remap_pfn_range(vma, vma->vm_start,
-                                     page_to_pfn(virt_to_page(cpu_addr)) + off,
-                                     user_count << PAGE_SHIFT,
-                                     vma->vm_page_prot);
-#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
+       if (off >= count || user_count > count - off)
+               return -ENXIO;
+
+       if (!dev_is_dma_coherent(dev)) {
+               if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
+                       return -ENXIO;
+               pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+       } else {
+               pfn = page_to_pfn(virt_to_page(cpu_addr));
+       }
 
-       return ret;
+       return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+                       user_count << PAGE_SHIFT, vma->vm_page_prot);
+#else
+       return -ENXIO;
+#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
 }
 EXPORT_SYMBOL(dma_common_mmap);