Merge branch 'for-next-arm-dma' into for-linus
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 153f555..ea6b431 100644
@@ -21,6 +21,8 @@
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 #include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
 #include <asm/mach/map.h>
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
 
 #include "mm.h"
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ */
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+               size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+               size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+       return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a streaming mode DMA translation of a page.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+                                     handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned int offset = handle & (PAGE_SIZE - 1);
+       struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned int offset = handle & (PAGE_SIZE - 1);
+       struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+       .alloc                  = arm_dma_alloc,
+       .free                   = arm_dma_free,
+       .mmap                   = arm_dma_mmap,
+       .map_page               = arm_dma_map_page,
+       .unmap_page             = arm_dma_unmap_page,
+       .map_sg                 = arm_dma_map_sg,
+       .unmap_sg               = arm_dma_unmap_sg,
+       .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
+       .sync_single_for_device = arm_dma_sync_single_for_device,
+       .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_dma_sync_sg_for_device,
+       .set_dma_mask           = arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
        u64 mask = (u64)arm_dma_limit;
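
With arm_dma_ops registered, drivers keep calling the generic DMA API; the
generic layer looks up the per-device dma_map_ops and forwards to the methods
above. A minimal sketch of that dispatch, assuming the 3.4-era inline wrappers
(example_dma_map_page is an illustrative name, not the in-tree inline):

    /* Sketch of the generic dispatch path: look up the device's
     * dma_map_ops and forward to ->map_page(), which resolves to
     * arm_dma_map_page() (or an IOMMU-aware variant) on ARM. */
    static inline dma_addr_t example_dma_map_page(struct device *dev,
                    struct page *page, unsigned long offset, size_t size,
                    enum dma_data_direction dir)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            BUG_ON(!valid_dma_direction(dir));
            return ops->map_page(dev, page, offset, size, dir, NULL);
    }

This is also why the BUG_ON(!valid_dma_direction(dir)) disappears from
arm_dma_map_sg() later in this patch: the direction check now happens once,
in the generic entry point.
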
@@ -69,9 +171,11 @@ static void __dma_clear_buffer(struct page *page, size_t size)
         * lurking in the kernel direct-mapped region is invalidated.
         */
        ptr = page_address(page);
-       memset(ptr, 0, size);
-       dmac_flush_range(ptr, ptr + size);
-       outer_flush_range(__pa(ptr), __pa(ptr) + size);
+       if (ptr) {
+               memset(ptr, 0, size);
+               dmac_flush_range(ptr, ptr + size);
+               outer_flush_range(__pa(ptr), __pa(ptr) + size);
+       }
 }
 
 /*
@@ -164,8 +268,10 @@ static int __init consistent_init(void)
        unsigned long base = consistent_base;
        unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
+#ifndef CONFIG_ARM_DMA_USE_IOMMU
        if (cpu_architecture() >= CPU_ARCH_ARMv6)
                return 0;
+#endif
 
        consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
        if (!consistent_pte) {
@@ -181,14 +287,14 @@ static int __init consistent_init(void)
 
                pud = pud_alloc(&init_mm, pgd, base);
                if (!pud) {
-                       printk(KERN_ERR "%s: no pud tables\n", __func__);
+                       pr_err("%s: no pud tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
 
                pmd = pmd_alloc(&init_mm, pud, base);
                if (!pmd) {
-                       printk(KERN_ERR "%s: no pmd tables\n", __func__);
+                       pr_err("%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
@@ -196,7 +302,7 @@ static int __init consistent_init(void)
 
                pte = pte_alloc_kernel(pmd, base);
                if (!pte) {
-                       printk(KERN_ERR "%s: no pte tables\n", __func__);
+                       pr_err("%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
@@ -311,7 +417,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
        int bit;
 
        if (!consistent_pte) {
-               printk(KERN_ERR "%s: not initialised\n", __func__);
+               pr_err("%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
        }
@@ -338,7 +444,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
                u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
                pte = consistent_pte[idx] + off;
-               c->vm_pages = page;
+               c->priv = page;
 
                do {
                        BUG_ON(!pte_none(*pte));
@@ -370,14 +476,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 
        c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
        if (!c) {
-               printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+               pr_err("%s: trying to free invalid coherent area: %p\n",
                       __func__, cpu_addr);
                dump_stack();
                return;
        }
 
        if ((c->vm_end - c->vm_start) != size) {
-               printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+               pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
@@ -399,8 +505,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
                }
 
                if (pte_none(pte) || !pte_present(pte))
-                       printk(KERN_CRIT "%s: bad page in kernel page table\n",
-                              __func__);
+                       pr_crit("%s: bad page in kernel page table\n",
+                               __func__);
        } while (size -= PAGE_SIZE);
 
        flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -524,12 +630,21 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+       prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+                           pgprot_writecombine(prot) :
+                           pgprot_dmacoherent(prot);
+       return prot;
+}
+
 #define nommu() 0
 
 #else  /* !CONFIG_MMU */
 
 #define nommu() 1
 
+#define __get_dma_pgprot(attrs, prot)  __pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)     NULL
 #define __alloc_from_pool(dev, size, ret_page, c)              NULL
 #define __alloc_from_contiguous(dev, size, prot, ret)          NULL
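
__get_dma_pgprot() folds the old coherent/writecombine split into one
attrs-driven helper. A hedged sketch of the caller side, using the 3.4-era
struct dma_attrs interface (alloc_wc_buffer and its parameters are
illustrative):

    /* Illustrative: request a write-combined buffer; DMA_ATTR_WRITE_COMBINE
     * steers __get_dma_pgprot() to pgprot_writecombine(). */
    static void *alloc_wc_buffer(struct device *dev, size_t size,
                                 dma_addr_t *dma)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
            return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, &attrs);
    }

dma_alloc_writecombine() survives as a header wrapper that sets exactly this
attribute, which is why the open-coded variant is deleted further down.
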
@@ -584,7 +699,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
         */
        gfp &= ~(__GFP_COMP);
 
-       *handle = ~0;
+       *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);
 
        if (arch_is_coherent() || nommu())
@@ -606,39 +721,34 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
-                        gfp_t gfp)
+void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+                   gfp_t gfp, struct dma_attrs *attrs)
 {
+       pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
        void *memory;
 
        if (dma_alloc_from_coherent(dev, size, handle, &memory))
                return memory;
 
-       return __dma_alloc(dev, size, handle, gfp,
-                          pgprot_dmacoherent(pgprot_kernel),
+       return __dma_alloc(dev, size, handle, gfp, prot,
                           __builtin_return_address(0));
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
- * Allocate a writecombining region, in much the same way as
- * dma_alloc_coherent above.
+ * Create userspace mapping for the DMA-coherent memory.
  */
-void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
-       return __dma_alloc(dev, size, handle, gfp,
-                          pgprot_writecombine(pgprot_kernel),
-                          __builtin_return_address(0));
-}
-EXPORT_SYMBOL(dma_alloc_writecombine);
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
-                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                struct dma_attrs *attrs)
 {
        int ret = -ENXIO;
 #ifdef CONFIG_MMU
        unsigned long pfn = dma_to_pfn(dev, dma_addr);
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
        ret = remap_pfn_range(vma, vma->vm_start,
                              pfn + vma->vm_pgoff,
                              vma->vm_end - vma->vm_start,
@@ -648,27 +758,11 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
        return ret;
 }
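
Since arm_dma_mmap() now derives vm_page_prot from attrs itself, the separate
coherent/writecombine mmap entry points below become redundant wrappers. A
sketch of a hypothetical driver's mmap fop on top of the new path (my_drv and
its fields are placeholders):

    /* Hypothetical fops->mmap: hand a coherent buffer to userspace.
     * cpu_addr/dma_handle/buf_size were recorded at allocation time. */
    static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct my_drv *drv = file->private_data;

            return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
                                     drv->dma_handle, drv->buf_size);
    }
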
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-                         void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
-
 /*
  * Free a buffer as defined by the above mapping.
  */
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                 dma_addr_t handle, struct dma_attrs *attrs)
 {
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
@@ -692,48 +786,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
                __free_from_contiguous(dev, page, size);
        }
 }
-EXPORT_SYMBOL(dma_free_coherent);
-
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       unsigned long paddr;
-
-       BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-       dmac_map_area(kaddr, size, dir);
-
-       paddr = __pa(kaddr);
-       if (dir == DMA_FROM_DEVICE) {
-               outer_inv_range(paddr, paddr + size);
-       } else {
-               outer_clean_range(paddr, paddr + size);
-       }
-       /* FIXME: non-speculating: flush on bidirectional mappings? */
-}
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-       /* FIXME: non-speculating: not required */
-       /* don't bother invalidating if DMA to device */
-       if (dir != DMA_TO_DEVICE) {
-               unsigned long paddr = __pa(kaddr);
-               outer_inv_range(paddr, paddr + size);
-       }
-
-       dmac_unmap_area(kaddr, size, dir);
-}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
        size_t size, enum dma_data_direction dir,
@@ -779,7 +831,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
        } while (left);
 }
 
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
 {
        unsigned long paddr;
@@ -794,9 +852,8 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
        }
        /* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
 {
        unsigned long paddr = page_to_phys(page) + off;
@@ -814,10 +871,9 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
        if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
                set_bit(PG_dcache_clean, &page->flags);
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map
@@ -832,32 +888,32 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
  * Device ownership issues as mentioned for dma_map_single are the same
  * here.
  */
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i, j;
 
-       BUG_ON(!valid_dma_direction(dir));
-
        for_each_sg(sg, s, nents, i) {
-               s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
-                                               s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+               s->dma_length = s->length;
+#endif
+               s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+                                               s->length, dir, attrs);
                if (dma_mapping_error(dev, s->dma_address))
                        goto bad_mapping;
        }
-       debug_dma_map_sg(dev, sg, nents, nents, dir);
        return nents;
 
  bad_mapping:
        for_each_sg(sg, s, i, j)
-               __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+               ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
        return 0;
 }
-EXPORT_SYMBOL(dma_map_sg);
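
Driver-facing usage of the scatterlist API is unchanged by the indirection;
only the implementation behind ops->map_page varies. An illustrative sketch
(sg_table already built; program_hw_descriptor() is a hypothetical hardware
helper):

    /* Illustrative: map an sg_table for a device read, program the
     * hardware with the resulting DMA segments, then unmap. */
    static int my_map_and_run(struct device *dev, struct sg_table *sgt)
    {
            struct scatterlist *s;
            int nents, i;

            nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
            if (nents == 0)
                    return -ENOMEM;  /* arm_dma_map_sg() returns 0 on error */

            for_each_sg(sgt->sgl, s, nents, i)
                    program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));

            dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
            return 0;
    }
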
 
 /**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -866,70 +922,55 @@ EXPORT_SYMBOL(dma_map_sg);
  * Unmap a set of streaming mode DMA translations.  Again, CPU access
  * rules concerning calls here are the same as for dma_unmap_single().
  */
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i;
 
-       debug_dma_unmap_sg(dev, sg, nents, dir);
-
        for_each_sg(sg, s, nents, i)
-               __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+               ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
 /**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i;
 
-       for_each_sg(sg, s, nents, i) {
-               if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-                                           sg_dma_len(s), dir))
-                       continue;
-
-               __dma_page_dev_to_cpu(sg_page(s), s->offset,
-                                     s->length, dir);
-       }
-
-       debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+       for_each_sg(sg, s, nents, i)
+               ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+                                        dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
 /**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i;
 
-       for_each_sg(sg, s, nents, i) {
-               if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
-                                       sg_dma_len(s), dir))
-                       continue;
-
-               __dma_page_cpu_to_dev(sg_page(s), s->offset,
-                                     s->length, dir);
-       }
-
-       debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+       for_each_sg(sg, s, nents, i)
+               ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+                                           dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -945,18 +986,15 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 {
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
 
-#ifndef CONFIG_DMABOUNCE
        *dev->dma_mask = dma_mask;
-#endif
 
        return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
@@ -969,3 +1007,679 @@ static int __init dma_debug_do_init(void)
        return 0;
 }
 fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+                                     size_t size)
+{
+       unsigned int order = get_order(size);
+       unsigned int align = 0;
+       unsigned int count, start;
+       unsigned long flags;
+
+       count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+                (1 << mapping->order) - 1) >> mapping->order;
+
+       if (order > mapping->order)
+               align = (1 << (order - mapping->order)) - 1;
+
+       spin_lock_irqsave(&mapping->lock, flags);
+       start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+                                          count, align);
+       if (start > mapping->bits) {
+               spin_unlock_irqrestore(&mapping->lock, flags);
+               return DMA_ERROR_CODE;
+       }
+
+       bitmap_set(mapping->bitmap, start, count);
+       spin_unlock_irqrestore(&mapping->lock, flags);
+
+       return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
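
The allocator above hands out IOVA space in units of 2^order pages; both the
requested count and the alignment are scaled accordingly. A worked example
with assumed values:

    /* Assumed: PAGE_SIZE = 4K, mapping->order = 1 (8K allocation units),
     * request size = 12K (3 pages):
     *   count = (3 + (1 << 1) - 1) >> 1 = 2 units
     *   order = get_order(12K) = 2 > mapping->order, so
     *   align = (1 << (2 - 1)) - 1 = 1  ->  start rounds to an even unit
     * and the IOVA returned is base + (start << (1 + PAGE_SHIFT)). */
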
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+                              dma_addr_t addr, size_t size)
+{
+       unsigned int start = (addr - mapping->base) >>
+                            (mapping->order + PAGE_SHIFT);
+       unsigned int count = ((size >> PAGE_SHIFT) +
+                             (1 << mapping->order) - 1) >> mapping->order;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mapping->lock, flags);
+       bitmap_clear(mapping->bitmap, start, count);
+       spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+       struct page **pages;
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i = 0;
+
+       if (array_size <= PAGE_SIZE)
+               pages = kzalloc(array_size, gfp);
+       else
+               pages = vzalloc(array_size);
+       if (!pages)
+               return NULL;
+
+       while (count) {
+               int j, order = __ffs(count);
+
+               pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+               while (!pages[i] && order)
+                       pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+               if (!pages[i])
+                       goto error;
+
+               if (order)
+                       split_page(pages[i], order);
+               j = 1 << order;
+               while (--j)
+                       pages[i + j] = pages[i] + j;
+
+               __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+               i += 1 << order;
+               count -= 1 << order;
+       }
+
+       return pages;
+error:
+       while (i--)
+               if (pages[i])
+                       __free_pages(pages[i], 0);
+       if (array_size <= PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return NULL;
+}
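
__iommu_alloc_buffer() opportunistically grabs the largest power-of-two chunk
the remaining count allows (via __ffs), then split_page()s it so every page
can later be freed individually. A worked example with an assumed 6-page
buffer:

    /* Assumed count = 6 pages:
     *   pass 1: __ffs(6) = 1 -> order-1 (2-page) block, i = 2, count = 4
     *   pass 2: __ffs(4) = 2 -> order-2 (4-page) block, i = 6, count = 0
     * If a high-order allocation fails, the order is walked down towards 0
     * before giving up. */
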
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i;
+       for (i = 0; i < count; i++)
+               if (pages[i])
+                       __free_pages(pages[i], 0);
+       if (array_size <= PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+{
+       struct arm_vmregion *c;
+       size_t align;
+       size_t count = size >> PAGE_SHIFT;
+       int bit;
+
+       if (!consistent_pte[0]) {
+               pr_err("%s: not initialised\n", __func__);
+               dump_stack();
+               return NULL;
+       }
+
+       /*
+        * Align the virtual region allocation - maximum alignment is
+        * a section size, minimum is a page size.  This helps reduce
+        * fragmentation of the DMA space, and also prevents allocations
+        * smaller than a section from crossing a section boundary.
+        */
+       bit = fls(size - 1);
+       if (bit > SECTION_SHIFT)
+               bit = SECTION_SHIFT;
+       align = 1 << bit;
+
+       /*
+        * Allocate a virtual address in the consistent mapping region.
+        */
+       c = arm_vmregion_alloc(&consistent_head, align, size,
+                           gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+       if (c) {
+               pte_t *pte;
+               int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+               int i = 0;
+               u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+               pte = consistent_pte[idx] + off;
+               c->priv = pages;
+
+               do {
+                       BUG_ON(!pte_none(*pte));
+
+                       set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+                       pte++;
+                       off++;
+                       i++;
+                       if (off >= PTRS_PER_PTE) {
+                               off = 0;
+                               pte = consistent_pte[++idx];
+                       }
+               } while (i < count);
+
+               dsb();
+
+               return (void *)c->vm_start;
+       }
+       return NULL;
+}
+
+/*
+ * Create a mapping in the device IO address space for the specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       dma_addr_t dma_addr, iova;
+       int i, ret;
+
+       dma_addr = __alloc_iova(mapping, size);
+       if (dma_addr == DMA_ERROR_CODE)
+               return dma_addr;
+
+       iova = dma_addr;
+       for (i = 0; i < count; ) {
+               unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+               phys_addr_t phys = page_to_phys(pages[i]);
+               unsigned int len, j;
+
+               for (j = i + 1; j < count; j++, next_pfn++)
+                       if (page_to_pfn(pages[j]) != next_pfn)
+                               break;
+
+               len = (j - i) << PAGE_SHIFT;
+               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               if (ret < 0)
+                       goto fail;
+               iova += len;
+               i = j;
+       }
+       return dma_addr;
+fail:
+       iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+       __free_iova(mapping, dma_addr, size);
+       return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+       /*
+        * add optional in-page offset from iova to size and align
+        * result to page size
+        */
+       size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+       iova &= PAGE_MASK;
+
+       iommu_unmap(mapping->domain, iova, size);
+       __free_iova(mapping, iova, size);
+       return 0;
+}
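
The normalization at the top of __iommu_remove_mapping() widens a possibly
page-unaligned request to whole pages before unmapping. A worked example with
assumed values:

    /* Assumed: iova = 0x10000850, size = 0x1000:
     *   in-page offset = iova & ~PAGE_MASK        = 0x850
     *   size = PAGE_ALIGN(0x850 + 0x1000)         = 0x2000 (two pages)
     *   iova &= PAGE_MASK                         = 0x10000000
     * so the full two-page range backing the buffer is unmapped and its
     * IOVA space returned to the bitmap. */
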
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+           dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+       pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+       struct page **pages;
+       void *addr = NULL;
+
+       *handle = DMA_ERROR_CODE;
+       size = PAGE_ALIGN(size);
+
+       pages = __iommu_alloc_buffer(dev, size, gfp);
+       if (!pages)
+               return NULL;
+
+       *handle = __iommu_create_mapping(dev, pages, size);
+       if (*handle == DMA_ERROR_CODE)
+               goto err_buffer;
+
+       addr = __iommu_alloc_remap(pages, size, gfp, prot);
+       if (!addr)
+               goto err_mapping;
+
+       return addr;
+
+err_mapping:
+       __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+       __iommu_free_buffer(dev, pages, size);
+       return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+                   void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                   struct dma_attrs *attrs)
+{
+       struct arm_vmregion *c;
+
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+       if (c) {
+               struct page **pages = c->priv;
+
+               unsigned long uaddr = vma->vm_start;
+               unsigned long usize = vma->vm_end - vma->vm_start;
+               int i = 0;
+
+               do {
+                       int ret;
+
+                       ret = vm_insert_page(vma, uaddr, pages[i++]);
+                       if (ret) {
+                               pr_err("Remapping memory, error: %d\n", ret);
+                               return ret;
+                       }
+
+                       uaddr += PAGE_SIZE;
+                       usize -= PAGE_SIZE;
+               } while (usize > 0);
+       }
+       return 0;
+}
+
+/*
+ * Free a buffer as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+                         dma_addr_t handle, struct dma_attrs *attrs)
+{
+       struct arm_vmregion *c;
+       size = PAGE_ALIGN(size);
+
+       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+       if (c) {
+               struct page **pages = c->priv;
+               __dma_free_remap(cpu_addr, size);
+               __iommu_remove_mapping(dev, handle, size);
+               __iommu_free_buffer(dev, pages, size);
+       }
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+                         size_t size, dma_addr_t *handle,
+                         enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova, iova_base;
+       int ret = 0;
+       unsigned int count;
+       struct scatterlist *s;
+
+       size = PAGE_ALIGN(size);
+       *handle = DMA_ERROR_CODE;
+
+       iova_base = iova = __alloc_iova(mapping, size);
+       if (iova == DMA_ERROR_CODE)
+               return -ENOMEM;
+
+       for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+               phys_addr_t phys = page_to_phys(sg_page(s));
+               unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+               if (!arch_is_coherent())
+                       __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               if (ret < 0)
+                       goto fail;
+               count += len >> PAGE_SHIFT;
+               iova += len;
+       }
+       *handle = iova_base;
+
+       return 0;
+fail:
+       iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+       __free_iova(mapping, iova_base, size);
+       return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+                    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       struct scatterlist *s = sg, *dma = sg, *start = sg;
+       int i, count = 0;
+       unsigned int offset = s->offset;
+       unsigned int size = s->offset + s->length;
+       unsigned int max = dma_get_max_seg_size(dev);
+
+       for (i = 1; i < nents; i++) {
+               s = sg_next(s);
+
+               s->dma_address = DMA_ERROR_CODE;
+               s->dma_length = 0;
+
+               if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+                       if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+                           dir) < 0)
+                               goto bad_mapping;
+
+                       dma->dma_address += offset;
+                       dma->dma_length = size - offset;
+
+                       size = offset = s->offset;
+                       start = s;
+                       dma = sg_next(dma);
+                       count += 1;
+               }
+               size += s->length;
+       }
+       if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+               goto bad_mapping;
+
+       dma->dma_address += offset;
+       dma->dma_length = size - offset;
+
+       return count + 1;
+
+bad_mapping:
+       for_each_sg(sg, s, count, i)
+               __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+       return 0;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+                       enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               if (sg_dma_len(s))
+                       __iommu_remove_mapping(dev, sg_dma_address(s),
+                                              sg_dma_len(s));
+               if (!arch_is_coherent())
+                       __dma_page_dev_to_cpu(sg_page(s), s->offset,
+                                             s->length, dir);
+       }
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                       int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i)
+               if (!arch_is_coherent())
+                       __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                       int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i)
+               if (!arch_is_coherent())
+                       __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t dma_addr;
+       int ret, len = PAGE_ALIGN(size + offset);
+
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+
+       dma_addr = __alloc_iova(mapping, len);
+       if (dma_addr == DMA_ERROR_CODE)
+               return dma_addr;
+
+       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+       if (ret < 0)
+               goto fail;
+
+       return dma_addr + offset;
+fail:
+       __free_iova(mapping, dma_addr, len);
+       return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       int offset = handle & ~PAGE_MASK;
+       int len = PAGE_ALIGN(size + offset);
+
+       if (!iova)
+               return;
+
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+
+       iommu_unmap(mapping->domain, iova, len);
+       __free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       unsigned int offset = handle & ~PAGE_MASK;
+
+       if (!iova)
+               return;
+
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       unsigned int offset = handle & ~PAGE_MASK;
+
+       if (!iova)
+               return;
+
+       __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+struct dma_map_ops iommu_ops = {
+       .alloc          = arm_iommu_alloc_attrs,
+       .free           = arm_iommu_free_attrs,
+       .mmap           = arm_iommu_mmap_attrs,
+
+       .map_page               = arm_iommu_map_page,
+       .unmap_page             = arm_iommu_unmap_page,
+       .sync_single_for_cpu    = arm_iommu_sync_single_for_cpu,
+       .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+       .map_sg                 = arm_iommu_map_sg,
+       .unmap_sg               = arm_iommu_unmap_sg,
+       .sync_sg_for_cpu        = arm_iommu_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: granularity of IO address allocations, in units of (1 << order) pages
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges; this structure is required to perform memory allocation
+ * and mapping with IOMMU-aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device() function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+                        int order)
+{
+       unsigned int count = size >> (PAGE_SHIFT + order);
+       unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+       struct dma_iommu_mapping *mapping;
+       int err = -ENOMEM;
+
+       if (!count)
+               return ERR_PTR(-EINVAL);
+
+       mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+       if (!mapping)
+               goto err;
+
+       mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       if (!mapping->bitmap)
+               goto err2;
+
+       mapping->base = base;
+       mapping->bits = BITS_PER_BYTE * bitmap_size;
+       mapping->order = order;
+       spin_lock_init(&mapping->lock);
+
+       mapping->domain = iommu_domain_alloc(bus);
+       if (!mapping->domain)
+               goto err3;
+
+       kref_init(&mapping->kref);
+       return mapping;
+err3:
+       kfree(mapping->bitmap);
+err2:
+       kfree(mapping);
+err:
+       return ERR_PTR(err);
+}
+
+static void release_iommu_mapping(struct kref *kref)
+{
+       struct dma_iommu_mapping *mapping =
+               container_of(kref, struct dma_iommu_mapping, kref);
+
+       iommu_domain_free(mapping->domain);
+       kfree(mapping->bitmap);
+       kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+       if (mapping)
+               kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *     arm_iommu_create_mapping)
+ *
+ * Attaches the specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU-aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+                           struct dma_iommu_mapping *mapping)
+{
+       int err;
+
+       err = iommu_attach_device(mapping->domain, dev);
+       if (err)
+               return err;
+
+       kref_get(&mapping->kref);
+       dev->archdata.mapping = mapping;
+       set_dma_ops(dev, &iommu_ops);
+
+       pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+       return 0;
+}
+
+#endif
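
For reference, a hedged sketch of how platform code would wire a device to
this mapper (the base/size/order values are illustrative, not requirements):

    /* Illustrative: give a device a 128M IOVA window at 0x80000000,
     * managed in 2^4-page (64K) units. */
    static int my_platform_setup_iommu(struct device *dev)
    {
            struct dma_iommu_mapping *mapping;
            int err;

            mapping = arm_iommu_create_mapping(&platform_bus_type,
                                               0x80000000, SZ_128M, 4);
            if (IS_ERR(mapping))
                    return PTR_ERR(mapping);

            err = arm_iommu_attach_device(dev, mapping);
            if (err) {
                    arm_iommu_release_mapping(mapping);
                    return err;
            }
            return 0;    /* dev now allocates/maps through iommu_ops */
    }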