#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
+#include <asm/mach/map.h>
+#include <asm/system_info.h>
+#include <asm/dma-contiguous.h>
#include "mm.h"
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+#ifndef CONFIG_ARM_DMA_USE_IOMMU
+ if (cpu_architecture() >= CPU_ARCH_ARMv6)
+ return 0;
+#endif
+
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
pr_err("%s: no memory\n", __func__);
return -ENOMEM;
}
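+/*
+ * Callback for apply_to_page_range(): rewrite a single kernel PTE with
+ * the page protection passed in via @data.
+ */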
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page *page = virt_to_page(addr);
+ pgprot_t prot = *(pgprot_t *)data;
+
+ set_pte_ext(pte, mk_pte(page, prot), 0);
+ return 0;
+}
+
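+/*
+ * Walk the kernel linear mapping of a buffer, change its page protection
+ * and flush the TLB for the affected range.
+ */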
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+ unsigned long start = (unsigned long) page_address(page);
+ unsigned long end = start + size;
+
+ apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+ dsb();
+ flush_tlb_kernel_range(start, end);
+}
+
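+/*
+ * Allocate a DMA buffer and remap it with the requested pgprot in the
+ * consistent mapping area; used on CPUs earlier than ARMv6.
+ */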
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ pgprot_t prot, struct page **ret_page,
+ const void *caller)
+{
+ struct page *page;
+ void *ptr;
+ page = __dma_alloc_buffer(dev, size, gfp);
+ if (!page)
+ return NULL;
+
+ ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ if (!ptr) {
+ __dma_free_buffer(page, size);
+ return NULL;
+ }
+
+ *ret_page = page;
+ return ptr;
+}
+
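+/*
+ * Allocate from the preallocated coherent pool; this path needs no
+ * page-table updates and can therefore be used from atomic context.
+ */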
+static void *__alloc_from_pool(struct device *dev, size_t size,
+ struct page **ret_page, const void *caller)
+{
+ struct arm_vmregion *c;
+ size_t align;
+
+ if (!coherent_head.vm_start) {
+ printk(KERN_ERR "%s: coherent pool not initialised!\n",
+ __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+ * Align the region allocation - allocations from pool are rather
+ * small, so align them to their order in pages, minimum is a page
+ * size. This helps reduce fragmentation of the DMA space.
+ */
+ align = PAGE_SIZE << get_order(size);
+ c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+ if (c) {
+ void *ptr = (void *)c->vm_start;
+ struct page *page = virt_to_page(ptr);
+ *ret_page = page;
+ return ptr;
+ }
+ return NULL;
+}
+
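+/*
+ * Return a buffer to the coherent pool. Returns 1 if the address was
+ * handled here, 0 if it does not belong to the pool.
+ */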
+static int __free_from_pool(void *cpu_addr, size_t size)
+{
+ unsigned long start = (unsigned long)cpu_addr;
+ unsigned long end = start + size;
+ struct arm_vmregion *c;
+
+ if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+ return 0;
+
+ c = arm_vmregion_find_remove(&coherent_head, start);
+ if (!c)
+ return 0;
+
+ if ((c->vm_end - c->vm_start) != size) {
+ printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
+ __func__, c->vm_end - c->vm_start, size);
+ dump_stack();
+ size = c->vm_end - c->vm_start;
+ }
+
+ arm_vmregion_free(&coherent_head, c);
+ return 1;
+}
+
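+/*
+ * Allocate a buffer from the CMA region, clear it and remap its linear
+ * mapping with the requested pgprot.
+ */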
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page)
+{
+ unsigned long order = get_order(size);
+ size_t count = size >> PAGE_SHIFT;
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ return NULL;
+
+ __dma_clear_buffer(page, size);
+ __dma_remap(page, size, prot);
+
+ *ret_page = page;
+ return page_address(page);
+}
+
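+/*
+ * Restore the default kernel protection on the buffer and give the
+ * pages back to the CMA region.
+ */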
+static void __free_from_contiguous(struct device *dev, struct page *page,
+ size_t size)
+{
+ __dma_remap(page, size, pgprot_kernel);
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
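+/*
+ * Translate DMA attributes into a kernel pgprot: write-combining when
+ * DMA_ATTR_WRITE_COMBINE is set, the default DMA-coherent protection
+ * otherwise.
+ */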
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+ prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+ pgprot_writecombine(prot) :
+ pgprot_dmacoherent(prot);
+ return prot;
+}
+
+#define nommu() 0
+
#else /* !CONFIG_MMU */
-#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
-#define __dma_free_remap(addr, size) do { } while (0)
+#define nommu() 1
+
+#define __get_dma_pgprot(attrs, prot) __pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+#define __alloc_from_pool(dev, size, ret_page, c) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __free_from_pool(cpu_addr, size) 0
+#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
*/
gfp &= ~(__GFP_COMP);
- *handle = ~0;
+ *handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- page = __dma_alloc_buffer(dev, size, gfp);
- if (!page)
- return NULL;
-
- if (!arch_is_coherent())
- addr = __dma_alloc_remap(page, size, gfp, prot, caller);
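+ /*
+ * Pick the allocator: coherent or !MMU systems take plain pages,
+ * pre-ARMv6 CPUs use the remapped consistent area, atomic callers
+ * use the preallocated coherent pool, everything else comes from CMA.
+ */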
+ if (arch_is_coherent() || nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (cpu_architecture() < CPU_ARCH_ARMv6)
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+ else if (gfp & GFP_ATOMIC)
+ addr = __alloc_from_pool(dev, size, &page, caller);
else
- addr = page_address(page);
+ addr = __alloc_from_contiguous(dev, size, prot, &page);
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
- unsigned long user_size, kern_size;
- struct arm_vmregion *c;
-
+ unsigned long pfn = dma_to_pfn(dev, dma_addr);
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
- user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) {
- unsigned long off = vma->vm_pgoff;
- struct page *pages = c->priv;
-
- kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
-
- if (off < kern_size &&
- user_size <= (kern_size - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(pages) + off,
- user_size << PAGE_SHIFT,
- vma->vm_page_prot);
- }
- }
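+ /*
+ * The buffer is physically contiguous, so the whole user mapping
+ * can be established with a single remap_pfn_range() call.
+ */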
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
#endif /* CONFIG_MMU */
return ret;
}
- int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
- {
- vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
- }
- EXPORT_SYMBOL(dma_mmap_coherent);
-
- int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
- {
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
- }
- EXPORT_SYMBOL(dma_mmap_writecombine);
-
-
/*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
*/
- void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
{
- WARN_ON(irqs_disabled());
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
size = PAGE_ALIGN(size);
- if (!arch_is_coherent())
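+ /*
+ * Free along the same path the buffer was allocated from: plain
+ * pages, remapped consistent area, atomic pool, or CMA.
+ */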
+ if (arch_is_coherent() || nommu()) {
+ __dma_free_buffer(page, size);
+ } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
__dma_free_remap(cpu_addr, size);
-
- __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
+ __dma_free_buffer(page, size);
+ } else {
+ if (__free_from_pool(cpu_addr, size))
+ return;
+ /*
+ * Non-atomic allocations cannot be freed with IRQs disabled
+ */
+ WARN_ON(irqs_disabled());
+ __free_from_contiguous(dev, page, size);
+ }
}
- EXPORT_SYMBOL(dma_free_coherent);
-
- /*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
- void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
- {
- unsigned long paddr;
-
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- dmac_map_area(kaddr, size, dir);
-
- paddr = __pa(kaddr);
- if (dir == DMA_FROM_DEVICE) {
- outer_inv_range(paddr, paddr + size);
- } else {
- outer_clean_range(paddr, paddr + size);
- }
- /* FIXME: non-speculating: flush on bidirectional mappings? */
- }
- EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
- void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
- {
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- /* FIXME: non-speculating: not required */
- /* don't bother invalidating if DMA to device */
- if (dir != DMA_TO_DEVICE) {
- unsigned long paddr = __pa(kaddr);
- outer_inv_range(paddr, paddr + size);
- }
-
- dmac_unmap_area(kaddr, size, dir);
- }
- EXPORT_SYMBOL(___dma_single_dev_to_cpu);
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,