// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/xen/xen-ops.h>

#include "dma.h"
#include "mm.h"
struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL	    0
#define COHERENT    1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
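/*
 * A minimal sketch of the driver-visible protocol that results (assuming
 * a non-coherent device; "dev", "buf" and "len" are hypothetical):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	...					// device DMAs into buf
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *	...					// CPU may now read buf
 *
 * The map step cleans/invalidates ahead of the transfer
 * (__dma_page_cpu_to_dev below) and the unmap step invalidates again
 * (__dma_page_dev_to_cpu) to discard anything the CPU speculatively
 * pulled into its caches while the device owned the buffer.
 */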
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}
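/*
 * Example of the rounding behaviour above: a 20 KiB (5 page) request
 * allocates an order-3 block (8 pages), splits it, and immediately frees
 * the 3 excess pages, so only the pages actually backing the buffer
 * remain allocated.
 */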
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
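/*
 * The pool defaults to 256 KiB and can be resized on the kernel command
 * line, e.g. "coherent_pool=2M" (memparse() accepts the usual K/M/G
 * suffixes).  It services coherent allocations made from atomic context
 * for non-coherent devices, where remapping the buffer is not possible.
 */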
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
#ifdef CONFIG_CMA_AREAS
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;

	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) cannot cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
#endif
static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = dma_common_contiguous_remap(page, size, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			dma_common_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
		pgprot_writecombine(prot) :
		pgprot_dmacoherent(prot);
	return prot;
}
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};
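/*
 * Allocator selection in __dma_alloc() below, in order of preference:
 *
 *	CMA available and blockable gfp	-> cma_allocator
 *	coherent device			-> simple_allocator
 *	blockable gfp			-> remap_allocator
 *	otherwise (atomic context)	-> pool_allocator
 */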
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#zx mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = phys_to_dma(dev, page_to_phys(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
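/*
 * The PG_dcache_clean marking above cooperates with the VM's D-cache
 * maintenance (see flush_dcache_page()): pages that were just fully
 * invalidated on the way back from the device need no further flushing
 * before being mapped elsewhere.
 */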
#ifdef CONFIG_ARM_DMA_USE_IOMMU

static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}
/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}
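/*
 * Worked example of the iova arithmetic above: each bitmap is at most
 * PAGE_SIZE bytes, i.e. 32768 bits with 4 KiB pages, so one bitmap covers
 * 32768 pages = 128 MiB of IO address space.  For a 256 MiB mapping (two
 * bitmaps), an allocation landing at bit 'start' of bitmap 1 yields
 * iova = base + 128 MiB + (start << PAGE_SHIFT).
 */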
static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order,
						 gfp & __GFP_NOWARN);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size, coherent_flag);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}
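/*
 * Example of the opportunistic sizing above: a 1.25 MiB buffer
 * (count = 320 pages) first tries order 9 (2 MiB), but __fls(320) = 8,
 * so it drops to order 8 and takes one 1 MiB chunk, then drops again and
 * covers the remaining 64 pages with four order-4 (64 KiB) chunks.
 * Under memory pressure the same request degrades towards single 4 KiB
 * pages instead of failing outright.
 */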
static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}
/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_MAPPING_ERROR;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}
static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;

	return dma_common_find_pages(cpu_addr);
}

static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
	struct page *page;
	void *addr;

	if (coherent_flag == COHERENT)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else
		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(cpu_addr), size);
	else
		__free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;
	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = dma_common_pages_remap(pages, size, prot,
				      __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int err;

	if (!pages)
		return -ENXIO;

	if (vma->vm_pgoff >= nr_pages)
		return -ENXIO;

	if (!dev->dma_coherent)
		vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	err = vm_map_pages(vma, pages, nr_pages);
	if (err)
		pr_err("Remapping memory failed: %d\n", err);

	return err;
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs)
{
	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		dma_common_free_remap(cpu_addr, size);

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_MAPPING_ERROR;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_MAPPING_ERROR)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_info_to_prot(dir, attrs);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0, ret;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			ret = __map_sg_chunk(dev, start, size,
					     &dma->dma_address, dir, attrs);
			if (ret < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
	if (ret < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	if (ret == -ENOMEM)
		return ret;
	return -EINVAL;
}
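/*
 * Merging example: two adjacent sg entries of 4 KiB each, both page
 * aligned, stay in one chunk and come back as a single 8 KiB dma segment;
 * an entry with a non-zero page offset (or a chunk that would exceed the
 * device's max segment size) forces a new chunk and a new segment.
 */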
/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_iommu_unmap_sg(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_cpu(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	if (dev->dma_coherent)
		return;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	if (dev->dma_coherent)
		return;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
		__dma_page_dev_to_cpu(page, offset, size, dir);
	}

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
/**
 * arm_iommu_map_resource - map a device resource for DMA
 * @dev: valid struct device pointer
 * @phys_addr: physical address of resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static dma_addr_t arm_iommu_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot;
	phys_addr_t addr = phys_addr & PAGE_MASK;
	unsigned int offset = phys_addr & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;

	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_unmap_resource - unmap a device DMA resource
 * @dev: valid struct device pointer
 * @dma_handle: DMA address to resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = dma_handle & PAGE_MASK;
	unsigned int offset = dma_handle & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page;
	unsigned int offset = handle & ~PAGE_MASK;

	if (dev->dma_coherent || !iova)
		return;

	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page;
	unsigned int offset = handle & ~PAGE_MASK;

	if (dev->dma_coherent || !iova)
		return;

	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}
static const struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.map_resource		= arm_iommu_map_resource,
	.unmap_resource		= arm_iommu_unmap_resource,
};
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
				   GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
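/*
 * Typical use from bus or driver code (a sketch; error handling elided,
 * and the base/size values are arbitrary examples):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (!IS_ERR(mapping))
 *		arm_iommu_attach_device(dev, mapping);
 *
 * After a successful attach, dma_map_*() on "dev" goes through iommu_ops.
 */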
static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;
	set_dma_ops(dev, NULL);

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu, bool coherent)
{
	struct dma_iommu_mapping *mapping;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return;
	}

	set_dma_ops(dev, &iommu_ops);
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu, bool coherent)
{
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->archdata.dma_coherent = coherent;
	dev->dma_coherent = coherent;

	/*
	 * Don't override the dma_ops if they have already been set. Ideally
	 * this should be the only location where dma_ops are set, remove this
	 * check when all other callers of set_dma_ops will have disappeared.
	 */
	if (dev->dma_ops)
		return;

	if (iommu)
		arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);

	xen_setup_dma_ops(dev);
	dev->archdata.dma_ops_setup = true;
}

void arch_teardown_dma_ops(struct device *dev)
{
	if (!dev->archdata.dma_ops_setup)
		return;

	arm_teardown_iommu_dma_ops(dev);
	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
	set_dma_ops(dev, NULL);
}
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, dma_handle, gfp,
			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
			   attrs, __builtin_return_address(0));
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}
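/*
 * These arch_* hooks are what the generic dma-mapping core calls for
 * devices without an IOMMU mapping attached; roughly speaking, a plain
 * dma_alloc_coherent()/dma_free_coherent() on such a device ends up in
 * arch_dma_alloc()/arch_dma_free() above, and the streaming-DMA sync
 * paths end up in arch_sync_dma_for_{device,cpu}().
 */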