// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
#endif
/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}
/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
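/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * probe routine can rely on the managed allocator so the buffer is freed
 * automatically on driver detach.  The device pointer, size and field
 * names below are hypothetical.
 *
 *	priv->desc = dmam_alloc_attrs(dev, PAGE_SIZE, &priv->desc_dma,
 *				      GFP_KERNEL, 0);
 *	if (!priv->desc)
 *		return -ENOMEM;
 *	// no matching free is needed on later error paths or in remove()
 */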
static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}
/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}
static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	kmsan_handle_dma(page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);
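/*
 * Usage sketch (editor's illustration): the common way to reach this path
 * is through dma_map_single(), which wraps dma_map_page_attrs() for a
 * kernel virtual address.  "buf" and "len" are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... hand "dma" to the device and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */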
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0) {
		kmsan_handle_dma_sg(sg, nents, dir);
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		return -EIO;
	}

	return ents;
}
/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sg:		The sg_table object describing the buffer
 * @nents:	Number of entries to map
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
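/*
 * Usage sketch (editor's illustration): the unmap must use the original
 * scatterlist and the original nents, never the count returned by
 * dma_map_sg().  "sgl" and "nents" are hypothetical.
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *	if (!mapped)
 *		return -EIO;
 *	// ... program the device with the "mapped" DMA segments ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */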
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage. Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping. Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning, e.g. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
 *			in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
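/*
 * Usage sketch (editor's illustration): unlike dma_map_sg(), this variant
 * reports an error code, and the mapped segments are walked with the
 * for_each_sgtable_dma_sg() helper.  "sgt" and program_segment() are
 * hypothetical.
 *
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;	// -EINVAL, -ENOMEM, -EIO or -EREMOTEIO
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		program_segment(sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */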
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
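/*
 * Usage sketch (editor's illustration): a streaming mapping that is kept
 * across transfers must be handed back and forth with the sync calls.
 * "dma" and "len" come from an earlier dma_map_single().
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the CPU may now read the data written by the device ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the device may now DMA into the buffer again ...
 */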
/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatter-gather table.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */
/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
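/*
 * Usage sketch (editor's illustration): a driver's ->mmap() file operation
 * can hand a coherent buffer to user space with the dma_mmap_coherent()
 * wrapper.  The "foo_priv" structure and its fields are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_addr, priv->size);
 *	}
 */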
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	/*
	 * DMA allocations can never be turned back into a page pointer, so
	 * requesting compound pages doesn't make sense (and can't even be
	 * supported at all by various backends).
	 */
	if (WARN_ON_ONCE(flag & __GFP_COMP))
		return NULL;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}
struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);
static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);
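/*
 * Usage sketch (editor's illustration): dma_alloc_pages() returns
 * struct-page backed, non-coherent memory that is treated like a streaming
 * mapping.  Names are hypothetical.
 *
 *	dma_addr_t dma;
 *	struct page *page;
 *
 *	page = dma_alloc_pages(dev, PAGE_SIZE, &dma, DMA_BIDIRECTIONAL,
 *			       GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	// CPU access via page_address(page), device access via "dma",
 *	// with dma_sync_single_for_cpu()/_for_device() in between.
 *	dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_BIDIRECTIONAL);
 */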
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);
static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);
static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
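/*
 * Usage sketch (editor's illustration): a possibly non-contiguous
 * allocation that is vmap()ed for CPU access.  Names are hypothetical.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr) {
 *		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 *		return -ENOMEM;
 *	}
 *	// ... CPU access through vaddr, device access through the sgt ...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */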
static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* if ops is not set, dma direct will be used which supports P2PDMA */
	if (!ops)
		return true;

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);
int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
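/*
 * Usage sketch (editor's illustration): drivers normally set both masks at
 * probe time through the dma_set_mask_and_coherent() helper and fall back
 * to 32 bits if the wider mask is rejected.
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */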
size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);
unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);