// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"
/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;
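/*
 * Illustrative sketch only (assumption, not arch code from this file): an
 * architecture whose DMA-limited devices can only address the low 1 GiB
 * would override the default from its early memory init code, e.g.:
 *
 *	void __init example_arch_mem_init(void)	// hypothetical helper
 *	{
 *		zone_dma_bits = 30;	// ZONE_DMA covers 30 address bits
 *	}
 */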
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
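/*
 * Worked example (illustrative): with 4 GiB of RAM and an identity
 * phys-to-DMA translation, max_dma is 0xfffff000, fls64() returns 32, and
 * the computed required mask is (1ULL << 31) * 2 - 1 == DMA_BIT_MASK(32).
 */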
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
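/*
 * Illustrative example: with the default zone_dma_bits of 24, a device whose
 * limit resolves to 30 bits of physical address gets GFP_DMA32 (it fits
 * below 4 GiB but not below 16 MiB), while a full 64-bit limit gets no
 * extra zone flag at all.
 */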
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}
static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}
/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}
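/*
 * Illustrative example: a driver calling dma_alloc_coherent() with GFP_ATOMIC
 * from interrupt context cannot sleep, so any path below that would need to
 * remap or re-encrypt memory is served from the pre-populated atomic pools
 * instead.
 */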
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_limit;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}
static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		/*
		 * Fall back to the arch handler if it exists.  This should
		 * eventually go away.
		 */
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise remap if the architecture is asking for it.  But
		 * given that remapping memory is a blocking operation we'll
		 * instead have to dip into the atomic pools.
		 */
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (remap) {
			if (dma_direct_use_pool(dev, gfp))
				return dma_direct_alloc_from_pool(dev, size,
						dma_handle, gfp);
		} else {
			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
				return NULL;
			set_uncached = true;
		}
	}

	/*
	 * Decrypting memory may block, so allocate the memory from the atomic
	 * pools if we can't block.
	 */
	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup.  These need
	 * to be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}
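/*
 * Illustrative driver-side usage (sketch, not part of this file): the path
 * above is what ultimately services a coherent allocation for a device using
 * the direct mapping. "my_dev" and "MY_BUF_SIZE" are hypothetical.
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(my_dev, MY_BUF_SIZE, &bus, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(my_dev, MY_BUF_SIZE, cpu, bus);
 */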
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_free_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}
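/*
 * Illustrative driver-side usage (sketch): the two helpers above back
 * dma_alloc_pages()/dma_free_pages() for non-coherent, streaming-style
 * buffers. "my_dev" and "len" are hypothetical.
 *
 *	dma_addr_t handle;
 *	struct page *p = dma_alloc_pages(my_dev, len, &handle, DMA_TO_DEVICE,
 *					 GFP_KERNEL);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	dma_free_pages(my_dev, len, p, handle, DMA_TO_DEVICE);
 */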
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_device(dev, paddr, sg->length,
						       dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
						    dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}
/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_dma_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}
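/*
 * Illustrative driver-side usage (sketch): mapping a scatterlist ends up in
 * dma_direct_map_sg() on direct-mapped devices. "my_dev" and "my_sgt" are
 * hypothetical; dma_map_sg() returns 0 on failure.
 *
 *	int n = dma_map_sg(my_dev, my_sgt->sgl, my_sgt->nents, DMA_TO_DEVICE);
 *
 *	if (!n)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(my_dev, my_sgt->sgl, my_sgt->nents, DMA_TO_DEVICE);
 */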
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}
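/*
 * Illustrative driver-side usage (sketch): a character device exporting a
 * coherent buffer to user space reaches dma_direct_mmap() through
 * dma_mmap_coherent(). "my_dev", "buf_cpu", "buf_dma" and "BUF_SIZE" are
 * hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, buf_cpu, buf_dma,
 *					 BUF_SIZE);
 *	}
 */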
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}
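/*
 * Illustrative driver-side usage (sketch): this check is what sits behind a
 * driver's mask negotiation. "my_dev" is hypothetical; per the comment above,
 * the 32-bit fallback is expected to succeed on direct-mapped devices.
 *
 *	if (dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(64)))
 *		if (dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 */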
size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}
/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug.  The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
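/*
 * Illustrative sketch (hypothetical platform code, not a driver): bus glue
 * that knows about a fixed CPU-to-device address translation and has no
 * "dma-ranges" property to describe it could register it like this, e.g.
 * CPU 0x80000000..0x8fffffff appearing to the device at 0x00000000:
 *
 *	ret = dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_256M);
 */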