1 // SPDX-License-Identifier: GPL-2.0-only
3 * Dynamic DMA mapping support.
5 * This implementation is a fallback for platforms that do not support
6 * I/O TLBs (aka DMA address translation hardware).
7 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
8 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
13 * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
14 * unnecessary i-cache flushing.
15 * 04/07/.. ak Better overflow handling. Assorted fixes.
16 * 05/09/10 linville Add support for syncing ranges, support syncing for
17 * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
18 * 08/12/11 beckyb Add highmem support
21 #define pr_fmt(fmt) "software IO TLB: " fmt
23 #include <linux/cache.h>
24 #include <linux/cc_platform.h>
25 #include <linux/ctype.h>
26 #include <linux/debugfs.h>
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
29 #include <linux/export.h>
30 #include <linux/gfp.h>
31 #include <linux/highmem.h>
33 #include <linux/iommu-helper.h>
34 #include <linux/init.h>
35 #include <linux/memblock.h>
37 #include <linux/pfn.h>
38 #include <linux/rculist.h>
39 #include <linux/scatterlist.h>
40 #include <linux/set_memory.h>
41 #include <linux/spinlock.h>
42 #include <linux/string.h>
43 #include <linux/swiotlb.h>
44 #include <linux/types.h>
45 #ifdef CONFIG_DMA_RESTRICTED_POOL
47 #include <linux/of_fdt.h>
48 #include <linux/of_reserved_mem.h>
49 #include <linux/slab.h>
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/swiotlb.h>
55 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
58 * Minimum IO TLB size to bother booting with. Systems with mainly
59 * 64bit capable cards will only lightly use the swiotlb. If we can't
60 * allocate a contiguous 1MB, we're probably in trouble anyway.
62 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
64 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
67 * struct io_tlb_slot - IO TLB slot descriptor
68 * @orig_addr: The original address corresponding to a mapped entry.
69 * @alloc_size: Size of the allocated buffer.
70 * @list: The free list describing the number of free entries available
71 * from this index onward.
74 phys_addr_t orig_addr;
79 static bool swiotlb_force_bounce;
80 static bool swiotlb_force_disable;
82 #ifdef CONFIG_SWIOTLB_DYNAMIC
84 static void swiotlb_dyn_alloc(struct work_struct *work);
86 static struct io_tlb_mem io_tlb_default_mem = {
87 .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
88 .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
89 .dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
90 swiotlb_dyn_alloc),
91 };
93 #else /* !CONFIG_SWIOTLB_DYNAMIC */
95 static struct io_tlb_mem io_tlb_default_mem;
97 #endif /* CONFIG_SWIOTLB_DYNAMIC */
99 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
100 static unsigned long default_nareas;
103 * struct io_tlb_area - IO TLB memory area descriptor
105 * This is a single area with a single lock.
107 * @used: The number of used IO TLB blocks.
108 * @index: The slot index to start searching in this area for next round.
109 * @lock: The lock to protect the above data structures in the map and
110 * unmap calls.
119 * Round up the number of slabs to the next power of 2. The last area is going
120 * to be smaller than the rest if default_nslabs is not a power of two.
121 * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
122 * otherwise a segment may span two or more areas. That conflicts with free
123 * contiguous slot tracking: free slots are treated as contiguous no matter
124 * whether they cross an area boundary.
126 * Return true if default_nslabs is rounded up.
128 static bool round_up_default_nslabs(void)
133 if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
134 default_nslabs = IO_TLB_SEGSIZE * default_nareas;
135 else if (is_power_of_2(default_nslabs))
137 default_nslabs = roundup_pow_of_two(default_nslabs);
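/*
 * Worked example of the rounding above (the numbers are illustrative): once
 * default_nareas is set (say 8, whether from "swiotlb=...,8" or from the CPU
 * count), a default_nslabs of 36864 (a 72 MB request, 72 MB >> IO_TLB_SHIFT)
 * is neither below IO_TLB_SEGSIZE * 8 = 1024 nor a power of two, so it gets
 * rounded up to 65536 slabs, i.e. a 128 MB pool.
 */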
142 * swiotlb_adjust_nareas() - adjust the number of areas and slots
143 * @nareas: Desired number of areas. Zero is treated as 1.
145 * Adjust the default number of areas in a memory pool.
146 * The default size of the memory pool may also change to meet minimum area
147 * size requirements.
149 static void swiotlb_adjust_nareas(unsigned int nareas)
153 else if (!is_power_of_2(nareas))
154 nareas = roundup_pow_of_two(nareas);
156 default_nareas = nareas;
158 pr_info("area num %d.\n", nareas);
159 if (round_up_default_nslabs())
160 pr_info("SWIOTLB bounce buffer size roundup to %luMB",
161 (default_nslabs << IO_TLB_SHIFT) >> 20);
165 * limit_nareas() - get the maximum number of areas for a given memory pool size
166 * @nareas: Desired number of areas.
167 * @nslots: Total number of slots in the memory pool.
169 * Limit the number of areas to the maximum possible number of areas in
170 * a memory pool of the given size.
172 * Return: Maximum possible number of areas.
174 static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
176 if (nslots < nareas * IO_TLB_SEGSIZE)
177 return nslots / IO_TLB_SEGSIZE;
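/*
 * Example: a 4 MB pool has nslots = 4 MB >> IO_TLB_SHIFT = 2048 slots, so
 * with IO_TLB_SEGSIZE = 128 at most 2048 / 128 = 16 areas fit; a request
 * for 64 areas would be limited to 16 here.
 */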
182 setup_io_tlb_npages(char *str)
185 /* avoid tail segment of size < IO_TLB_SEGSIZE */
187 ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
192 swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
195 if (!strcmp(str, "force"))
196 swiotlb_force_bounce = true;
197 else if (!strcmp(str, "noforce"))
198 swiotlb_force_disable = true;
202 early_param("swiotlb", setup_io_tlb_npages);
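/*
 * The parser above gives the "swiotlb=" boot parameter the rough form
 *
 *	swiotlb=<slabs>[,<areas>][,force|noforce]
 *
 * For example (illustrative values), "swiotlb=65536,4,force" asks for 65536
 * slabs (65536 << IO_TLB_SHIFT = 128 MB of bounce buffers) split into 4
 * areas and forces bouncing even for devices that could address all of
 * memory, while "swiotlb=noforce" disables the bounce buffers entirely.
 */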
204 unsigned long swiotlb_size_or_default(void)
206 return default_nslabs << IO_TLB_SHIFT;
209 void __init swiotlb_adjust_size(unsigned long size)
212 * If swiotlb parameter has not been specified, give a chance to
213 * architectures such as those supporting memory encryption to
214 * adjust/expand SWIOTLB size for their use.
216 if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
219 size = ALIGN(size, IO_TLB_SIZE);
220 default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
221 if (round_up_default_nslabs())
222 size = default_nslabs << IO_TLB_SHIFT;
223 pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
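/*
 * Sketch of a typical caller: memory-encrypted guests must bounce all DMA,
 * so the architecture scales the pool with guest memory before swiotlb is
 * initialized. Roughly (the values are illustrative, not a verbatim copy of
 * any architecture's code):
 *
 *	unsigned long size = memblock_phys_mem_size() * 6 / 100;
 *
 *	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
 *	swiotlb_adjust_size(size);
 */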
226 void swiotlb_print_info(void)
228 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
231 pr_warn("No low mem\n");
235 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
236 (mem->nslabs << IO_TLB_SHIFT) >> 20);
239 static inline unsigned long io_tlb_offset(unsigned long val)
241 return val & (IO_TLB_SEGSIZE - 1);
244 static inline unsigned long nr_slots(u64 val)
246 return DIV_ROUND_UP(val, IO_TLB_SIZE);
250 * Early SWIOTLB allocation may be too early to allow an architecture to
251 * perform the desired operations. This function allows the architecture to
252 * call SWIOTLB when the operations are possible. It needs to be called
253 * before the SWIOTLB memory is used.
255 void __init swiotlb_update_mem_attributes(void)
257 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
260 if (!mem->nslabs || mem->late_alloc)
262 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
263 set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
266 static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
267 unsigned long nslabs, bool late_alloc, unsigned int nareas)
269 void *vaddr = phys_to_virt(start);
270 unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
272 mem->nslabs = nslabs;
274 mem->end = mem->start + bytes;
275 mem->late_alloc = late_alloc;
276 mem->nareas = nareas;
277 mem->area_nslabs = nslabs / mem->nareas;
279 for (i = 0; i < mem->nareas; i++) {
280 spin_lock_init(&mem->areas[i].lock);
281 mem->areas[i].index = 0;
282 mem->areas[i].used = 0;
285 for (i = 0; i < mem->nslabs; i++) {
286 mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
287 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
288 mem->slots[i].alloc_size = 0;
291 memset(vaddr, 0, bytes);
297 * add_mem_pool() - add a memory pool to the allocator
298 * @mem: Software IO TLB allocator.
299 * @pool: Memory pool to be added.
301 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
303 #ifdef CONFIG_SWIOTLB_DYNAMIC
304 spin_lock(&mem->lock);
305 list_add_rcu(&pool->node, &mem->pools);
306 mem->nslabs += pool->nslabs;
307 spin_unlock(&mem->lock);
309 mem->nslabs = pool->nslabs;
313 static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
315 int (*remap)(void *tlb, unsigned long nslabs))
317 size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
321 * By default allocate the bounce buffer memory from low memory, but
322 * allow picking a location anywhere for hypervisors with guest
323 * memory encryption.
325 if (flags & SWIOTLB_ANY)
326 tlb = memblock_alloc(bytes, PAGE_SIZE);
328 tlb = memblock_alloc_low(bytes, PAGE_SIZE);
331 pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
336 if (remap && remap(tlb, nslabs) < 0) {
337 memblock_free(tlb, PAGE_ALIGN(bytes));
338 pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
346 * Statically reserve bounce buffer space and initialize bounce buffer data
347 * structures for the software IO TLB used to implement the DMA API.
349 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
350 int (*remap)(void *tlb, unsigned long nslabs))
352 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
353 unsigned long nslabs;
358 if (!addressing_limit && !swiotlb_force_bounce)
360 if (swiotlb_force_disable)
363 io_tlb_default_mem.force_bounce =
364 swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
366 #ifdef CONFIG_SWIOTLB_DYNAMIC
368 io_tlb_default_mem.can_grow = true;
369 if (flags & SWIOTLB_ANY)
370 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
372 io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
376 swiotlb_adjust_nareas(num_possible_cpus());
378 nslabs = default_nslabs;
379 nareas = limit_nareas(default_nareas, nslabs);
380 while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
381 if (nslabs <= IO_TLB_MIN_SLABS)
383 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
384 nareas = limit_nareas(nareas, nslabs);
387 if (default_nslabs != nslabs) {
388 pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
389 default_nslabs, nslabs);
390 default_nslabs = nslabs;
393 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
394 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
396 pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
397 __func__, alloc_size, PAGE_SIZE);
401 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
402 default_nareas), SMP_CACHE_BYTES);
404 pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
408 swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
410 add_mem_pool(&io_tlb_default_mem, mem);
412 if (flags & SWIOTLB_VERBOSE)
413 swiotlb_print_info();
416 void __init swiotlb_init(bool addressing_limit, unsigned int flags)
418 swiotlb_init_remap(addressing_limit, flags, NULL);
422 * Systems with larger DMA zones (those that don't support ISA) can
423 * initialize the swiotlb later using the slab allocator if needed.
424 * This should be just like above, but with some error catching.
426 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
427 int (*remap)(void *tlb, unsigned long nslabs))
429 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
430 unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
432 unsigned char *vstart = NULL;
433 unsigned int order, area_order;
434 bool retried = false;
437 if (io_tlb_default_mem.nslabs)
440 if (swiotlb_force_disable)
443 io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
445 #ifdef CONFIG_SWIOTLB_DYNAMIC
447 io_tlb_default_mem.can_grow = true;
448 if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
449 io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
450 else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
451 io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
453 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
457 swiotlb_adjust_nareas(num_possible_cpus());
460 order = get_order(nslabs << IO_TLB_SHIFT);
461 nslabs = SLABS_PER_PAGE << order;
463 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
464 vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
469 nslabs = SLABS_PER_PAGE << order;
477 rc = remap(vstart, nslabs);
479 free_pages((unsigned long)vstart, order);
481 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
482 if (nslabs < IO_TLB_MIN_SLABS)
489 pr_warn("only able to allocate %ld MB\n",
490 (PAGE_SIZE << order) >> 20);
493 nareas = limit_nareas(default_nareas, nslabs);
494 area_order = get_order(array_size(sizeof(*mem->areas), nareas));
495 mem->areas = (struct io_tlb_area *)
496 __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
500 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
501 get_order(array_size(sizeof(*mem->slots), nslabs)));
505 set_memory_decrypted((unsigned long)vstart,
506 (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
507 swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
509 add_mem_pool(&io_tlb_default_mem, mem);
511 swiotlb_print_info();
515 free_pages((unsigned long)mem->areas, area_order);
517 free_pages((unsigned long)vstart, order);
521 void __init swiotlb_exit(void)
523 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
524 unsigned long tbl_vaddr;
525 size_t tbl_size, slots_size;
526 unsigned int area_order;
528 if (swiotlb_force_bounce)
534 pr_info("tearing down default memory pool\n");
535 tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
536 tbl_size = PAGE_ALIGN(mem->end - mem->start);
537 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
539 set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
540 if (mem->late_alloc) {
541 area_order = get_order(array_size(sizeof(*mem->areas),
543 free_pages((unsigned long)mem->areas, area_order);
544 free_pages(tbl_vaddr, get_order(tbl_size));
545 free_pages((unsigned long)mem->slots, get_order(slots_size));
547 memblock_free_late(__pa(mem->areas),
548 array_size(sizeof(*mem->areas), mem->nareas));
549 memblock_free_late(mem->start, tbl_size);
550 memblock_free_late(__pa(mem->slots), slots_size);
553 memset(mem, 0, sizeof(*mem));
556 #ifdef CONFIG_SWIOTLB_DYNAMIC
559 * alloc_dma_pages() - allocate pages to be used for DMA
560 * @gfp: GFP flags for the allocation.
561 * @bytes: Size of the buffer.
563 * Allocate pages from the buddy allocator. If successful, make the allocated
564 * pages decrypted so that they can be used for DMA.
566 * Return: Decrypted pages, or %NULL on failure.
568 static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
570 unsigned int order = get_order(bytes);
574 page = alloc_pages(gfp, order);
578 vaddr = page_address(page);
579 if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
584 __free_pages(page, order);
589 * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
590 * @dev: Device for which a memory pool is allocated.
591 * @bytes: Size of the buffer.
592 * @phys_limit: Maximum allowed physical address of the buffer.
593 * @gfp: GFP flags for the allocation.
595 * Return: Allocated pages, or %NULL on allocation failure.
597 static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
598 u64 phys_limit, gfp_t gfp)
603 * Allocate from the atomic pools if memory is encrypted and
604 * the allocation is atomic, because decrypting may block.
606 if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
609 if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
612 return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
616 gfp &= ~GFP_ZONEMASK;
617 if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
619 else if (phys_limit <= DMA_BIT_MASK(32))
622 while ((page = alloc_dma_pages(gfp, bytes)) &&
623 page_to_phys(page) + bytes - 1 > phys_limit) {
624 /* allocated, but too high */
625 __free_pages(page, get_order(bytes));
627 if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
628 phys_limit < DMA_BIT_MASK(64) &&
629 !(gfp & (__GFP_DMA32 | __GFP_DMA)))
631 else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
633 gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
642 * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
643 * @vaddr: Virtual address of the buffer.
644 * @bytes: Size of the buffer.
646 static void swiotlb_free_tlb(void *vaddr, size_t bytes)
648 if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
649 dma_free_from_pool(NULL, vaddr, bytes))
652 /* Intentional leak if pages cannot be encrypted again. */
653 if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
654 __free_pages(virt_to_page(vaddr), get_order(bytes));
658 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
659 * @dev: Device for which a memory pool is allocated.
660 * @minslabs: Minimum number of slabs.
661 * @nslabs: Desired (maximum) number of slabs.
662 * @nareas: Number of areas.
663 * @phys_limit: Maximum DMA buffer physical address.
664 * @gfp: GFP flags for the allocations.
666 * Allocate and initialize a new IO TLB memory pool. The actual number of
667 * slabs may be reduced if allocation of @nslabs fails. If even
668 * @minslabs cannot be allocated, this function fails.
670 * Return: New memory pool, or %NULL on allocation failure.
672 static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
673 unsigned long minslabs, unsigned long nslabs,
674 unsigned int nareas, u64 phys_limit, gfp_t gfp)
676 struct io_tlb_pool *pool;
677 unsigned int slot_order;
682 pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
683 pool = kzalloc(pool_size, gfp);
686 pool->areas = (void *)pool + sizeof(*pool);
688 tlb_size = nslabs << IO_TLB_SHIFT;
689 while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
690 if (nslabs <= minslabs)
692 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
693 nareas = limit_nareas(nareas, nslabs);
694 tlb_size = nslabs << IO_TLB_SHIFT;
697 slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
698 pool->slots = (struct io_tlb_slot *)
699 __get_free_pages(gfp, slot_order);
703 swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
707 swiotlb_free_tlb(page_address(tlb), tlb_size);
715 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
716 * @work: Pointer to dyn_alloc in struct io_tlb_mem.
718 static void swiotlb_dyn_alloc(struct work_struct *work)
720 struct io_tlb_mem *mem =
721 container_of(work, struct io_tlb_mem, dyn_alloc);
722 struct io_tlb_pool *pool;
724 pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
725 default_nareas, mem->phys_limit, GFP_KERNEL);
727 pr_warn_ratelimited("Failed to allocate new pool");
731 add_mem_pool(mem, pool);
733 /* Pairs with smp_rmb() in is_swiotlb_buffer(). */
738 * swiotlb_dyn_free() - RCU callback to free a memory pool
739 * @rcu: RCU head in the corresponding struct io_tlb_pool.
741 static void swiotlb_dyn_free(struct rcu_head *rcu)
743 struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
744 size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
745 size_t tlb_size = pool->end - pool->start;
747 free_pages((unsigned long)pool->slots, get_order(slots_size));
748 swiotlb_free_tlb(pool->vaddr, tlb_size);
753 * swiotlb_find_pool() - find the IO TLB pool for a physical address
754 * @dev: Device which has mapped the DMA buffer.
755 * @paddr: Physical address within the DMA buffer.
757 * Find the IO TLB memory pool descriptor which contains the given physical
758 * address.
760 * Return: Memory pool which contains @paddr, or %NULL if none.
762 struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
764 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
765 struct io_tlb_pool *pool;
768 list_for_each_entry_rcu(pool, &mem->pools, node) {
769 if (paddr >= pool->start && paddr < pool->end)
773 list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
774 if (paddr >= pool->start && paddr < pool->end)
784 * swiotlb_del_pool() - remove an IO TLB pool from a device
785 * @dev: Owning device.
786 * @pool: Memory pool to be removed.
788 static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
792 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
793 list_del_rcu(&pool->node);
794 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
796 call_rcu(&pool->rcu, swiotlb_dyn_free);
799 #endif /* CONFIG_SWIOTLB_DYNAMIC */
802 * swiotlb_dev_init() - initialize swiotlb fields in &struct device
803 * @dev: Device to be initialized.
805 void swiotlb_dev_init(struct device *dev)
807 dev->dma_io_tlb_mem = &io_tlb_default_mem;
808 #ifdef CONFIG_SWIOTLB_DYNAMIC
809 INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
810 spin_lock_init(&dev->dma_io_tlb_lock);
811 dev->dma_uses_io_tlb = false;
816 * Return the offset into an IO TLB slot required to keep the device happy.
818 static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
820 return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
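/*
 * Example: a device that sets dma_set_min_align_mask(dev, 4096 - 1), as the
 * NVMe driver does for its PRP alignment, and maps a buffer at orig_addr
 * 0x12345a30 gets offset = 0xa30 & 0xfff & (IO_TLB_SIZE - 1) = 0x230, so the
 * bounce buffer starts 0x230 bytes into its first slot; the remaining
 * alignment bits (bit 11 and up) are honoured by the slot search below.
 */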
824 * Bounce: copy the swiotlb buffer from or back to the original dma location
826 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
827 enum dma_data_direction dir)
829 struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
830 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
831 phys_addr_t orig_addr = mem->slots[index].orig_addr;
832 size_t alloc_size = mem->slots[index].alloc_size;
833 unsigned long pfn = PFN_DOWN(orig_addr);
834 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
835 unsigned int tlb_offset, orig_addr_offset;
837 if (orig_addr == INVALID_PHYS_ADDR)
840 tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
841 orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
842 if (tlb_offset < orig_addr_offset) {
843 dev_WARN_ONCE(dev, 1,
844 "Access before mapping start detected. orig offset %u, requested offset %u.\n",
845 orig_addr_offset, tlb_offset);
849 tlb_offset -= orig_addr_offset;
850 if (tlb_offset > alloc_size) {
851 dev_WARN_ONCE(dev, 1,
852 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
853 alloc_size, size, tlb_offset);
857 orig_addr += tlb_offset;
858 alloc_size -= tlb_offset;
860 if (size > alloc_size) {
861 dev_WARN_ONCE(dev, 1,
862 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
867 if (PageHighMem(pfn_to_page(pfn))) {
868 unsigned int offset = orig_addr & ~PAGE_MASK;
874 sz = min_t(size_t, PAGE_SIZE - offset, size);
876 local_irq_save(flags);
877 page = pfn_to_page(pfn);
878 if (dir == DMA_TO_DEVICE)
879 memcpy_from_page(vaddr, page, offset, sz);
881 memcpy_to_page(page, offset, vaddr, sz);
882 local_irq_restore(flags);
889 } else if (dir == DMA_TO_DEVICE) {
890 memcpy(vaddr, phys_to_virt(orig_addr), size);
892 memcpy(phys_to_virt(orig_addr), vaddr, size);
896 static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
898 return start + (idx << IO_TLB_SHIFT);
902 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
904 static inline unsigned long get_max_slots(unsigned long boundary_mask)
906 return (boundary_mask >> IO_TLB_SHIFT) + 1;
909 static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
911 if (index >= mem->area_nslabs)
917 * Track the total used slots with a global atomic value in order to have
918 * correct information to determine the high water mark. The mem_used()
919 * function gives imprecise results because there's no locking across
920 * multiple areas.
922 #ifdef CONFIG_DEBUG_FS
923 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
925 unsigned long old_hiwater, new_used;
927 new_used = atomic_long_add_return(nslots, &mem->total_used);
928 old_hiwater = atomic_long_read(&mem->used_hiwater);
930 if (new_used <= old_hiwater)
932 } while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
933 &old_hiwater, new_used));
936 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
938 atomic_long_sub(nslots, &mem->total_used);
941 #else /* !CONFIG_DEBUG_FS */
942 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
945 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
948 #endif /* CONFIG_DEBUG_FS */
951 * swiotlb_area_find_slots() - search for slots in one IO TLB memory area
952 * @dev: Device which maps the buffer.
953 * @pool: Memory pool to be searched.
954 * @area_index: Index of the IO TLB memory area to be searched.
955 * @orig_addr: Original (non-bounced) IO buffer address.
956 * @alloc_size: Total requested size of the bounce buffer,
957 * including initial alignment padding.
958 * @alloc_align_mask: Required alignment of the allocated buffer.
960 * Find a suitable sequence of IO TLB entries for the request and allocate
961 * a buffer from the given IO TLB memory area.
962 * This function takes care of locking.
964 * Return: Index of the first allocated slot, or -1 on error.
966 static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
967 int area_index, phys_addr_t orig_addr, size_t alloc_size,
968 unsigned int alloc_align_mask)
970 struct io_tlb_area *area = pool->areas + area_index;
971 unsigned long boundary_mask = dma_get_seg_boundary(dev);
972 dma_addr_t tbl_dma_addr =
973 phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
974 unsigned long max_slots = get_max_slots(boundary_mask);
975 unsigned int iotlb_align_mask =
976 dma_get_min_align_mask(dev) | alloc_align_mask;
977 unsigned int nslots = nr_slots(alloc_size), stride;
978 unsigned int offset = swiotlb_align_offset(dev, orig_addr);
979 unsigned int index, slots_checked, count = 0, i;
981 unsigned int slot_base;
982 unsigned int slot_index;
985 BUG_ON(area_index >= pool->nareas);
988 * For allocations of PAGE_SIZE or larger only look for page aligned
989 * allocations.
991 if (alloc_size >= PAGE_SIZE)
992 iotlb_align_mask |= ~PAGE_MASK;
993 iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
996 * For mappings with an alignment requirement don't bother looping to
997 * unaligned slots once we have found an aligned one.
999 stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
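/*
 * Example: with a 4 KB min_align_mask (0xfff) and no extra alloc_align_mask,
 * the masking above leaves iotlb_align_mask = 0x800, so
 * stride = (0x800 >> IO_TLB_SHIFT) + 1 = 2 and the search advances two slots
 * (4 KB) at a time.
 */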
1001 spin_lock_irqsave(&area->lock, flags);
1002 if (unlikely(nslots > pool->area_nslabs - area->used))
1005 slot_base = area_index * pool->area_nslabs;
1006 index = area->index;
1008 for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
1009 slot_index = slot_base + index;
1012 (slot_addr(tbl_dma_addr, slot_index) &
1013 iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
1014 index = wrap_area_index(pool, index + 1);
1019 if (!iommu_is_span_boundary(slot_index, nslots,
1020 nr_slots(tbl_dma_addr),
1022 if (pool->slots[slot_index].list >= nslots)
1025 index = wrap_area_index(pool, index + stride);
1026 slots_checked += stride;
1030 spin_unlock_irqrestore(&area->lock, flags);
1035 * If we find a slot that indicates we have 'nslots' number of
1036 * contiguous buffers, we allocate the buffers from that slot onwards
1037 * and set the list of free entries to '0' indicating unavailable.
1039 for (i = slot_index; i < slot_index + nslots; i++) {
1040 pool->slots[i].list = 0;
1041 pool->slots[i].alloc_size = alloc_size - (offset +
1042 ((i - slot_index) << IO_TLB_SHIFT));
1044 for (i = slot_index - 1;
1045 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
1046 pool->slots[i].list; i--)
1047 pool->slots[i].list = ++count;
1050 * Update the indices to avoid searching in the next round.
1052 area->index = wrap_area_index(pool, index + nslots);
1053 area->used += nslots;
1054 spin_unlock_irqrestore(&area->lock, flags);
1056 inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
1061 * swiotlb_pool_find_slots() - search for slots in one memory pool
1062 * @dev: Device which maps the buffer.
1063 * @pool: Memory pool to be searched.
1064 * @orig_addr: Original (non-bounced) IO buffer address.
1065 * @alloc_size: Total requested size of the bounce buffer,
1066 * including initial alignment padding.
1067 * @alloc_align_mask: Required alignment of the allocated buffer.
1069 * Search through one memory pool to find a sequence of slots that match the
1070 * allocation constraints.
1072 * Return: Index of the first allocated slot, or -1 on error.
1074 static int swiotlb_pool_find_slots(struct device *dev, struct io_tlb_pool *pool,
1075 phys_addr_t orig_addr, size_t alloc_size,
1076 unsigned int alloc_align_mask)
1078 int start = raw_smp_processor_id() & (pool->nareas - 1);
1079 int i = start, index;
1082 index = swiotlb_area_find_slots(dev, pool, i, orig_addr,
1083 alloc_size, alloc_align_mask);
1086 if (++i >= pool->nareas)
1088 } while (i != start);
1093 #ifdef CONFIG_SWIOTLB_DYNAMIC
1096 * swiotlb_find_slots() - search for slots in the whole swiotlb
1097 * @dev: Device which maps the buffer.
1098 * @orig_addr: Original (non-bounced) IO buffer address.
1099 * @alloc_size: Total requested size of the bounce buffer,
1100 * including initial alignment padding.
1101 * @alloc_align_mask: Required alignment of the allocated buffer.
1102 * @retpool: Used memory pool, updated on return.
1104 * Search through the whole software IO TLB to find a sequence of slots that
1105 * match the allocation constraints.
1107 * Return: Index of the first allocated slot, or -1 on error.
1109 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1110 size_t alloc_size, unsigned int alloc_align_mask,
1111 struct io_tlb_pool **retpool)
1113 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1114 struct io_tlb_pool *pool;
1115 unsigned long nslabs;
1116 unsigned long flags;
1121 list_for_each_entry_rcu(pool, &mem->pools, node) {
1122 index = swiotlb_pool_find_slots(dev, pool, orig_addr,
1123 alloc_size, alloc_align_mask);
1133 schedule_work(&mem->dyn_alloc);
1135 nslabs = nr_slots(alloc_size);
1136 phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
1137 pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
1138 GFP_NOWAIT | __GFP_NOWARN);
1142 index = swiotlb_pool_find_slots(dev, pool, orig_addr,
1143 alloc_size, alloc_align_mask);
1145 swiotlb_dyn_free(&pool->rcu);
1149 pool->transient = true;
1150 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
1151 list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
1152 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
1155 dev->dma_uses_io_tlb = true;
1156 /* Pairs with smp_rmb() in is_swiotlb_buffer() */
1163 #else /* !CONFIG_SWIOTLB_DYNAMIC */
1165 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1166 size_t alloc_size, unsigned int alloc_align_mask,
1167 struct io_tlb_pool **retpool)
1169 *retpool = &dev->dma_io_tlb_mem->defpool;
1170 return swiotlb_pool_find_slots(dev, *retpool,
1171 orig_addr, alloc_size, alloc_align_mask);
1174 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1176 #ifdef CONFIG_DEBUG_FS
1179 * mem_used() - get number of used slots in an allocator
1180 * @mem: Software IO TLB allocator.
1182 * The result is accurate in this version of the function, because an atomic
1183 * counter is available if CONFIG_DEBUG_FS is set.
1185 * Return: Number of used slots.
1187 static unsigned long mem_used(struct io_tlb_mem *mem)
1189 return atomic_long_read(&mem->total_used);
1192 #else /* !CONFIG_DEBUG_FS */
1195 * mem_pool_used() - get number of used slots in a memory pool
1196 * @pool: Software IO TLB memory pool.
1198 * The result is not accurate, see mem_used().
1200 * Return: Approximate number of used slots.
1202 static unsigned long mem_pool_used(struct io_tlb_pool *pool)
1205 unsigned long used = 0;
1207 for (i = 0; i < pool->nareas; i++)
1208 used += pool->areas[i].used;
1213 * mem_used() - get number of used slots in an allocator
1214 * @mem: Software IO TLB allocator.
1216 * The result is not accurate, because there is no locking of individual
1217 * areas.
1219 * Return: Approximate number of used slots.
1221 static unsigned long mem_used(struct io_tlb_mem *mem)
1223 #ifdef CONFIG_SWIOTLB_DYNAMIC
1224 struct io_tlb_pool *pool;
1225 unsigned long used = 0;
1228 list_for_each_entry_rcu(pool, &mem->pools, node)
1229 used += mem_pool_used(pool);
1234 return mem_pool_used(&mem->defpool);
1238 #endif /* CONFIG_DEBUG_FS */
1240 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
1241 size_t mapping_size, size_t alloc_size,
1242 unsigned int alloc_align_mask, enum dma_data_direction dir,
1243 unsigned long attrs)
1245 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1246 unsigned int offset = swiotlb_align_offset(dev, orig_addr);
1247 struct io_tlb_pool *pool;
1250 phys_addr_t tlb_addr;
1252 if (!mem || !mem->nslabs) {
1253 dev_warn_ratelimited(dev,
1254 "Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
1255 return (phys_addr_t)DMA_MAPPING_ERROR;
1258 if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
1259 pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
1261 if (mapping_size > alloc_size) {
1262 dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
1263 mapping_size, alloc_size);
1264 return (phys_addr_t)DMA_MAPPING_ERROR;
1267 index = swiotlb_find_slots(dev, orig_addr,
1268 alloc_size + offset, alloc_align_mask, &pool);
1270 if (!(attrs & DMA_ATTR_NO_WARN))
1271 dev_warn_ratelimited(dev,
1272 "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
1273 alloc_size, mem->nslabs, mem_used(mem));
1274 return (phys_addr_t)DMA_MAPPING_ERROR;
1278 * Save away the mapping from the original address to the DMA address.
1279 * This is needed when we sync the memory. Then we sync the buffer if
1280 * needed.
1282 for (i = 0; i < nr_slots(alloc_size + offset); i++)
1283 pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
1284 tlb_addr = slot_addr(pool->start, index) + offset;
1286 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
1287 * to the tlb buffer, if we knew for sure the device will
1288 * overwrite the entire current content. But we don't. Thus
1289 * unconditional bounce may prevent leaking swiotlb content (i.e.
1290 * kernel memory) to user-space.
1292 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
1296 static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
1298 struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
1299 unsigned long flags;
1300 unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
1301 int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
1302 int nslots = nr_slots(mem->slots[index].alloc_size + offset);
1303 int aindex = index / mem->area_nslabs;
1304 struct io_tlb_area *area = &mem->areas[aindex];
1308 * Return the buffer to the free list by setting the corresponding
1309 * entries to indicate the number of contiguous entries available.
1310 * While returning the entries to the free list, we merge the entries
1311 * with slots below and above the pool being returned.
1313 BUG_ON(aindex >= mem->nareas);
1315 spin_lock_irqsave(&area->lock, flags);
1316 if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
1317 count = mem->slots[index + nslots].list;
1322 * Step 1: return the slots to the free list, merging the slots with
1323 * succeeding slots
1325 for (i = index + nslots - 1; i >= index; i--) {
1326 mem->slots[i].list = ++count;
1327 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
1328 mem->slots[i].alloc_size = 0;
1332 * Step 2: merge the returned slots with the preceding slots, if
1333 * available (non-zero)
1336 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
1338 mem->slots[i].list = ++count;
1339 area->used -= nslots;
1340 spin_unlock_irqrestore(&area->lock, flags);
1342 dec_used(dev->dma_io_tlb_mem, nslots);
1345 #ifdef CONFIG_SWIOTLB_DYNAMIC
1348 * swiotlb_del_transient() - delete a transient memory pool
1349 * @dev: Device which mapped the buffer.
1350 * @tlb_addr: Physical address within a bounce buffer.
1352 * Check whether the address belongs to a transient SWIOTLB memory pool.
1353 * If yes, then delete the pool.
1355 * Return: %true if @tlb_addr belonged to a transient pool that was released.
1357 static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr)
1359 struct io_tlb_pool *pool;
1361 pool = swiotlb_find_pool(dev, tlb_addr);
1362 if (!pool->transient)
1365 dec_used(dev->dma_io_tlb_mem, pool->nslabs);
1366 swiotlb_del_pool(dev, pool);
1370 #else /* !CONFIG_SWIOTLB_DYNAMIC */
1372 static inline bool swiotlb_del_transient(struct device *dev,
1373 phys_addr_t tlb_addr)
1378 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1381 * tlb_addr is the physical address of the bounce buffer to unmap.
1383 void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
1384 size_t mapping_size, enum dma_data_direction dir,
1385 unsigned long attrs)
1388 * First, sync the memory before unmapping the entry
1390 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
1391 (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
1392 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
1394 if (swiotlb_del_transient(dev, tlb_addr))
1396 swiotlb_release_slots(dev, tlb_addr);
1399 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
1400 size_t size, enum dma_data_direction dir)
1402 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
1403 swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
1405 BUG_ON(dir != DMA_FROM_DEVICE);
1408 void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
1409 size_t size, enum dma_data_direction dir)
1411 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
1412 swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
1414 BUG_ON(dir != DMA_TO_DEVICE);
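/*
 * Sketch of how these syncs are reached: a driver that reuses a bounced
 * DMA_FROM_DEVICE mapping calls the streaming-DMA sync helpers, which end up
 * here when the address resolves to a bounce buffer ("dev", "handle" and
 * "len" are placeholders):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the freshly bounced-back data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */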
1418 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
1419 * to the device copy the data into it as well.
1421 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
1422 enum dma_data_direction dir, unsigned long attrs)
1424 phys_addr_t swiotlb_addr;
1425 dma_addr_t dma_addr;
1427 trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
1429 swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
1431 if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
1432 return DMA_MAPPING_ERROR;
1434 /* Ensure that the address returned is DMA'ble */
1435 dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
1436 if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
1437 swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
1438 attrs | DMA_ATTR_SKIP_CPU_SYNC);
1439 dev_WARN_ONCE(dev, 1,
1440 "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
1441 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
1442 return DMA_MAPPING_ERROR;
1445 if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1446 arch_sync_dma_for_device(swiotlb_addr, size, dir);
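/*
 * Drivers do not call swiotlb_map() directly; dma-direct calls it when a
 * streaming mapping is not addressable by the device or bouncing is forced.
 * A plain driver-side sequence such as the following is all that is needed
 * to end up here ("dev", "buf" and "len" are placeholders):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... device performs DMA ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */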
1450 size_t swiotlb_max_mapping_size(struct device *dev)
1452 int min_align_mask = dma_get_min_align_mask(dev);
1456 * swiotlb_find_slots() skips slots according to
1457 * min align mask. This affects max mapping size.
1458 * Take it into account here.
1461 min_align = roundup(min_align_mask, IO_TLB_SIZE);
1463 return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
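/*
 * With the defaults (IO_TLB_SIZE = 2 KB, IO_TLB_SEGSIZE = 128) this is at
 * most 256 KB, minus an allowance for the device's min_align_mask. A sketch
 * of the driver-facing side, which can report this limit through
 * dma_max_mapping_size() ("dev" and the request queue "q" are placeholders):
 *
 *	size_t max = dma_max_mapping_size(dev);
 *
 *	blk_queue_max_hw_sectors(q, max >> SECTOR_SHIFT);
 */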
1467 * is_swiotlb_allocated() - check if the default software IO TLB is initialized
1469 bool is_swiotlb_allocated(void)
1471 return io_tlb_default_mem.nslabs;
1474 bool is_swiotlb_active(struct device *dev)
1476 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1478 return mem && mem->nslabs;
1482 * default_swiotlb_base() - get the base address of the default SWIOTLB
1484 * Get the lowest physical address used by the default software IO TLB pool.
1486 phys_addr_t default_swiotlb_base(void)
1488 #ifdef CONFIG_SWIOTLB_DYNAMIC
1489 io_tlb_default_mem.can_grow = false;
1491 return io_tlb_default_mem.defpool.start;
1495 * default_swiotlb_limit() - get the address limit of the default SWIOTLB
1497 * Get the highest physical address used by the default software IO TLB pool.
1499 phys_addr_t default_swiotlb_limit(void)
1501 #ifdef CONFIG_SWIOTLB_DYNAMIC
1502 return io_tlb_default_mem.phys_limit;
1504 return io_tlb_default_mem.defpool.end - 1;
1508 #ifdef CONFIG_DEBUG_FS
1510 static int io_tlb_used_get(void *data, u64 *val)
1512 struct io_tlb_mem *mem = data;
1514 *val = mem_used(mem);
1518 static int io_tlb_hiwater_get(void *data, u64 *val)
1520 struct io_tlb_mem *mem = data;
1522 *val = atomic_long_read(&mem->used_hiwater);
1526 static int io_tlb_hiwater_set(void *data, u64 val)
1528 struct io_tlb_mem *mem = data;
1530 /* Only allow setting to zero */
1534 atomic_long_set(&mem->used_hiwater, val);
1538 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
1539 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
1540 io_tlb_hiwater_set, "%llu\n");
1542 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
1543 const char *dirname)
1545 atomic_long_set(&mem->total_used, 0);
1546 atomic_long_set(&mem->used_hiwater, 0);
1548 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
1552 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
1553 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
1555 debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
1556 &fops_io_tlb_hiwater);
1559 static int __init swiotlb_create_default_debugfs(void)
1561 swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
1565 late_initcall(swiotlb_create_default_debugfs);
1567 #else /* !CONFIG_DEBUG_FS */
1569 static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
1570 const char *dirname)
1574 #endif /* CONFIG_DEBUG_FS */
1576 #ifdef CONFIG_DMA_RESTRICTED_POOL
1578 struct page *swiotlb_alloc(struct device *dev, size_t size)
1580 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1581 struct io_tlb_pool *pool;
1582 phys_addr_t tlb_addr;
1588 index = swiotlb_find_slots(dev, 0, size, 0, &pool);
1592 tlb_addr = slot_addr(pool->start, index);
1594 return pfn_to_page(PFN_DOWN(tlb_addr));
1597 bool swiotlb_free(struct device *dev, struct page *page, size_t size)
1599 phys_addr_t tlb_addr = page_to_phys(page);
1601 if (!is_swiotlb_buffer(dev, tlb_addr))
1604 swiotlb_release_slots(dev, tlb_addr);
1609 static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
1612 struct io_tlb_mem *mem = rmem->priv;
1613 unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
1615 /* Set the per-device IO TLB area count to one */
1616 unsigned int nareas = 1;
1618 if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
1619 dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
1624 * Since multiple devices can share the same pool, the private data,
1625 * io_tlb_mem struct, will be initialized by the first device attached
1626 * to it.
1629 struct io_tlb_pool *pool;
1631 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
1634 pool = &mem->defpool;
1636 pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
1642 pool->areas = kcalloc(nareas, sizeof(*pool->areas),
1650 set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
1651 rmem->size >> PAGE_SHIFT);
1652 swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
1654 mem->force_bounce = true;
1655 mem->for_alloc = true;
1656 #ifdef CONFIG_SWIOTLB_DYNAMIC
1657 spin_lock_init(&mem->lock);
1659 add_mem_pool(mem, pool);
1663 swiotlb_create_debugfs_files(mem, rmem->name);
1666 dev->dma_io_tlb_mem = mem;
1671 static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
1674 dev->dma_io_tlb_mem = &io_tlb_default_mem;
1677 static const struct reserved_mem_ops rmem_swiotlb_ops = {
1678 .device_init = rmem_swiotlb_device_init,
1679 .device_release = rmem_swiotlb_device_release,
1682 static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
1684 unsigned long node = rmem->fdt_node;
1686 if (of_get_flat_dt_prop(node, "reusable", NULL) ||
1687 of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
1688 of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
1689 of_get_flat_dt_prop(node, "no-map", NULL))
1692 rmem->ops = &rmem_swiotlb_ops;
1693 pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
1694 &rmem->base, (unsigned long)rmem->size / SZ_1M);
1698 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
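/*
 * A restricted DMA pool is wired up from the device tree; a minimal,
 * illustrative fragment binding a pool to one device looks roughly like:
 *
 *	reserved-memory {
 *		restricted_dma: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	some-device@10000000 {
 *		memory-region = <&restricted_dma>;
 *	};
 *
 * rmem_swiotlb_setup() above claims the region at boot, and
 * rmem_swiotlb_device_init() points the attached device's dma_io_tlb_mem at
 * it, so all of that device's DMA is bounced through the pool.
 */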
1699 #endif /* CONFIG_DMA_RESTRICTED_POOL */