/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>

struct device;
struct page;
struct scatterlist;

enum swiotlb_force {
	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
	SWIOTLB_FORCE,		/* swiotlb=force */
	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
};

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
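
/*
 * Derived from the two constants above: each slab is 1 << 11 = 2048 bytes,
 * and a single bounce-buffer mapping can cover at most IO_TLB_SEGSIZE
 * contiguous slabs, i.e. 128 * 2 KiB = 256 KiB.
 */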

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)

extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
extern int swiotlb_late_init_with_default_size(size_t default_size);
extern void __init swiotlb_update_mem_attributes(void);

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
		size_t mapping_size, size_t alloc_size,
		enum dma_data_direction dir, unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t mapping_size,
				     enum dma_data_direction dir,
				     unsigned long attrs);

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
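
/*
 * Illustrative sketch, not part of the upstream header: a dma-direct style
 * caller typically falls back to swiotlb_map() only when the device cannot
 * address the buffer directly (dev, buf, size and dir stand for the
 * streaming mapping being set up):
 *
 *	phys_addr_t phys = virt_to_phys(buf);
 *
 *	if (!dma_capable(dev, phys_to_dma(dev, phys), size, true))
 *		return swiotlb_map(dev, phys, size, dir, 0);
 *	return phys_to_dma(dev, phys);
 */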

#ifdef CONFIG_SWIOTLB
extern enum swiotlb_force swiotlb_force;

/**
 * struct io_tlb_mem - IO TLB Memory Pool Descriptor
 *
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
 *		@end. This is command line adjustable via setup_io_tlb_npages.
 * @used:	The number of used IO TLB blocks.
 * @list:	The free list describing the number of free entries available
 *		from each index.
 * @index:	The index to start searching in the next round.
 * @orig_addr:	The original address corresponding to a mapped entry.
 * @alloc_size:	Size of the allocated buffer.
 * @lock:	The lock to protect the above data structures in the map and
 *		unmap calls.
 * @debugfs:	The dentry to debugfs.
 * @late_alloc:	%true if allocated using the page allocator
 */
struct io_tlb_mem {
	phys_addr_t start;
	phys_addr_t end;
	unsigned long nslabs;
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
	struct dentry *debugfs;
	bool late_alloc;
	struct io_tlb_slot {
		phys_addr_t orig_addr;
		size_t alloc_size;
		unsigned int list;
	} slots[];
};
extern struct io_tlb_mem *io_tlb_default_mem;

static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;

	return mem && paddr >= mem->start && paddr < mem->end;
}
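
/*
 * Illustrative sketch, not part of the upstream header: streaming-DMA sync
 * paths use is_swiotlb_buffer() to decide whether the address landed in the
 * bounce pool and therefore needs the swiotlb sync helpers declared above:
 *
 *	if (is_swiotlb_buffer(paddr))
 *		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 */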

void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(void);
void __init swiotlb_adjust_size(unsigned long size);
#else
#define swiotlb_force SWIOTLB_NO_FORCE
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
	return false;
}
static inline void swiotlb_exit(void)
{
}
static inline unsigned int swiotlb_max_segment(void)
{
	return 0;
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}
static inline bool is_swiotlb_active(void)
{
	return false;
}
static inline void swiotlb_adjust_size(unsigned long size)
{
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);
extern void swiotlb_set_max_segment(unsigned int);

#endif /* __LINUX_SWIOTLB_H */