// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}
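
/*
 * Log why a mapping of @dma_addr + @size could not be satisfied: either the
 * device has no dma_mask at all, or the address overflows the device/bus
 * DMA mask.
 */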
static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev->dma_mask) {
		dev_err_once(dev, "DMA map on device without dma_mask\n");
	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
		dev_err_once(dev,
			"overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
	}
	WARN_ON_ONCE(!dev->dma_mask);
}

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted())
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}
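
/*
 * Return the DMA mask a device would need to address all memory reachable
 * through the direct mapping, capped by any bus DMA mask.
 */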
u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
		max_dma = dev->bus_dma_mask;

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
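
/*
 * Pick the GFP zone flags (GFP_DMA / GFP_DMA32) that match the physical
 * address range the DMA mask allows, and report that range in *phys_mask.
 */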
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted())
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
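
/*
 * Check that the whole [phys, phys + size) range is addressable with the
 * device's coherent DMA mask and any bus DMA mask.
 */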
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}
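
/*
 * Allocate pages for a coherent mapping: try CMA first when sleeping is
 * allowed, fall back to the page allocator, and retry in a more restrictive
 * zone if the memory returned is not actually addressable by the device.
 */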
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	u64 phys_mask;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!page)
		return NULL;

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_from_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		__dma_direct_free_pages(dev, size, page);
		return NULL;
	}

	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}
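
/*
 * Return pages from __dma_direct_alloc_pages() to CMA if they came from
 * there, otherwise to the page allocator.
 */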
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
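
/*
 * The sync_*_for_device hooks below copy CPU-side data into any swiotlb
 * bounce buffer and perform the cache maintenance needed before the device
 * accesses the buffer.
 */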
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(dev, paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);

void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
					dir);
	}
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(dev, paddr, size, dir);
		arch_sync_dma_for_cpu_all(dev);
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);

void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);

		if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all(dev);
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
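
/*
 * Unmapping syncs the buffer back for the CPU (unless the caller asked to
 * skip it) and releases any swiotlb bounce buffer used for the mapping.
 */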
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_page);

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif
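
/*
 * Check whether an address can be used directly: swiotlb bouncing is not
 * forced and the DMA address fits the device's addressing capability.
 */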
static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	return swiotlb_force != SWIOTLB_FORCE &&
		dma_capable(dev, dma_addr, size);
}
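
/*
 * Map a single page for streaming DMA.  If the physical address is not
 * directly usable, try to bounce through swiotlb before giving up.
 */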
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);
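
/*
 * Map a scatterlist entry by entry; on failure unmap the entries already
 * mapped and return 0.
 */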
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
EXPORT_SYMBOL(dma_direct_map_sg);

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_direct_possible(dev, dma_addr, size))) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_resource);

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
	else
		min_mask = DMA_BIT_MASK(32);

	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	return mask >= __phys_to_dma(dev, min_mask);
}
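
/*
 * Report the largest mapping the direct mapping can provide, which is only
 * limited when swiotlb bouncing may be used.
 */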
size_t dma_direct_max_mapping_size(struct device *dev)
{
	size_t size = SIZE_MAX;

	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active())
		size = swiotlb_max_mapping_size(dev);

	return size;
}