/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

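/*
 * Out of line helpers, implemented in direct.c.  The inline fast paths
 * below are shared with the core mapping code, which uses them when the
 * device has no dma_map_ops and thus maps memory directly.
 */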
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);

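/*
 * Syncing a scatterlist for the device is only a real operation when the
 * architecture does cache maintenance (ARCH_HAS_SYNC_DMA_FOR_DEVICE) or
 * swiotlb may have to copy into a bounce buffer; otherwise provide an
 * empty stub so the calls compile away.
 */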
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

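/*
 * The same reasoning applies to unmapping and syncing scatterlists back
 * to the CPU: without architecture cache maintenance or swiotlb these
 * are no-ops.
 */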
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

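/*
 * Hand a single mapping to the device: if the buffer was bounced, copy
 * the CPU's data into the swiotlb slot first, then perform any cache
 * writeback the architecture needs for non-coherent devices.
 */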
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

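/*
 * Hand a single mapping back to the CPU: invalidate CPU caches on
 * non-coherent platforms first, then copy the device's data out of the
 * swiotlb slot if the buffer was bounced.  arch_dma_mark_clean() is a
 * stub on architectures that do not track clean pages.
 */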
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);

	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}

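/*
 * Map a single page for streaming DMA.  PCI P2PDMA pages must never be
 * bounced, so mapping fails outright if the device cannot reach them
 * directly.  For normal memory, swiotlb serves as a fallback when the
 * buffer lies outside the device's DMA mask or bus limit.
 */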
static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (is_swiotlb_force_bounce(dev)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		return swiotlb_map(dev, phys, size, dir, attrs);
	}

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		if (is_swiotlb_active(dev))
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

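/*
 * Tear down a single mapping: sync the buffer back to the CPU unless
 * the caller opted out, then release the swiotlb slot if one was used.
 * DMA_ATTR_SKIP_CPU_SYNC is passed down because any required sync has
 * already happened above.
 */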
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
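
/*
 * Typical use of the single-page helpers by the core mapping code, as a
 * sketch only (the real callers in mapping.c also invoke the DMA
 * debugging hooks):
 *
 *	dma_addr_t addr = dma_direct_map_page(dev, page, 0, len, dir, 0);
 *
 *	if (addr != DMA_MAPPING_ERROR) {
 *		... device performs DMA to/from the buffer ...
 *		dma_direct_unmap_page(dev, addr, len, dir, 0);
 *	}
 *
 * Unmapping with attrs == 0 performs the CPU sync internally, so no
 * separate dma_direct_sync_single_for_cpu() call is needed here.
 */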
#endif /* _KERNEL_DMA_DIRECT_H */