arch/riscv/mm/dma-noncoherent.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V specific functions to support DMA for non-coherent devices
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/dma-noncoherent.h>

static bool noncoherent_supported __ro_after_init;
int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
EXPORT_SYMBOL_GPL(dma_cache_alignment);

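/*
 * Platform-specific cache maintenance hooks.  All fields default to NULL;
 * platforms whose cache maintenance does not use the standard Zicbom
 * instructions override them via riscv_noncoherent_register_cache_ops().
 */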
struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
	.wback = NULL,
	.inv = NULL,
	.wback_inv = NULL,
};

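/*
 * Write back (clean) CPU cache lines covering [paddr, paddr + size) so
 * that data written by the CPU becomes visible to the device.
 */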
static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback)) {
		noncoherent_cache_ops.wback(paddr, size);
		return;
	}
#endif
	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}

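/*
 * Invalidate CPU cache lines covering [paddr, paddr + size) so that the
 * CPU re-reads data the device has written to memory.
 */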
static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.inv)) {
		noncoherent_cache_ops.inv(paddr, size);
		return;
	}
#endif

	ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}

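/*
 * Write back and invalidate (flush) CPU cache lines covering
 * [paddr, paddr + size).
 */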
static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(paddr, size);
		return;
	}
#endif

	ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}

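/*
 * Policy hooks for the sync routines below: on RISC-V a DMA_FROM_DEVICE
 * buffer is cleaned before the transfer, and the invalidate is deferred
 * to arch_sync_dma_for_cpu() in case speculative accesses repopulate the
 * cache while the DMA is in flight.
 */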
static inline bool arch_sync_dma_clean_before_fromdevice(void)
{
	return true;
}

static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
{
	return true;
}

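/*
 * Make a CPU-written buffer visible to the device before DMA starts.
 * For DMA_FROM_DEVICE and DMA_BIDIRECTIONAL the cache is only cleaned
 * here; the post-DMA invalidate in arch_sync_dma_for_cpu() handles any
 * stale lines.
 */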
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		arch_dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		if (!arch_sync_dma_clean_before_fromdevice()) {
			arch_dma_cache_inv(paddr, size);
			break;
		}
		fallthrough;

	case DMA_BIDIRECTIONAL:
		/* Skip the invalidate here if it's done later */
		if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
		    arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_wback(paddr, size);
		else
			arch_dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

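/*
 * Hand a buffer back to the CPU after the device has finished with it.
 * Nothing is needed for DMA_TO_DEVICE; for the other directions, stale
 * cache lines are invalidated so the CPU observes the DMA'd data.
 */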
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		/*
		 * Invalidate again: speculative CPU prefetches may have
		 * pulled stale data into the cache during the DMA.
		 */
		if (arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

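/*
 * Flush the kernel linear-map alias of a buffer about to be used as a
 * coherent allocation, so no dirty lines survive behind its back.
 */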
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *flush_addr = page_address(page);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
		return;
	}
#endif

	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}

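/*
 * Record the device's coherence and sanity-check the configuration:
 * taint the kernel if a non-coherent device is paired with a kmalloc()
 * alignment smaller than the CBOM block size, or if non-coherent
 * operations are not supported at all.
 */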
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
		   TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
		   dev_driver_string(dev), dev_name(dev),
		   ARCH_DMA_MINALIGN, riscv_cbom_block_size);

	WARN_TAINT(!coherent && !noncoherent_supported, TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: device non-coherent but no non-coherent operations supported",
		   dev_driver_string(dev), dev_name(dev));

	dev->dma_coherent = coherent;
}

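/*
 * Called by platform or errata code once it is known that the CPU can
 * perform the cache maintenance required for non-coherent DMA.
 */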
void riscv_noncoherent_supported(void)
{
	WARN(!riscv_cbom_block_size,
	     "Non-coherent DMA support enabled without a block size\n");
	noncoherent_supported = true;
}

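/*
 * If every device is coherent, no DMA-specific alignment is required and
 * dma_cache_alignment can be relaxed to a single byte.
 */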
void __init riscv_set_dma_cache_alignment(void)
{
	if (!noncoherent_supported)
		dma_cache_alignment = 1;
}

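/**
 * riscv_noncoherent_register_cache_ops() - Register non-standard cache ops
 * @ops: the platform's cache maintenance callbacks
 *
 * Used by platforms whose cache maintenance instructions differ from the
 * standard Zicbom extension.  The callbacks are copied, so @ops need not
 * stay live after the call.  A NULL @ops is silently ignored.
 */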
void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
{
	if (!ops)
		return;

	noncoherent_cache_ops = *ops;
}
EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
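
/*
 * Usage sketch (illustrative only; the vendor_* names are hypothetical):
 * a platform with vendor-specific cache maintenance instructions would
 * register its callbacks early in boot, before any non-coherent DMA:
 *
 *	static void vendor_wback(phys_addr_t paddr, size_t size)
 *	{
 *		... issue the vendor clean instruction over the range ...
 *	}
 *
 *	static const struct riscv_nonstd_cache_ops vendor_cache_ops = {
 *		.wback		= vendor_wback,
 *		.inv		= vendor_inv,
 *		.wback_inv	= vendor_wback_inv,
 *	};
 *
 *	static int __init vendor_cache_init(void)
 *	{
 *		riscv_noncoherent_register_cache_ops(&vendor_cache_ops);
 *		return 0;
 *	}
 */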