// SPDX-License-Identifier: GPL-2.0+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/rwsem.h>
#include "kpc_dma_driver.h"

/**********  IRQ Handlers  **********/
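/*
 * Top half: the hard IRQ handler only checks whether this engine has work
 * outstanding and kicks the workqueue.  The actual completion handling is
 * done in ndd_irq_worker(), which runs in process context where taking the
 * engine lock is safe.
 */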
static
irqreturn_t  ndd_irq_handler(int irq, void *dev_id)
{
        struct kpc_dma_device *ldev = (struct kpc_dma_device *)dev_id;

        if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) ||
            (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev)))
                schedule_work(&ldev->irq_work);

        return IRQ_HANDLED;
}

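/*
 * Bottom half: walk the ring from the last descriptor we saw complete up to
 * the hardware's completed pointer.  Byte counts and error/short flags are
 * accumulated across each SOP..EOP span and reported through the completion
 * callback on the EOP descriptor.
 */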
static
void  ndd_irq_worker(struct work_struct *ws)
{
        struct kpc_dma_descriptor *cur;
        struct kpc_dma_device *eng = container_of(ws, struct kpc_dma_device, irq_work);

        lock_engine(eng);

        if (GetEngineCompletePtr(eng) == 0)
                goto out;

        if (eng->desc_completed->MyDMAAddr == GetEngineCompletePtr(eng))
                goto out;

        cur = eng->desc_completed;
        do {
                cur = cur->Next;
                dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n", cur, cur->acd);
                BUG_ON(cur == eng->desc_next); // Ordering failure.

                if (cur->DescControlFlags & DMA_DESC_CTL_SOP) {
                        eng->accumulated_bytes = 0;
                        eng->accumulated_flags = 0;
                }

                eng->accumulated_bytes += cur->DescByteCount;
                if (cur->DescStatusFlags & DMA_DESC_STS_ERROR)
                        eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_ERROR;

                if (cur->DescStatusFlags & DMA_DESC_STS_SHORT)
                        eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_SHORT;

                if (cur->DescControlFlags & DMA_DESC_CTL_EOP) {
                        if (cur->acd)
                                transfer_complete_cb(cur->acd, eng->accumulated_bytes,
                                                     eng->accumulated_flags | ACD_FLAG_DONE);
                }

                eng->desc_completed = cur;
        } while (cur->MyDMAAddr != GetEngineCompletePtr(eng));

 out:
        SetClearEngineControl(eng, ENG_CTL_IRQ_ACTIVE, 0);

        unlock_engine(eng);
}

/**********  DMA Engine Init/Teardown  **********/
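/*
 * Point the hardware at the head of the descriptor ring and enable it.
 * desc_completed starts at the last descriptor so that the first completion
 * is detected at the head of the ring.
 */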
void  start_dma_engine(struct kpc_dma_device *eng)
{
        eng->desc_next       = eng->desc_pool_first;
        eng->desc_completed  = eng->desc_pool_last;

        // Setup the engine pointer registers
        SetEngineNextPtr(eng, eng->desc_pool_first);
        SetEngineSWPtr(eng, eng->desc_pool_first);
        ClearEngineCompletePtr(eng);

        WriteEngineControl(eng, ENG_CTL_DMA_ENABLE | ENG_CTL_IRQ_ENABLE);
}

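/*
 * Allocate a circular ring of desc_cnt descriptors from a DMA pool, link
 * them by both CPU pointer and bus address, hook up the IRQ, and start the
 * engine.  Returns 0 on success or a negative errno.
 */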
int  setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
{
        u32 caps;
        struct kpc_dma_descriptor *cur;
        struct kpc_dma_descriptor *next;
        dma_addr_t next_handle;
        dma_addr_t head_handle;
        unsigned int i;
        int rv;

        dev_dbg(&eng->pldev->dev, "Setting up DMA engine [%p]\n", eng);

        caps = GetEngineCapabilities(eng);

        if (WARN(!(caps & ENG_CAP_PRESENT),
                 "%s() called for DMA Engine at %p which isn't present in hardware!\n",
                 __func__, eng))
                return -ENXIO;

        if (caps & ENG_CAP_DIRECTION)
                eng->dir = DMA_FROM_DEVICE;
        else
                eng->dir = DMA_TO_DEVICE;

        eng->desc_pool_cnt = desc_cnt;
        eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev,
                                         sizeof(struct kpc_dma_descriptor),
                                         DMA_DESC_ALIGNMENT, 4096);
        if (!eng->desc_pool)
                return -ENOMEM;

        eng->desc_pool_first = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &head_handle);
        if (!eng->desc_pool_first) {
                dev_err(&eng->pldev->dev, "%s: couldn't allocate desc_pool_first!\n", __func__);
                dma_pool_destroy(eng->desc_pool);
                return -ENOMEM;
        }

        eng->desc_pool_first->MyDMAAddr = head_handle;
        clear_desc(eng->desc_pool_first);

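        /*
         * Allocate the remaining descriptors and chain them, keeping the
         * CPU-side Next pointers and the bus-address DescNextDescPtr links
         * in sync.
         */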
        cur = eng->desc_pool_first;
        for (i = 1; i < eng->desc_pool_cnt; i++) {
                next = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &next_handle);
                if (!next)
                        goto done_alloc;

                clear_desc(next);
                next->MyDMAAddr = next_handle;

                cur->DescNextDescPtr = next_handle;
                cur->Next = next;
                cur = next;
        }

 done_alloc:
        // If allocation stopped early, shrink the count so teardown frees
        // exactly the descriptors that were actually allocated.
        eng->desc_pool_cnt = i;

        // Link the last descriptor back to the first, so it's a circular linked list
        cur->Next = eng->desc_pool_first;
        cur->DescNextDescPtr = eng->desc_pool_first->MyDMAAddr;

        eng->desc_pool_last = cur;
        eng->desc_completed = eng->desc_pool_last;

        // Setup work queue
        INIT_WORK(&eng->irq_work, ndd_irq_worker);

        // Grab IRQ line
        rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED,
                         KP_DRIVER_NAME_DMA_CONTROLLER, eng);
        if (rv) {
                dev_err(&eng->pldev->dev, "%s: failed to request_irq: %d\n", __func__, rv);
                goto err_free_pool;
        }

        // Turn on the engine!
        start_dma_engine(eng);
        unlock_engine(eng);

        return 0;

 err_free_pool:
        // Don't leak the descriptor ring if IRQ setup fails.
        cur = eng->desc_pool_first;
        for (i = 0; i < eng->desc_pool_cnt; i++) {
                next = cur->Next;
                dma_pool_free(eng->desc_pool, cur, cur->MyDMAAddr);
                cur = next;
        }
        dma_pool_destroy(eng->desc_pool);
        return rv;
}
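/*
 * Typical usage (sketch only; the exact descriptor count comes from the
 * probe path in kpc_dma_driver.c):
 *
 *      rv = setup_dma_engine(eng, 30);
 *      if (rv)
 *              return rv;
 *      ...
 *      destroy_dma_engine(eng);
 */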
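/*
 * Shut the engine down in stages: disable DMA, let any in-flight transfer
 * drain, request a graceful reset, then force the reset.  Each stage is
 * polled with a 500 ms timeout (HZ / 2) and a dev_crit() message if the
 * bit never clears.
 */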
void  stop_dma_engine(struct kpc_dma_device *eng)
{
        unsigned long timeout;

        dev_dbg(&eng->pldev->dev, "Destroying DMA engine [%p]\n", eng);

        // Disable the descriptor engine
        WriteEngineControl(eng, 0);

        // Wait for descriptor engine to finish current operation
        timeout = jiffies + (HZ / 2);
        while (GetEngineControl(eng) & ENG_CTL_DMA_RUNNING) {
                if (time_after(jiffies, timeout)) {
                        dev_crit(&eng->pldev->dev, "DMA_RUNNING still asserted!\n");
                        break;
                }
        }

        // Request a reset
        WriteEngineControl(eng, ENG_CTL_DMA_RESET_REQUEST);

        // Wait for the reset request to be processed
        timeout = jiffies + (HZ / 2);
        while (GetEngineControl(eng) & (ENG_CTL_DMA_RUNNING | ENG_CTL_DMA_RESET_REQUEST)) {
                if (time_after(jiffies, timeout)) {
                        dev_crit(&eng->pldev->dev, "ENG_CTL_DMA_RESET_REQUEST still asserted!\n");
                        break;
                }
        }

        // Issue the reset
        WriteEngineControl(eng, ENG_CTL_DMA_RESET);

        // And wait for the reset to complete
        timeout = jiffies + (HZ / 2);
        while (GetEngineControl(eng) & ENG_CTL_DMA_RESET) {
                if (time_after(jiffies, timeout)) {
                        dev_crit(&eng->pldev->dev, "DMA_RESET still asserted!\n");
                        break;
                }
        }

        // Clear any persistent bits just to make sure there is no residue from the reset
        SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE |
                                    ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR |
                                    ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END |
                                    ENG_CTL_DMA_WAITING_PERSIST), 0);

        // Completely disable the engine
        WriteEngineControl(eng, 0);
}
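/*
 * Teardown reverses setup: quiesce the hardware, walk once around the ring
 * freeing each descriptor at its stored bus address, then release the pool
 * and the IRQ line.
 */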
void  destroy_dma_engine(struct kpc_dma_device *eng)
{
        struct kpc_dma_descriptor *cur;
        dma_addr_t cur_handle;
        unsigned int i;

        stop_dma_engine(eng);

        cur = eng->desc_pool_first;
        cur_handle = eng->desc_pool_first->MyDMAAddr;

        for (i = 0; i < eng->desc_pool_cnt; i++) {
                struct kpc_dma_descriptor *next = cur->Next;
                dma_addr_t next_handle = cur->DescNextDescPtr;

                dma_pool_free(eng->desc_pool, cur, cur_handle);
                cur_handle = next_handle;
                cur = next;
        }

        dma_pool_destroy(eng->desc_pool);

        free_irq(eng->irq, eng);
}

/**********  Helper Functions  **********/
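/*
 * Count how many descriptors are free between the driver's next-to-use
 * pointer and the last completed descriptor.  Callers are expected to hold
 * the engine lock, since this walks the live ring.
 */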
int  count_descriptors_available(struct kpc_dma_device *eng)
{
        u32 count = 0;
        struct kpc_dma_descriptor *cur = eng->desc_next;

        while (cur != eng->desc_completed) {
                BUG_ON(!cur);
                count++;
                cur = cur->Next;
        }
        return count;
}

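/*
 * Zero the hardware-visible fields of a descriptor.  MyDMAAddr, Next, and
 * DescNextDescPtr are deliberately left untouched so the ring stays linked.
 */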
void  clear_desc(struct kpc_dma_descriptor *desc)
{
        if (!desc)
                return;
        desc->DescByteCount         = 0;
        desc->DescStatusErrorFlags  = 0;
        desc->DescStatusFlags       = 0;
        desc->DescUserControlLS     = 0;
        desc->DescUserControlMS     = 0;
        desc->DescCardAddrLS        = 0;
        desc->DescBufferByteCount   = 0;
        desc->DescCardAddrMS        = 0;
        desc->DescControlFlags      = 0;
        desc->DescSystemAddrLS      = 0;
        desc->DescSystemAddrMS      = 0;
        desc->acd = NULL;
}