// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-mcf-edma.h>

#include "fsl-edma-common.h"

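/*
 * The ColdFire eDMA controller provides up to 64 channels; CINT/CERR take a
 * 6-bit channel number, so EDMA_MASK_CH() keeps only the low six bits.
 */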
#define EDMA_CHANNELS		64
#define EDMA_MASK_CH(x)		((x) & GENMASK(5, 0))

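/*
 * Transfer-complete interrupt handler: build a 64-bit pending map from the
 * INTH (channels 32-63) and INTL (channels 0-31) registers, then acknowledge
 * each pending channel and complete or re-kick its current descriptor.
 */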
static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int ch;
	struct fsl_edma_chan *mcf_chan;
	u64 intmap;

	intmap = ioread32(regs->inth);
	intmap <<= 32;
	intmap |= ioread32(regs->intl);
	if (!intmap)
		return IRQ_NONE;

	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
		if (intmap & BIT(ch)) {
			iowrite8(EDMA_MASK_CH(ch), regs->cint);

			mcf_chan = &mcf_edma->chans[ch];

			spin_lock(&mcf_chan->vchan.lock);

			if (!mcf_chan->edesc) {
				/* terminate_all called before */
				spin_unlock(&mcf_chan->vchan.lock);
				continue;
			}

			if (!mcf_chan->edesc->iscyclic) {
				list_del(&mcf_chan->edesc->vdesc.node);
				vchan_cookie_complete(&mcf_chan->edesc->vdesc);
				mcf_chan->edesc = NULL;
				mcf_chan->status = DMA_COMPLETE;
				mcf_chan->idle = true;
			} else {
				vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
			}

			if (!mcf_chan->edesc)
				fsl_edma_xfer_desc(mcf_chan);

			spin_unlock(&mcf_chan->vchan.lock);
		}
	}

	return IRQ_HANDLED;
}

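/*
 * Error interrupt handler: check ERRL (channels 0-31) and ERRH (channels
 * 32-63); for every channel reporting an error, disable its hardware
 * request, clear the error flag via CERR and mark the channel as failed.
 */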
static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int err, ch;

	err = ioread32(regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
		if (err & BIT(ch)) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	err = ioread32(regs->errh);
	if (!err)
		return IRQ_NONE;

	for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
		if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	return IRQ_HANDLED;
}

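/*
 * Request the interrupt lines described by the platform resources: two IRQ
 * ranges ("edma-tx-00-15" and "edma-tx-16-55"), a single combined IRQ for
 * channels 56-63, and the separate error interrupt.
 */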
static int mcf_edma_irq_init(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int ret = 0, i;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
				IORESOURCE_IRQ, "edma-tx-00-15");
	if (!res)
		return -EINVAL;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (!res)
		return -EINVAL;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_tx_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	ret = platform_get_irq_byname(pdev, "edma-err");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_err_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	return 0;
}

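/*
 * Release every interrupt line that mcf_edma_irq_init() requested, walking
 * the same IRQ resources and named IRQs in the same order.
 */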
static void mcf_edma_irq_free(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int irq;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-00-15");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);

	irq = platform_get_irq_byname(pdev, "edma-err");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);
}

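/* Driver data hooking the ColdFire (v2) variant into the shared fsl-edma core. */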
static struct fsl_edma_drvdata mcf_data = {
	.version = v2,
	.setup_irq = mcf_edma_irq_init,
};

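/*
 * Probe: allocate the engine and its per-channel state based on the platform
 * data, map the eDMA registers, initialise the virtual DMA channels, install
 * the interrupt handlers and register the DMA device with the dmaengine core.
 */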
static int mcf_edma_probe(struct platform_device *pdev)
{
	struct mcf_edma_platform_data *pdata;
	struct fsl_edma_engine *mcf_edma;
	struct fsl_edma_chan *mcf_chan;
	struct edma_regs *regs;
	int ret, i, len, chans;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data supplied\n");
		return -EINVAL;
	}

	if (!pdata->dma_channels) {
		dev_info(&pdev->dev, "setting default channel number to 64");
		chans = 64;
	} else {
		chans = pdata->dma_channels;
	}

	len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
	mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!mcf_edma)
		return -ENOMEM;

	mcf_edma->n_chans = chans;

	/* Set up drvdata for ColdFire edma */
	mcf_edma->drvdata = &mcf_data;
	mcf_edma->big_endian = 1;

	mutex_init(&mcf_edma->fsl_edma_mutex);

	mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mcf_edma->membase))
		return PTR_ERR(mcf_edma->membase);

	fsl_edma_setup_regs(mcf_edma);
	regs = &mcf_edma->regs;

	INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
	for (i = 0; i < mcf_edma->n_chans; i++) {
		struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];

		mcf_chan->edma = mcf_edma;
		mcf_chan->slave_id = i;
		mcf_chan->idle = true;
		mcf_chan->dma_dir = DMA_NONE;
		mcf_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
		iowrite32(0x0, &regs->tcd[i].csr);
	}

	iowrite32(~0, regs->inth);
	iowrite32(~0, regs->intl);

	ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);

	mcf_edma->dma_dev.dev = &pdev->dev;
	mcf_edma->dma_dev.device_alloc_chan_resources =
			fsl_edma_alloc_chan_resources;
	mcf_edma->dma_dev.device_free_chan_resources =
			fsl_edma_free_chan_resources;
	mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
	mcf_edma->dma_dev.device_prep_dma_cyclic =
			fsl_edma_prep_dma_cyclic;
	mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	mcf_edma->dma_dev.device_pause = fsl_edma_pause;
	mcf_edma->dma_dev.device_resume = fsl_edma_resume;
	mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.directions =
			BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
	mcf_edma->dma_dev.filter.map = pdata->slave_map;
	mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;

	platform_set_drvdata(pdev, mcf_edma);

	ret = dma_async_device_register(&mcf_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		return ret;
	}

	/* Enable round robin arbitration */
	iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

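/* Unbind: release the interrupts, drain the virtual channels and unregister the DMA device. */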
static int mcf_edma_remove(struct platform_device *pdev)
{
	struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);

	mcf_edma_irq_free(pdev, mcf_edma);
	fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
	dma_async_device_unregister(&mcf_edma->dma_dev);

	return 0;
}

static struct platform_driver mcf_edma_driver = {
	.driver		= {
		.name	= "mcf-edma",
	},
	.probe		= mcf_edma_probe,
	.remove		= mcf_edma_remove,
};

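/*
 * Channel filter used via the dma_slave_map in the platform data: accept a
 * channel only if it belongs to this driver and its slave_id matches the
 * requested one.
 */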
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &mcf_edma_driver.driver) {
		struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);

		return (mcf_chan->slave_id == (uintptr_t)param);
	}

	return false;
}
EXPORT_SYMBOL(mcf_edma_filter_fn);

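/*
 * Register at subsys_initcall time so the eDMA provider is available before
 * the peripheral drivers that request its channels are probed.
 */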
static int __init mcf_edma_init(void)
{
	return platform_driver_register(&mcf_edma_driver);
}
subsys_initcall(mcf_edma_init);

static void __exit mcf_edma_exit(void)
{
	platform_driver_unregister(&mcf_edma_driver);
}
module_exit(mcf_edma_exit);

MODULE_ALIAS("platform:mcf-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
MODULE_LICENSE("GPL v2");