drivers/dma/mcf-edma-main.c
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-mcf-edma.h>

#include "fsl-edma-common.h"

#define EDMA_CHANNELS		64
#define EDMA_MASK_CH(x)		((x) & GENMASK(5, 0))

/*
 * Transfer-complete interrupt handler: the 64 channel status bits are
 * split across the INTH/INTL registers.  Build the 64-bit map, then
 * acknowledge and dispatch every channel whose interrupt bit is set.
 */
static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int ch;
	u64 intmap;

	intmap = ioread32(regs->inth);
	intmap <<= 32;
	intmap |= ioread32(regs->intl);
	if (!intmap)
		return IRQ_NONE;

	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
		if (intmap & BIT(ch)) {
			iowrite8(EDMA_MASK_CH(ch), regs->cint);
			fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
		}
	}

	return IRQ_HANDLED;
}

/*
 * Error interrupt handler: scan the ERRL/ERRH error status registers,
 * then disable and acknowledge every faulted channel.  Errors on the
 * lower 32 channels are forwarded to the common error handler; the
 * upper 32 are only marked as errored and idle.
 */
static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int err, ch;

	err = ioread32(regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
		if (err & BIT(ch)) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
		}
	}

	err = ioread32(regs->errh);
	if (!err)
		return IRQ_NONE;

	for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
		if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	return IRQ_HANDLED;
}

/*
 * Request the transfer-complete and error interrupt lines.  The
 * "edma-tx-00-15" and "edma-tx-16-55" resources describe IRQ ranges
 * that share one handler each; the channel 56-63 group and the error
 * interrupt are single lines looked up by name.
 */
static int mcf_edma_irq_init(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int ret = 0, i;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
				IORESOURCE_IRQ, "edma-tx-00-15");
	if (!res)
		return -1;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (!res)
		return -1;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_tx_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	ret = platform_get_irq_byname(pdev, "edma-err");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_err_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	return 0;
}

static void mcf_edma_irq_free(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int irq;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-00-15");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);

	irq = platform_get_irq_byname(pdev, "edma-err");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);
}

static struct fsl_edma_drvdata mcf_data = {
	.flags = FSL_EDMA_DRV_EDMA64,
	.setup_irq = mcf_edma_irq_init,
};

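/*
 * Illustrative sketch, not part of this driver: the probe below relies
 * on board code registering an "mcf-edma" platform device with platform
 * data of roughly this shape.  The peripheral name and channel numbers
 * are hypothetical; only the dma_channels, slave_map and slavecnt
 * fields actually read here are assumed.
 *
 *	static struct dma_slave_map mcf_edma_map[] = {
 *		{ "mcfuart.0", "rx", (void *)12 },
 *		{ "mcfuart.0", "tx", (void *)13 },
 *	};
 *
 *	static struct mcf_edma_platform_data mcf_edma_data = {
 *		.dma_channels = 64,
 *		.slave_map = mcf_edma_map,
 *		.slavecnt = ARRAY_SIZE(mcf_edma_map),
 *	};
 *
 * The .param entry is the eDMA channel number, matched against a
 * channel's slave_id by mcf_edma_filter_fn() further down.
 */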
static int mcf_edma_probe(struct platform_device *pdev)
{
	struct mcf_edma_platform_data *pdata;
	struct fsl_edma_engine *mcf_edma;
	struct edma_regs *regs;
	int ret, i, chans;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data supplied\n");
		return -EINVAL;
	}

	if (!pdata->dma_channels) {
		dev_info(&pdev->dev, "setting default channel number to 64");
		chans = 64;
	} else {
		chans = pdata->dma_channels;
	}

	mcf_edma = devm_kzalloc(&pdev->dev, struct_size(mcf_edma, chans, chans),
				GFP_KERNEL);
	if (!mcf_edma)
		return -ENOMEM;

	mcf_edma->n_chans = chans;

	/* Set up drvdata for ColdFire edma */
	mcf_edma->drvdata = &mcf_data;
	mcf_edma->big_endian = 1;

	mutex_init(&mcf_edma->fsl_edma_mutex);

	mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mcf_edma->membase))
		return PTR_ERR(mcf_edma->membase);

	fsl_edma_setup_regs(mcf_edma);
	regs = &mcf_edma->regs;

	/* Initialize each channel and quiesce its transfer control descriptor */
	INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
	for (i = 0; i < mcf_edma->n_chans; i++) {
		struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];

		mcf_chan->edma = mcf_edma;
		mcf_chan->slave_id = i;
		mcf_chan->idle = true;
		mcf_chan->dma_dir = DMA_NONE;
		mcf_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
		mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
				+ i * sizeof(struct fsl_edma_hw_tcd);
		iowrite32(0x0, &mcf_chan->tcd->csr);
	}

	/* Acknowledge any stale channel interrupts before installing handlers */
	iowrite32(~0, regs->inth);
	iowrite32(~0, regs->intl);

	ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);

	mcf_edma->dma_dev.dev = &pdev->dev;
	mcf_edma->dma_dev.device_alloc_chan_resources =
			fsl_edma_alloc_chan_resources;
	mcf_edma->dma_dev.device_free_chan_resources =
			fsl_edma_free_chan_resources;
	mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
	mcf_edma->dma_dev.device_prep_dma_cyclic =
			fsl_edma_prep_dma_cyclic;
	mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	mcf_edma->dma_dev.device_pause = fsl_edma_pause;
	mcf_edma->dma_dev.device_resume = fsl_edma_resume;
	mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.directions =
			BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
	mcf_edma->dma_dev.filter.map = pdata->slave_map;
	mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;

	platform_set_drvdata(pdev, mcf_edma);

	ret = dma_async_device_register(&mcf_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		return ret;
	}

	/* Enable round robin arbitration */
	iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

static int mcf_edma_remove(struct platform_device *pdev)
{
	struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);

	mcf_edma_irq_free(pdev, mcf_edma);
	fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
	dma_async_device_unregister(&mcf_edma->dma_dev);

	return 0;
}

static struct platform_driver mcf_edma_driver = {
	.driver		= {
		.name	= "mcf-edma",
	},
	.probe		= mcf_edma_probe,
	.remove		= mcf_edma_remove,
};

/*
 * Filter callback for dma_request_channel(): match only channels owned
 * by this driver whose slave_id equals the channel number passed in
 * 'param'.
 */
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &mcf_edma_driver.driver) {
		struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);

		return (mcf_chan->slave_id == (uintptr_t)param);
	}

	return false;
}
EXPORT_SYMBOL(mcf_edma_filter_fn);
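
/*
 * Illustrative sketch, not part of this driver: a ColdFire peripheral
 * driver could request a specific eDMA channel through the exported
 * filter roughly like this (the channel number 8 is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mcf_edma_filter_fn, (void *)8);
 *	if (!chan)
 *		return -ENODEV;
 */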

static int __init mcf_edma_init(void)
{
	return platform_driver_register(&mcf_edma_driver);
}
subsys_initcall(mcf_edma_init);

static void __exit mcf_edma_exit(void)
{
	platform_driver_unregister(&mcf_edma_driver);
}
module_exit(mcf_edma_exit);

MODULE_ALIAS("platform:mcf-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
MODULE_LICENSE("GPL v2");