// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/dma/fsl-edma.c
 *
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 *
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid and Layerscape SoCs.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

static void fsl_edma_synchronize(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	vchan_synchronize(&fsl_chan->vchan);
}

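/*
 * Transfer-complete interrupt: walk the INTL pending mask, ack each flagged
 * channel, then either complete its descriptor (scatter-gather) or run the
 * cyclic callback, and kick off the next queued descriptor if any.
 */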
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	struct edma_regs *regs = &fsl_edma->regs;
	struct fsl_edma_chan *fsl_chan;

	intr = edma_readl(fsl_edma, regs->intl);
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);

			fsl_chan = &fsl_edma->chans[ch];

			spin_lock(&fsl_chan->vchan.lock);

			if (!fsl_chan->edesc) {
				/* terminate_all called before */
				spin_unlock(&fsl_chan->vchan.lock);
				continue;
			}

			if (!fsl_chan->edesc->iscyclic) {
				list_del(&fsl_chan->edesc->vdesc.node);
				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
				fsl_chan->edesc = NULL;
				fsl_chan->status = DMA_COMPLETE;
				fsl_chan->idle = true;
			} else {
				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
			}

			if (!fsl_chan->edesc)
				fsl_edma_xfer_desc(fsl_chan);

			spin_unlock(&fsl_chan->vchan.lock);
		}
	}

	return IRQ_HANDLED;
}

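/*
 * Error interrupt: for every channel flagged in ERRL, stop the hardware
 * request, ack the error and mark the channel DMA_ERROR.
 */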
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int err, ch;
	struct edma_regs *regs = &fsl_edma->regs;

	err = edma_readl(fsl_edma, regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
			fsl_edma->chans[ch].status = DMA_ERROR;
			fsl_edma->chans[ch].idle = true;
		}
	}

	return IRQ_HANDLED;
}

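/*
 * Combined handler for SoCs that signal both transfer completion and errors
 * on a single IRQ line.
 */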
static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
		return IRQ_HANDLED;

	return fsl_edma_err_handler(irq, dev_id);
}

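/*
 * Translate a two-cell DT dma-spec into a channel: cell 0 selects the DMAMUX
 * block, cell 1 the request source to route onto the channel.
 *
 * A minimal consumer sketch, assuming "#dma-cells = <2>" on the controller
 * node (the mux index and request number below are purely illustrative):
 *
 *	dmas = <&edma0 0 24>;
 *	dma-names = "rx";
 */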
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
	unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;

	if (dma_spec->args_count != 2)
		return NULL;

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;
		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_chan = to_fsl_edma_chan(chan);
				fsl_chan->slave_id = dma_spec->args[1];
				fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
						true);
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}

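/*
 * Vybrid/Layerscape IRQ setup: the DT provides named "edma-tx" and
 * "edma-err" interrupts, which may map to a single shared line.
 */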
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	int ret;

	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
	if (fsl_edma->txirq < 0)
		return fsl_edma->txirq;

	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
	if (fsl_edma->errirq < 0)
		return fsl_edma->errirq;

	if (fsl_edma->txirq == fsl_edma->errirq) {
		/* Both events share one line: use the combined handler. */
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
			return ret;
		}
	}

	return 0;
}

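/*
 * i.MX7ULP IRQ setup: one interrupt per channel plus one trailing error
 * interrupt (see the comment in the function body).
 */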
static int
fsl_edma2_irq_init(struct platform_device *pdev,
		   struct fsl_edma_engine *fsl_edma)
{
	int i, ret, irq;
	int count;

	count = platform_irq_count(pdev);
	dev_dbg(&pdev->dev, "%s Found %d interrupts\n", __func__, count);
	if (count <= 2) {
		dev_err(&pdev->dev, "Incorrect number of interrupts in DTS.\n");
		return -EINVAL;
	}
	/*
	 * 16 channel-independent interrupts + 1 error interrupt on i.mx7ulp.
	 * 2 channels share one interrupt, for example, ch0/ch16, ch1/ch17...
	 * For now, just simply request irq without the IRQF_SHARED flag, since
	 * 16 channels are enough on i.mx7ulp, whose M4 domain owns some
	 * peripherals.
	 */
	for (i = 0; i < count; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			return -ENXIO;

		sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);

		/* The last IRQ is for eDMA err */
		if (i == count - 1)
			ret = devm_request_irq(&pdev->dev, irq,
						fsl_edma_err_handler,
						0, "eDMA2-ERR", fsl_edma);
		else
			ret = devm_request_irq(&pdev->dev, irq,
						fsl_edma_tx_handler, 0,
						fsl_edma->chans[i].chan_name,
						fsl_edma);
		if (ret)
			return ret;
	}

	return 0;
}

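/*
 * Release the IRQs on remove so no handler can run against a torn-down
 * engine; a shared line must only be freed once.
 */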
static void fsl_edma_irq_exit(
		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	if (fsl_edma->txirq == fsl_edma->errirq) {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
	} else {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
		devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
	}
}

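/* Unwind helper: disable the first nr_clocks DMAMUX clocks enabled so far. */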
static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
	int i;

	for (i = 0; i < nr_clocks; i++)
		clk_disable_unprepare(fsl_edma->muxclk[i]);
}

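/*
 * Per-compatible driver data: how many DMAMUX blocks the SoC has, which IRQ
 * setup routine to use, and (i.MX7ULP) whether a separate "dma" block clock
 * must be enabled first.
 */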
static struct fsl_edma_drvdata vf610_data = {
	.version = v1,
	.dmamuxs = DMAMUX_NR,
	.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata ls1028a_data = {
	.version = v1,
	.dmamuxs = DMAMUX_NR,
	.mux_swap = true,
	.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata imx7ulp_data = {
	.version = v3,
	.dmamuxs = 1,
	.has_dmaclk = true,
	.setup_irq = fsl_edma2_irq_init,
};

static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
	{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

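/*
 * Probe order: map the eDMA MMIO and clocks, quiesce every channel (TCD CSR
 * cleared, mux off), clear stale interrupts, then register with the
 * dmaengine core and the OF DMA helpers, and finally enable round-robin
 * arbitration.
 */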
static int fsl_edma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(fsl_edma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma;
	const struct fsl_edma_drvdata *drvdata = NULL;
	struct edma_regs *regs;
	u32 chans;
	int ret, i;

	if (of_id)
		drvdata = of_id->data;
	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
				GFP_KERNEL);
	if (!fsl_edma)
		return -ENOMEM;

	fsl_edma->drvdata = drvdata;
	fsl_edma->n_chans = chans;
	mutex_init(&fsl_edma->fsl_edma_mutex);

	fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fsl_edma->membase))
		return PTR_ERR(fsl_edma->membase);

	fsl_edma_setup_regs(fsl_edma);
	regs = &fsl_edma->regs;

	if (drvdata->has_dmaclk) {
		fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
		if (IS_ERR(fsl_edma->dmaclk)) {
			dev_err(&pdev->dev, "Missing DMA block clock.\n");
			return PTR_ERR(fsl_edma->dmaclk);
		}

		ret = clk_prepare_enable(fsl_edma->dmaclk);
		if (ret) {
			dev_err(&pdev->dev, "Enabling DMA block clock failed.\n");
			return ret;
		}
	}

	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
		char clkname[32];

		/* DMAMUX register blocks follow the eDMA block at index 0 */
		fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
								      1 + i);
		if (IS_ERR(fsl_edma->muxbase[i])) {
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxbase[i]);
		}

		sprintf(clkname, "dmamux%d", i);
		fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
		if (IS_ERR(fsl_edma->muxclk[i])) {
			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxclk[i]);
		}

		ret = clk_prepare_enable(fsl_edma->muxclk[i]);
		if (ret) {
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return ret;
		}
	}

	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
	for (i = 0; i < fsl_edma->n_chans; i++) {
		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

		fsl_chan->edma = fsl_edma;
		fsl_chan->pm_state = RUNNING;
		fsl_chan->slave_id = 0;
		fsl_chan->idle = true;
		fsl_chan->dma_dir = DMA_NONE;
		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
		fsl_edma_chan_mux(fsl_chan, 0, false);
	}

	/* Clear any stale pending interrupts before requesting the IRQs. */
	edma_writel(fsl_edma, ~0, regs->intl);
	ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask);

	fsl_edma->dma_dev.dev = &pdev->dev;
	fsl_edma->dma_dev.device_alloc_chan_resources
		= fsl_edma_alloc_chan_resources;
	fsl_edma->dma_dev.device_free_chan_resources
		= fsl_edma_free_chan_resources;
	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
	fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES;
	/* For the worst case of 'nbytes = 1', CITER (0x3fff) bounds the segment size */
	dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);

	platform_set_drvdata(pdev, fsl_edma);

	ret = dma_async_device_register(&fsl_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
		return ret;
	}

	ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
		dma_async_device_unregister(&fsl_edma->dma_dev);
		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
		return ret;
	}

	/* enable round-robin group and channel arbitration */
	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

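/*
 * Client-side usage sketch (not part of this driver): a hypothetical
 * consumer with an "rx" dmas entry drives a channel through the generic
 * dmaengine API; fifo_phys, buf and len are placeholders.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
 *					   DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
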
static int fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);

	fsl_edma_irq_exit(pdev, fsl_edma);
	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);
	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);

	return 0;
}

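/*
 * Late suspend: clients are expected to have idled their channels already
 * (see the dev_pm_ops comment below); any channel still busy is forcibly
 * disabled and unrouted from the mux.
 */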
static int fsl_edma_suspend_late(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	unsigned long flags;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		/* Make sure the channel is idle, or force-disable it. */
		if (unlikely(!fsl_chan->idle)) {
			dev_warn(dev, "WARN: There is a non-idle channel.");
			fsl_edma_disable_request(fsl_chan);
			fsl_edma_chan_mux(fsl_chan, 0, false);
		}

		fsl_chan->pm_state = SUSPENDED;
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	}

	return 0;
}

static int fsl_edma_resume_early(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	struct edma_regs *regs = &fsl_edma->regs;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		fsl_chan->pm_state = RUNNING;
		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
		if (fsl_chan->slave_id != 0)
			fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
	}

	/* re-enable round-robin arbitration after the register state is lost */
	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

/*
 * eDMA provides services to other drivers, so it should suspend late and
 * resume early. By the time the eDMA suspends, all of its clients should
 * have stopped their DMA transfers and left their channels idle.
 */
static const struct dev_pm_ops fsl_edma_pm_ops = {
	.suspend_late   = fsl_edma_suspend_late,
	.resume_early   = fsl_edma_resume_early,
};

static struct platform_driver fsl_edma_driver = {
	.driver		= {
		.name	= "fsl-edma",
		.of_match_table = fsl_edma_dt_ids,
		.pm     = &fsl_edma_pm_ops,
	},
	.probe          = fsl_edma_probe,
	.remove		= fsl_edma_remove,
};

static int __init fsl_edma_init(void)
{
	return platform_driver_register(&fsl_edma_driver);
}
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
	platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");