// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"
#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C
#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all() was called before this interrupt ran */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
		fsl_chan->idle = true;
	} else {
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	/* if the descriptor completed, start the next queued one */
	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}
static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	/*
	 * On mux layouts needing the swap, the four 8-bit CHCFG registers
	 * within each 32-bit word sit in reversed byte order, so remap
	 * lanes 0 <-> 3 and 1 <-> 2.
	 */
	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	u32 val;

	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	val = ffs(addr_width) - 1;
	return val | (val << 8);
}
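/*
 * Illustrative example: for DMA_SLAVE_BUSWIDTH_4_BYTES, ffs(4) - 1 = 2,
 * so the value returned is 0x0202 - the 32-bit transfer-size code placed
 * in both the SSIZE (bits 10:8) and DSIZE (bits 2:0) fields of the TCD
 * attributes word.
 */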
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}
static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}
int fsl_edma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
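/*
 * Client-side usage sketch (illustrative, not part of this driver): a
 * peripheral driver typically fills in a dma_slave_config and hands it
 * to the dmaengine core, which lands here; fifo_phys_addr below is a
 * hypothetical device FIFO address.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */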
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
		struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out the finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}
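/*
 * Worked example (illustrative): with two 1024-byte TCDs and the engine
 * 256 bytes into the second one, len starts at 2048; the loop subtracts
 * 1024 for the finished first TCD, subtracts 1024 for the second, then
 * adds back dma_addr + 1024 - cur_addr = 768, reporting 768 bytes of
 * residue still to transfer.
 */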
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian,
	 * and this is performed by the endian-aware edma_write helpers.
	 */
	edma_writew(edma, 0, &regs->tcd[ch].csr);

	edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
	edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);

	edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
	edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);

	edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
	edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);

	edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
	edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
	edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);

	edma_writel(edma, (s32)tcd->dlast_sga,
			&regs->tcd[ch].dlast_sga);

	/* a software-triggered (memcpy) channel must set the START bit itself */
	if (fsl_chan->is_sw) {
		csr = le16_to_cpu(tcd->csr);
		csr |= EDMA_TCD_CSR_START;
		tcd->csr = cpu_to_le16(csr);
	}

	edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs to do the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
		int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	/* unwind: free the TCDs allocated so far */
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
				fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
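/*
 * Client-side usage sketch (illustrative): an audio or UART driver would
 * request a cyclic transfer over a ring buffer split into periods and
 * receive a callback per completed period:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					 period_len, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * buf_phys, buf_len and period_len are hypothetical caller-owned values.
 */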
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
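/*
 * Note on the flag triples above: intermediate TCDs are chained with
 * scatter/gather enabled and no interrupt (false, false, true), while
 * the final TCD raises the major-loop interrupt and disables the
 * hardware request (true, true, false) so the channel stops cleanly at
 * the end of the list.
 */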
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
						     dma_addr_t dma_dst, dma_addr_t dma_src,
						     size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;

	/* sized to match copy_align and max_seg_size, so one TCD is enough */
	fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
			32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
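/*
 * Illustrative reading of the single-TCD memcpy above: a 32-byte
 * transfer size with source/destination offsets of 32 and nbytes = len
 * moves the whole buffer in one major-loop iteration (citer = biter = 1),
 * which is why the driver's copy_align and max_seg_size are expected to
 * match these limits.
 */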
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
}
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
/*
 * On the 32-channel Vybrid/mpc577x eDMA version, register offsets differ
 * from the ColdFire mcf5441x 64-channel eDMA.
 *
 * This function sets up the register offsets as per the declared version,
 * so it must be called in xxx_edma_probe() just after setting the edma
 * "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);

	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);

	if (is64) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
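/*
 * Probe-time usage sketch (illustrative assumption, not from this file):
 *
 *	edma->membase = devm_platform_ioremap_resource(pdev, 0);
 *	if (IS_ERR(edma->membase))
 *		return PTR_ERR(edma->membase);
 *	fsl_edma_setup_regs(edma);
 *
 * matching the requirement in the comment above that "membase" be set
 * before this function runs.
 */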
MODULE_LICENSE("GPL v2");