// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c
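
/*
 * Per-channel transfer-complete handler, called from the eDMA interrupt
 * handler. Non-cyclic descriptors are completed and the next queued
 * descriptor, if any, is started; cyclic descriptors only get their
 * period callback invoked.
 */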
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all called before */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
		fsl_chan->idle = true;
	} else {
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}
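
/*
 * Channel (de)activation: set/clear the error-interrupt enable and the
 * hardware request enable for this channel, through either the wrapped
 * edma_writeb() accessor or a plain iowrite8(), selected by the
 * FSL_EDMA_DRV_WRAP_IO platform flag.
 */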
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
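
/*
 * DMAMUX programming: each mux configuration register selects the
 * peripheral request source routed to one eDMA channel. Depending on
 * the SoC, the per-channel register is 8 or 32 bits wide.
 */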
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}
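
/*
 * Route @slot (the peripheral request source) to this channel's DMAMUX
 * configuration register, or disable the routing when @enable is false.
 * On FSL_EDMA_DRV_MUX_SWAP platforms the channel offset within the mux
 * is byte-swapped to account for register endianness.
 */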
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
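
/*
 * Encode a bus width into the TCD ATTR field: SSIZE (bits 10:8) and
 * DSIZE (bits 2:0) both hold log2 of the access size in bytes, so e.g.
 * a 4-byte width gives ffs(4) - 1 = 2 and an ATTR value of 0x0202.
 * An undefined width falls back to 32-bit accesses.
 */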
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	u32 val;

	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	val = ffs(addr_width) - 1;
	return val | (val << 8);
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
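
/*
 * dmaengine device_terminate_all callback: stop the hardware request,
 * drop the in-flight descriptor and free everything still queued on
 * the virtual channel.
 */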
int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}
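
/*
 * Map the slave device's FIFO window with dma_map_resource() for the
 * requested transfer direction. The mapping is cached in the channel
 * and reused until the direction (and thus the config) changes.
 */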
static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
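
/*
 * Residue bookkeeping: sum nbytes * biter over all TCDs of the current
 * descriptor, then, for an in-progress transfer, locate the TCD the
 * engine is working on from the current source/destination address and
 * subtract what has already been transferred.
 */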
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_read_tcdreg(fsl_chan, saddr);
	else
		cur_addr = edma_read_tcdreg(fsl_chan, daddr);

	/* figure out the finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
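
/*
 * CSR is cleared first so that a stale START or E_SG bit cannot kick
 * off a transfer while the rest of the TCD is being rewritten; the
 * final CSR value is written last.
 */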
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian,
	 * and this is performed from specific edma_write functions
	 */
	edma_write_tcdreg(fsl_chan, 0, csr);

	edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
	edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);

	edma_write_tcdreg(fsl_chan, tcd->attr, attr);
	edma_write_tcdreg(fsl_chan, tcd->soff, soff);

	edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
	edma_write_tcdreg(fsl_chan, tcd->slast, slast);

	edma_write_tcdreg(fsl_chan, tcd->citer, citer);
	edma_write_tcdreg(fsl_chan, tcd->biter, biter);
	edma_write_tcdreg(fsl_chan, tcd->doff, doff);

	edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);

	if (fsl_chan->is_sw) {
		csr = le16_to_cpu(tcd->csr);
		csr |= EDMA_TCD_CSR_START;
		tcd->csr = cpu_to_le16(csr);
	}

	edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}
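
/*
 * Fill an in-memory TCD in the hardware's little-endian layout. The
 * csr flags select a major-loop interrupt (major_int), automatic
 * clearing of the hardware request on completion (disable_req) and
 * scatter/gather linking to the next TCD (enable_sg).
 */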
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs doing the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}
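
/*
 * Allocate a descriptor plus sg_len TCDs from the channel's DMA pool,
 * unwinding any partial allocation on failure.
 */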
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}
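
/*
 * Cyclic transfers are built as one TCD per period, with each TCD's
 * scatter/gather link pointing at the next and the last one wrapping
 * back to the first, so the transfer loops until terminated.
 */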
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
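
/*
 * Slave scatter/gather: one TCD per sg entry, chained via scatter/gather
 * links. Only the last TCD requests a major-loop interrupt and clears
 * the hardware request when it completes.
 */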
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
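
/*
 * Memcpy uses a single software-triggered TCD with 32-byte accesses;
 * is_sw makes fsl_edma_set_tcd_regs() set the START bit instead of
 * waiting for a hardware request.
 */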
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
						     dma_addr_t dma_dst, dma_addr_t dma_src,
						     size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;

	/* To match with copy_align and max_seg_size so 1 tcd is enough */
	fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			  fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
			  32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
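
/* Start the next queued descriptor; the caller must hold vchan.lock. */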
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}

int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
					     sizeof(struct fsl_edma_hw_tcd),
					     32, 0);
	return 0;
}

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
}

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}

/*
 * On the 32-channel Vybrid/mpc577x eDMA version, register offsets are
 * different compared to the ColdFire mcf5441x 64-channel eDMA.
 *
 * This function sets up the register offsets as per the declared version,
 * so it must be called in xxx_edma_probe() just after setting the
 * edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);

	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);

	if (is64) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}
}

MODULE_LICENSE("GPL v2");