// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP DMAengine support
 */
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "../virt-dma.h"
#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32
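/*
 * The OMAP2+ sDMA block exposes up to 127 peripheral DMA request lines
 * but only 32 logical channels: a request number selects the hardware
 * trigger, while a logical channel holds the transfer context. Both
 * defaults above can be overridden from the device tree ("dma-requests"
 * and "dma-channels" properties, see omap_dma_probe()).
 */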
struct omap_dma_config {
	int lch_end;
	unsigned int rw_priority:1;
	unsigned int needs_busy_check:1;
	unsigned int may_lose_context:1;
	unsigned int needs_lch_clear:1;
};

struct omap_dma_context {
	u32 irqenable_l0;
	u32 irqenable_l1;
	u32 ocp_sysconfig;
	u32 gcr;
};

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	const struct omap_dma_config *cfg;
	struct notifier_block nb;
	struct omap_dma_context context;
	u32 lch_count;
	DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
	struct mutex lch_lock;		/* for assigning logical channels */
	bool legacy;
	bool ll123_supported;
	struct dma_pool *desc_pool;
	unsigned dma_requests;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan **lch_map;
};

struct omap_chan {
	struct virt_dma_chan vc;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;
	bool running;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};
#define DESC_NXT_SV_REFRESH	(0x1 << 24)
#define DESC_NXT_SV_REUSE	(0x2 << 24)
#define DESC_NXT_DV_REFRESH	(0x1 << 26)
#define DESC_NXT_DV_REUSE	(0x2 << 26)
#define DESC_NTYPE_TYPE2	(0x2 << 29)
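/*
 * The DESC_* values above are OR'ed into the high bits of a type 2
 * descriptor's EN word (see omap_dma_fill_type2_desc()): bits 24-27
 * select whether the next descriptor refreshes or reuses the source
 * and destination addresses, and bits 29-31 carry the descriptor type.
 */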
/* Type 2 descriptor with Source or Destination address update */
struct omap_type2_desc {
	uint32_t next_desc;
	uint32_t en;
	uint32_t addr; /* src or dst */
	uint16_t fn;
	uint16_t cicr;
	int16_t cdei;
	int16_t csei;
	int32_t cdfi;
	int32_t csfi;
} __packed;

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
	int32_t fi;		/* for double indexing */
	int16_t ei;		/* for double indexing */

	/* Linked list */
	struct omap_type2_desc *t2_desc;
	dma_addr_t t2_desc_paddr;
};
struct omap_desc {
	struct virt_dma_desc vd;
	bool using_ll;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;
	bool polled;

	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
	int16_t ei;		/* for double indexing */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[];
};
enum {
	CAPS_0_SUPPORT_LL123	= BIT(20),	/* Linked List type1/2/3 */
	CAPS_0_SUPPORT_LL4	= BIT(21),	/* Linked List type4 */

	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,
	CSDP_WRITE_NON_POSTED		= 0 << 16,
	CSDP_WRITE_POSTED		= 1 << 16,
	CSDP_WRITE_LAST_NON_POSTED	= 2 << 16,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),

	CDP_DST_VALID_INC	= 0 << 0,
	CDP_DST_VALID_RELOAD	= 1 << 0,
	CDP_DST_VALID_REUSE	= 2 << 0,
	CDP_SRC_VALID_INC	= 0 << 2,
	CDP_SRC_VALID_RELOAD	= 1 << 2,
	CDP_SRC_VALID_REUSE	= 2 << 2,
	CDP_NTYPE_TYPE1		= 1 << 4,
	CDP_NTYPE_TYPE2		= 2 << 4,
	CDP_NTYPE_TYPE3		= 3 << 4,
	CDP_TMODE_NORMAL	= 0 << 8,
	CDP_TMODE_LLIST		= 1 << 8,
	CDP_FAST		= BIT(10),
};
static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};

static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	struct omap_desc *d = to_omap_dma_desc(&vd->tx);

	if (d->using_ll) {
		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
		int i;

		for (i = 0; i < d->sglen; i++) {
			if (d->sg[i].t2_desc)
				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
					      d->sg[i].t2_desc_paddr);
		}
	}

	kfree(d);
}
static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
				     enum dma_transfer_direction dir, bool last)
{
	struct omap_sg *sg = &d->sg[idx];
	struct omap_type2_desc *t2_desc = sg->t2_desc;

	if (idx)
		d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
	if (last)
		t2_desc->next_desc = 0xfffffffc;

	t2_desc->en = sg->en;
	t2_desc->addr = sg->addr;
	t2_desc->fn = sg->fn & 0xffff;
	t2_desc->cicr = d->cicr;
	if (!last)
		t2_desc->cicr &= ~CICR_BLOCK_IE;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		t2_desc->cdei = sg->ei;
		t2_desc->csei = d->ei;
		t2_desc->cdfi = sg->fi;
		t2_desc->csfi = d->fi;

		t2_desc->en |= DESC_NXT_DV_REFRESH;
		t2_desc->en |= DESC_NXT_SV_REUSE;
		break;
	case DMA_MEM_TO_DEV:
		t2_desc->cdei = d->ei;
		t2_desc->csei = sg->ei;
		t2_desc->cdfi = d->fi;
		t2_desc->csfi = sg->fi;

		t2_desc->en |= DESC_NXT_SV_REFRESH;
		t2_desc->en |= DESC_NXT_DV_REUSE;
		break;
	default:
		return;
	}

	t2_desc->en |= DESC_NTYPE_TYPE2;
}
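/*
 * Resulting layout, as a sketch: descriptors are chained through
 * next_desc and the hardware walks the list in CDP linked-list mode
 * (see omap_dma_start()); 0xfffffffc in next_desc marks the end of
 * the chain.
 *
 *	sg[0].t2_desc --next_desc--> sg[1].t2_desc --...--> 0xfffffffc
 */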
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}
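/*
 * OMAP_DMA_REG_2X16BIT exists because the OMAP1 register file is
 * 16 bits wide: a logically 32-bit value (e.g. a DMA address) is
 * split across two consecutive 16-bit registers, so it is accessed
 * as two relaxed 16-bit MMIO operations with the high half at
 * addr + 2.
 */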
static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}
static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
{
	struct omap_chan *c;
	int i;

	c = od->lch_map[lch];
	if (!c)
		return;

	for (i = CSDP; i <= od->cfg->lch_end; i++)
		omap_dma_chan_write(c, i, 0);
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
	unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint16_t cicr = d->cicr;

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	if (d->using_ll) {
		uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;

		if (d->dir == DMA_DEV_TO_MEM)
			cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
		else
			cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
		omap_dma_chan_write(c, CDP, cdp);

		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
		omap_dma_chan_write(c, CCDN, 0);
		omap_dma_chan_write(c, CCFN, 0xffff);
		omap_dma_chan_write(c, CCEN, 0xffffff);

		cicr &= ~CICR_BLOCK_IE;
	} else if (od->ll123_supported) {
		omap_dma_chan_write(c, CDP, 0);
	}

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

	c->running = true;
}
static void omap_dma_drain_chan(struct omap_chan *c)
{
	int i;
	u32 val;

	/* Wait for sDMA FIFO to drain */
	for (i = 0; ; i++) {
		val = omap_dma_chan_read(c, CCR);
		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
			break;

		if (i > 100)
			break;

		udelay(5);
	}

	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
		dev_err(c->vc.chan.device->dev,
			"DMA drain did not complete on lch %d\n",
			c->dma_ch);
}
static int omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		if (!(val & CCR_ENABLE))
			return -EINVAL;

		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
	c->running = false;

	return 0;
}
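/*
 * Note on the DMA_ERRATA_i541 path in omap_dma_stop() above: for
 * source-triggered channels the workaround temporarily forces the sDMA
 * OCP interface to no-idle (MIDLEMODE) while clearing CCR_ENABLE and
 * draining the FIFO, then restores the saved OCP_SYSCONFIG value, so
 * the module cannot idle in the middle of the channel shutdown.
 */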
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_sg *sg = d->sg + c->sgidx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, sg->ei);
	omap_dma_chan_write(c, cxfi, sg->fi);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
	c->sgidx++;
}
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, d->ei);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d);
}
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (c->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else if (d->using_ll || c->sgidx == d->sglen) {
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		} else {
			omap_dma_start_sg(c, d);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}
static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
{
	int channel;

	mutex_lock(&od->lch_lock);
	channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
	if (channel >= od->lch_count)
		goto out_busy;
	set_bit(channel, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);

	omap_dma_clear_lch(od, channel);
	*lch = channel;

	return 0;

out_busy:
	mutex_unlock(&od->lch_lock);
	*lch = -EINVAL;

	return -EBUSY;
}

static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
{
	omap_dma_clear_lch(od, lch);
	mutex_lock(&od->lch_lock);
	clear_bit(lch, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);
}
static inline bool omap_dma_legacy(struct omap_dmadev *od)
{
	return IS_ENABLED(CONFIG_ARCH_OMAP1) && od->legacy;
}
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct device *dev = od->ddev.dev;
	int ret;

	if (omap_dma_legacy(od)) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_dma_get_lch(od, &c->dma_ch);
	}
	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!omap_dma_legacy(od)) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!omap_dma_legacy(od)) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);

	if (omap_dma_legacy(od))
		omap_free_dma(c->dma_ch);
	else
		omap_dma_put_lch(od, c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
		c->dma_sig);
	c->dma_sig = 0;
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}
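/*
 * Worked example: with 32-bit elements (es_bytes[CSDP_DATA_TYPE_32] ==
 * 4), a descriptor holding one sg entry of en = 16 elements and fn = 8
 * frames reports 16 * 8 * 4 = 512 bytes.
 */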
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}
/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}
static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;
	struct omap_desc *d = NULL;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->desc && c->desc->vd.tx.cookie == cookie)
		d = c->desc;

	if (!txstate)
		goto out;

	if (d) {
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);

		if (vd)
			txstate->residue = omap_dma_desc_size(
				to_omap_dma_desc(&vd->tx));
		else
			txstate->residue = 0;
	}

out:
	if (ret == DMA_IN_PROGRESS && c->paused) {
		ret = DMA_PAUSED;
	} else if (d && d->polled && c->running) {
		uint32_t ccr = omap_dma_chan_read(c, CCR);
		/*
		 * The channel is no longer active, set the return value
		 * accordingly and mark it as completed
		 */
		if (!(ccr & CCR_ENABLE)) {
			ret = DMA_COMPLETE;
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, es, en, frame_bytes;
	bool ll_failed = false;
	u32 burst;
	u32 port_window, port_window_bytes;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		port_window = c->cfg.src_port_window_size;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		port_window = c->cfg.dst_port_window_size;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	/* When the port_window is used, one frame must cover the window */
	if (port_window) {
		burst = port_window;
		port_window_bytes = port_window * es_bytes[es];

		d->ei = 1;
		/*
		 * One frame covers the port_window and by configuring
		 * the source frame index to be -1 * (port_window - 1)
		 * we instruct the sDMA that after a frame is processed
		 * it should move back to the start of the window.
		 */
		d->fi = -(port_window_bytes - 1);
	}

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM) {
		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;

		d->ccr |= CCR_DST_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_SRC_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_SRC_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_SRC_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_SRC_BURST_16;
		} else {
			d->ccr |= CCR_SRC_AMODE_CONSTANT;
		}
	} else {
		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;

		d->ccr |= CCR_SRC_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_DST_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_DST_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_DST_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_DST_BURST_16;
		} else {
			d->ccr |= CCR_DST_AMODE_CONSTANT;
		}
	}

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp |= es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		if (port_window)
			d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;

	if (sglen >= 2)
		d->using_ll = od->ll123_supported;

	for_each_sg(sgl, sgent, sglen, i) {
		struct omap_sg *osg = &d->sg[i];

		osg->addr = sg_dma_address(sgent);
		osg->en = en;
		osg->fn = sg_dma_len(sgent) / frame_bytes;

		if (d->using_ll) {
			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
						      &osg->t2_desc_paddr);
			if (!osg->t2_desc) {
				dev_err(chan->device->dev,
					"t2_desc[%d] allocation failed\n", i);
				ll_failed = true;
				d->using_ll = false;
				continue;
			}

			omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
		}
	}

	d->sglen = sglen;

	/* Release the dma_pool entries if one allocation failed */
	if (ll_failed) {
		for (i = 0; i < d->sglen; i++) {
			struct omap_sg *osg = &d->sg[i];

			if (osg->t2_desc) {
				dma_pool_free(od->desc_pool, osg->t2_desc,
					      osg->t2_desc_paddr);
				osg->t2_desc = NULL;
			}
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
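/*
 * Worked example for the EN/FN math above: a 4-byte wide device with
 * maxburst = 16 gives en = 16 and frame_bytes = 64, so a 4 KiB
 * scatterlist segment is programmed as fn = 64 frames of 16 elements
 * each.
 */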
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM) {
			d->ccr |= CCR_TRIGGER_SRC;
			d->csdp |= CSDP_DST_PACKED;
		} else {
			d->csdp |= CSDP_SRC_PACKED;
		}

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
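/*
 * A minimal client-side sketch of driving this cyclic path through the
 * generic dmaengine API (the channel name "rx" and the fifo_phys, buf,
 * buf_len and period_len variables are illustrative, not taken from
 * this driver):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 16,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	txd->callback = period_done;	// invoked once per period
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */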
static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	uint8_t data_type;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((src | dest | len));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = src;
	d->fi = 0;
	d->es = data_type;
	d->sg[0].en = len / BIT(data_type);
	d->sg[0].fn = 1;
	d->sg[0].addr = dest;
	d->sglen = 1;
	d->ccr = c->ccr;
	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

	if (tx_flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;
	else
		d->polled = true;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
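/*
 * The __ffs() trick above picks the widest element size that the
 * addresses and length allow: e.g. src, dest and len all multiples of
 * 4 yield __ffs() >= 2, i.e. CSDP_DATA_TYPE_32, while any odd address
 * or length falls back to 8-bit elements.
 */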
static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	struct omap_sg *sg;
	uint8_t data_type;
	size_t src_icg, dst_icg;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	sg = &d->sg[0];
	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = xt->src_start;
	d->es = data_type;
	sg->en = xt->sgl[0].size / BIT(data_type);
	sg->fn = xt->numf;
	sg->addr = xt->dst_start;
	d->sglen = 1;
	d->ccr = c->ccr;

	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		d->ccr |= CCR_SRC_AMODE_DBLIDX;
		d->ei = 1;
		d->fi = src_icg + 1;
	} else if (xt->src_inc) {
		d->ccr |= CCR_SRC_AMODE_POSTINC;
		d->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: SRC constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	if (dst_icg) {
		d->ccr |= CCR_DST_AMODE_DBLIDX;
		sg->ei = 1;
		sg->fi = dst_icg + 1;
	} else if (xt->dst_inc) {
		d->ccr |= CCR_DST_AMODE_POSTINC;
		sg->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: DST constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (cfg->src_maxburst > chan->device->max_burst ||
	    cfg->dst_maxburst > chan->device->max_burst)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}
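/*
 * A minimal sketch of a slave transfer from the client side (the
 * channel name "tx" and the fifo_phys, buf and len variables are
 * illustrative only):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);	// routed to this function
 *	txd = dmaengine_prep_slave_single(chan, buf, len,
 *					  DMA_MEM_TO_DEV,
 *					  DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */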
static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	c->cyclic = false;
	c->paused = false;

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static void omap_dma_synchronize(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_synchronize(&c->vc);
}
static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;
	bool can_pause = false;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (!c->desc)
		goto out;

	if (c->cyclic)
		can_pause = true;

	/*
	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
	 * "When a channel is disabled during a transfer, the channel undergoes
	 * an abort, unless it is hardware-source-synchronized …".
	 * A source-synchronised channel is one where the fetching of data is
	 * under control of the device. In other words, a device-to-memory
	 * transfer. So, a destination-synchronised channel (which would be a
	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
	 * bit is cleared.
	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
	 * aborts immediately after completion of current read/write
	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
	 * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
	 * are both clear _before_ disabling the channel, otherwise data loss
	 * will occur.
	 * The problem is that if the channel is active, then device activity
	 * can result in DMA activity starting between reading those as both
	 * clear and the write to DMA_CCR to clear the enable bit hitting the
	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
	 * destination, then data loss "might" occur (say if we write to an UART
	 * and the UART is not accepting any further data).
	 */
	else if (c->desc->dir == DMA_DEV_TO_MEM)
		can_pause = true;

	if (can_pause && !c->paused) {
		ret = omap_dma_stop(c);
		if (!ret)
			c->paused = true;
	}
out:
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}
static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (c->paused && c->desc) {
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
		ret = 0;
	}
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}
/* Currently used by omap2 & 3 to block deeper SoC idle states */
static bool omap_dma_busy(struct omap_dmadev *od)
{
	struct omap_chan *c;
	int lch = -1;

	while (1) {
		lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
		if (lch >= od->lch_count)
			break;
		c = od->lch_map[lch];
		if (!c)
			continue;
		if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
			return true;
	}

	return false;
}
/* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
static int omap_dma_busy_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct omap_dmadev *od;

	od = container_of(nb, struct omap_dmadev, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_dma_busy(od))
			return NOTIFY_BAD;
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		break;
	}

	return NOTIFY_OK;
}
/*
 * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0.
 * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for
 * now. Context save seems to be only currently needed on omap3.
 */
static void omap_dma_context_save(struct omap_dmadev *od)
{
	od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
	od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
	od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
	od->context.gcr = omap_dma_glbl_read(od, GCR);
}

static void omap_dma_context_restore(struct omap_dmadev *od)
{
	int i;

	omap_dma_glbl_write(od, GCR, od->context.gcr);
	omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
	omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
	omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);

	/* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */
	if (od->plat->errata & DMA_ROMCODE_BUG)
		omap_dma_glbl_write(od, IRQSTATUS_L0, 0);

	/* Clear dma channels */
	for (i = 0; i < od->lch_count; i++)
		omap_dma_clear_lch(od, i);
}
/* Currently only used for omap3 */
static int omap_dma_context_notifier(struct notifier_block *nb,
				     unsigned long cmd, void *v)
{
	struct omap_dmadev *od;

	od = container_of(nb, struct omap_dmadev, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_dma_busy(od))
			return NOTIFY_BAD;
		omap_dma_context_save(od);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:	/* No need to restore context */
		break;
	case CPU_CLUSTER_PM_EXIT:
		omap_dma_context_restore(od);
		break;
	}

	return NOTIFY_OK;
}
static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
			      int max_fifo_depth, int tparams)
{
	u32 val;

	/* Set only for omap2430 and later */
	if (!od->cfg->rw_priority)
		return;

	if (max_fifo_depth == 0)
		max_fifo_depth = 1;
	if (arb_rate == 0)
		arb_rate = 1;

	val = 0xff & max_fifo_depth;
	val |= (0x3 & tparams) << 12;
	val |= (arb_rate & 0xff) << 16;

	omap_dma_glbl_write(od, GCR, val);
}
#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/*
 * No flags currently set for default configuration as omap1 is still
 * using platform data.
 */
static const struct omap_dma_config default_cfg;
static int omap_dma_probe(struct platform_device *pdev)
{
	const struct omap_dma_config *conf;
	struct omap_dmadev *od;
	int rc, i, irq;
	u32 val;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	conf = of_device_get_match_data(&pdev->dev);
	if (conf) {
		od->cfg = conf;
		od->plat = dev_get_platdata(&pdev->dev);
		if (!od->plat) {
			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing");
			return -ENODEV;
		}
	} else if (IS_ENABLED(CONFIG_ARCH_OMAP1)) {
		od->cfg = &default_cfg;

		od->plat = omap_get_plat_info();
		if (!od->plat)
			return -EPROBE_DEFER;
	} else {
		return -ENODEV;
	}

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.device_synchronize = omap_dma_synchronize;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (__dma_omap15xx(od->plat->dma_attr))
		od->ddev.residue_granularity =
				DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	else
		od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	mutex_init(&od->lch_lock);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	/* Number of DMA requests */
	od->dma_requests = OMAP_SDMA_REQUESTS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
	}

	/* Number of available logical channels */
	if (!pdev->dev.of_node) {
		od->lch_count = od->plat->dma_attr->lch_count;
		if (unlikely(!od->lch_count))
			od->lch_count = OMAP_SDMA_CHANNELS;
	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
					&od->lch_count)) {
		dev_info(&pdev->dev,
			 "Missing dma-channels property, using %u.\n",
			 OMAP_SDMA_CHANNELS);
		od->lch_count = OMAP_SDMA_CHANNELS;
	}

	/* Mask of allowed logical channels */
	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
						       "dma-channel-mask",
						       &val)) {
		/* Tag channels not in mask as reserved */
		val = ~val;
		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
	}
	if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
		bitmap_set(od->lch_bitmap, 0, 2);

	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
				   sizeof(*od->lch_map),
				   GFP_KERNEL);
	if (!od->lch_map)
		return -ENOMEM;

	for (i = 0; i < od->dma_requests; i++) {
		rc = omap_dma_chan_init(od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
		od->ll123_supported = true;

	od->ddev.filter.map = od->plat->slave_map;
	od->ddev.filter.mapcnt = od->plat->slavecnt;
	od->ddev.filter.fn = omap_dma_filter_fn;

	if (od->ll123_supported) {
		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
						&pdev->dev,
						sizeof(struct omap_type2_desc),
						4, 0);
		if (!od->desc_pool) {
			dev_err(&pdev->dev,
				"unable to allocate descriptor pool\n");
			od->ll123_supported = false;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
						of_dma_simple_xlate,
						&omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);

	if (od->cfg->needs_busy_check) {
		od->nb.notifier_call = omap_dma_busy_notifier;
		cpu_pm_register_notifier(&od->nb);
	} else if (od->cfg->may_lose_context) {
		od->nb.notifier_call = omap_dma_context_notifier;
		cpu_pm_register_notifier(&od->nb);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");

	return rc;
}
static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);
	int irq;

	if (od->cfg->may_lose_context)
		cpu_pm_unregister_notifier(&od->nb);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	irq = platform_get_irq(pdev, 1);
	devm_free_irq(&pdev->dev, irq, od);

	dma_async_device_unregister(&od->ddev);

	if (!omap_dma_legacy(od)) {
		/* Disable all interrupts */
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
	}

	if (od->ll123_supported)
		dma_pool_destroy(od->desc_pool);

	omap_dma_free(od);

	return 0;
}
static const struct omap_dma_config omap2420_data = {
	.lch_end = CCFN,
	.rw_priority = true,
	.needs_lch_clear = true,
	.needs_busy_check = true,
};

static const struct omap_dma_config omap2430_data = {
	.lch_end = CCFN,
	.rw_priority = true,
	.needs_lch_clear = true,
};

static const struct omap_dma_config omap3430_data = {
	.lch_end = CCFN,
	.rw_priority = true,
	.needs_lch_clear = true,
	.may_lose_context = true,
};

static const struct omap_dma_config omap3630_data = {
	.lch_end = CCDN,
	.rw_priority = true,
	.needs_lch_clear = true,
	.may_lose_context = true,
};

static const struct omap_dma_config omap4_data = {
	.lch_end = CCDN,
	.rw_priority = true,
	.needs_lch_clear = true,
};
static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
	{ .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
	{ .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
	{ .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
	{ .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);
static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = omap_dma_match,
	},
};
static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		if (req <= od->dma_requests) {
			c->dma_sig = req;
			return true;
		}
	}
	return false;
}
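/*
 * The filter is wired into od->ddev.filter.fn in omap_dma_probe(), so
 * legacy (non-DT) clients reach it through the dmaengine filter-map
 * path. As a sketch, a request-number lookup boils down to (the
 * request number 42 is purely illustrative):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int req = 42;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &req);
 */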
static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");