// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}
static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * We split one 64-bit write into two 32-bit writes as some HW
	 * doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
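	/*
	 * Illustrative note (not from the databook): the low word lands at
	 * 'reg' and the high word at 'reg' + 4, so the two halves are not
	 * written atomically. Its caller here, write_chan_llp(), programs
	 * CH_LLP while the channel is still disabled, before
	 * axi_chan_enable() is called.
	 */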
}

static inline void axi_chan_config_write(struct axi_dma_chan *chan,
					 struct axi_dma_chan_config *config)
{
	u32 cfg_lo, cfg_hi;

	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	if (chan->chip->dw->hdata->reg_map_8_channels) {
		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
			 config->src_per << CH_CFG_H_SRC_PER_POS |
			 config->dst_per << CH_CFG_H_DST_PER_POS |
			 config->prior << CH_CFG_H_PRIORITY_POS;
	} else {
		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
			  config->dst_per << CH_CFG2_L_DST_PER_POS;
		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
			 config->prior << CH_CFG2_H_PRIORITY_POS;
	}
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
}
static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}
static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
	if (chan->chip->dw->hdata->reg_map_8_channels)
		val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	else
		val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	if (chan->chip->dw->hdata->reg_map_8_channels)
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	else
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}
static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	int ret;
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
}
static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

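	/*
	 * Illustrative example (made-up numbers): __ffs() of the OR of the
	 * source address, destination address, length and the bus-width
	 * limit picks the widest transfer size they all allow. E.g.
	 * src = 0x1000, dst = 0x2000, len = 0x100 with max_width = 3
	 * (64-bit) gives __ffs(0x3108) = 3, i.e. 8-byte transfers, while an
	 * odd length would force byte transfers.
	 */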
	return __ffs(src | dst | len | BIT(max_width));
}

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}
static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}

	return desc;
}
static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}
static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		axi_chan_name(chan), descs_put,
		atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}
static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
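		/*
		 * Illustrative example (made-up numbers): a descriptor of
		 * total length 8192 bytes built from 1024-byte blocks with
		 * three blocks completed reports a residue of
		 * 8192 - 3 * 1024 = 5120 bytes. The first block's length is
		 * used, so this assumes equally sized blocks.
		 */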
	} else {
		bytes = vd_to_axi_desc(vdesc)->length;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}
static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}
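/*
 * Only relevant on controllers that expose the auxiliary APB register block
 * (chip->apb_regs, mapped for "intel,kmb-axi-dma" in dw_probe()). For
 * DMA_MEM_TO_DEV transfers this selects byte or halfword write mode for the
 * channel based on the slave's dst_addr_width: e.g. a 2-byte register width
 * gives __ffs(2) = DWAXIDMAC_TRANS_WIDTH_16 and picks
 * DMAC_APB_HALFWORD_WR_CH_EN.
 */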
static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}
/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	struct axi_dma_chan_config config = {};
	u32 irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	chan->is_err = false;
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		axi_chan_disable(chan);
		chan->is_err = true;
	}

	axi_dma_enable(chan->chip);

	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
	config.prior = priority;
	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
		if (chan->chip->apb_regs)
			config.dst_per = chan->id;
		else
			config.dst_per = chan->hw_handshake_num;
		break;
	case DMA_DEV_TO_MEM:
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
		if (chan->chip->apb_regs)
			config.src_per = chan->id;
		else
			config.src_per = chan->hw_handshake_num;
		break;
	default:
		break;
	}
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}
static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}
static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}
static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	/* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}
static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}
static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
	struct axi_dma_chip *chip = chan->chip;
	unsigned long reg_value, val;

	if (!chip->apb_regs) {
		dev_err(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assigning a handshake number to it.
	 * Unlock the DMA channel by assigning 0x3F to it.
	 */
	if (set)
		val = chan->hw_handshake_num;
	else
		val = UNUSED_CHANNEL;

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/*
	 * The channel is already allocated; set the handshake according to
	 * the channel ID. A single 64-bit write covers all 8 channels.
	 */
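	/*
	 * Illustrative example: each channel owns a DMA_APB_HS_SEL_MASK-wide
	 * field at bit offset chan->id * DMA_APB_HS_SEL_BIT_SIZE, so e.g.
	 * releasing channel 2 clears that field and writes UNUSED_CHANNEL
	 * (0x3F) back into it.
	 */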
	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}
/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI set to 1, it understands that the current block is the final block of
 * the transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli->ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for the source master */
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli->ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for the destination master if available */
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}
static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}
static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}
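/*
 * Worked example for the helper above (numbers are illustrative only): with
 * a hardware block limit of 1024 data items and a 32-bit transfer width
 * (width encoding 2), the largest contiguous block is 1024 << 2 = 4096
 * bytes. The prep routines below use this value to split each period or
 * scatterlist entry into hardware-sized segments.
 */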
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);
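	/*
	 * Illustrative example (made-up numbers): period_len = 6000 bytes
	 * with axi_block_len = 4096 gives num_segments = 2 and
	 * segment_len = 3000, i.e. each period is described by two LLIs of
	 * 3000 bytes.
	 */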
	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/* Set end-of-link to the linked descriptor, so that cyclic
		 * callback function can be triggered during interrupt.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);
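	/*
	 * Note added for clarity: the loop above walks the LLIs from the
	 * last one back to the first, pointing each LLI at its successor;
	 * the final LLI ends up pointing back at hw_desc[0], so the
	 * hardware keeps cycling through the ring until the channel is
	 * terminated.
	 */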
	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs = 0;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);

	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);
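		/*
		 * Illustrative example (made-up numbers): an 8192-byte SG
		 * entry with axi_block_len = 4096 yields num_segments = 2
		 * and segment_len = 4096, so the entry is covered by two
		 * hardware blocks.
		 */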
		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
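	/*
	 * Illustrative example (made-up numbers): len = 16384 bytes with
	 * max_block_ts = 1024 items and xfer_width = 2 (32-bit transfers)
	 * allows at most 1024 << 2 = 4096 bytes per block, hence num = 4
	 * LLIs are allocated.
	 */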
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of the alignment. Source and destination widths
		 * can actually differ, but we keep them the same for
		 * simplicity.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts is the total number of data units of the chosen
		 * width to be transferred in one DMA block transfer.
		 * The BLOCK_TS register should be set to block_ts - 1.
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);
	return NULL;
}
static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_hw_desc *desc)
{
	if (!desc->lli) {
		dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
		return;
	}

	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli->sar),
		le64_to_cpu(desc->lli->dar),
		le64_to_cpu(desc->lli->llp),
		le32_to_cpu(desc->lli->block_ts_lo),
		le32_to_cpu(desc->lli->ctl_hi),
		le32_to_cpu(desc->lli->ctl_lo));
}
static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}

	if (chan->is_err) {
		struct axi_dma_desc *desc = vd_to_axi_desc(vd);

		axi_chan_block_xfer_start(chan, desc);
		chan->is_err = false;
		goto out;
	}

	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}

	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
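			/*
			 * Note added for clarity: CH_LLP holds the
			 * address of the LLI the controller is currently
			 * working from. The loop below locates that LLI
			 * in the ring, records how many blocks completed
			 * and fires the cyclic callback on period
			 * boundaries.
			 */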
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);

		/* Submit queued descriptors after processing the completed ones */
		axi_chan_start_first_queued(chan);
	}

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;

	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}
static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 10000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}
static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u32 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->chip->dw->hdata->reg_map_8_channels) {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
			BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
	}

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}
/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u32 val;

	if (chan->chip->dw->hdata->reg_map_8_channels) {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
		val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
		val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
	}

	chan->is_paused = false;
}
static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}
static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}
static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}
static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct dw_axi_dma *dw = ofdma->of_dma_data;
	struct axi_dma_chan *chan;
	struct dma_chan *dchan;

	dchan = dma_get_any_slave_channel(&dw->dma);
	if (!dchan)
		return NULL;

	chan = dchan_to_axi_dma_chan(dchan);
	chan->hw_handshake_num = dma_spec->args[0];
	return dchan;
}
static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;
	if (tmp <= DMA_REG_MAP_CH_REF)
		chip->dw->hdata->reg_map_8_channels = true;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority value must be programmed within [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp;
	}

	return 0;
}
static int dw_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct axi_dma_chip *chip;
	struct resource *mem;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(chip->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.chancnt = hdata->nr_channels;
	dw->dma.max_burst = hdata->axi_rw_burst_len;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
	dw->dma.device_synchronize = dw_axi_dma_synchronize;
	dw->dma.device_config = dw_axi_dma_chan_slave_config;
	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;

	/*
	 * The Synopsys DesignWare AXI DMA datasheet states that the maximum
	 * supported block size is 1024 data items. The device register width
	 * is 4 bytes, so constrain the segment size to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need the
	 * driver to work also without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	/* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}
static int dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable clk before accessing registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	devm_free_irq(chip->dev, chip->irq, chip);

	of_dma_controller_free(chip->dev->of_node);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
			vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	return 0;
}
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,axi-dma-1.01a" },
	{ .compatible = "intel,kmb-axi-dma" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name	= KBUILD_MODNAME,
		.of_match_table = dw_dma_of_id_table,
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");