/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <alex@alex-smith.me.uk>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Global registers. */
#define JZ_DMA_REG_DMAC 0x00
#define JZ_DMA_REG_DIRQP 0x04
#define JZ_DMA_REG_DDR 0x08
#define JZ_DMA_REG_DDRS 0x0c
#define JZ_DMA_REG_DCKE 0x10
#define JZ_DMA_REG_DCKES 0x14
#define JZ_DMA_REG_DCKEC 0x18
#define JZ_DMA_REG_DMACP 0x1c
#define JZ_DMA_REG_DSIRQP 0x20
#define JZ_DMA_REG_DSIRQM 0x24
#define JZ_DMA_REG_DCIRQP 0x28
#define JZ_DMA_REG_DCIRQM 0x2c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n) ((n) * 0x20)
#define JZ_DMA_REG_DSA 0x00
#define JZ_DMA_REG_DTA 0x04
#define JZ_DMA_REG_DTC 0x08
#define JZ_DMA_REG_DRT 0x0c
#define JZ_DMA_REG_DCS 0x10
#define JZ_DMA_REG_DCM 0x14
#define JZ_DMA_REG_DDA 0x18
#define JZ_DMA_REG_DSD 0x1c
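
/*
 * Each channel occupies a 0x20-byte register window; the per-channel offsets
 * above are added to JZ_DMA_REG_CHAN(n) by the jz4780_dma_chn_readl() and
 * jz4780_dma_chn_writel() accessors below.
 */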

#define JZ_DMA_DMAC_DMAE BIT(0)
#define JZ_DMA_DMAC_AR BIT(2)
#define JZ_DMA_DMAC_HLT BIT(3)
#define JZ_DMA_DMAC_FAIC BIT(27)
#define JZ_DMA_DMAC_FMSC BIT(31)
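
/*
 * Request type written to DRT for transfers with no hardware request line,
 * i.e. memory-to-memory copies (see jz4780_dma_prep_dma_memcpy()).
 */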
#define JZ_DMA_DRT_AUTO 0x8

#define JZ_DMA_DCS_CTE BIT(0)
#define JZ_DMA_DCS_HLT BIT(2)
#define JZ_DMA_DCS_TT BIT(3)
#define JZ_DMA_DCS_AR BIT(4)
#define JZ_DMA_DCS_DES8 BIT(30)

#define JZ_DMA_DCM_LINK BIT(0)
#define JZ_DMA_DCM_TIE BIT(1)
#define JZ_DMA_DCM_STDE BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT 8
#define JZ_DMA_DCM_TSZ_MASK (0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT 12
#define JZ_DMA_DCM_SP_SHIFT 14
#define JZ_DMA_DCM_DAI BIT(22)
#define JZ_DMA_DCM_SAI BIT(23)

#define JZ_DMA_SIZE_4_BYTE 0x0
#define JZ_DMA_SIZE_1_BYTE 0x1
#define JZ_DMA_SIZE_2_BYTE 0x2
#define JZ_DMA_SIZE_16_BYTE 0x3
#define JZ_DMA_SIZE_32_BYTE 0x4
#define JZ_DMA_SIZE_64_BYTE 0x5
#define JZ_DMA_SIZE_128_BYTE 0x6

#define JZ_DMA_WIDTH_32_BIT 0x0
#define JZ_DMA_WIDTH_8_BIT 0x1
#define JZ_DMA_WIDTH_16_BIT 0x2

#define JZ_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
			  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
			  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define JZ4780_DMA_CTRL_OFFSET 0x1000

/* macros for use with jz4780_dma_soc_data.flags */
#define JZ_SOC_DATA_ALLOW_LEGACY_DT BIT(0)
#define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
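
/*
 * ALLOW_LEGACY_DT: probe may fall back to a fixed offset for the control
 * registers when the second memory resource is absent (old JZ4780 DTs).
 * PROGRAMMABLE_DMA: the controller has a DMACP register for marking channels
 * as programmable. PER_CHAN_PM: channel clocks are gated individually, via
 * either a single DCKE register (NO_DCKES_DCKEC) or the DCKES/DCKEC
 * set/clear pair.
 */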

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 */
struct jz4780_dma_hwdesc {
	uint32_t dcm;
	uint32_t dsa;
	uint32_t dta;
	uint32_t dtc;
};

/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE PAGE_SIZE
#define JZ_DMA_MAX_DESC \
	(JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))
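
/*
 * With 16-byte hardware descriptors and 4 KiB pages this allows up to 256
 * descriptors per block, which bounds the number of scatterlist entries or
 * cyclic periods a single transaction may use.
 */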

struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	uint32_t status;
};

struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	uint32_t transfer_type;
	uint32_t transfer_shift;
	struct dma_slave_config config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};

struct jz4780_dma_soc_data {
	unsigned int nb_channels;
	unsigned int transfer_ord_max;
	unsigned long flags;
};

struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *chn_base;
	void __iomem *ctrl_base;
	struct clk *clk;
	unsigned int irq;
	const struct jz4780_dma_soc_data *soc_data;

	uint32_t chan_reserved;
	struct jz4780_dma_chan chan[];
};

struct jz4780_dma_filter_data {
	struct device_node *of_node;
	uint32_t transfer_type;
	int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}

static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg)
{
	return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg, uint32_t val)
{
	writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, uint32_t val)
{
	writel(val, jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
		unsigned int reg;

		if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
			reg = JZ_DMA_REG_DCKE;
		else
			reg = JZ_DMA_REG_DCKES;

		jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
	}
}

static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
	    !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}
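
/*
 * Descriptor allocation uses GFP_NOWAIT because the dmaengine prep callbacks
 * that call it may run in atomic context.
 */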
static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
	struct jz4780_dma_chan *jzchan, unsigned int count,
	enum dma_transaction_type type)
{
	struct jz4780_dma_desc *desc;

	if (count > JZ_DMA_MAX_DESC)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
				    &desc->desc_phys);
	if (!desc->desc) {
		kfree(desc);
		return NULL;
	}

	desc->count = count;
	desc->type = type;
	return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}

static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
	unsigned long val, uint32_t *shift)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	int ord = ffs(val) - 1;

	/*
	 * 8 byte transfer sizes unsupported so fall back on 4. If it's larger
	 * than the maximum, just limit it. It is perfectly safe to fall back
	 * in this way since we won't exceed the maximum burst size supported
	 * by the device, the only effect is reduced efficiency. This is better
	 * than refusing to perform the request at all.
	 */
	if (ord == 3)
		ord = 2;
	else if (ord > jzdma->soc_data->transfer_ord_max)
		ord = jzdma->soc_data->transfer_ord_max;

	*shift = ord;

	switch (ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	default:
		return JZ_DMA_SIZE_128_BYTE;
	}
}
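
/*
 * Example: for val = addr | len | (width * maxburst) = 0x1030, the lowest set
 * bit gives ord = 4, so the channel uses 16-byte transfer units and the caller
 * receives transfer_shift = 4 for converting between bytes and block counts.
 */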

static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	uint32_t width, maxburst, tsz;

	if (direction == DMA_MEM_TO_DEV) {
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * This calculates the maximum transfer size that can be used with the
	 * given address, length, width and maximum burst size. The address
	 * must be aligned to the transfer size, the total length must be
	 * divisible by the transfer size, and we must not use more than the
	 * maximum burst specified by the user.
	 */
	tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
				       &jzchan->transfer_shift);

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	desc->dtc = len >> jzchan->transfer_shift;
	return 0;
}
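
/*
 * Note on the switch above: DMA_SLAVE_BUSWIDTH_1_BYTE (1) and
 * DMA_SLAVE_BUSWIDTH_2_BYTES (2) already equal the hardware port width
 * encodings JZ_DMA_WIDTH_8_BIT and JZ_DMA_WIDTH_16_BIT, so only the 4-byte
 * case needs remapping. For example, a 4-byte-wide device with maxburst 4
 * and a 16-byte-aligned buffer yields tsz = JZ_DMA_SIZE_16_BYTE and
 * transfer_shift = 4, so DTC counts 16-byte blocks.
 */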

static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int i;
	int err;

	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
	if (!desc)
		return NULL;

	for (i = 0; i < sg_len; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
					      sg_dma_address(&sgl[i]),
					      sg_dma_len(&sgl[i]),
					      direction);
		if (err < 0) {
			jz4780_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

		if (i != (sg_len - 1)) {
			/* Automatically proceed to the next descriptor. */
			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

			/*
			 * The upper 8 bits of the DTC field in the descriptor
			 * must be set to (offset from descriptor base of next
			 * descriptor >> 4).
			 */
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
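
/*
 * Since sizeof(struct jz4780_dma_hwdesc) is 16 bytes, the
 * ((i + 1) * sizeof(*desc->desc)) >> 4 expression above reduces to i + 1:
 * the byte stored in DTC[31:24] is simply the index of the next descriptor
 * within the block.
 */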

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int periods, i;
	int err;

	if (buf_len % period_len)
		return NULL;

	periods = buf_len / period_len;

	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
	if (!desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
					      period_len, direction);
		if (err < 0) {
			jz4780_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		buf_addr += period_len;

		/*
		 * Set the link bit to indicate that the controller should
		 * automatically proceed to the next descriptor. In
		 * jz4780_dma_begin(), this will be cleared if we need to issue
		 * an interrupt after each period.
		 */
		desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

		/*
		 * The upper 8 bits of the DTC field in the descriptor must be
		 * set to (offset from descriptor base of next descriptor >> 4).
		 * If this is the last descriptor, link it back to the first,
		 * i.e. leave offset set to 0, otherwise point to the next one.
		 */
		if (i != (periods - 1)) {
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	uint32_t tsz;

	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
	if (!desc)
		return NULL;

	tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
				       &jzchan->transfer_shift);

	jzchan->transfer_type = JZ_DMA_DRT_AUTO;

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> jzchan->transfer_shift;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The DMA controller doesn't support triggering an
			 * interrupt after processing each descriptor, only
			 * after processing an entire terminated list of
			 * descriptors. For a cyclic DMA setup the list of
			 * descriptors is not terminated so we can never get an
			 * interrupt.
			 *
			 * If the user requested a callback for a cyclic DMA
			 * setup then we workaround this hardware limitation
			 * here by degrading to a set of unlinked descriptors
			 * which we will submit in sequence in response to the
			 * completion of processing the previous descriptor.
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, therefore this must be one
		 * for which we unlinked the descriptors above. Advance to the
		 * next one in the list.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Enable the channel's clock. */
	jz4780_dma_chan_enable(jzdma, jzchan->id);

	/* Use 4-word descriptors. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	/* Set transfer type. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
			      jzchan->transfer_type);

	/*
	 * Set the transfer count. This is redundant for a descriptor-driven
	 * transfer. However, there can be a delay between the transfer start
	 * time and when DTCn reg contains the new transfer count. Setting
	 * it explicitly ensures residue is computed correctly at all times.
	 */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
			      jzchan->desc->desc[jzchan->curr_hwdesc].dtc);

	/* Write descriptor address and initiate descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
			      JZ_DMA_DCS_CTE);
}

static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
		jz4780_dma_begin(jzchan);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
	if (jzchan->desc) {
		vchan_terminate_vdesc(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	jz4780_dma_chan_disable(jzdma, jzchan->id);

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);

	return 0;
}

static void jz4780_dma_synchronize(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);

	vchan_synchronize(&jzchan->vchan);
	jz4780_dma_chan_disable(jzdma, jzchan->id);
}

static int jz4780_dma_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
	    || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
		return -EINVAL;

	/* Copy the rest of the slave configuration, it is used later. */
	memcpy(&jzchan->config, config, sizeof(jzchan->config));

	return 0;
}

static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int count = 0;
	unsigned int i;

	for (i = next_sg; i < desc->count; i++)
		count += desc->desc[i].dtc & GENMASK(23, 0);

	if (next_sg != 0)
		count += jz4780_dma_chn_readl(jzdma, jzchan->id,
					      JZ_DMA_REG_DTC);

	return count << jzchan->transfer_shift;
}
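
/*
 * The residue is a block count scaled back to bytes: e.g. with 32-byte
 * transfer units (transfer_shift = 5) and 10 blocks outstanding, the
 * reported residue is 10 << 5 = 320 bytes.
 */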

static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	unsigned long residue = 0;

	status = dma_cookie_status(chan, cookie, txstate);
	if ((status == DMA_COMPLETE) || (txstate == NULL))
		return status;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
	if (vdesc) {
		/* On the issued list, so hasn't been processed yet */
		residue = jz4780_dma_desc_residue(jzchan,
					to_jz4780_dma_desc(vdesc), 0);
	} else if (jzchan->desc && cookie == jzchan->desc->vdesc.tx.cookie) {
		residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
					jzchan->curr_hwdesc + 1);
	}
	dma_set_residue(txstate, residue);

	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
		status = DMA_ERROR;

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	return status;
}

static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
	struct jz4780_dma_chan *jzchan)
{
	uint32_t dcs;

	spin_lock(&jzchan->vchan.lock);

	dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	if (dcs & JZ_DMA_DCS_AR) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "address error (DCS=0x%x)\n", dcs);
	}

	if (dcs & JZ_DMA_DCS_HLT) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "channel halt (DCS=0x%x)\n", dcs);
	}

	if (jzchan->desc) {
		jzchan->desc->status = dcs;

		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
			if (jzchan->desc->type == DMA_CYCLIC) {
				vchan_cyclic_callback(&jzchan->desc->vdesc);
			} else {
				vchan_cookie_complete(&jzchan->desc->vdesc);
				jzchan->desc = NULL;
			}

			jz4780_dma_begin(jzchan);
		}
	} else {
		dev_err(&jzchan->vchan.chan.dev->device,
			"channel IRQ with no active transfer\n");
	}

	spin_unlock(&jzchan->vchan.lock);
}

static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
	struct jz4780_dma_dev *jzdma = data;
	uint32_t pending, dmac;
	int i;

	pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);

	for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
		if (!(pending & BIT(i)))
			continue;

		jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
	}

	/* Clear halt and address error status of all channels. */
	dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

	/* Clear interrupt pending status. */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);

	return IRQ_HANDLED;
}

static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
					    chan->device->dev,
					    JZ_DMA_DESC_BLOCK_SIZE,
					    PAGE_SIZE, 0);
	if (!jzchan->desc_pool) {
		dev_err(&chan->dev->device,
			"failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	vchan_free_chan_resources(&jzchan->vchan);
	dma_pool_destroy(jzchan->desc_pool);
	jzchan->desc_pool = NULL;
}

static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_filter_data *data = param;

	if (jzdma->dma_device.dev->of_node != data->of_node)
		return false;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
			return false;
	} else if (jzdma->chan_reserved & BIT(jzchan->id)) {
		return false;
	}

	jzchan->transfer_type = data->transfer_type;

	return true;
}

static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_filter_data data;

	if (dma_spec->args_count != 2)
		return NULL;

	data.of_node = ofdma->of_node;
	data.transfer_type = dma_spec->args[0];
	data.channel = dma_spec->args[1];

	if (data.channel > -1) {
		if (data.channel >= jzdma->soc_data->nb_channels) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %u\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %u\n",
				data.channel);
			return NULL;
		}

		jzdma->chan[data.channel].transfer_type = data.transfer_type;

		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	}

	return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
}
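
/*
 * The two DT cells are the request/transfer type and a channel index. A
 * hypothetical consumer node might use "dmas = <&dma 0x14 0xffffffff>;",
 * where 0xffffffff (-1 as an int) lets the filter pick any unreserved
 * channel, while a non-negative index selects that specific reserved channel.
 */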

static int jz4780_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct jz4780_dma_soc_data *soc_data;
	struct jz4780_dma_dev *jzdma;
	struct jz4780_dma_chan *jzchan;
	struct dma_device *dd;
	struct resource *res;
	int i, ret;

	if (!dev->of_node) {
		dev_err(dev, "This driver must be probed from devicetree\n");
		return -EINVAL;
	}

	soc_data = device_get_match_data(dev);
	if (!soc_data)
		return -EINVAL;

	jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
			     soc_data->nb_channels), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;

	jzdma->soc_data = soc_data;
	platform_set_drvdata(pdev, jzdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	jzdma->chn_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(jzdma->chn_base))
		return PTR_ERR(jzdma->chn_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		jzdma->ctrl_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(jzdma->ctrl_base))
			return PTR_ERR(jzdma->ctrl_base);
	} else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
		/*
		 * On JZ4780, if the second memory resource was not supplied,
		 * assume we're using an old devicetree, and calculate the
		 * offset to the control registers.
		 */
		jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
	} else {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", ret);
		return ret;
	}

	jzdma->irq = ret;

	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
			  jzdma);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
		return ret;
	}

	jzdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(jzdma->clk)) {
		dev_err(dev, "failed to get clock\n");
		ret = PTR_ERR(jzdma->clk);
		goto err_free_irq;
	}

	clk_prepare_enable(jzdma->clk);

	/* Property is optional, if it doesn't exist the value will remain 0. */
	of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
				   0, &jzdma->chan_reserved);

	dd = &jzdma->dma_device;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	dd->dev = dev;
	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
	dd->device_config = jz4780_dma_config;
	dd->device_terminate_all = jz4780_dma_terminate_all;
	dd->device_synchronize = jz4780_dma_synchronize;
	dd->device_tx_status = jz4780_dma_tx_status;
	dd->device_issue_pending = jz4780_dma_issue_pending;
	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * Enable DMA controller, mark all channels as not programmable.
	 * Also set the FMSC bit - it increases MSC performance, so it makes
	 * little sense not to enable it.
	 */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
			       JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);

	if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);

	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < soc_data->nb_channels; i++) {
		jzchan = &jzdma->chan[i];
		jzchan->id = i;

		vchan_init(&jzchan->vchan, dd);
		jzchan->vchan.desc_free = jz4780_dma_desc_free;
	}

	ret = dmaenginem_async_device_register(dd);
	if (ret) {
		dev_err(dev, "failed to register device\n");
		goto err_disable_clk;
	}

	/* Register with OF DMA helpers. */
	ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
					 jzdma);
	if (ret) {
		dev_err(dev, "failed to register OF DMA controller\n");
		goto err_disable_clk;
	}

	dev_info(dev, "JZ4780 DMA controller initialised\n");
	return 0;

err_disable_clk:
	clk_disable_unprepare(jzdma->clk);

err_free_irq:
	free_irq(jzdma->irq, jzdma);
	return ret;
}

static int jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	free_irq(jzdma->irq, jzdma);

	for (i = 0; i < jzdma->soc_data->nb_channels; i++)
		tasklet_kill(&jzdma->chan[i].vchan.task);

	return 0;
}

static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
};

static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
	.probe = jz4780_dma_probe,
	.remove = jz4780_dma_remove,
	.driver = {
		.name = "jz4780-dma",
		.of_match_table = of_match_ptr(jz4780_dma_dt_match),
	},
};

static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");