// SPDX-License-Identifier: GPL-2.0+
/*
 * BCM2835 DMA engine support
 *
 * Author: Florian Meier <florian.meier@koalo.de>
 *
 * Based on
 *	OMAP DMAengine support by Russell King
 *
 *	BCM2708 DMA Driver
 *	Copyright (C) 2010 Broadcom
 *
 *	Raspberry Pi PCM I2S ALSA Driver
 *	Copyright (c) by Phil Poole 2013
 *
 *	MARVELL MMP Peripheral DMA Driver
 *	Copyright 2012 Marvell International Ltd.
 */
#include <linux/dmaengine.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_data/dma-bcm2708.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
#define BCM2835_DMA_CHAN_NAME_SIZE 8
#define BCM2835_DMA_BULK_MASK BIT(0)
#define BCM2711_DMA_MEMCPY_CHAN 14
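
/*
 * On BCM2711, DMA channel 14 (a 40-bit channel) is claimed by this driver,
 * when the device tree makes it available, to back the exported
 * bcm2711_dma40_memcpy() helper rather than being handed to dmaengine clients.
 */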

struct bcm2835_dma_cfg_data {
	u32 chan_40bit_mask;
	u64 dma_mask;
};

/**
 * struct bcm2835_dmadev - BCM2835 DMA controller
 * @ddev: DMA device
 * @base: base address of register map
 * @zero_page: bus address of zero page (to detect transactions copying from
 *	zero page and avoid accessing memory if so)
 * @cfg_data: per-SoC configuration data
 */
struct bcm2835_dmadev {
	struct dma_device ddev;
	void __iomem *base;
	dma_addr_t zero_page;
	const struct bcm2835_dma_cfg_data *cfg_data;
};

struct bcm2835_dma_cb {
	uint32_t info;
	uint32_t src;
	uint32_t dst;
	uint32_t length;
	uint32_t stride;
	uint32_t next;
	uint32_t pad[2];
};

struct bcm2711_dma40_scb {
	uint32_t ti;
	uint32_t src;
	uint32_t srci;
	uint32_t dst;
	uint32_t dsti;
	uint32_t len;
	uint32_t next_cb;
	uint32_t rsvd;
};
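
/*
 * Both control-block layouts above are read directly by the DMA engine from
 * memory, so instances are allocated from the channel's dma_pool (32-byte
 * aligned) rather than ordinary kernel memory. The 40-bit ("DMA4") layout
 * packs the high address bits and transfer attributes into the srci/dsti
 * words.
 */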

struct bcm2835_cb_entry {
	struct bcm2835_dma_cb *cb;
	dma_addr_t paddr;
};

struct bcm2835_chan {
	struct virt_dma_chan vc;

	struct dma_slave_config cfg;
	unsigned int dreq;

	int ch;
	struct bcm2835_desc *desc;
	struct dma_pool *cb_pool;

	void __iomem *chan_base;
	int irq_number;
	unsigned int irq_flags;

	bool is_lite_channel;
	bool is_40bit_channel;
	bool is_2712;
};

struct bcm2835_desc {
	struct bcm2835_chan *c;
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;

	unsigned int frames;
	size_t size;

	bool cyclic;

	struct bcm2835_cb_entry cb_list[];
};

#define BCM2835_DMA_CS 0x00
#define BCM2835_DMA_ADDR 0x04
#define BCM2835_DMA_TI 0x08
#define BCM2835_DMA_SOURCE_AD 0x0c
#define BCM2835_DMA_DEST_AD 0x10
#define BCM2835_DMA_LEN 0x14
#define BCM2835_DMA_STRIDE 0x18
#define BCM2835_DMA_NEXTCB 0x1c
#define BCM2835_DMA_DEBUG 0x20

/* DMA CS Control and Status bits */
#define BCM2835_DMA_ACTIVE BIT(0) /* activate the DMA */
#define BCM2835_DMA_END BIT(1) /* current CB has ended */
#define BCM2835_DMA_INT BIT(2) /* interrupt status */
#define BCM2835_DMA_DREQ BIT(3) /* DREQ state */
#define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */
#define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */
#define BCM2835_DMA_WAITING_FOR_WRITES BIT(6) /* waiting for last write to be accepted */
#define BCM2835_DMA_ERR BIT(8)
#define BCM2835_DMA_PRIORITY(x) ((x & 15) << 16) /* AXI priority */
#define BCM2835_DMA_PANIC_PRIORITY(x) ((x & 15) << 20) /* panic priority */
/* current value of TI.BCM2835_DMA_WAIT_RESP */
#define BCM2835_DMA_WAIT_FOR_WRITES BIT(28)
#define BCM2835_DMA_DIS_DEBUG BIT(29) /* disable debug pause signal */
#define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */
#define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */

/* Transfer information bits - also bcm2835_cb.info field */
#define BCM2835_DMA_INT_EN BIT(0)
#define BCM2835_DMA_TDMODE BIT(1) /* 2D-Mode */
#define BCM2835_DMA_WAIT_RESP BIT(3) /* wait for AXI-write to be acked */
#define BCM2835_DMA_D_INC BIT(4)
#define BCM2835_DMA_D_WIDTH BIT(5) /* 128bit writes if set */
#define BCM2835_DMA_D_DREQ BIT(6) /* enable DREQ for destination */
#define BCM2835_DMA_D_IGNORE BIT(7) /* ignore destination writes */
#define BCM2835_DMA_S_INC BIT(8)
#define BCM2835_DMA_S_WIDTH BIT(9) /* 128bit writes if set */
#define BCM2835_DMA_S_DREQ BIT(10) /* enable SREQ for source */
#define BCM2835_DMA_S_IGNORE BIT(11) /* ignore source reads - read 0 */
#define BCM2835_DMA_BURST_LENGTH(x) (((x) & 15) << 12)
#define BCM2835_DMA_GET_BURST_LENGTH(x) (((x) >> 12) & 15)
#define BCM2835_DMA_CS_FLAGS(x) (x & (BCM2835_DMA_PRIORITY(15) | \
				      BCM2835_DMA_PANIC_PRIORITY(15) | \
				      BCM2835_DMA_WAIT_FOR_WRITES | \
				      BCM2835_DMA_DIS_DEBUG))
#define BCM2835_DMA_PER_MAP(x) ((x & 31) << 16) /* REQ source */
#define BCM2835_DMA_WAIT(x) ((x & 31) << 21) /* add DMA-wait cycles */
#define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */

/* A fake bit to request that the driver doesn't set the WAIT_RESP bit. */
#define BCM2835_DMA_NO_WAIT_RESP BIT(27)
#define WAIT_RESP(x) ((x & BCM2835_DMA_NO_WAIT_RESP) ? \
		      0 : BCM2835_DMA_WAIT_RESP)

/* A fake bit to request that the driver requires wide reads */
#define BCM2835_DMA_WIDE_SOURCE BIT(24)
#define WIDE_SOURCE(x) ((x & BCM2835_DMA_WIDE_SOURCE) ? \
			BCM2835_DMA_S_WIDTH : 0)

/* A fake bit to request that the driver requires wide writes */
#define BCM2835_DMA_WIDE_DEST BIT(25)
#define WIDE_DEST(x) ((x & BCM2835_DMA_WIDE_DEST) ? \
		      BCM2835_DMA_D_WIDTH : 0)

/* A fake bit to request that the driver requires multi-beat burst */
#define BCM2835_DMA_BURST BIT(30)
#define BURST_LENGTH(x) ((x & BCM2835_DMA_BURST) ? \
			 BCM2835_DMA_BURST_LENGTH(3) : 0)
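
/*
 * The NO_WAIT_RESP/WIDE_SOURCE/WIDE_DEST/BURST "fake" bits above are not real
 * hardware flags: they ride along in the DREQ cell that bcm2835_dma_xlate()
 * stores in bcm2835_chan::dreq. The WAIT_RESP()/WIDE_SOURCE()/WIDE_DEST()/
 * BURST_LENGTH() helpers translate them into real transfer-information bits,
 * while BCM2835_DMA_PER_MAP() and BCM2835_DMA_CS_FLAGS() mask out only the
 * parts of the value the hardware actually understands.
 */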

/* debug register bits */
#define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR BIT(0)
#define BCM2835_DMA_DEBUG_FIFO_ERR BIT(1)
#define BCM2835_DMA_DEBUG_READ_ERR BIT(2)
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_SHIFT 4
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_BITS 4
#define BCM2835_DMA_DEBUG_ID_SHIFT 16
#define BCM2835_DMA_DEBUG_ID_BITS 9
#define BCM2835_DMA_DEBUG_STATE_SHIFT 16
#define BCM2835_DMA_DEBUG_STATE_BITS 9
#define BCM2835_DMA_DEBUG_VERSION_SHIFT 25
#define BCM2835_DMA_DEBUG_VERSION_BITS 3
#define BCM2835_DMA_DEBUG_LITE BIT(28)

/* shared registers for all dma channels */
#define BCM2835_DMA_INT_STATUS 0xfe0
#define BCM2835_DMA_ENABLE 0xff0

#define BCM2835_DMA_DATA_TYPE_S8 1
#define BCM2835_DMA_DATA_TYPE_S16 2
#define BCM2835_DMA_DATA_TYPE_S32 4
#define BCM2835_DMA_DATA_TYPE_S128 16

/* Valid only for channels 0 - 14, 15 has its own base address */
#define BCM2835_DMA_CHAN_SIZE 0x100
#define BCM2835_DMA_CHAN(n) ((n) * BCM2835_DMA_CHAN_SIZE) /* Base address */
#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))

/* the max dma length for different channels */
#define MAX_DMA_LEN SZ_1G
#define MAX_LITE_DMA_LEN (SZ_64K - 4)

/* 40-bit DMA support */
#define BCM2711_DMA40_CS 0x00
#define BCM2711_DMA40_CB 0x04
#define BCM2711_DMA40_DEBUG 0x0c
#define BCM2711_DMA40_TI 0x10
#define BCM2711_DMA40_SRC 0x14
#define BCM2711_DMA40_SRCI 0x18
#define BCM2711_DMA40_DEST 0x1c
#define BCM2711_DMA40_DESTI 0x20
#define BCM2711_DMA40_LEN 0x24
#define BCM2711_DMA40_NEXT_CB 0x28
#define BCM2711_DMA40_DEBUG2 0x2c

#define BCM2711_DMA40_ACTIVE BIT(0)
#define BCM2711_DMA40_END BIT(1)
#define BCM2711_DMA40_INT BIT(2)
#define BCM2711_DMA40_DREQ BIT(3) /* DREQ state */
#define BCM2711_DMA40_RD_PAUSED BIT(4) /* Reading is paused */
#define BCM2711_DMA40_WR_PAUSED BIT(5) /* Writing is paused */
#define BCM2711_DMA40_DREQ_PAUSED BIT(6) /* Is paused by DREQ flow control */
#define BCM2711_DMA40_WAITING_FOR_WRITES BIT(7) /* Waiting for last write */
/* we always want to run in supervisor mode */
#define BCM2711_DMA40_PROT (BIT(8) | BIT(9))
#define BCM2711_DMA40_ERR BIT(10)
#define BCM2711_DMA40_QOS(x) (((x) & 0x1f) << 16)
#define BCM2711_DMA40_PANIC_QOS(x) (((x) & 0x1f) << 20)
#define BCM2711_DMA40_TRANSACTIONS BIT(25)
#define BCM2711_DMA40_WAIT_FOR_WRITES BIT(28)
#define BCM2711_DMA40_DISDEBUG BIT(29)
#define BCM2711_DMA40_ABORT BIT(30)
#define BCM2711_DMA40_HALT BIT(31)

#define BCM2711_DMA40_CS_FLAGS(x) (x & (BCM2711_DMA40_QOS(15) | \
					BCM2711_DMA40_PANIC_QOS(15) | \
					BCM2711_DMA40_WAIT_FOR_WRITES | \
					BCM2711_DMA40_DISDEBUG))

/* Transfer information bits */
#define BCM2711_DMA40_INTEN BIT(0)
#define BCM2711_DMA40_TDMODE BIT(1) /* 2D-Mode */
#define BCM2711_DMA40_WAIT_RESP BIT(2) /* wait for AXI write to be acked */
#define BCM2711_DMA40_WAIT_RD_RESP BIT(3) /* wait for AXI read to complete */
#define BCM2711_DMA40_PER_MAP(x) ((x & 31) << 9) /* REQ source */
#define BCM2711_DMA40_S_DREQ BIT(14) /* enable SREQ for source */
#define BCM2711_DMA40_D_DREQ BIT(15) /* enable DREQ for destination */
#define BCM2711_DMA40_S_WAIT(x) ((x & 0xff) << 16) /* add DMA read-wait cycles */
#define BCM2711_DMA40_D_WAIT(x) ((x & 0xff) << 24) /* add DMA write-wait cycles */

/* debug register bits */
#define BCM2711_DMA40_DEBUG_WRITE_ERR BIT(0)
#define BCM2711_DMA40_DEBUG_FIFO_ERR BIT(1)
#define BCM2711_DMA40_DEBUG_READ_ERR BIT(2)
#define BCM2711_DMA40_DEBUG_READ_CB_ERR BIT(3)
#define BCM2711_DMA40_DEBUG_IN_ON_ERR BIT(8)
#define BCM2711_DMA40_DEBUG_ABORT_ON_ERR BIT(9)
#define BCM2711_DMA40_DEBUG_HALT_ON_ERR BIT(10)
#define BCM2711_DMA40_DEBUG_DISABLE_CLK_GATE BIT(11)
#define BCM2711_DMA40_DEBUG_RSTATE_SHIFT 14
#define BCM2711_DMA40_DEBUG_RSTATE_BITS 4
#define BCM2711_DMA40_DEBUG_WSTATE_SHIFT 18
#define BCM2711_DMA40_DEBUG_WSTATE_BITS 4
#define BCM2711_DMA40_DEBUG_RESET BIT(23)
#define BCM2711_DMA40_DEBUG_ID_SHIFT 24
#define BCM2711_DMA40_DEBUG_ID_BITS 4
#define BCM2711_DMA40_DEBUG_VERSION_SHIFT 28
#define BCM2711_DMA40_DEBUG_VERSION_BITS 4

/* Valid only for channels 0 - 3 (11 - 14) */
#define BCM2711_DMA40_CHAN(n) (((n) + 11) << 8) /* Base address */
#define BCM2711_DMA40_CHANIO(base, n) ((base) + BCM2711_DMA40_CHAN(n))

/* the max dma length for different channels */
#define MAX_DMA40_LEN SZ_1G

#define BCM2711_DMA40_BURST_LEN(x) (((x) & 15) << 8)
#define BCM2711_DMA40_INC BIT(12)
#define BCM2711_DMA40_SIZE_32 (0 << 13)
#define BCM2711_DMA40_SIZE_64 (1 << 13)
#define BCM2711_DMA40_SIZE_128 (2 << 13)
#define BCM2711_DMA40_SIZE_256 (3 << 13)
#define BCM2711_DMA40_IGNORE BIT(15)
#define BCM2711_DMA40_STRIDE(x) ((x) << 16) /* For 2D mode */

#define BCM2711_DMA40_MEMCPY_FLAGS \
	(BCM2711_DMA40_QOS(0) | \
	 BCM2711_DMA40_PANIC_QOS(0) | \
	 BCM2711_DMA40_WAIT_FOR_WRITES | \
	 BCM2711_DMA40_DISDEBUG)

#define BCM2711_DMA40_MEMCPY_XFER_INFO \
	(BCM2711_DMA40_SIZE_128 | \
	 BCM2711_DMA40_INC | \
	 BCM2711_DMA40_BURST_LEN(16))

static struct bcm2835_dmadev *memcpy_parent;
static void __iomem *memcpy_chan;
static struct bcm2711_dma40_scb *memcpy_scb;
static dma_addr_t memcpy_scb_dma;
static DEFINE_SPINLOCK(memcpy_lock);
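
/*
 * State for the dedicated 40-bit memcpy channel: it is claimed (and the
 * scratch control block allocated) in bcm2835_dma_probe(), used by the
 * exported bcm2711_dma40_memcpy() helper below, and released again in
 * bcm2835_dma_remove().
 */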

static const struct bcm2835_dma_cfg_data bcm2835_dma_cfg = {
	.chan_40bit_mask = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct bcm2835_dma_cfg_data bcm2711_dma_cfg = {
	.chan_40bit_mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
	.dma_mask = DMA_BIT_MASK(36),
};

static const struct bcm2835_dma_cfg_data bcm2712_dma_cfg = {
	.chan_40bit_mask = BIT(6) | BIT(7) | BIT(8) | BIT(9) |
			   BIT(10) | BIT(11),
	.dma_mask = DMA_BIT_MASK(40),
};

static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c)
{
	/* lite and normal channels have different max frame length */
	return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN;
}

/* how many frames of max_len size do we need to transfer len bytes */
static inline size_t bcm2835_dma_frames_for_length(size_t len, size_t max_len)
{
	return DIV_ROUND_UP(len, max_len);
}

static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
{
	return container_of(d, struct bcm2835_dmadev, ddev);
}

static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct bcm2835_chan, vc.chan);
}

static inline struct bcm2835_desc *to_bcm2835_dma_desc(
	struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct bcm2835_desc, vd.tx);
}

static inline uint32_t to_bcm2711_ti(uint32_t info)
{
	return ((info & BCM2835_DMA_INT_EN) ? BCM2711_DMA40_INTEN : 0) |
	       ((info & BCM2835_DMA_WAIT_RESP) ? BCM2711_DMA40_WAIT_RESP : 0) |
	       ((info & BCM2835_DMA_S_DREQ) ?
		(BCM2711_DMA40_S_DREQ | BCM2711_DMA40_WAIT_RD_RESP) : 0) |
	       ((info & BCM2835_DMA_D_DREQ) ? BCM2711_DMA40_D_DREQ : 0) |
	       BCM2711_DMA40_PER_MAP((info >> 16) & 0x1f);
}

static inline uint32_t to_bcm2711_srci(uint32_t info)
{
	return ((info & BCM2835_DMA_S_INC) ? BCM2711_DMA40_INC : 0) |
	       ((info & BCM2835_DMA_S_WIDTH) ? BCM2711_DMA40_SIZE_128 : 0) |
	       BCM2711_DMA40_BURST_LEN(BCM2835_DMA_GET_BURST_LENGTH(info));
}

static inline uint32_t to_bcm2711_dsti(uint32_t info)
{
	return ((info & BCM2835_DMA_D_INC) ? BCM2711_DMA40_INC : 0) |
	       ((info & BCM2835_DMA_D_WIDTH) ? BCM2711_DMA40_SIZE_128 : 0) |
	       BCM2711_DMA40_BURST_LEN(BCM2835_DMA_GET_BURST_LENGTH(info));
}
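
/*
 * The to_bcm2711_*() helpers above let the rest of the driver keep building
 * 2835-style "info" words; when a transfer runs on a 40-bit (DMA4) channel,
 * those bits are translated into the equivalent TI/SRCI/DSTI encodings just
 * before they are written into a bcm2711_dma40_scb.
 */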

static inline uint32_t to_40bit_cbaddr(dma_addr_t addr)
{
	BUG_ON(addr & 0x1f);
	return (addr >> 5);
}

static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
{
	size_t i;

	for (i = 0; i < desc->frames; i++)
		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
			      desc->cb_list[i].paddr);

	kfree(desc);
}

static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
	bcm2835_dma_free_cb_chain(
		container_of(vd, struct bcm2835_desc, vd));
}

static void bcm2835_dma_create_cb_set_length(
	struct bcm2835_chan *c,
	struct bcm2835_dma_cb *control_block,
	size_t len,
	size_t period_len,
	size_t *total_len,
	u32 finalextrainfo)
{
	size_t max_len = bcm2835_dma_max_frame_length(c);
	u32 cb_len;

	/* set the length taking lite-channel limitations into account */
	cb_len = min_t(u32, len, max_len);

	if (period_len) {
		/*
		 * period_len means that we need to generate transfers that
		 * terminate at every multiple of period_len - this is
		 * typically used to set the interrupt flag in info, which is
		 * required during cyclic transfers.
		 */

		/* have we filled in period_length yet? */
		if (*total_len + cb_len < period_len) {
			/* update number of bytes in this period so far */
			*total_len += cb_len;
		} else {
			/* calculate the length that remains to reach period_len */
			cb_len = period_len - *total_len;

			/* reset total_len for next period */
			*total_len = 0;
		}
	}

	if (c->is_40bit_channel) {
		struct bcm2711_dma40_scb *scb =
			(struct bcm2711_dma40_scb *)control_block;

		scb->len = cb_len;
		/* add extrainfo bits to ti */
		scb->ti |= to_bcm2711_ti(finalextrainfo);
	} else {
		control_block->length = cb_len;
		/* add extrainfo bits to info */
		control_block->info |= finalextrainfo;
	}
}

static inline size_t bcm2835_dma_count_frames_for_sg(
	struct bcm2835_chan *c,
	struct scatterlist *sgl,
	unsigned int sg_len)
{
	size_t frames = 0;
	struct scatterlist *sgent;
	unsigned int i;
	size_t plength = bcm2835_dma_max_frame_length(c);

	for_each_sg(sgl, sgent, sg_len, i)
		frames += bcm2835_dma_frames_for_length(
			sg_dma_len(sgent), plength);

	return frames;
}

/**
 * bcm2835_dma_create_cb_chain - create a chain of control blocks and fill in the data
 *
 * @c: the @bcm2835_chan for which we run this
 * @direction: the direction in which we transfer
 * @cyclic: it is a cyclic transfer
 * @info: the default info bits to apply per controlblock
 * @frames: number of controlblocks to allocate
 * @src: the src address to assign (if the S_INC bit is set
 *       in @info, then it gets incremented)
 * @dst: the dst address to assign (if the D_INC bit is set
 *       in @info, then it gets incremented)
 * @buf_len: the full buffer length (may also be 0)
 * @period_len: the period length when to apply @finalextrainfo
 *              in addition to the last transfer
 *              this will also break some control-blocks early
 * @finalextrainfo: additional bits in last controlblock
 *                  (or when period_len is reached in case of cyclic)
 * @gfp: the GFP flag to use for allocation
 */
static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
	struct bcm2835_chan *c, enum dma_transfer_direction direction,
	bool cyclic, u32 info, u32 finalextrainfo, size_t frames,
	dma_addr_t src, dma_addr_t dst, size_t buf_len,
	size_t period_len, gfp_t gfp)
{
	size_t len = buf_len, total_len;
	size_t frame;
	struct bcm2835_desc *d;
	struct bcm2835_cb_entry *cb_entry;
	struct bcm2835_dma_cb *control_block;

	if (!frames)
		return NULL;

	/* allocate and setup the descriptor. */
	d = kzalloc(struct_size(d, cb_list, frames), gfp);
	if (!d)
		return NULL;

	d->c = c;
	d->dir = direction;
	d->cyclic = cyclic;

	/*
	 * Iterate over all frames, create a control block
	 * for each frame and link them together.
	 */
	for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) {
		cb_entry = &d->cb_list[frame];
		cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
					      &cb_entry->paddr);
		if (!cb_entry->cb)
			goto error_cb;

		/* fill in the control block */
		control_block = cb_entry->cb;
		if (c->is_40bit_channel) {
			struct bcm2711_dma40_scb *scb =
				(struct bcm2711_dma40_scb *)control_block;

			scb->ti = to_bcm2711_ti(info);
			scb->src = lower_32_bits(src);
			scb->srci = upper_32_bits(src) | to_bcm2711_srci(info);
			scb->dst = lower_32_bits(dst);
			scb->dsti = upper_32_bits(dst) | to_bcm2711_dsti(info);
			scb->next_cb = 0;
		} else {
			control_block->info = info;
			control_block->src = src;
			control_block->dst = dst;
			if (c->is_2712)
				control_block->stride = (upper_32_bits(dst) << 8) |
							upper_32_bits(src);
			else
				control_block->stride = 0;
			control_block->next = 0;
		}

		/* set up length in control_block if requested */
		if (buf_len) {
			/* calculate length honoring period_length */
			bcm2835_dma_create_cb_set_length(
				c, control_block,
				len, period_len, &total_len,
				cyclic ? finalextrainfo : 0);

			/* calculate new remaining length */
			if (c->is_40bit_channel)
				len -= ((struct bcm2711_dma40_scb *)control_block)->len;
			else
				len -= control_block->length;
		}

		/* link this to the last controlblock */
		if (frame && c->is_40bit_channel)
			((struct bcm2711_dma40_scb *)
			 d->cb_list[frame - 1].cb)->next_cb =
				to_40bit_cbaddr(cb_entry->paddr);
		if (frame && !c->is_40bit_channel)
			d->cb_list[frame - 1].cb->next = c->is_2712 ?
				to_40bit_cbaddr(cb_entry->paddr) : cb_entry->paddr;

		/* update src and dst and length */
		if (src && (info & BCM2835_DMA_S_INC)) {
			if (c->is_40bit_channel)
				src += ((struct bcm2711_dma40_scb *)control_block)->len;
			else
				src += control_block->length;
		}

		if (dst && (info & BCM2835_DMA_D_INC)) {
			if (c->is_40bit_channel)
				dst += ((struct bcm2711_dma40_scb *)control_block)->len;
			else
				dst += control_block->length;
		}

		/* Length of total transfer */
		if (c->is_40bit_channel)
			d->size += ((struct bcm2711_dma40_scb *)control_block)->len;
		else
			d->size += control_block->length;
	}

	/* the last frame requires extra flags */
	if (c->is_40bit_channel) {
		struct bcm2711_dma40_scb *scb =
			(struct bcm2711_dma40_scb *)d->cb_list[d->frames - 1].cb;

		scb->ti |= to_bcm2711_ti(finalextrainfo);
	} else {
		d->cb_list[d->frames - 1].cb->info |= finalextrainfo;
	}

	/* detect a size mismatch */
	if (buf_len && (d->size != buf_len))
		goto error_cb;

	return d;
error_cb:
	bcm2835_dma_free_cb_chain(d);

	return NULL;
}

static void bcm2835_dma_fill_cb_chain_with_sg(
	struct bcm2835_chan *c,
	enum dma_transfer_direction direction,
	struct bcm2835_cb_entry *cb,
	struct scatterlist *sgl,
	unsigned int sg_len)
{
	size_t len, max_len;
	unsigned int i;
	dma_addr_t addr;
	struct scatterlist *sgent;

	max_len = bcm2835_dma_max_frame_length(c);
	for_each_sg(sgl, sgent, sg_len, i) {
		if (c->is_40bit_channel) {
			struct bcm2711_dma40_scb *scb;

			for (addr = sg_dma_address(sgent),
			     len = sg_dma_len(sgent);
			     len > 0;
			     addr += scb->len, len -= scb->len, cb++) {
				scb = (struct bcm2711_dma40_scb *)cb->cb;
				if (direction == DMA_DEV_TO_MEM) {
					scb->dst = lower_32_bits(addr);
					scb->dsti = upper_32_bits(addr) | BCM2711_DMA40_INC;
				} else {
					scb->src = lower_32_bits(addr);
					scb->srci = upper_32_bits(addr) | BCM2711_DMA40_INC;
				}
				scb->len = min(len, max_len);
			}
		} else {
			for (addr = sg_dma_address(sgent),
			     len = sg_dma_len(sgent);
			     len > 0;
			     addr += cb->cb->length, len -= cb->cb->length,
			     cb++) {
				if (direction == DMA_DEV_TO_MEM)
					cb->cb->dst = addr;
				else
					cb->cb->src = addr;

				cb->cb->length = min(len, max_len);
			}
		}
	}
}
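
/*
 * Quiesce a channel before it is reused or torn down: pause it, wait for any
 * outstanding AXI transactions to drain, and put CS back into its default or
 * reset state. A stuck peripheral is reported unless DREQ-paced transfers
 * make the stall expected.
 */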
static void bcm2835_dma_abort(struct bcm2835_chan *c)
{
	void __iomem *chan_base = c->chan_base;
	long int timeout = 10000;

	if (c->is_40bit_channel) {
		/*
		 * A zero control block address means the channel is idle.
		 * (The ACTIVE flag in the CS register is not a reliable indicator.)
		 */
		if (!readl(chan_base + BCM2711_DMA40_CB))
			return;

		/* Pause the current DMA */
		writel(readl(chan_base + BCM2711_DMA40_CS) & ~BCM2711_DMA40_ACTIVE,
		       chan_base + BCM2711_DMA40_CS);

		/* wait for outstanding transactions to complete */
		while ((readl(chan_base + BCM2711_DMA40_CS) & BCM2711_DMA40_TRANSACTIONS) &&
		       --timeout)
			cpu_relax();

		/* Peripheral might be stuck and fail to complete */
		if (!timeout)
			dev_err(c->vc.chan.device->dev,
				"failed to complete pause on dma %d (CS:%08x)\n", c->ch,
				readl(chan_base + BCM2711_DMA40_CS));

		/* Set CS back to default state */
		writel(BCM2711_DMA40_PROT, chan_base + BCM2711_DMA40_CS);

		/* Reset the DMA */
		writel(readl(chan_base + BCM2711_DMA40_DEBUG) | BCM2711_DMA40_DEBUG_RESET,
		       chan_base + BCM2711_DMA40_DEBUG);
	} else {
		/*
		 * A zero control block address means the channel is idle.
		 * (The ACTIVE flag in the CS register is not a reliable indicator.)
		 */
		if (!readl(chan_base + BCM2835_DMA_ADDR))
			return;

		/* We need to clear the next DMA block pending */
		writel(0, chan_base + BCM2835_DMA_NEXTCB);

		/* Abort the DMA, which needs to be enabled to complete */
		writel(readl(chan_base + BCM2835_DMA_CS) | BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
		       chan_base + BCM2835_DMA_CS);

		/* wait for DMA to be aborted */
		while ((readl(chan_base + BCM2835_DMA_CS) & BCM2835_DMA_ABORT) && --timeout)
			cpu_relax();

		/* Write 0 to the active bit - Pause the DMA */
		writel(readl(chan_base + BCM2835_DMA_CS) & ~BCM2835_DMA_ACTIVE,
		       chan_base + BCM2835_DMA_CS);

		/*
		 * Peripheral might be stuck and fail to complete. This is
		 * expected when DREQs are enabled but not asserted, so only
		 * report an error in the non-DREQ case.
		 */
		if (!timeout && !(readl(chan_base + BCM2835_DMA_TI) &
				  (BCM2835_DMA_S_DREQ | BCM2835_DMA_D_DREQ)))
			dev_err(c->vc.chan.device->dev,
				"failed to complete pause on dma %d (CS:%08x)\n", c->ch,
				readl(chan_base + BCM2835_DMA_CS));

		/* Set CS back to default state and reset the DMA */
		writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
	}
}

static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct bcm2835_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_bcm2835_dma_desc(&vd->tx);

	if (c->is_40bit_channel) {
		writel(to_40bit_cbaddr(d->cb_list[0].paddr),
		       c->chan_base + BCM2711_DMA40_CB);
		writel(BCM2711_DMA40_ACTIVE | BCM2711_DMA40_PROT | BCM2711_DMA40_CS_FLAGS(c->dreq),
		       c->chan_base + BCM2711_DMA40_CS);
	} else {
		writel(BIT(31), c->chan_base + BCM2835_DMA_CS);

		writel(c->is_2712 ? to_40bit_cbaddr(d->cb_list[0].paddr) : d->cb_list[0].paddr,
		       c->chan_base + BCM2835_DMA_ADDR);
		writel(BCM2835_DMA_ACTIVE | BCM2835_DMA_CS_FLAGS(c->dreq),
		       c->chan_base + BCM2835_DMA_CS);
	}
}

static irqreturn_t bcm2835_dma_callback(int irq, void *data)
{
	struct bcm2835_chan *c = data;
	struct bcm2835_desc *d;
	unsigned long flags;

	/* check the shared interrupt */
	if (c->irq_flags & IRQF_SHARED) {
		/* check if the interrupt is enabled */
		flags = readl(c->chan_base + BCM2835_DMA_CS);
		/* if not set then we are not the reason for the irq */
		if (!(flags & BCM2835_DMA_INT))
			return IRQ_NONE;
	}

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Clear the INT flag to receive further interrupts. Keep the channel
	 * active in case the descriptor is cyclic or in case the client has
	 * already terminated the descriptor and issued a new one. (May happen
	 * if this IRQ handler is threaded.) If the channel is finished, it
	 * will remain idle despite the ACTIVE flag being set.
	 */
	if (c->is_40bit_channel)
		writel(BCM2835_DMA_INT | BCM2711_DMA40_ACTIVE | BCM2711_DMA40_PROT |
		       BCM2711_DMA40_CS_FLAGS(c->dreq),
		       c->chan_base + BCM2711_DMA40_CS);
	else
		writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE | BCM2835_DMA_CS_FLAGS(c->dreq),
		       c->chan_base + BCM2835_DMA_CS);

	d = c->desc;

	if (d) {
		if (d->cyclic) {
			/* call the cyclic callback */
			vchan_cyclic_callback(&d->vd);
		} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
			vchan_cookie_complete(&c->desc->vd);
			bcm2835_dma_start_desc(c);
		}
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct device *dev = c->vc.chan.device->dev;

	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);

	/*
	 * Control blocks are 256 bit in length and must start at a 256 bit
	 * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1).
	 */
	c->cb_pool = dma_pool_create(dev_name(dev), dev,
				     sizeof(struct bcm2835_dma_cb), 32, 0);
	if (!c->cb_pool) {
		dev_err(dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return request_irq(c->irq_number, bcm2835_dma_callback,
			   c->irq_flags, "DMA IRQ", c);
}

static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	free_irq(c->irq_number, c);
	dma_pool_destroy(c->cb_pool);

	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}

static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
{
	return d->size;
}

static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
{
	unsigned int i;
	size_t size;

	if (d->c->is_40bit_channel) {
		for (size = i = 0; i < d->frames; i++) {
			struct bcm2711_dma40_scb *control_block =
				(struct bcm2711_dma40_scb *)d->cb_list[i].cb;
			size_t this_size = control_block->len;
			dma_addr_t dma;

			if (d->dir == DMA_DEV_TO_MEM)
				dma = control_block->dst;
			else
				dma = control_block->src;

			if (size)
				size += this_size;
			else if (addr >= dma && addr < dma + this_size)
				size += dma + this_size - addr;
		}
	} else {
		for (size = i = 0; i < d->frames; i++) {
			struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
			size_t this_size = control_block->length;
			dma_addr_t dma;

			if (d->dir == DMA_DEV_TO_MEM)
				dma = control_block->dst;
			else
				dma = control_block->src;

			if (size)
				size += this_size;
			else if (addr >= dma && addr < dma + this_size)
				size += dma + this_size - addr;
		}
	}

	return size;
}

static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue =
			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct bcm2835_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV && c->is_40bit_channel) {
			u64 lo_bits, hi_bits;

			lo_bits = readl(c->chan_base + BCM2711_DMA40_SRC);
			hi_bits = readl(c->chan_base + BCM2711_DMA40_SRCI) & 0xff;
			pos = (hi_bits << 32) | lo_bits;
		} else if (d->dir == DMA_MEM_TO_DEV && !c->is_40bit_channel) {
			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
		} else if (d->dir == DMA_DEV_TO_MEM && c->is_40bit_channel) {
			u64 lo_bits, hi_bits;

			lo_bits = readl(c->chan_base + BCM2711_DMA40_DEST);
			hi_bits = readl(c->chan_base + BCM2711_DMA40_DESTI) & 0xff;
			pos = (hi_bits << 32) | lo_bits;
		} else if (d->dir == DMA_DEV_TO_MEM && !c->is_40bit_channel) {
			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
		} else {
			pos = 0;
		}

		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void bcm2835_dma_issue_pending(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		bcm2835_dma_start_desc(c);

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC |
		   WAIT_RESP(c->dreq) | WIDE_SOURCE(c->dreq) |
		   WIDE_DEST(c->dreq) | BURST_LENGTH(c->dreq);
	u32 extra = BCM2835_DMA_INT_EN;
	size_t max_len = bcm2835_dma_max_frame_length(c);
	size_t frames;

	/* if src, dst or len is not given return with an error */
	if (!src || !dst || !len)
		return NULL;

	/* calculate number of frames */
	frames = bcm2835_dma_frames_for_length(len, max_len);

	/* allocate the CB chain - this also fills in the pointers */
	d = bcm2835_dma_create_cb_chain(c, DMA_MEM_TO_MEM, false,
					info, extra, frames,
					src, dst, len, 0, GFP_KERNEL);
	if (!d)
		return NULL;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
	struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	dma_addr_t src = 0, dst = 0;
	u32 info = WAIT_RESP(c->dreq) | WIDE_SOURCE(c->dreq) |
		   WIDE_DEST(c->dreq) | BURST_LENGTH(c->dreq);
	u32 extra = BCM2835_DMA_INT_EN;
	size_t frames;

	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev,
			"%s: bad direction?\n", __func__);
		return NULL;
	}

	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);

	if (direction == DMA_DEV_TO_MEM) {
		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		src = phys_to_dma(chan->device->dev, c->cfg.src_addr);
		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
	} else {
		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		dst = phys_to_dma(chan->device->dev, c->cfg.dst_addr);
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
	}

	/* count frames in sg list */
	frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len);

	/* allocate the CB chain */
	d = bcm2835_dma_create_cb_chain(c, direction, false,
					info, extra,
					frames, src, dst, 0, 0,
					GFP_NOWAIT);
	if (!d)
		return NULL;

	/* fill in frames with scatterlist pointers */
	bcm2835_dma_fill_cb_chain_with_sg(c, direction, d->cb_list,
					  sgl, sg_len);

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
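
/*
 * Cyclic transfers (typically ALSA audio) are built as one control-block
 * chain covering the whole buffer, with the interrupt-enable bit applied at
 * every period boundary and the final control block linked back to the first
 * so the hardware loops until the channel is terminated.
 */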
static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct bcm2835_dmadev *od = to_bcm2835_dma_dev(chan->device);
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	dma_addr_t src, dst;
	u32 info = WAIT_RESP(c->dreq) | WIDE_SOURCE(c->dreq) |
		   WIDE_DEST(c->dreq) | BURST_LENGTH(c->dreq);
	u32 extra = 0;
	size_t max_len = bcm2835_dma_max_frame_length(c);
	size_t frames;

	/* Grab configuration */
	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!buf_len) {
		dev_err(chan->device->dev,
			"%s: bad buffer length (= 0)\n", __func__);
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		extra |= BCM2835_DMA_INT_EN;
	else
		period_len = buf_len;

	/*
	 * warn if buf_len is not a multiple of period_len - this may lead
	 * to unexpected latencies for interrupts and thus audible clicks
	 */
	if (buf_len % period_len)
		dev_warn_once(chan->device->dev,
			      "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n",
			      __func__, buf_len, period_len);

	/* Setup DREQ channel */
	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);

	if (direction == DMA_DEV_TO_MEM) {
		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		src = phys_to_dma(chan->device->dev, c->cfg.src_addr);
		dst = buf_addr;
		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
	} else {
		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		dst = phys_to_dma(chan->device->dev, c->cfg.dst_addr);
		src = buf_addr;
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;

		/* non-lite channels can write zeroes w/o accessing memory */
		if (buf_addr == od->zero_page && !c->is_lite_channel)
			info |= BCM2835_DMA_S_IGNORE;
	}

	/* calculate number of frames */
	frames = /* number of periods */
		 DIV_ROUND_UP(buf_len, period_len) *
		 /* number of frames per period */
		 bcm2835_dma_frames_for_length(period_len, max_len);

	/*
	 * allocate the CB chain
	 * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
	 * implementation calls prep_dma_cyclic with interrupts disabled.
	 */
	d = bcm2835_dma_create_cb_chain(c, direction, true,
					info, extra,
					frames, src, dst, buf_len,
					period_len, GFP_NOWAIT);
	if (!d)
		return NULL;

	/* wrap around into a loop */
	if (c->is_40bit_channel)
		((struct bcm2711_dma40_scb *)
		 d->cb_list[frames - 1].cb)->next_cb =
			to_40bit_cbaddr(d->cb_list[0].paddr);
	else
		d->cb_list[d->frames - 1].cb->next = c->is_2712 ?
			to_40bit_cbaddr(d->cb_list[0].paddr) : d->cb_list[0].paddr;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int bcm2835_dma_slave_config(struct dma_chan *chan,
				    struct dma_slave_config *cfg)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	c->cfg = *cfg;

	return 0;
}

static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* stop DMA activity */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		bcm2835_dma_abort(c);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void bcm2835_dma_synchronize(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_synchronize(&c->vc);
}

static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
				 int irq, unsigned int irq_flags)
{
	struct bcm2835_chan *c;

	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->vc.desc_free = bcm2835_dma_desc_free;
	vchan_init(&c->vc, &d->ddev);

	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
	c->ch = chan_id;
	c->irq_number = irq;
	c->irq_flags = irq_flags;

	/* check for 40bit and lite channels */
	if (d->cfg_data->chan_40bit_mask & BIT(chan_id))
		c->is_40bit_channel = true;
	else if (readl(c->chan_base + BCM2835_DMA_DEBUG) &
		 BCM2835_DMA_DEBUG_LITE)
		c->is_lite_channel = true;
	if (d->cfg_data->dma_mask == DMA_BIT_MASK(40))
		c->is_2712 = true;

	return 0;
}

static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
	struct bcm2835_chan *c, *next;

	list_for_each_entry_safe(c, next, &od->ddev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
			     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
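
/*
 * bcm2711_dma40_memcpy_init() lets other drivers check that the dedicated
 * 40-bit memcpy channel has been claimed and its scratch control block
 * allocated; -EPROBE_DEFER is returned while the DMA controller itself has
 * not probed yet.
 */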
int bcm2711_dma40_memcpy_init(void)
{
	if (!memcpy_parent)
		return -EPROBE_DEFER;

	if (!memcpy_chan)
		return -EINVAL;

	if (!memcpy_scb)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(bcm2711_dma40_memcpy_init);

void bcm2711_dma40_memcpy(dma_addr_t dst, dma_addr_t src, size_t size)
{
	struct bcm2711_dma40_scb *scb = memcpy_scb;
	unsigned long flags;

	if (!scb) {
		pr_err("bcm2711_dma40_memcpy not initialised!\n");
		return;
	}

	spin_lock_irqsave(&memcpy_lock, flags);

	scb->ti = 0;
	scb->src = lower_32_bits(src);
	scb->srci = upper_32_bits(src) | BCM2711_DMA40_MEMCPY_XFER_INFO;
	scb->dst = lower_32_bits(dst);
	scb->dsti = upper_32_bits(dst) | BCM2711_DMA40_MEMCPY_XFER_INFO;
	scb->len = size;
	scb->next_cb = 0;

	writel(to_40bit_cbaddr(memcpy_scb_dma), memcpy_chan + BCM2711_DMA40_CB);
	writel(BCM2711_DMA40_MEMCPY_FLAGS | BCM2711_DMA40_ACTIVE | BCM2711_DMA40_PROT,
	       memcpy_chan + BCM2711_DMA40_CS);

	/* Poll for completion */
	while (!(readl(memcpy_chan + BCM2711_DMA40_CS) & BCM2711_DMA40_END))
		cpu_relax();

	writel(BCM2711_DMA40_END | BCM2711_DMA40_PROT, memcpy_chan + BCM2711_DMA40_CS);

	spin_unlock_irqrestore(&memcpy_lock, flags);
}
EXPORT_SYMBOL(bcm2711_dma40_memcpy);
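
/*
 * Minimal usage sketch for the helper above (assumes the caller already holds
 * valid bus addresses for its buffers; "my_dst"/"my_src" are hypothetical):
 *
 *	if (bcm2711_dma40_memcpy_init() == 0)
 *		bcm2711_dma40_memcpy(my_dst, my_src, len);
 *
 * The copy is synchronous and busy-waits under a spinlock with interrupts
 * disabled, so callers should keep transfers short.
 */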

static const struct of_device_id bcm2835_dma_of_match[] = {
	{ .compatible = "brcm,bcm2835-dma", .data = &bcm2835_dma_cfg },
	{ .compatible = "brcm,bcm2711-dma", .data = &bcm2711_dma_cfg },
	{ .compatible = "brcm,bcm2712-dma", .data = &bcm2712_dma_cfg },
	{},
};
MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
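
/*
 * The DMA specifier from a client's "dmas" property carries the DREQ number
 * (plus the optional "fake" flag bits defined above) in its first cell;
 * bcm2835_dma_xlate() below simply stores that cell in bcm2835_chan::dreq.
 * A client binding would look roughly like this (hypothetical DREQ value):
 *
 *	dmas = <&dma 2>;
 *	dma-names = "rx";
 */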
static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
					  struct of_dma *ofdma)
{
	struct bcm2835_dmadev *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->ddev);
	if (!chan)
		return NULL;

	/* Set DREQ from param */
	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];

	return chan;
}

static int bcm2835_dma_probe(struct platform_device *pdev)
{
	const struct bcm2835_dma_cfg_data *cfg_data;
	const struct of_device_id *of_id;
	struct bcm2835_dmadev *od;
	struct resource *res;
	void __iomem *base;
	int rc;
	int i, j;
	int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1];
	unsigned int irq_flags;
	uint32_t chans_available;
	char chan_name[BCM2835_DMA_CHAN_NAME_SIZE];
	int chan_count, chan_start, chan_end;

	of_id = of_match_node(bcm2835_dma_of_match, pdev->dev.of_node);
	if (!of_id) {
		dev_err(&pdev->dev, "Failed to match compatible string\n");
		return -EINVAL;
	}

	cfg_data = of_id->data;

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	rc = dma_set_mask_and_coherent(&pdev->dev, cfg_data->dma_mask);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set DMA mask\n");
		return rc;
	}

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* The set of channels can be split across multiple instances. */
	chan_start = ((u32)(uintptr_t)base / BCM2835_DMA_CHAN_SIZE) & 0xf;
	base -= BCM2835_DMA_CHAN(chan_start);
	chan_count = resource_size(res) / BCM2835_DMA_CHAN_SIZE;
	chan_end = min(chan_start + chan_count,
		       BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1);

	od->base = base;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
	od->ddev.device_tx_status = bcm2835_dma_tx_status;
	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
	od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
	od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
	od->ddev.device_config = bcm2835_dma_slave_config;
	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
	od->ddev.device_synchronize = bcm2835_dma_synchronize;
	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			      BIT(DMA_MEM_TO_MEM);
	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.descriptor_reuse = true;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);

	platform_set_drvdata(pdev, od);

	od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
					   PAGE_SIZE, DMA_TO_DEVICE,
					   DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
		dev_err(&pdev->dev, "Failed to map zero page\n");
		return -ENOMEM;
	}

	od->cfg_data = cfg_data;

	/* Request DMA channel mask from device tree */
	if (of_property_read_u32(pdev->dev.of_node,
				 "brcm,dma-channel-mask",
				 &chans_available)) {
		dev_err(&pdev->dev, "Failed to get channel mask\n");
		rc = -EINVAL;
		goto err_no_dma;
	}

#ifdef CONFIG_DMA_BCM2708
	/* One channel is reserved for the legacy API */
	if (chans_available & BCM2835_DMA_BULK_MASK) {
		rc = bcm_dmaman_probe(pdev, base,
				      chans_available & BCM2835_DMA_BULK_MASK);
		if (rc)
			dev_err(&pdev->dev,
				"Failed to initialize the legacy API\n");

		chans_available &= ~BCM2835_DMA_BULK_MASK;
	}
#endif

	/* And possibly one for the 40-bit DMA memcpy API */
	if (chans_available & od->cfg_data->chan_40bit_mask &
	    BIT(BCM2711_DMA_MEMCPY_CHAN)) {
		memcpy_parent = od;
		memcpy_chan = BCM2835_DMA_CHANIO(base, BCM2711_DMA_MEMCPY_CHAN);
		memcpy_scb = dma_alloc_coherent(memcpy_parent->ddev.dev,
						sizeof(*memcpy_scb),
						&memcpy_scb_dma, GFP_KERNEL);
		if (!memcpy_scb)
			dev_warn(&pdev->dev,
				 "Failed to allocate memcpy scb\n");

		chans_available &= ~BIT(BCM2711_DMA_MEMCPY_CHAN);
	}

	/* get irqs for each channel that we support */
	for (i = chan_start; i < chan_end; i++) {
		/* skip masked out channels */
		if (!(chans_available & (1 << i))) {
			irq[i] = -1;
			continue;
		}

		/* get the named irq */
		snprintf(chan_name, sizeof(chan_name), "dma%i", i);
		irq[i] = platform_get_irq_byname(pdev, chan_name);
		if (irq[i] >= 0)
			continue;

		/* legacy device tree case handling */
		dev_warn_once(&pdev->dev,
			      "missing interrupt-names property in device tree - legacy interpretation is used\n");
		/*
		 * in case of channel >= 11
		 * use the 11th interrupt and that is shared
		 */
		irq[i] = platform_get_irq(pdev, i < 11 ? i : 11);
	}

	/* get irqs for each channel */
	for (i = chan_start; i < chan_end; i++) {
		/* skip channels without irq */
		if (irq[i] < 0)
			continue;

		/*
		 * check if there are other channels that also use this irq
		 * FIXME: This will fail if interrupts are shared across
		 * instances
		 */
		irq_flags = 0;
		for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
			if ((i != j) && (irq[j] == irq[i])) {
				irq_flags = IRQF_SHARED;
				break;
			}

		/* initialize the channel */
		rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags);
		if (rc)
			goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", chan_count);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(pdev->dev.of_node,
					bcm2835_dma_xlate, od);
	if (rc) {
		dev_err(&pdev->dev, "Failed to register DMA controller\n");
		goto err_no_dma;
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to register slave DMA engine device: %d\n", rc);
		goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");

	return 0;

err_no_dma:
	bcm2835_dma_free(od);
	return rc;
}

static int bcm2835_dma_remove(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);

	bcm_dmaman_remove(pdev);
	dma_async_device_unregister(&od->ddev);
	if (memcpy_parent == od) {
		dma_free_coherent(&pdev->dev, sizeof(*memcpy_scb), memcpy_scb,
				  memcpy_scb_dma);
		memcpy_parent = NULL;
		memcpy_scb = NULL;
		memcpy_chan = NULL;
	}
	bcm2835_dma_free(od);

	return 0;
}

static struct platform_driver bcm2835_dma_driver = {
	.probe	= bcm2835_dma_probe,
	.remove	= bcm2835_dma_remove,
	.driver = {
		.name = "bcm2835-dma",
		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
	},
};

static int bcm2835_dma_init(void)
{
	return platform_driver_register(&bcm2835_dma_driver);
}

static void bcm2835_dma_exit(void)
{
	platform_driver_unregister(&bcm2835_dma_driver);
}

/*
 * Load after serial driver (arch_initcall) so we see the messages if it fails,
 * but before drivers (module_init) that need a DMA channel.
 */
subsys_initcall(bcm2835_dma_init);
module_exit(bcm2835_dma_exit);

MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL");