// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 */

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/bug.h>
#include <linux/reset.h>

#include "dmaengine.h"

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
#define PL330_MAX_BURST		16

#define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)
#define PL330_QUIRK_PERIPH_BURST	BIT(1)

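/*
 * Both quirks are opt-in from the device tree, via the property names
 * in the of_quirks[] table below. A minimal, illustrative node (the
 * label and unit address here are made up):
 *
 *	pdma0: dma-controller@12680000 {
 *		compatible = "arm,pl330", "arm,primecell";
 *		arm,pl330-broken-no-flushp;
 *		arm,pl330-periph-burst;
 *	};
 */
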
enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DSTCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
			   | PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))

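/*
 * Worked example of the macros above: with CCR programmed for 4-byte
 * beats (burst-size field = 2, so BRST_SIZE(ccr) = 4) and 16 beats per
 * burst (BRST_LEN(ccr) = 16), one burst moves 64 bytes, so
 * BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 16 = 64 bursts, and
 * BURST_TO_BYTE(64, ccr) = 4096 bytes again.
 */
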
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(KERN_CONT x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Delay for runtime PM autosuspend, ms */
#define PL330_AUTOSUSPEND_DELAY 20

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:11;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* The all xfers in the request were success. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct dma_pl330_desc;

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct dma_pl330_desc *desc;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Pause was called while descriptor was BUSY. Due to hardware
	 * limitations, only termination is possible for descriptors
	 * that have been paused.
	 */
	PAUSED,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	phys_addr_t fifo_addr;
	/* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
	dma_addr_t fifo_dma;
	enum dma_data_direction dir;
	struct dma_slave_config slave_config;

	/* for cyclic capability */
	bool cyclic;

	/* for runtime pm tracking */
	bool active;
};

struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;

	spinlock_t		lock;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
	int quirks;

	struct reset_control	*rstc;
	struct reset_control	*rstc_ocp;
};

static struct pl330_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk = "arm,pl330-broken-no-flushp",
		.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
	},
	{
		.quirk = "arm,pl330-periph-burst",
		.id = PL330_QUIRK_PERIPH_BURST,
	}
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	int bytes_requested;
	bool last;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

struct _xfer_spec {
	u32 ccr;
	struct dma_pl330_desc *desc;
};

static int pl330_config_write(struct dma_chan *chan,
			struct dma_slave_config *slave_config,
			enum dma_transfer_direction direction);

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	buf[2] = val;
	buf[3] = val >> 8;
	buf[4] = val >> 16;
	buf[5] = val >> 24;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

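/*
 * For illustration, following the byte assembly above:
 * _emit_MOV(0, buf, SAR, 0x20000000) produces the 6-byte instruction
 * { 0xbc, 0x00, 0x00, 0x00, 0x00, 0x20 } - opcode, destination
 * register index (SAR = 0), then the immediate in little-endian order.
 */
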
static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);
	buf[1] = chan & 0x7;
	buf[2] = addr;
	buf[3] = addr >> 8;
	buf[4] = addr >> 16;
	buf[5] = addr >> 24;

	return SZ_DMAGO;
}

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
		return;
	}

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = le32_to_cpu(*((__le32 *)&insn[2]));
	writel(val, regs + DBGINST1);

	/* Get going */
	writel(0, regs + DBGCMD);
}

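/*
 * DBGINST0/1 layout, as assembled above: bits [23:16] and [31:24] of
 * DBGINST0 carry the first two instruction bytes, bit 0 selects a
 * channel thread (1) vs the manager (0), and the channel number sits
 * at bit 8; DBGINST1 carries instruction bytes 2..5. Writing 0 to
 * DBGCMD then kicks off execution of the debug instruction.
 */
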
static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	u32 inten = readl(regs + INTEN);

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));

	/* clear the event */
	if (inten & (1 << thrd->ev))
		writel(1 << thrd->ev, regs + INTCLR);
	/* Stop generating interrupts for SEV */
	writel(inten & ~(1 << thrd->ev), regs + INTEN);
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct dma_pl330_desc *desc;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (thrd->req[idx].desc != NULL) {
		req = &thrd->req[idx];
	} else {
		idx = thrd->lstenq;
		if (thrd->req[idx].desc != NULL)
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req)
		return true;

	/* Return if req is running */
	if (idx == thrd->req_running)
		return true;

	desc = req->desc;

	ns = desc->rqcfg.nonsecure ? 1 : 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool pl330_start_thread(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)
		fallthrough;

	case PL330_STATE_FAULTING:
		_stop(thrd);
		fallthrough;

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)
		fallthrough;

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static u32 _emit_load(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_MEM_TO_DEV:
		off += _emit_LD(dry_run, &buf[off], cond);
		break;

	case DMA_DEV_TO_MEM:
		if (cond == ALWAYS) {
			off += _emit_LDP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_LDP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_LDP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_DEV_TO_MEM:
		off += _emit_ST(dry_run, &buf[off], cond);
		break;

	case DMA_MEM_TO_DEV:
		if (cond == ALWAYS) {
			off += _emit_STP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_STP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_STP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline int _ldst_peripheral(struct pl330_dmac *pl330,
				 unsigned dry_run, u8 buf[],
				 const struct _xfer_spec *pxs, int cyc,
				 enum pl330_cond cond)
{
	int off = 0;

	/*
	 * do FLUSHP at beginning to clear any stale dma requests before the
	 * first WFP.
	 */
	if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
		off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
	}

	return off;
}

static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE;

	if (pl330->quirks & PL330_QUIRK_PERIPH_BURST)
		cond = BURST;

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
			cond);
		break;

	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

/*
 * Only unaligned transfers leave dregs (a residue shorter than one
 * full burst), so transfer the dregs with a reduced-size burst for
 * mem-to-mem, mem-to-dev and dev-to-mem.
 */
static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int transfer_length)
{
	int off = 0;
	int dregs_ccr;

	if (transfer_length == 0)
		return off;

	/*
	 * dregs_len = (total bytes - BURST_TO_BYTE(bursts, ccr)) /
	 *             BRST_SIZE(ccr)
	 * the dregs len must be smaller than burst len,
	 * so, for higher efficiency, we can modify CCR
	 * to use a reduced size burst len for the dregs.
	 */
	dregs_ccr = pxs->ccr;
	dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
		(0xf << CC_DSTBRSTLEN_SHFT));
	dregs_ccr |= (((transfer_length - 1) & 0xf) <<
		CC_SRCBRSTLEN_SHFT);
	dregs_ccr |= (((transfer_length - 1) & 0xf) <<
		CC_DSTBRSTLEN_SHFT);

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1,
					BURST);
		break;

	case DMA_MEM_TO_MEM:
		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
		off += _ldst_memtomem(dry_run, &buf[off], pxs, 1);
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

/* Returns bytes consumed and updates bursts */
static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	if (*bursts == 1)
		return _bursts(pl330, dry_run, buf, pxs, 1);

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(pl330, 1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}

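/*
 * Worked example of the loop math above: for *bursts = 100000
 * (>= 256*256), both loop counters max out at 256 and cyc becomes
 * 100000/256/256 = 1, so one pass covers 256 * 256 * 1 = 65536 bursts.
 * _setup_loops() below then calls _loop() again with the remaining
 * 34464 bursts until everything is consumed.
 */
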
static inline int _setup_loops(struct pl330_dmac *pl330,
			       unsigned dry_run, u8 buf[],
			       const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) /
		BRST_SIZE(ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(pl330, dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}
	off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs);

	return off;
}

static inline int _setup_xfer(struct pl330_dmac *pl330,
			      unsigned dry_run, u8 buf[],
			      const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(pl330, dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
		      struct pl330_thread *thrd, unsigned index,
		      struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	off += _setup_xfer(pl330, dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}

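/*
 * Example CCR, derived from the field assembly above: a nonsecure,
 * incrementing 4-byte/16-beat transfer (src_inc = dst_inc = 1,
 * brst_size = 2, brst_len = 16) sets CC_SRCINC | CC_DSTINC |
 * CC_SRCNS | CC_DSTNS, puts 0xf in both burst-length fields and 2 in
 * both burst-size fields; cache control and swap stay at their
 * defaults (CCTRL0, SWAP_NO).
 */
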
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd,
	struct dma_pl330_desc *desc)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	switch (desc->rqtype) {
	case DMA_MEM_TO_DEV:
		break;

	case DMA_DEV_TO_MEM:
		break;

	case DMA_MEM_TO_MEM:
		break;

	default:
		return -ENOTSUPP;
	}

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (desc->rqtype != DMA_MEM_TO_MEM &&
	    desc->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, desc->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		desc->rqcfg.nonsecure = 0;
	else
		desc->rqcfg.nonsecure = 1;

	ccr = _prepare_ccr(&desc->rqcfg);

	idx = thrd->req[0].desc == NULL ? 0 : 1;

	xs.ccr = ccr;
	xs.desc = desc;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(pl330, 1, thrd, idx, &xs);

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
			__func__, __LINE__, ret, pl330->mcbufsz / 2);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].desc = desc;
	_setup_req(pl330, 0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
{
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (!desc)
		return;

	pch = desc->pchan;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static void pl330_dotask(struct tasklet_struct *t)
{
	struct pl330_dmac *pl330 = from_tasklet(pl330, t, tasks);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].desc = NULL;
			thrd->req[1].desc = NULL;
			thrd->req_running = -1;

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *descdone;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	regs = pl330->base;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pl330->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pl330->ddma.dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
			__LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			descdone = thrd->req[active].desc;
			thrd->req[active].desc = NULL;

			thrd->req_running = -1;

			/* Get going again ASAP */
			pl330_start_thread(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&descdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		descdone = list_first_entry(&pl330->req_done,
					    struct dma_pl330_desc, rqd);
		list_del(&descdone->rqd);
		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	int ev;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;
	int chans, i;

	if (pl330->state == DYING)
		return NULL;

	chans = pl330->pcfg.num_chan;

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].desc = NULL;
				thrd->req[1].desc = NULL;
				thrd->req_running = -1;
				break;
			}
		}
		thrd = NULL;
	}

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);

	_free_event(thrd, thrd->ev);
	thrd->free = true;
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
}

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].desc = NULL;

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].desc = NULL;

	thrd->req_running = -1;
}

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kcalloc(1 + chans, sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}

static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL,
				DMA_ATTR_PRIVILEGED);
	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_attrs(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus,
				DMA_ATTR_PRIVILEGED);
		return ret;
	}

	return 0;
}

static int pl330_add(struct pl330_dmac *pl330)
{
	int i, ret;

	/* Check if we can handle this DMAC */
	if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
			pl330->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pl330);

	if (pl330->pcfg.num_events == 0) {
		dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pl330->mcbufsz)
		pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pl330->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
		return ret;
	}

	tasklet_setup(&pl330->tasks, pl330_dotask);

	pl330->state = INIT;

	return 0;
}

static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < pl330->pcfg.num_chan; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel(thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}

static void pl330_del(struct pl330_dmac *pl330)
{
	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_threads(pl330);

	dma_free_attrs(pl330->ddma.dev,
		pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
		pl330->mcode_bus, DMA_ATTR_PRIVILEGED);
}

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY || desc->status == PAUSED)
			continue;

		ret = pl330_submit_req(pch->thread, desc);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(struct tasklet_struct *t)
{
	struct dma_pl330_chan *pch = from_tasklet(pch, t, task);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	bool power_down = false;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	if (list_empty(&pch->work_list)) {
		spin_lock(&pch->thread->dmac->lock);
		_stop(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
		power_down = true;
		pch->active = false;
	} else {
		/* Make sure the PL330 Channel thread is active */
		spin_lock(&pch->thread->dmac->lock);
		pl330_start_thread(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
	}

	while (!list_empty(&pch->completed_list)) {
		struct dmaengine_desc_callback cb;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		dmaengine_desc_get_callback(&desc->txd, &cb);

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
			if (power_down) {
				pch->active = true;
				spin_lock(&pch->thread->dmac->lock);
				pl330_start_thread(pch->thread);
				spin_unlock(&pch->thread->dmac->lock);
				power_down = false;
			}
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&pch->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);

	/* If work list empty, power down */
	if (power_down) {
		pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
		pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	}
}

static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct pl330_dmac *pl330 = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pl330)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pl330->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
}

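/*
 * The single xlate argument above is the peripheral request line. A
 * client node would reference it along these lines (labels and line
 * numbers are illustrative only):
 *
 *	uart0: serial@12c00000 {
 *		...
 *		dmas = <&pdma0 13>, <&pdma0 12>;
 *		dma-names = "tx", "rx";
 *	};
 */
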
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pl330->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->thread = pl330_request_channel(pl330);
	if (!pch->thread) {
		spin_unlock_irqrestore(&pl330->lock, flags);
		return -ENOMEM;
	}

	tasklet_setup(&pch->task, pl330_tasklet);

	spin_unlock_irqrestore(&pl330->lock, flags);

	return 1;
}

/*
 * We need the data direction between the DMAC (the dma-mapping "device") and
 * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing!
 */
static enum dma_data_direction
pl330_dma_slave_map_dir(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:
		return DMA_FROM_DEVICE;
	case DMA_DEV_TO_MEM:
		return DMA_TO_DEVICE;
	case DMA_DEV_TO_DEV:
		return DMA_BIDIRECTIONAL;
	default:
		return DMA_NONE;
	}
}

static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch)
{
	if (pch->dir != DMA_NONE)
		dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma,
				   1 << pch->burst_sz, pch->dir, 0);
	pch->dir = DMA_NONE;
}

static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
				  enum dma_transfer_direction dir)
{
	struct device *dev = pch->chan.device->dev;
	enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir);

	/* Already mapped for this config? */
	if (pch->dir == dma_dir)
		return true;

	pl330_unprep_slave_fifo(pch);
	pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr,
					 1 << pch->burst_sz, dma_dir, 0);
	if (dma_mapping_error(dev, pch->fifo_dma))
		return false;

	pch->dir = dma_dir;
	return true;
}

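/*
 * Note on the mapping above: the FIFO is MMIO, not RAM, hence
 * dma_map_resource() rather than dma_map_single(). Only one FIFO-width
 * beat (1 << burst_sz bytes) is mapped, e.g. 4 bytes for a peripheral
 * with a 32-bit wide data register.
 */
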
static int fixup_burst_len(int max_burst_len, int quirks)
{
	if (max_burst_len > PL330_MAX_BURST)
		return PL330_MAX_BURST;
	else if (max_burst_len < 1)
		return 1;
	else
		return max_burst_len;
}

static int pl330_config_write(struct dma_chan *chan,
			struct dma_slave_config *slave_config,
			enum dma_transfer_direction direction)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	pl330_unprep_slave_fifo(pch);
	if (direction == DMA_MEM_TO_DEV) {
		if (slave_config->dst_addr)
			pch->fifo_addr = slave_config->dst_addr;
		if (slave_config->dst_addr_width)
			pch->burst_sz = __ffs(slave_config->dst_addr_width);
		pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
			pch->dmac->quirks);
	} else if (direction == DMA_DEV_TO_MEM) {
		if (slave_config->src_addr)
			pch->fifo_addr = slave_config->src_addr;
		if (slave_config->src_addr_width)
			pch->burst_sz = __ffs(slave_config->src_addr_width);
		pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
			pch->dmac->quirks);
	}

	return 0;
}

static int pl330_config(struct dma_chan *chan,
			struct dma_slave_config *slave_config)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	memcpy(&pch->slave_config, slave_config, sizeof(*slave_config));

	return 0;
}

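/*
 * Sketch of how a client driver would exercise the path above; the
 * address and widths here are hypothetical:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * The config is only cached here; pl330_config_write() applies it when
 * a transfer is actually prepared.
 */
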
static int pl330_terminate_all(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct pl330_dmac *pl330 = pch->dmac;
	bool power_down = false;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	spin_lock(&pl330->lock);
	_stop(pch->thread);
	pch->thread->req[0].desc = NULL;
	pch->thread->req[1].desc = NULL;
	pch->thread->req_running = -1;
	spin_unlock(&pl330->lock);

	power_down = pch->active;
	pch->active = false;

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->submitted_list, node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_for_each_entry(desc, &pch->work_list, node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	if (power_down)
		pm_runtime_put_autosuspend(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	return 0;
}

/*
 * We don't support the DMA_RESUME command because of hardware
 * limitations, so after pausing the channel we cannot restore
 * it to the active state. We have to terminate the channel and set up
 * the DMA transfer again. This pause feature was implemented to
 * allow safely reading the residue before channel termination.
 */
static int pl330_pause(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	struct dma_pl330_desc *desc;
	unsigned long flags;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	spin_lock(&pl330->lock);
	_stop(pch->thread);
	spin_unlock(&pl330->lock);

	list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == BUSY)
			desc->status = PAUSED;
	}
	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	tasklet_kill(&pch->task);

	pm_runtime_get_sync(pch->dmac->ddma.dev);
	spin_lock_irqsave(&pl330->lock, flags);

	pl330_release_channel(pch->thread);
	pch->thread = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pl330->lock, flags);
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	pl330_unprep_slave_fifo(pch);
}

static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
					   struct dma_pl330_desc *desc)
{
	struct pl330_thread *thrd = pch->thread;
	struct pl330_dmac *pl330 = pch->dmac;
	void __iomem *regs = thrd->dmac->base;
	u32 val, addr;

	pm_runtime_get_sync(pl330->ddma.dev);
	val = addr = 0;
	if (desc->rqcfg.src_inc) {
		val = readl(regs + SA(thrd->id));
		addr = desc->px.src_addr;
	} else {
		val = readl(regs + DA(thrd->id));
		addr = desc->px.dst_addr;
	}
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	/* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
	if (!val)
		return 0;

	return val - addr;
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	enum dma_status ret;
	unsigned long flags;
	struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned int transferred, residual = 0;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!txstate)
		return ret;

	if (ret == DMA_COMPLETE)
		goto out;

	spin_lock_irqsave(&pch->lock, flags);
	spin_lock(&pch->thread->dmac->lock);

	if (pch->thread->req_running != -1)
		running = pch->thread->req[pch->thread->req_running].desc;

	last_enq = pch->thread->req[pch->thread->lstenq].desc;

	/* Check in pending list */
	list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == DONE)
			transferred = desc->bytes_requested;
		else if (running && desc == running)
			transferred =
				pl330_get_current_xferred_count(pch, desc);
		else if (desc->status == BUSY || desc->status == PAUSED)
			/*
			 * Busy but not running means either just enqueued,
			 * or finished and not yet marked done
			 */
			if (desc == last_enq)
				transferred = 0;
			else
				transferred = desc->bytes_requested;
		else
			transferred = 0;
		residual += desc->bytes_requested - transferred;
		if (desc->txd.cookie == cookie) {
			switch (desc->status) {
			case DONE:
				ret = DMA_COMPLETE;
				break;
			case PAUSED:
				ret = DMA_PAUSED;
				break;
			case PREP:
			case BUSY:
				ret = DMA_IN_PROGRESS;
				break;
			default:
				WARN_ON(1);
			}
			break;
		}
		if (desc->last)
			residual = 0;
	}
	spin_unlock(&pch->thread->dmac->lock);
	spin_unlock_irqrestore(&pch->lock, flags);

out:
	dma_set_residue(txstate, residual);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	if (list_empty(&pch->work_list)) {
		/*
		 * Warn on nothing pending. Empty submitted_list may
		 * break our pm_runtime usage counter as it is
		 * updated on work_list emptiness status.
		 */
		WARN_ON(list_empty(&pch->submitted_list));
		pch->active = true;
		pm_runtime_get_sync(pch->dmac->ddma.dev);
	}
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet(&pch->task);
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}
		desc->last = false;

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	last->last = true;
	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct list_head *pool, spinlock_t *lock,
		    gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, pool);
	}

	spin_unlock_irqrestore(lock, flags);

	return count;
}

static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
					 spinlock_t *lock)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (!list_empty(pool)) {
		desc = list_entry(pool->next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct pl330_dmac *pl330 = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		static DEFINE_SPINLOCK(lock);
		LIST_HEAD(pool);

		if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
			return NULL;

		desc = pluck_desc(&pool, &lock);
		WARN_ON(!desc || !list_empty(&pool));
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
	dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than those that
	 * can be programmed with 256 bytes of MC buffer, but since a
	 * req is seldom going to be word-unaligned and larger than
	 * 200MB, we take them all safely.
	 * Also, should the limit be reached, we'd rather have the
	 * platform increase the MC buffer size than complicate this
	 * API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_dmac *pl330 = pch->dmac;
	int burst_len;

	burst_len = pl330->pcfg.data_bus_width / 8;
	burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > PL330_MAX_BURST)
		burst_len = PL330_MAX_BURST;

	return burst_len;
}

static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);
		return NULL;
	}

	pl330_config_write(chan, &pch->slave_config, direction);

	if (!pl330_prep_slave_fifo(pch, direction))
		return NULL;

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			unsigned long iflags;

			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pl330->pool_lock, iflags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pl330->desc_pool);
			}

			list_move_tail(&first->node, &pl330->desc_pool);

			spin_unlock_irqrestore(&pl330->pool_lock, iflags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_dma;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_dma;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = pch->burst_len;
		desc->bytes_requested = period_len;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pl330 = pch->dmac;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pl330->pcfg.data_bus_width / 8;

	/*
	 * Make sure we use a burst size that aligns with all the memcpy
	 * parameters because our DMA programming algorithm doesn't cope with
	 * transfers which straddle an entry in the DMA device's MFIFO.
	 */
	while ((src | dst | len) & (burst - 1))
		burst /= 2;

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);
	/*
	 * If burst size is smaller than bus width then make sure we only
	 * transfer one at a time to avoid a burst straddling an MFIFO entry.
	 */
	if (burst * 8 < pl330->pcfg.data_bus_width)
		desc->rqcfg.brst_len = 1;

	desc->bytes_requested = len;
	desc->txd.flags = flags;

	return &desc->txd;
}
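
/*
 * Move every descriptor chained on 'first', and 'first' itself, back to
 * the DMAC's free descriptor pool under the pool lock. Used on the error
 * paths of the prep callbacks when a chain is only partially built.
 */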
static void __pl330_giveback_desc(struct pl330_dmac *pl330,
				  struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pl330->desc_pool);
	}

	list_move_tail(&first->node, &pl330->desc_pool);

	spin_unlock_irqrestore(&pl330->pool_lock, flags);
}
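
/*
 * Prepare a scatter-gather slave transfer: one descriptor per sg entry,
 * each moving data between the peripheral FIFO and one segment of the
 * scatterlist, using the channel's configured burst size and length.
 */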
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	pl330_config_write(chan, &pch->slave_config, direction);

	if (!pl330_prep_slave_fifo(pch, direction))
		return NULL;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			struct pl330_dmac *pl330 = pch->dmac;

			dev_err(pch->dmac->ddma.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pl330, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
				sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
				sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = pch->burst_len;
		desc->rqtype = direction;
		desc->bytes_requested = sg_dma_len(sg);
	}

	/* Return the last desc in the chain */
	return &desc->txd;
}
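
/*
 * Shared handler for all of the DMAC's IRQ lines: pl330_update() services
 * the pending event/fault status and returns nonzero if this device
 * actually raised the interrupt.
 */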
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

#define PL330_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
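
/*
 * debugfs: dump the physical-thread to virtual-channel mapping. Each line
 * pairs a DMA thread id with the index of the peripheral channel currently
 * bound to it, or "--" if the thread is unused.
 */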
#ifdef CONFIG_DEBUG_FS
static int pl330_debugfs_show(struct seq_file *s, void *data)
{
	struct pl330_dmac *pl330 = s->private;
	int chans, pchs, ch, pr;

	chans = pl330->pcfg.num_chan;
	pchs = pl330->num_peripherals;

	seq_puts(s, "PL330 physical channels:\n");
	seq_puts(s, "THREAD:\t\tCHANNEL:\n");
	seq_puts(s, "--------\t-----\n");
	for (ch = 0; ch < chans; ch++) {
		struct pl330_thread *thrd = &pl330->channels[ch];
		int found = -1;

		for (pr = 0; pr < pchs; pr++) {
			struct dma_pl330_chan *pch = &pl330->peripherals[pr];

			if (!pch->thread || thrd->id != pch->thread->id)
				continue;

			found = pr;
		}

		seq_printf(s, "%d\t\t", thrd->id);
		if (found == -1)
			seq_puts(s, "--\n");
		else
			seq_printf(s, "%d\n", found);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);

static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
{
	debugfs_create_file(dev_name(pl330->ddma.dev),
			    S_IFREG | 0444, NULL, pl330,
			    &pl330_debugfs_fops);
}
#else
static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
{
}
#endif
/*
 * Runtime PM callbacks are provided by the amba/bus.c driver.
 *
 * It is assumed here that IRQ safe runtime PM is chosen in probe and the
 * amba bus driver will only disable/enable the clock in runtime PM callbacks.
 */
static int __maybe_unused pl330_suspend(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);

	pm_runtime_force_suspend(dev);
	clk_unprepare(pcdev->pclk);

	return 0;
}

static int __maybe_unused pl330_resume(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);
	int ret;

	ret = clk_prepare(pcdev->pclk);
	if (ret)
		return ret;

	pm_runtime_force_resume(dev);

	return ret;
}

static const struct dev_pm_ops pl330_pm = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
};
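
/*
 * Probe: map the DMAC, take it out of reset, hook up its IRQ lines, read
 * the hardware configuration, allocate the virtual channels and register
 * the device with the dmaengine core (and the DT DMA helpers, if present).
 */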
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl330_config *pcfg;
	struct pl330_dmac *pl330;
	struct dma_pl330_chan *pch, *_p;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;
	struct device_node *np = adev->dev.of_node;

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
	if (!pl330)
		return -ENOMEM;

	pd = &pl330->ddma;
	pd->dev = &adev->dev;

	/* get quirk */
	for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
		if (of_property_read_bool(np, of_quirks[i].quirk))
			pl330->quirks |= of_quirks[i].id;

	res = &adev->res;
	pl330->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pl330->base))
		return PTR_ERR(pl330->base);

	amba_set_drvdata(adev, pl330);

	pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
	if (IS_ERR(pl330->rstc)) {
		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc), "Failed to get reset!\n");
	} else {
		ret = reset_control_deassert(pl330->rstc);
		if (ret) {
			dev_err(&adev->dev, "Couldn't deassert the device from reset!\n");
			return ret;
		}
	}

	pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
	if (IS_ERR(pl330->rstc_ocp)) {
		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc_ocp),
				     "Failed to get OCP reset!\n");
	} else {
		ret = reset_control_deassert(pl330->rstc_ocp);
		if (ret) {
			dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n");
			return ret;
		}
	}

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pl330);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pcfg = &pl330->pcfg;

	pcfg->periph_id = adev->periphid;
	ret = pl330_add(pl330);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pl330->desc_pool);
	spin_lock_init(&pl330->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
		      GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);

	pl330->num_peripherals = num_chan;

	pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL);
	if (!pl330->peripherals) {
		ret = -ENOMEM;
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pl330->peripherals[i];

		pch->chan.private = adev->dev.of_node;
		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->thread = NULL;
		pch->chan.device = pd;
		pch->dmac = pl330;
		pch->dir = DMA_NONE;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	dma_cap_set(DMA_MEMCPY, pd->cap_mask);
	if (pcfg->num_peri) {
		dma_cap_set(DMA_SLAVE, pd->cap_mask);
		dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		dma_cap_set(DMA_PRIVATE, pd->cap_mask);
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_config = pl330_config;
	pd->device_pause = pl330_pause;
	pd->device_terminate_all = pl330_terminate_all;
	pd->device_issue_pending = pl330_issue_pending;
	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	pd->max_burst = PL330_MAX_BURST;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
						 of_dma_pl330_xlate, pl330);
		if (ret) {
			dev_err(&adev->dev,
			"unable to register DMA to the generic DT DMA helpers\n");
		}
	}

	/*
	 * This is the limit for transfers with a buswidth of 1, larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	init_pl330_debugfs(pl330);
	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%x\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
		pcfg->num_peri, pcfg->num_events);

	pm_runtime_irq_safe(&adev->dev);
	pm_runtime_use_autosuspend(&adev->dev);
	pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&adev->dev);
	pm_runtime_put_autosuspend(&adev->dev);

	return 0;
probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}
probe_err2:
	pl330_del(pl330);

	if (pl330->rstc_ocp)
		reset_control_assert(pl330->rstc_ocp);

	if (pl330->rstc)
		reset_control_assert(pl330->rstc);

	return ret;
}
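
/*
 * Remove: unwind probe in reverse order: unregister from the DT helpers
 * and the dmaengine core, release the IRQs, idle and flush every channel,
 * then put the DMAC back into reset.
 */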
static void pl330_remove(struct amba_device *adev)
{
	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	int i, irq;

	pm_runtime_get_noresume(pl330->ddma.dev);

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq)
			devm_free_irq(&adev->dev, irq, pl330);
	}

	dma_async_device_unregister(&pl330->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}

	pl330_del(pl330);

	if (pl330->rstc_ocp)
		reset_control_assert(pl330->rstc_ocp);

	if (pl330->rstc)
		reset_control_assert(pl330->rstc);
}
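
/* Match any AMBA device that advertises the PL330 peripheral ID */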
static const struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, pl330_ids);

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};
module_amba_driver(pl330_driver);
MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");