/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
enum pl330_srccachectrl {
        SCCTRL0,        /* Noncacheable and nonbufferable */
        SCCTRL1,        /* Bufferable only */
        SCCTRL2,        /* Cacheable, but do not allocate */
        SCCTRL3,        /* Cacheable and bufferable, but do not allocate */
        SINVALID1,
        SINVALID2,
        SCCTRL6,        /* Cacheable write-through, allocate on reads only */
        SCCTRL7,        /* Cacheable write-back, allocate on reads only */
};
enum pl330_dstcachectrl {
        DCCTRL0,        /* Noncacheable and nonbufferable */
        DCCTRL1,        /* Bufferable only */
        DCCTRL2,        /* Cacheable, but do not allocate */
        DCCTRL3,        /* Cacheable and bufferable, but do not allocate */
        DINVALID1,      /* AWCACHE = 0x1000 */
        DINVALID2 = 8,
        DCCTRL6,        /* Cacheable write-through, allocate on writes only */
        DCCTRL7,        /* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
        SWAP_NO,
        SWAP_2,
        SWAP_4,
        SWAP_8,
        SWAP_16,
};

enum pl330_reqtype {
        MEMTOMEM,
        MEMTODEV,
        DEVTOMEM,
        DEVTODEV,
};
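/*
 * Note: the src/dst cache-control encodings above are the 3-bit values
 * programmed into the CCR's cache-control fields (see CC_SRCCCTRL_SHFT
 * and CC_DSTCCTRL_SHFT below); the *INVALID* encodings are reserved by
 * the hardware, which is why _is_valid() rejects a CCR carrying them
 * before a request is submitted.
 */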
/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf
#define INTEN		0x20
#define ES		0x24
#define INTSTATUS	0x28
#define INTCLR		0x2c
#define FSM		0x30
#define FSC		0x34

#define _FTC		0x40
#define FTC(n)		(_FTC + (n)*0x4)

#define _CS		0x100
#define CS(n)		(_CS + (n)*0x8)
#define CS_CNS		(1 << 21)

#define _CPC		0x104
#define CPC(n)		(_CPC + (n)*0x8)

#define _SA		0x400
#define SA(n)		(_SA + (n)*0x20)

#define _DA		0x404
#define DA(n)		(_DA + (n)*0x20)

#define _CC		0x408
#define CC(n)		(_CC + (n)*0x20)
#define CC_SRCINC	(1 << 0)
#define CC_DSTINC	(1 << 14)
#define CC_SRCPRI	(1 << 8)
#define CC_DSTPRI	(1 << 22)
#define CC_SRCNS	(1 << 9)
#define CC_DSTNS	(1 << 23)
#define CC_SRCIA	(1 << 10)
#define CC_DSTIA	(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DSTCCTRL_MASK	0x7
#define CC_SWAP_SHFT	28
#define _LC0		0x40c
#define LC0(n)		(_LC0 + (n)*0x20)

#define _LC1		0x410
#define LC1(n)		(_LC1 + (n)*0x20)

#define DBGSTATUS	0xd00
#define DBG_BUSY	(1 << 0)

#define DBGCMD		0xd04
#define DBGINST0	0xd08
#define DBGINST1	0xd0c

#define CR0		0xe00
#define CR1		0xe04
#define CR2		0xe08
#define CR3		0xe0c
#define CR4		0xe10
#define CRD		0xe14
#define PERIPH_ID	0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2
#define PCELL_ID	0xff0
#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f
#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf
#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff
#define PART		0x330
#define DESIGNER	0x41
#define REVISION	0x0
#define INTEG_CFG	0x0
#define PERIPH_ID_VAL	((PART << 0) | (DESIGNER << 12))

#define PCELL_ID_VAL	0xb105f00d
#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)
#define CMD_DMAADDH	0x54
#define CMD_DMAEND	0x00
#define CMD_DMAFLUSHP	0x35
#define CMD_DMAGO	0xa0
#define CMD_DMALD	0x04
#define CMD_DMALDP	0x25
#define CMD_DMALP	0x20
#define CMD_DMALPEND	0x28
#define CMD_DMAKILL	0x01
#define CMD_DMAMOV	0xbc
#define CMD_DMANOP	0x18
#define CMD_DMARMB	0x12
#define CMD_DMASEV	0x34
#define CMD_DMAST	0x08
#define CMD_DMASTP	0x29
#define CMD_DMASTZ	0x0c
#define CMD_DMAWFE	0x36
#define CMD_DMAWFP	0x30
#define CMD_DMAWMB	0x13
#define SZ_DMAADDH	3
#define SZ_DMAEND	1
#define SZ_DMAFLUSHP	2
#define SZ_DMALD	1
#define SZ_DMALDP	2
#define SZ_DMALP	2
#define SZ_DMALPEND	2
#define SZ_DMAKILL	1
#define SZ_DMAMOV	6
#define SZ_DMANOP	1
#define SZ_DMARMB	1
#define SZ_DMASEV	2
#define SZ_DMAST	1
#define SZ_DMASTP	2
#define SZ_DMASTZ	1
#define SZ_DMAWFE	2
#define SZ_DMAWFP	2
#define SZ_DMAWMB	1
#define SZ_DMAGO	6
#define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
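/*
 * Worked example (illustrative): for a CCR whose source burst-size
 * field encodes 2 (i.e. 1 << 2 = 4 bytes per beat) and whose burst-length
 * field encodes 15 (i.e. 15 + 1 = 16 beats per burst), one burst moves
 * 4 * 16 = 64 bytes, so BYTE_TO_BURST(4096, ccr) == 64 and
 * BURST_TO_BYTE(64, ccr) == 4096.
 */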
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For a typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
/* The number of default descriptors */
#define NR_DEFAULT_DESC	16
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
        u32 periph_id;
        u32 pcell_id;
#define DMAC_MODE_NS	(1 << 0)
        unsigned int mode;
        unsigned int data_bus_width:10; /* In number of bits */
        unsigned int data_buf_dep:10;
        unsigned int num_chan:4;
        unsigned int num_peri:6;
        u32 peri_ns;
        unsigned int num_events:6;
        u32 irq_ns;
};
/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
        /* Owning device */
        struct device *dev;
        /* Size of MicroCode buffers for each channel. */
        unsigned mcbufsz;
        /* ioremap'ed address of PL330 registers. */
        void __iomem *base;
        /* Client can freely use it. */
        void *client_data;
        /* PL330 core data, Client must not touch it. */
        void *pl330_data;
        /* Populated by the PL330 core driver during pl330_add */
        struct pl330_config pcfg;
        /*
         * If the DMAC has some reset mechanism, then the
         * client may want to provide a pointer to the method.
         */
        void (*dmac_reset)(struct pl330_info *pi);
};
/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
        /* Address Incrementing */
        unsigned dst_inc:1;
        unsigned src_inc:1;

        /*
         * For now, the SRC & DST protection levels
         * and burst size/length are assumed same.
         */
        bool nonsecure;
        bool privileged;
        bool insnaccess;
        unsigned brst_len:5;
        unsigned brst_size:3; /* in power of 2 */

        enum pl330_dstcachectrl dcctl;
        enum pl330_srccachectrl scctl;
        enum pl330_byteswap swap;
        struct pl330_config *pcfg;
};
/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
        u32 src_addr;
        u32 dst_addr;
        /* Size to xfer */
        u32 bytes;
        /*
         * Pointer to next xfer in the list.
         * The last xfer in the req must point to NULL.
         */
        struct pl330_xfer *next;
};
/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
        /* All xfers in the request completed successfully. */
        PL330_ERR_NONE,
        /* If req aborted due to global error. */
        PL330_ERR_ABORT,
        /* If req failed due to problem with Channel. */
        PL330_ERR_FAIL,
};
/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
        enum pl330_reqtype rqtype;
        /* Index of peripheral for the xfer. */
        unsigned peri:5;
        /* Unique token for this xfer, set by the client. */
        void *token;
        /* Callback to be called after xfer. */
        void (*xfer_cb)(void *token, enum pl330_op_err err);
        /* If NULL, req will be done with the last set parameters. */
        struct pl330_reqcfg *cfg;
        /* Pointer to first xfer in the request. */
        struct pl330_xfer *x;
        /* Hook to attach to DMAC's list of reqs with due callback */
        struct list_head rqd;
};
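/*
 * Illustration (hypothetical client code, not part of this driver):
 * a minimal one-shot request against this core API would look like
 *
 *	struct pl330_xfer x = {
 *		.src_addr = src, .dst_addr = dst,
 *		.bytes = len, .next = NULL,
 *	};
 *	struct pl330_req r = {
 *		.rqtype = MEMTOMEM, .token = &ctx,
 *		.xfer_cb = my_done_cb, .cfg = &rqcfg, .x = &x,
 *	};
 *	void *ch = pl330_request_channel(pi);
 *
 *	pl330_submit_req(ch, &r);
 *	pl330_chan_ctrl(ch, PL330_OP_START);
 *
 * where 'ctx', 'my_done_cb' and 'rqcfg' are the client's own. The DMA
 * engine glue below (pl330_prep_*, pl330_tx_submit, pl330_tasklet)
 * performs exactly this bookkeeping on behalf of dmaengine clients.
 */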
/*
 * To know the status of the channel and DMAC, the client
 * provides a pointer to this structure. The PL330 core
 * fills it with current information.
 */
struct pl330_chanstatus {
        /*
         * If the DMAC engine halted due to some error,
         * the client should remove-add DMAC.
         */
        bool dmac_halted;
        /*
         * If channel is halted due to some error,
         * the client should ABORT/FLUSH and START the channel.
         */
        bool faulting;
        /* Location of last load */
        u32 src_addr;
        /* Location of last store */
        u32 dst_addr;
        /*
         * Pointer to the currently active req, NULL if channel is
         * inactive, even though the requests may be present.
         */
        struct pl330_req *top_req;
        /* Pointer to req waiting second in the queue if any. */
        struct pl330_req *wait_req;
};
enum pl330_chan_op {
        /* Start the channel */
        PL330_OP_START,
        /* Abort the active xfer */
        PL330_OP_ABORT,
        /* Stop xfer and flush queue */
        PL330_OP_FLUSH,
};
struct _xfer_spec {
        u32 ccr;
        struct pl330_req *r;
        struct pl330_xfer *x;
};

enum dmamov_dst {
        SAR = 0,
        CCR,
        DAR,
};

enum pl330_dst {
        SRC = 0,
        DST,
};

enum pl330_cond {
        SINGLE,
        BURST,
        ALWAYS,
};

struct _pl330_req {
        u32 mc_bus;
        void *mc_cpu;
        /* Number of bytes taken to setup MC for the req */
        u32 mc_len;
        struct pl330_req *r;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
        bool reset_dmac;
        bool reset_mngr;
        u8 reset_chan;
};
/* A DMAC Thread */
struct pl330_thread {
        u8 id;
        int ev;
        /* If the channel is not yet acquired by any client */
        bool free;
        /* Parent DMAC */
        struct pl330_dmac *dmac;
        /* Only two at a time */
        struct _pl330_req req[2];
        /* Index of the last enqueued request */
        unsigned lstenq;
        /* Index of the last submitted request or -1 if the DMA is stopped */
        int req_running;
};
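/*
 * Each thread keeps a two-deep ping-pong queue of requests: while the
 * request in one microcode slot executes, the next one can already be
 * programmed into the other slot. 'lstenq' names the slot that was
 * enqueued last, so _trigger() prefers the other (older) slot when
 * picking what to run next.
 */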
enum pl330_dmac_state {
        UNINIT,
        INIT,
        DYING,
};
/* A DMAC */
struct pl330_dmac {
        spinlock_t lock;
        /* Holds list of reqs with due callbacks */
        struct list_head req_done;
        /* Pointer to platform specific stuff */
        struct pl330_info *pinfo;
        /* Maximum possible events/irqs */
        int events[32];
        /* BUS address of MicroCode buffer */
        u32 mcode_bus;
        /* CPU address of MicroCode buffer */
        void *mcode_cpu;
        /* List of all Channel threads */
        struct pl330_thread *channels;
        /* Pointer to the MANAGER thread */
        struct pl330_thread *manager;
        /* To handle bad news in interrupt */
        struct tasklet_struct tasks;
        struct _pl330_tbd dmac_tbd;
        /* State of DMAC operation */
        enum pl330_dmac_state state;
};
enum desc_status {
        /* In the DMAC pool */
        FREE,
        /*
         * Allocated to some channel during prep_xxx
         * Also may be sitting on the work_list.
         */
        PREP,
        /*
         * Sitting on the work_list and already submitted
         * to the PL330 core. Not more than two descriptors
         * of a channel can be BUSY at any time.
         */
        BUSY,
        /*
         * Sitting on the channel work_list but xfer done
         * by PL330 core
         */
        DONE,
};
struct dma_pl330_chan {
        /* Schedule desc completion */
        struct tasklet_struct task;

        /* DMA-Engine Channel */
        struct dma_chan chan;

        /* List of to be xfered descriptors */
        struct list_head work_list;

        /* Pointer to the DMAC that manages this channel,
         * NULL if the channel is available to be acquired.
         * As the parent, this DMAC also provides descriptors
         * to the channel.
         */
        struct dma_pl330_dmac *dmac;

        /* To protect channel manipulation */
        spinlock_t lock;

        /* Token of a hardware channel thread of PL330 DMAC,
         * NULL if the channel is available to be acquired.
         */
        void *pl330_chid;

        /* For D-to-M and M-to-D channels */
        int burst_sz; /* the peripheral fifo width */
        int burst_len; /* the number of bursts */
        dma_addr_t fifo_addr;

        /* for cyclic capability */
        bool cyclic;
};
struct dma_pl330_dmac {
        struct pl330_info pif;

        /* DMA-Engine Device */
        struct dma_device ddma;

        /* Pool of descriptors available for the DMAC's channels */
        struct list_head desc_pool;
        /* To protect desc_pool manipulation */
        spinlock_t pool_lock;

        /* Peripheral channels connected to this DMAC */
        struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
        /* To attach to a queue as child */
        struct list_head node;

        /* Descriptor for the DMA Engine API */
        struct dma_async_tx_descriptor txd;

        /* Xfer for PL330 core */
        struct pl330_xfer px;

        struct pl330_reqcfg rqcfg;
        struct pl330_req req;

        enum desc_status status;

        /* The channel which currently holds this desc */
        struct dma_pl330_chan *pchan;
};
struct dma_pl330_filter_args {
        struct dma_pl330_dmac *pdmac;
        unsigned int chan_id;
};
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
        if (r && r->xfer_cb)
                r->xfer_cb(r->token, err);
}
static inline bool _queue_empty(struct pl330_thread *thrd)
{
        return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
                ? true : false;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
        return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
                ? false : true;
}
static inline bool is_manager(struct pl330_thread *thrd)
{
        struct pl330_dmac *pl330 = thrd->dmac;

        /* MANAGER is indexed at the end */
        if (thrd->id == pl330->pinfo->pcfg.num_chan)
                return true;
        else
                return false;
}
/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
        struct pl330_dmac *pl330 = thrd->dmac;

        return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}
static inline u32 get_id(struct pl330_info *pi, u32 off)
{
        void __iomem *regs = pi->base;
        u32 id = 0;

        id |= (readb(regs + off + 0x0) << 0);
        id |= (readb(regs + off + 0x4) << 8);
        id |= (readb(regs + off + 0x8) << 16);
        id |= (readb(regs + off + 0xc) << 24);

        return id;
}
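/*
 * The PrimeCell ID registers expose one byte per 32-bit word, so the
 * four reads above assemble the full value; e.g. reading at PCELL_ID
 * composes the canonical 0xb105f00d that pl330_add() checks for.
 */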
static inline u32 get_revision(u32 periph_id)
{
        return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
                enum pl330_dst da, u16 val)
{
        if (dry_run)
                return SZ_DMAADDH;

        buf[0] = CMD_DMAADDH;
        buf[0] |= (da << 1);
        *((u16 *)&buf[1]) = val;

        PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
                da == 1 ? "DA" : "SA", val);

        return SZ_DMAADDH;
}
static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
        if (dry_run)
                return SZ_DMAEND;

        buf[0] = CMD_DMAEND;

        PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

        return SZ_DMAEND;
}
static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
        if (dry_run)
                return SZ_DMAFLUSHP;

        buf[0] = CMD_DMAFLUSHP;

        peri &= 0x1f;
        peri <<= 3;
        buf[1] = peri;

        PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

        return SZ_DMAFLUSHP;
}
static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
        if (dry_run)
                return SZ_DMALD;

        buf[0] = CMD_DMALD;

        if (cond == SINGLE)
                buf[0] |= (0 << 1) | (1 << 0);
        else if (cond == BURST)
                buf[0] |= (1 << 1) | (1 << 0);

        PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
                cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

        return SZ_DMALD;
}
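/*
 * Encoding note: CMD_DMALD is 0x04; bit 0 makes the load conditional
 * and bit 1 selects BURST over SINGLE, so DMALDS assembles to 0x05,
 * DMALDB to 0x07, and the unconditional DMALD keeps both bits clear.
 * The other conditional instructions below (DMAST, DMALPEND) use the
 * same two-bit scheme.
 */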
static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
                enum pl330_cond cond, u8 peri)
{
        if (dry_run)
                return SZ_DMALDP;

        buf[0] = CMD_DMALDP;

        if (cond == BURST)
                buf[0] |= (1 << 1);

        peri &= 0x1f;
        peri <<= 3;
        buf[1] = peri;

        PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
                cond == SINGLE ? 'S' : 'B', peri >> 3);

        return SZ_DMALDP;
}
static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
                unsigned loop, u8 cnt)
{
        if (dry_run)
                return SZ_DMALP;

        buf[0] = CMD_DMALP;

        if (loop)
                buf[0] |= (1 << 1);

        cnt--; /* DMAC increments by 1 internally */
        buf[1] = cnt;

        PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

        return SZ_DMALP;
}
struct _arg_LPEND {
        enum pl330_cond cond;
        bool forever;
        unsigned loop;
        u8 bjump;
};
static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
                const struct _arg_LPEND *arg)
{
        enum pl330_cond cond = arg->cond;
        bool forever = arg->forever;
        unsigned loop = arg->loop;
        u8 bjump = arg->bjump;

        if (dry_run)
                return SZ_DMALPEND;

        buf[0] = CMD_DMALPEND;

        if (loop)
                buf[0] |= (1 << 2);

        if (!forever)
                buf[0] |= (1 << 4);

        if (cond == SINGLE)
                buf[0] |= (0 << 1) | (1 << 0);
        else if (cond == BURST)
                buf[0] |= (1 << 1) | (1 << 0);

        buf[1] = bjump;

        PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
                forever ? "FE" : "END",
                cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
                loop ? '1' : '0',
                bjump);

        return SZ_DMALPEND;
}
static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
        if (dry_run)
                return SZ_DMAKILL;

        buf[0] = CMD_DMAKILL;

        return SZ_DMAKILL;
}
static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
                enum dmamov_dst dst, u32 val)
{
        if (dry_run)
                return SZ_DMAMOV;

        buf[0] = CMD_DMAMOV;
        buf[1] = dst;
        *((u32 *)&buf[2]) = val;

        PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
                dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

        return SZ_DMAMOV;
}
static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
        if (dry_run)
                return SZ_DMANOP;

        buf[0] = CMD_DMANOP;

        PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

        return SZ_DMANOP;
}
static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
        if (dry_run)
                return SZ_DMARMB;

        buf[0] = CMD_DMARMB;

        PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

        return SZ_DMARMB;
}
static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
        if (dry_run)
                return SZ_DMASEV;

        buf[0] = CMD_DMASEV;

        ev &= 0x1f;
        ev <<= 3;
        buf[1] = ev;

        PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

        return SZ_DMASEV;
}
static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
        if (dry_run)
                return SZ_DMAST;

        buf[0] = CMD_DMAST;

        if (cond == SINGLE)
                buf[0] |= (0 << 1) | (1 << 0);
        else if (cond == BURST)
                buf[0] |= (1 << 1) | (1 << 0);

        PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
                cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

        return SZ_DMAST;
}
static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
                enum pl330_cond cond, u8 peri)
{
        if (dry_run)
                return SZ_DMASTP;

        buf[0] = CMD_DMASTP;

        if (cond == BURST)
                buf[0] |= (1 << 1);

        peri &= 0x1f;
        peri <<= 3;
        buf[1] = peri;

        PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
                cond == SINGLE ? 'S' : 'B', peri >> 3);

        return SZ_DMASTP;
}
static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
        if (dry_run)
                return SZ_DMASTZ;

        buf[0] = CMD_DMASTZ;

        PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

        return SZ_DMASTZ;
}
static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
                unsigned invalidate)
{
        if (dry_run)
                return SZ_DMAWFE;

        buf[0] = CMD_DMAWFE;

        ev &= 0x1f;
        ev <<= 3;
        buf[1] = ev;

        if (invalidate)
                buf[1] |= (1 << 1);

        PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
                ev >> 3, invalidate ? ", I" : "");

        return SZ_DMAWFE;
}
static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
                enum pl330_cond cond, u8 peri)
{
        if (dry_run)
                return SZ_DMAWFP;

        buf[0] = CMD_DMAWFP;

        if (cond == SINGLE)
                buf[0] |= (0 << 1) | (0 << 0);
        else if (cond == BURST)
                buf[0] |= (1 << 1) | (0 << 0);
        else
                buf[0] |= (0 << 1) | (1 << 0);

        peri &= 0x1f;
        peri <<= 3;
        buf[1] = peri;

        PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
                cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

        return SZ_DMAWFP;
}
static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
        if (dry_run)
                return SZ_DMAWMB;

        buf[0] = CMD_DMAWMB;

        PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

        return SZ_DMAWMB;
}
struct _arg_GO {
        u8 chan;
        u32 addr;
        unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
                const struct _arg_GO *arg)
{
        u8 chan = arg->chan;
        u32 addr = arg->addr;
        unsigned ns = arg->ns;

        if (dry_run)
                return SZ_DMAGO;

        buf[0] = CMD_DMAGO;
        buf[0] |= (ns << 1);

        buf[1] = chan & 0x7;

        *((u32 *)&buf[2]) = addr;

        return SZ_DMAGO;
}
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
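/*
 * This converts milliseconds into a rough busy-wait iteration count
 * using the BogoMIPS calibration (loops_per_jiffy); it is only used
 * below to bound the poll on DBGSTATUS to roughly 5 ms.
 */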
/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
        void __iomem *regs = thrd->dmac->pinfo->base;
        unsigned long loops = msecs_to_loops(5);

        do {
                /* Until Manager is Idle */
                if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
                        break;

                cpu_relax();
        } while (--loops);

        if (!loops)
                return true;

        return false;
}
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
                u8 insn[], bool as_manager)
{
        void __iomem *regs = thrd->dmac->pinfo->base;
        u32 val;

        val = (insn[0] << 16) | (insn[1] << 24);
        if (!as_manager) {
                val |= (1 << 0);
                val |= (thrd->id << 8); /* Channel Number */
        }
        writel(val, regs + DBGINST0);

        val = *((u32 *)&insn[2]);
        writel(val, regs + DBGINST1);

        /* If timed out due to halted state-machine */
        if (_until_dmac_idle(thrd)) {
                dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
                return;
        }

        /* Get going */
        writel(0, regs + DBGCMD);
}
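/*
 * The debug-instruction interface works in three steps: the first two
 * instruction bytes (plus the channel number for non-manager threads)
 * are packed into DBGINST0, the remaining four bytes go into DBGINST1,
 * and writing 0 to DBGCMD then tells the DMAC to execute the
 * instruction on the selected thread.
 */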
/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
        struct _pl330_req *req = &thrd->req[idx];

        _emit_END(0, req->mc_cpu);
        req->mc_len = 0;

        thrd->req_running = -1;
}
static inline u32 _state(struct pl330_thread *thrd)
{
        void __iomem *regs = thrd->dmac->pinfo->base;
        u32 val;

        if (is_manager(thrd))
                val = readl(regs + DS) & 0xf;
        else
                val = readl(regs + CS(thrd->id)) & 0xf;

        switch (val) {
        case DS_ST_STOP:
                return PL330_STATE_STOPPED;
        case DS_ST_EXEC:
                return PL330_STATE_EXECUTING;
        case DS_ST_CMISS:
                return PL330_STATE_CACHEMISS;
        case DS_ST_UPDTPC:
                return PL330_STATE_UPDTPC;
        case DS_ST_WFE:
                return PL330_STATE_WFE;
        case DS_ST_FAULT:
                return PL330_STATE_FAULTING;
        case DS_ST_ATBRR:
                if (is_manager(thrd))
                        return PL330_STATE_INVALID;
                else
                        return PL330_STATE_ATBARRIER;
        case DS_ST_QBUSY:
                if (is_manager(thrd))
                        return PL330_STATE_INVALID;
                else
                        return PL330_STATE_QUEUEBUSY;
        case DS_ST_WFP:
                if (is_manager(thrd))
                        return PL330_STATE_INVALID;
                else
                        return PL330_STATE_WFP;
        case DS_ST_KILL:
                if (is_manager(thrd))
                        return PL330_STATE_INVALID;
                else
                        return PL330_STATE_KILLING;
        case DS_ST_CMPLT:
                if (is_manager(thrd))
                        return PL330_STATE_INVALID;
                else
                        return PL330_STATE_COMPLETING;
        case DS_ST_FLTCMP:
                if (is_manager(thrd))
                        return PL330_STATE_INVALID;
                else
                        return PL330_STATE_FAULT_COMPLETING;
        default:
                return PL330_STATE_INVALID;
        }
}
static void _stop(struct pl330_thread *thrd)
{
        void __iomem *regs = thrd->dmac->pinfo->base;
        u8 insn[6] = {0, 0, 0, 0, 0, 0};

        if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
                UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

        /* Return if nothing needs to be done */
        if (_state(thrd) == PL330_STATE_COMPLETING
                || _state(thrd) == PL330_STATE_KILLING
                || _state(thrd) == PL330_STATE_STOPPED)
                return;

        _emit_KILL(0, insn);

        /* Stop generating interrupts for SEV */
        writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

        _execute_DBGINSN(thrd, insn, is_manager(thrd));
}
/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
        void __iomem *regs = thrd->dmac->pinfo->base;
        struct _pl330_req *req;
        struct pl330_req *r;
        struct _arg_GO go;
        unsigned ns;
        u8 insn[6] = {0, 0, 0, 0, 0, 0};
        int idx;

        /* Return if already ACTIVE */
        if (_state(thrd) != PL330_STATE_STOPPED)
                return true;

        idx = 1 - thrd->lstenq;
        if (!IS_FREE(&thrd->req[idx]))
                req = &thrd->req[idx];
        else {
                idx = thrd->lstenq;
                if (!IS_FREE(&thrd->req[idx]))
                        req = &thrd->req[idx];
                else
                        req = NULL;
        }

        /* Return if no request */
        if (!req || !req->r)
                return true;

        r = req->r;

        if (r->cfg)
                ns = r->cfg->nonsecure ? 1 : 0;
        else if (readl(regs + CS(thrd->id)) & CS_CNS)
                ns = 1;
        else
                ns = 0;

        /* See 'Abort Sources' point-4 at Page 2-25 */
        if (_manager_ns(thrd) && !ns)
                dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
                        __func__, __LINE__);

        go.chan = thrd->id;
        go.addr = req->mc_bus;
        go.ns = ns;
        _emit_GO(0, insn, &go);

        /* Set to generate interrupts for SEV */
        writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

        /* Only manager can execute GO */
        _execute_DBGINSN(thrd, insn, true);

        thrd->req_running = idx;

        return true;
}
static bool _start(struct pl330_thread *thrd)
{
        switch (_state(thrd)) {
        case PL330_STATE_FAULT_COMPLETING:
                UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

                if (_state(thrd) == PL330_STATE_KILLING)
                        UNTIL(thrd, PL330_STATE_STOPPED)

        case PL330_STATE_FAULTING:
                _stop(thrd);

        case PL330_STATE_KILLING:
        case PL330_STATE_COMPLETING:
                UNTIL(thrd, PL330_STATE_STOPPED)

        case PL330_STATE_STOPPED:
                return _trigger(thrd);

        case PL330_STATE_WFP:
        case PL330_STATE_QUEUEBUSY:
        case PL330_STATE_ATBARRIER:
        case PL330_STATE_UPDTPC:
        case PL330_STATE_CACHEMISS:
        case PL330_STATE_EXECUTING:
                return true;

        case PL330_STATE_WFE: /* For RESUME, nothing yet */
        default:
                return false;
        }
}
static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
                const struct _xfer_spec *pxs, int cyc)
{
        int off = 0;
        struct pl330_config *pcfg = pxs->r->cfg->pcfg;

        /* check lock-up free version */
        if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
                while (cyc--) {
                        off += _emit_LD(dry_run, &buf[off], ALWAYS);
                        off += _emit_ST(dry_run, &buf[off], ALWAYS);
                }
        } else {
                while (cyc--) {
                        off += _emit_LD(dry_run, &buf[off], ALWAYS);
                        off += _emit_RMB(dry_run, &buf[off]);
                        off += _emit_ST(dry_run, &buf[off], ALWAYS);
                        off += _emit_WMB(dry_run, &buf[off]);
                }
        }

        return off;
}
static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
                const struct _xfer_spec *pxs, int cyc)
{
        int off = 0;

        while (cyc--) {
                off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
                off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
                off += _emit_ST(dry_run, &buf[off], ALWAYS);
                off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
        }

        return off;
}
static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
                const struct _xfer_spec *pxs, int cyc)
{
        int off = 0;

        while (cyc--) {
                off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
                off += _emit_LD(dry_run, &buf[off], ALWAYS);
                off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
                off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
        }

        return off;
}
static int _bursts(unsigned dry_run, u8 buf[],
                const struct _xfer_spec *pxs, int cyc)
{
        int off = 0;

        switch (pxs->r->rqtype) {
        case MEMTODEV:
                off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
                break;
        case DEVTOMEM:
                off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
                break;
        case MEMTOMEM:
                off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
                break;
        default:
                off += 0x40000000; /* Scare off the Client */
                break;
        }

        return off;
}
/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
                unsigned long *bursts, const struct _xfer_spec *pxs)
{
        int cyc, cycmax, szlp, szlpend, szbrst, off;
        unsigned lcnt0, lcnt1, ljmp0, ljmp1;
        struct _arg_LPEND lpend;

        /* Max iterations possible in DMALP is 256 */
        if (*bursts >= 256*256) {
                lcnt1 = 256;
                lcnt0 = 256;
                cyc = *bursts / lcnt1 / lcnt0;
        } else if (*bursts > 256) {
                lcnt1 = 256;
                lcnt0 = *bursts / lcnt1;
                cyc = 1;
        } else {
                lcnt1 = *bursts;
                lcnt0 = 0;
                cyc = 1;
        }

        szlp = _emit_LP(1, buf, 0, 0);
        szbrst = _bursts(1, buf, pxs, 1);

        lpend.cond = ALWAYS;
        lpend.forever = false;
        lpend.loop = 0;
        lpend.bjump = 0;
        szlpend = _emit_LPEND(1, buf, &lpend);

        if (lcnt0) {
                szlp *= 2;
                szlpend *= 2;
        }

        /*
         * Max bursts that we can unroll due to limit on the
         * size of backward jump that can be encoded in DMALPEND
         * which is 8-bits and hence 255
         */
        cycmax = (255 - (szlp + szlpend)) / szbrst;

        cyc = (cycmax < cyc) ? cycmax : cyc;

        off = 0;

        if (lcnt0) {
                off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
                ljmp0 = off;
        }

        off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
        ljmp1 = off;

        off += _bursts(dry_run, &buf[off], pxs, cyc);

        lpend.cond = ALWAYS;
        lpend.forever = false;
        lpend.loop = 1;
        lpend.bjump = off - ljmp1;
        off += _emit_LPEND(dry_run, &buf[off], &lpend);

        if (lcnt0) {
                lpend.cond = ALWAYS;
                lpend.forever = false;
                lpend.loop = 0;
                lpend.bjump = off - ljmp0;
                off += _emit_LPEND(dry_run, &buf[off], &lpend);
        }

        *bursts = lcnt1 * cyc;
        if (lcnt0)
                *bursts *= lcnt0;

        return off;
}
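/*
 * Worked example (illustrative): asked for 300000 remaining bursts,
 * the first branch above picks lcnt0 = lcnt1 = 256 and
 * cyc = 300000/256/256 = 4, so one pass emits two nested DMALP loops
 * around 4 unrolled bursts and reports 256 * 256 * 4 = 262144 bursts
 * consumed; _setup_loops() below then calls back here for the 37856
 * bursts still outstanding.
 */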
static inline int _setup_loops(unsigned dry_run, u8 buf[],
                const struct _xfer_spec *pxs)
{
        struct pl330_xfer *x = pxs->x;
        u32 ccr = pxs->ccr;
        unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
        int off = 0;

        while (bursts) {
                c = bursts;
                off += _loop(dry_run, &buf[off], &c, pxs);
                bursts -= c;
        }

        return off;
}
static inline int _setup_xfer(unsigned dry_run, u8 buf[],
                const struct _xfer_spec *pxs)
{
        struct pl330_xfer *x = pxs->x;
        int off = 0;

        /* DMAMOV SAR, x->src_addr */
        off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
        /* DMAMOV DAR, x->dst_addr */
        off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

        /* Setup Loop(s) */
        off += _setup_loops(dry_run, &buf[off], pxs);

        return off;
}
/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
                unsigned index, struct _xfer_spec *pxs)
{
        struct _pl330_req *req = &thrd->req[index];
        struct pl330_xfer *x;
        u8 *buf = req->mc_cpu;
        int off = 0;

        PL330_DBGMC_START(req->mc_bus);

        /* DMAMOV CCR, ccr */
        off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

        x = pxs->r->x;
        do {
                /* Error if xfer length is not aligned at burst size */
                if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
                        return -EINVAL;

                pxs->x = x;
                off += _setup_xfer(dry_run, &buf[off], pxs);

                x = x->next;
        } while (x);

        /* DMASEV peripheral/event */
        off += _emit_SEV(dry_run, &buf[off], thrd->ev);
        /* DMAEND */
        off += _emit_END(dry_run, &buf[off]);

        return off;
}
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
        u32 ccr = 0;

        if (rqc->src_inc)
                ccr |= CC_SRCINC;

        if (rqc->dst_inc)
                ccr |= CC_DSTINC;

        /* We set same protection levels for Src and DST for now */
        if (rqc->privileged)
                ccr |= CC_SRCPRI | CC_DSTPRI;
        if (rqc->nonsecure)
                ccr |= CC_SRCNS | CC_DSTNS;
        if (rqc->insnaccess)
                ccr |= CC_SRCIA | CC_DSTIA;

        ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
        ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

        ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
        ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

        ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
        ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

        ccr |= (rqc->swap << CC_SWAP_SHFT);

        return ccr;
}
static inline bool _is_valid(u32 ccr)
{
        enum pl330_dstcachectrl dcctl;
        enum pl330_srccachectrl scctl;

        dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DSTCCTRL_MASK;
        scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

        if (dcctl == DINVALID1 || dcctl == DINVALID2
                        || scctl == SINVALID1 || scctl == SINVALID2)
                return false;
        else
                return true;
}
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(void *ch_id, struct pl330_req *r)
{
        struct pl330_thread *thrd = ch_id;
        struct pl330_dmac *pl330;
        struct pl330_info *pi;
        struct _xfer_spec xs;
        unsigned long flags;
        void __iomem *regs;
        unsigned idx;
        u32 ccr;
        int ret = 0;

        /* No Req or Unacquired Channel or DMAC */
        if (!r || !thrd || thrd->free)
                return -EINVAL;

        pl330 = thrd->dmac;
        pi = pl330->pinfo;
        regs = pi->base;

        if (pl330->state == DYING
                || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
                dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
                        __func__, __LINE__);
                return -EAGAIN;
        }

        /* If request for non-existing peripheral */
        if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
                dev_info(thrd->dmac->pinfo->dev,
                        "%s:%d Invalid peripheral(%u)!\n",
                        __func__, __LINE__, r->peri);
                return -EINVAL;
        }

        spin_lock_irqsave(&pl330->lock, flags);

        if (_queue_full(thrd)) {
                ret = -EAGAIN;
                goto xfer_exit;
        }

        /* Use last settings, if not provided */
        if (r->cfg) {
                /* Prefer Secure Channel */
                if (!_manager_ns(thrd))
                        r->cfg->nonsecure = 0;
                else
                        r->cfg->nonsecure = 1;

                ccr = _prepare_ccr(r->cfg);
        } else {
                ccr = readl(regs + CC(thrd->id));
        }

        /* If this req doesn't have valid xfer settings */
        if (!_is_valid(ccr)) {
                ret = -EINVAL;
                dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
                        __func__, __LINE__, ccr);
                goto xfer_exit;
        }

        idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

        xs.ccr = ccr;
        xs.r = r;

        /* First dry run to check if req is acceptable */
        ret = _setup_req(1, thrd, idx, &xs);
        if (ret < 0)
                goto xfer_exit;

        if (ret > pi->mcbufsz / 2) {
                dev_info(thrd->dmac->pinfo->dev,
                        "%s:%d Try increasing mcbufsz\n",
                        __func__, __LINE__);
                ret = -ENOMEM;
                goto xfer_exit;
        }

        /* Hook the request */
        thrd->lstenq = idx;
        thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
        thrd->req[idx].r = r;

        ret = 0;

xfer_exit:
        spin_unlock_irqrestore(&pl330->lock, flags);

        return ret;
}
static void pl330_dotask(unsigned long data)
{
        struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
        struct pl330_info *pi = pl330->pinfo;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&pl330->lock, flags);

        /* The DMAC itself has gone nuts */
        if (pl330->dmac_tbd.reset_dmac) {
                pl330->state = DYING;
                /* Reset the manager too */
                pl330->dmac_tbd.reset_mngr = true;
                /* Clear the reset flag */
                pl330->dmac_tbd.reset_dmac = false;
        }

        if (pl330->dmac_tbd.reset_mngr) {
                _stop(pl330->manager);
                /* Reset all channels */
                pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
                /* Clear the reset flag */
                pl330->dmac_tbd.reset_mngr = false;
        }

        for (i = 0; i < pi->pcfg.num_chan; i++) {

                if (pl330->dmac_tbd.reset_chan & (1 << i)) {
                        struct pl330_thread *thrd = &pl330->channels[i];
                        void __iomem *regs = pi->base;
                        enum pl330_op_err err;

                        _stop(thrd);

                        if (readl(regs + FSC) & (1 << thrd->id))
                                err = PL330_ERR_FAIL;
                        else
                                err = PL330_ERR_ABORT;

                        spin_unlock_irqrestore(&pl330->lock, flags);

                        _callback(thrd->req[1 - thrd->lstenq].r, err);
                        _callback(thrd->req[thrd->lstenq].r, err);

                        spin_lock_irqsave(&pl330->lock, flags);

                        thrd->req[0].r = NULL;
                        thrd->req[1].r = NULL;
                        mark_free(thrd, 0);
                        mark_free(thrd, 1);

                        /* Clear the reset flag */
                        pl330->dmac_tbd.reset_chan &= ~(1 << i);
                }
        }

        spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
        struct pl330_req *rqdone, *tmp;
        struct pl330_dmac *pl330;
        unsigned long flags;
        void __iomem *regs;
        u32 val;
        int id, ev, ret = 0;

        if (!pi || !pi->pl330_data)
                return 0;

        regs = pi->base;
        pl330 = pi->pl330_data;

        spin_lock_irqsave(&pl330->lock, flags);

        val = readl(regs + FSM) & 0x1;
        if (val)
                pl330->dmac_tbd.reset_mngr = true;
        else
                pl330->dmac_tbd.reset_mngr = false;

        val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
        pl330->dmac_tbd.reset_chan |= val;
        if (val) {
                int i = 0;
                while (i < pi->pcfg.num_chan) {
                        if (val & (1 << i)) {
                                dev_info(pi->dev,
                                        "Reset Channel-%d\t CS-%x FTC-%x\n",
                                        i, readl(regs + CS(i)),
                                        readl(regs + FTC(i)));
                                _stop(&pl330->channels[i]);
                        }
                        i++;
                }
        }

        /* Check which event happened, i.e., thread notified */
        val = readl(regs + ES);
        if (pi->pcfg.num_events < 32
                        && val & ~((1 << pi->pcfg.num_events) - 1)) {
                pl330->dmac_tbd.reset_dmac = true;
                dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
                ret = 1;
                goto updt_exit;
        }

        for (ev = 0; ev < pi->pcfg.num_events; ev++) {
                if (val & (1 << ev)) { /* Event occurred */
                        struct pl330_thread *thrd;
                        u32 inten = readl(regs + INTEN);
                        int active;

                        /* Clear the event */
                        if (inten & (1 << ev))
                                writel(1 << ev, regs + INTCLR);

                        ret = 1;

                        id = pl330->events[ev];

                        thrd = &pl330->channels[id];

                        active = thrd->req_running;
                        if (active == -1) /* Aborted */
                                continue;

                        /* Detach the req */
                        rqdone = thrd->req[active].r;
                        thrd->req[active].r = NULL;

                        mark_free(thrd, active);

                        /* Get going again ASAP */
                        _start(thrd);

                        /* For now, just make a list of callbacks to be done */
                        list_add_tail(&rqdone->rqd, &pl330->req_done);
                }
        }

        /* Now that we are in no hurry, do the callbacks */
        list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
                list_del(&rqdone->rqd);

                spin_unlock_irqrestore(&pl330->lock, flags);
                _callback(rqdone, PL330_ERR_NONE);
                spin_lock_irqsave(&pl330->lock, flags);
        }

updt_exit:
        spin_unlock_irqrestore(&pl330->lock, flags);

        if (pl330->dmac_tbd.reset_dmac
                        || pl330->dmac_tbd.reset_mngr
                        || pl330->dmac_tbd.reset_chan) {
                ret = 1;
                tasklet_schedule(&pl330->tasks);
        }

        return ret;
}
static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
{
        struct pl330_thread *thrd = ch_id;
        struct pl330_dmac *pl330;
        unsigned long flags;
        int ret = 0, active;

        if (!thrd || thrd->free || thrd->dmac->state == DYING)
                return -EINVAL;

        pl330 = thrd->dmac;
        active = thrd->req_running;

        spin_lock_irqsave(&pl330->lock, flags);

        switch (op) {
        case PL330_OP_FLUSH:
                /* Make sure the channel is stopped */
                _stop(thrd);

                thrd->req[0].r = NULL;
                thrd->req[1].r = NULL;
                mark_free(thrd, 0);
                mark_free(thrd, 1);
                break;

        case PL330_OP_ABORT:
                /* Make sure the channel is stopped */
                _stop(thrd);

                /* ABORT is only for the active req */
                if (active == -1)
                        break;

                thrd->req[active].r = NULL;
                mark_free(thrd, active);

                /* Start the next */
        case PL330_OP_START:
                if ((active == -1) && !_start(thrd))
                        ret = -EIO;
                break;

        default:
                ret = -EINVAL;
        }

        spin_unlock_irqrestore(&pl330->lock, flags);
        return ret;
}
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
        struct pl330_dmac *pl330 = thrd->dmac;
        struct pl330_info *pi = pl330->pinfo;
        int ev;

        for (ev = 0; ev < pi->pcfg.num_events; ev++)
                if (pl330->events[ev] == -1) {
                        pl330->events[ev] = thrd->id;
                        return ev;
                }

        return -1;
}
static bool _chan_ns(const struct pl330_info *pi, int i)
{
        return pi->pcfg.irq_ns & (1 << i);
}
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static void *pl330_request_channel(const struct pl330_info *pi)
{
        struct pl330_thread *thrd = NULL;
        struct pl330_dmac *pl330;
        unsigned long flags;
        int chans, i;

        if (!pi || !pi->pl330_data)
                return NULL;

        pl330 = pi->pl330_data;

        if (pl330->state == DYING)
                return NULL;

        chans = pi->pcfg.num_chan;

        spin_lock_irqsave(&pl330->lock, flags);

        for (i = 0; i < chans; i++) {
                thrd = &pl330->channels[i];
                if ((thrd->free) && (!_manager_ns(thrd) ||
                                        _chan_ns(pi, i))) {
                        thrd->ev = _alloc_event(thrd);
                        if (thrd->ev >= 0) {
                                thrd->free = false;
                                thrd->lstenq = 1;
                                thrd->req[0].r = NULL;
                                mark_free(thrd, 0);
                                thrd->req[1].r = NULL;
                                mark_free(thrd, 1);
                                break;
                        }
                }
                thrd = NULL;
        }

        spin_unlock_irqrestore(&pl330->lock, flags);

        return thrd;
}
/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
        struct pl330_dmac *pl330 = thrd->dmac;
        struct pl330_info *pi = pl330->pinfo;

        /* If the event is valid and was held by the thread */
        if (ev >= 0 && ev < pi->pcfg.num_events
                        && pl330->events[ev] == thrd->id)
                pl330->events[ev] = -1;
}
static void pl330_release_channel(void *ch_id)
{
        struct pl330_thread *thrd = ch_id;
        struct pl330_dmac *pl330;
        unsigned long flags;

        if (!thrd || thrd->free)
                return;

        _stop(thrd);

        _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
        _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

        pl330 = thrd->dmac;

        spin_lock_irqsave(&pl330->lock, flags);
        _free_event(thrd, thrd->ev);
        thrd->free = true;
        spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make the best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)
{
        void __iomem *regs = pi->base;
        u32 val;

        val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
        val &= CRD_DATA_WIDTH_MASK;
        pi->pcfg.data_bus_width = 8 * (1 << val);

        val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
        val &= CRD_DATA_BUFF_MASK;
        pi->pcfg.data_buf_dep = val + 1;

        val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
        val &= CR0_NUM_CHANS_MASK;
        val += 1;
        pi->pcfg.num_chan = val;

        val = readl(regs + CR0);
        if (val & CR0_PERIPH_REQ_SET) {
                val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
                val += 1;
                pi->pcfg.num_peri = val;
                pi->pcfg.peri_ns = readl(regs + CR4);
        } else {
                pi->pcfg.num_peri = 0;
        }

        val = readl(regs + CR0);
        if (val & CR0_BOOT_MAN_NS)
                pi->pcfg.mode |= DMAC_MODE_NS;
        else
                pi->pcfg.mode &= ~DMAC_MODE_NS;

        val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
        val &= CR0_NUM_EVENTS_MASK;
        val += 1;
        pi->pcfg.num_events = val;

        pi->pcfg.irq_ns = readl(regs + CR3);

        pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
        pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
}
static inline void _reset_thread(struct pl330_thread *thrd)
{
        struct pl330_dmac *pl330 = thrd->dmac;
        struct pl330_info *pi = pl330->pinfo;

        thrd->req[0].mc_cpu = pl330->mcode_cpu
                                + (thrd->id * pi->mcbufsz);
        thrd->req[0].mc_bus = pl330->mcode_bus
                                + (thrd->id * pi->mcbufsz);
        thrd->req[0].r = NULL;
        mark_free(thrd, 0);

        thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
                                + pi->mcbufsz / 2;
        thrd->req[1].mc_bus = thrd->req[0].mc_bus
                                + pi->mcbufsz / 2;
        thrd->req[1].r = NULL;
        mark_free(thrd, 1);
}
static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
        struct pl330_info *pi = pl330->pinfo;
        int chans = pi->pcfg.num_chan;
        struct pl330_thread *thrd;
        int i;

        /* Allocate 1 Manager and 'chans' Channel threads */
        pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
                                        GFP_KERNEL);
        if (!pl330->channels)
                return -ENOMEM;

        /* Init Channel threads */
        for (i = 0; i < chans; i++) {
                thrd = &pl330->channels[i];
                thrd->id = i;
                thrd->dmac = pl330;
                _reset_thread(thrd);
                thrd->free = true;
        }

        /* MANAGER is indexed at the end */
        thrd = &pl330->channels[chans];
        thrd->id = chans;
        thrd->dmac = pl330;
        thrd->free = false;
        pl330->manager = thrd;

        return 0;
}
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
        struct pl330_info *pi = pl330->pinfo;
        int chans = pi->pcfg.num_chan;
        int ret;

        /*
         * Alloc MicroCode buffer for 'chans' Channel threads.
         * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
         */
        pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
                                chans * pi->mcbufsz,
                                &pl330->mcode_bus, GFP_KERNEL);
        if (!pl330->mcode_cpu) {
                dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
                        __func__, __LINE__);
                return -ENOMEM;
        }

        ret = dmac_alloc_threads(pl330);
        if (ret) {
                dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
                        __func__, __LINE__);
                dma_free_coherent(pi->dev,
                                chans * pi->mcbufsz,
                                pl330->mcode_cpu, pl330->mcode_bus);
                return ret;
        }

        return 0;
}
static int pl330_add(struct pl330_info *pi)
{
        struct pl330_dmac *pl330;
        int i, ret;

        if (!pi || !pi->dev)
                return -EINVAL;

        /* If already added */
        if (pi->pl330_data)
                return -EINVAL;

        /*
         * If the SoC can perform reset on the DMAC, then do it
         * before reading its configuration.
         */
        if (pi->dmac_reset)
                pi->dmac_reset(pi);

        /* Check if we can handle this DMAC */
        if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
           || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
                dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
                        get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
                return -EINVAL;
        }

        /* Read the configuration of the DMAC */
        read_dmac_config(pi);

        if (pi->pcfg.num_events == 0) {
                dev_err(pi->dev, "%s:%d Can't work without events!\n",
                        __func__, __LINE__);
                return -EINVAL;
        }

        pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
        if (!pl330) {
                dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
                        __func__, __LINE__);
                return -ENOMEM;
        }

        /* Assign the info structure and private data */
        pl330->pinfo = pi;
        pi->pl330_data = pl330;

        spin_lock_init(&pl330->lock);

        INIT_LIST_HEAD(&pl330->req_done);

        /* Use default MC buffer size if not provided */
        if (!pi->mcbufsz)
                pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

        /* Mark all events as free */
        for (i = 0; i < pi->pcfg.num_events; i++)
                pl330->events[i] = -1;

        /* Allocate resources needed by the DMAC */
        ret = dmac_alloc_resources(pl330);
        if (ret) {
                dev_err(pi->dev, "Unable to create channels for DMAC\n");
                kfree(pl330);
                return ret;
        }

        tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

        pl330->state = INIT;

        return 0;
}
static int dmac_free_threads(struct pl330_dmac *pl330)
{
        struct pl330_info *pi = pl330->pinfo;
        int chans = pi->pcfg.num_chan;
        struct pl330_thread *thrd;
        int i;

        /* Release Channel threads */
        for (i = 0; i < chans; i++) {
                thrd = &pl330->channels[i];
                pl330_release_channel((void *)thrd);
        }

        /* Free memory */
        kfree(pl330->channels);

        return 0;
}
static void dmac_free_resources(struct pl330_dmac *pl330)
{
        struct pl330_info *pi = pl330->pinfo;
        int chans = pi->pcfg.num_chan;

        dmac_free_threads(pl330);

        dma_free_coherent(pi->dev, chans * pi->mcbufsz,
                                pl330->mcode_cpu, pl330->mcode_bus);
}
static void pl330_del(struct pl330_info *pi)
{
        struct pl330_dmac *pl330;

        if (!pi || !pi->pl330_data)
                return;

        pl330 = pi->pl330_data;

        pl330->state = UNINIT;

        tasklet_kill(&pl330->tasks);

        /* Free DMAC resources */
        dmac_free_resources(pl330);

        kfree(pl330);
        pi->pl330_data = NULL;
}
/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
        if (!ch)
                return NULL;

        return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct dma_pl330_desc, txd);
}
static inline void free_desc_list(struct list_head *list)
{
        struct dma_pl330_dmac *pdmac;
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = NULL;
        unsigned long flags;

        /* Finish off the work list */
        list_for_each_entry(desc, list, node) {
                dma_async_tx_callback callback;
                void *param;

                /* All desc in a list belong to same channel */
                pch = desc->pchan;
                desc->status = FREE;

                callback = desc->txd.callback;
                param = desc->txd.callback_param;

                if (callback)
                        callback(param);

                desc->pchan = NULL;
        }

        /* pch will be unset if list was empty */
        if (!pch)
                return;

        pdmac = pch->dmac;

        spin_lock_irqsave(&pdmac->pool_lock, flags);
        list_splice_tail_init(list, &pdmac->desc_pool);
        spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
static inline void handle_cyclic_desc_list(struct list_head *list)
{
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = NULL;
        unsigned long flags;

        list_for_each_entry(desc, list, node) {
                dma_async_tx_callback callback;

                /* Change status to reload it */
                desc->status = PREP;
                pch = desc->pchan;
                callback = desc->txd.callback;
                if (callback)
                        callback(desc->txd.callback_param);
        }

        /* pch will be unset if list was empty */
        if (!pch)
                return;

        spin_lock_irqsave(&pch->lock, flags);
        list_splice_tail_init(list, &pch->work_list);
        spin_unlock_irqrestore(&pch->lock, flags);
}
static inline void fill_queue(struct dma_pl330_chan *pch)
{
        struct dma_pl330_desc *desc;
        int ret;

        list_for_each_entry(desc, &pch->work_list, node) {

                /* If already submitted */
                if (desc->status == BUSY)
                        continue;

                ret = pl330_submit_req(pch->pl330_chid,
                                                &desc->req);
                if (!ret) {
                        desc->status = BUSY;
                } else if (ret == -EAGAIN) {
                        /* QFull or DMAC Dying */
                        break;
                } else {
                        /* Unacceptable request */
                        desc->status = DONE;
                        dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
                                __func__, __LINE__, desc->txd.cookie);
                        tasklet_schedule(&pch->task);
                }
        }
}
static void pl330_tasklet(unsigned long data)
{
        struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
        struct dma_pl330_desc *desc, *_dt;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&pch->lock, flags);

        /* Pick up ripe tomatoes */
        list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
                if (desc->status == DONE) {
                        if (!pch->cyclic)
                                dma_cookie_complete(&desc->txd);
                        list_move_tail(&desc->node, &list);
                }

        /* Try to submit a req imm. next to the last completed cookie */
        fill_queue(pch);

        /* Make sure the PL330 Channel thread is active */
        pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

        spin_unlock_irqrestore(&pch->lock, flags);

        if (pch->cyclic)
                handle_cyclic_desc_list(&list);
        else
                free_desc_list(&list);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
        struct dma_pl330_desc *desc = token;
        struct dma_pl330_chan *pch = desc->pchan;
        unsigned long flags;

        /* If desc aborted */
        if (!pch)
                return;

        spin_lock_irqsave(&pch->lock, flags);

        desc->status = DONE;

        spin_unlock_irqrestore(&pch->lock, flags);

        tasklet_schedule(&pch->task);
}
static bool pl330_dt_filter(struct dma_chan *chan, void *param)
{
        struct dma_pl330_filter_args *fargs = param;

        if (chan->device != &fargs->pdmac->ddma)
                return false;

        return (chan->chan_id == fargs->chan_id);
}
bool pl330_filter(struct dma_chan *chan, void *param)
{
        u8 *peri_id;

        if (chan->device->dev->driver != &pl330_driver.drv)
                return false;

        peri_id = chan->private;
        return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);
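/*
 * Illustration (hypothetical client code): a slave driver typically
 * passes this filter to the DMA engine core when grabbing a channel,
 * e.g.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter,
 *				   (void *)peri_id);
 *
 * where 'peri_id' is the peripheral request-interface number that the
 * board code registered in dma_pl330_platdata->peri_id[].
 */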
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct dma_pl330_dmac *pdmac = pch->dmac;
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);

        dma_cookie_init(chan);
        pch->cyclic = false;

        pch->pl330_chid = pl330_request_channel(&pdmac->pif);
        if (!pch->pl330_chid) {
                spin_unlock_irqrestore(&pch->lock, flags);
                return -ENOMEM;
        }

        tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

        spin_unlock_irqrestore(&pch->lock, flags);

        return 1;
}
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct dma_pl330_desc *desc, *_dt;
        unsigned long flags;
        struct dma_pl330_dmac *pdmac = pch->dmac;
        struct dma_slave_config *slave_config;
        LIST_HEAD(list);

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                spin_lock_irqsave(&pch->lock, flags);

                /* FLUSH the PL330 Channel thread */
                pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

                /* Mark all desc done */
                list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
                        desc->status = DONE;
                        list_move_tail(&desc->node, &list);
                }

                list_splice_tail_init(&list, &pdmac->desc_pool);
                spin_unlock_irqrestore(&pch->lock, flags);
                break;
        case DMA_SLAVE_CONFIG:
                slave_config = (struct dma_slave_config *)arg;

                if (slave_config->direction == DMA_MEM_TO_DEV) {
                        if (slave_config->dst_addr)
                                pch->fifo_addr = slave_config->dst_addr;
                        if (slave_config->dst_addr_width)
                                pch->burst_sz = __ffs(slave_config->dst_addr_width);
                        if (slave_config->dst_maxburst)
                                pch->burst_len = slave_config->dst_maxburst;
                } else if (slave_config->direction == DMA_DEV_TO_MEM) {
                        if (slave_config->src_addr)
                                pch->fifo_addr = slave_config->src_addr;
                        if (slave_config->src_addr_width)
                                pch->burst_sz = __ffs(slave_config->src_addr_width);
                        if (slave_config->src_maxburst)
                                pch->burst_len = slave_config->src_maxburst;
                }
                break;
        default:
                dev_err(pch->dmac->pif.dev, "Not supported command.\n");
                return -ENXIO;
        }

        return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);

        tasklet_kill(&pch->task);

        pl330_release_channel(pch->pl330_chid);
        pch->pl330_chid = NULL;

        if (pch->cyclic)
                list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

        spin_unlock_irqrestore(&pch->lock, flags);
}
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        return dma_cookie_status(chan, cookie, txstate);
}
static void pl330_issue_pending(struct dma_chan *chan)
{
        pl330_tasklet((unsigned long) to_pchan(chan));
}
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dma_pl330_desc *desc, *last = to_desc(tx);
        struct dma_pl330_chan *pch = to_pchan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);

        /* Assign cookies to all nodes */
        while (!list_empty(&last->node)) {
                desc = list_entry(last->node.next, struct dma_pl330_desc, node);

                dma_cookie_assign(&desc->txd);

                list_move_tail(&desc->node, &pch->work_list);
        }

        cookie = dma_cookie_assign(&last->txd);
        list_add_tail(&last->node, &pch->work_list);
        spin_unlock_irqrestore(&pch->lock, flags);

        return cookie;
}
static inline void _init_desc(struct dma_pl330_desc *desc)
{
        desc->req.x = &desc->px;
        desc->req.token = desc;
        desc->rqcfg.swap = SWAP_NO;
        desc->rqcfg.privileged = 0;
        desc->rqcfg.insnaccess = 0;
        desc->rqcfg.scctl = SCCTRL0;
        desc->rqcfg.dcctl = DCCTRL0;
        desc->req.cfg = &desc->rqcfg;
        desc->req.xfer_cb = dma_pl330_rqcb;
        desc->txd.tx_submit = pl330_tx_submit;

        INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
        struct dma_pl330_desc *desc;
        unsigned long flags;
        int i;

        if (!pdmac)
                return 0;

        desc = kmalloc(count * sizeof(*desc), flg);
        if (!desc)
                return 0;

        spin_lock_irqsave(&pdmac->pool_lock, flags);

        for (i = 0; i < count; i++) {
                _init_desc(&desc[i]);
                list_add_tail(&desc[i].node, &pdmac->desc_pool);
        }

        spin_unlock_irqrestore(&pdmac->pool_lock, flags);

        return count;
}
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
        struct dma_pl330_desc *desc = NULL;
        unsigned long flags;

        if (!pdmac)
                return NULL;

        spin_lock_irqsave(&pdmac->pool_lock, flags);

        if (!list_empty(&pdmac->desc_pool)) {
                desc = list_entry(pdmac->desc_pool.next,
                                struct dma_pl330_desc, node);

                list_del_init(&desc->node);

                desc->status = PREP;
                desc->txd.callback = NULL;
        }

        spin_unlock_irqrestore(&pdmac->pool_lock, flags);

        return desc;
}
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
        struct dma_pl330_dmac *pdmac = pch->dmac;
        u8 *peri_id = pch->chan.private;
        struct dma_pl330_desc *desc;

        /* Pluck one desc from the pool of DMAC */
        desc = pluck_desc(pdmac);

        /* If the DMAC pool is empty, alloc new */
        if (!desc) {
                if (!add_desc(pdmac, GFP_ATOMIC, 1))
                        return NULL;

                /* Try again */
                desc = pluck_desc(pdmac);
                if (!desc) {
                        dev_err(pch->dmac->pif.dev,
                                "%s:%d ALERT!\n", __func__, __LINE__);
                        return NULL;
                }
        }

        /* Initialize the descriptor */
        desc->pchan = pch;
        desc->txd.cookie = 0;
        async_tx_ack(&desc->txd);

        desc->req.peri = peri_id ? pch->chan.chan_id : 0;
        desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;

        dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

        return desc;
}
static inline void fill_px(struct pl330_xfer *px,
                dma_addr_t dst, dma_addr_t src, size_t len)
{
        px->next = NULL;
        px->bytes = len;
        px->dst_addr = dst;
        px->src_addr = src;
}
static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
                dma_addr_t src, size_t len)
{
        struct dma_pl330_desc *desc = pl330_get_desc(pch);

        if (!desc) {
                dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
                        __func__, __LINE__);
                return NULL;
        }

        /*
         * Ideally we should lookout for reqs bigger than
         * those that can be programmed with 256 bytes of
         * MC buffer, but considering a req size is seldom
         * going to be word-unaligned and more than 200MB,
         * we take it easy.
         * Also, should the limit be reached we'd rather
         * have the platform increase MC buffer size than
         * complicate this API driver.
         */
        fill_px(&desc->px, dst, src, len);

        return desc;
}
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
        struct dma_pl330_chan *pch = desc->pchan;
        struct pl330_info *pi = &pch->dmac->pif;
        int burst_len;

        burst_len = pi->pcfg.data_bus_width / 8;
        burst_len *= pi->pcfg.data_buf_dep;
        burst_len >>= desc->rqcfg.brst_size;

        /* src/dst_burst_len can't be more than 16 */
        if (burst_len > 16)
                burst_len = 16;

        while (burst_len > 1) {
                if (!(len % (burst_len << desc->rqcfg.brst_size)))
                        break;
                burst_len--;
        }

        return burst_len;
}
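/*
 * Worked example (illustrative): on a DMAC with a 64-bit data bus and
 * a 16-line data buffer, a descriptor using 4-byte beats
 * (brst_size = 2) starts from 8 * 16 >> 2 = 32, is clamped to the
 * hardware maximum of 16 beats, and is then reduced until it evenly
 * divides the transfer length.
 */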
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = to_pchan(chan);
        dma_addr_t dst;
        dma_addr_t src;

        desc = pl330_get_desc(pch);
        if (!desc) {
                dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
                        __func__, __LINE__);
                return NULL;
        }

        switch (direction) {
        case DMA_MEM_TO_DEV:
                desc->rqcfg.src_inc = 1;
                desc->rqcfg.dst_inc = 0;
                desc->req.rqtype = MEMTODEV;
                src = dma_addr;
                dst = pch->fifo_addr;
                break;
        case DMA_DEV_TO_MEM:
                desc->rqcfg.src_inc = 0;
                desc->rqcfg.dst_inc = 1;
                desc->req.rqtype = DEVTOMEM;
                src = pch->fifo_addr;
                dst = dma_addr;
                break;
        default:
                dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
                        __func__, __LINE__);
                return NULL;
        }

        desc->rqcfg.brst_size = pch->burst_sz;
        desc->rqcfg.brst_len = 1;

        pch->cyclic = true;

        fill_px(&desc->px, dst, src, period_len);

        return &desc->txd;
}
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                dma_addr_t src, size_t len, unsigned long flags)
{
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct pl330_info *pi;
        int burst;

        if (unlikely(!pch || !len))
                return NULL;

        pi = &pch->dmac->pif;

        desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
        if (!desc)
                return NULL;

        desc->rqcfg.src_inc = 1;
        desc->rqcfg.dst_inc = 1;
        desc->req.rqtype = MEMTOMEM;

        /* Select max possible burst size */
        burst = pi->pcfg.data_bus_width / 8;

        while (burst > 1) {
                if (!(len % burst))
                        break;
                burst /= 2;
        }

        desc->rqcfg.brst_size = 0;
        while (burst != (1 << desc->rqcfg.brst_size))
                desc->rqcfg.brst_size++;
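        /*
         * At this point 'burst' is the largest power-of-two beat size
         * (in bytes) that both fits the data bus and divides 'len'
         * evenly; the loop above merely converts it to the log2
         * encoding that the CCR burst-size field expects.
         */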
        desc->rqcfg.brst_len = get_burst_len(desc, len);

        desc->txd.flags = flags;

        return &desc->txd;
}
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flg, void *context)
{
        struct dma_pl330_desc *first, *desc = NULL;
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct scatterlist *sg;
        unsigned long flags;
        int i;
        dma_addr_t addr;

        if (unlikely(!pch || !sgl || !sg_len))
                return NULL;

        addr = pch->fifo_addr;

        first = NULL;

        for_each_sg(sgl, sg, sg_len, i) {

                desc = pl330_get_desc(pch);
                if (!desc) {
                        struct dma_pl330_dmac *pdmac = pch->dmac;

                        dev_err(pch->dmac->pif.dev,
                                "%s:%d Unable to fetch desc\n",
                                __func__, __LINE__);
                        if (!first)
                                return NULL;

                        spin_lock_irqsave(&pdmac->pool_lock, flags);

                        while (!list_empty(&first->node)) {
                                desc = list_entry(first->node.next,
                                                struct dma_pl330_desc, node);
                                list_move_tail(&desc->node, &pdmac->desc_pool);
                        }

                        list_move_tail(&first->node, &pdmac->desc_pool);

                        spin_unlock_irqrestore(&pdmac->pool_lock, flags);

                        return NULL;
                }

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->node);

                if (direction == DMA_MEM_TO_DEV) {
                        desc->rqcfg.src_inc = 1;
                        desc->rqcfg.dst_inc = 0;
                        desc->req.rqtype = MEMTODEV;
                        fill_px(&desc->px,
                                addr, sg_dma_address(sg), sg_dma_len(sg));
                } else {
                        desc->rqcfg.src_inc = 0;
                        desc->rqcfg.dst_inc = 1;
                        desc->req.rqtype = DEVTOMEM;
                        fill_px(&desc->px,
                                sg_dma_address(sg), addr, sg_dma_len(sg));
                }

                desc->rqcfg.brst_size = pch->burst_sz;
                desc->rqcfg.brst_len = 1;
        }

        /* Return the last desc in the chain */
        desc->txd.flags = flg;
        return &desc->txd;
}
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
        if (pl330_update(data))
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
        struct dma_pl330_platdata *pdat;
        struct dma_pl330_dmac *pdmac;
        struct dma_pl330_chan *pch;
        struct pl330_info *pi;
        struct dma_device *pd;
        struct resource *res;
        int i, ret, irq;
        int num_chan;

        pdat = adev->dev.platform_data;

        /* Allocate a new DMAC and its Channels */
        pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
        if (!pdmac) {
                dev_err(&adev->dev, "unable to allocate mem\n");
                return -ENOMEM;
        }

        pi = &pdmac->pif;
        pi->dev = &adev->dev;
        pi->pl330_data = NULL;
        pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

        res = &adev->res;
        pi->base = devm_request_and_ioremap(&adev->dev, res);
        if (!pi->base)
                return -ENXIO;

        amba_set_drvdata(adev, pdmac);

        irq = adev->irq[0];
        ret = request_irq(irq, pl330_irq_handler, 0,
                        dev_name(&adev->dev), pi);
        if (ret)
                return ret;

        ret = pl330_add(pi);
        if (ret)
                goto probe_err1;

        INIT_LIST_HEAD(&pdmac->desc_pool);
        spin_lock_init(&pdmac->pool_lock);

        /* Create a descriptor pool of default size */
        if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
                dev_warn(&adev->dev, "unable to allocate desc\n");

        pd = &pdmac->ddma;
        INIT_LIST_HEAD(&pd->channels);

        /* Initialize channel parameters */
        if (pdat)
                num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
        else
                num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);

        pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
        if (!pdmac->peripherals) {
                ret = -ENOMEM;
                dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
                goto probe_err2;
        }

        for (i = 0; i < num_chan; i++) {
                pch = &pdmac->peripherals[i];
                if (!adev->dev.of_node)
                        pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
                else
                        pch->chan.private = adev->dev.of_node;

                INIT_LIST_HEAD(&pch->work_list);
                spin_lock_init(&pch->lock);
                pch->pl330_chid = NULL;
                pch->chan.device = pd;
                pch->dmac = pdmac;

                /* Add the channel to the DMAC list */
                list_add_tail(&pch->chan.device_node, &pd->channels);
        }

        pd->dev = &adev->dev;
        if (pdat) {
                pd->cap_mask = pdat->cap_mask;
        } else {
                dma_cap_set(DMA_MEMCPY, pd->cap_mask);
                if (pi->pcfg.num_peri) {
                        dma_cap_set(DMA_SLAVE, pd->cap_mask);
                        dma_cap_set(DMA_CYCLIC, pd->cap_mask);
                        dma_cap_set(DMA_PRIVATE, pd->cap_mask);
                }
        }

        pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
        pd->device_free_chan_resources = pl330_free_chan_resources;
        pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
        pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
        pd->device_tx_status = pl330_tx_status;
        pd->device_prep_slave_sg = pl330_prep_slave_sg;
        pd->device_control = pl330_control;
        pd->device_issue_pending = pl330_issue_pending;

        ret = dma_async_device_register(pd);
        if (ret) {
                dev_err(&adev->dev, "unable to register DMAC\n");
                goto probe_err2;
        }

        dev_info(&adev->dev,
                "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
        dev_info(&adev->dev,
                "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
                pi->pcfg.data_buf_dep,
                pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
                pi->pcfg.num_peri, pi->pcfg.num_events);

        return 0;

probe_err2:
        pl330_del(pi);
probe_err1:
        free_irq(irq, pi);

        return ret;
}
static int __devexit pl330_remove(struct amba_device *adev)
{
        struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
        struct dma_pl330_chan *pch, *_p;
        struct pl330_info *pi;
        int irq;

        if (!pdmac)
                return 0;

        amba_set_drvdata(adev, NULL);

        /* Idle the DMAC */
        list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
                        chan.device_node) {

                /* Remove the channel */
                list_del(&pch->chan.device_node);

                /* Flush the channel */
                pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
                pl330_free_chan_resources(&pch->chan);
        }

        pi = &pdmac->pif;

        irq = adev->irq[0];
        free_irq(irq, pi);

        pl330_del(pi);

        return 0;
}
static struct amba_id pl330_ids[] = {
        {
                .id	= 0x00041330,
                .mask	= 0x000fffff,
        },
        { 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);
static struct amba_driver pl330_driver = {
        .drv = {
                .owner = THIS_MODULE,
                .name = "dma-pl330",
        },
        .id_table = pl330_ids,
        .probe = pl330_probe,
        .remove = pl330_remove,
};

module_amba_driver(pl330_driver);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");