/*
 * Copyright (C) 2012 Spreadtrum Communications Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include "dmaengine.h"

#include <soc/sprd/sci_glb_regs.h>
#include <soc/sprd/sci.h>
#include <soc/sprd/hardware.h>
/* SoC header providing struct sprd_dma_cfg, the dma_* enums and the
 * bit-field offsets/masks used below (exact path assumed; the original
 * include line was elided) */
#include <soc/sprd/dma.h>
/* debug trace helper; guard symbol assumed, the original #ifdef line
 * was elided between the two conflicting definitions */
#ifdef DMA_DEBUG
#define pr_dma(fmt, ...) printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_dma(fmt, ...)
#endif
#define STANDARD_DMA_NUM	(24)
#define MPC_DMA_DESCRIPTORS	(64)
#define DMA_CHN_OFFSET		(0x40)
#define DMA_MEMCPY_MIN_SIZE	(64)
#define DMA_CFG_COUNT		(MPC_DMA_DESCRIPTORS)
#define DMA_CHx_OFFSET		(0x40)
/* per-UID request-channel-ID register: base + 0x2000 + 4 * (uid - 1) */
#define SPRD_DMA_REQ_CID(base, uid) \
	((unsigned long)(base) + 0x2000 + 0x4 * ((uid) - 1))
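/*
 * Address-map summary, reconstructed from the offsets used in this
 * file (not from a datasheet): each controller's global registers sit
 * at its ioremapped base, the per-channel register files at
 * base + 0x1000 + DMA_CHN_OFFSET * chn (see probe), and the per-UID
 * request-CID table at base + 0x2000 + 4 * (uid - 1) (macro above).
 */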
/* dma channel register definition; the elided fields are restored from
 * the register dump order in __dma_cfg_check_register() below */
struct sprd_dma_chn_reg {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	/* only full chn have following regs */
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};
/* dma request description */
struct sprd_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct sprd_dma_chn_reg		*dma_chn_reg;
	dma_addr_t			dma_chn_reg_paddr;
	struct list_head		node;
	struct list_head		next_node;
	int				cycle;	/* cyclic (audio) request, never completes */
	int				done;	/* set by __dma_check_mdesc_done(); both flags restored by inference */
};
/* dma channel description; elided members restored from their uses below */
struct sprd_dma_chn {
	struct dma_chan		chan;
	/* descriptor lifecycle lists, in order of travel */
	struct list_head	free;
	struct list_head	prepared;
	struct list_head	queued;
	struct list_head	active;
	struct list_head	completed;
	spinlock_t		chn_lock;
	int			chan_num;
	u32			dev_id;
	enum dma_chn_status	chan_status;
	void __iomem		*dma_chn_base;
	void __iomem		*dma_desc;
	dma_addr_t		dma_desc_paddr;
	enum dma_chn_type	chn_type;
	enum request_mode	re_mode;
	int			irq_handle_enable;
};
/* dma device description; elided members restored from their uses below */
struct sprd_dma_dev {
	struct dma_device	dma_dev;
	spinlock_t		dma_lock;
	void __iomem		*dma_glb_base;
	void __iomem		*aon_dma_glb_base;
	int			irq;
	int			aon_irq;
	struct tasklet_struct	tasklet;
	struct kmem_cache	*dma_desc_node_cachep;
	u32			ap_chn_cnt;
	u32			aon_chn_cnt;
	struct sprd_dma_chn	channels[0];
};
/* staged channel configurations handed in via DMA_SLAVE_CONFIG */
struct dma_cfg_group_t {
	struct semaphore	cfg_sema;
	int			dma_cfg_cnt;
	struct sprd_dma_cfg	dma_cfg[DMA_CFG_COUNT];
};

static struct dma_cfg_group_t dma_cfg_group;
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};
/* dma debug function: dump one channel register file plus the global
 * interrupt/enable state (error paths restored by inference) */
static int __dma_cfg_check_register(void __iomem *dma_reg_addr)
{
	volatile struct sprd_dma_chn_reg *dma_reg =
		(volatile struct sprd_dma_chn_reg *)dma_reg_addr;
	volatile struct sprd_dma_glb_reg *aon_dma_glb_reg;
	struct device_node *dma_node;
	struct platform_device *dma_dev;
	struct sprd_dma_dev *sdev;

	dma_node = of_find_compatible_node(NULL, NULL, "sprd,sharkl64-dma");
	if (!dma_node) {
		pr_warn("Can't get the dmac node!\n");
		return -ENODEV;
	}

	dma_dev = of_find_device_by_node(dma_node);
	if (!dma_dev) {
		pr_warn("Can't get the dma_dev!\n");
		return -ENODEV;
	}

	sdev = platform_get_drvdata(dma_dev);
	if (!sdev) {
		pr_warn("Can't get the sdev!\n");
		return -ENODEV;
	}

	aon_dma_glb_reg = sdev->aon_dma_glb_base;

	pr_dma("------------------------------------------------------------>>>\n");
	pr_dma("DMA register:\n pause=0x%x,\n req=0x%x,\n cfg=0x%x,\n int=0x%x,\n src_addr=0x%x,\n"
	       "des_addr=0x%x,\n frg_len=0x%x,\n blk_len=0x%x,\n trsc_len=0x%x,\n trsf_step=0x%x,\n"
	       "wrap_ptr=0x%x,\n wrap_to=0x%x,\n llist_ptr=0x%x,\n frg_step=0x%x,\n src_blk_step=0x%x,\n"
	       "des_blk_step=0x%x,\n", dma_reg->pause, dma_reg->req, dma_reg->cfg, dma_reg->intc,
	       dma_reg->src_addr, dma_reg->des_addr, dma_reg->frg_len, dma_reg->blk_len, dma_reg->trsc_len,
	       dma_reg->trsf_step, dma_reg->wrap_ptr, dma_reg->wrap_to, dma_reg->llist_ptr, dma_reg->frg_step,
	       dma_reg->src_blk_step, dma_reg->des_blk_step);

	pr_dma("Global reg:INTC2 aon dma irq raw status=0x%x,dma irq raw status=0x%x!\n",
	       sci_glb_read((unsigned long)(SPRD_INTC2_BASE + 0x4), BIT(6)),
	       0/*sci_glb_read((unsigned long)(SPRD_INTC1_BASE+0x4), BIT(18))*/);

	pr_dma("Global reg:INTC_EB_b15=0x%x,DMA_INT_AP_EN_b0=0x%x,INTC2_DMA_EN_b6=0x%x,APB_INTC2_EB_b21=0x%x!\n",
	       sci_glb_read((unsigned long)REG_AON_APB_APB_EB0, BIT_INTC_EB),
	       sci_glb_read((unsigned long)REG_AON_APB_AON_DMA_INT_EN, BIT_AON_DMA_INT_AP_EN),
	       sci_glb_read((unsigned long)(SPRD_INTC2_BASE + 0x8), BIT(6)),
	       sci_glb_read((unsigned long)REG_AP_APB_APB_EB, BIT(21)));

	/* guard added: aon_dma_glb_base may be NULL on ap-only parts */
	if (aon_dma_glb_reg)
		pr_dma("AON global debug reg:debug sts=0x%x,pause=0x%x,req_sts=0x%x,en_sts=0x%x,arb_sel_sts=0x%x!\n",
		       aon_dma_glb_reg->debug_sts, aon_dma_glb_reg->pause, aon_dma_glb_reg->req_sts,
		       aon_dma_glb_reg->en_sts, aon_dma_glb_reg->arb_sel_sts);
	pr_dma("<<<------------------------------------------------------------\n");

	return 0;
}
static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sprd_dma_chn, chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(c);

	return container_of(mchan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sprd_dma_desc, desc);
}
static inline void __ap_dma_enable(void)
{
	if (!sci_glb_read((unsigned long)REG_AP_AHB_AHB_EB, BIT_DMA_EB))
		sci_glb_set((unsigned long)REG_AP_AHB_AHB_EB, BIT_DMA_EB);
}

static inline void __ap_dma_disable(void)
{
	if (sci_glb_read((unsigned long)REG_AP_AHB_AHB_EB, BIT_DMA_EB))
		sci_glb_clr((unsigned long)REG_AP_AHB_AHB_EB, BIT_DMA_EB);
}

static inline void __ap_dma_softreset(void)
{
	sci_glb_set(REG_AP_AHB_AHB_RST, BIT_DMA_SOFT_RST);
	udelay(1);	/* hold reset briefly; the exact delay was elided in source */
	sci_glb_clr(REG_AP_AHB_AHB_RST, BIT_DMA_SOFT_RST);
}
#ifdef CONFIG_AON_DMA_SPRD
static inline void __aon_dma_enable(void)
{
	if (!sci_glb_read((unsigned long)REG_AON_APB_APB_EB1, BIT_AON_DMA_EB))
		sci_glb_set((unsigned long)REG_AON_APB_APB_EB1, BIT_AON_DMA_EB);
}

static inline void __aon_dma_disable(void)
{
	if (sci_glb_read((unsigned long)REG_AON_APB_APB_EB1, BIT_AON_DMA_EB))
		sci_glb_clr((unsigned long)REG_AON_APB_APB_EB1, BIT_AON_DMA_EB);
}

static inline void __aon_dma_reset(void)
{
	sci_glb_set(REG_AON_APB_APB_RST1, BIT_AON_DMA_SOFT_RST);
	udelay(1);	/* hold reset briefly; the exact delay was elided in source */
	sci_glb_clr(REG_AON_APB_APB_RST1, BIT_AON_DMA_SOFT_RST);
}

static inline void __aon_dma_int_enable(void)
{
	/* unmask the AON DMA line in INTC2, then enable the AP-side int */
	if (!sci_glb_read((unsigned long)(SPRD_INTC2_BASE + 0x8), BIT(6)))
		sci_glb_set((unsigned long)(SPRD_INTC2_BASE + 0x8), BIT(6));

	if (!sci_glb_read((unsigned long)REG_AON_APB_AON_DMA_INT_EN, BIT_AON_DMA_INT_AP_EN))
		sci_glb_set((unsigned long)REG_AON_APB_AON_DMA_INT_EN, BIT_AON_DMA_INT_AP_EN);
}

static inline void __aon_dma_int_disable(void)
{
	if (sci_glb_read((unsigned long)REG_AON_APB_AON_DMA_INT_EN, BIT_AON_DMA_INT_AP_EN))
		sci_glb_clr((unsigned long)REG_AON_APB_AON_DMA_INT_EN, BIT_AON_DMA_INT_AP_EN);
}
#else
static inline void __aon_dma_enable(void) { }
static inline void __aon_dma_disable(void) { }
static inline void __aon_dma_reset(void) { }
static inline void __aon_dma_int_enable(void) { }
static inline void __aon_dma_int_disable(void) { }
#endif
static inline void __dma_set_uid(struct sprd_dma_dev *sdev,
				 struct sprd_dma_chn *mchan, u32 dev_id)
{
	u32 ap_chn_cnt = sdev->ap_chn_cnt;

	if (DMA_UID_SOFTWARE != dev_id) {
		/* route the hardware request (uid) to this channel: ap
		 * channels program the ap controller, aon channels the aon
		 * controller (whose channel numbers restart from 1) */
		if (mchan->chan_num < ap_chn_cnt)
			writel_relaxed((mchan->chan_num + 1),
				(void __iomem *)SPRD_DMA_REQ_CID(sdev->dma_glb_base, dev_id));
		else
			writel_relaxed((mchan->chan_num - ap_chn_cnt + 1),
				(void __iomem *)SPRD_DMA_REQ_CID(sdev->aon_dma_glb_base, dev_id));
	}
}

static inline void __dma_unset_uid(struct sprd_dma_dev *sdev,
				   struct sprd_dma_chn *mchan, u32 dev_id)
{
	u32 ap_chn_cnt = sdev->ap_chn_cnt;

	if (DMA_UID_SOFTWARE != dev_id) {
		if (mchan->chan_num < ap_chn_cnt)
			writel_relaxed(0x0, (void __iomem *)SPRD_DMA_REQ_CID(sdev->dma_glb_base, dev_id));
		else
			writel_relaxed(0x0, (void __iomem *)SPRD_DMA_REQ_CID(sdev->aon_dma_glb_base, dev_id));
	}
}
static inline void __dma_int_clr(struct sprd_dma_chn *mchan)
{
	volatile struct sprd_dma_chn_reg *dma_reg =
		(volatile struct sprd_dma_chn_reg *)mchan->dma_chn_base;

	/* write-1-to-clear all five interrupt status bits */
	dma_reg->intc |= 0x1f << 24;
}

static inline void __dma_int_dis(struct sprd_dma_chn *mchan)
{
	volatile struct sprd_dma_chn_reg *dma_reg =
		(volatile struct sprd_dma_chn_reg *)mchan->dma_chn_base;

	/* clear pending status, then mask all interrupt enables */
	dma_reg->intc |= 0x1f << 24;
	dma_reg->intc &= ~0x1f;
}

static inline void __dma_chn_enable(struct sprd_dma_chn *mchan)
{
	volatile struct sprd_dma_chn_reg *dma_reg =
		(volatile struct sprd_dma_chn_reg *)mchan->dma_chn_base;

	/* body restored by inference: cfg bit 0 is the channel-enable bit
	 * tested and cleared in __dma_stop_and_disable() */
	dma_reg->cfg |= 0x1;
}

static inline void __dma_soft_request(struct sprd_dma_chn *mchan)
{
	volatile struct sprd_dma_chn_reg *dma_reg =
		(volatile struct sprd_dma_chn_reg *)mchan->dma_chn_base;

	/* body restored by inference: kick the channel via the req register */
	dma_reg->req |= 0x1;
}
static void __dma_stop_and_disable(struct sprd_dma_chn *mchan)
{
	u32 timeout = 0x2000;
	volatile struct sprd_dma_chn_reg *dma_reg =
		(volatile struct sprd_dma_chn_reg *)mchan->dma_chn_base;
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&mchan->chan);
	u32 ap_chn_cnt = sdev->ap_chn_cnt;

	if (!(dma_reg->cfg & 0x1))
		return;

	dma_reg->pause |= 0x1;

	/* fixme, need to deal with timeout */
	while (!(dma_reg->pause & (0x1 << 16))) {
		/* bail-out path restored by inference: if pause is never
		 * acknowledged, fall back to a controller soft reset */
		if (--timeout == 0) {
			if (mchan->chan_num >= ap_chn_cnt)
				__aon_dma_reset();
			else
				__ap_dma_softreset();
			break;
		}
	}

	dma_reg->cfg &= ~0x1;
	dma_reg->pause = 0x0;
}
/* read back the channel's current source address register (offset 0x10) */
static unsigned long __dma_get_src_addr(struct dma_chan *dma_chn)
{
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(dma_chn);
	unsigned long addr = (unsigned long)mchan->dma_chn_base + 0x10;

	return readl_relaxed((void __iomem *)addr);
}

/* read back the channel's current destination address register (offset 0x14) */
static unsigned long __dma_get_dst_addr(struct dma_chan *dma_chn)
{
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(dma_chn);
	unsigned long addr = (unsigned long)mchan->dma_chn_base + 0x14;

	return readl_relaxed((void __iomem *)addr);
}
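/*
 * __dma_config() translates one struct sprd_dma_cfg into a channel
 * register image, either into a descriptor's staged copy (CONFIG_DESC)
 * or straight into a linklist node (CONFIG_LINKLIST). The address-fix,
 * wrap and linklist enables are derived from the cfg fields rather
 * than passed explicitly: a zero step on exactly one side selects fix
 * mode, a wrap_to matching src/des selects the wrap direction, and a
 * non-zero linklist_ptr enables list mode.
 */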
static int __dma_config(struct dma_chan *chan, struct sprd_dma_desc *mdesc,
			struct sprd_dma_cfg *cfg_list, struct sprd_dma_chn_reg *dma_reg_addr,
			enum config_type type)
{
	volatile struct sprd_dma_chn_reg *dma_reg;
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
	struct sprd_dma_cfg *dma_cfg_tmp = cfg_list;
	u32 fix_mode = 0, llist_en = 0, wrap_en = 0;
	u32 list_end = 0, fix_en = 0, irq_mode = 0, wrap_mode = 0;
	enum dma_chn_type chn_type;

	/* check dma fix mode: exactly one zero step means that side's
	 * address is held fixed */
	if (dma_cfg_tmp->src_step != 0 && dma_cfg_tmp->des_step != 0) {
		fix_en = 0x0;
	} else {
		if ((dma_cfg_tmp->src_step | dma_cfg_tmp->des_step) == 0) {
			fix_en = 0x0;
		} else {
			fix_en = 0x1;
			if (dma_cfg_tmp->src_step)
				fix_mode = 0x1;
			else
				fix_mode = 0x0;
		}
	}

	/* check dma wrap mode */
	if (dma_cfg_tmp->wrap_ptr && dma_cfg_tmp->wrap_to) {
		wrap_en = 0x1;
		if (dma_cfg_tmp->wrap_to == dma_cfg_tmp->src_addr) {
			wrap_mode = 0x0;
		} else {
			if (dma_cfg_tmp->wrap_to == dma_cfg_tmp->des_addr)
				wrap_mode = 0x1;
			else
				return -EINVAL;
		}
	}

	/* linklist configuration */
	if (dma_cfg_tmp->linklist_ptr) {
		llist_en = 0x1;
		if (dma_cfg_tmp->is_end == 1)
			list_end = 0x1;
	}

	chn_type = mchan->chn_type;
	irq_mode = dma_cfg_tmp->irq_mode;

	/* standard channels have no transaction/list stage */
	if ((chn_type == STANDARD_DMA) &&
	    (irq_mode == TRANS_DONE || irq_mode == LIST_DONE)) {
		pr_err("Irq type isn't compatible with channel type!");
		return -EINVAL;
	}

	if (!IS_ALIGNED(dma_cfg_tmp->src_step, dma_cfg_tmp->datawidth)) {
		pr_err("Source step is not aligned!");
		return -EINVAL;
	}

	if (!IS_ALIGNED(dma_cfg_tmp->des_step, dma_cfg_tmp->datawidth)) {
		pr_err("Destination step is not aligned!");
		return -EINVAL;
	}

	mchan->dev_id = dma_cfg_tmp->dev_id;

	if (type == CONFIG_DESC)
		dma_reg = mdesc->dma_chn_reg;
	else if (type == CONFIG_LINKLIST)
		dma_reg = dma_reg_addr;
	else
		return -EINVAL;

	dma_reg->pause = 0x0;
	dma_reg->req = 0x0;

	/* set default priority = 1 */
	dma_reg->cfg = DMA_PRI_1 << CHN_PRIORITY_OFFSET |
		llist_en << LLIST_EN_OFFSET;

	/* src and des addr */
	dma_reg->src_addr = dma_cfg_tmp->src_addr;
	dma_reg->des_addr = dma_cfg_tmp->des_addr;

	/* frg_len packs datawidth, request mode and the wrap/fix/list
	 * control bits around the fragment length itself */
	dma_reg->frg_len =
		(dma_cfg_tmp->datawidth << SRC_DATAWIDTH_OFFSET) |
		(dma_cfg_tmp->datawidth << DES_DATAWIDTH_OFFSET) |
		(0x0 << SWT_MODE_OFFSET) |
		(dma_cfg_tmp->req_mode << REQ_MODE_OFFSET) |
		(wrap_mode << ADDR_WRAP_SEL_OFFSET) |
		(wrap_en << ADDR_WRAP_EN_OFFSET) |
		(fix_mode << ADDR_FIX_SEL_OFFSET) |
		(fix_en << ADDR_FIX_SEL_EN) |
		(list_end << LLIST_END_OFFSET) |
		(dma_cfg_tmp->fragmens_len & FRG_LEN_MASK);

	dma_reg->blk_len = dma_cfg_tmp->block_len & BLK_LEN_MASK;

	/* set interrupt type */
	if (type == CONFIG_DESC) {
		if (irq_mode == NO_INT)
			mchan->irq_handle_enable = 0;
		else
			mchan->irq_handle_enable = 1;

		dma_reg->intc &= ~0x1f;
		dma_reg->intc |= 0x1 << 4;	/* config-error int always on */

		switch (irq_mode) {
		case NO_INT:
			break;
		case FRAG_DONE:
			dma_reg->intc |= 0x1;
			break;
		case BLK_DONE:
			dma_reg->intc |= 0x2;
			break;
		case TRANS_DONE:
			dma_reg->intc |= 0x4;
			break;
		case LIST_DONE:
			dma_reg->intc |= 0x8;
			break;
		case CONFIG_ERR:	/* fifth mode's label elided in source; CONFIG_ERR assumed */
			dma_reg->intc |= 0x10;
			break;
		default:
			return -EINVAL;
		}
	}

	pr_dma("dma_config:cfg=0x%x,frg_len=0x%x,blk_len=0x%x,intc=0x%x!\n",
	       dma_reg->cfg, dma_reg->frg_len, dma_reg->blk_len, dma_reg->intc);

	if (chn_type == STANDARD_DMA)
		return 0;

	/* full dma config */
	if (0x0 == dma_cfg_tmp->transcation_len)
		dma_reg->trsc_len = dma_cfg_tmp->block_len & TRSC_LEN_MASK;
	else
		dma_reg->trsc_len = dma_cfg_tmp->transcation_len & TRSC_LEN_MASK;

	dma_reg->trsf_step =
		(dma_cfg_tmp->des_step & TRSF_STEP_MASK) << DEST_TRSF_STEP_OFFSET |
		(dma_cfg_tmp->src_step & TRSF_STEP_MASK) << SRC_TRSF_STEP_OFFSET;

	dma_reg->wrap_ptr = dma_cfg_tmp->wrap_ptr;
	dma_reg->wrap_to = dma_cfg_tmp->wrap_to;

	dma_reg->llist_ptr = dma_cfg_tmp->linklist_ptr;

	dma_reg->frg_step =
		(dma_cfg_tmp->dst_frag_step & FRAG_STEP_MASK) << DEST_FRAG_STEP_OFFSET |
		(dma_cfg_tmp->src_frag_step & FRAG_STEP_MASK) << SRC_FRAG_STEP_OFFSET;

	dma_reg->src_blk_step = dma_cfg_tmp->src_blk_step;
	dma_reg->des_blk_step = dma_cfg_tmp->dst_blk_step;

	pr_dma("dma_config:trsc_len=0x%x,trsf_step=0x%x,llist_ptr=0x%x,frg_step=0x%x!\n",
	       dma_reg->trsc_len, dma_reg->trsf_step, dma_reg->llist_ptr, dma_reg->frg_step);

	return 0;
}
/* config dma linklist */
static int __dma_config_linklist(struct dma_chan *chan, struct sprd_dma_desc *mdesc,
				 struct sprd_dma_cfg *cfg_list, u32 node_size)
{
	struct sprd_dma_chn_reg *dma_reg_list;
	struct sprd_dma_cfg list_cfg;
	dma_addr_t cfg_p;
	int ret, i;

	/* check dma linklist node memory */
	if (cfg_list[0].link_cfg_v == 0 || cfg_list[0].link_cfg_p == 0) {
		pr_err("Haven't allocated memory for list node!\n");
		return -EFAULT;
	}

	/* get linklist node virtual addr and physical addr */
	dma_reg_list = (struct sprd_dma_chn_reg *)cfg_list[0].link_cfg_v;
	cfg_p = (dma_addr_t)cfg_list[0].link_cfg_p;

	pr_dma("Linklist:alloc addr virt:0x%lx,phys addr: 0x%lx\n",
	       (unsigned long)dma_reg_list, (unsigned long)cfg_p);

	/* linklist configuration: each node's llist_ptr points at the next
	 * node's cfg register (+0x10 skips pause/req/cfg/intc), the last
	 * node wrapping back to the first */
	for (i = 0; i < node_size; i++) {
		cfg_list[i].linklist_ptr = (u32)(cfg_p +
			((i + 1) % node_size) * sizeof(struct sprd_dma_chn_reg) + 0x10);

		ret = __dma_config(chan, NULL, cfg_list + i, dma_reg_list + i, CONFIG_LINKLIST);
		if (ret < 0) {
			pr_err("Linklist configuration error!\n");
			return ret;
		}
		pr_dma("Configuration the link list!\n");
		__dma_cfg_check_register((void __iomem *)(dma_reg_list + i));
	}

	memset((void *)&list_cfg, 0x0, sizeof(list_cfg));
	list_cfg.linklist_ptr = cfg_p + 0x10;
	list_cfg.irq_mode = cfg_list[0].irq_mode;
	list_cfg.src_addr = cfg_list[0].src_addr;
	list_cfg.des_addr = cfg_list[0].des_addr;

	/* support for audio: is_end > 1 marks a cyclic list */
	if (cfg_list[node_size - 1].is_end > 1)
		mdesc->cycle = 1;

	ret = __dma_config(chan, mdesc, &list_cfg, NULL, CONFIG_DESC);

	return ret;
}
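/*
 * Linklist usage sketch (client side; hedged: field names follow
 * struct sprd_dma_cfg exactly as consumed above, the allocation call
 * is the generic DMA API). The client provides coherent memory for
 * the node array and marks the final entry:
 *
 *	dma_addr_t phys;
 *	void *virt = dma_alloc_coherent(dev,
 *			n * sizeof(struct sprd_dma_chn_reg), &phys, GFP_KERNEL);
 *	struct sprd_dma_cfg cfg[n];
 *
 *	memset(cfg, 0, sizeof(cfg));
 *	cfg[0].link_cfg_v = (unsigned long)virt;
 *	cfg[0].link_cfg_p = (unsigned long)phys;
 *	// per node: src_addr/des_addr, datawidth, steps, lengths ...
 *	cfg[n - 1].is_end = 1;	// >1 would make the list cyclic (audio)
 */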
/* map a raised status bit back to its interrupt type; names matched to
 * the enable bits programmed in __dma_config() */
static dma_int_type __dma_check_int_type(u32 intc_reg)
{
	if (intc_reg & 0x1000)
		return CONFIG_ERR;
	else if (intc_reg & 0x800)
		return LIST_DONE;
	else if (intc_reg & 0x400)
		return TRANS_DONE;
	else if (intc_reg & 0x200)
		return BLK_DONE;
	else if (intc_reg & 0x100)
		return FRAG_DONE;

	return NO_INT;
}

/* recover the request mode from the REQ_MODE field in frg_len */
static dma_request_mode __dma_check_req_type(u32 frag_reg)
{
	u32 frag_reg_t = frag_reg >> 24;

	if ((frag_reg_t & 0x3) == 0)
		return FRAG_REQ_MODE;
	else if ((frag_reg_t & 0x3) == 0x1)
		return BLOCK_REQ_MODE;
	else if ((frag_reg_t & 0x3) == 0x2)
		return TRANS_REQ_MODE;
	else if ((frag_reg_t & 0x3) == 0x3)
		return LIST_REQ_MODE;

	return FRAG_REQ_MODE;
}
/* check if the dma request desc is done; the flag assignments were
 * elided in source and are restored by inference (see mdesc->done) */
static void __dma_check_mdesc_done(struct sprd_dma_desc *mdesc,
				   dma_int_type int_type, dma_request_mode req_mode)
{
	/* cyclic (audio) descriptors never complete */
	if (mdesc->cycle == 1) {
		mdesc->done = 0;
		return;
	}

	/* the two enums are aligned so that "done" means the raised
	 * interrupt covers at least the requested completion granularity */
	if ((unsigned int)int_type >= ((unsigned int)req_mode + 1))
		mdesc->done = 1;
	else
		mdesc->done = 0;
}
static void __dma_check_int(struct sprd_dma_dev *sdev, int type)
{
	struct sprd_dma_chn *mchan = NULL;
	struct sprd_dma_chn_reg *dma_reg = NULL;
	struct sprd_dma_desc *mdesc = NULL;
	struct dma_async_tx_descriptor *desc = NULL;
	u32 irq_status = 0, aon_irq_status = 0, i = 0;
	dma_int_type int_type;
	dma_request_mode req_type;
	volatile struct sprd_dma_glb_reg *dma_glb_reg = sdev->dma_glb_base;
	volatile struct sprd_dma_glb_reg *aon_dma_glb_reg = NULL;

	if (sdev->aon_chn_cnt > 0)
		aon_dma_glb_reg = sdev->aon_dma_glb_base;

	if (type) {
		/* irq path: look at masked status */
		irq_status = dma_glb_reg->int_msk_sts;
		aon_irq_status = (aon_dma_glb_reg != NULL) ? (aon_dma_glb_reg->int_msk_sts) : 0;
		pr_dma("Enter DMA interrupt handle function!\n");
	} else {
		/* polled (no-irq) path: look at raw status */
		irq_status = dma_glb_reg->int_raw_sts;
		aon_irq_status = (aon_dma_glb_reg != NULL) ? (aon_dma_glb_reg->int_raw_sts) : 0;
	}

	pr_dma("Check DMA interrupt,irq_status=0x%x,"
	       "aon_irq_status=0x%x!\n", irq_status, aon_irq_status);

	while (irq_status || aon_irq_status) {
		if (irq_status != 0) {
			i = __ffs(irq_status);
			irq_status &= (irq_status - 1);	/* clear lowest set bit */
		} else if (aon_irq_status != 0) {
			i = __ffs(aon_irq_status);
			aon_irq_status &= (aon_irq_status - 1);
			i += sdev->ap_chn_cnt;	/* aon channels follow ap channels */
		}

		mchan = &sdev->channels[i];
		spin_lock(&mchan->chn_lock);
		dma_reg = (struct sprd_dma_chn_reg *)(mchan->dma_chn_base);
		int_type = __dma_check_int_type(dma_reg->intc);
		req_type = __dma_check_req_type(dma_reg->frg_len);
		pr_dma("DMA channel [%d] interrupt,intc=0x%x,int_type=%d,"
		       "req_type=%d!\n", i, dma_reg->intc, int_type, (req_type + 1));
		dma_reg->intc |= 0x1f << 24;	/* ack: write-1-to-clear */

		/* check if the dma request desc is done */
		if (!list_empty(&mchan->active)) {
			mdesc = list_first_entry(&mchan->active, struct sprd_dma_desc, node);
			__dma_check_mdesc_done(mdesc, int_type, req_type);
			if (mdesc->done)	/* guard restored by inference */
				list_splice_tail_init(&mchan->active, &mchan->completed);

			/* support for audio: cyclic desc stays active, its
			 * callback fires directly in irq context */
			if (mdesc->cycle == 1) {
				desc = &mdesc->desc;
				if (desc->callback)
					desc->callback(desc->callback_param);
			}
		}
		spin_unlock(&mchan->chn_lock);
	}
}
static int sprd_dma_start(struct sprd_dma_dev *sdev, struct sprd_dma_chn *mchan,
			  struct sprd_dma_desc *mdesc, u32 dev_id)
{
	__dma_set_uid(sdev, mchan, dev_id);
	__dma_chn_enable(mchan);

	if (DMA_UID_SOFTWARE == dev_id)
		__dma_soft_request(mchan);

	if (dev_id > DMA_UID_SOFTWARE) {
		pr_dma("sprd_dma_start,dev_id=%d,ap_req_cid=%d,!\n", dev_id,
		       readl_relaxed((void __iomem *)SPRD_DMA_REQ_CID(sdev->dma_glb_base, dev_id)));
		if (sdev->aon_dma_glb_base)
			pr_dma("sprd_dma_start,dev_id=%d,aon_req_cid=%d!\n", dev_id,
			       readl_relaxed((void __iomem *)SPRD_DMA_REQ_CID(sdev->aon_dma_glb_base, dev_id)));
	}

	return 0;
}
static int sprd_dma_stop(struct sprd_dma_chn *mchan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&mchan->chan);

	__dma_unset_uid(sdev, mchan, mchan->dev_id);
	__dma_stop_and_disable(mchan);
	__dma_int_clr(mchan);

	return 0;
}
static int sprd_dma_execute(struct sprd_dma_chn *mchan)
{
	struct sprd_dma_desc *first = NULL;
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&mchan->chan);

	if (!list_empty(&mchan->active))
		first = list_first_entry(&mchan->active, struct sprd_dma_desc, node);
	else
		return -EINVAL;	/* nothing staged (error value assumed) */

	pr_dma("Before copy data to DMA reg!\n");
	__dma_cfg_check_register((void __iomem *)first->dma_chn_reg);

	/* program the channel by copying the staged register image */
	memcpy_toio((void __iomem *)mchan->dma_chn_base, (void *)first->dma_chn_reg,
		    sizeof(struct sprd_dma_chn_reg));

	pr_dma("After copy data to DMA reg!\n");
	__dma_cfg_check_register(mchan->dma_chn_base);

	sprd_dma_start(sdev, mchan, first, mchan->dev_id);

	pr_dma("After start the DMA!\n");
	__dma_cfg_check_register(mchan->dma_chn_base);

	return 0;
}
static dma_cookie_t sprd_desc_submit(struct dma_async_tx_descriptor *tx)
{
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(tx->chan);
	struct sprd_dma_desc *mdesc = to_sprd_dma_desc(tx);
	struct sprd_dma_desc *first = NULL;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&mchan->chn_lock, flags);
	cookie = dma_cookie_assign(tx);
	/* move from prepared to queued */
	list_move_tail(&mdesc->node, &mchan->queued);
	if (!list_empty(&mdesc->next_node)) {
		/* scatter-gather chain: queue the chained descriptors too */
		list_splice_tail_init(&mdesc->next_node, &mchan->queued);
		pr_dma("Submitting has next node!\n");
	}

	/* execute the dma desc right away if the channel is idle */
	if (list_empty(&mchan->active)) {
		first = list_first_entry(&mchan->queued, struct sprd_dma_desc, node);
		list_move_tail(&first->node, &mchan->active);
		sprd_dma_execute(mchan);
	}
	spin_unlock_irqrestore(&mchan->chn_lock, flags);

	return cookie;
}
static irqreturn_t __dma_irq_handle(int irq, void *dev_id)
{
	struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;

	/* ack channel status and park finished descriptors ... */
	/* spin_lock(&sdev->dma_lock); */
	__dma_check_int(sdev, 1);
	/* spin_unlock(&sdev->dma_lock); */

	/* ... callbacks and refills run later in the tasklet */
	tasklet_schedule(&sdev->tasklet);

	return IRQ_HANDLED;
}
static int sprd_dma_process_completed(struct sprd_dma_dev *sdev)
{
	struct sprd_dma_chn *mchan;
	struct sprd_dma_desc *mdesc;
	struct sprd_dma_desc *first;
	dma_cookie_t last_cookie = 0;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	u32 dma_chn_cnt = sdev->ap_chn_cnt + sdev->aon_chn_cnt;
	int i;

	for (i = 0; i < dma_chn_cnt; i++) {
		mchan = &sdev->channels[i];

		/* collect the completed dma requests */
		spin_lock_irqsave(&mchan->chn_lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->chn_lock, flags);

		if (list_empty(&list))
			continue;

		list_for_each_entry(mdesc, &list, node) {
			pr_dma("Channel [%d] complete list have node!\n", i);
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			/* submit desc->next */
			dma_run_dependencies(desc);
			last_cookie = desc->cookie;
		}

		spin_lock_irqsave(&mchan->chn_lock, flags);
		list_splice_tail_init(&list, &mchan->free);

		/* continue to process newly queued requests */
		if (!list_empty(&mchan->queued)) {
			pr_dma("Channel [%d] queued list have node!\n", i);
			if (list_empty(&mchan->active)) {
				first = list_first_entry(&mchan->queued, struct sprd_dma_desc, node);
				list_move_tail(&first->node, &mchan->active);
				sprd_dma_execute(mchan);
			}
		} else {
			mchan->chan.completed_cookie = last_cookie;
			pr_dma("Channel [%d] queued list is NULL,and transfer done!\n", i);
		}
		spin_unlock_irqrestore(&mchan->chn_lock, flags);
	}

	return 0;
}
static void sprd_dma_tasklet(unsigned long data)
{
	struct sprd_dma_dev *sdev = (void *)data;

	sprd_dma_process_completed(sdev);
}
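/*
 * Descriptor lifecycle: free -> prepared (prep_*) -> queued (tx_submit)
 * -> active (hardware programmed) -> completed (irq/poll) -> free.
 * The hard-irq path only acks channel status and parks finished
 * descriptors on "completed"; normal callbacks, dependency submission
 * and refilling of "active" happen in sprd_dma_process_completed()
 * from this tasklet, while cyclic (audio) callbacks fire directly in
 * irq context as noted in __dma_check_int().
 */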
static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
	struct sprd_dma_dev *mdev = to_sprd_dma_dev(chan);
	dma_addr_t chn_reg_paddr;
	struct sprd_dma_desc *mdesc;
	struct sprd_dma_chn_reg *chn_reg;
	unsigned long i, flags;
	LIST_HEAD(descs);

	/* one coherent register image per descriptor */
	chn_reg = dmam_alloc_coherent(mdev->dma_dev.dev,
		MPC_DMA_DESCRIPTORS * sizeof(struct sprd_dma_chn_reg), &chn_reg_paddr, GFP_KERNEL);
	if (!chn_reg)
		return -ENOMEM;

	/* init the dma desc */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kmem_cache_zalloc(mdev->dma_desc_node_cachep, GFP_ATOMIC);
		if (!mdesc) {
			pr_err("Memory allocation error: Allocated only %ld descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = sprd_desc_submit;
		mdesc->dma_chn_reg = &chn_reg[i];
		mdesc->dma_chn_reg_paddr = chn_reg_paddr + (i * sizeof(struct sprd_dma_chn_reg));

		INIT_LIST_HEAD(&mdesc->node);
		INIT_LIST_HEAD(&mdesc->next_node);
		list_add_tail(&mdesc->node, &descs);
	}

	if (i == 0) {
		/* managed free used here: the original called the unmanaged
		 * dma_free_coherent() against a dmam_ allocation */
		dmam_free_coherent(mdev->dma_dev.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct sprd_dma_chn_reg), chn_reg, chn_reg_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->chn_lock, flags);
	mchan->dma_desc = chn_reg;
	mchan->dma_desc_paddr = chn_reg_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->chn_lock, flags);

	mchan->chan_status = USED;
	if (mchan->chan_num < mdev->ap_chn_cnt) {
		__ap_dma_enable();
	} else {
		__aon_dma_enable();
		__aon_dma_int_enable();
	}

	pr_dma("Alloc chan resources is OK, and chn_reg=0x%lx, chn_reg_paddr=0x%lx!\n",
	       (unsigned long)chn_reg, (unsigned long)chn_reg_paddr);

	return 0;
}
static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
	struct sprd_dma_dev *mdev = to_sprd_dma_dev(chan);
	dma_addr_t chn_reg_paddr;
	struct sprd_dma_desc *mdesc, *tmp;
	struct sprd_dma_chn_reg *chn_reg;
	struct sprd_dma_chn *tchan;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;
	u32 dma_chn_cnt = mdev->ap_chn_cnt + mdev->aon_chn_cnt;
	u32 ap_chn_cnt = mdev->ap_chn_cnt;

	/* reclaim every descriptor back onto the free list, then detach */
	spin_lock_irqsave(&mchan->chn_lock, flags);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);
	list_splice_tail_init(&mchan->completed, &mchan->free);
	list_splice_tail_init(&mchan->free, &descs);
	chn_reg = mchan->dma_desc;
	chn_reg_paddr = mchan->dma_desc_paddr;
	spin_unlock_irqrestore(&mchan->chn_lock, flags);

	dmam_free_coherent(mdev->dma_dev.dev,
		MPC_DMA_DESCRIPTORS * sizeof(struct sprd_dma_chn_reg), chn_reg, chn_reg_paddr);

	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kmem_cache_free(mdev->dma_desc_node_cachep, mdesc);

	mchan->chan_status = NO_USED;

	/* stop and disable dma */
	sprd_dma_stop(mchan);

	/* if no ap channel is still in use, reset and gate the ap controller */
	for (i = 0; i < ap_chn_cnt; i++) {
		tchan = &mdev->channels[i];
		if (tchan->chan_status == USED)
			break;
	}
	if (i == ap_chn_cnt) {
		__ap_dma_softreset();
		__ap_dma_disable();
	}

	/* likewise for the aon controller */
	for (i = ap_chn_cnt; i < dma_chn_cnt; i++) {
		tchan = &mdev->channels[i];
		if (tchan->chan_status == USED)
			break;
	}
	if (i == dma_chn_cnt) {
		__aon_dma_int_disable();
		__aon_dma_reset();
		__aon_dma_disable();
	}

	pr_dma("Release chan resources is OK!\n");
}
static int sprd_dma_check_int(struct dma_chan *chan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);

	/* poll raw status and reap completions without an interrupt */
	__dma_check_int(sdev, 0);
	sprd_dma_process_completed(sdev);

	return 0;
}
static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
	enum dma_status ret;
	int residue = txstate ? txstate->residue : 0;	/* NULL guard added */

	/* for audio no int handler: poll by hand */
	if (mchan->irq_handle_enable == 0) {
		sprd_dma_check_int(chan);
		pr_dma("Check dma interrupt by hand!\n");
	}

	/* spin_lock(&mchan->chn_lock); */
	ret = dma_cookie_status(chan, cookie, txstate);
	/* spin_unlock(&mchan->chn_lock); */

	/* residue doubles as an in/out parameter: callers pass
	 * SPRD_SRC_ADDR or SPRD_DST_ADDR to read back the live hw address */
	if (txstate) {
		if (residue == SPRD_SRC_ADDR)
			txstate->residue = __dma_get_src_addr(chan);
		else if (residue == SPRD_DST_ADDR)
			txstate->residue = __dma_get_dst_addr(chan);
		else
			txstate->residue = 0;

		pr_dma("%s cookie=%d, residue=0x%x!\n", __func__, cookie, txstate->residue);
	}

	return ret;
}
static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}
struct dma_async_tx_descriptor *sprd_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *mdesc = NULL;
	unsigned long irq_flags;
	u32 datawidth = 0, src_step = 0, des_step = 0;
	struct sprd_dma_cfg *dma_cfg;
	int dma_cfg_cnt = dma_cfg_group.dma_cfg_cnt;
	int ret;

	/* for configuration staged earlier via DMA_SLAVE_CONFIG */
	if (flags & DMA_CFG_FLAG) {
		if (dma_cfg_cnt < 1 || dma_cfg_cnt > DMA_CFG_COUNT) {
			dma_cfg_group.dma_cfg_cnt = 0;
			up(&dma_cfg_group.cfg_sema);
			pr_err("DMA wrong configuration number!\n");
			return NULL;
		}

		dma_cfg = kzalloc(sizeof(struct sprd_dma_cfg) * dma_cfg_cnt, GFP_KERNEL);
		if (!dma_cfg)
			return NULL;
		memcpy(dma_cfg, dma_cfg_group.dma_cfg, sizeof(struct sprd_dma_cfg) * dma_cfg_cnt);

		dma_cfg_group.dma_cfg_cnt = 0;
		memset(dma_cfg_group.dma_cfg, 0, sizeof(struct sprd_dma_cfg) * DMA_CFG_COUNT);
		up(&dma_cfg_group.cfg_sema);

		goto Have_configured;
	}

	dma_cfg = kzalloc(sizeof(struct sprd_dma_cfg), GFP_KERNEL);
	if (!dma_cfg)
		return NULL;
	dma_cfg_cnt = 1;	/* single plain-memcpy config (restored by inference) */

	if (len > BLK_LEN_MASK && mchan->chn_type != FULL_DMA) {
		pr_err("Channel type isn't support!\n");
		kfree(dma_cfg);
		return NULL;
	}

	/* set step automatically from the copy length's alignment;
	 * the datawidth encodings were elided in source, the WORD/SHORT/
	 * BYTE_WIDTH names are assumed from the SoC header */
	if ((len & 0x3) == 0) {
		datawidth = WORD_WIDTH;
		src_step = 4;
		des_step = 4;
	} else {
		if ((len & 0x1) == 0) {
			datawidth = SHORT_WIDTH;
			src_step = 2;
			des_step = 2;
		} else {
			datawidth = BYTE_WIDTH;
			src_step = 1;
			des_step = 1;
		}
	}

	memset(&dma_cfg[0], 0, sizeof(struct sprd_dma_cfg));
	dma_cfg[0].src_addr = src;
	dma_cfg[0].des_addr = dest;
	dma_cfg[0].datawidth = datawidth;
	dma_cfg[0].src_step = src_step;
	dma_cfg[0].des_step = des_step;	/* was "= src_step" (typo, same value) */
	dma_cfg[0].fragmens_len = DMA_MEMCPY_MIN_SIZE;
	if (len <= BLK_LEN_MASK) {
		dma_cfg[0].block_len = len;
		dma_cfg[0].req_mode = BLOCK_REQ_MODE;
		dma_cfg[0].irq_mode = BLK_DONE;
	} else {
		dma_cfg[0].block_len = DMA_MEMCPY_MIN_SIZE;
		dma_cfg[0].transcation_len = len;
		dma_cfg[0].req_mode = TRANS_REQ_MODE;
		dma_cfg[0].irq_mode = TRANS_DONE;
	}

Have_configured:
	/* get a free dma desc */
	spin_lock_irqsave(&mchan->chn_lock, irq_flags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct sprd_dma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);

	if (!mdesc) {
		sprd_dma_process_completed(sdev);
		kfree(dma_cfg);
		return NULL;
	}

	/* config into dma reg */
	if (dma_cfg_cnt == 1)
		ret = __dma_config(chan, mdesc, &dma_cfg[0], NULL, CONFIG_DESC);
	else if (dma_cfg_cnt > 1)
		ret = __dma_config_linklist(chan, mdesc, &dma_cfg[0], dma_cfg_cnt);
	else {
		pr_err("DMA configuration count isn't available!\n");
		ret = -EINVAL;
	}

	if (ret < 0) {
		spin_lock_irqsave(&mchan->chn_lock, irq_flags);
		list_add_tail(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
		pr_err("Configuration is error!\n");
		kfree(dma_cfg);
		return NULL;
	}

	/* support hardware request */
	if (flags & DMA_HARDWARE_FLAG) {
		mchan->re_mode = HARDWARE_REQ;
	} else {
		mchan->re_mode = SOFTWARE_REQ;
		mchan->dev_id = DMA_UID_SOFTWARE;
	}

	/* add to the prepared list */
	spin_lock_irqsave(&mchan->chn_lock, irq_flags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);

	kfree(dma_cfg);	/* the register image holds everything needed now */
	return &mdesc->desc;
}
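/*
 * Client-side sketch of a software-triggered memcpy through this
 * driver, using only generic dmaengine calls of this kernel
 * generation (a channel is assumed to have been requested already,
 * e.g. via sprd_dma_filter_fn below):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_phys,
 *						  src_phys, len, 0);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);	// ends up in sprd_desc_submit()
 *		dma_async_issue_pending(chan);	// no-op for this driver
 *	}
 */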
struct dma_async_tx_descriptor *sprd_prep_dma_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *mdesc = NULL;
	struct sprd_dma_desc *first_mdesc = NULL;
	struct scatterlist *sg_d;
	struct scatterlist *sg_s;
	unsigned int scatterlist_entry, src_dma_len, dst_dma_len;
	unsigned int len;
	int i, ret;
	u32 datawidth = 0, src_step = 0, des_step = 0;
	struct sprd_dma_cfg dma_cfg_t;
	dma_addr_t dst_dma_addr, src_dma_addr;
	unsigned long irq_flags;

	/* security check */
	if (dst_nents != src_nents) {
		pr_err("DMA scatterlist entry count is not equal!\n");
		return NULL;
	}
	scatterlist_entry = src_nents;

	if (scatterlist_entry > MPC_DMA_DESCRIPTORS) {
		pr_err("DMA scatterlist is overrun!\n");
		return NULL;
	}

	if (flags & DMA_HARDWARE_FLAG) {
		pr_err("DMA scatterlist do not support hardware request!\n");
		return NULL;
	}

	/* for scatter list */
	for (i = 0, sg_d = dst_sg, sg_s = src_sg; i < scatterlist_entry;
	     i++, sg_d = sg_next(sg_d), sg_s = sg_next(sg_s)) {
		dst_dma_addr = sg_dma_address(sg_d);
		dst_dma_len = sg_dma_len(sg_d);
		src_dma_addr = sg_dma_address(sg_s);
		src_dma_len = sg_dma_len(sg_s);

		pr_dma("DMA scatterlist dst_dma_addr=0x%x, src_dma_addr=0x%x,dst_len=%d,src_len=%d!\n",
		       (unsigned int)dst_dma_addr, (unsigned int)src_dma_addr, dst_dma_len, src_dma_len);

		if (dst_dma_len != src_dma_len)
			return NULL;
		len = src_dma_len;

		/* set step automatically (same assumed encodings as memcpy) */
		if ((len & 0x3) == 0) {
			datawidth = WORD_WIDTH;
			src_step = 4;
			des_step = 4;
		} else {
			if ((len & 0x1) == 0) {
				datawidth = SHORT_WIDTH;
				src_step = 2;
				des_step = 2;
			} else {
				datawidth = BYTE_WIDTH;
				src_step = 1;
				des_step = 1;
			}
		}

		memset(&dma_cfg_t, 0, sizeof(struct sprd_dma_cfg));
		dma_cfg_t.src_addr = src_dma_addr;
		dma_cfg_t.des_addr = dst_dma_addr;
		dma_cfg_t.datawidth = datawidth;
		dma_cfg_t.src_step = src_step;
		dma_cfg_t.des_step = des_step;	/* was "= src_step" (typo, same value) */
		dma_cfg_t.fragmens_len = DMA_MEMCPY_MIN_SIZE;
		if (len <= BLK_LEN_MASK) {
			dma_cfg_t.block_len = len;
			dma_cfg_t.req_mode = BLOCK_REQ_MODE;
			dma_cfg_t.irq_mode = BLK_DONE;
		} else {
			dma_cfg_t.block_len = DMA_MEMCPY_MIN_SIZE;
			dma_cfg_t.transcation_len = len;
			dma_cfg_t.req_mode = TRANS_REQ_MODE;
			dma_cfg_t.irq_mode = TRANS_DONE;
		}

		/* get a free dma desc for this segment */
		mdesc = NULL;
		spin_lock_irqsave(&mchan->chn_lock, irq_flags);
		if (!list_empty(&mchan->free)) {
			mdesc = list_first_entry(&mchan->free, struct sprd_dma_desc, node);
			list_del(&mdesc->node);
		}
		spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
		if (!mdesc) {
			sprd_dma_process_completed(sdev);
			pr_err("Warning: There are not enough mdesc for scatterlist!\n");
			return NULL;
		}

		ret = __dma_config(chan, mdesc, &dma_cfg_t, NULL, CONFIG_DESC);
		if (ret < 0) {
			pr_err("Warning: Configuration is error!\n");
			spin_lock_irqsave(&mchan->chn_lock, irq_flags);
			list_add_tail(&mdesc->node, &mchan->free);
			spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
			return NULL;
		}

		/* first segment anchors the chain on the prepared list;
		 * the rest hang off its next_node */
		if (i == 0) {
			first_mdesc = mdesc;
			spin_lock_irqsave(&mchan->chn_lock, irq_flags);
			list_add_tail(&mdesc->node, &mchan->prepared);
			spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
		} else {
			spin_lock_irqsave(&mchan->chn_lock, irq_flags);
			list_add_tail(&mdesc->node, &first_mdesc->next_node);
			spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
		}
	}

	mchan->re_mode = SOFTWARE_REQ;

	return &first_mdesc->desc;
}
/* copy dma configuration staged by the caller of DMA_SLAVE_CONFIG */
static void sprd_dma_copy(struct sprd_dma_chn *mchan, unsigned long arg)
{
	int i = 0;

	do {
		memcpy(&dma_cfg_group.dma_cfg[i], (struct sprd_dma_cfg *)arg,
		       sizeof(struct sprd_dma_cfg));
		arg += sizeof(struct sprd_dma_cfg);

		pr_dma("%s, i:%d block_len:0x%x,fragmens_len:0x%x,src_step:0x%x,des_addr:0x%x,"
		       "src_addr:0x%x,link_cfg_p:0x%x,link_cfg_v:0x%lx,is_end:%d,transcation_len:0x%x\n",
		       __func__, i, dma_cfg_group.dma_cfg[i].block_len, dma_cfg_group.dma_cfg[i].fragmens_len,
		       dma_cfg_group.dma_cfg[i].src_step, dma_cfg_group.dma_cfg[i].des_addr,
		       dma_cfg_group.dma_cfg[i].src_addr, dma_cfg_group.dma_cfg[i].link_cfg_p,
		       dma_cfg_group.dma_cfg[i].link_cfg_v, dma_cfg_group.dma_cfg[i].is_end,
		       dma_cfg_group.dma_cfg[i].transcation_len);

	} while (dma_cfg_group.dma_cfg[i++].is_end == 0 && i < (DMA_CFG_COUNT - 1));
	dma_cfg_group.dma_cfg_cnt = i;
	pr_dma("Get dma configuration number is %d!\n", i);
}
/* terminate dma channel */
static int sprd_terminate_all(struct sprd_dma_chn *mchan)
{
	struct dma_chan *chan = &mchan->chan;

	sprd_dma_free_chan_resources(chan);

	return 0;
}

/* dma control method; the case labels around stop/execute were elided
 * in source, DMA_PAUSE/DMA_RESUME are assumed */
static int sprd_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	int ret = 0;
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		if (down_trylock(&dma_cfg_group.cfg_sema)) {
			pr_err("DMA resource is busy, try again...\n");
			return -EAGAIN;
		}
		sprd_dma_copy(mchan, arg);
		break;
	case DMA_PAUSE:
		ret = sprd_dma_stop(mchan);
		break;
	case DMA_TERMINATE_ALL:
		ret = sprd_terminate_all(mchan);
		break;
	case DMA_RESUME:
		ret = sprd_dma_execute(mchan);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
/* for dma channel request filter */
bool sprd_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
	struct sprd_dma_chn *mchan = NULL;
	unsigned int type = *(unsigned int *)filter_param;
	struct sprd_dma_dev *mdev = to_sprd_dma_dev(chan);
	u32 ap_chn_cnt = mdev->ap_chn_cnt;

	mchan = to_sprd_dma_chan(chan);

	/* match the requested channel class (ap/aon, standard/full), or an
	 * explicit channel number via NUM_REQUEST_DMA */
	if (type == AP_STANDARD_DMA) {
		if (mchan->chn_type == STANDARD_DMA && mchan->chan_num < ap_chn_cnt)
			return true;
	} else if (type == AP_FULL_DMA) {
		if (mchan->chn_type == FULL_DMA && mchan->chan_num < ap_chn_cnt)
			return true;
	} else if (type == AON_STANDARD_DMA) {
		if (mchan->chn_type == STANDARD_DMA && mchan->chan_num > (ap_chn_cnt - 1))
			return true;
	} else if (type == AON_FULL_DMA) {
		if (mchan->chn_type == FULL_DMA && mchan->chan_num > (ap_chn_cnt - 1))
			return true;
	} else if ((type & 0xf00) == NUM_REQUEST_DMA) {
		if (mchan->chan_num == (type & 0xff))
			return true;
	}

	return false;
}

/* for dma channel request filter */
bool sprd_dma_filter_fn_t(struct dma_chan *chan, void *param)
{
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
	unsigned int req = *(unsigned int *)param;

	return req == (mchan->chan_num + 1);
}
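/*
 * Channel request sketch using the class filter above; AP_FULL_DMA is
 * one of the type codes consumed by sprd_dma_filter_fn():
 *
 *	dma_cap_mask_t mask;
 *	unsigned int type = AP_FULL_DMA;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, sprd_dma_filter_fn, &type);
 */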
int sprd_dma_check_register(struct dma_chan *c)
{
	volatile struct sprd_dma_chn_reg *dma_reg = NULL;
	struct sprd_dma_chn *mchan = to_sprd_dma_chan(c);

	dma_reg = (struct sprd_dma_chn_reg *)mchan->dma_chn_base;
	__dma_cfg_check_register((void __iomem *)dma_reg);

	return 0;
}
static int sprd_dma_probe(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = NULL;
	struct sprd_dma_chn *dma_chn = NULL;
	void __iomem *dma_ap_base = NULL;
	void __iomem *dma_aon_base = NULL;
	struct resource *res = NULL;
	int dma_irq = 0, aon_dma_irq = 0;	/* int, not u32: platform_get_irq() can return < 0 */
	u32 aon_offset = 0;
	u32 dma_chn_cnt = 0, ap_chn_cnt = 0, aon_chn_cnt = 0;
	int i, ret;

	if (!pdev->dev.of_node) {
		pr_warn("Error: Can't find the dma node!\n");
		return -ENODEV;
	}

	if (of_property_read_u32(pdev->dev.of_node, "#dma-channels", &dma_chn_cnt)) {
		pr_warn("Error: Can't get total dma channel number infor!\n");
		return -ENODEV;
	}

	if (of_property_read_u32(pdev->dev.of_node, "sprd,aon-offset", &aon_offset)) {
		pr_warn("Error: Can't get ap dma channel number infor!\n");
		return -ENODEV;
	}

	/* channels [0, aon_offset) belong to ap, the rest to aon */
	aon_chn_cnt = dma_chn_cnt - aon_offset;
	ap_chn_cnt = dma_chn_cnt - aon_chn_cnt;

	pr_dma("DMA total chn: %d,ap chn :%d,aon chn :%d!\n", dma_chn_cnt, ap_chn_cnt, aon_chn_cnt);

	dma_irq = platform_get_irq(pdev, 0);
	if (dma_irq < 0) {
		pr_warn("Error: Can't get the ap dma irq number!\n");
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		pr_warn("Error: Can't get ap dma registers resource!\n");
		return -ENODEV;
	}

	dma_ap_base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
	if (!dma_ap_base) {
		pr_warn("Error: Can't get the ap dma base addr!\n");
		return -ENOMEM;
	}

	if (aon_chn_cnt > 0) {
		aon_dma_irq = platform_get_irq(pdev, 1);
		if (aon_dma_irq < 0) {
			pr_warn("Error: Can't get the aon dma irq number!\n");
			return -ENODEV;
		}

		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res) {
			pr_warn("Error: Can't get aon dma registers resource!\n");
			return -ENODEV;
		}

		dma_aon_base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
		if (!dma_aon_base) {
			pr_warn("Error: Can't get the aon dma base addr!\n");
			return -ENOMEM;
		}
	}

	pr_dma("DMA irq number is %d,aon irq num is %d!\n", dma_irq, aon_dma_irq);

	/* dma device memory alloc */
	sdev = devm_kzalloc(&pdev->dev, (sizeof(*sdev) +
		(sizeof(struct sprd_dma_chn) * dma_chn_cnt)), GFP_KERNEL);
	if (!sdev) {
		pr_err("Error: DMA alloc dma dev failed!\n");
		return -ENOMEM;
	}

	/* init dma device; each capability bit must be set separately
	 * (the original OR-ed DMA_MEMCPY|DMA_SG into one bit index) */
	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	dma_cap_set(DMA_SG, sdev->dma_dev.cap_mask);
	sdev->dma_dev.chancnt = dma_chn_cnt;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	spin_lock_init(&sdev->dma_lock);
	sdev->dma_dev.dev = &pdev->dev;

	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_prep_dma_sg = sprd_prep_dma_sg;
	sdev->dma_dev.device_control = sprd_dma_control;

	for (i = 0; i < dma_chn_cnt; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chan.device = &sdev->dma_dev;
		dma_cookie_init(&dma_chn->chan);
		list_add_tail(&dma_chn->chan.device_node, &sdev->dma_dev.channels);

		/* the first STANDARD_DMA_NUM channels of each controller are
		 * standard, the rest are full channels */
		dma_chn->chan_num = i;
		if (i < STANDARD_DMA_NUM || (i > (ap_chn_cnt - 1)
		    && i < (ap_chn_cnt + STANDARD_DMA_NUM))) {
			dma_chn->chn_type = STANDARD_DMA;
		} else {
			dma_chn->chn_type = FULL_DMA;
		}
		dma_chn->chan_status = NO_USED;
		dma_chn->irq_handle_enable = 0;

		if (i < ap_chn_cnt)
			dma_chn->dma_chn_base = (void __iomem *)((unsigned long)dma_ap_base +
				0x1000 + DMA_CHN_OFFSET * (i));
		else if (dma_aon_base)
			dma_chn->dma_chn_base = (void __iomem *)((unsigned long)dma_aon_base +
				0x1000 + DMA_CHN_OFFSET * (i - ap_chn_cnt));
		else
			dma_chn->dma_chn_base = NULL;

		pr_dma("dma_chn [%d] dma_chn_base = 0x%lx!\n", i, (unsigned long)dma_chn->dma_chn_base);

		spin_lock_init(&dma_chn->chn_lock);
		INIT_LIST_HEAD(&dma_chn->free);
		INIT_LIST_HEAD(&dma_chn->prepared);
		INIT_LIST_HEAD(&dma_chn->queued);
		INIT_LIST_HEAD(&dma_chn->active);
		INIT_LIST_HEAD(&dma_chn->completed);
	}

	sdev->dma_glb_base = dma_ap_base;
	sdev->irq = dma_irq;
	sdev->aon_dma_glb_base = dma_aon_base;
	sdev->aon_irq = aon_dma_irq;
	sdev->ap_chn_cnt = ap_chn_cnt;
	sdev->aon_chn_cnt = aon_chn_cnt;

	pr_dma("dma_glb_base = 0x%lx, aon_dma_glb_base = 0x%lx!\n",
	       (unsigned long)sdev->dma_glb_base, (unsigned long)sdev->aon_dma_glb_base);

	sdev->dma_desc_node_cachep =
		kmem_cache_create("dma_desc_node", sizeof(struct sprd_dma_desc), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!sdev->dma_desc_node_cachep) {
		pr_err("Error: DMA alloc cache failed!\n");
		return -ENOMEM;
	}

	ret = devm_request_irq(&pdev->dev, dma_irq, __dma_irq_handle, 0, "sprd_dma", (void *)sdev);
	if (ret) {
		pr_err("Error: Request dma irq failed %d\n", ret);
		goto irq_fail;
	}

	if (aon_chn_cnt > 0) {
		ret = devm_request_irq(&pdev->dev, aon_dma_irq, __dma_irq_handle, 0,
				       "sprd_aon_dma", (void *)sdev);
		if (ret) {
			pr_err("Error: Request aon dma irq failed %d\n", ret);
			goto aon_irq_fail;
		}
	}

	/* initial the tasklet */
	tasklet_init(&sdev->tasklet, sprd_dma_tasklet, (unsigned long)sdev);
	/* save the sdev as private data */
	platform_set_drvdata(pdev, sdev);

	/* dma device register */
	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret) {
		pr_err("SPRD-DMA: failed to register slave DMA engine device: %d\n", ret);
		goto register_fail;
	}

	/* Device-tree DMA controller registration */
	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_simple_xlate, &sprd_dma_info);
	if (ret) {
		pr_warn("SPRD-DMA: failed to register of DMA controller\n");
		goto of_register_fail;
	}

	sema_init(&dma_cfg_group.cfg_sema, 1);
	dma_cfg_group.dma_cfg_cnt = 0;
	memset(dma_cfg_group.dma_cfg, 0, sizeof(struct sprd_dma_cfg) * DMA_CFG_COUNT);
	pr_notice("SPRD DMA engine driver probe OK!\n");

	return 0;

	/* unwind; label names other than of_register_fail restored by inference */
of_register_fail:
	dma_async_device_unregister(&sdev->dma_dev);
register_fail:
	if (aon_chn_cnt > 0) {
		devm_free_irq(&pdev->dev, aon_dma_irq, (void *)sdev);
		irq_dispose_mapping(sdev->aon_irq);
	}
aon_irq_fail:
	devm_free_irq(&pdev->dev, dma_irq, (void *)sdev);
	irq_dispose_mapping(sdev->irq);
irq_fail:
	kmem_cache_destroy(sdev->dma_desc_node_cachep);

	return ret;
}
static int sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sdev->dma_dev);
	devm_free_irq(&pdev->dev, sdev->irq, (void *)sdev);
	irq_dispose_mapping(sdev->irq);
	if (sdev->aon_chn_cnt > 0) {
		devm_free_irq(&pdev->dev, sdev->aon_irq, (void *)sdev);
		irq_dispose_mapping(sdev->aon_irq);
	}
	kmem_cache_destroy(sdev->dma_desc_node_cachep);
	pr_notice("SPRD DMA engine driver remove OK!\n");

	return 0;
}
static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sharkl64-dma", },
	{ /* sentinel */ },
};

static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove = sprd_dma_remove,
	.driver = {
		.name = "sprd_dma",	/* elided in source; name assumed */
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(sprd_dma_match),
	},
};
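/*
 * Matching devicetree sketch; property names are taken from the probe
 * above and #dma-cells = <1> follows of_dma_simple_xlate(), while the
 * addresses, sizes and interrupt specifiers are placeholders:
 *
 *	ap_dma: dma-controller@20100000 {
 *		compatible = "sprd,sharkl64-dma";
 *		reg = <...>, <...>;		// ap regs, aon regs
 *		interrupts = <...>, <...>;	// ap irq, aon irq
 *		#dma-cells = <1>;
 *		#dma-channels = <64>;
 *		sprd,aon-offset = <32>;
 *	};
 */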
int __init sprd_dma_init(void)
{
	return platform_driver_register(&sprd_dma_driver);
}

void __exit sprd_dma_exit(void)
{
	platform_driver_unregister(&sprd_dma_driver);
}

subsys_initcall(sprd_dma_init);
module_exit(sprd_dma_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Baolin.wang <Baolin.wang@spreadtrum.com>");