drivers/dma/sprd_dma.c
1 /*
2  * Copyright (C) 2012 Spreadtrum Communications Inc.
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  */
13
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/semaphore.h>
17 #include <linux/slab.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/errno.h>
21 #include <linux/io.h>
22 #include <linux/of_dma.h>
23 #include <linux/of_device.h>
24 #include <linux/of_address.h>
25 #include <linux/of_irq.h>
26 #include <linux/dmaengine.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/delay.h>
29 #include "dmaengine.h"
30
31 #include <soc/sprd/sci_glb_regs.h>
32 #include <soc/sprd/sci.h>
33 #include <soc/sprd/hardware.h>
34 #include "sprd_dma.h"
35
36 //#define DMA_DEBUG
37 #ifdef DMA_DEBUG
38 #define pr_dma(fmt, ...)        printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
39 #else
40 #define pr_dma(fmt, ...)
41 #endif
42
43 #define STANDARD_DMA_NUM                (24)
44 #define MPC_DMA_DESCRIPTORS             (64)
45 #define DMA_CHN_OFFSET                  (0x40)
46 #define DMA_MEMCPY_MIN_SIZE             (64)
47 #define DMA_CFG_COUNT                   (MPC_DMA_DESCRIPTORS)
48 #define DMA_CHx_OFFSET                  (0x40)
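/*
 * Each hardware request UID has one 32-bit CID register at offset 0x2000
 * from the controller's global base, indexed by (uid - 1); writing
 * (channel number + 1) there binds that request UID to the channel
 * (see __dma_set_uid() below).
 */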
49 #define SPRD_DMA_REQ_CID(base,uid)              ((unsigned long)base + 0x2000 + 0x4 * ((uid) -1))
50
51 /* dma channel register definition */
52 struct sprd_dma_chn_reg {
53         u32 pause;
54         u32 req;
55         u32 cfg;
56         u32 intc;
57         u32 src_addr;
58         u32 des_addr;
59         u32 frg_len;
60         u32 blk_len;
61         /* only full chn have following regs */
62         u32 trsc_len;
63         u32 trsf_step;
64         u32 wrap_ptr;
65         u32 wrap_to;
66         u32 llist_ptr;
67         u32 frg_step;
68         u32 src_blk_step;
69         u32 des_blk_step;
70 };
71
72 /* dma request description */
73 struct sprd_dma_desc {
74         struct dma_async_tx_descriptor  desc;
75         struct sprd_dma_chn_reg                 *dma_chn_reg;
76         dma_addr_t                                              dma_chn_reg_paddr;      
77         struct list_head                                node;
78         struct list_head                                next_node;
79         int                                                             done;
80         int                                                             cycle;
81 };
82
83 /* dma channel description */
84 struct sprd_dma_chn {
85         struct dma_chan                 chan;
86         struct list_head                free;
87         struct list_head                prepared;
88         struct list_head                queued;
89         struct list_head                active;
90         struct list_head                completed;
91         spinlock_t                              chn_lock;
92         int                                             chan_num;
93         u32                                     dev_id;
94         enum dma_chn_status     chan_status;
95         void __iomem                    *dma_chn_base;
96         void __iomem                    *dma_desc;
97         dma_addr_t                              dma_desc_paddr;
98         enum dma_chn_type               chn_type;
99         enum request_mode               re_mode;
100         int                                             irq_handle_enable;
101 };
102
103 /* dma device */
104 struct sprd_dma_dev {
105         struct dma_device               dma_dev;
106         spinlock_t                              dma_lock;
107         void __iomem                    *dma_glb_base;
108         void __iomem                    *aon_dma_glb_base;
109         int                                             irq;
110         int                                             aon_irq;
111         struct tasklet_struct   tasklet;
112         struct kmem_cache               *dma_desc_node_cachep;
113         u32                                             ap_chn_cnt;
114         u32                                             aon_chn_cnt;
115         u32                                             chn_std_cnt;
116         struct sprd_dma_chn             channels[0];
117 };
118
119 struct dma_cfg_group_t {
120         struct semaphore                cfg_sema;
121         int                                             dma_cfg_cnt;
122         struct sprd_dma_cfg     dma_cfg[DMA_CFG_COUNT];
123 };
124
125 struct dma_cfg_group_t dma_cfg_group;
126
127 static struct of_dma_filter_info sprd_dma_info = {
128         .filter_fn = sprd_dma_filter_fn,
129 };
130
131 /* dma debug function */
132 static int __dma_cfg_check_register(void __iomem * dma_reg_addr)
133 {
134         volatile struct sprd_dma_chn_reg *dma_reg = (struct sprd_dma_chn_reg *)dma_reg_addr;
135         volatile struct sprd_dma_glb_reg *aon_dma_glb_reg;
136         struct device_node *dma_node;
137         struct platform_device *dma_dev;
138         struct sprd_dma_dev *sdev;
139
140         dma_node = of_find_compatible_node(NULL, NULL, "sprd,sharkl64-dma");
141         if (!dma_node) {
142                 pr_warn("Can't get the dmac node!\n");
143                 return -ENODEV;
144         } else {
145                 dma_dev = of_find_device_by_node(dma_node);
146                 if (!dma_dev) {
147                         pr_warn("Can't get the dma_dev!\n");
148                         return -ENODEV;
149                 }
150         }
151
152         sdev = platform_get_drvdata(dma_dev);
153         if(!sdev){
154                 pr_warn("Can't get the sdev!\n");
155                 return -ENODEV;
156         }
157         aon_dma_glb_reg = sdev->aon_dma_glb_base;
158
159         pr_dma("------------------------------------------------------------------------------------>>>\n");
160         pr_dma("DMA register:\n pause=0x%x,\n req=0x%x,\n cfg=0x%x,\n int=0x%x,\n src_addr=0x%x,\n"
161                         "des_addr=0x%x,\n frg_len=0x%x,\n blk_len=0x%x,\n trsc_len=0x%x,\n trsf_step=0x%x,\n"
162                         "wrap_ptr=0x%x,\n wrap_to=0x%x,\n llist_ptr=0x%x,\n frg_step=0x%x,\n src_blk_step=0x%x,\n"
163                         "des_blk_step=0x%x,\n",dma_reg->pause,dma_reg->req,dma_reg->cfg,dma_reg->intc,
164                         dma_reg->src_addr,dma_reg->des_addr,dma_reg->frg_len,dma_reg->blk_len,dma_reg->trsc_len,
165                         dma_reg->trsf_step,dma_reg->wrap_ptr,dma_reg->wrap_to,dma_reg->llist_ptr,dma_reg->frg_step,
166                         dma_reg->src_blk_step,dma_reg->des_blk_step);
167
168         pr_dma("Global reg:INTC2 aon dma irq raw status=0x%x,dma irq raw status=0x%x!\n",
169                         sci_glb_read((unsigned long)(SPRD_INTC2_BASE+0x4), BIT(6)),
170                         0/*sci_glb_read((unsigned long)(SPRD_INTC1_BASE+0x4), BIT(18))*/);
171         
172         pr_dma("Global reg:INTC_EB_b15=0x%x,DMA_INT_AP_EN_b0=0x%x,INTC2_DMA_EN_b6=0x%x,APB_INTC2_EB_b21=0x%x!\n",
173                         sci_glb_read((unsigned long)REG_AON_APB_APB_EB0, BIT_INTC_EB),
174                         sci_glb_read((unsigned long)REG_AON_APB_AON_DMA_INT_EN, BIT_AON_DMA_INT_AP_EN),
175                         sci_glb_read((unsigned long)(SPRD_INTC2_BASE+0x8), BIT(6)),
176                         sci_glb_read((unsigned long)REG_AP_APB_APB_EB, BIT(21)));
177
178         pr_dma("AON global debug reg:debug sts=0x%x,pause=0x%x,req_sts=0x%x,en_sts=0x%x,arb_sel_sts=0x%x!\n",
179                         aon_dma_glb_reg->debug_sts,aon_dma_glb_reg->pause,aon_dma_glb_reg->req_sts,
180                         aon_dma_glb_reg->en_sts,aon_dma_glb_reg->arb_sel_sts);
181         pr_dma("<<<------------------------------------------------------------------------------------\n");
182         return 0;
183 }
184
185 static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
186 {
187         return container_of(c, struct sprd_dma_chn, chan);
188 }
189
190 static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
191 {
192         struct sprd_dma_chn *mchan = to_sprd_dma_chan(c);
193         return container_of(mchan, struct sprd_dma_dev, channels[c->chan_id]);
194 }
195
196 static inline struct sprd_dma_desc *to_sprd_dma_desc(struct dma_async_tx_descriptor *tx)
197 {
198         return container_of(tx, struct sprd_dma_desc, desc);
199 }
200
201 static void __inline __ap_dma_enable(void)
202 {
203         if (!sci_glb_read((unsigned long)REG_AP_AHB_AHB_EB, BIT_DMA_EB))
204                 sci_glb_set((unsigned long)REG_AP_AHB_AHB_EB, BIT_DMA_EB);
205 }
206
207 static void __inline __ap_dma_disable(void)
208 {
209         if (sci_glb_read((unsigned long)REG_AP_AHB_AHB_EB, BIT_DMA_EB))
210                 sci_glb_clr((unsigned long)REG_AP_AHB_AHB_EB, BIT_DMA_EB);
211 }
212
213 static void __inline __ap_dma_softreset(void)
214 {
215         sci_glb_set(REG_AP_AHB_AHB_RST, BIT_DMA_SOFT_RST);
216         udelay(1);
217         sci_glb_clr(REG_AP_AHB_AHB_RST, BIT_DMA_SOFT_RST);
218 }
219
220 #ifdef CONFIG_AON_DMA_SPRD
221 static void __inline __aon_dma_enable(void)
222 {
223         if (!sci_glb_read((unsigned long)REG_AON_APB_APB_EB1, BIT_AON_DMA_EB)) 
224                 sci_glb_set((unsigned long)REG_AON_APB_APB_EB1, BIT_AON_DMA_EB);
225 }
226
227 static void __inline __aon_dma_disable(void)
228 {
229         if (sci_glb_read((unsigned long)REG_AON_APB_APB_EB1, BIT_AON_DMA_EB)) 
230                 sci_glb_clr((unsigned long)REG_AON_APB_APB_EB1,BIT_AON_DMA_EB);
231 }
232
233 static void __inline __aon_dma_reset(void)
234 {
235         sci_glb_set(REG_AON_APB_APB_RST1, BIT_AON_DMA_SOFT_RST);
236         udelay(1);
237         sci_glb_clr(REG_AON_APB_APB_RST1, BIT_AON_DMA_SOFT_RST);
238 }
239
240 static void __inline __aon_dma_int_enable(void)
241 {
242         if (!sci_glb_read((unsigned long)(SPRD_INTC2_BASE+0x8), BIT(6))) 
243                 sci_glb_set((unsigned long)(SPRD_INTC2_BASE+0x8), BIT(6));
244
245         if (!sci_glb_read((unsigned long)REG_AON_APB_AON_DMA_INT_EN, BIT_AON_DMA_INT_AP_EN))
246                 sci_glb_set((unsigned long)REG_AON_APB_AON_DMA_INT_EN,BIT_AON_DMA_INT_AP_EN);
247 }
248
249 static void __inline __aon_dma_int_disable(void)
250 {
251         if (sci_glb_read((unsigned long)REG_AON_APB_AON_DMA_INT_EN, BIT_AON_DMA_INT_AP_EN))
252                 sci_glb_clr((unsigned long)REG_AON_APB_AON_DMA_INT_EN,BIT_AON_DMA_INT_AP_EN);
253 }
254 #else
255 static void __inline __aon_dma_enable(void){ }
256
257 static void __inline __aon_dma_disable(void){ }
258
259 static void __inline __aon_dma_reset(void){ }
260
261 static void __inline __aon_dma_int_enable(void){ }
262
263 static void __inline __aon_dma_int_disable(void){ }
264 #endif
265
266 static void __inline __dma_set_uid(struct sprd_dma_dev *sdev,
267                                                 struct sprd_dma_chn *mchan, u32 dev_id)
268 {
269         u32 ap_chn_cnt = sdev->ap_chn_cnt;
270
271         if (DMA_UID_SOFTWARE != dev_id) {
272                 if(mchan->chan_num < ap_chn_cnt)
273                         writel_relaxed((mchan->chan_num + 1),
274                                                 (void __iomem *)SPRD_DMA_REQ_CID(sdev->dma_glb_base, dev_id));
275                 else
276                         writel_relaxed((mchan->chan_num - ap_chn_cnt + 1),
277                                                 (void __iomem *)SPRD_DMA_REQ_CID(sdev->aon_dma_glb_base, dev_id));
278         }
279 }
280
281 static void __inline __dma_unset_uid(struct sprd_dma_dev *sdev,
282                                                 struct sprd_dma_chn *mchan, u32 dev_id)
283 {
284         u32 ap_chn_cnt = sdev->ap_chn_cnt;
285
286         if (DMA_UID_SOFTWARE != dev_id) {
287                 if(mchan->chan_num < ap_chn_cnt)
288                         writel_relaxed(0x0,(void __iomem *)SPRD_DMA_REQ_CID(sdev->dma_glb_base, dev_id));
289                 else
290                         writel_relaxed(0x0,(void __iomem *)SPRD_DMA_REQ_CID(sdev->aon_dma_glb_base, dev_id));
291         }
292 }
293
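/*
 * intc register layout as used by this driver: bits [4:0] are the per-type
 * interrupt enables, bits [12:8] the raw status (see __dma_check_int_type()),
 * and writing 1 to bits [28:24] clears the corresponding status.
 */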
294 static void __inline __dma_int_clr(struct sprd_dma_chn *mchan)
295 {
296         volatile struct sprd_dma_chn_reg *dma_reg = (struct sprd_dma_chn_reg *)mchan->dma_chn_base;             
297
298         dma_reg->intc |= 0x1f << 24;
299 }
300
301 static void __inline __dma_int_dis(struct sprd_dma_chn *mchan)
302 {
303         volatile struct sprd_dma_chn_reg *dma_reg = (struct sprd_dma_chn_reg *)mchan->dma_chn_base;
304
305         dma_reg->intc |= 0x1f << 24;
306         dma_reg->intc &= ~0x1f;
307 }
308
309 static void __inline __dma_chn_enable(struct sprd_dma_chn *mchan)
310 {
311         volatile struct sprd_dma_chn_reg *dma_reg = (struct sprd_dma_chn_reg *)mchan->dma_chn_base;
312
313         dma_reg->cfg |= 0x1;
314 }
315
316 static void __inline __dma_soft_request(struct sprd_dma_chn *mchan)
317 {
318         volatile struct sprd_dma_chn_reg *dma_reg = (struct sprd_dma_chn_reg *)mchan->dma_chn_base;
319
320         dma_reg->req |= 0x1;
321 }
322
323 static void __dma_stop_and_disable(struct sprd_dma_chn *mchan)
324 {
325         u32 timeout = 0x2000;
326
327         volatile struct sprd_dma_chn_reg *dma_reg = (struct sprd_dma_chn_reg *)mchan->dma_chn_base;
328         struct sprd_dma_dev *sdev = to_sprd_dma_dev(&mchan->chan);
329         u32 ap_chn_cnt = sdev->ap_chn_cnt;
330
331         if (!(dma_reg->cfg & 0x1))
332                 return;
333
334         dma_reg->pause |= 0x1;
335
336         /* fixme, need to deal with timeout */
337         while (!(dma_reg->pause & (0x1 << 16))){
338                 if(--timeout == 0){
339                         if(mchan->chan_num >= ap_chn_cnt)
340                                 __aon_dma_reset();
341                         else
342                                 __ap_dma_softreset();
343                         break;
344                 }
345                 cpu_relax();
346         }
347
348         dma_reg->cfg &= ~0x1;
349         dma_reg->pause = 0x0;
350 }
351
352 /* get dma source reg addr */
353 static unsigned long __dma_get_src_addr(struct dma_chan *dma_chn)
354 {
355         struct sprd_dma_chn *mchan = to_sprd_dma_chan(dma_chn);
356         unsigned long addr = (unsigned long)mchan->dma_chn_base + 0x10;
357
358         return readl_relaxed((void __iomem *)addr);
359 }
360
361 /* get dma dest reg addr */
362 static unsigned long __dma_get_dst_addr(struct dma_chan *dma_chn)
363 {
364         struct sprd_dma_chn *mchan = to_sprd_dma_chan(dma_chn);
365         unsigned long addr = (unsigned long)mchan->dma_chn_base + 0x14;
366
367         return readl_relaxed((void __iomem *)addr);
368 }
369
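/*
 * Translate one sprd_dma_cfg into a channel register image.  Address-fix,
 * wrap and link-list modes are derived from the cfg fields; with CONFIG_DESC
 * the image goes into the descriptor's register copy, with CONFIG_LINKLIST
 * into the caller-supplied link-list node.
 */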
370 static int __dma_config(struct dma_chan *chan,struct sprd_dma_desc *mdesc,
371                 struct sprd_dma_cfg *cfg_list,struct sprd_dma_chn_reg *dma_reg_addr,
372                 enum config_type type)
373 {
374         volatile struct sprd_dma_chn_reg *dma_reg;
375         struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
376         struct sprd_dma_cfg *dma_cfg_tmp = cfg_list;
377         u32 fix_mode = 0, llist_en = 0, wrap_en = 0;
378         u32 list_end = 0, fix_en = 0,irq_mode = 0,wrap_mode = 0;
379         int chn_type;
380
381         /* check dma fix mode */
382         if (dma_cfg_tmp->src_step != 0 && dma_cfg_tmp->des_step != 0) {
383                 fix_en = 0x0;
384         } else {
385                 if ((dma_cfg_tmp->src_step | dma_cfg_tmp->des_step) == 0) {
386                         fix_en = 0x0;
387                 } else {
388                         fix_en = 0x1;
389                         if (dma_cfg_tmp->src_step)
390                                 fix_mode = 0x1;
391                         else
392                                 fix_mode = 0x0;
393                 }
394         }
395
396         /* check dma wrap mode */
397         if (dma_cfg_tmp->wrap_ptr && dma_cfg_tmp->wrap_to) {
398                 wrap_en = 0x1;
399                 if (dma_cfg_tmp->wrap_to == dma_cfg_tmp->src_addr) {
400                         wrap_mode = 0x0;
401                 } else {
402                         if (dma_cfg_tmp->wrap_to == dma_cfg_tmp->des_addr)
403                                 wrap_mode = 0x1;
404                         else
405                                 return -EINVAL;
406                 }
407         }
408
409         /* linklist configuration */
410         if (dma_cfg_tmp->linklist_ptr) {
411                 llist_en = 0x1;
412                 if (dma_cfg_tmp->is_end == 1)
413                         list_end = 0x1;
414                 else
415                         list_end = 0;
416         }
417
418         chn_type = mchan->chn_type;
419         irq_mode = dma_cfg_tmp->irq_mode;
420
421         /* check dma mode */
422         if ((chn_type == STANDARD_DMA) &&
423                 (irq_mode == TRANS_DONE || irq_mode == LIST_DONE)) {
424                 pr_err("Irq type isn't compatible with channel type!");
425                 return -EINVAL;
426         }
427         
428         if (!IS_ALIGNED(dma_cfg_tmp->src_step, dma_cfg_tmp->datawidth)){
429                 pr_err("Source step is not aligned!");
430                 return -EINVAL;
431         }
432
433         if (!IS_ALIGNED(dma_cfg_tmp->des_step, dma_cfg_tmp->datawidth)){
434                 pr_err("Destination step is not aligned!");
435                 return -EINVAL;
436         }
437
438         if(!mchan->dev_id)
439                 mchan->dev_id = dma_cfg_tmp->dev_id;
440         
441         if(type == CONFIG_DESC)
442                 dma_reg = mdesc->dma_chn_reg;
443         else if(type == CONFIG_LINKLIST)
444                 dma_reg = dma_reg_addr;
445         else
446                 return -EINVAL;
447
448         dma_reg->pause = 0x0;
449         dma_reg->req = 0x0;
450
451         /* set default priority = 1 */
452         dma_reg->cfg = DMA_PRI_1 << CHN_PRIORITY_OFFSET |
453             llist_en << LLIST_EN_OFFSET;
454
455         /* src and des addr */
456         dma_reg->src_addr = dma_cfg_tmp->src_addr;
457         dma_reg->des_addr = dma_cfg_tmp->des_addr;
458
459         /* frag len */
460         dma_reg->frg_len =
461             (dma_cfg_tmp->datawidth << SRC_DATAWIDTH_OFFSET) |
462             (dma_cfg_tmp->datawidth << DES_DATAWIDTH_OFFSET) |
463             (0x0 << SWT_MODE_OFFSET) |
464             (dma_cfg_tmp->req_mode << REQ_MODE_OFFSET) |
465             (wrap_mode << ADDR_WRAP_SEL_OFFSET) |
466             (wrap_en << ADDR_WRAP_EN_OFFSET) |
467             (fix_mode << ADDR_FIX_SEL_OFFSET) |
468             (fix_en << ADDR_FIX_SEL_EN) |
469             (list_end << LLIST_END_OFFSET) | 
470                 (dma_cfg_tmp->fragmens_len & FRG_LEN_MASK);
471
472         /* blk len */
473         dma_reg->blk_len = dma_cfg_tmp->block_len & BLK_LEN_MASK;
474
475         /* set interrupt type*/
476         if(type == CONFIG_DESC){
477                 if(irq_mode == NO_INT)
478                         mchan->irq_handle_enable = 0;
479                 else
480                         mchan->irq_handle_enable = 1;
481
482                 dma_reg->intc &= ~0x1f;
483                 dma_reg->intc |= 0x1 << 4;
484                 switch (irq_mode) {
485                 case NO_INT:
486                         break;
487                 case FRAG_DONE:
488                         dma_reg->intc |= 0x1;
489                         break;
490                 case BLK_DONE:
491                         dma_reg->intc |= 0x2;
492                         break;
493                 case TRANS_DONE:
494                         dma_reg->intc |= 0x4;
495                         break;
496                 case LIST_DONE:
497                         dma_reg->intc |= 0x8;
498                         break;
499                 case CONFIG_ERR:
500                         dma_reg->intc |= 0x10;
501                         break;
502                 default:
503                         return -EINVAL;
504                 }
505         }
506         else
507                 dma_reg->intc = 0;
508         
509         pr_dma("dma_config:cfg=0x%x,frg_len=0x%x,blk_len=0x%x,intc=0x%x!\n",
510                         dma_reg->cfg,dma_reg->frg_len,dma_reg->blk_len,dma_reg->intc);
511
512         if(chn_type == STANDARD_DMA)
513                 return 0;
514
515         /* full dma config */
516         if (0x0 == dma_cfg_tmp->transcation_len)
517                 dma_reg->trsc_len = dma_cfg_tmp->block_len & TRSC_LEN_MASK;
518         else
519                 dma_reg->trsc_len = dma_cfg_tmp->transcation_len & TRSC_LEN_MASK;
520
521         dma_reg->trsf_step =
522             (dma_cfg_tmp->des_step & TRSF_STEP_MASK) << DEST_TRSF_STEP_OFFSET |
523             (dma_cfg_tmp->src_step & TRSF_STEP_MASK) << SRC_TRSF_STEP_OFFSET;
524
525         dma_reg->wrap_ptr = dma_cfg_tmp->wrap_ptr;
526         dma_reg->wrap_to = dma_cfg_tmp->wrap_to;
527
528         dma_reg->llist_ptr = dma_cfg_tmp->linklist_ptr;
529
530         dma_reg->frg_step =
531             (dma_cfg_tmp->dst_frag_step & FRAG_STEP_MASK) << DEST_FRAG_STEP_OFFSET |
532             (dma_cfg_tmp->src_frag_step & FRAG_STEP_MASK) << SRC_FRAG_STEP_OFFSET;
533
534         dma_reg->src_blk_step = dma_cfg_tmp->src_blk_step;
535         dma_reg->des_blk_step = dma_cfg_tmp->dst_blk_step;
536
537         pr_dma("dma_config:trsc_len=0x%x,trsf_step=0x%x,llist_ptr=0x%x,frg_step=0x%x!\n",
538                         dma_reg->trsc_len,dma_reg->trsf_step,dma_reg->llist_ptr,dma_reg->frg_step);
539         
540         return 0;
541 }
542
543 /* config dma linklist */
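/*
 * Each link-list node is a full sprd_dma_chn_reg image in the coherent
 * buffer supplied via link_cfg_v/link_cfg_p.  A node's linklist_ptr points
 * 0x10 bytes into the next node (at its src_addr field), which appears to be
 * where the hardware starts reloading registers from; the last node wraps
 * back to the first so the chain can also run cyclically.
 */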
544 static int __dma_config_linklist(struct dma_chan *chan,struct sprd_dma_desc *mdesc,
545                                                                 struct sprd_dma_cfg *cfg_list,u32 node_size)
546 {
547         int ret, i;
548         struct sprd_dma_chn_reg *dma_reg_list;
549         struct sprd_dma_cfg list_cfg;
550         dma_addr_t cfg_p;
551
552         if(node_size < 2)
553                 return -EINVAL;
554
555         /* check dma linklist node memory */
556         if(cfg_list[0].link_cfg_v == 0 || cfg_list[0].link_cfg_p == 0){
557                 pr_err("Haven't allocated memory for list node!\n");
558                 return -EINVAL;
559         }
560
561         /* get linklist node virtual addr and physical addr */
562         dma_reg_list = (struct sprd_dma_chn_reg *)cfg_list[0].link_cfg_v;
563         cfg_p = (dma_addr_t)cfg_list[0].link_cfg_p;
564
565         pr_dma("Linklist:alloc addr virt:0x%lx,phys addr: 0x%lx\n",
566                         (unsigned long)dma_reg_list,(unsigned long)cfg_p);
567
568         /* linklist configuration */
569         for (i = 0; i < node_size; i++) {
570                 cfg_list[i].linklist_ptr = (u32)(cfg_p +
571                                                 ((i + 1) % node_size) * sizeof(struct sprd_dma_chn_reg) + 0x10);
572
573                 ret = __dma_config(chan,NULL,cfg_list + i,dma_reg_list + i,CONFIG_LINKLIST);
574                 if (ret < 0) {
575                         pr_err("Linklist configuration error!\n");
576                         return -EINVAL;
577                 }
578                 pr_dma("Configuring the link list!\n");
579                 __dma_cfg_check_register(dma_reg_list + i);             
580         }
581
582         memset((void *)&list_cfg, 0x0, sizeof(list_cfg));
583         list_cfg.linklist_ptr = cfg_p + 0x10;
584         list_cfg.irq_mode = cfg_list[0].irq_mode;
585         list_cfg.src_addr = cfg_list[0].src_addr;
586         list_cfg.des_addr = cfg_list[0].des_addr;
587
588         /* support for audio */
589         if(cfg_list[node_size - 1].is_end > 1)
590                 mdesc->cycle = 1;
591
592         ret = __dma_config(chan,mdesc,&list_cfg,NULL,CONFIG_DESC);
593
594         return ret;
595 }
596
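/*
 * Decode the raw interrupt status bits of the intc register (bit 8 frag,
 * bit 9 block, bit 10 transaction, bit 11 list, bit 12 config error) into
 * the highest-priority pending interrupt type.
 */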
597 static dma_int_type __dma_check_int_type(u32 intc_reg)
598 {
599         if(intc_reg & 0x1000)
600                 return CONFIG_ERR;
601         else if(intc_reg & 0x800)
602                 return LIST_DONE;
603         else if(intc_reg & 0x400)
604                 return TRANS_DONE;
605         else if(intc_reg & 0x200)
606                 return BLK_DONE;
607         else if(intc_reg & 0x100)
608                 return FRAG_DONE;
609         else
610                 return NO_INT;
611 }
612
613 static dma_request_mode __dma_check_req_type(u32 frag_reg)
614 {
615         u32 frag_reg_t = frag_reg >> 24; 
616         if((frag_reg_t & 0x3) == 0)
617                 return  FRAG_REQ_MODE;
618         else if((frag_reg_t & 0x3) == 0x1)
619                 return  BLOCK_REQ_MODE;
620         else if((frag_reg_t & 0x3) == 0x2)
621                 return  TRANS_REQ_MODE;
622         else if((frag_reg_t & 0x3) == 0x3)
623                 return  LIST_REQ_MODE;
624         else
625                 return FRAG_REQ_MODE;
626 }
627
628 /* check if the dma request desc is done */
629 static void __dma_check_mdesc_done(struct sprd_dma_desc *mdesc,
630                                 dma_int_type int_type,dma_request_mode req_mode)
631 {
632         if(mdesc->cycle == 1){
633                 mdesc->done = 0;
634                 return;
635         }
636
637         if((unsigned int)int_type >= ((unsigned int)req_mode + 1))
638                 mdesc->done = 1;
639         else
640                 mdesc->done = 0;
641 }
642
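/*
 * Walk the per-channel interrupt status bitmaps (masked status when called
 * from the irq handler, raw status when polled), clear each pending channel
 * interrupt and move finished descriptors to the completed list.  AON
 * channels follow the AP channels, so their bit index is offset by
 * ap_chn_cnt.
 */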
643 static void __dma_check_int(struct sprd_dma_dev *sdev, int type)
644 {
645         struct sprd_dma_chn *mchan = NULL;
646         struct sprd_dma_chn_reg *dma_reg = NULL;
647         struct sprd_dma_desc *mdesc = NULL; 
648         struct dma_async_tx_descriptor *desc = NULL;
649         u32 irq_status = 0,aon_irq_status = 0,i = 0;
650         dma_int_type int_type;
651         dma_request_mode req_type;
652         volatile struct sprd_dma_glb_reg *dma_glb_reg = sdev->dma_glb_base;
653         volatile struct sprd_dma_glb_reg *aon_dma_glb_reg = NULL;
654         if(sdev->aon_chn_cnt > 0 )
655                  aon_dma_glb_reg = sdev->aon_dma_glb_base;
656
657         if(type == 1){
658                 irq_status = dma_glb_reg->int_msk_sts;
659                 aon_irq_status = (aon_dma_glb_reg != NULL) ? (aon_dma_glb_reg->int_msk_sts) : 0;
660                 pr_dma("Enter DMA interrupt handle function!\n");
661         } else {
662                 irq_status = dma_glb_reg->int_raw_sts;
663                 aon_irq_status = (aon_dma_glb_reg != NULL) ? (aon_dma_glb_reg->int_raw_sts) : 0;
664         }
665
666         pr_dma("Check DMA interrupt,irq_status=0x%x,"
667                         "aon_irq_status=0x%x!\n",irq_status,aon_irq_status);
668
669         while(irq_status || aon_irq_status) {
670                 if(irq_status != 0){
671                         i = __ffs(irq_status);
672                         irq_status &= (irq_status - 1);
673                 }else if(aon_irq_status != 0){
674                         i = __ffs(aon_irq_status);
675                         aon_irq_status &= (aon_irq_status - 1);
676                         i += sdev->ap_chn_cnt;
677                 }else
678                         break;
679
680                 mchan = &sdev->channels[i];
681                 spin_lock(&mchan->chn_lock);
682                 dma_reg = (struct sprd_dma_chn_reg *)(mchan->dma_chn_base);
683                 int_type = __dma_check_int_type(dma_reg->intc);
684                 req_type = __dma_check_req_type(dma_reg->frg_len);
685                 pr_dma("DMA channel [%d] interrupt,intc=0x%x,int_type=%d,"
686                            "req_type=%d!\n",i,dma_reg->intc,int_type,(req_type + 1));
687                 dma_reg->intc |= 0x1f << 24;
688                 
689                 /* check if the dma request desc is done */
690                 if(!list_empty(&mchan->active)){
691                         mdesc = list_first_entry(&mchan->active,struct sprd_dma_desc, node);
692                         __dma_check_mdesc_done(mdesc,int_type,req_type);
693                         if(mdesc->done == 1)
694                                 list_splice_tail_init(&mchan->active, &mchan->completed);
695
696                         /* support for audio */
697                         if(mdesc->cycle == 1){
698                                 desc = &mdesc->desc;
699                                 if(desc->callback)
700                                         desc->callback(desc->callback_param);
701                         }
702                 }
703                 spin_unlock(&mchan->chn_lock);          
704         }
705 }
706
707 static int sprd_dma_start(struct sprd_dma_dev *sdev, struct sprd_dma_chn *mchan, 
708                                                         struct sprd_dma_desc *mdesc, u32 dev_id)
709 {
710         __dma_set_uid(sdev,mchan,dev_id);
711         __dma_chn_enable(mchan);
712
713         if(DMA_UID_SOFTWARE == dev_id)
714                 __dma_soft_request(mchan);
715
716         /* for debug info */
717         if(dev_id > DMA_UID_SOFTWARE){
718                 pr_dma("sprd_dma_start,dev_id=%d,ap_req_cid=%d,!\n",dev_id,
719                         readl_relaxed((void __iomem *)SPRD_DMA_REQ_CID(sdev->dma_glb_base, dev_id)));
720                 if(sdev->aon_dma_glb_base)
721                         pr_dma("sprd_dma_start,dev_id=%d,aon_req_cid=%d!\n",dev_id,
722                                 readl_relaxed((void __iomem *)SPRD_DMA_REQ_CID(sdev->aon_dma_glb_base, dev_id)));
723         }
724
725         return 0;
726 }
727
728 static int sprd_dma_stop(struct sprd_dma_chn *mchan)
729 {
730         struct sprd_dma_dev *sdev = to_sprd_dma_dev(&mchan->chan);
731
732         __dma_unset_uid(sdev,mchan,mchan->dev_id);
733         __dma_stop_and_disable(mchan);
734         __dma_int_clr(mchan);
735
736         return 0;
737 }
738
739 static int sprd_dma_execute(struct sprd_dma_chn *mchan)
740 {
741         struct sprd_dma_desc *first = NULL;
742         struct sprd_dma_dev *sdev = to_sprd_dma_dev(&mchan->chan);
743
744         if(!list_empty(&mchan->active))
745                 first = list_first_entry(&mchan->active,struct sprd_dma_desc, node);
746         else 
747                 return 0;
748
749         pr_dma("Before copy data to DMA reg!\n");
750         __dma_cfg_check_register(first->dma_chn_reg);
751         
752         memcpy_toio((void __iomem *)mchan->dma_chn_base,(void *)first->dma_chn_reg,
753                                 sizeof(struct sprd_dma_chn_reg));
754
755         pr_dma("After copy data to DMA reg!\n");
756         __dma_cfg_check_register(mchan->dma_chn_base);
757         
758         sprd_dma_start(sdev, mchan, first, mchan->dev_id);
759         
760         pr_dma("After start the DMA!\n");
761         __dma_cfg_check_register(mchan->dma_chn_base);
762
763         return 0;
764 }
765
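/*
 * Descriptor lifecycle: free -> prepared (prep_* callbacks) -> queued
 * (tx_submit) -> active (programmed into the channel) -> completed
 * (irq/tasklet) -> free again.  tx_submit kicks the hardware immediately
 * when the channel has no active descriptor.
 */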
766 static dma_cookie_t sprd_desc_submit(struct dma_async_tx_descriptor *tx)
767 {
768         struct sprd_dma_chn *mchan = to_sprd_dma_chan(tx->chan);
769         struct sprd_dma_desc *mdesc = to_sprd_dma_desc(tx);
770         struct sprd_dma_desc *first = NULL;
771         unsigned long flags;
772         dma_cookie_t cookie;
773
774         /* assign cookie */
775         spin_lock_irqsave(&mchan->chn_lock, flags);
776         cookie = dma_cookie_assign(tx);
777         list_move_tail(&mdesc->node, &mchan->queued);
778         if(!list_empty(&mdesc->next_node)){
779                 list_splice_tail_init(&mdesc->next_node, &mchan->queued);
780                 pr_dma("Submitting has next node!\n");
781         }
782
783         /* execute the dma desc */
784         if (list_empty(&mchan->active)){
785                 first = list_first_entry(&mchan->queued,struct sprd_dma_desc, node);
786                 list_move_tail(&first->node, &mchan->active);
787                 sprd_dma_execute(mchan);
788         }
789         spin_unlock_irqrestore(&mchan->chn_lock, flags);
790         
791         return cookie;
792 }
793
794 static irqreturn_t __dma_irq_handle(int irq, void *dev_id)
795 {
796         struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
797
798         //spin_lock(&sdev->dma_lock);
799         __dma_check_int(sdev, 1);
800         //spin_unlock(&sdev->dma_lock);
801
802         tasklet_schedule(&sdev->tasklet);
803
804         return IRQ_HANDLED;
805 }
806
807 static int sprd_dma_process_completed(struct sprd_dma_dev *sdev)
808 {
809         struct sprd_dma_chn *mchan;
810         struct sprd_dma_desc *mdesc; 
811         struct sprd_dma_desc *first;
812         dma_cookie_t last_cookie = 0;
813         struct dma_async_tx_descriptor *desc;
814         unsigned long flags;
815         LIST_HEAD(list);
816         u32 dma_chn_cnt = sdev->ap_chn_cnt + sdev->aon_chn_cnt;
817         int i;
818         
819         for(i = 0;i < dma_chn_cnt; i++){
820                 mchan = &sdev->channels[i];
821         
822                 /* handle the completed dma requests */
823                 spin_lock_irqsave(&mchan->chn_lock, flags);
824                 if(!list_empty(&mchan->completed))
825                         list_splice_tail_init(&mchan->completed, &list);
826                 spin_unlock_irqrestore(&mchan->chn_lock, flags);
827
828                 if (list_empty(&list))
829                         continue;
830
831                 list_for_each_entry(mdesc, &list, node) {
832                         pr_dma("Channel [%d] completed list has nodes!\n",i);
833                         desc = &mdesc->desc;
834
835                         if(desc->callback){
836                                 desc->callback(desc->callback_param);
837                         }
838                         
839                         /* submit desc->next */
840                         dma_run_dependencies(desc);
841                         last_cookie = desc->cookie;
842                 }
843
844                 spin_lock_irqsave(&mchan->chn_lock, flags);
845                 list_splice_tail_init(&list, &mchan->free);
846                 
847                 /* continue to process newly added queued requests */
848                 if (!list_empty(&mchan->queued)){
849                         pr_dma("Channel [%d] queued list has nodes!\n",i);
850                         if(list_empty(&mchan->active)){
851                                 first = list_first_entry(&mchan->queued,struct sprd_dma_desc, node);
852                                 list_move_tail(&first->node, &mchan->active);
853                                 sprd_dma_execute(mchan);
854                         }
855                 } else {
856                         mchan->chan.completed_cookie = last_cookie;
857                         pr_dma("Channel [%d] queued list is empty, and transfer is done!\n",i);
858                 }
859
860                 spin_unlock_irqrestore(&mchan->chn_lock, flags);
861         }
862         
863         return 0;
864 }
865
866 static void sprd_dma_tasklet(unsigned long data)
867 {
868         struct sprd_dma_dev *sdev = (void *)data;
869         sprd_dma_process_completed(sdev);
870 }
871
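/*
 * Per-channel resource allocation: one coherent buffer holding
 * MPC_DMA_DESCRIPTORS register images, plus the same number of
 * sprd_dma_desc descriptors from the slab cache, all parked on the
 * channel's free list.
 */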
872 static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
873 {
874         struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
875         struct sprd_dma_dev *mdev = to_sprd_dma_dev(chan);
876         dma_addr_t chn_reg_paddr;
877         struct sprd_dma_desc *mdesc;
878         struct sprd_dma_chn_reg *chn_reg;
879         unsigned long i,flags;
880         LIST_HEAD(descs);
881
882         chn_reg = dmam_alloc_coherent(mdev->dma_dev.dev,
883                                 MPC_DMA_DESCRIPTORS * sizeof(struct sprd_dma_chn_reg),&chn_reg_paddr,GFP_KERNEL);
884         if (!chn_reg)
885                 return -ENOMEM;
886
887         /* init the dma desc */
888         for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
889                 mdesc = (struct sprd_dma_desc *)kmem_cache_zalloc(mdev->dma_desc_node_cachep, GFP_ATOMIC);
890                 if (!mdesc) {
891                         pr_err("Memory allocation error: Allocated only %ld descriptors\n", i);
892                         break;
893                 }
894
895                 dma_async_tx_descriptor_init(&mdesc->desc, chan);
896                 mdesc->desc.flags = DMA_CTRL_ACK;
897                 mdesc->desc.tx_submit = sprd_desc_submit;
898                 mdesc->dma_chn_reg = &chn_reg[i];
899                 mdesc->dma_chn_reg_paddr = chn_reg_paddr + (i * sizeof(struct sprd_dma_chn_reg));
900                 mdesc->done = 0;
901                 mdesc->cycle = 0;
902                 INIT_LIST_HEAD(&mdesc->node);
903                 INIT_LIST_HEAD(&mdesc->next_node);
904                 list_add_tail(&mdesc->node, &descs);
905         }
906
907         if (i == 0) {
908                 dma_free_coherent(mdev->dma_dev.dev,
909                                 MPC_DMA_DESCRIPTORS * sizeof(struct sprd_dma_chn_reg),chn_reg, chn_reg_paddr);
910                 return -ENOMEM;
911         }
912
913         spin_lock_irqsave(&mchan->chn_lock, flags);
914         mchan->dma_desc = chn_reg;
915         mchan->dma_desc_paddr = chn_reg_paddr;
916         list_splice_tail_init(&descs, &mchan->free);
917         spin_unlock_irqrestore(&mchan->chn_lock, flags);
918         
919         /* enable dma */
920         mchan->dev_id = 0;
921         mchan->chan_status = USED;
922         if(mchan->chan_num < mdev->ap_chn_cnt)
923                 __ap_dma_enable();
924         else{
925                 __aon_dma_enable();
926                 __aon_dma_int_enable();
927         }
928
929         pr_dma("Alloc chan resources is OK, and chn_reg=0x%lx, chn_reg_paddr=0x%lx!\n",
930                         (unsigned long)chn_reg,(unsigned long)chn_reg_paddr);
931
932         return 0;
933 }
934
935 static void sprd_dma_free_chan_resources(struct dma_chan *chan)
936 {
937         struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
938         struct sprd_dma_dev *mdev = to_sprd_dma_dev(chan);
939         dma_addr_t chn_reg_paddr;
940         struct sprd_dma_desc *mdesc,*tmp;
941         struct sprd_dma_chn_reg *chn_reg;
942         struct sprd_dma_chn *tchan;
943         unsigned long flags;
944         int i;
945         LIST_HEAD(descs);
946         u32 dma_chn_cnt = mdev->ap_chn_cnt + mdev->aon_chn_cnt;
947         u32 ap_chn_cnt = mdev->ap_chn_cnt;
948         
949         /* delete list */
950         spin_lock_irqsave(&mchan->chn_lock, flags);
951         list_splice_tail_init(&mchan->prepared, &mchan->free);
952         list_splice_tail_init(&mchan->queued, &mchan->free);
953         list_splice_tail_init(&mchan->active, &mchan->free);
954         list_splice_tail_init(&mchan->completed, &mchan->free);
955
956         list_splice_tail_init(&mchan->free, &descs);
957         chn_reg = mchan->dma_desc;
958         chn_reg_paddr = mchan->dma_desc_paddr;
959         spin_unlock_irqrestore(&mchan->chn_lock, flags);
960
961         dmam_free_coherent(mdev->dma_dev.dev,
962                                 MPC_DMA_DESCRIPTORS * sizeof(struct sprd_dma_chn_reg), chn_reg, chn_reg_paddr);
963
964         list_for_each_entry_safe(mdesc, tmp, &descs, node)
965                 kmem_cache_free(mdev->dma_desc_node_cachep, mdesc);
966
967         mchan->chan_status = NO_USED;
968
969         /* stop and disable dma */
970         sprd_dma_stop(mchan);
971         for (i = 0; i < ap_chn_cnt; i++) {
972                 tchan = &mdev->channels[i];
973                 if(tchan->chan_status == USED)
974                         break;
975         }
976         if(i == ap_chn_cnt){
977                 __ap_dma_softreset();
978                 __ap_dma_disable();
979         }
980
981         for (i = ap_chn_cnt; i < dma_chn_cnt; i++) {
982                 tchan = &mdev->channels[i];
983                 if(tchan->chan_status == USED)
984                         break;
985         }
986         if(i == dma_chn_cnt){
987                 __aon_dma_int_disable();
988                 __aon_dma_reset();
989                 __aon_dma_disable();
990         }
991                 
992         pr_dma("Release chan resources is OK!\n");
993 }
994
995 static int sprd_dma_check_int(struct dma_chan *chan)
996 {
997         struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
998
999         __dma_check_int(sdev, 0);
1000         sprd_dma_process_completed(sdev);
1001
1002         return 0;
1003 }
1004
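/*
 * Note the non-standard use of dma_tx_state.residue: callers preload it
 * with SPRD_SRC_ADDR or SPRD_DST_ADDR to read back the channel's current
 * source or destination address instead of a remaining byte count.
 */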
1005 static enum dma_status sprd_dma_tx_status(struct dma_chan *chan, 
1006                                         dma_cookie_t cookie,struct dma_tx_state *txstate)
1007 {
1008         struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
1009         enum dma_status ret;
1010         int residue = txstate ? txstate->residue : 0;
1011
1012         /* for audio no int handler */
1013         if(mchan->irq_handle_enable == 0){
1014                 sprd_dma_check_int(chan);
1015                 pr_dma("Check dma interrupt by hand!\n");
1016         }
1017
1018         //spin_lock(&mchan->chn_lock);
1019         ret = dma_cookie_status(chan, cookie, txstate);
1020         //spin_unlock(&mchan->chn_lock);
1021
1022         /* get dma source/dest addr; txstate may be NULL */
1023         if (residue == SPRD_SRC_ADDR)
1024                 residue = __dma_get_src_addr(chan);
1025         else if (residue == SPRD_DST_ADDR)
1026                 residue = __dma_get_dst_addr(chan);
1027         else
1028                 residue = 0;
             if (txstate)
                     txstate->residue = residue;
1029
1030         pr_dma("%s cookie=%d, residue=0x%x!\n", __func__, cookie, residue);
1031         return ret;
1032 }
1033
1034 static void sprd_dma_issue_pending(struct dma_chan *chan)
1035 {
1036         /*
1037          * We are posting descriptors to the hardware as soon as
1038          * they are ready, so this function does nothing.
1039          */
1040 }
1041
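/*
 * Plain memcpy requests build a single configuration from dest/src/len.
 * With DMA_CFG_FLAG set, the configurations previously staged through
 * DMA_SLAVE_CONFIG (sprd_dma_copy(), guarded by cfg_sema) are consumed
 * instead, and more than one entry turns into a link-list transfer.
 */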
1042 struct dma_async_tx_descriptor *sprd_dma_prep_dma_memcpy(
1043                 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1044                 size_t len, unsigned long flags)
1045 {
1046         struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
1047         struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
1048         struct sprd_dma_desc *mdesc = NULL;
1049         unsigned long irq_flags;
1050         u32 datawidth = 0, src_step = 0,des_step = 0;
1051         struct sprd_dma_cfg *dma_cfg;
1052         int dma_cfg_cnt = dma_cfg_group.dma_cfg_cnt;
1053         int ret = -EINVAL;
1054
1055         /* for configuration */
1056         if(flags & DMA_CFG_FLAG){
1057                 if(dma_cfg_cnt < 1 || dma_cfg_cnt > DMA_CFG_COUNT){
1058                         dma_cfg_group.dma_cfg_cnt = 0;
1059                         up(&dma_cfg_group.cfg_sema);
1060                         pr_err("DMA wrong configuration number!\n");
1061                         return NULL;
1062                 } else {
1063                         dma_cfg = (struct sprd_dma_cfg *)kzalloc(sizeof(struct sprd_dma_cfg) * dma_cfg_cnt, GFP_KERNEL);
1064                         memcpy(dma_cfg,dma_cfg_group.dma_cfg,sizeof(struct sprd_dma_cfg)*dma_cfg_cnt);
1065
1066                         dma_cfg_group.dma_cfg_cnt = 0;
1067                         memset(dma_cfg_group.dma_cfg,0,sizeof(struct sprd_dma_cfg) * DMA_CFG_COUNT);
1068                         up(&dma_cfg_group.cfg_sema);
1069
1070                         goto Have_configured;
1071                 }
1072         } else
1073                 dma_cfg = (struct sprd_dma_cfg *)kzalloc(sizeof(struct sprd_dma_cfg), GFP_KERNEL);
1074
1075         if (len > BLK_LEN_MASK && mchan->chn_type != FULL_DMA) {
1076                 pr_err("Channel type isn't supported!\n");
                     kfree(dma_cfg);
1077                 return NULL;
1078         }
1079
1080         /* set step automatically */
1081         if ((len & 0x3) == 0) {
1082                 datawidth = 2;
1083                 src_step = 4;
1084                 des_step = 4;
1085         } else {
1086                 if ((len & 0x1) == 0) {
1087                         datawidth = 1;
1088                         src_step = 2;
1089                         des_step = 2;
1090                 } else {
1091                         datawidth = 0;
1092                         src_step = 1;
1093                         des_step = 1;
1094                 }
1095         }
1096
1097         /* dma reg setup */
1098         memset(&dma_cfg[0],0,sizeof(struct sprd_dma_cfg));
1099         dma_cfg[0].src_addr = src;
1100         dma_cfg[0].des_addr = dest;
1101         dma_cfg[0].datawidth = datawidth;
1102         dma_cfg[0].src_step = src_step;
1103         dma_cfg[0].des_step = des_step;
1104         dma_cfg[0].fragmens_len = DMA_MEMCPY_MIN_SIZE;
1105         if (len <= BLK_LEN_MASK) {
1106                 dma_cfg[0].block_len = len;
1107                 dma_cfg[0].req_mode = BLOCK_REQ_MODE;
1108                 dma_cfg[0].irq_mode = BLK_DONE;
1109         } else {
1110                 dma_cfg[0].block_len = DMA_MEMCPY_MIN_SIZE;
1111                 dma_cfg[0].transcation_len = len;
1112                 dma_cfg[0].req_mode = TRANS_REQ_MODE;
1113                 dma_cfg[0].irq_mode = TRANS_DONE;
1114         }
1115         dma_cfg_cnt = 1;
1116
1117 Have_configured:        
1118         /* get a free dma desc */
1119         spin_lock_irqsave(&mchan->chn_lock, irq_flags);
1120         if(!list_empty(&mchan->free)){
1121                 mdesc = list_first_entry(&mchan->free, struct sprd_dma_desc, node);
1122                 list_del(&mdesc->node);
1123         }
1124         spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
1125
1126         if(!mdesc){
1127                 sprd_dma_process_completed(sdev);
1128                 kfree(dma_cfg);
1129                 return NULL;
1130         }
1131
1132         /* config into dma reg */
1133         if(dma_cfg_cnt == 1)
1134                 ret = __dma_config(chan,mdesc,&dma_cfg[0],NULL,CONFIG_DESC);
1135         else if(dma_cfg_cnt > 1)
1136                 ret = __dma_config_linklist(chan,mdesc,&dma_cfg[0],dma_cfg_cnt);
1137         else
1138                 pr_err("DMA configuration count isn't available!\n");
1139
1140         kfree(dma_cfg);
1141         if(ret < 0){
1142                 spin_lock_irqsave(&mchan->chn_lock, irq_flags);
1143                 list_add_tail(&mdesc->node, &mchan->free);
1144                 spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
1145                 pr_err("Configuration error!\n");
1146                 return NULL;
1147         }
1148
1149         /* support hardware request */
1150         if(flags & DMA_HARDWARE_FLAG){
1151                 mchan->re_mode = HARDWARE_REQ;
1152         }else{
1153                 mchan->re_mode = SOFTWARE_REQ;
1154                 mchan->dev_id =  DMA_UID_SOFTWARE;
1155         }
1156
1157         /* add to the prepared list */
1158         spin_lock_irqsave(&mchan->chn_lock, irq_flags);
1159         list_add_tail(&mdesc->node, &mchan->prepared);
1160         spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
1161
1162         return &mdesc->desc;
1163 }
1164
1165 struct dma_async_tx_descriptor *sprd_prep_dma_sg(struct dma_chan *chan,
1166                                                                 struct scatterlist *dst_sg, unsigned int dst_nents,
1167                                                                 struct scatterlist *src_sg, unsigned int src_nents,
1168                                                                 unsigned long flags)
1169 {
1170         struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
1171         struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
1172         struct sprd_dma_desc *mdesc = NULL;
1173         struct sprd_dma_desc *first_mdesc = NULL;
1174         struct scatterlist *sg_d;
1175         struct scatterlist *sg_s;
1176         unsigned int scatterlist_entry,src_dma_len,dst_dma_len;
1177         unsigned long len;
1178         int i, ret;
1179         u32 datawidth = 0, src_step = 0,des_step = 0;
1180         struct sprd_dma_cfg dma_cfg_t;
1181         dma_addr_t dst_dma_addr,src_dma_addr;
1182         unsigned long irq_flags;
1183
1184         /* sanity check */
1185         if(dst_nents != src_nents ){
1186                 pr_err("DMA scatterlist src/dst entry counts are not equal!\n");
1187                 return NULL;
1188         }else
1189                 scatterlist_entry = src_nents;
1190
1191         if(scatterlist_entry > MPC_DMA_DESCRIPTORS){
1192                 pr_err("DMA scatterlist is overrun!\n");
1193                 return NULL;
1194         }
1195
1196         if(flags & DMA_HARDWARE_FLAG){
1197                 pr_err("DMA scatterlist does not support hardware requests!\n");
1198                 return NULL;
1199         }
1200
1201         /* for scatter list */
1202         for (i = 0, sg_d = dst_sg, sg_s = src_sg; i < scatterlist_entry; 
1203                                         i++, sg_d = sg_next(sg_d), sg_s = sg_next(sg_s)){
1204                 dst_dma_addr = sg_dma_address(sg_d);
1205                 dst_dma_len = sg_dma_len(sg_d);
1206                 src_dma_addr = sg_dma_address(sg_s);
1207                 src_dma_len = sg_dma_len(sg_s);
1208
1209                 pr_dma("DMA scatterlist dst_dma_addr=0x%x, src_dma_addr=0x%x,dst_len=%d,src_len=%d!\n",
1210                                 (unsigned int)dst_dma_addr,(unsigned int)src_dma_addr,dst_dma_len,src_dma_len);
1211
1212                 if(dst_dma_len != src_dma_len)
1213                         continue;
1214                 else
1215                         len = src_dma_len;
1216
1217                 if ((len & 0x3) == 0) {
1218                         datawidth = 2;
1219                         src_step = 4;
1220                         des_step = 4;
1221                 } else {
1222                         if ((len & 0x1) == 0) {
1223                                 datawidth = 1;
1224                                 src_step = 2;
1225                                 des_step = 2;
1226                         } else {
1227                                 datawidth = 0;
1228                                 src_step = 1;
1229                                 des_step = 1;
1230                         }
1231                 }
1232
1233                 memset(&dma_cfg_t,0,sizeof(struct sprd_dma_cfg));
1234                 dma_cfg_t.src_addr = src_dma_addr;
1235                 dma_cfg_t.des_addr = dst_dma_addr;
1236                 dma_cfg_t.datawidth = datawidth;
1237                 dma_cfg_t.src_step = src_step;
1238                 dma_cfg_t.des_step = des_step;
1239                 dma_cfg_t.fragmens_len = DMA_MEMCPY_MIN_SIZE;
1240                 if (len <= BLK_LEN_MASK) {
1241                         dma_cfg_t.block_len = len;
1242                         dma_cfg_t.req_mode = BLOCK_REQ_MODE;
1243                         dma_cfg_t.irq_mode = BLK_DONE;
1244                 } else {
1245                         dma_cfg_t.block_len = DMA_MEMCPY_MIN_SIZE;
1246                         dma_cfg_t.transcation_len = len;
1247                         dma_cfg_t.req_mode = TRANS_REQ_MODE;
1248                         dma_cfg_t.irq_mode = TRANS_DONE;
1249                 }
1250
1251                 spin_lock_irqsave(&mchan->chn_lock, irq_flags);
1252                 if(!list_empty(&mchan->free)){
1253                         mdesc = list_first_entry(&mchan->free, struct sprd_dma_desc, node);
1254                         list_del(&mdesc->node);
1255                 }
1256                 spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
1257                 if(!mdesc){
1258                         sprd_dma_process_completed(sdev);
1259                         pr_err("Warning: There are not enough mdesc for scatterlist!\n");
                             break;
1260                 }
1261
1262                 ret = __dma_config(chan,mdesc,&dma_cfg_t,NULL,CONFIG_DESC);
1263                 if(ret < 0){
1264                         pr_err("Warning: configuration error!\n");
1265                         spin_lock_irqsave(&mchan->chn_lock, irq_flags);
1266                         list_add_tail(&mdesc->node, &mchan->free);
1267                         spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
1268                         continue;
1269                 }
1270                 
1271                 if(!first_mdesc){
1272                         first_mdesc = mdesc;
1273                         spin_lock_irqsave(&mchan->chn_lock, irq_flags);
1274                         list_add_tail(&mdesc->node, &mchan->prepared);
1275                         spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
1276                 }else{
1277                         spin_lock_irqsave(&mchan->chn_lock, irq_flags);
1278                         list_add_tail(&mdesc->node, &first_mdesc->next_node);
1279                         spin_unlock_irqrestore(&mchan->chn_lock, irq_flags);
1280                 }       
1281         }
1282         
1283         mchan->re_mode = SOFTWARE_REQ;
1284
1285         if(first_mdesc)
1286                 return &first_mdesc->desc;
1287         else
1288                 return NULL;
1289 }
1290
1291 /* copy dma configuration */
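/*
 * 'arg' points to an array of sprd_dma_cfg entries; entries are copied into
 * the global dma_cfg_group until one with a non-zero is_end is seen (or the
 * group is full).  The caller holds cfg_sema, which is released by the next
 * prep_dma_memcpy() call that carries DMA_CFG_FLAG.
 */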
1292 static void sprd_dma_copy(struct sprd_dma_chn *mchan, unsigned long arg)
1293 {
1294         unsigned int i = 0;
1295         do {
1296                 memcpy(&dma_cfg_group.dma_cfg[i],(struct sprd_dma_cfg *)arg,sizeof(struct sprd_dma_cfg));
1297                 arg += sizeof(struct sprd_dma_cfg);
1298
1299                 pr_dma("%s, i:%d block_len:0x%x,fragmens_len:0x%x,src_step:0x%x,des_addr:0x%x,"
1300                         "src_addr:0x%x,link_cfg_p:0x%x,link_cfg_v:0x%lx,is_end:%d,transcation_len:0x%x\n",
1301                         __func__,i,dma_cfg_group.dma_cfg[i].block_len,dma_cfg_group.dma_cfg[i].fragmens_len,
1302                         dma_cfg_group.dma_cfg[i].src_step,dma_cfg_group.dma_cfg[i].des_addr,
1303                         dma_cfg_group.dma_cfg[i].src_addr,dma_cfg_group.dma_cfg[i].link_cfg_p,
1304                         dma_cfg_group.dma_cfg[i].link_cfg_v,dma_cfg_group.dma_cfg[i].is_end,
1305                         dma_cfg_group.dma_cfg[i].transcation_len);
1306
1307         } while(dma_cfg_group.dma_cfg[i++].is_end == 0 && i < (DMA_CFG_COUNT - 1));
1308         dma_cfg_group.dma_cfg_cnt = i;
1309         pr_dma("Get dma configuration number is %d!\n",i);
1310 }
1311
1312 /* terminate dma channel */
1313 static int sprd_terminate_all(struct sprd_dma_chn *mchan)
1314 {
1315         struct dma_chan *chan = &mchan->chan;
1316
1317         sprd_dma_free_chan_resources(chan);
1318         return 0;
1319 }
1320
1321 /* dma control method */
1322 static int sprd_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
1323 {
1324         int ret = 0;
1325         struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
1326         
1327         switch(cmd){
1328         case DMA_SLAVE_CONFIG:
1329                 if(down_trylock(&dma_cfg_group.cfg_sema)){
1330                         pr_err("DMA resource is busy, try again...\n");
1331                         return -ENXIO;
1332                 }
1333                 sprd_dma_copy(mchan, arg);
1334                 break;
1335         case DMA_PAUSE:
1336                 ret = sprd_dma_stop(mchan);
1337                 break;
1338         case DMA_TERMINATE_ALL:
1339                 ret = sprd_terminate_all(mchan);
1340                 break;
1341         case DMA_RESUME:
1342                 ret = sprd_dma_execute(mchan);
1343                 break;
1344         default:
1345                 ret = -ENXIO;
1346                 break;
1347         }
1348
1349         return ret;
1350 }
1351
1352 /* for dma channel request filter*/
1353 bool sprd_dma_filter_fn(struct dma_chan *chan, void *filter_param)
1354 {
1355         struct sprd_dma_chn *mchan = NULL;
1356         unsigned int type = *(unsigned int *)filter_param;
1357         struct sprd_dma_dev *mdev = to_sprd_dma_dev(chan);
1358         u32 ap_chn_cnt = mdev->ap_chn_cnt;
1359
1360         mchan = to_sprd_dma_chan(chan);
1361         if(!mchan)
1362                 return false;
1363
1364         if(type == AP_STANDARD_DMA){
1365                 if(mchan->chn_type == STANDARD_DMA && mchan->chan_num < ap_chn_cnt)
1366                         return true;
1367         }else if(type == AP_FULL_DMA){
1368                 if(mchan->chn_type == FULL_DMA && mchan->chan_num < ap_chn_cnt)
1369                         return true;
1370         }else if(type == AON_STANDARD_DMA){
1371                 if(mchan->chn_type == STANDARD_DMA && mchan->chan_num > (ap_chn_cnt -1))
1372                         return true;
1373         }else if(type == AON_FULL_DMA){
1374                 if(mchan->chn_type == FULL_DMA && mchan->chan_num > (ap_chn_cnt -1))
1375                         return true;
1376         }else if((type & 0xf00) == NUM_REQUEST_DMA){
1377                 if(mchan->chan_num == (type & 0xff))
1378                         return true;
1379         }
1380
1381         return false;
1382 }
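
/*
 * Illustrative sketch (not part of the driver): requesting a channel through
 * the filter above.  AP_FULL_DMA picks a full channel from the AP block; the
 * other selectors (AP_STANDARD_DMA, AON_STANDARD_DMA, AON_FULL_DMA, or
 * NUM_REQUEST_DMA | chn) work the same way.
 */
#if 0
static struct dma_chan *sprd_dma_request_example(void)
{
	dma_cap_mask_t mask;
	unsigned int type = AP_FULL_DMA;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* dma_request_channel() walks the registered channels and calls
	 * sprd_dma_filter_fn() on each until it returns true; the result
	 * is NULL when no channel matches. */
	return dma_request_channel(mask, sprd_dma_filter_fn, &type);
}
#endif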
1383
1384 /* for dma channel request filter*/
1385 bool sprd_dma_filter_fn_t(struct dma_chan *chan, void *param)
1386 {
1387         struct sprd_dma_chn *mchan = to_sprd_dma_chan(chan);
1388         unsigned int req = *(unsigned int *)param;
1389
1390         return req == (mchan->chan_num + 1);
1391 }
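
/*
 * Illustrative sketch (not part of the driver): sprd_dma_filter_fn_t() grabs
 * one specific hardware channel; its parameter is the 1-based channel id
 * (chan_num + 1).
 */
#if 0
static struct dma_chan *sprd_dma_request_chn_example(unsigned int chn_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	return dma_request_channel(mask, sprd_dma_filter_fn_t, &chn_id);
}
#endif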
1392
1393 /* for dma debug */
1394 int sprd_dma_check_register(struct dma_chan *c)
1395 {
1396         volatile struct sprd_dma_chn_reg *dma_reg = NULL;
1397         struct sprd_dma_chn *mchan = to_sprd_dma_chan(c);
1398
1399         dma_reg = (struct sprd_dma_chn_reg *)mchan->dma_chn_base;
1400         __dma_cfg_check_register((void __iomem *)dma_reg);
1401
1402         return 0;
1403 }
1404
1405 /* probe method */
1406 static int sprd_dma_probe(struct platform_device *pdev)
1407 {
1408         struct sprd_dma_dev *sdev = NULL;
1409         struct sprd_dma_chn *dma_chn = NULL;
1410         void __iomem *dma_ap_base = NULL;
1411         void __iomem *dma_aon_base = NULL;
1412         struct resource *res = NULL;
1413         int dma_irq = 0, aon_dma_irq = 0;
1414         u32 aon_offset = 0, dma_chn_cnt = 0, ap_chn_cnt = 0, aon_chn_cnt = 0;
1415         int ret, i;
1416
1417         if(!pdev->dev.of_node){
1418                 pr_warn("Error: Can't find the dma node!\n");
1419                 return -ENODEV;
1420         }
1421
1422         if (of_property_read_u32(pdev->dev.of_node, "#dma-channels", &dma_chn_cnt)){
1423                 pr_warn("Error: Can't get total dma channel number info!\n");
1424                 return -EIO;
1425         }
1426
1427         if (of_property_read_u32(pdev->dev.of_node, "sprd,aon-offset", &aon_offset)){
1428                 pr_warn("Error: Can't get the aon dma channel offset info!\n");
1429                 return -EIO;
1430         }
1431
1432         aon_chn_cnt = dma_chn_cnt - aon_offset;
1433         ap_chn_cnt = dma_chn_cnt - aon_chn_cnt;
1434
1435         pr_dma("DMA total chn: %d, ap chn: %d, aon chn: %d!\n", dma_chn_cnt, ap_chn_cnt, aon_chn_cnt);
1436
1437         dma_irq = platform_get_irq(pdev, 0);
1438         if (dma_irq <= 0) {
1439                 pr_warn("Error: Can't get the ap dma irq number!\n");
1440                 return -EIO;
1441         }
1442
1443         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1444         if (!res) {
1445                 pr_warn("Error: Can't get ap dma registers resource!\n");
1446                 return -EIO;
1447         }
1448
1449         dma_ap_base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
1450         if(!dma_ap_base){
1451                 pr_warn("Error: Can't get the ap dma base addr!\n");
1452                 return -ENOMEM;
1453         }
1454
1455         if(aon_chn_cnt > 0){
1456                 aon_dma_irq = platform_get_irq(pdev, 1);
1457                 if (aon_dma_irq <= 0) {
1458                         pr_warn("Error: Can't get the aon dma irq number!\n");
1459                         return -EIO;
1460                 }
1461
1462                 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1463                 if (!res) {
1464                         pr_warn("Error: Can't get aon dma registers resource!\n");
1465                         return -EIO;
1466                 }
1467
1468                 dma_aon_base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
1469                 if(!dma_aon_base){
1470                         pr_warn("Error: Can't get the aon dma base addr!\n");
1471                         return -ENOMEM;
1472                 }
1473         }
1474
1475         pr_dma("DMA irq number is %d, aon irq number is %d!\n", dma_irq, aon_dma_irq);
1476
1477         /* dma device memory alloc */
1478         sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev) +
1479                                 (sizeof(struct sprd_dma_chn) * dma_chn_cnt), GFP_KERNEL);
1480         if(!sdev){
1481                 pr_err("Error: DMA alloc dma dev failed!\n");
1482                 return -ENOMEM;
1483         }
1484
1485         /* init dma device */
1486         dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
             dma_cap_set(DMA_SG, sdev->dma_dev.cap_mask);
1487         sdev->dma_dev.chancnt = dma_chn_cnt;
1488         INIT_LIST_HEAD(&sdev->dma_dev.channels);
1489         INIT_LIST_HEAD(&sdev->dma_dev.global_node);
1490         spin_lock_init(&sdev->dma_lock);
1491         sdev->dma_dev.dev = &pdev->dev;
1492
1493         sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
1494         sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
1495         sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
1496         sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
1497         sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
1498         sdev->dma_dev.device_prep_dma_sg = sprd_prep_dma_sg;
1499         sdev->dma_dev.device_control = sprd_dma_control;
1500
1501         /* init dma chn */
1502         for (i = 0; i < dma_chn_cnt; i++) {
1503                 dma_chn = &sdev->channels[i];
1504                 dma_chn->chan.device = &sdev->dma_dev;
1505                 dma_cookie_init(&dma_chn->chan);
1506                 list_add_tail(&dma_chn->chan.device_node, &sdev->dma_dev.channels);
1507
1508                 dma_chn->chan_num = i;
1509                 if(i < STANDARD_DMA_NUM || (i > (ap_chn_cnt - 1) 
1510                         && i < (ap_chn_cnt + STANDARD_DMA_NUM))){
1511                         dma_chn->chn_type = STANDARD_DMA;
1512                 } else
1513                         dma_chn->chn_type = FULL_DMA;
1514
1515                 dma_chn->chan_status = NO_USED;
1516                 dma_chn->irq_handle_enable = 0;
1517                 if( i < ap_chn_cnt)
1518                         dma_chn->dma_chn_base = (void __iomem *)((unsigned long)dma_ap_base +
1519                                                                         0x1000 + DMA_CHN_OFFSET * (i));
1520                 else {
1521                         if(dma_aon_base)
1522                                 dma_chn->dma_chn_base = (void __iomem *)((unsigned long)dma_aon_base +
1523                                                                         0x1000 + DMA_CHN_OFFSET * (i - ap_chn_cnt));
1524                         else
1525                                 dma_chn->dma_chn_base = NULL;
1526                 }
1527
1528                 pr_dma("dma_chn [%d] dma_chn_base = 0x%lx!\n",i,(unsigned long)dma_chn->dma_chn_base);
1529
1530                 spin_lock_init(&dma_chn->chn_lock);
1531                 INIT_LIST_HEAD(&dma_chn->free);
1532                 INIT_LIST_HEAD(&dma_chn->prepared);
1533                 INIT_LIST_HEAD(&dma_chn->queued);
1534                 INIT_LIST_HEAD(&dma_chn->active);
1535                 INIT_LIST_HEAD(&dma_chn->completed);
1536         }
1537
1538         sdev->dma_glb_base = dma_ap_base;
1539         sdev->irq = dma_irq;
1540         sdev->aon_dma_glb_base = dma_aon_base;
1541         sdev->aon_irq = aon_dma_irq;
1542         sdev->ap_chn_cnt = ap_chn_cnt;
1543         sdev->aon_chn_cnt = aon_chn_cnt;
1544
1545         pr_dma("dma_glb_base = 0x%lx, aon_dma_glb_base = 0x%lx!\n",
1546                         (unsigned long)sdev->dma_glb_base,(unsigned long)sdev->aon_dma_glb_base);
1547
1548         sdev->dma_desc_node_cachep = 
1549                         kmem_cache_create("dma_desc_node",sizeof(struct sprd_dma_desc), 0,
1550                         SLAB_HWCACHE_ALIGN,NULL);
1551         if(!sdev->dma_desc_node_cachep){
1552                 pr_err("Error: DMA alloc cache failed!\n");
1553                 return -ENOMEM;
1554         }
1555
1556         /* irq request */
1557         ret = devm_request_irq(&pdev->dev, dma_irq, __dma_irq_handle, 0, "sprd_dma",(void*)sdev);
1558         if (ret < 0) {
1559                 pr_err("Error: Request dma irq failed %d\n", ret);
1560                 goto irq_fail;
1561         }
1562
1563         if(aon_chn_cnt > 0){
1564                 ret = devm_request_irq(&pdev->dev, aon_dma_irq, __dma_irq_handle, 0, "sprd_aon_dma",(void*)sdev);
1565                 if (ret < 0) {
1566                         pr_err("Error: Request aon dma irq failed %d\n", ret);
1567                         goto aon_irq_fail;
1568                 }
1569         }
1570
1571         /* initialize the tasklet */
1572         tasklet_init(&sdev->tasklet, sprd_dma_tasklet, (unsigned long)sdev);
1573         /* save the sdev as private data */
1574         platform_set_drvdata(pdev,sdev);
1575
1576         /* dma device register */
1577         ret = dma_async_device_register(&sdev->dma_dev);
1578         if (ret < 0) {
1579                 pr_err("SPRD-DMA: failed to register slave DMA engine device: %d\n",ret);
1580                 goto register_fail;
1581         }
1582         
1583
1584         /* Device-tree DMA controller registration */
1585         sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
1586         ret = of_dma_controller_register(pdev->dev.of_node,
1587                                 of_dma_simple_xlate, &sprd_dma_info);
1588         if (ret) {
1589                 pr_warn("SPRD-DMA: failed to register OF DMA controller\n");
1590                 goto of_register_fail;
1591         }
1592
1593         sema_init(&dma_cfg_group.cfg_sema,1);
1594         dma_cfg_group.dma_cfg_cnt = 0;
1595         memset(dma_cfg_group.dma_cfg,0,sizeof(struct sprd_dma_cfg) * DMA_CFG_COUNT);
1596         pr_notice("SPRD DMA engine driver probe OK!\n");
1597         return 0;
1598
1599 of_register_fail:
1600         dma_async_device_unregister(&sdev->dma_dev);
1601 register_fail:
1602         if(aon_chn_cnt > 0){
1603                 devm_free_irq(&pdev->dev, aon_dma_irq, (void*)sdev);
1604                 irq_dispose_mapping(sdev->aon_irq);
1605         }
1606 aon_irq_fail:
1607         devm_free_irq(&pdev->dev, dma_irq, (void*)sdev);
1608         irq_dispose_mapping(sdev->irq);
1609 irq_fail:
1610         kmem_cache_destroy(sdev->dma_desc_node_cachep);
1611         return ret;
1612 }
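
/*
 * Illustrative sketch (not part of the driver): with of_dma_controller_register()
 * done in the probe above, a consumer node that carries "dmas"/"dma-names"
 * properties can look up its channel by name.  The device pointer and the
 * "tx" name below are made-up placeholders for the consumer's own binding.
 */
#if 0
static struct dma_chan *sprd_dma_of_request_example(struct device *dev)
{
	return dma_request_slave_channel(dev, "tx");
}
#endif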
1613
1614 /* remove method */
1615 static int sprd_dma_remove(struct platform_device *pdev)
1616 {
1617         struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
1618
1619         dma_async_device_unregister(&sdev->dma_dev);
1620         devm_free_irq(&pdev->dev,sdev->irq,(void*)sdev);
1621         irq_dispose_mapping(sdev->irq);
1622         if(sdev->aon_chn_cnt > 0){
1623                 devm_free_irq(&pdev->dev,sdev->aon_irq,(void*)sdev);
1624                 irq_dispose_mapping(sdev->aon_irq);
1625         }
1626         kmem_cache_destroy(sdev->dma_desc_node_cachep);
1627         pr_notice("SPRD DMA engine driver remove OK!\n");
1628         
1629         return 0;
1630 }
1631
1632 static const struct of_device_id sprd_dma_match[] = {
1633         { .compatible = "sprd,sharkl64-dma",
1634         },
1635         {},
1636 };
1637
1638 static struct platform_driver sprd_dma_driver = {
1639         .probe = sprd_dma_probe,
1640         .remove = sprd_dma_remove,
1641         .driver = {
1642                 .name = "sprd_dma",
1643                 .owner = THIS_MODULE,
1644                 .of_match_table = of_match_ptr(sprd_dma_match),
1645         },
1646 };
1647
1648 static int __init sprd_dma_init(void)
1649 {
1650         return platform_driver_register(&sprd_dma_driver);
1651 }
1652
1653 static void __exit sprd_dma_exit(void)
1654 {
1655         platform_driver_unregister(&sprd_dma_driver);
1656 }
1657
1658 subsys_initcall(sprd_dma_init);
1659 module_exit(sprd_dma_exit);
1660
1661 MODULE_LICENSE("GPL");
1662 MODULE_AUTHOR("Baolin.wang <Baolin.wang@spreadtrum.com>");