upload tizen1.0 source
[kernel/linux-2.6.36.git] / arch / arm / plat-samsung / s3c-pl330.c
1 /* linux/arch/arm/plat-samsung/s3c-pl330.c
2  *
3  * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4  *      Jaswinder Singh <jassi.brar@samsung.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/slab.h>
17 #include <linux/platform_device.h>
18 #include <linux/clk.h>
19 #include <linux/err.h>
20 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
21 #include <linux/pm_runtime.h>
22 #endif
23
24 #include <asm/hardware/pl330.h>
25
26 #include <plat/s3c-pl330-pdata.h>
27
/**
 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
 * @busy_chan: Number of channel threads currently in use; compared
 *	against pcfg.num_chan to decide whether the DMAC is full.
 * @peri: Array (PL330_MAX_PERI entries) of peripheral IDs this DMAC
 *	can talk to; unused slots hold DMACH_MAX.
 * @node: To attach to the global dmac_list.
 * @pi: PL330 configuration info for the DMAC.
 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
 * @clk: Operation clock of the DMAC; enabled/disabled around
 *	channel request/free.
 */
struct s3c_pl330_dmac {
	unsigned		busy_chan;
	enum dma_ch		*peri;
	struct list_head	node;
	struct pl330_info	*pi;
	struct kmem_cache	*kmcache;
	struct clk		*clk;
};
45
/**
 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
 * @token: Xfer ID provided by the client; passed back verbatim in the
 *	buffer-done callback.
 * @node: To attach to the owning channel's xfer_list.
 * @px: Xfer descriptor handed to the PL330 core.
 * @chan: Owner channel of this xfer.
 */
struct s3c_pl330_xfer {
	void			*token;
	struct list_head	node;
	struct pl330_xfer	px;
	struct s3c_pl330_chan	*chan;
};
59
/**
 * struct s3c_pl330_chan - Logical channel to communicate with
 *	a Physical peripheral.
 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
 *	NULL if the channel is available to be acquired.
 * @id: ID of the peripheral that this channel can communicate with.
 * @options: Option flags specified by the client
 *	(S3C2410_DMAF_CIRCULAR / S3C2410_DMAF_AUTOSTART).
 * @sdaddr: Fixed device-side address provided via s3c2410_dma_devconfig.
 * @node: To attach to the global chan_list.
 * @lrq: Pointer to the last submitted pl330_req to PL330 core; used to
 *	pick the other (free) ping-pong request for the next submit.
 * @xfer_list: To manage list of xfers enqueued.
 * @req: Two requests to ping-pong with the PL330 engine.
 * @callback_fn: Buffer-done callback function of the client.
 * @rqcfg: Channel configuration shared by both requests.
 * @xfer_head: Pointer to the xfer to be executed next.
 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
 *	channel is available to be acquired.
 * @client: Client of this channel. NULL if the
 *	channel is available to be acquired.
 */
struct s3c_pl330_chan {
	void				*pl330_chan_id;
	enum dma_ch			id;
	unsigned int			options;
	unsigned long			sdaddr;
	struct list_head		node;
	struct pl330_req		*lrq;
	struct list_head		xfer_list;
	struct pl330_req		req[2];
	s3c2410_dma_cbfn_t		callback_fn;
	struct pl330_reqcfg		rqcfg;
	struct s3c_pl330_xfer		*xfer_head;
	struct s3c_pl330_dmac		*dmac;
	struct s3c2410_dma_client	*client;
};
95
/* All DMACs in the platform */
static LIST_HEAD(dmac_list);

/* All channels to peripherals in the platform */
static LIST_HEAD(chan_list);

/*
 * Since we add resources (DMACs and Channels) to the global pool,
 * we need to guard access to the resources using a global lock.
 * Taken with irqsave everywhere, as callbacks may run in IRQ context.
 */
static DEFINE_SPINLOCK(res_lock);
107
108 /* Returns the channel with ID 'id' in the chan_list */
109 static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
110 {
111         struct s3c_pl330_chan *ch;
112
113         list_for_each_entry(ch, &chan_list, node)
114                 if (ch->id == id)
115                         return ch;
116
117         return NULL;
118 }
119
120 /* Allocate a new channel with ID 'id' and add to chan_list */
121 static void chan_add(const enum dma_ch id)
122 {
123         struct s3c_pl330_chan *ch = id_to_chan(id);
124
125         /* Return if the channel already exists */
126         if (ch)
127                 return;
128
129         ch = kmalloc(sizeof(*ch), GFP_KERNEL);
130         /* Return silently to work with other channels */
131         if (!ch)
132                 return;
133
134         ch->id = id;
135         ch->dmac = NULL;
136
137         list_add_tail(&ch->node, &chan_list);
138 }
139
140 /* If the channel is not yet acquired by any client */
141 static bool chan_free(struct s3c_pl330_chan *ch)
142 {
143         if (!ch)
144                 return false;
145
146         /* Channel points to some DMAC only when it's acquired */
147         return ch->dmac ? false : true;
148 }
149
150 /*
151  * Returns 0 is peripheral i/f is invalid or not present on the dmac.
152  * Index + 1, otherwise.
153  */
154 static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
155 {
156         enum dma_ch *id = dmac->peri;
157         int i;
158
159         /* Discount invalid markers */
160         if (ch_id == DMACH_MAX)
161                 return 0;
162
163         for (i = 0; i < PL330_MAX_PERI; i++)
164                 if (id[i] == ch_id)
165                         return i + 1;
166
167         return 0;
168 }
169
170 /* If all channel threads of the DMAC are busy */
171 static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
172 {
173         struct pl330_info *pi = dmac->pi;
174
175         return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
176 }
177
178 /*
179  * Returns the number of free channels that
180  * can be handled by this dmac only.
181  */
182 static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
183 {
184         enum dma_ch *id = dmac->peri;
185         struct s3c_pl330_dmac *d;
186         struct s3c_pl330_chan *ch;
187         unsigned found, count = 0;
188         enum dma_ch p;
189         int i;
190
191         for (i = 0; i < PL330_MAX_PERI; i++) {
192                 p = id[i];
193                 ch = id_to_chan(p);
194
195                 if (p == DMACH_MAX || !chan_free(ch))
196                         continue;
197
198                 found = 0;
199                 list_for_each_entry(d, &dmac_list, node) {
200                         if (d != dmac && iface_of_dmac(d, ch->id)) {
201                                 found = 1;
202                                 break;
203                         }
204                 }
205                 if (!found)
206                         count++;
207         }
208
209         return count;
210 }
211
/*
 * Measure of suitability of 'dmac' handling 'ch'
 *
 * 0 indicates 'dmac' can not handle 'ch' either
 * because it is not supported by the hardware or
 * because all dmac channels are currently busy.
 *
 * >0 value indicates 'dmac' has the capability.
 * The bigger the value the more suitable the dmac.
 */
#define MAX_SUIT	UINT_MAX
#define MIN_SUIT	0

static unsigned suitablility(struct s3c_pl330_dmac *dmac,
		struct s3c_pl330_chan *ch)
{
	struct pl330_info *pi = dmac->pi;
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	unsigned s;
	int i;

	s = MIN_SUIT;
	/* If all the DMAC channel threads are busy */
	if (dmac_busy(dmac))
		return s;

	for (i = 0; i < PL330_MAX_PERI; i++)
		if (id[i] == ch->id)
			break;

	/* If the 'dmac' can't talk to 'ch' */
	if (i == PL330_MAX_PERI)
		return s;

	/* Assume perfect fit until a competing DMAC is found */
	s = MAX_SUIT;
	list_for_each_entry(d, &dmac_list, node) {
		/*
		 * If some other dmac can talk to this
		 * peri and has some channel free.
		 */
		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
			s = 0;
			break;
		}
	}
	/* No competition: this DMAC is the unique choice */
	if (s)
		return s;

	/* Base score when several DMACs compete for 'ch' */
	s = 100;

	/* Good if free chans are more, bad otherwise */
	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);

	return s;
}
268
269 /* More than one DMAC may have capability to transfer data with the
270  * peripheral. This function assigns most suitable DMAC to manage the
271  * channel and hence communicate with the peripheral.
272  */
273 static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
274 {
275         struct s3c_pl330_dmac *d, *dmac = NULL;
276         unsigned sn, sl = MIN_SUIT;
277
278         list_for_each_entry(d, &dmac_list, node) {
279                 sn = suitablility(d, ch);
280
281                 if (sn == MAX_SUIT)
282                         return d;
283
284                 if (sn > sl)
285                         dmac = d;
286         }
287
288         return dmac;
289 }
290
291 /* Acquire the channel for peripheral 'id' */
292 static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
293 {
294         struct s3c_pl330_chan *ch = id_to_chan(id);
295         struct s3c_pl330_dmac *dmac;
296
297         /* If the channel doesn't exist or is already acquired */
298         if (!ch || !chan_free(ch)) {
299                 ch = NULL;
300                 goto acq_exit;
301         }
302
303         dmac = map_chan_to_dmac(ch);
304         /* If couldn't map */
305         if (!dmac) {
306                 ch = NULL;
307                 goto acq_exit;
308         }
309
310         dmac->busy_chan++;
311         ch->dmac = dmac;
312
313 acq_exit:
314         return ch;
315 }
316
/* Delete xfer from the queue.
 * Also fixes up ch->xfer_head when it pointed at the removed node:
 * the head is advanced to the next node (wrapping past the list
 * sentinel), or cleared when this was the only xfer queued.
 */
static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
{
	struct s3c_pl330_xfer *t;
	struct s3c_pl330_chan *ch;
	int found;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Make sure xfer is in the queue */
	found = 0;
	list_for_each_entry(t, &ch->xfer_list, node)
		if (t == xfer) {
			found = 1;
			break;
		}

	if (!found)
		return;

	/* If xfer is last entry in the queue, wrap to the first entry */
	if (xfer->node.next == &ch->xfer_list)
		t = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		t = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	/* If there was only one node left */
	if (t == xfer)
		ch->xfer_head = NULL;
	else if (ch->xfer_head == xfer)
		ch->xfer_head = t;

	list_del(&xfer->node);
}
356
/* Provides pointer to the next xfer in the queue.
 * If CIRCULAR option is set, the list is left intact,
 * otherwise the xfer is removed from the list.
 * Forced delete 'pluck' can be set to override the CIRCULAR option.
 *
 * ch->xfer_head is always advanced to the following node, wrapping
 * around past the list sentinel, so a CIRCULAR channel cycles through
 * its queue endlessly.
 */
static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
		int pluck)
{
	struct s3c_pl330_xfer *xfer = ch->xfer_head;

	if (!xfer)
		return NULL;

	/* If xfer is last entry in the queue, wrap to the first entry */
	if (xfer->node.next == &ch->xfer_list)
		ch->xfer_head = list_entry(ch->xfer_list.next,
					struct s3c_pl330_xfer, node);
	else
		ch->xfer_head = list_entry(xfer->node.next,
					struct s3c_pl330_xfer, node);

	/* del_from_queue() re-fixes xfer_head if it removes that node */
	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
		del_from_queue(xfer);

	return xfer;
}
383
/* Append 'xfer' to the channel queue; a non-zero 'front' makes it
 * the next xfer to execute (used to re-queue a request the PL330
 * core rejected with -EAGAIN).
 */
static inline void add_to_queue(struct s3c_pl330_chan *ch,
		struct s3c_pl330_xfer *xfer, int front)
{
	struct pl330_xfer *xt;

	/* If queue empty */
	if (ch->xfer_head == NULL)
		ch->xfer_head = xfer;

	xt = &ch->xfer_head->px;
	/* If the head already submitted (CIRCULAR head) */
	if (ch->options & S3C2410_DMAF_CIRCULAR &&
		(xt == ch->req[0].x || xt == ch->req[1].x))
		ch->xfer_head = xfer;

	/* If this is a resubmission, it should go at the head */
	if (front) {
		ch->xfer_head = xfer;
		list_add(&xfer->node, &ch->xfer_list);
	} else {
		list_add_tail(&xfer->node, &ch->xfer_list);
	}
}
407
408 static inline void _finish_off(struct s3c_pl330_xfer *xfer,
409                 enum s3c2410_dma_buffresult res, int ffree)
410 {
411         struct s3c_pl330_chan *ch;
412
413         if (!xfer)
414                 return;
415
416         ch = xfer->chan;
417
418         /* Do callback */
419         if (ch->callback_fn)
420                 ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
421
422         /* Force Free or if buffer is not needed anymore */
423         if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
424                 kmem_cache_free(ch->dmac->kmcache, xfer);
425 }
426
/* Pick the next xfer off the channel queue and hand it to the PL330
 * core via request 'r'. Called with res_lock held.
 *
 * Returns 0 on success (including the case where the xfer had to be
 * re-queued on -EAGAIN), or the error from pl330_submit_req().
 */
static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
		struct pl330_req *r)
{
	struct s3c_pl330_xfer *xfer;
	int ret = 0;

	/* If already submitted */
	if (r->x)
		return 0;

	xfer = get_from_queue(ch, 0);
	if (xfer) {
		r->x = &xfer->px;

		/* Use max bandwidth for M<->M xfers */
		if (r->rqtype == MEMTOMEM) {
			struct pl330_info *pi = xfer->chan->dmac->pi;
			int burst = 1 << ch->rqcfg.brst_size;
			u32 bytes = r->x->bytes;
			int bl;

			/* Start from the largest burst the FIFO allows */
			bl = pi->pcfg.data_bus_width / 8;
			bl *= pi->pcfg.data_buf_dep;
			bl /= burst;

			/* src/dst_burst_len can't be more than 16 */
			if (bl > 16)
				bl = 16;

			/* Shrink until the xfer size divides evenly */
			while (bl > 1) {
				if (!(bytes % (bl * burst)))
					break;
				bl--;
			}

			ch->rqcfg.brst_len = bl;
		} else {
			ch->rqcfg.brst_len = 1;
		}

		ret = pl330_submit_req(ch->pl330_chan_id, r);

		/* If submission was successful */
		if (!ret) {
			ch->lrq = r; /* latest submitted req */
			return 0;
		}

		r->x = NULL;

		/* If both of the PL330 ping-pong buffers filled */
		if (ret == -EAGAIN) {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			/* Queue back again */
			add_to_queue(ch, xfer, 1);
			ret = 0;
		} else {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			/* Unrecoverable: complete the xfer with an error */
			_finish_off(xfer, S3C2410_RES_ERR, 0);
		}
	}

	return ret;
}
493
494 static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
495         struct pl330_req *r, enum pl330_op_err err)
496 {
497         unsigned long flags;
498         struct s3c_pl330_xfer *xfer;
499         struct pl330_xfer *xl = r->x;
500         enum s3c2410_dma_buffresult res;
501
502         spin_lock_irqsave(&res_lock, flags);
503
504         if (!r->infiniteloop) {
505                 r->x = NULL;
506
507                 s3c_pl330_submit(ch, r);
508         }
509
510         spin_unlock_irqrestore(&res_lock, flags);
511
512         /* Map result to S3C DMA API */
513         if (err == PL330_ERR_NONE)
514                 res = S3C2410_RES_OK;
515         else if (err == PL330_ERR_ABORT)
516                 res = S3C2410_RES_ABORT;
517         else
518                 res = S3C2410_RES_ERR;
519
520         /* If last request had some xfer */
521         if (!r->infiniteloop) {
522                 if (xl) {
523                         xfer = container_of(xl, struct s3c_pl330_xfer, px);
524                         _finish_off(xfer, res, 0);
525                 } else {
526                         dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
527                                 __func__, __LINE__);
528                 }
529         } else {
530                 /* Do callback */
531
532                 xfer = container_of(xl, struct s3c_pl330_xfer, px);
533                 if (ch->callback_fn)
534                         ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
535         }
536 }
537
538 static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
539 {
540         struct pl330_req *r = token;
541         struct s3c_pl330_chan *ch = container_of(r,
542                                         struct s3c_pl330_chan, req[0]);
543         s3c_pl330_rq(ch, r, err);
544 }
545
546 static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
547 {
548         struct pl330_req *r = token;
549         struct s3c_pl330_chan *ch = container_of(r,
550                                         struct s3c_pl330_chan, req[1]);
551         s3c_pl330_rq(ch, r, err);
552 }
553
554 /* Release an acquired channel */
555 static void chan_release(struct s3c_pl330_chan *ch)
556 {
557         struct s3c_pl330_dmac *dmac;
558
559         if (chan_free(ch))
560                 return;
561
562         dmac = ch->dmac;
563         ch->dmac = NULL;
564         dmac->busy_chan--;
565 }
566
/**
 * s3c2410_dma_ctrl - Perform a control operation on an acquired channel.
 * @id: Peripheral ID the channel is bound to.
 * @op: Operation (START/STOP/FLUSH/...).
 *
 * START enqueues both ping-pong requests and starts the channel thread.
 * STOP aborts the active transfer and completes it with ABORT.
 * FLUSH additionally drops and completes every queued xfer.
 * PAUSE/RESUME/TIMEOUT/STARTED are accepted but are no-ops here.
 *
 * The lock is dropped around each _finish_off() so client callbacks
 * never run under res_lock.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
{
	struct s3c_pl330_xfer *xfer;
	enum pl330_chan_op pl330op;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int idx, ret;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto ctrl_exit;
	}

	switch (op) {
	case S3C2410_DMAOP_START:
		/* Make sure both reqs are enqueued */
		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
		s3c_pl330_submit(ch, &ch->req[idx]);
		s3c_pl330_submit(ch, &ch->req[1 - idx]);
		pl330op = PL330_OP_START;
		break;

	case S3C2410_DMAOP_STOP:
		pl330op = PL330_OP_ABORT;
		break;

	case S3C2410_DMAOP_FLUSH:
		pl330op = PL330_OP_FLUSH;
		break;

	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_TIMEOUT:
	case S3C2410_DMAOP_STARTED:
		/* Not implemented for PL330; report success */
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;

	default:
		spin_unlock_irqrestore(&res_lock, flags);
		return -EINVAL;
	}

	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);

	if (pl330op == PL330_OP_START) {
		spin_unlock_irqrestore(&res_lock, flags);
		return ret;
	}

	/* Select the request that is not the latest submitted one */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	/* Abort the current xfer */
	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		/* Drop xfer during FLUSH */
		if (pl330op == PL330_OP_FLUSH)
			del_from_queue(xfer);

		ch->req[idx].x = NULL;

		/* Drop the lock across the client callback */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT,
				pl330op == PL330_OP_FLUSH ? 1 : 0);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Flush the whole queue */
	if (pl330op == PL330_OP_FLUSH) {

		/* The other ping-pong request may also hold an xfer */
		if (ch->req[1 - idx].x) {
			xfer = container_of(ch->req[1 - idx].x,
					struct s3c_pl330_xfer, px);

			del_from_queue(xfer);

			ch->req[1 - idx].x = NULL;

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);
		}

		/* Finish off the remaining in the queue */
		xfer = ch->xfer_head;
		while (xfer) {

			del_from_queue(xfer);

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);

			/* Re-read the head; it moved while unlocked */
			xfer = ch->xfer_head;
		}
	}

ctrl_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);
675
/**
 * s3c2410_dma_enqueue_ring - Queue a buffer on the channel of 'id'.
 * @id: Peripheral ID the channel was acquired for.
 * @token: Client cookie returned in the buffer-done callback.
 * @addr: DMA address of the memory-side buffer.
 * @size: Buffer size in bytes; must be a multiple of the burst size.
 * @numofblock: Stored as the request's 'infiniteloop' count; non-zero
 *	makes the request loop in hardware (ring mode).
 *
 * Returns 0 on success, negative errno otherwise.
 */
int s3c2410_dma_enqueue_ring(enum dma_ch id, void *token,
			dma_addr_t addr, int size, int numofblock)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int idx, ret = 0;

	spin_lock_irqsave(&res_lock, flags);
	ch = id_to_chan(id);

	/* Error if invalid or free channel */
	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	/* Error if size is unaligned */
	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
	if (!xfer) {
		ret = -ENOMEM;
		goto enq_exit;
	}

	xfer->token = token;
	xfer->chan = ch;
	xfer->px.bytes = size;
	xfer->px.next = NULL; /* Single request */

	/* For S3C DMA API, direction is always fixed for all xfers */
	if (ch->req[0].rqtype == MEMTODEV) {
		xfer->px.src_addr = addr;
		xfer->px.dst_addr = ch->sdaddr;
	} else {
		xfer->px.src_addr = ch->sdaddr;
		xfer->px.dst_addr = addr;
	}

	add_to_queue(ch, xfer, 0);

	/* Try submitting on either request */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (!ch->req[idx].x) {
		ch->req[idx].infiniteloop = numofblock;
		s3c_pl330_submit(ch, &ch->req[idx]);
	} else {
		ch->req[1 - idx].infiniteloop = numofblock;
		s3c_pl330_submit(ch, &ch->req[1 - idx]);
	}
	spin_unlock_irqrestore(&res_lock, flags);

	/*
	 * NOTE(review): ch->options is read after dropping res_lock —
	 * presumably safe because clients set flags before enqueueing;
	 * confirm against callers.
	 */
	if (ch->options & S3C2410_DMAF_AUTOSTART)
		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);

	return 0;

enq_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue_ring);
744
/**
 * s3c2410_dma_request - Acquire the DMA channel for peripheral 'id'.
 * @id: Peripheral ID to acquire a channel for.
 * @client: Client descriptor; recorded so only the same client
 *	may later free the channel.
 * @dev: Unused by this implementation.
 *
 * Maps the channel to the most suitable DMAC, powers/clocks it,
 * grabs a PL330 channel thread and resets the channel state to
 * defaults (word bursts, no callback, invalid DEVTODEV direction
 * until s3c2410_dma_devconfig() is called).
 *
 * Returns 0 on success, -EBUSY otherwise.
 */
int s3c2410_dma_request(enum dma_ch id,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c_pl330_dmac *dmac;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = chan_acquire(id);
	if (!ch) {
		ret = -EBUSY;
		goto req_exit;
	}

	dmac = ch->dmac;

#if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
	/* enable the power domain; may sleep, so drop the spinlock */
	spin_unlock_irqrestore(&res_lock, flags);
	pm_runtime_get_sync(dmac->pi->dev);
	spin_lock_irqsave(&res_lock, flags);
#endif
	clk_enable(dmac->clk);

	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
	if (!ch->pl330_chan_id) {
		/* Undo acquisition, clock and power in reverse order */
		chan_release(ch);
		clk_disable(dmac->clk);
#if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
		/* disable the power domain */
		spin_unlock_irqrestore(&res_lock, flags);
		pm_runtime_put(dmac->pi->dev);
		spin_lock_irqsave(&res_lock, flags);
#endif
		ret = -EBUSY;
		goto req_exit;
	}

	ch->client = client;
	ch->options = 0; /* Clear any option */
	ch->callback_fn = NULL; /* Clear any callback */
	ch->lrq = NULL;

	ch->rqcfg.brst_size = 2; /* Default word size */
	ch->rqcfg.swap = SWAP_NO;
	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.privileged = 0;
	ch->rqcfg.insnaccess = 0;

	/* Set invalid direction */
	ch->req[0].rqtype = DEVTODEV;
	ch->req[1].rqtype = ch->req[0].rqtype;

	/* Both requests share the channel configuration */
	ch->req[0].cfg = &ch->rqcfg;
	ch->req[1].cfg = ch->req[0].cfg;

	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
	ch->req[1].peri = ch->req[0].peri;

	ch->req[0].token = &ch->req[0];
	ch->req[0].xfer_cb = s3c_pl330_rq0;
	ch->req[1].token = &ch->req[1];
	ch->req[1].xfer_cb = s3c_pl330_rq1;

	ch->req[0].x = NULL;
	ch->req[1].x = NULL;

	/* Reset xfer list */
	INIT_LIST_HEAD(&ch->xfer_list);
	ch->xfer_head = NULL;

req_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_request);
826
/**
 * s3c2410_dma_free - Release the channel acquired for peripheral 'id'.
 * @id: Peripheral ID of the channel.
 * @client: Must match the client that acquired the channel.
 *
 * Aborts the active and queued xfers (completing each with ABORT),
 * returns the PL330 channel thread, disables the clock/power domain
 * and marks the channel free. The lock is dropped around every
 * _finish_off() so client callbacks never run under res_lock.
 *
 * Returns 0 on success, -EBUSY if 'client' does not own the channel.
 */
int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int ret = 0;
	unsigned idx;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch))
		goto free_exit;

	/* Refuse if someone else wanted to free the channel */
	if (ch->client != client) {
		ret = -EBUSY;
		goto free_exit;
	}

	/* Stop any active xfer, Flush the queue and do callbacks */
	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);

	/* Abort the submitted requests */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[idx].x = NULL;
		del_from_queue(xfer);

		/* Drop the lock across the client callback */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	if (ch->req[1 - idx].x) {
		xfer = container_of(ch->req[1 - idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[1 - idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Pluck and Abort the queued requests in order */
	do {
		xfer = get_from_queue(ch, 1);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	} while (xfer);

	ch->client = NULL;

	pl330_release_channel(ch->pl330_chan_id);

	ch->pl330_chan_id = NULL;
	clk_disable(ch->dmac->clk);

#if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
	/* disable the power domain; may sleep, so drop the spinlock */
	spin_unlock_irqrestore(&res_lock, flags);
	pm_runtime_put(ch->dmac->pi->dev);
	spin_lock_irqsave(&res_lock, flags);
#endif
	chan_release(ch);

free_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_free);
908
909 int s3c2410_dma_config(enum dma_ch id, int xferunit)
910 {
911         struct s3c_pl330_chan *ch;
912         struct pl330_info *pi;
913         unsigned long flags;
914         int i, dbwidth, ret = 0;
915
916         spin_lock_irqsave(&res_lock, flags);
917
918         ch = id_to_chan(id);
919
920         if (!ch || chan_free(ch)) {
921                 ret = -EINVAL;
922                 goto cfg_exit;
923         }
924
925         pi = ch->dmac->pi;
926         dbwidth = pi->pcfg.data_bus_width / 8;
927
928         /* Max size of xfer can be pcfg.data_bus_width */
929         if (xferunit > dbwidth) {
930                 ret = -EINVAL;
931                 goto cfg_exit;
932         }
933
934         i = 0;
935         while (xferunit != (1 << i))
936                 i++;
937
938         /* If valid value */
939         if (xferunit == (1 << i))
940                 ch->rqcfg.brst_size = i;
941         else
942                 ret = -EINVAL;
943
944 cfg_exit:
945         spin_unlock_irqrestore(&res_lock, flags);
946
947         return ret;
948 }
949 EXPORT_SYMBOL(s3c2410_dma_config);
950
951 /* Options that are supported by this driver */
952 #define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
953
954 int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
955 {
956         struct s3c_pl330_chan *ch;
957         unsigned long flags;
958         int ret = 0;
959
960         spin_lock_irqsave(&res_lock, flags);
961
962         ch = id_to_chan(id);
963
964         if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
965                 ret = -EINVAL;
966         else
967                 ch->options = options;
968
969         spin_unlock_irqrestore(&res_lock, flags);
970
971         return 0;
972 }
973 EXPORT_SYMBOL(s3c2410_dma_setflags);
974
975 int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
976 {
977         struct s3c_pl330_chan *ch;
978         unsigned long flags;
979         int ret = 0;
980
981         spin_lock_irqsave(&res_lock, flags);
982
983         ch = id_to_chan(id);
984
985         if (!ch || chan_free(ch))
986                 ret = -EINVAL;
987         else
988                 ch->callback_fn = rtn;
989
990         spin_unlock_irqrestore(&res_lock, flags);
991
992         return ret;
993 }
994 EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
995
996 int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
997                           unsigned long address)
998 {
999         struct s3c_pl330_chan *ch;
1000         unsigned long flags;
1001         int ret = 0;
1002
1003         spin_lock_irqsave(&res_lock, flags);
1004
1005         ch = id_to_chan(id);
1006
1007         if (!ch || chan_free(ch)) {
1008                 ret = -EINVAL;
1009                 goto devcfg_exit;
1010         }
1011
1012         switch (source) {
1013         case S3C2410_DMASRC_HW: /* P->M */
1014                 ch->req[0].rqtype = DEVTOMEM;
1015                 ch->req[1].rqtype = DEVTOMEM;
1016                 ch->rqcfg.src_inc = 0;
1017                 ch->rqcfg.dst_inc = 1;
1018                 break;
1019         case S3C2410_DMASRC_MEM: /* M->P */
1020                 ch->req[0].rqtype = MEMTODEV;
1021                 ch->req[1].rqtype = MEMTODEV;
1022                 ch->rqcfg.src_inc = 1;
1023                 ch->rqcfg.dst_inc = 0;
1024                 break;
1025         case S3C_DMA_MEM2MEM:
1026                 ch->req[0].rqtype = MEMTOMEM;
1027                 ch->req[1].rqtype = MEMTOMEM;
1028                 ch->rqcfg.src_inc = 1;
1029                 ch->rqcfg.dst_inc = 1;
1030                 break;
1031         case S3C_DMA_MEM2MEM_SET:
1032                 ch->req[0].rqtype = MEMTOMEM;
1033                 ch->req[1].rqtype = MEMTOMEM;
1034                 ch->rqcfg.src_inc = 0;
1035                 ch->rqcfg.dst_inc = 1;
1036                 break;
1037         default:
1038                 ret = -EINVAL;
1039                 goto devcfg_exit;
1040         }
1041
1042         ch->sdaddr = address;
1043
1044 devcfg_exit:
1045         spin_unlock_irqrestore(&res_lock, flags);
1046
1047         return ret;
1048 }
1049 EXPORT_SYMBOL(s3c2410_dma_devconfig);
1050
1051 int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
1052 {
1053         struct s3c_pl330_chan *ch = id_to_chan(id);
1054         struct pl330_chanstatus status;
1055         int ret;
1056
1057         if (!ch || chan_free(ch))
1058                 return -EINVAL;
1059
1060         ret = pl330_chan_status(ch->pl330_chan_id, &status);
1061         if (ret < 0)
1062                 return ret;
1063
1064         *src = status.src_addr;
1065         *dst = status.dst_addr;
1066
1067         return 0;
1068 }
1069 EXPORT_SYMBOL(s3c2410_dma_getposition);
1070
1071 static irqreturn_t pl330_irq_handler(int irq, void *data)
1072 {
1073         if (pl330_update(data))
1074                 return IRQ_HANDLED;
1075         else
1076                 return IRQ_NONE;
1077 }
1078
1079 static int pl330_probe(struct platform_device *pdev)
1080 {
1081         struct s3c_pl330_dmac *s3c_pl330_dmac;
1082         struct s3c_pl330_platdata *pl330pd;
1083         struct pl330_info *pl330_info;
1084         struct resource *res;
1085         int i, ret, irq;
1086
1087         pl330pd = pdev->dev.platform_data;
1088
1089         /* Can't do without the list of _32_ peripherals */
1090         if (!pl330pd || !pl330pd->peri) {
1091                 dev_err(&pdev->dev, "platform data missing!\n");
1092                 return -ENODEV;
1093         }
1094
1095         pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
1096         if (!pl330_info)
1097                 return -ENOMEM;
1098
1099         pl330_info->pl330_data = NULL;
1100         pl330_info->dev = &pdev->dev;
1101
1102         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1103         if (!res) {
1104                 ret = -ENODEV;
1105                 goto probe_err1;
1106         }
1107
1108 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
1109         /* to use the runtime PM helper functions */
1110         pm_runtime_enable(&pdev->dev);
1111         /* enable the power domain */
1112         if (pm_runtime_get_sync(&pdev->dev)) {
1113                 dev_err(&pdev->dev, "failed to get runtime pm\n");
1114                 ret = -ENODEV;
1115                 goto probe_err1;
1116         }
1117 #endif
1118         request_mem_region(res->start, resource_size(res), pdev->name);
1119
1120         pl330_info->base = ioremap(res->start, resource_size(res));
1121         if (!pl330_info->base) {
1122                 ret = -ENXIO;
1123                 goto probe_err2;
1124         }
1125
1126         irq = platform_get_irq(pdev, 0);
1127         if (irq < 0) {
1128                 ret = irq;
1129                 goto probe_err3;
1130         }
1131
1132         ret = request_irq(irq, pl330_irq_handler, 0,
1133                         dev_name(&pdev->dev), pl330_info);
1134         if (ret)
1135                 goto probe_err4;
1136
1137         /* Allocate a new DMAC */
1138         s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
1139         if (!s3c_pl330_dmac) {
1140                 ret = -ENOMEM;
1141                 goto probe_err5;
1142         }
1143
1144         /* Get operation clock and enable it */
1145         s3c_pl330_dmac->clk = clk_get(&pdev->dev, "dma");
1146         if (IS_ERR(s3c_pl330_dmac->clk)) {
1147                 dev_err(&pdev->dev, "Cannot get operation clock.\n");
1148                 ret = -EINVAL;
1149                 goto probe_err6;
1150         }
1151         clk_enable(s3c_pl330_dmac->clk);
1152
1153         ret = pl330_add(pl330_info);
1154         if (ret)
1155                 goto probe_err7;
1156
1157         /* Hook the info */
1158         s3c_pl330_dmac->pi = pl330_info;
1159
1160         /* No busy channels */
1161         s3c_pl330_dmac->busy_chan = 0;
1162
1163         s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
1164                                 sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
1165
1166         if (!s3c_pl330_dmac->kmcache) {
1167                 ret = -ENOMEM;
1168                 goto probe_err8;
1169         }
1170
1171         /* Get the list of peripherals */
1172         s3c_pl330_dmac->peri = pl330pd->peri;
1173
1174         /* Attach to the list of DMACs */
1175         list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
1176
1177         /* Create a channel for each peripheral in the DMAC
1178          * that is, if it doesn't already exist
1179          */
1180         for (i = 0; i < PL330_MAX_PERI; i++)
1181                 if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
1182                         chan_add(s3c_pl330_dmac->peri[i]);
1183
1184         printk(KERN_INFO
1185                 "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
1186         printk(KERN_INFO
1187                 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
1188                 pl330_info->pcfg.data_buf_dep,
1189                 pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
1190                 pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
1191
1192         clk_disable(s3c_pl330_dmac->clk);
1193 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
1194         /* disable the power domain */
1195         pm_runtime_put(&pdev->dev);
1196 #endif
1197         return 0;
1198
1199 probe_err8:
1200         pl330_del(pl330_info);
1201 probe_err7:
1202         clk_disable(s3c_pl330_dmac->clk);
1203         clk_put(s3c_pl330_dmac->clk);
1204 probe_err6:
1205         kfree(s3c_pl330_dmac);
1206 probe_err5:
1207         free_irq(irq, pl330_info);
1208 probe_err4:
1209 probe_err3:
1210         iounmap(pl330_info->base);
1211 probe_err2:
1212         release_mem_region(res->start, resource_size(res));
1213 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
1214         /* disable the power domain */
1215         pm_runtime_put(&pdev->dev);
1216         pm_runtime_disable(&pdev->dev);
1217 #endif
1218 probe_err1:
1219         kfree(pl330_info);
1220
1221         return ret;
1222 }
1223
1224 static int pl330_remove(struct platform_device *pdev)
1225 {
1226         struct s3c_pl330_dmac *dmac, *d;
1227         struct s3c_pl330_chan *ch;
1228         unsigned long flags;
1229         int del, found;
1230
1231         if (!pdev->dev.platform_data)
1232                 return -EINVAL;
1233
1234         spin_lock_irqsave(&res_lock, flags);
1235
1236         found = 0;
1237         list_for_each_entry(d, &dmac_list, node)
1238                 if (d->pi->dev == &pdev->dev) {
1239                         found = 1;
1240                         break;
1241                 }
1242
1243         if (!found) {
1244                 spin_unlock_irqrestore(&res_lock, flags);
1245                 return 0;
1246         }
1247
1248         dmac = d;
1249
1250         /* Remove all Channels that are managed only by this DMAC */
1251         list_for_each_entry(ch, &chan_list, node) {
1252
1253                 /* Only channels that are handled by this DMAC */
1254                 if (iface_of_dmac(dmac, ch->id))
1255                         del = 1;
1256                 else
1257                         continue;
1258
1259                 /* Don't remove if some other DMAC has it too */
1260                 list_for_each_entry(d, &dmac_list, node)
1261                         if (d != dmac && iface_of_dmac(d, ch->id)) {
1262                                 del = 0;
1263                                 break;
1264                         }
1265
1266                 if (del) {
1267                         spin_unlock_irqrestore(&res_lock, flags);
1268                         s3c2410_dma_free(ch->id, ch->client);
1269                         spin_lock_irqsave(&res_lock, flags);
1270                         list_del(&ch->node);
1271                         kfree(ch);
1272                 }
1273         }
1274
1275         /* Disable operation clock */
1276         clk_put(dmac->clk);
1277
1278         /* Remove the DMAC */
1279         list_del(&dmac->node);
1280         kfree(dmac);
1281
1282 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
1283         /* disable the power domain */
1284         spin_unlock_irqrestore(&res_lock, flags);
1285         pm_runtime_put(&pdev->dev);
1286         pm_runtime_disable(&pdev->dev);
1287 #else
1288         spin_unlock_irqrestore(&res_lock, flags);
1289 #endif
1290         return 0;
1291 }
1292
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM callbacks.  They perform no device work and only report
 * success; presumably they exist so runtime-PM transitions (used here
 * for power-domain handling) complete cleanly — confirm no controller
 * context needs saving across a domain power-off.
 */
static int pl330_runtime_suspend(struct device *dev)
{
	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	return 0;
}

/* Referenced by the platform driver's .pm field when runtime PM is on. */
static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};
#endif
1309
/* Platform glue: binds to "s3c-pl330" devices declared by machine code. */
static struct platform_driver pl330_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "s3c-pl330",
#ifdef CONFIG_PM_RUNTIME
		.pm	= &pl330_pm_ops,
#endif
	},
	.probe		= pl330_probe,
	.remove		= pl330_remove,
};
1321
/* Module entry point: register the platform driver. */
static int __init pl330_init(void)
{
	return platform_driver_register(&pl330_driver);
}
module_init(pl330_init);
1327
1328 static void __exit pl330_exit(void)
1329 {
1330         platform_driver_unregister(&pl330_driver);
1331         return;
1332 }
1333 module_exit(pl330_exit);
1334
1335 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1336 MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
1337 MODULE_LICENSE("GPL");