1 /* linux/arch/arm/plat-samsung/s3c-pl330.c
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/platform_device.h>
18 #include <linux/clk.h>
19 #include <linux/err.h>
20 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
21 #include <linux/pm_runtime.h>
24 #include <asm/hardware/pl330.h>
26 #include <plat/s3c-pl330-pdata.h>
/*
 * NOTE(review): this extract appears lacunose -- the kernel-doc opener, the
 * busy_chan/peri/clk members and the closing brace seem to be missing;
 * confirm against the original source before relying on this definition.
 */
29 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
30 * @busy_chan: Number of channels currently busy.
31 * @peri: List of IDs of peripherals this DMAC can work with.
32 * @node: To attach to the global list of DMACs.
33 * @pi: PL330 configuration info for the DMAC.
34 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
35 * @clk: Pointer of DMAC operation clock.
37 struct s3c_pl330_dmac {
40 struct list_head node;
41 struct pl330_info *pi;
42 struct kmem_cache *kmcache;
47 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
48 * @token: Xfer ID provided by the client.
49 * @node: To attach to the list of xfers on a channel.
50 * @px: Xfer for PL330 core.
51 * @chan: Owner channel of this xfer.
53 struct s3c_pl330_xfer {
/* NOTE(review): the token and px members and the closing brace appear to
 * have been dropped from this extract -- verify against the original. */
55 struct list_head node;
57 struct s3c_pl330_chan *chan;
61 * struct s3c_pl330_chan - Logical channel to communicate with
62 * a Physical peripheral.
63 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
64 * NULL if the channel is available to be acquired.
65 * @id: ID of the peripheral that this channel can communicate with.
66 * @options: Options specified by the client.
67 * @sdaddr: Address provided via s3c2410_dma_devconfig.
68 * @node: To attach to the global list of channels.
69 * @lrq: Pointer to the last submitted pl330_req to PL330 core.
70 * @xfer_list: To manage list of xfers enqueued.
71 * @req: Two requests to communicate with the PL330 engine.
72 * @callback_fn: Callback function to the client.
73 * @rqcfg: Channel configuration for the xfers.
74 * @xfer_head: Pointer to the xfer to be next executed.
75 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
76 * channel is available to be acquired.
77 * @client: Client of this channel. NULL if the
78 * channel is available to be acquired.
80 struct s3c_pl330_chan {
/* NOTE(review): the pl330_chan_id/id/options/sdaddr members and the closing
 * brace appear to be missing from this extract -- verify. */
85 struct list_head node;
86 struct pl330_req *lrq;
87 struct list_head xfer_list;
88 struct pl330_req req[2];
89 s3c2410_dma_cbfn_t callback_fn;
90 struct pl330_reqcfg rqcfg;
91 struct s3c_pl330_xfer *xfer_head;
92 struct s3c_pl330_dmac *dmac;
93 struct s3c2410_dma_client *client;
96 /* All DMACs in the platform */
97 static LIST_HEAD(dmac_list);
99 /* All channels to peripherals in the platform */
100 static LIST_HEAD(chan_list);
103 * Since we add resources(DMACs and Channels) to the global pool,
104 * we need to guard access to the resources using a global lock
/* res_lock guards dmac_list, chan_list and per-channel state; the API
 * entry points below take it with spin_lock_irqsave(). */
106 static DEFINE_SPINLOCK(res_lock);
108 /* Returns the channel with ID 'id' in the chan_list */
109 static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
111 struct s3c_pl330_chan *ch;
/* NOTE(review): the loop body (id match, NULL-not-found return) and the
 * closing brace are missing from this extract -- verify. */
113 list_for_each_entry(ch, &chan_list, node)
120 /* Allocate a new channel with ID 'id' and add to chan_list */
121 static void chan_add(const enum dma_ch id)
123 struct s3c_pl330_chan *ch = id_to_chan(id);
125 /* Return if the channel already exists */
129 ch = kmalloc(sizeof(*ch), GFP_KERNEL);
/* On allocation failure the channel is simply not created. */
130 /* Return silently to work with other channels */
/* NOTE(review): the allocation-failure check and field initialization
 * between kmalloc and list_add_tail appear missing -- verify. */
137 list_add_tail(&ch->node, &chan_list);
140 /* If the channel is not yet acquired by any client */
141 static bool chan_free(struct s3c_pl330_chan *ch)
146 /* Channel points to some DMAC only when it's acquired */
147 return ch->dmac ? false : true;
151 * Returns 0 if peripheral i/f is invalid or not present on the dmac.
152 * Index + 1, otherwise.
154 static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
156 enum dma_ch *id = dmac->peri;
159 /* Discount invalid markers */
160 if (ch_id == DMACH_MAX)
/* NOTE(review): the early return, loop body and final return are missing
 * from this extract -- verify against the original. */
163 for (i = 0; i < PL330_MAX_PERI; i++)
170 /* If all channel threads of the DMAC are busy */
171 static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
173 struct pl330_info *pi = dmac->pi;
/* Busy when every hardware channel thread is in use. */
175 return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
179 * Returns the number of free channels that
180 * can be handled by this dmac only.
182 static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
184 enum dma_ch *id = dmac->peri;
185 struct s3c_pl330_dmac *d;
186 struct s3c_pl330_chan *ch;
187 unsigned found, count = 0;
/* For each valid, unacquired peripheral of this DMAC, count it unless
 * some other DMAC in the platform can also drive it. */
191 for (i = 0; i < PL330_MAX_PERI; i++) {
195 if (p == DMACH_MAX || !chan_free(ch))
199 list_for_each_entry(d, &dmac_list, node) {
200 if (d != dmac && iface_of_dmac(d, ch->id)) {
/* NOTE(review): the per-iteration setup of 'p'/'ch', the 'found' handling,
 * count increment and the return are missing from this extract -- verify. */
213 * Measure of suitability of 'dmac' handling 'ch'
215 * 0 indicates 'dmac' can not handle 'ch' either
216 * because it is not supported by the hardware or
217 * because all dmac channels are currently busy.
219 * >0 value indicates 'dmac' has the capability.
220 * The bigger the value the more suitable the dmac.
222 #define MAX_SUIT UINT_MAX
225 static unsigned suitablility(struct s3c_pl330_dmac *dmac,
226 struct s3c_pl330_chan *ch)
228 struct pl330_info *pi = dmac->pi;
229 enum dma_ch *id = dmac->peri;
230 struct s3c_pl330_dmac *d;
235 /* If all the DMAC channel threads are busy */
/* NOTE(review): the dmac_busy early-return, the loop body matching ch->id,
 * the score accumulation and the final return are missing -- verify. */
239 for (i = 0; i < PL330_MAX_PERI; i++)
243 /* If the 'dmac' can't talk to 'ch' */
244 if (i == PL330_MAX_PERI)
248 list_for_each_entry(d, &dmac_list, node) {
250 * If some other dmac can talk to this
251 * peri and has some channel free.
253 if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
263 /* Good if free chans are more, bad otherwise */
264 s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
269 /* More than one DMAC may have capability to transfer data with the
270 * peripheral. This function assigns most suitable DMAC to manage the
271 * channel and hence communicate with the peripheral.
273 static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
275 struct s3c_pl330_dmac *d, *dmac = NULL;
276 unsigned sn, sl = MIN_SUIT;
/* Walk all DMACs and keep the one with the highest suitability score;
 * NOTE(review): the comparison/selection body and return are missing. */
278 list_for_each_entry(d, &dmac_list, node) {
279 sn = suitablility(d, ch);
291 /* Acquire the channel for peripheral 'id' */
292 static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
294 struct s3c_pl330_chan *ch = id_to_chan(id);
295 struct s3c_pl330_dmac *dmac;
297 /* If the channel doesn't exist or is already acquired */
298 if (!ch || !chan_free(ch)) {
303 dmac = map_chan_to_dmac(ch);
304 /* If couldn't map */
/* NOTE(review): the error paths, the binding of ch to dmac (busy_chan
 * bump, ch->dmac assignment) and the return are missing -- verify. */
317 /* Delete xfer from the queue */
318 static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
320 struct s3c_pl330_xfer *t;
321 struct s3c_pl330_chan *ch;
329 /* Make sure xfer is in the queue */
331 list_for_each_entry(t, &ch->xfer_list, node)
340 /* If xfer is last entry in the queue */
/* Pick the successor of 'xfer', wrapping to the head if 'xfer' is last,
 * so xfer_head can be advanced before the node is unlinked. */
341 if (xfer->node.next == &ch->xfer_list)
342 t = list_entry(ch->xfer_list.next,
343 struct s3c_pl330_xfer, node);
345 t = list_entry(xfer->node.next,
346 struct s3c_pl330_xfer, node);
348 /* If there was only one node left */
350 ch->xfer_head = NULL;
351 else if (ch->xfer_head == xfer)
/* NOTE(review): the guards around these branches and the xfer_head = t
 * assignment appear missing from this extract -- verify. */
354 list_del(&xfer->node);
357 /* Provides pointer to the next xfer in the queue.
358 * If CIRCULAR option is set, the list is left intact,
359 * otherwise the xfer is removed from the list.
360 * Forced delete 'pluck' can be set to override the CIRCULAR option.
362 static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
365 struct s3c_pl330_xfer *xfer = ch->xfer_head;
370 /* If xfer is last entry in the queue */
/* Advance xfer_head to the next entry, wrapping past the list head. */
371 if (xfer->node.next == &ch->xfer_list)
372 ch->xfer_head = list_entry(ch->xfer_list.next,
373 struct s3c_pl330_xfer, node);
375 ch->xfer_head = list_entry(xfer->node.next,
376 struct s3c_pl330_xfer, node);
/* Remove from the list unless the client asked for CIRCULAR reuse. */
378 if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
379 del_from_queue(xfer);
/* NOTE(review): the NULL-head early return and the final return of 'xfer'
 * are missing from this extract -- verify. */
/* Enqueue 'xfer' on the channel; 'front' resubmits it at the head. */
384 static inline void add_to_queue(struct s3c_pl330_chan *ch,
385 struct s3c_pl330_xfer *xfer, int front)
387 struct pl330_xfer *xt;
390 if (ch->xfer_head == NULL)
391 ch->xfer_head = xfer;
393 xt = &ch->xfer_head->px;
394 /* If the head already submitted (CIRCULAR head) */
395 if (ch->options & S3C2410_DMAF_CIRCULAR &&
396 (xt == ch->req[0].x || xt == ch->req[1].x))
397 ch->xfer_head = xfer;
399 /* If this is a resubmission, it should go at the head */
401 ch->xfer_head = xfer;
402 list_add(&xfer->node, &ch->xfer_list);
/* NOTE(review): the 'front' guard before the head insertion and the else
 * around the tail insertion appear missing from this extract -- verify. */
404 list_add_tail(&xfer->node, &ch->xfer_list);
/* Run the client callback for a completed/aborted xfer and free it
 * unless the CIRCULAR option keeps the buffer alive ('ffree' forces). */
408 static inline void _finish_off(struct s3c_pl330_xfer *xfer,
409 enum s3c2410_dma_buffresult res, int ffree)
411 struct s3c_pl330_chan *ch;
/* NOTE(review): the NULL checks and ch = xfer->chan assignment are
 * missing from this extract -- verify. */
420 ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
422 /* Force Free or if buffer is not needed anymore */
423 if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
424 kmem_cache_free(ch->dmac->kmcache, xfer);
/* Pull the next xfer off the queue and submit it to the PL330 core via
 * request slot 'r'; tunes burst length for memcpy and handles EAGAIN by
 * requeuing the xfer. */
427 static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
430 struct s3c_pl330_xfer *xfer;
433 /* If already submitted */
437 xfer = get_from_queue(ch, 0);
441 /* Use max bandwidth for M<->M xfers */
442 if (r->rqtype == MEMTOMEM) {
443 struct pl330_info *pi = xfer->chan->dmac->pi;
444 int burst = 1 << ch->rqcfg.brst_size;
445 u32 bytes = r->x->bytes;
/* Start from the full data-buffer width in bytes and shrink until it
 * divides the transfer size evenly. */
448 bl = pi->pcfg.data_bus_width / 8;
449 bl *= pi->pcfg.data_buf_dep;
452 /* src/dst_burst_len can't be more than 16 */
457 if (!(bytes % (bl * burst)))
462 ch->rqcfg.brst_len = bl;
464 ch->rqcfg.brst_len = 1;
467 ret = pl330_submit_req(ch->pl330_chan_id, r);
469 /* If submission was successful */
471 ch->lrq = r; /* latest submitted req */
477 /* If both of the PL330 ping-pong buffers filled */
478 if (ret == -EAGAIN) {
479 dev_err(ch->dmac->pi->dev, "%s:%d!\n",
481 /* Queue back again */
482 add_to_queue(ch, xfer, 1)
485 dev_err(ch->dmac->pi->dev, "%s:%d!\n",
487 _finish_off(xfer, S3C2410_RES_ERR, 0);
/* NOTE(review): several interior lines (early returns, the while loop
 * shrinking 'bl', success path, final return) are missing -- verify. */
/* Completion handler common to both request slots: resubmits the next
 * queued xfer (non-looping case), maps the PL330 result onto the S3C DMA
 * result codes and notifies the client. */
494 static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
495 struct pl330_req *r, enum pl330_op_err err)
498 struct s3c_pl330_xfer *xfer;
499 struct pl330_req *xl = r->x;
500 enum s3c2410_dma_buffresult res;
502 spin_lock_irqsave(&res_lock, flags);
504 if (!r->infiniteloop) {
507 s3c_pl330_submit(ch, r);
510 spin_unlock_irqrestore(&res_lock, flags);
512 /* Map result to S3C DMA API */
513 if (err == PL330_ERR_NONE)
514 res = S3C2410_RES_OK;
515 else if (err == PL330_ERR_ABORT)
516 res = S3C2410_RES_ABORT;
518 res = S3C2410_RES_ERR;
520 /* If last request had some xfer */
521 if (!r->infiniteloop) {
523 xfer = container_of(xl, struct s3c_pl330_xfer, px);
524 _finish_off(xfer, res, 0);
526 dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
/* Infinite-loop (ring) requests only invoke the callback; the xfer
 * stays queued. NOTE(review): interior guards/braces are missing. */
532 xfer = container_of(xl, struct s3c_pl330_xfer, px);
534 ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
/* PL330 core callback for request slot 0; recovers the owning channel. */
538 static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
540 struct pl330_req *r = token;
541 struct s3c_pl330_chan *ch = container_of(r,
542 struct s3c_pl330_chan, req[0]);
543 s3c_pl330_rq(ch, r, err);
/* PL330 core callback for request slot 1; recovers the owning channel. */
546 static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
548 struct pl330_req *r = token;
549 struct s3c_pl330_chan *ch = container_of(r,
550 struct s3c_pl330_chan, req[1]);
551 s3c_pl330_rq(ch, r, err);
554 /* Release an acquired channel */
555 static void chan_release(struct s3c_pl330_chan *ch)
/* NOTE(review): the entire body (unbinding ch->dmac, decrementing the
 * DMAC's busy count) is missing from this extract -- verify. */
557 struct s3c_pl330_dmac *dmac;
/* S3C DMA API: perform channel operation 'op' (START/STOP/FLUSH...) on
 * channel 'id'. STOP/FLUSH abort the in-flight requests and run client
 * callbacks; FLUSH additionally drains the whole queue. */
567 int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
569 struct s3c_pl330_xfer *xfer;
570 enum pl330_chan_op pl330op;
571 struct s3c_pl330_chan *ch;
575 spin_lock_irqsave(&res_lock, flags);
579 if (!ch || chan_free(ch)) {
585 case S3C2410_DMAOP_START:
586 /* Make sure both reqs are enqueued */
587 idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
588 s3c_pl330_submit(ch, &ch->req[idx]);
589 s3c_pl330_submit(ch, &ch->req[1 - idx]);
590 pl330op = PL330_OP_START;
593 case S3C2410_DMAOP_STOP:
594 pl330op = PL330_OP_ABORT;
597 case S3C2410_DMAOP_FLUSH:
598 pl330op = PL330_OP_FLUSH;
601 case S3C2410_DMAOP_PAUSE:
602 case S3C2410_DMAOP_RESUME:
603 case S3C2410_DMAOP_TIMEOUT:
604 case S3C2410_DMAOP_STARTED:
/* Unsupported ops return without touching the hardware. */
605 spin_unlock_irqrestore(&res_lock, flags);
609 spin_unlock_irqrestore(&res_lock, flags);
613 ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);
615 if (pl330op == PL330_OP_START) {
616 spin_unlock_irqrestore(&res_lock, flags);
620 idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
622 /* Abort the current xfer */
623 if (ch->req[idx].x) {
624 xfer = container_of(ch->req[idx].x,
625 struct s3c_pl330_xfer, px);
627 /* Drop xfer during FLUSH */
628 if (pl330op == PL330_OP_FLUSH)
629 del_from_queue(xfer);
631 ch->req[idx].x = NULL;
/* res_lock is dropped around _finish_off because the client callback
 * may call back into this API. */
633 spin_unlock_irqrestore(&res_lock, flags);
634 _finish_off(xfer, S3C2410_RES_ABORT,
635 pl330op == PL330_OP_FLUSH ? 1 : 0);
636 spin_lock_irqsave(&res_lock, flags);
639 /* Flush the whole queue */
640 if (pl330op == PL330_OP_FLUSH) {
642 if (ch->req[1 - idx].x) {
643 xfer = container_of(ch->req[1 - idx].x,
644 struct s3c_pl330_xfer, px);
646 del_from_queue(xfer);
648 ch->req[1 - idx].x = NULL;
650 spin_unlock_irqrestore(&res_lock, flags);
651 _finish_off(xfer, S3C2410_RES_ABORT, 1);
652 spin_lock_irqsave(&res_lock, flags);
655 /* Finish off the remaining in the queue */
656 xfer = ch->xfer_head;
659 del_from_queue(xfer);
661 spin_unlock_irqrestore(&res_lock, flags);
662 _finish_off(xfer, S3C2410_RES_ABORT, 1);
663 spin_lock_irqsave(&res_lock, flags);
665 xfer = ch->xfer_head;
/* NOTE(review): switch scaffolding, breaks, the drain loop header and
 * final return are missing from this extract -- verify. */
670 spin_unlock_irqrestore(&res_lock, flags);
/* S3C DMA API: enqueue a (possibly looping) buffer on channel 'id',
 * submit it on a free request slot and optionally auto-start. */
676 int s3c2410_dma_enqueue_ring(enum dma_ch id, void *token,
677 dma_addr_t addr, int size, int numofblock)
679 struct s3c_pl330_chan *ch;
680 struct s3c_pl330_xfer *xfer;
684 spin_lock_irqsave(&res_lock, flags);
687 /* Error if invalid or free channel */
688 if (!ch || chan_free(ch)) {
693 /* Error if size is unaligned */
694 if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
699 xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
707 xfer->px.bytes = size;
708 xfer->px.next = NULL; /* Single request */
710 /* For S3C DMA API, direction is always fixed for all xfers */
711 if (ch->req[0].rqtype == MEMTODEV) {
712 xfer->px.src_addr = addr;
713 xfer->px.dst_addr = ch->sdaddr;
715 xfer->px.src_addr = ch->sdaddr;
716 xfer->px.dst_addr = addr;
719 add_to_queue(ch, xfer, 0);
721 /* Try submitting on either request */
722 idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
724 if (!ch->req[idx].x) {
725 ch->req[idx].infiniteloop = numofblock;
726 s3c_pl330_submit(ch, &ch->req[idx]);
728 ch->req[1 - idx].infiniteloop = numofblock;
729 s3c_pl330_submit(ch, &ch->req[1 - idx]);
731 spin_unlock_irqrestore(&res_lock, flags);
/* AUTOSTART kicks the channel immediately after enqueue. */
733 if (ch->options & S3C2410_DMAF_AUTOSTART)
734 s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);
/* NOTE(review): error-path labels, allocation-failure handling and
 * returns are missing from this extract -- verify. */
739 spin_unlock_irqrestore(&res_lock, flags);
/* S3C DMA API: acquire channel 'id' for 'client', power/clock up the
 * owning DMAC, grab a PL330 channel thread and initialize the channel's
 * default request configuration. */
745 int s3c2410_dma_request(enum dma_ch id,
746 struct s3c2410_dma_client *client,
749 struct s3c_pl330_dmac *dmac;
750 struct s3c_pl330_chan *ch;
754 spin_lock_irqsave(&res_lock, flags);
756 ch = chan_acquire(id);
764 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
765 /* enable the power domain */
/* Lock dropped: pm_runtime_get_sync() may sleep. */
766 spin_unlock_irqrestore(&res_lock, flags);
767 pm_runtime_get_sync(dmac->pi->dev);
768 spin_lock_irqsave(&res_lock, flags);
770 clk_enable(dmac->clk);
772 ch->pl330_chan_id = pl330_request_channel(dmac->pi);
773 if (!ch->pl330_chan_id) {
775 clk_disable(dmac->clk);
776 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
777 /* disable the power domain */
778 spin_unlock_irqrestore(&res_lock, flags);
779 pm_runtime_put(dmac->pi->dev);
780 spin_lock_irqsave(&res_lock, flags);
787 ch->options = 0; /* Clear any option */
788 ch->callback_fn = NULL; /* Clear any callback */
791 ch->rqcfg.brst_size = 2; /* Default word size */
792 ch->rqcfg.swap = SWAP_NO;
793 ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
794 ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
795 ch->rqcfg.privileged = 0;
796 ch->rqcfg.insnaccess = 0;
798 /* Set invalid direction */
799 ch->req[0].rqtype = DEVTODEV;
800 ch->req[1].rqtype = ch->req[0].rqtype;
802 ch->req[0].cfg = &ch->rqcfg;
803 ch->req[1].cfg = ch->req[0].cfg;
805 ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
806 ch->req[1].peri = ch->req[0].peri;
808 ch->req[0].token = &ch->req[0];
809 ch->req[0].xfer_cb = s3c_pl330_rq0;
810 ch->req[1].token = &ch->req[1];
811 ch->req[1].xfer_cb = s3c_pl330_rq1;
816 /* Reset xfer list */
817 INIT_LIST_HEAD(&ch->xfer_list);
818 ch->xfer_head = NULL;
/* NOTE(review): acquisition-failure handling, dmac = ch->dmac setup,
 * client/id assignment and return are missing from this extract. */
821 spin_unlock_irqrestore(&res_lock, flags);
/* S3C DMA API: release channel 'id'. Aborts both in-flight requests,
 * drains the queue with ABORT callbacks, returns the PL330 channel
 * thread and powers/clocks down the DMAC. */
827 int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
829 struct s3c_pl330_chan *ch;
830 struct s3c_pl330_xfer *xfer;
835 spin_lock_irqsave(&res_lock, flags);
839 if (!ch || chan_free(ch))
842 /* Refuse if someone else wanted to free the channel */
843 if (ch->client != client) {
848 /* Stop any active xfer, flush the queue and do callbacks */
849 pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);
851 /* Abort the submitted requests */
852 idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
854 if (ch->req[idx].x) {
855 xfer = container_of(ch->req[idx].x,
856 struct s3c_pl330_xfer, px);
858 ch->req[idx].x = NULL;
859 del_from_queue(xfer);
/* Lock dropped around client callback, as elsewhere in this file. */
861 spin_unlock_irqrestore(&res_lock, flags);
862 _finish_off(xfer, S3C2410_RES_ABORT, 1);
863 spin_lock_irqsave(&res_lock, flags);
866 if (ch->req[1 - idx].x) {
867 xfer = container_of(ch->req[1 - idx].x,
868 struct s3c_pl330_xfer, px);
870 ch->req[1 - idx].x = NULL;
871 del_from_queue(xfer);
873 spin_unlock_irqrestore(&res_lock, flags);
874 _finish_off(xfer, S3C2410_RES_ABORT, 1);
875 spin_lock_irqsave(&res_lock, flags);
878 /* Pluck and Abort the queued requests in order */
880 xfer = get_from_queue(ch, 1);
882 spin_unlock_irqrestore(&res_lock, flags);
883 _finish_off(xfer, S3C2410_RES_ABORT, 1);
884 spin_lock_irqsave(&res_lock, flags);
889 pl330_release_channel(ch->pl330_chan_id);
891 ch->pl330_chan_id = NULL;
892 clk_disable(ch->dmac->clk);
894 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
895 /* disable the power domain */
896 spin_unlock_irqrestore(&res_lock, flags);
897 pm_runtime_put(ch->dmac->pi->dev);
898 spin_lock_irqsave(&res_lock, flags);
/* NOTE(review): the drain-loop header, chan_release() call, labels and
 * the final return are missing from this extract -- verify. */
903 spin_unlock_irqrestore(&res_lock, flags);
/* S3C DMA API: set the burst (xfer unit) size for channel 'id'.
 * 'xferunit' must be a power of two no wider than the data bus. */
909 int s3c2410_dma_config(enum dma_ch id, int xferunit)
911 struct s3c_pl330_chan *ch;
912 struct pl330_info *pi;
914 int i, dbwidth, ret = 0;
916 spin_lock_irqsave(&res_lock, flags);
920 if (!ch || chan_free(ch)) {
926 dbwidth = pi->pcfg.data_bus_width / 8;
928 /* Max size of xfer can be pcfg.data_bus_width */
929 if (xferunit > dbwidth) {
/* Find i such that xferunit == 1 << i (log2 of the unit). */
935 while (xferunit != (1 << i))
939 if (xferunit == (1 << i))
940 ch->rqcfg.brst_size = i;
/* NOTE(review): channel lookup, error paths, loop bounds and the
 * non-power-of-two else branch are missing from this extract. */
945 spin_unlock_irqrestore(&res_lock, flags);
951 /* Options that are supported by this driver */
952 #define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
/* S3C DMA API: set client options; rejects any flag outside
 * S3C_PL330_FLAGS or an invalid/unacquired channel. */
954 int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
956 struct s3c_pl330_chan *ch;
960 spin_lock_irqsave(&res_lock, flags);
964 if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
967 ch->options = options;
969 spin_unlock_irqrestore(&res_lock, flags);
/* S3C DMA API: register the buffer-done callback for channel 'id'. */
975 int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
977 struct s3c_pl330_chan *ch;
981 spin_lock_irqsave(&res_lock, flags);
985 if (!ch || chan_free(ch))
988 ch->callback_fn = rtn;
990 spin_unlock_irqrestore(&res_lock, flags);
/* S3C DMA API: fix the transfer direction and the device-side address
 * for channel 'id'; both request slots are configured identically. */
996 int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
997 unsigned long address)
999 struct s3c_pl330_chan *ch;
1000 unsigned long flags;
1003 spin_lock_irqsave(&res_lock, flags);
1005 ch = id_to_chan(id);
1007 if (!ch || chan_free(ch)) {
1013 case S3C2410_DMASRC_HW: /* P->M */
1014 ch->req[0].rqtype = DEVTOMEM;
1015 ch->req[1].rqtype = DEVTOMEM;
1016 ch->rqcfg.src_inc = 0;
1017 ch->rqcfg.dst_inc = 1;
1019 case S3C2410_DMASRC_MEM: /* M->P */
1020 ch->req[0].rqtype = MEMTODEV;
1021 ch->req[1].rqtype = MEMTODEV;
1022 ch->rqcfg.src_inc = 1;
1023 ch->rqcfg.dst_inc = 0;
1025 case S3C_DMA_MEM2MEM:
1026 ch->req[0].rqtype = MEMTOMEM;
1027 ch->req[1].rqtype = MEMTOMEM;
1028 ch->rqcfg.src_inc = 1;
1029 ch->rqcfg.dst_inc = 1;
/* MEM2MEM_SET: fixed source (src_inc = 0), used for memset-style fills. */
1031 case S3C_DMA_MEM2MEM_SET:
1032 ch->req[0].rqtype = MEMTOMEM;
1033 ch->req[1].rqtype = MEMTOMEM;
1034 ch->rqcfg.src_inc = 0;
1035 ch->rqcfg.dst_inc = 1;
/* NOTE(review): switch header, break statements, default branch and
 * the return are missing from this extract -- verify. */
1042 ch->sdaddr = address;
1045 spin_unlock_irqrestore(&res_lock, flags);
/* S3C DMA API: report the current source/destination addresses of the
 * channel's PL330 thread via pl330_chan_status(). */
1051 int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
1053 struct s3c_pl330_chan *ch = id_to_chan(id);
1054 struct pl330_chanstatus status;
1057 if (!ch || chan_free(ch))
1060 ret = pl330_chan_status(ch->pl330_chan_id, &status);
/* NOTE(review): the error-return checks and final return are missing. */
1064 *src = status.src_addr;
1065 *dst = status.dst_addr;
/* IRQ handler: delegate to pl330_update(); NOTE(review): the
 * IRQ_HANDLED/IRQ_NONE returns are missing from this extract. */
1071 static irqreturn_t pl330_irq_handler(int irq, void *data)
1073 if (pl330_update(data))
/* Platform probe: map the controller, hook its IRQ, enable clock/power,
 * register with the PL330 core, create the DMAC bookkeeping and one
 * logical channel per peripheral it serves. Error paths unwind in
 * reverse order via goto labels (mostly missing from this extract). */
1079 static int pl330_probe(struct platform_device *pdev)
1081 struct s3c_pl330_dmac *s3c_pl330_dmac;
1082 struct s3c_pl330_platdata *pl330pd;
1083 struct pl330_info *pl330_info;
1084 struct resource *res;
1087 pl330pd = pdev->dev.platform_data;
1089 /* Can't do without the list of _32_ peripherals */
1090 if (!pl330pd || !pl330pd->peri) {
1091 dev_err(&pdev->dev, "platform data missing!\n");
1095 pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
1099 pl330_info->pl330_data = NULL;
1100 pl330_info->dev = &pdev->dev;
1102 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1108 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
1109 /* to use the runtime PM helper functions */
1110 pm_runtime_enable(&pdev->dev);
1111 /* enable the power domain */
1112 if (pm_runtime_get_sync(&pdev->dev)) {
1113 dev_err(&pdev->dev, "failed to get runtime pm\n");
/* NOTE(review): request_mem_region()'s return value is not checked here
 * in the visible code -- confirm against the original. */
1118 request_mem_region(res->start, resource_size(res), pdev->name);
1120 pl330_info->base = ioremap(res->start, resource_size(res));
1121 if (!pl330_info->base) {
1126 irq = platform_get_irq(pdev, 0);
1132 ret = request_irq(irq, pl330_irq_handler, 0,
1133 dev_name(&pdev->dev), pl330_info);
1137 /* Allocate a new DMAC */
1138 s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
1139 if (!s3c_pl330_dmac) {
1144 /* Get operation clock and enable it */
1145 s3c_pl330_dmac->clk = clk_get(&pdev->dev, "dma");
1146 if (IS_ERR(s3c_pl330_dmac->clk)) {
1147 dev_err(&pdev->dev, "Cannot get operation clock.\n");
1151 clk_enable(s3c_pl330_dmac->clk);
1153 ret = pl330_add(pl330_info);
1158 s3c_pl330_dmac->pi = pl330_info;
1160 /* No busy channels */
1161 s3c_pl330_dmac->busy_chan = 0;
1163 s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
1164 sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
1166 if (!s3c_pl330_dmac->kmcache) {
1171 /* Get the list of peripherals */
1172 s3c_pl330_dmac->peri = pl330pd->peri;
1174 /* Attach to the list of DMACs */
1175 list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
1177 /* Create a channel for each peripheral in the DMAC
1178 * that is, if it doesn't already exist
1180 for (i = 0; i < PL330_MAX_PERI; i++)
1181 if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
1182 chan_add(s3c_pl330_dmac->peri[i]);
1185 "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
1187 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
1188 pl330_info->pcfg.data_buf_dep,
1189 pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
1190 pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
/* Probe success: clock/power are released again until a client
 * actually requests a channel. */
1192 clk_disable(s3c_pl330_dmac->clk);
1193 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
1194 /* disable the power domain */
1195 pm_runtime_put(&pdev->dev);
/* Error unwind labels follow (partially missing from this extract). */
1200 pl330_del(pl330_info);
1202 clk_disable(s3c_pl330_dmac->clk);
1203 clk_put(s3c_pl330_dmac->clk);
1205 kfree(s3c_pl330_dmac);
1207 free_irq(irq, pl330_info);
1210 iounmap(pl330_info->base);
1212 release_mem_region(res->start, resource_size(res));
1213 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
1214 /* disable the power domain */
1215 pm_runtime_put(&pdev->dev);
1216 pm_runtime_disable(&pdev->dev);
/* Platform remove: find the DMAC owned by this pdev, free every channel
 * that only this DMAC serves, then unlink and tear down the DMAC. */
1224 static int pl330_remove(struct platform_device *pdev)
1226 struct s3c_pl330_dmac *dmac, *d;
1227 struct s3c_pl330_chan *ch;
1228 unsigned long flags;
1231 if (!pdev->dev.platform_data)
1234 spin_lock_irqsave(&res_lock, flags);
1237 list_for_each_entry(d, &dmac_list, node)
1238 if (d->pi->dev == &pdev->dev) {
/* NOTE(review): dmac assignment/break and the not-found early return
 * are missing from this extract -- verify. */
1244 spin_unlock_irqrestore(&res_lock, flags);
1250 /* Remove all Channels that are managed only by this DMAC */
1251 list_for_each_entry(ch, &chan_list, node) {
1253 /* Only channels that are handled by this DMAC */
1254 if (iface_of_dmac(dmac, ch->id))
1259 /* Don't remove if some other DMAC has it too */
1260 list_for_each_entry(d, &dmac_list, node)
1261 if (d != dmac && iface_of_dmac(d, ch->id)) {
/* Lock dropped around s3c2410_dma_free(), which takes res_lock itself. */
1267 spin_unlock_irqrestore(&res_lock, flags);
1268 s3c2410_dma_free(ch->id, ch->client);
1269 spin_lock_irqsave(&res_lock, flags);
1270 list_del(&ch->node);
1275 /* Disable operation clock */
1278 /* Remove the DMAC */
1279 list_del(&dmac->node);
1282 #if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
1283 /* disable the power domain */
1284 spin_unlock_irqrestore(&res_lock, flags);
1285 pm_runtime_put(&pdev->dev);
1286 pm_runtime_disable(&pdev->dev);
1288 spin_unlock_irqrestore(&res_lock, flags);
1293 #ifdef CONFIG_PM_RUNTIME
/* Runtime PM callbacks; NOTE(review): their bodies (presumably trivial
 * returns) are missing from this extract -- verify. */
1294 static int pl330_runtime_suspend(struct device *dev)
1299 static int pl330_runtime_resume(struct device *dev)
1304 static const struct dev_pm_ops pl330_pm_ops = {
1305 .runtime_suspend = pl330_runtime_suspend,
1306 .runtime_resume = pl330_runtime_resume,
/* Platform driver glue; matches devices named "s3c-pl330". */
1310 static struct platform_driver pl330_driver = {
1312 .owner = THIS_MODULE,
1313 .name = "s3c-pl330",
1314 #ifdef CONFIG_PM_RUNTIME
1315 .pm = &pl330_pm_ops,
1318 .probe = pl330_probe,
1319 .remove = pl330_remove,
/* Module init: register the platform driver. */
1322 static int __init pl330_init(void)
1324 return platform_driver_register(&pl330_driver);
1326 module_init(pl330_init);
/* Module exit: unregister the platform driver. */
1328 static void __exit pl330_exit(void)
1330 platform_driver_unregister(&pl330_driver);
1333 module_exit(pl330_exit);
1335 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1336 MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
1337 MODULE_LICENSE("GPL");