/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_INT_EN_CLR		0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR	0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* xlen and dma_width registers are in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN		4

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;		/* DMA direction */
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};

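/*
 * Illustrative 2D geometry (inferred from the prep routines below, not
 * from a datasheet): one frame carries xlen payload words laid out on a
 * line of width words, and the transfer repeats for (ylen + 1) frames.
 * All three values are in 32-bit words, so e.g. a 64-byte line of
 * payload followed by a 16-byte gap gives xlen = 16, width = 20.
 */
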
struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	bool				is_marco;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * lock has been held by functions calling this, so we don't hold
	 * lock again
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active,
				&schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			if (!sdesc || !sdesc->cyclic) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);
	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);
	return 0;
}

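/*
 * Usage sketch (hypothetical client code, not part of this driver): a
 * peripheral driver describes its 32-bit FIFO before preparing any
 * transfer. Only 4-byte bus widths are accepted above, and
 * src_maxburst == 4 selects the alternative hardware mode:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 4,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */
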
static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco) {
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base +
			SIRFSOC_DMA_CH_LOOP_CTRL) &
			~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	} else {
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
	}

	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base +
			SIRFSOC_DMA_CH_LOOP_CTRL) &
			~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

	spin_unlock_irqrestore(&schan->lock, flags);
	return 0;
}

static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base +
			SIRFSOC_DMA_CH_LOOP_CTRL) |
			((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_unlock_irqrestore(&schan->lock, flags);
	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		return sirfsoc_dma_pause_chan(schan);
	case DMA_RESUME:
		return sirfsoc_dma_resume_chan(schan);
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);
	default:
		break;
	}

	return -ENOSYS;
}

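/*
 * Clients normally reach this entry point through the generic dmaengine
 * wrappers rather than calling device_control directly (a sketch, with
 * "chan" obtained from dma_request_channel()):
 *
 *	dmaengine_pause(chan);		maps to DMA_PAUSE
 *	dmaengine_resume(chan);		maps to DMA_RESUME
 *	dmaengine_terminate_all(chan);	maps to DMA_TERMINATE_ALL
 */
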
/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
			node);
	dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
		(sdesc->width * SIRFSOC_DMA_WORD_LEN);

	ret = dma_cookie_status(chan, cookie, txstate);
	dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
			<< 2;
	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

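/*
 * Worked residue example using the formula above (illustrative numbers,
 * not taken from hardware): for a descriptor with xlen = 15, ylen = 0
 * and width = 16, dma_request_bytes = (15 + 1) * (0 + 1) * (16 * 4) =
 * 1024. CH_ADDR counts in 32-bit words, hence the << 2 to get a byte
 * position; if it has advanced 256 bytes past sdesc->addr, the reported
 * residue is 1024 - 256 = 768 bytes.
 */
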
static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	/*
	 * Number of chunks in a frame can only be 1 for prima2
	 * and ylen (number of frame - 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}

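/*
 * Preparation sketch (hypothetical client code): a driver moving
 * "line_bytes" of payload per frame with an inter-line gap of
 * "gap_bytes", for nframes frames, fills a one-chunk template as
 * checked above (frame_size == 1, numf > 0):
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
 *	xt->src_start = dma_buf;
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->frame_size = 1;
 *	xt->numf = nframes;
 *	xt->sgl[0].size = line_bytes;
 *	xt->sgl[0].icg = gap_bytes;
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */
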
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with 2 periods.
	 * If the X-length is set to 0, it would be the loop mode.
	 * The DMA address keeps increasing until reaching the end of a loop
	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
	 * the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB. DMA controller generates interrupts twice in each loop:
	 * when the DMA address reaches the end of BUFA or the end of
	 * BUFB
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

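/*
 * Preparation sketch (hypothetical client code): an audio driver double
 * buffering through BUFA/BUFB must make the buffer exactly two periods
 * long, as enforced above:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, dma_buf, buf_len,
 *			buf_len / 2, DMA_MEM_TO_DEV, 0);
 */
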
/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);

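/*
 * Request sketch (hypothetical client code): with the global channel
 * number ch_nr computed as above (controller dev_id * 16 + channel),
 * a client claims its dedicated channel like this:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *			(void *)ch_nr);
 */
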
static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
		sdma->is_marco = true;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	sdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(sdma->clk)) {
		dev_err(dev, "failed to get a clock.\n");
		return PTR_ERR(sdma->clk);
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	clk_prepare_enable(sdma->clk);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	clk_disable_unprepare(sdma->clk);
	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{ .compatible = "sirf,marco-dmac", },
	{},
};

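/*
 * Example device tree node this table would bind against (the address,
 * size, and interrupt number are illustrative assumptions, not taken
 * from this file):
 *
 *	dmac0: dma-controller@b00b0000 {
 *		compatible = "sirf,prima2-dmac";
 *		reg = <0xb00b0000 0x10000>;
 *		cell-index = <0>;
 *		interrupts = <12>;
 *	};
 */
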
static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

static __init int sirfsoc_dma_init(void)
{
	return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
	platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");