/*
 * BCM2708 legacy DMA API
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_data/dma-bcm2708.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
/* DMA buffers must be aligned to (and sized in multiples of) this mask + 1 */
#define CACHE_LINE_MASK 31
#define DEFAULT_DMACHAN_BITMAP 0x10	/* channel 4 only */

/* valid only for channels 0 - 14, 15 has its own base address */
#define BCM2708_DMA_CHAN(n)	((n) << 8)	/* base address */
#define BCM2708_DMA_CHANIO(dma_base, n) \
	((void __iomem *)((char *)(dma_base) + BCM2708_DMA_CHAN(n)))
36 void __iomem *dma_base;
37 u32 chan_available; /* bitmap of available channels */
38 u32 has_feature[BCM_DMA_FEATURE_COUNT]; /* bitmap of feature presence */
42 static struct device *dmaman_dev; /* we assume there's only one! */
43 static struct vc_dmaman *g_dmaman; /* DMA manager */
/* DMA Auxiliary Functions */

/* A DMA buffer on an arbitrary boundary may separate a cache line into a
   section inside the DMA buffer and another section outside it.
   Even if we flush DMA buffers from the cache there is always the chance that
   during a DMA someone will access the part of a cache line that is outside
   the DMA buffer - which will then bring in unwelcome data.
   Without being able to dictate our own buffer pools we must insist that
   DMA buffers consist of a whole number of cache lines.
*/
55 extern int bcm_sg_suitable_for_dma(struct scatterlist *sg_ptr, int sg_len)
59 for (i = 0; i < sg_len; i++) {
60 if (sg_ptr[i].offset & CACHE_LINE_MASK ||
61 sg_ptr[i].length & CACHE_LINE_MASK)
67 EXPORT_SYMBOL_GPL(bcm_sg_suitable_for_dma);
69 extern void bcm_dma_start(void __iomem *dma_chan_base,
70 dma_addr_t control_block)
72 dsb(sy); /* ARM data synchronization (push) operation */
74 writel(control_block, dma_chan_base + BCM2708_DMA_ADDR);
75 writel(BCM2708_DMA_ACTIVE, dma_chan_base + BCM2708_DMA_CS);
77 EXPORT_SYMBOL_GPL(bcm_dma_start);
79 extern void bcm_dma_wait_idle(void __iomem *dma_chan_base)
83 /* ugly busy wait only option for now */
84 while (readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE)
87 EXPORT_SYMBOL_GPL(bcm_dma_wait_idle);
89 extern bool bcm_dma_is_busy(void __iomem *dma_chan_base)
93 return readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE;
95 EXPORT_SYMBOL_GPL(bcm_dma_is_busy);
97 /* Complete an ongoing DMA (assuming its results are to be ignored)
98 Does nothing if there is no DMA in progress.
99 This routine waits for the current AXI transfer to complete before
100 terminating the current DMA. If the current transfer is hung on a DREQ used
101 by an uncooperative peripheral the AXI transfer may never complete. In this
102 case the routine times out and return a non-zero error code.
103 Use of this routine doesn't guarantee that the ongoing or aborted DMA
104 does not produce an interrupt.
106 extern int bcm_dma_abort(void __iomem *dma_chan_base)
108 unsigned long int cs;
111 cs = readl(dma_chan_base + BCM2708_DMA_CS);
113 if (BCM2708_DMA_ACTIVE & cs) {
114 long int timeout = 10000;
116 /* write 0 to the active bit - pause the DMA */
117 writel(0, dma_chan_base + BCM2708_DMA_CS);
119 /* wait for any current AXI transfer to complete */
120 while (0 != (cs & BCM2708_DMA_ISPAUSED) && --timeout >= 0)
121 cs = readl(dma_chan_base + BCM2708_DMA_CS);
123 if (0 != (cs & BCM2708_DMA_ISPAUSED)) {
124 /* we'll un-pause when we set of our next DMA */
127 } else if (BCM2708_DMA_ACTIVE & cs) {
128 /* terminate the control block chain */
129 writel(0, dma_chan_base + BCM2708_DMA_NEXTCB);
131 /* abort the whole DMA */
132 writel(BCM2708_DMA_ABORT | BCM2708_DMA_ACTIVE,
133 dma_chan_base + BCM2708_DMA_CS);
139 EXPORT_SYMBOL_GPL(bcm_dma_abort);
/* DMA Manager Device Methods */
143 static void vc_dmaman_init(struct vc_dmaman *dmaman, void __iomem *dma_base,
146 dmaman->dma_base = dma_base;
147 dmaman->chan_available = chans_available;
148 dmaman->has_feature[BCM_DMA_FEATURE_FAST_ORD] = 0x0c; /* 2 & 3 */
149 dmaman->has_feature[BCM_DMA_FEATURE_BULK_ORD] = 0x01; /* 0 */
150 dmaman->has_feature[BCM_DMA_FEATURE_NORMAL_ORD] = 0xfe; /* 1 to 7 */
151 dmaman->has_feature[BCM_DMA_FEATURE_LITE_ORD] = 0x7f00; /* 8 to 14 */
154 static int vc_dmaman_chan_alloc(struct vc_dmaman *dmaman,
155 unsigned required_feature_set)
161 chans = dmaman->chan_available;
162 for (feature = 0; feature < BCM_DMA_FEATURE_COUNT; feature++)
163 /* select the subset of available channels with the desired
165 if (required_feature_set & (1 << feature))
166 chans &= dmaman->has_feature[feature];
171 /* return the ordinal of the first channel in the bitmap */
172 while (chans != 0 && (chans & 1) == 0) {
176 /* claim the channel */
177 dmaman->chan_available &= ~(1 << chan);
182 static int vc_dmaman_chan_free(struct vc_dmaman *dmaman, int chan)
187 if ((1 << chan) & dmaman->chan_available)
190 dmaman->chan_available |= (1 << chan);
/* DMA Manager Monitor */
197 extern int bcm_dma_chan_alloc(unsigned required_feature_set,
198 void __iomem **out_dma_base, int *out_dma_irq)
200 struct vc_dmaman *dmaman = g_dmaman;
201 struct platform_device *pdev = to_platform_device(dmaman_dev);
208 mutex_lock(&dmaman->lock);
209 chan = vc_dmaman_chan_alloc(dmaman, required_feature_set);
213 irq = platform_get_irq(pdev, (unsigned int)chan);
215 dev_err(dmaman_dev, "failed to get irq for DMA channel %d\n",
217 vc_dmaman_chan_free(dmaman, chan);
222 *out_dma_base = BCM2708_DMA_CHANIO(dmaman->dma_base, chan);
225 "Legacy API allocated channel=%d, base=%p, irq=%i\n",
226 chan, *out_dma_base, *out_dma_irq);
229 mutex_unlock(&dmaman->lock);
233 EXPORT_SYMBOL_GPL(bcm_dma_chan_alloc);
235 extern int bcm_dma_chan_free(int channel)
237 struct vc_dmaman *dmaman = g_dmaman;
243 mutex_lock(&dmaman->lock);
244 rc = vc_dmaman_chan_free(dmaman, channel);
245 mutex_unlock(&dmaman->lock);
249 EXPORT_SYMBOL_GPL(bcm_dma_chan_free);
251 int bcm_dmaman_probe(struct platform_device *pdev, void __iomem *base,
254 struct device *dev = &pdev->dev;
255 struct vc_dmaman *dmaman;
257 dmaman = devm_kzalloc(dev, sizeof(*dmaman), GFP_KERNEL);
261 mutex_init(&dmaman->lock);
262 vc_dmaman_init(dmaman, base, chans_available);
266 dev_info(dev, "DMA legacy API manager, dmachans=0x%x\n",
271 EXPORT_SYMBOL(bcm_dmaman_probe);
273 int bcm_dmaman_remove(struct platform_device *pdev)
279 EXPORT_SYMBOL(bcm_dmaman_remove);
281 MODULE_LICENSE("GPL");