/*
 * From the linux-rpi kernel tree (merge of branch 'sa1100-for-next'):
 * arch/arm/mach-rpc/dma.c
 */
1 /*
2  *  linux/arch/arm/mach-rpc/dma.c
3  *
4  *  Copyright (C) 1998 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *  DMA functions specific to RiscPC architecture
11  */
12 #include <linux/mman.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/io.h>
17
18 #include <asm/page.h>
19 #include <asm/dma.h>
20 #include <asm/fiq.h>
21 #include <asm/irq.h>
22 #include <mach/hardware.h>
23 #include <linux/uaccess.h>
24
25 #include <asm/mach/dma.h>
26 #include <asm/hardware/iomd.h>
27
/* Per-channel state for one IOMD hardware DMA channel. */
struct iomd_dma {
	struct dma_struct	dma;		/* Generic ARM dma_t; must be first for container_of() */
	void __iomem		*base;		/* Controller base address */
	int			irq;		/* Controller IRQ */
	unsigned int		state;		/* Last buffer-select state (DMA_ST_AB based) seen by the IRQ handler */
	dma_addr_t		cur_addr;	/* Bus address to program into CURA/CURB */
	unsigned int		cur_len;	/* Value to program into ENDA/ENDB (end offset | DMA_END_* flags) */
	dma_addr_t		dma_addr;	/* Next unconsumed bus address within the current sg entry */
	unsigned int		dma_len;	/* Bytes remaining in the current sg entry */
};
38
/* Historical transfer-size encoding, kept for reference only. */
#if 0
typedef enum {
	dma_size_8	= 1,
	dma_size_16	= 2,
	dma_size_32	= 4,
	dma_size_128	= 16
} dma_size_t;
#endif

/*
 * Transfer unit: 2 matches dma_size_16 in the table above.  Used both as
 * the size field written into CR and as the byte step in address math.
 */
#define TRANSFER_SIZE	2

/*
 * Channel register offsets, relative to the channel's CURA register
 * (each iomd_dma.base points at that channel's IOMD_IOxCURA).
 */
#define CURA	(0)
#define ENDA	(IOMD_IO0ENDA - IOMD_IO0CURA)
#define CURB	(IOMD_IO0CURB - IOMD_IO0CURA)
#define ENDB	(IOMD_IO0ENDB - IOMD_IO0CURA)
#define CR	(IOMD_IO0CR - IOMD_IO0CURA)
#define ST	(IOMD_IO0ST - IOMD_IO0CURA)
56
/*
 * Compute the next chunk to program into the controller.
 *
 * The IOMD is fed one page-bounded chunk at a time, so a single sg entry
 * larger than a page is split across several calls.  On return,
 * idma->cur_addr holds the bus address for CURA/CURB and idma->cur_len
 * holds the in-page end offset (less one transfer unit) OR'd with
 * DMA_END_L on the final chunk of a buffer and DMA_END_S at the end of
 * the whole scatterlist.
 */
static void iomd_get_next_sg(struct iomd_dma *idma)
{
	unsigned long end, offset, flags = 0;

	if (idma->dma.sg) {
		idma->cur_addr = idma->dma_addr;
		offset = idma->cur_addr & ~PAGE_MASK;

		/* Clamp this chunk to the end of the current page. */
		end = offset + idma->dma_len;

		if (end > PAGE_SIZE)
			end = PAGE_SIZE;

		/* Only one transfer unit left in the chunk: mark "last". */
		if (offset + TRANSFER_SIZE >= end)
			flags |= DMA_END_L;

		/* End register takes the offset of the last transfer unit. */
		idma->cur_len = end - TRANSFER_SIZE;

		idma->dma_len -= end - offset;
		idma->dma_addr += end - offset;

		if (idma->dma_len == 0) {
			if (idma->dma.sgcount > 1) {
				/* Advance to the next scatterlist entry. */
				idma->dma.sg = sg_next(idma->dma.sg);
				idma->dma_addr = idma->dma.sg->dma_address;
				idma->dma_len = idma->dma.sg->length;
				idma->dma.sgcount--;
			} else {
				/* Scatterlist exhausted: signal stop. */
				idma->dma.sg = NULL;
				flags |= DMA_END_S;
			}
		}
	} else {
		/* Nothing queued: program a terminating descriptor. */
		flags = DMA_END_S | DMA_END_L;
		idma->cur_addr = 0;
		idma->cur_len = 0;
	}

	idma->cur_len |= flags;
}
97
/*
 * DMA interrupt handler: keep the controller's double-buffered A/B
 * address/end register pairs primed with successive sg chunks until the
 * scatterlist is exhausted.
 */
static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
{
	struct iomd_dma *idma = dev_id;
	void __iomem *base = idma->base;
	unsigned int state = idma->state;
	unsigned int status, cur, end;

	do {
		status = readb(base + ST);
		if (!(status & DMA_ST_INT))
			goto out;

		/* Active buffer flipped since last refill: advance the sg. */
		if ((state ^ status) & DMA_ST_AB)
			iomd_get_next_sg(idma);

		/* This efficiently implements state = OFL != AB ? AB : 0 */
		state = ((status >> 2) ^ status) & DMA_ST_AB;
		if (state) {
			cur = CURA;
			end = ENDA;
		} else {
			cur = CURB;
			end = ENDB;
		}
		writel(idma->cur_addr, base + cur);
		writel(idma->cur_len, base + end);

		/* Overflowed with a terminating descriptor: transfer done. */
		if (status & DMA_ST_OFL &&
		    idma->cur_len == (DMA_END_S|DMA_END_L))
			break;
	} while (1);

	/* Sentinel: ~DMA_ST_AB means "complete" (see iomd_disable_dma). */
	state = ~DMA_ST_AB;
	disable_irq_nosync(irq);
out:
	idma->state = state;
	return IRQ_HANDLED;
}
136
137 static int iomd_request_dma(unsigned int chan, dma_t *dma)
138 {
139         struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
140
141         return request_irq(idma->irq, iomd_dma_handle,
142                            0, idma->dma.device_id, idma);
143 }
144
145 static void iomd_free_dma(unsigned int chan, dma_t *dma)
146 {
147         struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
148
149         free_irq(idma->irq, idma);
150 }
151
/*
 * Stand-in device for dma_map_single() on behalf of ISA-style drivers
 * that pass a plain virtual buffer instead of a mapped scatterlist.
 */
static struct device isa_dma_dev = {
	.init_name		= "fallback device",
	.coherent_dma_mask	= ~(dma_addr_t)0,
	.dma_mask		= &isa_dma_dev.coherent_dma_mask,
};
157
/*
 * Start (or resume) a transfer on an IOMD channel: map a bare buffer if
 * the caller supplied one, prime the first sg entry, and enable the
 * controller and its interrupt.
 */
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
	void __iomem *base = idma->base;
	unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;

	if (idma->dma.invalid) {
		idma->dma.invalid = 0;

		/*
		 * Cope with ISA-style drivers which expect cache
		 * coherence.
		 */
		if (!idma->dma.sg) {
			/* Build a one-entry scatterlist over addr/count. */
			idma->dma.sg = &idma->dma.buf;
			idma->dma.sgcount = 1;
			idma->dma.buf.length = idma->dma.count;
			idma->dma.buf.dma_address = dma_map_single(&isa_dma_dev,
				idma->dma.addr, idma->dma.count,
				idma->dma.dma_mode == DMA_MODE_READ ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}

		idma->dma_addr = idma->dma.sg->dma_address;
		idma->dma_len = idma->dma.sg->length;

		/* DMA_CR_C: reset the channel before reprogramming. */
		writeb(DMA_CR_C, base + CR);
		idma->state = DMA_ST_AB;
	}

	/* DMA_CR_D presumably selects transfer direction (read = device->mem). */
	if (idma->dma.dma_mode == DMA_MODE_READ)
		ctrl |= DMA_CR_D;

	writeb(ctrl, base + CR);
	enable_irq(idma->irq);
}
194
/*
 * Stop a channel.  The IRQ is only disabled if the handler has not
 * already done so itself (state == ~DMA_ST_AB marks "complete", set at
 * the end of iomd_dma_handle()), keeping enable/disable counts balanced.
 */
static void iomd_disable_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
	void __iomem *base = idma->base;
	unsigned long flags;

	local_irq_save(flags);
	if (idma->state != ~DMA_ST_AB)
		disable_irq(idma->irq);
	writeb(0, base + CR);	/* clear DMA_CR_E: controller off */
	local_irq_restore(flags);
}
207
/*
 * Map a requested cycle time (presumably in ns — confirm against callers)
 * to the channel's 2-bit speed field in IOMD_DMATCR.  Returns the speed
 * code programmed (0 = slowest, 3 = fastest).
 *
 * NOTE(review): the boundary operators are mixed (< 188, <= 250, < 438);
 * this matches the original table and is left untouched.
 */
static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
{
	int tcr, speed;

	if (cycle < 188)
		speed = 3;
	else if (cycle <= 250)
		speed = 2;
	else if (cycle < 438)
		speed = 1;
	else
		speed = 0;

	tcr = iomd_readb(IOMD_DMATCR);
	speed &= 3;

	/* Each channel owns a 2-bit field: DMA_n occupies bits 2n..2n+1. */
	switch (chan) {
	case DMA_0:
		tcr = (tcr & ~0x03) | speed;
		break;

	case DMA_1:
		tcr = (tcr & ~0x0c) | (speed << 2);
		break;

	case DMA_2:
		tcr = (tcr & ~0x30) | (speed << 4);
		break;

	case DMA_3:
		tcr = (tcr & ~0xc0) | (speed << 6);
		break;

	default:
		/* Other channels have no timing field; write tcr back unchanged. */
		break;
	}

	iomd_writeb(tcr, IOMD_DMATCR);

	return speed;
}
249
/* Operations for the six hardware IOMD channels registered below. */
static struct dma_ops iomd_dma_ops = {
	.type		= "IOMD",
	.request	= iomd_request_dma,
	.free		= iomd_free_dma,
	.enable		= iomd_enable_dma,
	.disable	= iomd_disable_dma,
	.setspeed	= iomd_set_dma_speed,
};
258
/* FIQ claim token shared by the floppy enable/disable paths below. */
static struct fiq_handler fh = {
	.name	= "floppydma"
};

/* Virtual floppy channel: data is moved by a FIQ handler, not the IOMD. */
struct floppy_dma {
	struct dma_struct	dma;	/* Must be first for container_of() */
	unsigned int		fiq;	/* FIQ number to enable/disable */
};
267
268 static void floppy_enable_dma(unsigned int chan, dma_t *dma)
269 {
270         struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
271         void *fiqhandler_start;
272         unsigned int fiqhandler_length;
273         struct pt_regs regs;
274
275         if (fdma->dma.sg)
276                 BUG();
277
278         if (fdma->dma.dma_mode == DMA_MODE_READ) {
279                 extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
280                 fiqhandler_start = &floppy_fiqin_start;
281                 fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
282         } else {
283                 extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
284                 fiqhandler_start = &floppy_fiqout_start;
285                 fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
286         }
287
288         regs.ARM_r9  = fdma->dma.count;
289         regs.ARM_r10 = (unsigned long)fdma->dma.addr;
290         regs.ARM_fp  = (unsigned long)FLOPPYDMA_BASE;
291
292         if (claim_fiq(&fh)) {
293                 printk("floppydma: couldn't claim FIQ.\n");
294                 return;
295         }
296
297         set_fiq_handler(fiqhandler_start, fiqhandler_length);
298         set_fiq_regs(&regs);
299         enable_fiq(fdma->fiq);
300 }
301
/* Tear down floppy "DMA": mask the FIQ and give the vector back. */
static void floppy_disable_dma(unsigned int chan, dma_t *dma)
{
	struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
	disable_fiq(fdma->fiq);
	release_fiq(&fh);
}
308
/*
 * Bytes not yet transferred: the FIQ handler keeps its running count in
 * banked r9 (seeded from dma.count in floppy_enable_dma()).
 */
static int floppy_get_residue(unsigned int chan, dma_t *dma)
{
	struct pt_regs regs;
	get_fiq_regs(&regs);
	return regs.ARM_r9;
}
315
/* Operations for the FIQ-driven virtual floppy channel. */
static struct dma_ops floppy_dma_ops = {
	.type		= "FIQDMA",
	.enable		= floppy_enable_dma,
	.disable	= floppy_disable_dma,
	.residue	= floppy_get_residue,
};
322
/*
 * This is virtual DMA - we don't need anything here.
 */
static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
{
	/* Intentionally empty: the sound driver manages transfers itself. */
}

/* No-op operations backing the DMA_VIRTUAL_SOUND channel. */
static struct dma_ops sound_dma_ops = {
	.type		= "VIRTUAL",
	.enable		= sound_enable_disable_dma,
	.disable	= sound_enable_disable_dma,
};
335
/* Six hardware channels: DMA_0..DMA_3 plus sound DMA_S0/DMA_S1. */
static struct iomd_dma iomd_dma[6];

/* The single FIQ-driven floppy channel. */
static struct floppy_dma floppy_dma = {
	.dma		= {
		.d_ops	= &floppy_dma_ops,
	},
	.fiq		= FIQ_FLOPPYDATA,
};

/* Purely virtual sound channel; all ops are no-ops. */
static dma_t sound_dma = {
	.d_ops		= &sound_dma_ops,
};
348
349 static int __init rpc_dma_init(void)
350 {
351         unsigned int i;
352         int ret;
353
354         iomd_writeb(0, IOMD_IO0CR);
355         iomd_writeb(0, IOMD_IO1CR);
356         iomd_writeb(0, IOMD_IO2CR);
357         iomd_writeb(0, IOMD_IO3CR);
358
359         iomd_writeb(0xa0, IOMD_DMATCR);
360
361         /*
362          * Setup DMA channels 2,3 to be for podules
363          * and channels 0,1 for internal devices
364          */
365         iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);
366
367         iomd_dma[DMA_0].base    = IOMD_BASE + IOMD_IO0CURA;
368         iomd_dma[DMA_0].irq     = IRQ_DMA0;
369         iomd_dma[DMA_1].base    = IOMD_BASE + IOMD_IO1CURA;
370         iomd_dma[DMA_1].irq     = IRQ_DMA1;
371         iomd_dma[DMA_2].base    = IOMD_BASE + IOMD_IO2CURA;
372         iomd_dma[DMA_2].irq     = IRQ_DMA2;
373         iomd_dma[DMA_3].base    = IOMD_BASE + IOMD_IO3CURA;
374         iomd_dma[DMA_3].irq     = IRQ_DMA3;
375         iomd_dma[DMA_S0].base   = IOMD_BASE + IOMD_SD0CURA;
376         iomd_dma[DMA_S0].irq    = IRQ_DMAS0;
377         iomd_dma[DMA_S1].base   = IOMD_BASE + IOMD_SD1CURA;
378         iomd_dma[DMA_S1].irq    = IRQ_DMAS1;
379
380         for (i = DMA_0; i <= DMA_S1; i++) {
381                 iomd_dma[i].dma.d_ops = &iomd_dma_ops;
382
383                 ret = isa_dma_add(i, &iomd_dma[i].dma);
384                 if (ret)
385                         printk("IOMDDMA%u: unable to register: %d\n", i, ret);
386         }
387
388         ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
389         if (ret)
390                 printk("IOMDFLOPPY: unable to register: %d\n", ret);
391         ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
392         if (ret)
393                 printk("IOMDSOUND: unable to register: %d\n", ret);
394         return 0;
395 }
396 core_initcall(rpc_dma_init);