/**
 * Broadcom Secondary Memory Interface driver
 *
 * Written by Luke Wren <luke@raspberrypi.org>
 * Copyright (c) 2015, Raspberry Pi (Trading) Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#define BCM2835_SMI_IMPLEMENTATION
#include <linux/broadcom/bcm2835_smi.h>

#define DRIVER_NAME "smi-bcm2835"

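/*
 * Rounds a byte count up to a whole number of pages; for example, with
 * 4 KiB pages, N_PAGES_FROM_BYTES(PAGE_SIZE + 1) == 2.
 */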
#define N_PAGES_FROM_BYTES(n) (((n) + PAGE_SIZE - 1) / PAGE_SIZE)

#define DMA_WRITE_TO_MEM true
#define DMA_READ_FROM_MEM false

struct bcm2835_smi_instance {
        struct device *dev;
        struct smi_settings settings;
        void __iomem *smi_regs_ptr;
        dma_addr_t smi_regs_busaddr;

        struct dma_chan *dma_chan;
        struct dma_slave_config dma_config;

        struct bcm2835_smi_bounce_info bounce;

        struct scatterlist buffer_sgl;

        struct clk *clk;

        /*
         * Sometimes we are called from an atomic context (e.g. by
         * JFFS2 + MTD), so we can't use a mutex.
         */
        spinlock_t transaction_lock;
};

/****************************************************************************
*
*   SMI peripheral setup
*
***************************************************************************/

static inline void write_smi_reg(struct bcm2835_smi_instance *inst,
        u32 val, unsigned int reg)
{
        writel(val, inst->smi_regs_ptr + reg);
}

static inline u32 read_smi_reg(struct bcm2835_smi_instance *inst,
        unsigned int reg)
{
        return readl(inst->smi_regs_ptr + reg);
}

/* Token-paste macro for e.g. SMIDSR_RSTROBE -> value of SMIDSR_RSTROBE_MASK */
#define _CONCAT(x, y) x##y
#define CONCAT(x, y) _CONCAT(x, y)

#define SET_BIT_FIELD(dest, field, bits) ((dest) = \
        ((dest) & ~CONCAT(field, _MASK)) | (((bits) << CONCAT(field, _OFFS)) & \
         CONCAT(field, _MASK)))
#define GET_BIT_FIELD(src, field) (((src) & \
        CONCAT(field, _MASK)) >> CONCAT(field, _OFFS))

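/*
 * Illustration only (not compiled): assuming field definitions of the
 * usual shape in bcm2835_smi.h, e.g.
 *
 *	#define SMIDSR_RSTROBE_OFFS	0
 *	#define SMIDSR_RSTROBE_MASK	(0x7f << SMIDSR_RSTROBE_OFFS)
 *
 * SET_BIT_FIELD(x, SMIDSR_RSTROBE, 3) expands to
 *
 *	x = (x & ~SMIDSR_RSTROBE_MASK) |
 *	    ((3 << SMIDSR_RSTROBE_OFFS) & SMIDSR_RSTROBE_MASK);
 *
 * The _OFFS/_MASK values above are placeholders; the real definitions
 * live in the header.
 */
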
static void smi_dump_context_labelled(struct bcm2835_smi_instance *inst,
        const char *label)
{
        dev_err(inst->dev, "SMI context dump: %s", label);
        dev_err(inst->dev, "SMICS:  0x%08x", read_smi_reg(inst, SMICS));
        dev_err(inst->dev, "SMIL:   0x%08x", read_smi_reg(inst, SMIL));
        dev_err(inst->dev, "SMIDSR: 0x%08x", read_smi_reg(inst, SMIDSR0));
        dev_err(inst->dev, "SMIDSW: 0x%08x", read_smi_reg(inst, SMIDSW0));
        dev_err(inst->dev, "SMIDC:  0x%08x", read_smi_reg(inst, SMIDC));
        dev_err(inst->dev, "SMIFD:  0x%08x", read_smi_reg(inst, SMIFD));
        dev_err(inst->dev, " ");
}

static inline void smi_dump_context(struct bcm2835_smi_instance *inst)
{
        smi_dump_context_labelled(inst, "");
}

static void smi_get_default_settings(struct bcm2835_smi_instance *inst)
{
        struct smi_settings *settings = &inst->settings;

        settings->data_width = SMI_WIDTH_16BIT;
        settings->pack_data = true;

        settings->read_setup_time = 1;
        settings->read_hold_time = 1;
        settings->read_pace_time = 1;
        settings->read_strobe_time = 3;

        settings->write_setup_time = settings->read_setup_time;
        settings->write_hold_time = settings->read_hold_time;
        settings->write_pace_time = settings->read_pace_time;
        settings->write_strobe_time = settings->read_strobe_time;

        settings->dma_enable = true;
        settings->dma_passthrough_enable = false;
        settings->dma_read_thresh = 0x01;
        settings->dma_write_thresh = 0x3f;
        settings->dma_panic_read_thresh = 0x20;
        settings->dma_panic_write_thresh = 0x20;
}
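
/*
 * For reference, the timing fields above are in SMI clock cycles: with
 * the defaults, each transfer takes setup + strobe + hold = 1 + 3 + 1 =
 * 5 cycles, with 1 pace cycle between transfers. The SMI clock itself is
 * configured outside this driver, so absolute times are an assumption;
 * at e.g. a 125 MHz SMI clock, 6 cycles would be 48 ns per transfer.
 */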

void bcm2835_smi_set_regs_from_settings(struct bcm2835_smi_instance *inst)
{
        struct smi_settings *settings = &inst->settings;
        int smidsr_temp = 0, smidsw_temp = 0, smics_temp,
            smidcs_temp, smidc_temp = 0;

        spin_lock(&inst->transaction_lock);

        /* Temporarily disable the peripheral: */
        smics_temp = read_smi_reg(inst, SMICS);
        write_smi_reg(inst, 0, SMICS);
        smidcs_temp = read_smi_reg(inst, SMIDCS);
        write_smi_reg(inst, 0, SMIDCS);

        if (settings->pack_data)
                smics_temp |= SMICS_PXLDAT;
        else
                smics_temp &= ~SMICS_PXLDAT;

        SET_BIT_FIELD(smidsr_temp, SMIDSR_RWIDTH, settings->data_width);
        SET_BIT_FIELD(smidsr_temp, SMIDSR_RSETUP, settings->read_setup_time);
        SET_BIT_FIELD(smidsr_temp, SMIDSR_RHOLD, settings->read_hold_time);
        SET_BIT_FIELD(smidsr_temp, SMIDSR_RPACE, settings->read_pace_time);
        SET_BIT_FIELD(smidsr_temp, SMIDSR_RSTROBE, settings->read_strobe_time);
        write_smi_reg(inst, smidsr_temp, SMIDSR0);

        SET_BIT_FIELD(smidsw_temp, SMIDSW_WWIDTH, settings->data_width);
        if (settings->data_width == SMI_WIDTH_8BIT)
                smidsw_temp |= SMIDSW_WSWAP;
        else
                smidsw_temp &= ~SMIDSW_WSWAP;
        SET_BIT_FIELD(smidsw_temp, SMIDSW_WSETUP, settings->write_setup_time);
        SET_BIT_FIELD(smidsw_temp, SMIDSW_WHOLD, settings->write_hold_time);
        SET_BIT_FIELD(smidsw_temp, SMIDSW_WPACE, settings->write_pace_time);
        SET_BIT_FIELD(smidsw_temp, SMIDSW_WSTROBE,
                        settings->write_strobe_time);
        write_smi_reg(inst, smidsw_temp, SMIDSW0);

        SET_BIT_FIELD(smidc_temp, SMIDC_REQR, settings->dma_read_thresh);
        SET_BIT_FIELD(smidc_temp, SMIDC_REQW, settings->dma_write_thresh);
        SET_BIT_FIELD(smidc_temp, SMIDC_PANICR,
                      settings->dma_panic_read_thresh);
        SET_BIT_FIELD(smidc_temp, SMIDC_PANICW,
                      settings->dma_panic_write_thresh);
        if (settings->dma_passthrough_enable) {
                smidc_temp |= SMIDC_DMAP;
                smidsr_temp |= SMIDSR_RDREQ;
                write_smi_reg(inst, smidsr_temp, SMIDSR0);
                smidsw_temp |= SMIDSW_WDREQ;
                write_smi_reg(inst, smidsw_temp, SMIDSW0);
        } else {
                smidc_temp &= ~SMIDC_DMAP;
        }
        if (settings->dma_enable)
                smidc_temp |= SMIDC_DMAEN;
        else
                smidc_temp &= ~SMIDC_DMAEN;

        write_smi_reg(inst, smidc_temp, SMIDC);

        /* Re-enable (if the peripheral was previously enabled) */
        write_smi_reg(inst, smics_temp, SMICS);
        write_smi_reg(inst, smidcs_temp, SMIDCS);

        spin_unlock(&inst->transaction_lock);
}
EXPORT_SYMBOL(bcm2835_smi_set_regs_from_settings);

struct smi_settings *bcm2835_smi_get_settings_from_regs
        (struct bcm2835_smi_instance *inst)
{
        struct smi_settings *settings = &inst->settings;
        int smidsr, smidsw, smidc;

        spin_lock(&inst->transaction_lock);

        smidsr = read_smi_reg(inst, SMIDSR0);
        smidsw = read_smi_reg(inst, SMIDSW0);
        smidc = read_smi_reg(inst, SMIDC);

        settings->pack_data = (read_smi_reg(inst, SMICS) & SMICS_PXLDAT) ?
            true : false;

        settings->data_width = GET_BIT_FIELD(smidsr, SMIDSR_RWIDTH);
        settings->read_setup_time = GET_BIT_FIELD(smidsr, SMIDSR_RSETUP);
        settings->read_hold_time = GET_BIT_FIELD(smidsr, SMIDSR_RHOLD);
        settings->read_pace_time = GET_BIT_FIELD(smidsr, SMIDSR_RPACE);
        settings->read_strobe_time = GET_BIT_FIELD(smidsr, SMIDSR_RSTROBE);

        settings->write_setup_time = GET_BIT_FIELD(smidsw, SMIDSW_WSETUP);
        settings->write_hold_time = GET_BIT_FIELD(smidsw, SMIDSW_WHOLD);
        settings->write_pace_time = GET_BIT_FIELD(smidsw, SMIDSW_WPACE);
        settings->write_strobe_time = GET_BIT_FIELD(smidsw, SMIDSW_WSTROBE);

        settings->dma_read_thresh = GET_BIT_FIELD(smidc, SMIDC_REQR);
        settings->dma_write_thresh = GET_BIT_FIELD(smidc, SMIDC_REQW);
        settings->dma_panic_read_thresh = GET_BIT_FIELD(smidc, SMIDC_PANICR);
        settings->dma_panic_write_thresh = GET_BIT_FIELD(smidc, SMIDC_PANICW);
        settings->dma_passthrough_enable = (smidc & SMIDC_DMAP) ? true : false;
        settings->dma_enable = (smidc & SMIDC_DMAEN) ? true : false;

        spin_unlock(&inst->transaction_lock);

        return settings;
}
EXPORT_SYMBOL(bcm2835_smi_get_settings_from_regs);
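
/*
 * Sketch of the intended consumer pattern (hypothetical client code; the
 * returned pointer aliases the instance's settings, so modify-then-apply
 * works):
 *
 *	struct smi_settings *s;
 *
 *	s = bcm2835_smi_get_settings_from_regs(smi);
 *	s->data_width = SMI_WIDTH_8BIT;
 *	s->read_strobe_time = 10;	// stretch the strobe for a slow device
 *	bcm2835_smi_set_regs_from_settings(smi);
 */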

static inline void smi_set_address(struct bcm2835_smi_instance *inst,
        unsigned int address)
{
        int smia_temp = 0, smida_temp = 0;

        SET_BIT_FIELD(smia_temp, SMIA_ADDR, address);
        SET_BIT_FIELD(smida_temp, SMIDA_ADDR, address);

        /*
         * Write to both address registers - the user doesn't care whether
         * we're doing programmed or direct transfers.
         */
        write_smi_reg(inst, smia_temp, SMIA);
        write_smi_reg(inst, smida_temp, SMIDA);
}

static void smi_setup_regs(struct bcm2835_smi_instance *inst)
{
        dev_dbg(inst->dev, "Initialising SMI registers...");
        /* Disable the peripheral if already enabled */
        write_smi_reg(inst, 0, SMICS);
        write_smi_reg(inst, 0, SMIDCS);

        smi_get_default_settings(inst);
        bcm2835_smi_set_regs_from_settings(inst);
        smi_set_address(inst, 0);

        write_smi_reg(inst, read_smi_reg(inst, SMICS) | SMICS_ENABLE, SMICS);
        write_smi_reg(inst, read_smi_reg(inst, SMIDCS) | SMIDCS_ENABLE,
                SMIDCS);
}

/****************************************************************************
*
*   Low-level SMI access functions
*   Other modules should use the exported higher-level functions, e.g.
*   bcm2835_smi_write_buf(), unless they have a good reason to use these.
*
***************************************************************************/

static inline uint32_t smi_read_single_word(struct bcm2835_smi_instance *inst)
{
        int timeout = 0;

        write_smi_reg(inst, SMIDCS_ENABLE, SMIDCS);
        write_smi_reg(inst, SMIDCS_ENABLE | SMIDCS_START, SMIDCS);
        /* Make sure things happen in the right order... */
        mb();
        while (!(read_smi_reg(inst, SMIDCS) & SMIDCS_DONE) &&
                ++timeout < 10000)
                ;
        if (timeout < 10000)
                return read_smi_reg(inst, SMIDD);

        dev_err(inst->dev,
                "SMI direct read timed out (is the clock set up correctly?)");
        return 0;
}

static inline void smi_write_single_word(struct bcm2835_smi_instance *inst,
        uint32_t data)
{
        int timeout = 0;

        write_smi_reg(inst, SMIDCS_ENABLE | SMIDCS_WRITE, SMIDCS);
        write_smi_reg(inst, data, SMIDD);
        write_smi_reg(inst, SMIDCS_ENABLE | SMIDCS_WRITE | SMIDCS_START,
                SMIDCS);

        while (!(read_smi_reg(inst, SMIDCS) & SMIDCS_DONE) &&
                ++timeout < 10000)
                ;
        if (timeout >= 10000)
                dev_err(inst->dev,
                "SMI direct write timed out (is the clock set up correctly?)");
}

/*
 * Initiates a programmed read into the read FIFO. It is up to the caller
 * to read data from the FIFO - either via a paced DMA transfer, or by
 * polling SMICS_RXD to check whether data is available. SMICS_ACTIVE
 * will go low upon completion.
 */
static void smi_init_programmed_read(struct bcm2835_smi_instance *inst,
        int num_transfers)
{
        int smics_temp;

        /* Disable the peripheral: */
        smics_temp = read_smi_reg(inst, SMICS) & ~(SMICS_ENABLE | SMICS_WRITE);
        write_smi_reg(inst, smics_temp, SMICS);
        while (read_smi_reg(inst, SMICS) & SMICS_ENABLE)
                ;

        /* Program the transfer count: */
        write_smi_reg(inst, num_transfers, SMIL);

        /* Re-enable and start: */
        smics_temp |= SMICS_ENABLE;
        write_smi_reg(inst, smics_temp, SMICS);
        smics_temp |= SMICS_CLEAR;
        /* Just to be certain: */
        mb();
        while (read_smi_reg(inst, SMICS) & SMICS_ACTIVE)
                ;
        write_smi_reg(inst, smics_temp, SMICS);
        smics_temp |= SMICS_START;
        write_smi_reg(inst, smics_temp, SMICS);
}

/*
 * Initiates a programmed write sequence, using data from the write FIFO.
 * It is up to the caller to initiate a DMA transfer before calling,
 * or use another method to keep the write FIFO topped up.
 * SMICS_ACTIVE will go low upon completion.
 */
static void smi_init_programmed_write(struct bcm2835_smi_instance *inst,
        int num_transfers)
{
        int smics_temp;

        /* Disable the peripheral: */
        smics_temp = read_smi_reg(inst, SMICS) & ~SMICS_ENABLE;
        write_smi_reg(inst, smics_temp, SMICS);
        while (read_smi_reg(inst, SMICS) & SMICS_ENABLE)
                ;

        /* Program the transfer count: */
        write_smi_reg(inst, num_transfers, SMIL);

        /* Set up, re-enable and start: */
        smics_temp |= SMICS_WRITE | SMICS_ENABLE;
        write_smi_reg(inst, smics_temp, SMICS);
        smics_temp |= SMICS_START;
        write_smi_reg(inst, smics_temp, SMICS);
}

/* Initiate a read and then poll FIFO for data, reading out as it appears. */
static void smi_read_fifo(struct bcm2835_smi_instance *inst,
        uint32_t *dest, int n_bytes)
{
        if (read_smi_reg(inst, SMICS) & SMICS_RXD) {
                smi_dump_context_labelled(inst,
                        "WARNING: read FIFO not empty at start of read call.");
                /* Drain any stale words left in the read FIFO */
                while (read_smi_reg(inst, SMICS) & SMICS_RXD)
                        (void)read_smi_reg(inst, SMID);
        }

        /* Dispatch the read: */
        if (inst->settings.data_width == SMI_WIDTH_8BIT) {
                smi_init_programmed_read(inst, n_bytes);
        } else if (inst->settings.data_width == SMI_WIDTH_16BIT) {
                smi_init_programmed_read(inst, n_bytes / 2);
        } else {
                dev_err(inst->dev, "Unsupported data width for read.");
                return;
        }

        /* Poll FIFO to keep it empty */
        while (!(read_smi_reg(inst, SMICS) & SMICS_DONE))
                if (read_smi_reg(inst, SMICS) & SMICS_RXD)
                        *dest++ = read_smi_reg(inst, SMID);

        /* Ensure that the FIFO is emptied */
        if (read_smi_reg(inst, SMICS) & SMICS_RXD) {
                int fifo_count;

                fifo_count = GET_BIT_FIELD(read_smi_reg(inst, SMIFD),
                        SMIFD_FCNT);
                while (fifo_count--)
                        *dest++ = read_smi_reg(inst, SMID);
        }

        if (!(read_smi_reg(inst, SMICS) & SMICS_DONE))
                smi_dump_context_labelled(inst,
                        "WARNING: transaction finished but done bit not set.");

        if (read_smi_reg(inst, SMICS) & SMICS_RXD)
                smi_dump_context_labelled(inst,
                        "WARNING: read FIFO not empty at end of read call.");
}

/* Initiate a write, and then keep the FIFO topped up. */
static void smi_write_fifo(struct bcm2835_smi_instance *inst,
        uint32_t *src, int n_bytes)
{
        int i, timeout = 0;

        /* Empty the FIFOs if they are not already empty */
        if (!(read_smi_reg(inst, SMICS) & SMICS_TXE)) {
                smi_dump_context_labelled(inst,
                    "WARNING: write FIFO not empty at start of write call.");
                write_smi_reg(inst, read_smi_reg(inst, SMICS) | SMICS_CLEAR,
                        SMICS);
        }

        /* Initiate the transfer */
        if (inst->settings.data_width == SMI_WIDTH_8BIT) {
                smi_init_programmed_write(inst, n_bytes);
        } else if (inst->settings.data_width == SMI_WIDTH_16BIT) {
                smi_init_programmed_write(inst, n_bytes / 2);
        } else {
                dev_err(inst->dev, "Unsupported data width for write.");
                return;
        }
        /* Fill the FIFO, one 32-bit word at a time: */
        for (i = 0; i < DIV_ROUND_UP(n_bytes, 4); ++i) {
                while (!(read_smi_reg(inst, SMICS) & SMICS_TXD))
                        ;
                write_smi_reg(inst, *src++, SMID);
        }
        /* Busy wait... */
        while (!(read_smi_reg(inst, SMICS) & SMICS_DONE) && ++timeout <
                1000000)
                ;
        if (timeout >= 1000000)
                smi_dump_context_labelled(inst,
                        "Timed out on write operation!");
        if (!(read_smi_reg(inst, SMICS) & SMICS_TXE))
                smi_dump_context_labelled(inst,
                        "WARNING: FIFO not empty at end of write operation.");
}

/****************************************************************************
*
*   SMI DMA operations
*
***************************************************************************/

/*
 * Disable SMI and put it into the correct direction before doing DMA setup.
 * Stops spurious DREQs during setup. The peripheral is re-enabled by the
 * init_*() functions.
 */
static void smi_disable(struct bcm2835_smi_instance *inst,
        enum dma_transfer_direction direction)
{
        int smics_temp = read_smi_reg(inst, SMICS) & ~SMICS_ENABLE;

        if (direction == DMA_DEV_TO_MEM)
                smics_temp &= ~SMICS_WRITE;
        else
                smics_temp |= SMICS_WRITE;
        write_smi_reg(inst, smics_temp, SMICS);
        while (read_smi_reg(inst, SMICS) & SMICS_ACTIVE)
                ;
}

static struct scatterlist *smi_scatterlist_from_buffer(
        struct bcm2835_smi_instance *inst,
        dma_addr_t buf,
        size_t len,
        struct scatterlist *sg)
{
        sg_init_table(sg, 1);
        sg_dma_address(sg) = buf;
        sg_dma_len(sg) = len;
        return sg;
}

static void smi_dma_callback_user_copy(void *param)
{
        /* Notify the bottom half that a chunk is ready for user copy */
        struct bcm2835_smi_instance *inst =
                (struct bcm2835_smi_instance *)param;

        up(&inst->bounce.callback_sem);
}

/*
 * Creates a descriptor, assigns the given callback, and submits the
 * descriptor to dmaengine. Does not block - the caller can queue up
 * multiple descriptors and then wait for them all to complete.
 * sg_len is the number of control blocks, NOT the number of bytes.
 * dir can be DMA_MEM_TO_DEV or DMA_DEV_TO_MEM.
 * callback can be NULL - in which case it is not called.
 */
static inline struct dma_async_tx_descriptor *smi_dma_submit_sgl(
        struct bcm2835_smi_instance *inst,
        struct scatterlist *sgl,
        size_t sg_len,
        enum dma_transfer_direction dir,
        dma_async_tx_callback callback)
{
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_slave_sg(inst->dma_chan,
                                       sgl,
                                       sg_len,
                                       dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK |
                                       DMA_PREP_FENCE);
        if (!desc) {
                dev_err(inst->dev, "read_sgl: dma slave preparation failed!");
                write_smi_reg(inst, read_smi_reg(inst, SMICS) & ~SMICS_ACTIVE,
                        SMICS);
                while (read_smi_reg(inst, SMICS) & SMICS_ACTIVE)
                        cpu_relax();
                write_smi_reg(inst, read_smi_reg(inst, SMICS) | SMICS_ACTIVE,
                        SMICS);
                return NULL;
        }
        desc->callback = callback;
        desc->callback_param = inst;
        if (dmaengine_submit(desc) < 0)
                return NULL;
        return desc;
}

/* NB this function blocks until the transfer is complete */
static void
smi_dma_read_sgl(struct bcm2835_smi_instance *inst,
        struct scatterlist *sgl, size_t sg_len, size_t n_bytes)
{
        struct dma_async_tx_descriptor *desc;

        /*
         * Disable SMI and set to read before dispatching DMA - if SMI is in
         * write mode and the TX FIFO is empty, it will generate a DREQ which
         * may cause the read DMA to complete before the SMI read command is
         * even dispatched! We want to dispatch DMA before the SMI read so
         * that reading is gapless, for the logic analyser use case.
         */
        smi_disable(inst, DMA_DEV_TO_MEM);

        desc = smi_dma_submit_sgl(inst, sgl, sg_len, DMA_DEV_TO_MEM, NULL);
        dma_async_issue_pending(inst->dma_chan);

        if (inst->settings.data_width == SMI_WIDTH_8BIT)
                smi_init_programmed_read(inst, n_bytes);
        else
                smi_init_programmed_read(inst, n_bytes / 2);

        if (dma_wait_for_async_tx(desc) == DMA_ERROR)
                smi_dump_context_labelled(inst, "DMA timeout!");
}

static void
smi_dma_write_sgl(struct bcm2835_smi_instance *inst,
        struct scatterlist *sgl, size_t sg_len, size_t n_bytes)
{
        struct dma_async_tx_descriptor *desc;

        if (inst->settings.data_width == SMI_WIDTH_8BIT)
                smi_init_programmed_write(inst, n_bytes);
        else
                smi_init_programmed_write(inst, n_bytes / 2);

        desc = smi_dma_submit_sgl(inst, sgl, sg_len, DMA_MEM_TO_DEV, NULL);
        dma_async_issue_pending(inst->dma_chan);

        if (dma_wait_for_async_tx(desc) == DMA_ERROR) {
                smi_dump_context_labelled(inst, "DMA timeout!");
        } else {
                /* Wait for SMI to finish our writes */
                while (!(read_smi_reg(inst, SMICS) & SMICS_DONE))
                        cpu_relax();
        }
}

ssize_t bcm2835_smi_user_dma(
        struct bcm2835_smi_instance *inst,
        enum dma_transfer_direction dma_dir,
        char __user *user_ptr, size_t count,
        struct bcm2835_smi_bounce_info **bounce)
{
        int chunk_no = 0, chunk_size, count_left = count;
        struct scatterlist *sgl;
        void (*init_trans_func)(struct bcm2835_smi_instance *, int);

        spin_lock(&inst->transaction_lock);

        if (dma_dir == DMA_DEV_TO_MEM)
                init_trans_func = smi_init_programmed_read;
        else
                init_trans_func = smi_init_programmed_write;

        smi_disable(inst, dma_dir);

        sema_init(&inst->bounce.callback_sem, 0);
        if (bounce)
                *bounce = &inst->bounce;
        while (count_left) {
                chunk_size = count_left > DMA_BOUNCE_BUFFER_SIZE ?
                        DMA_BOUNCE_BUFFER_SIZE : count_left;
                if (chunk_size == DMA_BOUNCE_BUFFER_SIZE) {
                        sgl =
                        &inst->bounce.sgl[chunk_no % DMA_BOUNCE_BUFFER_COUNT];
                } else {
                        sgl = smi_scatterlist_from_buffer(
                                inst,
                                inst->bounce.phys[
                                        chunk_no % DMA_BOUNCE_BUFFER_COUNT],
                                chunk_size,
                                &inst->buffer_sgl);
                }

                if (!smi_dma_submit_sgl(inst, sgl, 1, dma_dir,
                                        smi_dma_callback_user_copy)) {
                        dev_err(inst->dev, "sgl submit failed");
                        count = 0;
                        goto out;
                }
                count_left -= chunk_size;
                chunk_no++;
        }
        dma_async_issue_pending(inst->dma_chan);

        if (inst->settings.data_width == SMI_WIDTH_8BIT)
                init_trans_func(inst, count);
        else if (inst->settings.data_width == SMI_WIDTH_16BIT)
                init_trans_func(inst, count / 2);
out:
        spin_unlock(&inst->transaction_lock);
        return count;
}
EXPORT_SYMBOL(bcm2835_smi_user_dma);
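
/*
 * Sketch of how a character-device wrapper might consume the API above
 * (illustrative names only; chunks arrive one DMA callback at a time via
 * the bounce semaphore):
 *
 *	struct bcm2835_smi_bounce_info *bounce;
 *	ssize_t count;
 *
 *	count = bcm2835_smi_user_dma(smi, DMA_DEV_TO_MEM, user_buf,
 *				     n_bytes, &bounce);
 *	if (count) {
 *		// Wait for the first chunk's completion callback, then
 *		// copy that chunk out to userspace:
 *		if (down_timeout(&bounce->callback_sem,
 *				 msecs_to_jiffies(1000)))
 *			return -ETIMEDOUT;
 *		if (copy_to_user(user_buf, bounce->buffer[0], count))
 *			return -EFAULT;
 *	}
 */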

/****************************************************************************
*
*   High-level buffer transfer functions - for use by other drivers
*
***************************************************************************/

/* The buffer must be physically contiguous - i.e. kmalloc, not vmalloc! */
void bcm2835_smi_write_buf(
        struct bcm2835_smi_instance *inst,
        const void *buf, size_t n_bytes)
{
        int odd_bytes = n_bytes & 0x3;

        n_bytes -= odd_bytes;

        spin_lock(&inst->transaction_lock);

        if (n_bytes > DMA_THRESHOLD_BYTES) {
                dma_addr_t phy_addr = dma_map_single(
                        inst->dev,
                        (void *)buf,
                        n_bytes,
                        DMA_TO_DEVICE);
                struct scatterlist *sgl =
                        smi_scatterlist_from_buffer(inst, phy_addr, n_bytes,
                                &inst->buffer_sgl);

                if (!sgl) {
                        smi_dump_context_labelled(inst,
                        "Error: could not create scatterlist for write!");
                        goto out;
                }
                smi_dma_write_sgl(inst, sgl, 1, n_bytes);

                dma_unmap_single(inst->dev, phy_addr, n_bytes, DMA_TO_DEVICE);
        } else if (n_bytes) {
                smi_write_fifo(inst, (uint32_t *)buf, n_bytes);
        }
        buf += n_bytes;

        if (inst->settings.data_width == SMI_WIDTH_8BIT) {
                while (odd_bytes--)
                        smi_write_single_word(inst, *(uint8_t *)(buf++));
        } else {
                while (odd_bytes >= 2) {
                        smi_write_single_word(inst, *(uint16_t *)buf);
                        buf += 2;
                        odd_bytes -= 2;
                }
                if (odd_bytes) {
                        /*
                         * Writing an odd number of bytes on a 16-bit bus is
                         * a user bug. It's kinder to fail early and tell them
                         * than to e.g. transparently give them the bottom
                         * byte of a 16-bit transfer.
                         */
                        dev_err(inst->dev,
                "WARNING: odd number of bytes specified for wide transfer.");
                        dev_err(inst->dev,
                "At least one byte dropped as a result.");
                        dump_stack();
                }
        }
out:
        spin_unlock(&inst->transaction_lock);
}
EXPORT_SYMBOL(bcm2835_smi_write_buf);
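
/*
 * Example caller (hypothetical; the buffer comes from kmalloc so it is
 * physically contiguous, and the length is kept even for 16-bit mode):
 *
 *	u16 *tx = kmalloc(512, GFP_KERNEL);
 *
 *	if (tx) {
 *		memset(tx, 0xAA, 512);
 *		bcm2835_smi_set_address(smi, 0);
 *		bcm2835_smi_write_buf(smi, tx, 512);
 *		kfree(tx);
 *	}
 */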

void bcm2835_smi_read_buf(struct bcm2835_smi_instance *inst,
        void *buf, size_t n_bytes)
{
        /*
         * SMI is inherently 32-bit, which causes surprising amounts of mess
         * for bytes % 4 != 0. It is easiest to avoid this mess altogether
         * by handling the remainder separately.
         */
        int odd_bytes = n_bytes & 0x3;

        spin_lock(&inst->transaction_lock);
        n_bytes -= odd_bytes;
        if (n_bytes > DMA_THRESHOLD_BYTES) {
                dma_addr_t phy_addr = dma_map_single(inst->dev,
                                                     buf, n_bytes,
                                                     DMA_FROM_DEVICE);
                struct scatterlist *sgl = smi_scatterlist_from_buffer(
                        inst, phy_addr, n_bytes,
                        &inst->buffer_sgl);
                if (!sgl) {
                        smi_dump_context_labelled(inst,
                        "Error: could not create scatterlist for read!");
                        goto out;
                }
                smi_dma_read_sgl(inst, sgl, 1, n_bytes);
                dma_unmap_single(inst->dev, phy_addr, n_bytes, DMA_FROM_DEVICE);
        } else if (n_bytes) {
                smi_read_fifo(inst, (uint32_t *)buf, n_bytes);
        }
        buf += n_bytes;

        if (inst->settings.data_width == SMI_WIDTH_8BIT) {
                while (odd_bytes--)
                        *((uint8_t *)(buf++)) = smi_read_single_word(inst);
        } else {
                while (odd_bytes >= 2) {
                        *(uint16_t *)buf = smi_read_single_word(inst);
                        buf += 2;
                        odd_bytes -= 2;
                }
                if (odd_bytes) {
                        dev_err(inst->dev,
                "WARNING: odd number of bytes specified for wide transfer.");
                        dev_err(inst->dev,
                "At least one byte dropped as a result.");
                        dump_stack();
                }
        }
out:
        spin_unlock(&inst->transaction_lock);
}
EXPORT_SYMBOL(bcm2835_smi_read_buf);
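
/*
 * Read counterpart to the write example above (sketch): in 16-bit mode,
 * keep n_bytes even or the trailing byte is dropped with a warning.
 *
 *	u16 rx[256];
 *
 *	bcm2835_smi_set_address(smi, 0);
 *	bcm2835_smi_read_buf(smi, rx, sizeof(rx));
 */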

void bcm2835_smi_set_address(struct bcm2835_smi_instance *inst,
        unsigned int address)
{
        spin_lock(&inst->transaction_lock);
        smi_set_address(inst, address);
        spin_unlock(&inst->transaction_lock);
}
EXPORT_SYMBOL(bcm2835_smi_set_address);

struct bcm2835_smi_instance *bcm2835_smi_get(struct device_node *node)
{
        struct platform_device *pdev;

        if (!node)
                return NULL;

        pdev = of_find_device_by_node(node);
        if (!pdev)
                return NULL;

        return platform_get_drvdata(pdev);
}
EXPORT_SYMBOL(bcm2835_smi_get);
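
/*
 * A client driver typically locates the SMI controller through a phandle
 * in its own DT node (the property name below is illustrative):
 *
 *	smi_handle = <&smi>;
 *
 * and resolves it in its probe():
 *
 *	struct device_node *smi_node;
 *	struct bcm2835_smi_instance *smi;
 *
 *	smi_node = of_parse_phandle(dev->of_node, "smi_handle", 0);
 *	smi = bcm2835_smi_get(smi_node);
 *	of_node_put(smi_node);
 *	if (!smi)
 *		return -EPROBE_DEFER;
 */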

/****************************************************************************
*
*   bcm2835_smi_probe - called when the driver is loaded.
*
***************************************************************************/

static int bcm2835_smi_dma_setup(struct bcm2835_smi_instance *inst)
{
        int i, rv = 0;

        inst->dma_chan = dma_request_slave_channel(inst->dev, "rx-tx");
        if (!inst->dma_chan) {
                dev_err(inst->dev, "Could not get DMA channel!");
                return -ENODEV;
        }

        inst->dma_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        inst->dma_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        inst->dma_config.src_addr = inst->smi_regs_busaddr + SMID;
        inst->dma_config.dst_addr = inst->dma_config.src_addr;
        /* Direction unimportant - always overridden by prep_slave_sg */
        inst->dma_config.direction = DMA_DEV_TO_MEM;
        dmaengine_slave_config(inst->dma_chan, &inst->dma_config);
        /* Alloc and map bounce buffers */
        for (i = 0; i < DMA_BOUNCE_BUFFER_COUNT; ++i) {
                inst->bounce.buffer[i] =
                        dmam_alloc_coherent(inst->dev, DMA_BOUNCE_BUFFER_SIZE,
                                            &inst->bounce.phys[i],
                                            GFP_KERNEL);
                if (!inst->bounce.buffer[i]) {
                        dev_err(inst->dev, "Could not allocate buffer!");
                        rv = -ENOMEM;
                        break;
                }
                smi_scatterlist_from_buffer(
                        inst,
                        inst->bounce.phys[i],
                        DMA_BOUNCE_BUFFER_SIZE,
                        &inst->bounce.sgl[i]);
        }

        return rv;
}

static int bcm2835_smi_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct resource *ioresource;
        struct bcm2835_smi_instance *inst;
        const __be32 *addr;

        /* We require device tree support */
        if (!node)
                return -EINVAL;
        /* Allocate buffers and instance data */
        inst = devm_kzalloc(dev, sizeof(struct bcm2835_smi_instance),
                GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        inst->dev = dev;
        spin_lock_init(&inst->transaction_lock);

        ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        inst->smi_regs_ptr = devm_ioremap_resource(dev, ioresource);
        if (IS_ERR(inst->smi_regs_ptr))
                return PTR_ERR(inst->smi_regs_ptr);

        addr = of_get_address(node, 0, NULL, NULL);
        if (!addr)
                return -EINVAL;
        inst->smi_regs_busaddr = be32_to_cpu(*addr);

        err = bcm2835_smi_dma_setup(inst);
        if (err)
                return err;

        /* Request and enable the clock */
        inst->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(inst->clk))
                return PTR_ERR(inst->clk);
        clk_prepare_enable(inst->clk);

        /* Finally, do peripheral setup */
        smi_setup_regs(inst);

        platform_set_drvdata(pdev, inst);

        dev_info(inst->dev, "initialised");

        return 0;
}

/****************************************************************************
*
*   bcm2835_smi_remove - called when the driver is unloaded.
*
***************************************************************************/

static int bcm2835_smi_remove(struct platform_device *pdev)
{
        struct bcm2835_smi_instance *inst = platform_get_drvdata(pdev);
        struct device *dev = inst->dev;

        dmaengine_terminate_all(inst->dma_chan);
        dma_release_channel(inst->dma_chan);

        clk_disable_unprepare(inst->clk);

        dev_info(dev, "SMI device removed - OK");
        return 0;
}

/****************************************************************************
*
*   Register the driver with device tree
*
***************************************************************************/

static const struct of_device_id bcm2835_smi_of_match[] = {
        { .compatible = "brcm,bcm2835-smi", },
        { /* sentinel */ },
};

MODULE_DEVICE_TABLE(of, bcm2835_smi_of_match);
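
/*
 * Matching device tree node (sketch only - the address, DREQ number and
 * clock index shown are assumptions based on the downstream BCM2835
 * peripheral map, not something this file defines):
 *
 *	smi: smi@7e600000 {
 *		compatible = "brcm,bcm2835-smi";
 *		reg = <0x7e600000 0x100>;
 *		clocks = <&clocks BCM2835_CLOCK_SMI>;
 *		dmas = <&dma 4>;
 *		dma-names = "rx-tx";	// must match the name requested above
 *	};
 */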

static struct platform_driver bcm2835_smi_driver = {
        .probe = bcm2835_smi_probe,
        .remove = bcm2835_smi_remove,
        .driver = {
                   .name = DRIVER_NAME,
                   .owner = THIS_MODULE,
                   .of_match_table = bcm2835_smi_of_match,
                   },
};

module_platform_driver(bcm2835_smi_driver);

MODULE_ALIAS("platform:smi-bcm2835");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Device driver for BCM2835's secondary memory interface");
MODULE_AUTHOR("Luke Wren <luke@raspberrypi.org>");