Merge tag 'ecryptfs-3.9-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git...
[platform/upstream/kernel-adaptation-pc.git] / drivers / spi / spi-atmel.c
1 /*
2  * Driver for Atmel AT32 and AT91 SPI Controllers
3  *
4  * Copyright (C) 2006 Atmel Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/clk.h>
14 #include <linux/module.h>
15 #include <linux/platform_device.h>
16 #include <linux/delay.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/err.h>
19 #include <linux/interrupt.h>
20 #include <linux/spi/spi.h>
21 #include <linux/slab.h>
22 #include <linux/platform_data/atmel.h>
23 #include <linux/of.h>
24
25 #include <asm/io.h>
26 #include <asm/gpio.h>
27 #include <mach/cpu.h>
28
/* SPI register offsets */
#define SPI_CR					0x0000
#define SPI_MR					0x0004
#define SPI_RDR					0x0008
#define SPI_TDR					0x000c
#define SPI_SR					0x0010
#define SPI_IER					0x0014
#define SPI_IDR					0x0018
#define SPI_IMR					0x001c
#define SPI_CSR0				0x0030
#define SPI_CSR1				0x0034
#define SPI_CSR2				0x0038
#define SPI_CSR3				0x003c
/* PDC (Peripheral DMA Controller) registers: current and "next"
 * rx/tx pointer+counter pairs, plus transfer enable/status. */
#define SPI_RPR					0x0100
#define SPI_RCR					0x0104
#define SPI_TPR					0x0108
#define SPI_TCR					0x010c
#define SPI_RNPR				0x0110
#define SPI_RNCR				0x0114
#define SPI_TNPR				0x0118
#define SPI_TNCR				0x011c
#define SPI_PTCR				0x0120
#define SPI_PTSR				0x0124

/* Bitfields in CR */
#define SPI_SPIEN_OFFSET			0
#define SPI_SPIEN_SIZE				1
#define SPI_SPIDIS_OFFSET			1
#define SPI_SPIDIS_SIZE				1
#define SPI_SWRST_OFFSET			7
#define SPI_SWRST_SIZE				1
#define SPI_LASTXFER_OFFSET			24
#define SPI_LASTXFER_SIZE			1

/* Bitfields in MR */
#define SPI_MSTR_OFFSET				0
#define SPI_MSTR_SIZE				1
#define SPI_PS_OFFSET				1
#define SPI_PS_SIZE				1
#define SPI_PCSDEC_OFFSET			2
#define SPI_PCSDEC_SIZE				1
#define SPI_FDIV_OFFSET				3
#define SPI_FDIV_SIZE				1
#define SPI_MODFDIS_OFFSET			4
#define SPI_MODFDIS_SIZE			1
#define SPI_LLB_OFFSET				7
#define SPI_LLB_SIZE				1
#define SPI_PCS_OFFSET				16
#define SPI_PCS_SIZE				4
#define SPI_DLYBCS_OFFSET			24
#define SPI_DLYBCS_SIZE				8

/* Bitfields in RDR */
#define SPI_RD_OFFSET				0
#define SPI_RD_SIZE				16

/* Bitfields in TDR */
#define SPI_TD_OFFSET				0
#define SPI_TD_SIZE				16

/* Bitfields in SR */
#define SPI_RDRF_OFFSET				0
#define SPI_RDRF_SIZE				1
#define SPI_TDRE_OFFSET				1
#define SPI_TDRE_SIZE				1
#define SPI_MODF_OFFSET				2
#define SPI_MODF_SIZE				1
#define SPI_OVRES_OFFSET			3
#define SPI_OVRES_SIZE				1
#define SPI_ENDRX_OFFSET			4
#define SPI_ENDRX_SIZE				1
#define SPI_ENDTX_OFFSET			5
#define SPI_ENDTX_SIZE				1
#define SPI_RXBUFF_OFFSET			6
#define SPI_RXBUFF_SIZE				1
#define SPI_TXBUFE_OFFSET			7
#define SPI_TXBUFE_SIZE				1
#define SPI_NSSR_OFFSET				8
#define SPI_NSSR_SIZE				1
#define SPI_TXEMPTY_OFFSET			9
#define SPI_TXEMPTY_SIZE			1
#define SPI_SPIENS_OFFSET			16
#define SPI_SPIENS_SIZE				1

/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET				0
#define SPI_CPOL_SIZE				1
#define SPI_NCPHA_OFFSET			1
#define SPI_NCPHA_SIZE				1
#define SPI_CSAAT_OFFSET			3
#define SPI_CSAAT_SIZE				1
#define SPI_BITS_OFFSET				4
#define SPI_BITS_SIZE				4
#define SPI_SCBR_OFFSET				8
#define SPI_SCBR_SIZE				8
#define SPI_DLYBS_OFFSET			16
#define SPI_DLYBS_SIZE				8
#define SPI_DLYBCT_OFFSET			24
#define SPI_DLYBCT_SIZE				8

/* Bitfields in RCR */
#define SPI_RXCTR_OFFSET			0
#define SPI_RXCTR_SIZE				16

/* Bitfields in TCR */
#define SPI_TXCTR_OFFSET			0
#define SPI_TXCTR_SIZE				16

/* Bitfields in RNCR */
#define SPI_RXNCR_OFFSET			0
#define SPI_RXNCR_SIZE				16

/* Bitfields in TNCR */
#define SPI_TXNCR_OFFSET			0
#define SPI_TXNCR_SIZE				16

/* Bitfields in PTCR */
#define SPI_RXTEN_OFFSET			0
#define SPI_RXTEN_SIZE				1
#define SPI_RXTDIS_OFFSET			1
#define SPI_RXTDIS_SIZE				1
#define SPI_TXTEN_OFFSET			8
#define SPI_TXTEN_SIZE				1
#define SPI_TXTDIS_OFFSET			9
#define SPI_TXTDIS_SIZE				1

/* Constants for BITS (CSRx.BITS encodes bits_per_word as value - 8) */
#define SPI_BITS_8_BPT				0
#define SPI_BITS_9_BPT				1
#define SPI_BITS_10_BPT				2
#define SPI_BITS_11_BPT				3
#define SPI_BITS_12_BPT				4
#define SPI_BITS_13_BPT				5
#define SPI_BITS_14_BPT				6
#define SPI_BITS_15_BPT				7
#define SPI_BITS_16_BPT				8

/* Bit manipulation macros */
/* single-bit mask for field <name> */
#define SPI_BIT(name) \
	(1 << SPI_##name##_OFFSET)
/* place <value> into field <name> (masked to the field width) */
#define SPI_BF(name,value) \
	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
/* extract field <name> from a register value */
#define SPI_BFEXT(name,value) \
	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
/* replace field <name> in <old> with <value> */
#define SPI_BFINS(name,value,old) \
	( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
	  | SPI_BF(name,value))

/* Register access macros */
#define spi_readl(port,reg) \
	__raw_readl((port)->regs + SPI_##reg)
#define spi_writel(port,reg,value) \
	__raw_writel((value), (port)->regs + SPI_##reg)
182
183
184 /*
185  * The core SPI transfer engine just talks to a register bank to set up
186  * DMA transfers; transfer queue progress is driven by IRQs.  The clock
187  * framework provides the base clock, subdivided for each spi_device.
188  */
struct atmel_spi {
	spinlock_t		lock;		/* protects queue and transfer state below */

	void __iomem		*regs;		/* controller register base */
	int			irq;
	struct clk		*clk;		/* peripheral clock (rate used for SCBR) */
	struct platform_device	*pdev;
	struct spi_device	*stay;		/* device left selected after its last message, if any */

	u8			stopping;	/* nonzero: refuse new work (presumably set on remove — not visible here) */
	struct list_head	queue;		/* pending spi_messages, head is in flight */
	struct spi_transfer	*current_transfer;
	unsigned long		current_remaining_bytes;	/* bytes of current_transfer not yet submitted */
	struct spi_transfer	*next_transfer;	/* transfer preloaded into the PDC "next" registers */
	unsigned long		next_remaining_bytes;

	void			*buffer;	/* scratch bounce buffer for xfers lacking rx or tx buf */
	dma_addr_t		buffer_dma;	/* DMA address of the scratch buffer */
};
208
/* Controller-specific per-slave state */
struct atmel_spi_device {
	unsigned int		npcs_pin;	/* GPIO that drives this slave's chipselect */
	u32			csr;		/* precomputed CSRx image (clock, mode, bits) */
};

#define BUFFER_SIZE		PAGE_SIZE	/* size of the scratch bounce buffer */
#define INVALID_DMA_ADDRESS	0xffffffff	/* marker: buffer was never DMA-mapped by us */
217
218 /*
219  * Version 2 of the SPI controller has
220  *  - CR.LASTXFER
221  *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
222  *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
223  *  - SPI_CSRx.CSAAT
224  *  - SPI_CSRx.SBCR allows faster clocking
225  *
226  * We can determine the controller version by reading the VERSION
227  * register, but I haven't checked that it exists on all chips, and
228  * this is cheaper anyway.
229  */
230 static bool atmel_spi_is_v2(void)
231 {
232         return !cpu_is_at91rm9200();
233 }
234
235 /*
236  * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
237  * they assume that spi slave device state will not change on deselect, so
238  * that automagic deselection is OK.  ("NPCSx rises if no data is to be
239  * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
240  * controllers have CSAAT and friends.
241  *
242  * Since the CSAAT functionality is a bit weird on newer controllers as
243  * well, we use GPIO to control nCSx pins on all controllers, updating
244  * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
245  * support active-high chipselects despite the controller's belief that
246  * only active-low devices/systems exists.
247  *
248  * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
249  * right when driven with GPIO.  ("Mode Fault does not allow more than one
250  * Master on Chip Select 0.")  No workaround exists for that ... so for
251  * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
252  * and (c) will trigger that first erratum in some cases.
253  *
254  * TODO: Test if the atmel_spi_is_v2() branch below works on
255  * AT91RM9200 if we use some other register than CSR0. However, don't
256  * do this unconditionally since AP7000 has an errata where the BITS
257  * field in CSR0 overrides all other CSRs.
258  */
259
/*
 * Assert @spi's chipselect: program clock polarity/mode first, then
 * drive the CS GPIO, so the clock is idling correctly before CS moves.
 */
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;	/* nonzero for active-high CS */
	u32 mr;

	if (atmel_spi_is_v2()) {
		/*
		 * Always use CSR0. This ensures that the clock
		 * switches to the correct idle polarity before we
		 * toggle the CS.
		 */
		spi_writel(as, CSR0, asd->csr);
		/* PCS = 0x0e keeps the controller pointed at CSR0; the
		 * GPIO below does the actual chipselect. */
		spi_writel(as, MR, SPI_BF(PCS, 0x0e) | SPI_BIT(MODFDIS)
				| SPI_BIT(MSTR));
		mr = spi_readl(as, MR);
		gpio_set_value(asd->npcs_pin, active);
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct */
		for (i = 0; i < spi->master->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
						csr ^ SPI_BIT(CPOL));
		}

		/* MR.PCS is an active-low selection mask: clear this
		 * device's bit to select it. */
		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
		/* nCS0 on at91rm9200 can't be GPIO-driven (see erratum
		 * note above); let the controller handle it via MR.PCS. */
		if (spi->chip_select != 0)
			gpio_set_value(asd->npcs_pin, active);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (high)" : "",
			mr);
}
301
/*
 * Deassert @spi's chipselect and, if this device currently owns MR.PCS,
 * deselect it in the controller too.
 */
static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
		/* PCS = 0xf (all bits set) selects no slave */
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (low)" : "",
			mr);

	/* v1 nCS0 is not GPIO-driven (see erratum note above cs_activate) */
	if (atmel_spi_is_v2() || spi->chip_select != 0)
		gpio_set_value(asd->npcs_pin, !active);
}
324
325 static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
326                                         struct spi_transfer *xfer)
327 {
328         return msg->transfers.prev == &xfer->transfer_list;
329 }
330
331 static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
332 {
333         return xfer->delay_usecs == 0 && !xfer->cs_change;
334 }
335
/*
 * Compute the DMA addresses and clamp the length for the next chunk
 * of @xfer.
 *
 * @plen: in: bytes still to go in this transfer
 *        out: bytes covered by this chunk
 *
 * When the transfer lacks an rx_buf (or tx_buf), the shared scratch
 * buffer stands in for it, which caps the chunk at BUFFER_SIZE.  The
 * scratch tx side is zeroed so dummy bytes shift out as 0x00.
 */
static void atmel_spi_next_xfer_data(struct spi_master *master,
				struct spi_transfer *xfer,
				dma_addr_t *tx_dma,
				dma_addr_t *rx_dma,
				u32 *plen)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			len = *plen;

	/* use scratch buffer only when rx or tx data is unspecified */
	if (xfer->rx_buf)
		/* resume at the offset already completed */
		*rx_dma = xfer->rx_dma + xfer->len - *plen;
	else {
		*rx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}
	if (xfer->tx_buf)
		*tx_dma = xfer->tx_dma + xfer->len - *plen;
	else {
		*tx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		/* zero the dummy tx bytes and flush them for the device */
		memset(as->buffer, 0, len);
		dma_sync_single_for_device(&as->pdev->dev,
				as->buffer_dma, len, DMA_TO_DEVICE);
	}

	*plen = len;
}
366
367 /*
368  * Submit next transfer for DMA.
369  * lock is held, spi irq is blocked
370  */
static void atmel_spi_next_xfer(struct spi_master *master,
				struct spi_message *msg)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_transfer	*xfer;
	u32			len, remaining;
	u32			ieval;
	dma_addr_t		tx_dma, rx_dma;

	/* Pick what to program into the "current" PDC registers: the
	 * message's first transfer, the one after current_transfer, or
	 * NULL if it was already preloaded as "next" last time around.
	 */
	if (!as->current_transfer)
		xfer = list_entry(msg->transfers.next,
				struct spi_transfer, transfer_list);
	else if (!as->next_transfer)
		xfer = list_entry(as->current_transfer->transfer_list.next,
				struct spi_transfer, transfer_list);
	else
		xfer = NULL;

	if (xfer) {
		/* stop the PDC while we reprogram pointers/counters */
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

		len = xfer->len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		remaining = xfer->len - len;

		spi_writel(as, RPR, rx_dma);
		spi_writel(as, TPR, tx_dma);

		/* PDC counters count words, not bytes, above 8 bpw */
		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RCR, len);
		spi_writel(as, TCR, len);

		dev_dbg(&msg->spi->dev,
			"  start xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	} else {
		/* the previously preloaded "next" transfer is now current */
		xfer = as->next_transfer;
		remaining = as->next_remaining_bytes;
	}

	as->current_transfer = xfer;
	as->current_remaining_bytes = remaining;

	/* Decide what to preload into the "next" PDC registers: the
	 * remainder of this (bounce-buffered) transfer, or a chainable
	 * following transfer.
	 */
	if (remaining > 0)
		len = remaining;
	else if (!atmel_spi_xfer_is_last(msg, xfer)
			&& atmel_spi_xfer_can_be_chained(xfer)) {
		xfer = list_entry(xfer->transfer_list.next,
				struct spi_transfer, transfer_list);
		len = xfer->len;
	} else
		xfer = NULL;

	as->next_transfer = xfer;

	if (xfer) {
		u32	total;

		total = len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->next_remaining_bytes = total - len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
		ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	} else {
		/* nothing chained: interrupt when both rx buffers drain */
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	}

	/* REVISIT: We're waiting for ENDRX before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for ENDTX in one transfer and
	 * then starts waiting for ENDRX in the next, it's difficult
	 * to tell the difference between the ENDRX interrupt we're
	 * actually waiting for and the ENDRX interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, ieval);
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
467
/*
 * Start the message at the head of as->queue.
 * Called with as->lock held and no transfer in flight.
 */
static void atmel_spi_next_message(struct spi_master *master)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_device	*spi;

	BUG_ON(as->current_transfer);

	msg = list_entry(as->queue.next, struct spi_message, queue);
	spi = msg->spi;

	dev_dbg(master->dev.parent, "start message %p for %s\n",
			msg, dev_name(&spi->dev));

	/* select chip if it's not still active */
	if (as->stay) {
		if (as->stay != spi) {
			/* a different chip was left selected: swap CS */
			cs_deactivate(as, as->stay);
			cs_activate(as, spi);
		}
		as->stay = NULL;
	} else
		cs_activate(as, spi);

	atmel_spi_next_xfer(master, msg);
}
494
495 /*
496  * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
497  *  - The buffer is either valid for CPU access, else NULL
498  *  - If the buffer is valid, so is its DMA address
499  *
500  * This driver manages the dma address unless message->is_dma_mapped.
501  */
502 static int
503 atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
504 {
505         struct device   *dev = &as->pdev->dev;
506
507         xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
508         if (xfer->tx_buf) {
509                 /* tx_buf is a const void* where we need a void * for the dma
510                  * mapping */
511                 void *nonconst_tx = (void *)xfer->tx_buf;
512
513                 xfer->tx_dma = dma_map_single(dev,
514                                 nonconst_tx, xfer->len,
515                                 DMA_TO_DEVICE);
516                 if (dma_mapping_error(dev, xfer->tx_dma))
517                         return -ENOMEM;
518         }
519         if (xfer->rx_buf) {
520                 xfer->rx_dma = dma_map_single(dev,
521                                 xfer->rx_buf, xfer->len,
522                                 DMA_FROM_DEVICE);
523                 if (dma_mapping_error(dev, xfer->rx_dma)) {
524                         if (xfer->tx_buf)
525                                 dma_unmap_single(dev,
526                                                 xfer->tx_dma, xfer->len,
527                                                 DMA_TO_DEVICE);
528                         return -ENOMEM;
529                 }
530         }
531         return 0;
532 }
533
534 static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
535                                      struct spi_transfer *xfer)
536 {
537         if (xfer->tx_dma != INVALID_DMA_ADDRESS)
538                 dma_unmap_single(master->dev.parent, xfer->tx_dma,
539                                  xfer->len, DMA_TO_DEVICE);
540         if (xfer->rx_dma != INVALID_DMA_ADDRESS)
541                 dma_unmap_single(master->dev.parent, xfer->rx_dma,
542                                  xfer->len, DMA_FROM_DEVICE);
543 }
544
/*
 * Finish @msg with @status, optionally leaving the chip selected
 * (@stay nonzero and no error), then start the next queued message.
 *
 * Called with as->lock held; the lock is dropped around the caller's
 * completion callback, which may submit new messages.
 */
static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
		struct spi_message *msg, int status, int stay)
{
	if (!stay || status < 0)
		cs_deactivate(as, msg->spi);
	else
		as->stay = msg->spi;	/* keep CS asserted across messages */

	list_del(&msg->queue);
	msg->status = status;

	dev_dbg(master->dev.parent,
		"xfer complete: %u bytes transferred\n",
		msg->actual_length);

	/* drop the lock while running the completion callback */
	spin_unlock(&as->lock);
	msg->complete(msg->context);
	spin_lock(&as->lock);

	as->current_transfer = NULL;
	as->next_transfer = NULL;

	/* continue if needed */
	if (list_empty(&as->queue) || as->stopping)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	else
		atmel_spi_next_message(master);
}
574
/*
 * IRQ handler: services receive overruns and end-of-DMA events,
 * advancing the message/transfer state machine under as->lock.
 */
static irqreturn_t
atmel_spi_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_transfer	*xfer;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	spin_lock(&as->lock);

	xfer = as->current_transfer;
	msg = list_entry(as->queue.next, struct spi_message, queue);

	/* only act on interrupt sources we actually enabled */
	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		int timeout;

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
				     | SPI_BIT(OVRES)));

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_len will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaning transfers in
		 * the message.
		 *
		 * First, stop the transfer and unmap the DMA buffers.
		 */
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
		if (!msg->is_dma_mapped)
			atmel_spi_dma_unmap_xfer(master, xfer);

		/* REVISIT: udelay in irq is unfriendly */
		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
			 spi_readl(as, TCR), spi_readl(as, RCR));

		/*
		 * Clean up DMA registers and make sure the data
		 * registers are empty.
		 */
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		spi_writel(as, RCR, 0);
		spi_writel(as, TCR, 0);
		for (timeout = 1000; timeout; timeout--)
			if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
				break;
		if (!timeout)
			dev_warn(master->dev.parent,
				 "timeout waiting for TXEMPTY");
		/* drain any bytes still sitting in the receive register */
		while (spi_readl(as, SR) & SPI_BIT(RDRF))
			spi_readl(as, RDR);

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		atmel_spi_msg_done(master, as, msg, -EIO, 0);
	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
		/* receive-side DMA completed for the programmed chunk */
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		if (as->current_remaining_bytes == 0) {
			msg->actual_length += xfer->len;

			if (!msg->is_dma_mapped)
				atmel_spi_dma_unmap_xfer(master, xfer);

			/* REVISIT: udelay in irq is unfriendly */
			if (xfer->delay_usecs)
				udelay(xfer->delay_usecs);

			if (atmel_spi_xfer_is_last(msg, xfer)) {
				/* report completed message */
				atmel_spi_msg_done(master, as, msg, 0,
						xfer->cs_change);
			} else {
				if (xfer->cs_change) {
					cs_deactivate(as, msg->spi);
					udelay(1);
					cs_activate(as, msg->spi);
				}

				/*
				 * Not done yet. Submit the next transfer.
				 *
				 * FIXME handle protocol options for xfer
				 */
				atmel_spi_next_xfer(master, msg);
			}
		} else {
			/*
			 * Keep going, we still have data to send in
			 * the current transfer.
			 */
			atmel_spi_next_xfer(master, msg);
		}
	}

	spin_unlock(&as->lock);

	return ret;
}
691
692 static int atmel_spi_setup(struct spi_device *spi)
693 {
694         struct atmel_spi        *as;
695         struct atmel_spi_device *asd;
696         u32                     scbr, csr;
697         unsigned int            bits = spi->bits_per_word;
698         unsigned long           bus_hz;
699         unsigned int            npcs_pin;
700         int                     ret;
701
702         as = spi_master_get_devdata(spi->master);
703
704         if (as->stopping)
705                 return -ESHUTDOWN;
706
707         if (spi->chip_select > spi->master->num_chipselect) {
708                 dev_dbg(&spi->dev,
709                                 "setup: invalid chipselect %u (%u defined)\n",
710                                 spi->chip_select, spi->master->num_chipselect);
711                 return -EINVAL;
712         }
713
714         if (bits < 8 || bits > 16) {
715                 dev_dbg(&spi->dev,
716                                 "setup: invalid bits_per_word %u (8 to 16)\n",
717                                 bits);
718                 return -EINVAL;
719         }
720
721         /* see notes above re chipselect */
722         if (!atmel_spi_is_v2()
723                         && spi->chip_select == 0
724                         && (spi->mode & SPI_CS_HIGH)) {
725                 dev_dbg(&spi->dev, "setup: can't be active-high\n");
726                 return -EINVAL;
727         }
728
729         /* v1 chips start out at half the peripheral bus speed. */
730         bus_hz = clk_get_rate(as->clk);
731         if (!atmel_spi_is_v2())
732                 bus_hz /= 2;
733
734         if (spi->max_speed_hz) {
735                 /*
736                  * Calculate the lowest divider that satisfies the
737                  * constraint, assuming div32/fdiv/mbz == 0.
738                  */
739                 scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);
740
741                 /*
742                  * If the resulting divider doesn't fit into the
743                  * register bitfield, we can't satisfy the constraint.
744                  */
745                 if (scbr >= (1 << SPI_SCBR_SIZE)) {
746                         dev_dbg(&spi->dev,
747                                 "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
748                                 spi->max_speed_hz, scbr, bus_hz/255);
749                         return -EINVAL;
750                 }
751         } else
752                 /* speed zero means "as slow as possible" */
753                 scbr = 0xff;
754
755         csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
756         if (spi->mode & SPI_CPOL)
757                 csr |= SPI_BIT(CPOL);
758         if (!(spi->mode & SPI_CPHA))
759                 csr |= SPI_BIT(NCPHA);
760
761         /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
762          *
763          * DLYBCT would add delays between words, slowing down transfers.
764          * It could potentially be useful to cope with DMA bottlenecks, but
765          * in those cases it's probably best to just use a lower bitrate.
766          */
767         csr |= SPI_BF(DLYBS, 0);
768         csr |= SPI_BF(DLYBCT, 0);
769
770         /* chipselect must have been muxed as GPIO (e.g. in board setup) */
771         npcs_pin = (unsigned int)spi->controller_data;
772
773         if (gpio_is_valid(spi->cs_gpio))
774                 npcs_pin = spi->cs_gpio;
775
776         asd = spi->controller_state;
777         if (!asd) {
778                 asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
779                 if (!asd)
780                         return -ENOMEM;
781
782                 ret = gpio_request(npcs_pin, dev_name(&spi->dev));
783                 if (ret) {
784                         kfree(asd);
785                         return ret;
786                 }
787
788                 asd->npcs_pin = npcs_pin;
789                 spi->controller_state = asd;
790                 gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
791         } else {
792                 unsigned long           flags;
793
794                 spin_lock_irqsave(&as->lock, flags);
795                 if (as->stay == spi)
796                         as->stay = NULL;
797                 cs_deactivate(as, spi);
798                 spin_unlock_irqrestore(&as->lock, flags);
799         }
800
801         asd->csr = csr;
802
803         dev_dbg(&spi->dev,
804                 "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
805                 bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);
806
807         if (!atmel_spi_is_v2())
808                 spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
809
810         return 0;
811 }
812
/*
 * atmel_spi_transfer - validate and queue a message for transmission.
 *
 * Each transfer in the message is sanity-checked and DMA-mapped up
 * front, then the message is appended to the driver queue; if the
 * controller is idle the first transfer is started immediately.
 * Completion is reported asynchronously through msg->complete().
 *
 * Returns 0 on successful queueing, -EINVAL on an empty message or a
 * non-empty transfer with no buffers, -ESHUTDOWN while the driver is
 * being removed, -ENOPROTOOPT for unsupported per-transfer options and
 * -ENOMEM on DMA mapping failure.
 */
static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct atmel_spi	*as;
	struct spi_transfer	*xfer;
	unsigned long		flags;
	struct device		*controller = spi->master->dev.parent;
	u8			bits;
	struct atmel_spi_device	*asd;

	as = spi_master_get_devdata(spi->master);

	dev_dbg(controller, "new message %p submitted for %s\n",
			msg, dev_name(&spi->dev));

	if (unlikely(list_empty(&msg->transfers)))
		return -EINVAL;

	/* Set by atmel_spi_remove(): refuse new work during teardown. */
	if (as->stopping)
		return -ESHUTDOWN;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* A transfer with a non-zero length needs at least one buffer. */
		if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			dev_dbg(&spi->dev, "missing rx or tx buf\n");
			return -EINVAL;
		}

		/*
		 * Word size is fixed at setup time.  Extract bits 7:4 of the
		 * cached CSR value (written as SPI_BF(BITS, bits - 8) in
		 * atmel_spi_setup()) and require any per-transfer override
		 * to match it.
		 */
		if (xfer->bits_per_word) {
			asd = spi->controller_state;
			bits = (asd->csr >> 4) & 0xf;
			if (bits != xfer->bits_per_word - 8) {
				dev_dbg(&spi->dev, "you can't yet change "
					 "bits_per_word in transfers\n");
				return -ENOPROTOOPT;
			}
		}

		/* FIXME implement these protocol options!! */
		if (xfer->speed_hz) {
			dev_dbg(&spi->dev, "no protocol options yet\n");
			return -ENOPROTOOPT;
		}

		/*
		 * DMA map early, for performance (empties dcache ASAP) and
		 * better fault reporting.  This is a DMA-only driver.
		 *
		 * NOTE that if dma_unmap_single() ever starts to do work on
		 * platforms supported by this driver, we would need to clean
		 * up mappings for previously-mapped transfers.
		 */
		if (!msg->is_dma_mapped) {
			if (atmel_spi_dma_map_xfer(as, xfer) < 0)
				return -ENOMEM;
		}
	}

#ifdef VERBOSE
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_dbg(controller,
			"  xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len,
			xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	}
#endif

	msg->status = -EINPROGRESS;
	msg->actual_length = 0;

	/* Queue the message; kick the hardware only if it is idle. */
	spin_lock_irqsave(&as->lock, flags);
	list_add_tail(&msg->queue, &as->queue);
	if (!as->current_transfer)
		atmel_spi_next_message(spi->master);
	spin_unlock_irqrestore(&as->lock, flags);

	return 0;
}
890
891 static void atmel_spi_cleanup(struct spi_device *spi)
892 {
893         struct atmel_spi        *as = spi_master_get_devdata(spi->master);
894         struct atmel_spi_device *asd = spi->controller_state;
895         unsigned                gpio = (unsigned) spi->controller_data;
896         unsigned long           flags;
897
898         if (!asd)
899                 return;
900
901         spin_lock_irqsave(&as->lock, flags);
902         if (as->stay == spi) {
903                 as->stay = NULL;
904                 cs_deactivate(as, spi);
905         }
906         spin_unlock_irqrestore(&as->lock, flags);
907
908         spi->controller_state = NULL;
909         gpio_free(gpio);
910         kfree(asd);
911 }
912
913 /*-------------------------------------------------------------------------*/
914
/*
 * atmel_spi_probe - bind the driver to one SPI controller instance.
 *
 * Gathers platform resources (MMIO window, IRQ, peripheral clock),
 * allocates the SPI master and driver state, resets and enables the
 * hardware, and registers the master with the SPI core.  Error paths
 * unwind in reverse acquisition order via the goto chain at the bottom.
 *
 * Returns 0 on success or a negative errno.
 */
static int atmel_spi_probe(struct platform_device *pdev)
{
	struct resource		*regs;
	int			irq;
	struct clk		*clk;
	int			ret;
	struct spi_master	*master;
	struct atmel_spi	*as;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	ret = -ENOMEM;
	master = spi_alloc_master(&pdev->dev, sizeof *as);
	if (!master)
		goto out_free;

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	/* 0 lets the SPI core size chipselects from DT; 4 CSRs otherwise. */
	master->num_chipselect = master->dev.of_node ? 0 : 4;
	master->setup = atmel_spi_setup;
	master->transfer = atmel_spi_transfer;
	master->cleanup = atmel_spi_cleanup;
	platform_set_drvdata(pdev, master);

	as = spi_master_get_devdata(master);

	/*
	 * Scratch buffer is used for throwaway rx and tx data.
	 * It's coherent to minimize dcache pollution.
	 */
	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					&as->buffer_dma, GFP_KERNEL);
	if (!as->buffer)
		goto out_free;

	spin_lock_init(&as->lock);
	INIT_LIST_HEAD(&as->queue);
	as->pdev = pdev;
	as->regs = ioremap(regs->start, resource_size(regs));
	if (!as->regs)
		goto out_free_buffer;	/* still returns the -ENOMEM set above */
	as->irq = irq;
	as->clk = clk;

	ret = request_irq(irq, atmel_spi_interrupt, 0,
			dev_name(&pdev->dev), master);
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	clk_enable(clk);
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
	/* keep PDC rx/tx disabled until a transfer actually starts */
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
			(unsigned long)regs->start, irq);

	ret = spi_register_master(master);
	if (ret)
		goto out_reset_hw;

	return 0;

out_reset_hw:
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	clk_disable(clk);
	free_irq(irq, master);
out_unmap_regs:
	iounmap(as->regs);
out_free_buffer:
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);
out_free:
	/* NOTE(review): the !master path relies on spi_master_put()
	 * tolerating a NULL argument — confirm against linux/spi/spi.h. */
	clk_put(clk);
	spi_master_put(master);
	return ret;
}
1011
/*
 * atmel_spi_remove - unbind the driver: quiesce the hardware, fail all
 * queued messages with -ESHUTDOWN, and release everything probe acquired.
 */
static int atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;

	/* reset the hardware and block queue progress */
	spin_lock_irq(&as->lock);
	as->stopping = 1;	/* makes atmel_spi_transfer() return -ESHUTDOWN */
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	/* status read, result discarded — presumably acks pending events;
	 * confirm against the controller datasheet */
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	/* Terminate remaining queued transfers */
	list_for_each_entry(msg, &as->queue, queue) {
		/* REVISIT unmapping the dma is a NOP on ARM and AVR32
		 * but we shouldn't depend on that...
		 */
		msg->status = -ESHUTDOWN;
		msg->complete(msg->context);
	}

	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);

	clk_disable(as->clk);
	clk_put(as->clk);
	free_irq(as->irq, master);
	iounmap(as->regs);

	spi_unregister_master(master);

	return 0;
}
1047
1048 #ifdef  CONFIG_PM
1049
1050 static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
1051 {
1052         struct spi_master       *master = platform_get_drvdata(pdev);
1053         struct atmel_spi        *as = spi_master_get_devdata(master);
1054
1055         clk_disable(as->clk);
1056         return 0;
1057 }
1058
1059 static int atmel_spi_resume(struct platform_device *pdev)
1060 {
1061         struct spi_master       *master = platform_get_drvdata(pdev);
1062         struct atmel_spi        *as = spi_master_get_devdata(master);
1063
1064         clk_enable(as->clk);
1065         return 0;
1066 }
1067
1068 #else
1069 #define atmel_spi_suspend       NULL
1070 #define atmel_spi_resume        NULL
1071 #endif
1072
#if defined(CONFIG_OF)
/* Devicetree match table: binds "atmel,at91rm9200-spi" nodes to this driver. */
static const struct of_device_id atmel_spi_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-spi" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
#endif
1081
/*
 * Platform-driver glue.  Legacy .suspend/.resume callbacks are used
 * rather than dev_pm_ops; of_match_ptr() evaluates to NULL when
 * CONFIG_OF is disabled, matching the #if guard on the table above.
 */
static struct platform_driver atmel_spi_driver = {
	.driver		= {
		.name	= "atmel_spi",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(atmel_spi_dt_ids),
	},
	.suspend	= atmel_spi_suspend,
	.resume		= atmel_spi_resume,
	.probe		= atmel_spi_probe,
	.remove		= atmel_spi_remove,
};
/* Expands to module init/exit that register/unregister the driver. */
module_platform_driver(atmel_spi_driver);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_spi");