drivers/spi/spi-s3c64xx.c (platform/kernel/linux-rpi.git)
1 // SPDX-License-Identifier: GPL-2.0+
2 //
3 // Copyright (c) 2009 Samsung Electronics Co., Ltd.
4 //      Jaswinder Singh <jassi.brar@samsung.com>
5
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/interrupt.h>
9 #include <linux/delay.h>
10 #include <linux/clk.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/spi/spi.h>
16 #include <linux/gpio.h>
17 #include <linux/of.h>
18 #include <linux/of_device.h>
19 #include <linux/of_gpio.h>
20
21 #include <linux/platform_data/spi-s3c64xx.h>
22
23 #define MAX_SPI_PORTS           6
24 #define S3C64XX_SPI_QUIRK_POLL          (1 << 0)
25 #define S3C64XX_SPI_QUIRK_CS_AUTO       (1 << 1)
26 #define AUTOSUSPEND_TIMEOUT     2000
27
28 /* Registers and bit-fields */
29
30 #define S3C64XX_SPI_CH_CFG              0x00
31 #define S3C64XX_SPI_CLK_CFG             0x04
32 #define S3C64XX_SPI_MODE_CFG            0x08
33 #define S3C64XX_SPI_CS_REG              0x0C
34 #define S3C64XX_SPI_INT_EN              0x10
35 #define S3C64XX_SPI_STATUS              0x14
36 #define S3C64XX_SPI_TX_DATA             0x18
37 #define S3C64XX_SPI_RX_DATA             0x1C
38 #define S3C64XX_SPI_PACKET_CNT          0x20
39 #define S3C64XX_SPI_PENDING_CLR         0x24
40 #define S3C64XX_SPI_SWAP_CFG            0x28
41 #define S3C64XX_SPI_FB_CLK              0x2C
42
43 #define S3C64XX_SPI_CH_HS_EN            (1<<6)  /* High Speed Enable */
44 #define S3C64XX_SPI_CH_SW_RST           (1<<5)
45 #define S3C64XX_SPI_CH_SLAVE            (1<<4)
46 #define S3C64XX_SPI_CPOL_L              (1<<3)
47 #define S3C64XX_SPI_CPHA_B              (1<<2)
48 #define S3C64XX_SPI_CH_RXCH_ON          (1<<1)
49 #define S3C64XX_SPI_CH_TXCH_ON          (1<<0)
50
51 #define S3C64XX_SPI_CLKSEL_SRCMSK       (3<<9)
52 #define S3C64XX_SPI_CLKSEL_SRCSHFT      9
53 #define S3C64XX_SPI_ENCLK_ENABLE        (1<<8)
54 #define S3C64XX_SPI_PSR_MASK            0xff
55
56 #define S3C64XX_SPI_MODE_CH_TSZ_BYTE            (0<<29)
57 #define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD        (1<<29)
58 #define S3C64XX_SPI_MODE_CH_TSZ_WORD            (2<<29)
59 #define S3C64XX_SPI_MODE_CH_TSZ_MASK            (3<<29)
60 #define S3C64XX_SPI_MODE_BUS_TSZ_BYTE           (0<<17)
61 #define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD       (1<<17)
62 #define S3C64XX_SPI_MODE_BUS_TSZ_WORD           (2<<17)
63 #define S3C64XX_SPI_MODE_BUS_TSZ_MASK           (3<<17)
64 #define S3C64XX_SPI_MODE_RXDMA_ON               (1<<2)
65 #define S3C64XX_SPI_MODE_TXDMA_ON               (1<<1)
66 #define S3C64XX_SPI_MODE_4BURST                 (1<<0)
67
68 #define S3C64XX_SPI_CS_NSC_CNT_2                (2<<4)
69 #define S3C64XX_SPI_CS_AUTO                     (1<<1)
70 #define S3C64XX_SPI_CS_SIG_INACT                (1<<0)
71
72 #define S3C64XX_SPI_INT_TRAILING_EN             (1<<6)
73 #define S3C64XX_SPI_INT_RX_OVERRUN_EN           (1<<5)
74 #define S3C64XX_SPI_INT_RX_UNDERRUN_EN          (1<<4)
75 #define S3C64XX_SPI_INT_TX_OVERRUN_EN           (1<<3)
76 #define S3C64XX_SPI_INT_TX_UNDERRUN_EN          (1<<2)
77 #define S3C64XX_SPI_INT_RX_FIFORDY_EN           (1<<1)
78 #define S3C64XX_SPI_INT_TX_FIFORDY_EN           (1<<0)
79
80 #define S3C64XX_SPI_ST_RX_OVERRUN_ERR           (1<<5)
81 #define S3C64XX_SPI_ST_RX_UNDERRUN_ERR          (1<<4)
82 #define S3C64XX_SPI_ST_TX_OVERRUN_ERR           (1<<3)
83 #define S3C64XX_SPI_ST_TX_UNDERRUN_ERR          (1<<2)
84 #define S3C64XX_SPI_ST_RX_FIFORDY               (1<<1)
85 #define S3C64XX_SPI_ST_TX_FIFORDY               (1<<0)
86
87 #define S3C64XX_SPI_PACKET_CNT_EN               (1<<16)
88 #define S3C64XX_SPI_PACKET_CNT_MASK             GENMASK(15, 0)
89
90 #define S3C64XX_SPI_PND_TX_UNDERRUN_CLR         (1<<4)
91 #define S3C64XX_SPI_PND_TX_OVERRUN_CLR          (1<<3)
92 #define S3C64XX_SPI_PND_RX_UNDERRUN_CLR         (1<<2)
93 #define S3C64XX_SPI_PND_RX_OVERRUN_CLR          (1<<1)
94 #define S3C64XX_SPI_PND_TRAILING_CLR            (1<<0)
95
96 #define S3C64XX_SPI_SWAP_RX_HALF_WORD           (1<<7)
97 #define S3C64XX_SPI_SWAP_RX_BYTE                (1<<6)
98 #define S3C64XX_SPI_SWAP_RX_BIT                 (1<<5)
99 #define S3C64XX_SPI_SWAP_RX_EN                  (1<<4)
100 #define S3C64XX_SPI_SWAP_TX_HALF_WORD           (1<<3)
101 #define S3C64XX_SPI_SWAP_TX_BYTE                (1<<2)
102 #define S3C64XX_SPI_SWAP_TX_BIT                 (1<<1)
103 #define S3C64XX_SPI_SWAP_TX_EN                  (1<<0)
104
105 #define S3C64XX_SPI_FBCLK_MSK                   (3<<0)
106
107 #define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
108 #define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
109                                 (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
110 #define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
111 #define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
112                                         FIFO_LVL_MASK(i))
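/*
 * Note on the FIFO level helpers above (derived from the macros here, not
 * from a hardware manual): fifo_lvl_mask holds the per-port mask of the
 * {TX|RX}_FIFO_LVL field in SPI_STATUS, so the usable FIFO depth in bytes
 * is (mask >> 1) + 1.  For example, a mask of 0x7f means a 64-byte FIFO and
 * a mask of 0x1ff means a 256-byte FIFO.  TX_FIFO_LVL and RX_FIFO_LVL
 * extract the current fill level from a SPI_STATUS value.
 */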
113
114 #define S3C64XX_SPI_MAX_TRAILCNT        0x3ff
115 #define S3C64XX_SPI_TRAILCNT_OFF        19
116
117 #define S3C64XX_SPI_TRAILCNT            S3C64XX_SPI_MAX_TRAILCNT
118
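/*
 * msecs_to_loops() converts a millisecond budget into a rough busy-wait
 * iteration count based on loops_per_jiffy; it is only used to bound the
 * polling loops below and makes no claim to wall-clock accuracy.
 */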
119 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
120 #define is_polling(x)   (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
121
122 #define RXBUSY    (1<<2)
123 #define TXBUSY    (1<<3)
124
125 struct s3c64xx_spi_dma_data {
126         struct dma_chan *ch;
127         dma_cookie_t cookie;
128         enum dma_transfer_direction direction;
129 };
130
131 /**
132  * struct s3c64xx_spi_port_config - SPI Controller hardware info
133  * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
134  * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
135  * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
136  * @quirks: Bitmask of known quirks
137  * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
138  * @clk_from_cmu: True, if the controller does not include a clock mux and
139  *      prescaler unit.
140  * @clk_ioclk: True if the controller uses a separate I/O clock (spi_ioclk).
141  *
142  * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
143  * differ in some aspects such as the size of the FIFO and the SPI bus clock
144  * setup. Such differences are described to the driver by this structure,
145  * which is passed to the driver as driver data.
146  */
147 struct s3c64xx_spi_port_config {
148         int     fifo_lvl_mask[MAX_SPI_PORTS];
149         int     rx_lvl_offset;
150         int     tx_st_done;
151         int     quirks;
152         bool    high_speed;
153         bool    clk_from_cmu;
154         bool    clk_ioclk;
155 };
156
157 /**
158  * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
159  * @clk: Pointer to the spi clock.
160  * @src_clk: Pointer to the clock used to generate SPI signals.
161  * @ioclk: Pointer to the i/o clock between master and slave
162  * @pdev: Pointer to device's platform device data
163  * @master: Pointer to the SPI Protocol master.
164  * @cntrlr_info: Platform specific data for the controller this driver manages.
165  * @lock: Controller specific lock.
166  * @state: Set of FLAGS to indicate status.
167  * @sfr_start: BUS address of SPI controller regs.
168  * @regs: Pointer to ioremap'ed controller registers.
169  * @xfer_completion: To indicate completion of xfer task.
170  * @cur_mode: Stores the active configuration of the controller.
171  * @cur_bpw: Stores the active bits per word settings.
172  * @cur_speed: Current clock speed
173  * @rx_dma: Local receive DMA data (e.g. chan and direction)
174  * @tx_dma: Local transmit DMA data (e.g. chan and direction)
175  * @port_conf: Local SPI port configuration data
176  * @port_id: Port identification number
177  */
178 struct s3c64xx_spi_driver_data {
179         void __iomem                    *regs;
180         struct clk                      *clk;
181         struct clk                      *src_clk;
182         struct clk                      *ioclk;
183         struct platform_device          *pdev;
184         struct spi_master               *master;
185         struct s3c64xx_spi_info         *cntrlr_info;
186         spinlock_t                      lock;
187         unsigned long                   sfr_start;
188         struct completion               xfer_completion;
189         unsigned                        state;
190         unsigned                        cur_mode, cur_bpw;
191         unsigned                        cur_speed;
192         struct s3c64xx_spi_dma_data     rx_dma;
193         struct s3c64xx_spi_dma_data     tx_dma;
194         const struct s3c64xx_spi_port_config    *port_conf;
195         unsigned int                    port_id;
196 };
197
198 static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
199 {
200         void __iomem *regs = sdd->regs;
201         unsigned long loops;
202         u32 val;
203
204         writel(0, regs + S3C64XX_SPI_PACKET_CNT);
205
206         val = readl(regs + S3C64XX_SPI_CH_CFG);
207         val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
208         writel(val, regs + S3C64XX_SPI_CH_CFG);
209
210         val = readl(regs + S3C64XX_SPI_CH_CFG);
211         val |= S3C64XX_SPI_CH_SW_RST;
212         val &= ~S3C64XX_SPI_CH_HS_EN;
213         writel(val, regs + S3C64XX_SPI_CH_CFG);
214
215         /* Flush TxFIFO */
216         loops = msecs_to_loops(1);
217         do {
218                 val = readl(regs + S3C64XX_SPI_STATUS);
219         } while (TX_FIFO_LVL(val, sdd) && loops--);
220
221         if (loops == 0)
222                 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
223
224         /* Flush RxFIFO */
225         loops = msecs_to_loops(1);
226         do {
227                 val = readl(regs + S3C64XX_SPI_STATUS);
228                 if (RX_FIFO_LVL(val, sdd))
229                         readl(regs + S3C64XX_SPI_RX_DATA);
230                 else
231                         break;
232         } while (loops--);
233
234         if (loops == 0)
235                 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
236
237         val = readl(regs + S3C64XX_SPI_CH_CFG);
238         val &= ~S3C64XX_SPI_CH_SW_RST;
239         writel(val, regs + S3C64XX_SPI_CH_CFG);
240
241         val = readl(regs + S3C64XX_SPI_MODE_CFG);
242         val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
243         writel(val, regs + S3C64XX_SPI_MODE_CFG);
244 }
245
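/*
 * DMA completion callback, shared by the RX and TX channels.  The RXBUSY/
 * TXBUSY flags track which directions are still outstanding; xfer_completion
 * is only completed once both directions (if used) have finished, which is
 * what s3c64xx_wait_for_dma() waits on.
 */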
246 static void s3c64xx_spi_dmacb(void *data)
247 {
248         struct s3c64xx_spi_driver_data *sdd;
249         struct s3c64xx_spi_dma_data *dma = data;
250         unsigned long flags;
251
252         if (dma->direction == DMA_DEV_TO_MEM)
253                 sdd = container_of(data,
254                         struct s3c64xx_spi_driver_data, rx_dma);
255         else
256                 sdd = container_of(data,
257                         struct s3c64xx_spi_driver_data, tx_dma);
258
259         spin_lock_irqsave(&sdd->lock, flags);
260
261         if (dma->direction == DMA_DEV_TO_MEM) {
262                 sdd->state &= ~RXBUSY;
263                 if (!(sdd->state & TXBUSY))
264                         complete(&sdd->xfer_completion);
265         } else {
266                 sdd->state &= ~TXBUSY;
267                 if (!(sdd->state & RXBUSY))
268                         complete(&sdd->xfer_completion);
269         }
270
271         spin_unlock_irqrestore(&sdd->lock, flags);
272 }
273
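/*
 * Program the slave DMA channel for one direction of a transfer and submit
 * the descriptor.  The register address and bus width come from the
 * controller state: src/dst_addr points at SPI_RX_DATA or SPI_TX_DATA and
 * the address width follows the current bits-per-word setting.
 */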
274 static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
275                         struct sg_table *sgt)
276 {
277         struct s3c64xx_spi_driver_data *sdd;
278         struct dma_slave_config config;
279         struct dma_async_tx_descriptor *desc;
280         int ret;
281
282         memset(&config, 0, sizeof(config));
283
284         if (dma->direction == DMA_DEV_TO_MEM) {
285                 sdd = container_of((void *)dma,
286                         struct s3c64xx_spi_driver_data, rx_dma);
287                 config.direction = dma->direction;
288                 config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
289                 config.src_addr_width = sdd->cur_bpw / 8;
290                 config.src_maxburst = 1;
291                 dmaengine_slave_config(dma->ch, &config);
292         } else {
293                 sdd = container_of((void *)dma,
294                         struct s3c64xx_spi_driver_data, tx_dma);
295                 config.direction = dma->direction;
296                 config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
297                 config.dst_addr_width = sdd->cur_bpw / 8;
298                 config.dst_maxburst = 1;
299                 dmaengine_slave_config(dma->ch, &config);
300         }
301
302         desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
303                                        dma->direction, DMA_PREP_INTERRUPT);
304         if (!desc) {
305                 dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
306                         dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
307                 return -ENOMEM;
308         }
309
310         desc->callback = s3c64xx_spi_dmacb;
311         desc->callback_param = dma;
312
313         dma->cookie = dmaengine_submit(desc);
314         ret = dma_submit_error(dma->cookie);
315         if (ret) {
316                 dev_err(&sdd->pdev->dev, "DMA submission failed");
317                 return -EIO;
318         }
319
320         dma_async_issue_pending(dma->ch);
321         return 0;
322 }
323
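/*
 * Chip-select handling: controllers with the CS_AUTO quirk let the hardware
 * toggle /CS around each transfer (S3C64XX_SPI_CS_AUTO), while older ones
 * are driven manually by writing 0 (active) or S3C64XX_SPI_CS_SIG_INACT to
 * the CS register.  When no_cs is set the register is left untouched.
 */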
324 static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
325 {
326         struct s3c64xx_spi_driver_data *sdd =
327                                         spi_master_get_devdata(spi->master);
328
329         if (sdd->cntrlr_info->no_cs)
330                 return;
331
332         if (enable) {
333                 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
334                         writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
335                 } else {
336                         u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);
337
338                         ssel |= (S3C64XX_SPI_CS_AUTO |
339                                                 S3C64XX_SPI_CS_NSC_CNT_2);
340                         writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
341                 }
342         } else {
343                 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
344                         writel(S3C64XX_SPI_CS_SIG_INACT,
345                                sdd->regs + S3C64XX_SPI_CS_REG);
346         }
347 }
348
349 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
350 {
351         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
352
353         if (is_polling(sdd))
354                 return 0;
355
356         spi->dma_rx = sdd->rx_dma.ch;
357         spi->dma_tx = sdd->tx_dma.ch;
358
359         return 0;
360 }
361
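/*
 * DMA is only used when the transfer does not fit in the FIFO: the threshold
 * below is the FIFO depth in bytes, (FIFO_LVL_MASK >> 1) + 1, so anything up
 * to one FIFO's worth is handled by PIO instead.
 */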
362 static bool s3c64xx_spi_can_dma(struct spi_master *master,
363                                 struct spi_device *spi,
364                                 struct spi_transfer *xfer)
365 {
366         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
367
368         return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
369 }
370
371 static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
372                                     struct spi_transfer *xfer, int dma_mode)
373 {
374         void __iomem *regs = sdd->regs;
375         u32 modecfg, chcfg;
376         int ret = 0;
377
378         modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
379         modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
380
381         chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
382         chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
383
384         if (dma_mode) {
385                 chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
386         } else {
387                 /* Always shift data into the RX FIFO, even if the xfer is
388                  * Tx only; this lets the PCKT_CNT value generate exactly
389                  * the number of clocks needed.
390                  */
391                 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
392                 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
393                                         | S3C64XX_SPI_PACKET_CNT_EN,
394                                         regs + S3C64XX_SPI_PACKET_CNT);
395         }
396
397         if (xfer->tx_buf != NULL) {
398                 sdd->state |= TXBUSY;
399                 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
400                 if (dma_mode) {
401                         modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
402                         ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
403                 } else {
404                         switch (sdd->cur_bpw) {
405                         case 32:
406                                 iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
407                                         xfer->tx_buf, xfer->len / 4);
408                                 break;
409                         case 16:
410                                 iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
411                                         xfer->tx_buf, xfer->len / 2);
412                                 break;
413                         default:
414                                 iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
415                                         xfer->tx_buf, xfer->len);
416                                 break;
417                         }
418                 }
419         }
420
421         if (xfer->rx_buf != NULL) {
422                 sdd->state |= RXBUSY;
423
424                 if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
425                                         && !(sdd->cur_mode & SPI_CPHA))
426                         chcfg |= S3C64XX_SPI_CH_HS_EN;
427
428                 if (dma_mode) {
429                         modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
430                         chcfg |= S3C64XX_SPI_CH_RXCH_ON;
431                         writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
432                                         | S3C64XX_SPI_PACKET_CNT_EN,
433                                         regs + S3C64XX_SPI_PACKET_CNT);
434                         ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
435                 }
436         }
437
438         if (ret)
439                 return ret;
440
441         writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
442         writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
443
444         return 0;
445 }
446
447 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
448                                         int timeout_ms)
449 {
450         void __iomem *regs = sdd->regs;
451         unsigned long val = 1;
452         u32 status;
453
454         /* max fifo depth available */
455         u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
456
457         if (timeout_ms)
458                 val = msecs_to_loops(timeout_ms);
459
460         do {
461                 status = readl(regs + S3C64XX_SPI_STATUS);
462         } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
463
464         /* return the actual received data length */
465         return RX_FIFO_LVL(status, sdd);
466 }
467
468 static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
469                                 struct spi_transfer *xfer)
470 {
471         void __iomem *regs = sdd->regs;
472         unsigned long val;
473         u32 status;
474         int ms;
475
476         /* millisecs to xfer 'len' bytes @ 'cur_speed' */
477         ms = xfer->len * 8 * 1000 / sdd->cur_speed;
478         ms += 30;               /* some tolerance */
479         ms = max(ms, 100);      /* minimum timeout */
480
481         val = msecs_to_jiffies(ms) + 10;
482         val = wait_for_completion_timeout(&sdd->xfer_completion, val);
483
484         /*
485          * If the previous xfer completed within the timeout, proceed
486          * further; else return -EIO.
487          * DMA Tx completes as soon as the data is written into the FIFO,
488          * without waiting for the actual transmission on the bus to end.
489          * DMA Rx completes only after the DMA has read the data out of
490          * the FIFO, which requires the bus transmission to finish, so no
491          * extra wait is needed if the xfer involved Rx (with or without Tx).
492          */
493         if (val && !xfer->rx_buf) {
494                 val = msecs_to_loops(10);
495                 status = readl(regs + S3C64XX_SPI_STATUS);
496                 while ((TX_FIFO_LVL(status, sdd)
497                         || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
498                        && --val) {
499                         cpu_relax();
500                         status = readl(regs + S3C64XX_SPI_STATUS);
501                 }
502
503         }
504
505         /* If timed out while checking rx/tx status return error */
506         if (!val)
507                 return -EIO;
508
509         return 0;
510 }
511
512 static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
513                                 struct spi_transfer *xfer)
514 {
515         void __iomem *regs = sdd->regs;
516         unsigned long val;
517         u32 status;
518         int loops;
519         u32 cpy_len;
520         u8 *buf;
521         int ms;
522
523         /* millisecs to xfer 'len' bytes @ 'cur_speed' */
524         ms = xfer->len * 8 * 1000 / sdd->cur_speed;
525         ms += 10; /* some tolerance */
526
527         val = msecs_to_loops(ms);
528         do {
529                 status = readl(regs + S3C64XX_SPI_STATUS);
530         } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
531
532         if (!val)
533                 return -EIO;
534
535         /* If it was only Tx */
536         if (!xfer->rx_buf) {
537                 sdd->state &= ~TXBUSY;
538                 return 0;
539         }
540
541         /*
542          * If the receive length is bigger than the controller FIFO
543          * size, compute the number of loops and read the FIFO that
544          * many times: loops = length / max FIFO size (derived from
545          * the FIFO mask).
546          * For any length smaller than the FIFO size the code below
547          * is executed at least once.
548          */
549         loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
550         buf = xfer->rx_buf;
551         do {
552                 /* wait for data to be received in the fifo */
553                 cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
554                                                        (loops ? ms : 0));
555
556                 switch (sdd->cur_bpw) {
557                 case 32:
558                         ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
559                                      buf, cpy_len / 4);
560                         break;
561                 case 16:
562                         ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
563                                      buf, cpy_len / 2);
564                         break;
565                 default:
566                         ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
567                                     buf, cpy_len);
568                         break;
569                 }
570
571                 buf = buf + cpy_len;
572         } while (loops--);
573         sdd->state &= ~RXBUSY;
574
575         return 0;
576 }
577
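/*
 * Apply the cached mode/bpw/speed to the hardware.  With clk_from_cmu the
 * bus clock comes from clk_set_rate() on src_clk (which is divided by two
 * internally); otherwise the internal prescaler is programmed so that
 *
 *     spi_clk = src_clk / 2 / (PSR + 1)
 *
 * which is the same formula s3c64xx_spi_setup() uses to clamp max_speed_hz.
 */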
578 static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
579 {
580         void __iomem *regs = sdd->regs;
581         int ret;
582         u32 val;
583
584         /* Disable Clock */
585         if (!sdd->port_conf->clk_from_cmu) {
586                 val = readl(regs + S3C64XX_SPI_CLK_CFG);
587                 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
588                 writel(val, regs + S3C64XX_SPI_CLK_CFG);
589         }
590
591         /* Set Polarity and Phase */
592         val = readl(regs + S3C64XX_SPI_CH_CFG);
593         val &= ~(S3C64XX_SPI_CH_SLAVE |
594                         S3C64XX_SPI_CPOL_L |
595                         S3C64XX_SPI_CPHA_B);
596
597         if (sdd->cur_mode & SPI_CPOL)
598                 val |= S3C64XX_SPI_CPOL_L;
599
600         if (sdd->cur_mode & SPI_CPHA)
601                 val |= S3C64XX_SPI_CPHA_B;
602
603         writel(val, regs + S3C64XX_SPI_CH_CFG);
604
605         /* Set Channel & DMA Mode */
606         val = readl(regs + S3C64XX_SPI_MODE_CFG);
607         val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
608                         | S3C64XX_SPI_MODE_CH_TSZ_MASK);
609
610         switch (sdd->cur_bpw) {
611         case 32:
612                 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
613                 val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
614                 break;
615         case 16:
616                 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
617                 val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
618                 break;
619         default:
620                 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
621                 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
622                 break;
623         }
624
625         writel(val, regs + S3C64XX_SPI_MODE_CFG);
626
627         if (sdd->port_conf->clk_from_cmu) {
628                 /* The src_clk clock is divided internally by 2 */
629                 ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
630                 if (ret)
631                         return ret;
632                 sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2;
633         } else {
634                 /* Configure Clock */
635                 val = readl(regs + S3C64XX_SPI_CLK_CFG);
636                 val &= ~S3C64XX_SPI_PSR_MASK;
637                 val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
638                                 & S3C64XX_SPI_PSR_MASK);
639                 writel(val, regs + S3C64XX_SPI_CLK_CFG);
640
641                 /* Enable Clock */
642                 val = readl(regs + S3C64XX_SPI_CLK_CFG);
643                 val |= S3C64XX_SPI_ENCLK_ENABLE;
644                 writel(val, regs + S3C64XX_SPI_CLK_CFG);
645         }
646
647         return 0;
648 }
649
650 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
651
652 static int s3c64xx_spi_prepare_message(struct spi_master *master,
653                                        struct spi_message *msg)
654 {
655         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
656         struct spi_device *spi = msg->spi;
657         struct s3c64xx_spi_csinfo *cs = spi->controller_data;
658
659         /* Configure feedback delay */
660         writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
661
662         return 0;
663 }
664
665 static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi)
666 {
667         struct spi_controller *ctlr = spi->controller;
668
669         return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX;
670 }
671
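/*
 * One transfer: reconfigure the controller if speed/bpw changed, then either
 * hand the buffers to DMA or, on polling-only controllers, split a transfer
 * larger than the FIFO into FIFO-sized chunks and PIO each chunk in the
 * do/while loop below (target_len tracks what is left, origin_len is used
 * to restore the xfer at the end).
 */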
672 static int s3c64xx_spi_transfer_one(struct spi_master *master,
673                                     struct spi_device *spi,
674                                     struct spi_transfer *xfer)
675 {
676         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
677         const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
678         const void *tx_buf = NULL;
679         void *rx_buf = NULL;
680         int target_len = 0, origin_len = 0;
681         int use_dma = 0;
682         int status;
683         u32 speed;
684         u8 bpw;
685         unsigned long flags;
686
687         reinit_completion(&sdd->xfer_completion);
688
689         /* Only BPW and Speed may change across transfers */
690         bpw = xfer->bits_per_word;
691         speed = xfer->speed_hz;
692
693         if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
694                 sdd->cur_bpw = bpw;
695                 sdd->cur_speed = speed;
696                 sdd->cur_mode = spi->mode;
697                 status = s3c64xx_spi_config(sdd);
698                 if (status)
699                         return status;
700         }
701
702         if (!is_polling(sdd) && (xfer->len > fifo_len) &&
703             sdd->rx_dma.ch && sdd->tx_dma.ch) {
704                 use_dma = 1;
705
706         } else if (is_polling(sdd) && xfer->len > fifo_len) {
707                 tx_buf = xfer->tx_buf;
708                 rx_buf = xfer->rx_buf;
709                 origin_len = xfer->len;
710
711                 target_len = xfer->len;
712                 if (xfer->len > fifo_len)
713                         xfer->len = fifo_len;
714         }
715
716         do {
717                 spin_lock_irqsave(&sdd->lock, flags);
718
719                 /* Mark only the directions used by this xfer as pending */
720                 sdd->state &= ~RXBUSY;
721                 sdd->state &= ~TXBUSY;
722
723                 /* Start the signals */
724                 s3c64xx_spi_set_cs(spi, true);
725
726                 status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
727
728                 spin_unlock_irqrestore(&sdd->lock, flags);
729
730                 if (status) {
731                         dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
732                         break;
733                 }
734
735                 if (use_dma)
736                         status = s3c64xx_wait_for_dma(sdd, xfer);
737                 else
738                         status = s3c64xx_wait_for_pio(sdd, xfer);
739
740                 if (status) {
741                         dev_err(&spi->dev,
742                                 "I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
743                                 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
744                                 (sdd->state & RXBUSY) ? 'f' : 'p',
745                                 (sdd->state & TXBUSY) ? 'f' : 'p',
746                                 xfer->len, use_dma ? 1 : 0, status);
747
748                         if (use_dma) {
749                                 struct dma_tx_state s;
750
751                                 if (xfer->tx_buf && (sdd->state & TXBUSY)) {
752                                         dmaengine_pause(sdd->tx_dma.ch);
753                                         dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
754                                         dmaengine_terminate_all(sdd->tx_dma.ch);
755                                         dev_err(&spi->dev, "TX residue: %d\n", s.residue);
756
757                                 }
758                                 if (xfer->rx_buf && (sdd->state & RXBUSY)) {
759                                         dmaengine_pause(sdd->rx_dma.ch);
760                                         dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
761                                         dmaengine_terminate_all(sdd->rx_dma.ch);
762                                         dev_err(&spi->dev, "RX residue: %d\n", s.residue);
763                                 }
764                         }
765                 } else {
766                         s3c64xx_flush_fifo(sdd);
767                 }
768                 if (target_len > 0) {
769                         target_len -= xfer->len;
770
771                         if (xfer->tx_buf)
772                                 xfer->tx_buf += xfer->len;
773
774                         if (xfer->rx_buf)
775                                 xfer->rx_buf += xfer->len;
776
777                         if (target_len > fifo_len)
778                                 xfer->len = fifo_len;
779                         else
780                                 xfer->len = target_len;
781                 }
782         } while (target_len > 0);
783
784         if (origin_len) {
785                 /* Restore original xfer buffers and length */
786                 xfer->tx_buf = tx_buf;
787                 xfer->rx_buf = rx_buf;
788                 xfer->len = origin_len;
789         }
790
791         return status;
792 }
793
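/*
 * The per-slave configuration is read from a "controller-data" child node
 * of the SPI device node.  A minimal, illustrative fragment (node and
 * device names are examples; only the property parsed below matters):
 *
 *      spi-flash@0 {
 *              compatible = "jedec,spi-nor";
 *              reg = <0>;
 *              spi-max-frequency = <20000000>;
 *
 *              controller-data {
 *                      samsung,spi-feedback-delay = <2>;
 *              };
 *      };
 */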
794 static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
795                                 struct spi_device *spi)
796 {
797         struct s3c64xx_spi_csinfo *cs;
798         struct device_node *slave_np, *data_np = NULL;
799         u32 fb_delay = 0;
800
801         slave_np = spi->dev.of_node;
802         if (!slave_np) {
803                 dev_err(&spi->dev, "device node not found\n");
804                 return ERR_PTR(-EINVAL);
805         }
806
807         data_np = of_get_child_by_name(slave_np, "controller-data");
808         if (!data_np) {
809                 dev_err(&spi->dev, "child node 'controller-data' not found\n");
810                 return ERR_PTR(-EINVAL);
811         }
812
813         cs = kzalloc(sizeof(*cs), GFP_KERNEL);
814         if (!cs) {
815                 of_node_put(data_np);
816                 return ERR_PTR(-ENOMEM);
817         }
818
819         of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
820         cs->fb_delay = fb_delay;
821         of_node_put(data_np);
822         return cs;
823 }
824
825 /*
826  * Here we only check the validity of the requested configuration
827  * and save it in a local data structure.
828  * The controller is actually configured just before we get a
829  * message to transfer.
830  */
831 static int s3c64xx_spi_setup(struct spi_device *spi)
832 {
833         struct s3c64xx_spi_csinfo *cs = spi->controller_data;
834         struct s3c64xx_spi_driver_data *sdd;
835         int err;
836
837         sdd = spi_master_get_devdata(spi->master);
838         if (spi->dev.of_node) {
839                 cs = s3c64xx_get_slave_ctrldata(spi);
840                 spi->controller_data = cs;
841         } else if (cs) {
842                 /* On non-DT platforms the SPI core will set spi->cs_gpio
843                  * to -ENOENT. The GPIO pin used to drive the chip select
844                  * is defined via platform data, so the spi->cs_gpio value
845                  * has to be overridden with the proper GPIO pin number.
846                  */
847                 spi->cs_gpio = cs->line;
848         }
849
850         if (IS_ERR_OR_NULL(cs)) {
851                 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
852                 return -ENODEV;
853         }
854
855         if (!spi_get_ctldata(spi)) {
856                 if (gpio_is_valid(spi->cs_gpio)) {
857                         err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
858                                                dev_name(&spi->dev));
859                         if (err) {
860                                 dev_err(&spi->dev,
861                                         "Failed to get /CS gpio [%d]: %d\n",
862                                         spi->cs_gpio, err);
863                                 goto err_gpio_req;
864                         }
865                 }
866
867                 spi_set_ctldata(spi, cs);
868         }
869
870         pm_runtime_get_sync(&sdd->pdev->dev);
871
872         /* Check if we can provide the requested rate */
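        /*
         * Worked example of the clamping below (illustrative numbers, not
         * from this file): with src_clk at 66 MHz and a requested
         * max_speed_hz of 10 MHz, psr = 66e6/2/10e6 - 1 = 2, which gives
         * 66e6/2/(2+1) = 11 MHz > 10 MHz, so psr is bumped to 3 and the
         * device ends up with max_speed_hz = 66e6/2/(3+1) = 8.25 MHz.
         */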
873         if (!sdd->port_conf->clk_from_cmu) {
874                 u32 psr, speed;
875
876                 /* Max possible */
877                 speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
878
879                 if (spi->max_speed_hz > speed)
880                         spi->max_speed_hz = speed;
881
882                 psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
883                 psr &= S3C64XX_SPI_PSR_MASK;
884                 if (psr == S3C64XX_SPI_PSR_MASK)
885                         psr--;
886
887                 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
888                 if (spi->max_speed_hz < speed) {
889                         if (psr+1 < S3C64XX_SPI_PSR_MASK) {
890                                 psr++;
891                         } else {
892                                 err = -EINVAL;
893                                 goto setup_exit;
894                         }
895                 }
896
897                 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
898                 if (spi->max_speed_hz >= speed) {
899                         spi->max_speed_hz = speed;
900                 } else {
901                         dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
902                                 spi->max_speed_hz);
903                         err = -EINVAL;
904                         goto setup_exit;
905                 }
906         }
907
908         pm_runtime_mark_last_busy(&sdd->pdev->dev);
909         pm_runtime_put_autosuspend(&sdd->pdev->dev);
910         s3c64xx_spi_set_cs(spi, false);
911
912         return 0;
913
914 setup_exit:
915         pm_runtime_mark_last_busy(&sdd->pdev->dev);
916         pm_runtime_put_autosuspend(&sdd->pdev->dev);
917         /* setup() returns with device de-selected */
918         s3c64xx_spi_set_cs(spi, false);
919
920         if (gpio_is_valid(spi->cs_gpio))
921                 gpio_free(spi->cs_gpio);
922         spi_set_ctldata(spi, NULL);
923
924 err_gpio_req:
925         if (spi->dev.of_node)
926                 kfree(cs);
927
928         return err;
929 }
930
931 static void s3c64xx_spi_cleanup(struct spi_device *spi)
932 {
933         struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
934
935         if (gpio_is_valid(spi->cs_gpio)) {
936                 gpio_free(spi->cs_gpio);
937                 if (spi->dev.of_node)
938                         kfree(cs);
939                 else {
940                         /* On non-DT platforms, the SPI core sets
941                          * spi->cs_gpio to -ENOENT and .setup()
942                          * overrides it with the GPIO pin value
943                          * passed using platform data.
944                          */
945                         spi->cs_gpio = -ENOENT;
946                 }
947         }
948
949         spi_set_ctldata(spi, NULL);
950 }
951
952 static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
953 {
954         struct s3c64xx_spi_driver_data *sdd = data;
955         struct spi_master *spi = sdd->master;
956         unsigned int val, clr = 0;
957
958         val = readl(sdd->regs + S3C64XX_SPI_STATUS);
959
960         if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
961                 clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
962                 dev_err(&spi->dev, "RX overrun\n");
963         }
964         if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
965                 clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
966                 dev_err(&spi->dev, "RX underrun\n");
967         }
968         if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
969                 clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
970                 dev_err(&spi->dev, "TX overrun\n");
971         }
972         if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
973                 clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
974                 dev_err(&spi->dev, "TX underrun\n");
975         }
976
977         /* Clear the pending irq by setting and then clearing it */
978         writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
979         writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
980
981         return IRQ_HANDLED;
982 }
983
984 static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
985 {
986         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
987         void __iomem *regs = sdd->regs;
988         unsigned int val;
989
990         sdd->cur_speed = 0;
991
992         if (sci->no_cs)
993                 writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
994         else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
995                 writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);
996
997         /* Disable Interrupts - we use Polling if not DMA mode */
998         writel(0, regs + S3C64XX_SPI_INT_EN);
999
1000         if (!sdd->port_conf->clk_from_cmu)
1001                 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
1002                                 regs + S3C64XX_SPI_CLK_CFG);
1003         writel(0, regs + S3C64XX_SPI_MODE_CFG);
1004         writel(0, regs + S3C64XX_SPI_PACKET_CNT);
1005
1006         /* Clear any pending irq bits; the clear bits must be set and then cleared again */
1007         val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
1008                 S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
1009                 S3C64XX_SPI_PND_TX_OVERRUN_CLR |
1010                 S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
1011         writel(val, regs + S3C64XX_SPI_PENDING_CLR);
1012         writel(0, regs + S3C64XX_SPI_PENDING_CLR);
1013
1014         writel(0, regs + S3C64XX_SPI_SWAP_CFG);
1015
1016         val = readl(regs + S3C64XX_SPI_MODE_CFG);
1017         val &= ~S3C64XX_SPI_MODE_4BURST;
1018         val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
1019         val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
1020         writel(val, regs + S3C64XX_SPI_MODE_CFG);
1021
1022         s3c64xx_flush_fifo(sdd);
1023 }
1024
1025 #ifdef CONFIG_OF
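/*
 * Controller-level DT parsing: only "samsung,spi-src-clk" (index of the
 * parent bus clock), "num-cs" and the "no-cs-readback" flag are consumed
 * here.  An illustrative controller node (names and addresses are examples):
 *
 *      spi_1: spi@13930000 {
 *              compatible = "samsung,exynos4210-spi";
 *              reg = <0x13930000 0x100>;
 *              samsung,spi-src-clk = <0>;
 *              num-cs = <1>;
 *      };
 */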
1026 static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1027 {
1028         struct s3c64xx_spi_info *sci;
1029         u32 temp;
1030
1031         sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
1032         if (!sci)
1033                 return ERR_PTR(-ENOMEM);
1034
1035         if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
1036                 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
1037                 sci->src_clk_nr = 0;
1038         } else {
1039                 sci->src_clk_nr = temp;
1040         }
1041
1042         if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
1043                 dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
1044                 sci->num_cs = 1;
1045         } else {
1046                 sci->num_cs = temp;
1047         }
1048
1049         sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
1050
1051         return sci;
1052 }
1053 #else
1054 static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1055 {
1056         return dev_get_platdata(dev);
1057 }
1058 #endif
1059
1060 static inline const struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
1061                                                 struct platform_device *pdev)
1062 {
1063 #ifdef CONFIG_OF
1064         if (pdev->dev.of_node)
1065                 return of_device_get_match_data(&pdev->dev);
1066 #endif
1067         return (const struct s3c64xx_spi_port_config *)platform_get_device_id(pdev)->driver_data;
1068 }
1069
1070 static int s3c64xx_spi_probe(struct platform_device *pdev)
1071 {
1072         struct resource *mem_res;
1073         struct s3c64xx_spi_driver_data *sdd;
1074         struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
1075         struct spi_master *master;
1076         int ret, irq;
1077         char clk_name[16];
1078
1079         if (!sci && pdev->dev.of_node) {
1080                 sci = s3c64xx_spi_parse_dt(&pdev->dev);
1081                 if (IS_ERR(sci))
1082                         return PTR_ERR(sci);
1083         }
1084
1085         if (!sci) {
1086                 dev_err(&pdev->dev, "platform_data missing!\n");
1087                 return -ENODEV;
1088         }
1089
1090         mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1091         if (mem_res == NULL) {
1092                 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
1093                 return -ENXIO;
1094         }
1095
1096         irq = platform_get_irq(pdev, 0);
1097         if (irq < 0) {
1098                 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1099                 return irq;
1100         }
1101
1102         master = spi_alloc_master(&pdev->dev,
1103                                 sizeof(struct s3c64xx_spi_driver_data));
1104         if (master == NULL) {
1105                 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1106                 return -ENOMEM;
1107         }
1108
1109         platform_set_drvdata(pdev, master);
1110
1111         sdd = spi_master_get_devdata(master);
1112         sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
1113         sdd->master = master;
1114         sdd->cntrlr_info = sci;
1115         sdd->pdev = pdev;
1116         sdd->sfr_start = mem_res->start;
1117         if (pdev->dev.of_node) {
1118                 ret = of_alias_get_id(pdev->dev.of_node, "spi");
1119                 if (ret < 0) {
1120                         dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1121                                 ret);
1122                         goto err_deref_master;
1123                 }
1124                 sdd->port_id = ret;
1125         } else {
1126                 sdd->port_id = pdev->id;
1127         }
1128
1129         sdd->cur_bpw = 8;
1130
1131         sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1132         sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1133
1134         master->dev.of_node = pdev->dev.of_node;
1135         master->bus_num = sdd->port_id;
1136         master->setup = s3c64xx_spi_setup;
1137         master->cleanup = s3c64xx_spi_cleanup;
1138         master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1139         master->prepare_message = s3c64xx_spi_prepare_message;
1140         master->transfer_one = s3c64xx_spi_transfer_one;
1141         master->max_transfer_size = s3c64xx_spi_max_transfer_size;
1142         master->num_chipselect = sci->num_cs;
1143         master->dma_alignment = 8;
1144         master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
1145                                         SPI_BPW_MASK(8);
1146         /* the spi->mode bits understood by this driver: */
1147         master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1148         master->auto_runtime_pm = true;
1149         if (!is_polling(sdd))
1150                 master->can_dma = s3c64xx_spi_can_dma;
1151
1152         sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
1153         if (IS_ERR(sdd->regs)) {
1154                 ret = PTR_ERR(sdd->regs);
1155                 goto err_deref_master;
1156         }
1157
1158         if (sci->cfg_gpio && sci->cfg_gpio()) {
1159                 dev_err(&pdev->dev, "Unable to config gpio\n");
1160                 ret = -EBUSY;
1161                 goto err_deref_master;
1162         }
1163
1164         /* Setup clocks */
1165         sdd->clk = devm_clk_get(&pdev->dev, "spi");
1166         if (IS_ERR(sdd->clk)) {
1167                 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1168                 ret = PTR_ERR(sdd->clk);
1169                 goto err_deref_master;
1170         }
1171
1172         ret = clk_prepare_enable(sdd->clk);
1173         if (ret) {
1174                 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1175                 goto err_deref_master;
1176         }
1177
1178         sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1179         sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1180         if (IS_ERR(sdd->src_clk)) {
1181                 dev_err(&pdev->dev,
1182                         "Unable to acquire clock '%s'\n", clk_name);
1183                 ret = PTR_ERR(sdd->src_clk);
1184                 goto err_disable_clk;
1185         }
1186
1187         ret = clk_prepare_enable(sdd->src_clk);
1188         if (ret) {
1189                 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1190                 goto err_disable_clk;
1191         }
1192
1193         if (sdd->port_conf->clk_ioclk) {
1194                 sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
1195                 if (IS_ERR(sdd->ioclk)) {
1196                         dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
1197                         ret = PTR_ERR(sdd->ioclk);
1198                         goto err_disable_src_clk;
1199                 }
1200
1201                 ret = clk_prepare_enable(sdd->ioclk);
1202                 if (ret) {
1203                         dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
1204                         goto err_disable_src_clk;
1205                 }
1206         }
1207
1208         if (!is_polling(sdd)) {
1209                 /* Acquire DMA channels */
1210                 sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx");
1211                 if (IS_ERR(sdd->rx_dma.ch)) {
1212                         dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
1213                         ret = PTR_ERR(sdd->rx_dma.ch);
1214                         goto err_disable_io_clk;
1215                 }
1216                 sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx");
1217                 if (IS_ERR(sdd->tx_dma.ch)) {
1218                         dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
1219                         ret = PTR_ERR(sdd->tx_dma.ch);
1220                         goto err_release_rx_dma;
1221                 }
1222         }
1223
1224         pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
1225         pm_runtime_use_autosuspend(&pdev->dev);
1226         pm_runtime_set_active(&pdev->dev);
1227         pm_runtime_enable(&pdev->dev);
1228         pm_runtime_get_sync(&pdev->dev);
1229
1230         /* Set up default mode */
1231         s3c64xx_spi_hwinit(sdd);
1232
1233         spin_lock_init(&sdd->lock);
1234         init_completion(&sdd->xfer_completion);
1235
1236         ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
1237                                 "spi-s3c64xx", sdd);
1238         if (ret != 0) {
1239                 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1240                         irq, ret);
1241                 goto err_pm_put;
1242         }
1243
1244         writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1245                S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1246                sdd->regs + S3C64XX_SPI_INT_EN);
1247
1248         ret = devm_spi_register_master(&pdev->dev, master);
1249         if (ret != 0) {
1250                 dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
1251                 goto err_pm_put;
1252         }
1253
1254         dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1255                                         sdd->port_id, master->num_chipselect);
1256         dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
1257                                         mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
1258
1259         pm_runtime_mark_last_busy(&pdev->dev);
1260         pm_runtime_put_autosuspend(&pdev->dev);
1261
1262         return 0;
1263
1264 err_pm_put:
1265         pm_runtime_put_noidle(&pdev->dev);
1266         pm_runtime_disable(&pdev->dev);
1267         pm_runtime_set_suspended(&pdev->dev);
1268
1269         if (!is_polling(sdd))
1270                 dma_release_channel(sdd->tx_dma.ch);
1271 err_release_rx_dma:
1272         if (!is_polling(sdd))
1273                 dma_release_channel(sdd->rx_dma.ch);
1274 err_disable_io_clk:
1275         clk_disable_unprepare(sdd->ioclk);
1276 err_disable_src_clk:
1277         clk_disable_unprepare(sdd->src_clk);
1278 err_disable_clk:
1279         clk_disable_unprepare(sdd->clk);
1280 err_deref_master:
1281         spi_master_put(master);
1282
1283         return ret;
1284 }
1285
1286 static int s3c64xx_spi_remove(struct platform_device *pdev)
1287 {
1288         struct spi_master *master = platform_get_drvdata(pdev);
1289         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1290
1291         pm_runtime_get_sync(&pdev->dev);
1292
1293         writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1294
1295         if (!is_polling(sdd)) {
1296                 dma_release_channel(sdd->rx_dma.ch);
1297                 dma_release_channel(sdd->tx_dma.ch);
1298         }
1299
1300         clk_disable_unprepare(sdd->ioclk);
1301
1302         clk_disable_unprepare(sdd->src_clk);
1303
1304         clk_disable_unprepare(sdd->clk);
1305
1306         pm_runtime_put_noidle(&pdev->dev);
1307         pm_runtime_disable(&pdev->dev);
1308         pm_runtime_set_suspended(&pdev->dev);
1309
1310         return 0;
1311 }
1312
1313 #ifdef CONFIG_PM_SLEEP
1314 static int s3c64xx_spi_suspend(struct device *dev)
1315 {
1316         struct spi_master *master = dev_get_drvdata(dev);
1317         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1318
1319         int ret = spi_master_suspend(master);
1320         if (ret)
1321                 return ret;
1322
1323         ret = pm_runtime_force_suspend(dev);
1324         if (ret < 0)
1325                 return ret;
1326
1327         sdd->cur_speed = 0; /* Output Clock is stopped */
1328
1329         return 0;
1330 }
1331
1332 static int s3c64xx_spi_resume(struct device *dev)
1333 {
1334         struct spi_master *master = dev_get_drvdata(dev);
1335         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1336         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1337         int ret;
1338
1339         if (sci->cfg_gpio)
1340                 sci->cfg_gpio();
1341
1342         ret = pm_runtime_force_resume(dev);
1343         if (ret < 0)
1344                 return ret;
1345
1346         return spi_master_resume(master);
1347 }
1348 #endif /* CONFIG_PM_SLEEP */
1349
1350 #ifdef CONFIG_PM
1351 static int s3c64xx_spi_runtime_suspend(struct device *dev)
1352 {
1353         struct spi_master *master = dev_get_drvdata(dev);
1354         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1355
1356         clk_disable_unprepare(sdd->clk);
1357         clk_disable_unprepare(sdd->src_clk);
1358         clk_disable_unprepare(sdd->ioclk);
1359
1360         return 0;
1361 }
1362
1363 static int s3c64xx_spi_runtime_resume(struct device *dev)
1364 {
1365         struct spi_master *master = dev_get_drvdata(dev);
1366         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1367         int ret;
1368
1369         if (sdd->port_conf->clk_ioclk) {
1370                 ret = clk_prepare_enable(sdd->ioclk);
1371                 if (ret != 0)
1372                         return ret;
1373         }
1374
1375         ret = clk_prepare_enable(sdd->src_clk);
1376         if (ret != 0)
1377                 goto err_disable_ioclk;
1378
1379         ret = clk_prepare_enable(sdd->clk);
1380         if (ret != 0)
1381                 goto err_disable_src_clk;
1382
1383         s3c64xx_spi_hwinit(sdd);
1384
1385         writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1386                S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1387                sdd->regs + S3C64XX_SPI_INT_EN);
1388
1389         return 0;
1390
1391 err_disable_src_clk:
1392         clk_disable_unprepare(sdd->src_clk);
1393 err_disable_ioclk:
1394         clk_disable_unprepare(sdd->ioclk);
1395
1396         return ret;
1397 }
1398 #endif /* CONFIG_PM */
1399
1400 static const struct dev_pm_ops s3c64xx_spi_pm = {
1401         SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
1402         SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
1403                            s3c64xx_spi_runtime_resume, NULL)
1404 };
1405
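/*
 * Per-SoC port configurations.  Each fifo_lvl_mask entry corresponds to one
 * SPI port on that SoC (indexed by port_id) and encodes its FIFO depth as
 * described above: 0x7f -> 64-byte FIFO, 0x1ff -> 256-byte FIFO.
 */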
1406 static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
1407         .fifo_lvl_mask  = { 0x7f },
1408         .rx_lvl_offset  = 13,
1409         .tx_st_done     = 21,
1410         .high_speed     = true,
1411 };
1412
1413 static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
1414         .fifo_lvl_mask  = { 0x7f, 0x7F },
1415         .rx_lvl_offset  = 13,
1416         .tx_st_done     = 21,
1417 };
1418
1419 static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
1420         .fifo_lvl_mask  = { 0x1ff, 0x7F },
1421         .rx_lvl_offset  = 15,
1422         .tx_st_done     = 25,
1423         .high_speed     = true,
1424 };
1425
1426 static const struct s3c64xx_spi_port_config exynos4_spi_port_config = {
1427         .fifo_lvl_mask  = { 0x1ff, 0x7F, 0x7F },
1428         .rx_lvl_offset  = 15,
1429         .tx_st_done     = 25,
1430         .high_speed     = true,
1431         .clk_from_cmu   = true,
1432         .quirks         = S3C64XX_SPI_QUIRK_CS_AUTO,
1433 };
1434
1435 static const struct s3c64xx_spi_port_config exynos7_spi_port_config = {
1436         .fifo_lvl_mask  = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
1437         .rx_lvl_offset  = 15,
1438         .tx_st_done     = 25,
1439         .high_speed     = true,
1440         .clk_from_cmu   = true,
1441         .quirks         = S3C64XX_SPI_QUIRK_CS_AUTO,
1442 };
1443
1444 static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
1445         .fifo_lvl_mask  = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
1446         .rx_lvl_offset  = 15,
1447         .tx_st_done     = 25,
1448         .high_speed     = true,
1449         .clk_from_cmu   = true,
1450         .clk_ioclk      = true,
1451         .quirks         = S3C64XX_SPI_QUIRK_CS_AUTO,
1452 };
1453
1454 static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
1455         {
1456                 .name           = "s3c2443-spi",
1457                 .driver_data    = (kernel_ulong_t)&s3c2443_spi_port_config,
1458         }, {
1459                 .name           = "s3c6410-spi",
1460                 .driver_data    = (kernel_ulong_t)&s3c6410_spi_port_config,
1461         },
1462         { },
1463 };
1464
1465 static const struct of_device_id s3c64xx_spi_dt_match[] = {
1466         { .compatible = "samsung,s3c2443-spi",
1467                         .data = (void *)&s3c2443_spi_port_config,
1468         },
1469         { .compatible = "samsung,s3c6410-spi",
1470                         .data = (void *)&s3c6410_spi_port_config,
1471         },
1472         { .compatible = "samsung,s5pv210-spi",
1473                         .data = (void *)&s5pv210_spi_port_config,
1474         },
1475         { .compatible = "samsung,exynos4210-spi",
1476                         .data = (void *)&exynos4_spi_port_config,
1477         },
1478         { .compatible = "samsung,exynos7-spi",
1479                         .data = (void *)&exynos7_spi_port_config,
1480         },
1481         { .compatible = "samsung,exynos5433-spi",
1482                         .data = (void *)&exynos5433_spi_port_config,
1483         },
1484         { },
1485 };
1486 MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
1487
1488 static struct platform_driver s3c64xx_spi_driver = {
1489         .driver = {
1490                 .name   = "s3c64xx-spi",
1491                 .pm = &s3c64xx_spi_pm,
1492                 .of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
1493         },
1494         .probe = s3c64xx_spi_probe,
1495         .remove = s3c64xx_spi_remove,
1496         .id_table = s3c64xx_spi_driver_ids,
1497 };
1498 MODULE_ALIAS("platform:s3c64xx-spi");
1499
1500 module_platform_driver(s3c64xx_spi_driver);
1501
1502 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1503 MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1504 MODULE_LICENSE("GPL");