drivers/spi/spi-imx.c [platform/kernel/linux-rpi.git]
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 // Copyright (C) 2008 Juergen Beisert
4
5 #include <linux/clk.h>
6 #include <linux/completion.h>
7 #include <linux/delay.h>
8 #include <linux/dmaengine.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/err.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/irq.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/pinctrl/consumer.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi_bitbang.h>
22 #include <linux/types.h>
23 #include <linux/of.h>
24 #include <linux/of_device.h>
25 #include <linux/property.h>
26
27 #include <linux/platform_data/dma-imx.h>
28
29 #define DRIVER_NAME "spi_imx"
30
31 static bool use_dma = true;
32 module_param(use_dma, bool, 0644);
33 MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
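
/*
 * Usage note: since use_dma is a 0644 module parameter, DMA can be disabled
 * at load time (e.g. "modprobe spi-imx use_dma=0"), on the kernel command
 * line when the driver is built in ("spi_imx.use_dma=0"), or at runtime via
 * /sys/module/spi_imx/parameters/use_dma.
 */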
34
35 #define MXC_RPM_TIMEOUT         2000 /* 2000ms */
36
37 #define MXC_CSPIRXDATA          0x00
38 #define MXC_CSPITXDATA          0x04
39 #define MXC_CSPICTRL            0x08
40 #define MXC_CSPIINT             0x0c
41 #define MXC_RESET               0x1c
42
43 /* generic defines to abstract from the different register layouts */
44 #define MXC_INT_RR      (1 << 0) /* Receive data ready interrupt */
45 #define MXC_INT_TE      (1 << 1) /* Transmit FIFO empty interrupt */
46 #define MXC_INT_RDR     BIT(4) /* Receive data threshold interrupt */
47
48 /* The maximum number of bytes that an SDMA BD can transfer. */
49 #define MAX_SDMA_BD_BYTES (1 << 15)
50 #define MX51_ECSPI_CTRL_MAX_BURST       512
51 /* The maximum number of bytes that IMX53_ECSPI can transfer in slave mode. */
52 #define MX53_MAX_TRANSFER_BYTES         512
53
54 enum spi_imx_devtype {
55         IMX1_CSPI,
56         IMX21_CSPI,
57         IMX27_CSPI,
58         IMX31_CSPI,
59         IMX35_CSPI,     /* CSPI on all i.mx except above */
60         IMX51_ECSPI,    /* ECSPI on i.mx51 */
61         IMX53_ECSPI,    /* ECSPI on i.mx53 and later */
62 };
63
64 struct spi_imx_data;
65
66 struct spi_imx_devtype_data {
67         void (*intctrl)(struct spi_imx_data *, int);
68         int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
69         int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *);
70         void (*trigger)(struct spi_imx_data *);
71         int (*rx_available)(struct spi_imx_data *);
72         void (*reset)(struct spi_imx_data *);
73         void (*setup_wml)(struct spi_imx_data *);
74         void (*disable)(struct spi_imx_data *);
75         void (*disable_dma)(struct spi_imx_data *);
76         bool has_dmamode;
77         bool has_slavemode;
78         unsigned int fifo_size;
79         bool dynamic_burst;
80         /*
81          * ERR009165 fixed or not:
82          * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
83          */
84         bool tx_glitch_fixed;
85         enum spi_imx_devtype devtype;
86 };
87
88 struct spi_imx_data {
89         struct spi_bitbang bitbang;
90         struct device *dev;
91
92         struct completion xfer_done;
93         void __iomem *base;
94         unsigned long base_phys;
95
96         struct clk *clk_per;
97         struct clk *clk_ipg;
98         unsigned long spi_clk;
99         unsigned int spi_bus_clk;
100
101         unsigned int bits_per_word;
102         unsigned int spi_drctl;
103
104         unsigned int count, remainder;
105         void (*tx)(struct spi_imx_data *);
106         void (*rx)(struct spi_imx_data *);
107         void *rx_buf;
108         const void *tx_buf;
109         unsigned int txfifo; /* number of words pushed in tx FIFO */
110         unsigned int dynamic_burst;
111
112         /* Slave mode */
113         bool slave_mode;
114         bool slave_aborted;
115         unsigned int slave_burst;
116
117         /* DMA */
118         bool usedma;
119         u32 wml;
120         struct completion dma_rx_completion;
121         struct completion dma_tx_completion;
122
123         const struct spi_imx_devtype_data *devtype_data;
124 };
125
126 static inline int is_imx27_cspi(struct spi_imx_data *d)
127 {
128         return d->devtype_data->devtype == IMX27_CSPI;
129 }
130
131 static inline int is_imx35_cspi(struct spi_imx_data *d)
132 {
133         return d->devtype_data->devtype == IMX35_CSPI;
134 }
135
136 static inline int is_imx51_ecspi(struct spi_imx_data *d)
137 {
138         return d->devtype_data->devtype == IMX51_ECSPI;
139 }
140
141 static inline int is_imx53_ecspi(struct spi_imx_data *d)
142 {
143         return d->devtype_data->devtype == IMX53_ECSPI;
144 }
145
146 #define MXC_SPI_BUF_RX(type)                                            \
147 static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)         \
148 {                                                                       \
149         unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);       \
150                                                                         \
151         if (spi_imx->rx_buf) {                                          \
152                 *(type *)spi_imx->rx_buf = val;                         \
153                 spi_imx->rx_buf += sizeof(type);                        \
154         }                                                               \
155                                                                         \
156         spi_imx->remainder -= sizeof(type);                             \
157 }
158
159 #define MXC_SPI_BUF_TX(type)                                            \
160 static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)         \
161 {                                                                       \
162         type val = 0;                                                   \
163                                                                         \
164         if (spi_imx->tx_buf) {                                          \
165                 val = *(type *)spi_imx->tx_buf;                         \
166                 spi_imx->tx_buf += sizeof(type);                        \
167         }                                                               \
168                                                                         \
169         spi_imx->count -= sizeof(type);                                 \
170                                                                         \
171         writel(val, spi_imx->base + MXC_CSPITXDATA);                    \
172 }
173
174 MXC_SPI_BUF_RX(u8)
175 MXC_SPI_BUF_TX(u8)
176 MXC_SPI_BUF_RX(u16)
177 MXC_SPI_BUF_TX(u16)
178 MXC_SPI_BUF_RX(u32)
179 MXC_SPI_BUF_TX(u32)
180
181 /* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
182  * (which is currently not the case in this driver)
183  */
184 static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
185         256, 384, 512, 768, 1024};
186
187 /* MX21, MX27 */
188 static unsigned int spi_imx_clkdiv_1(unsigned int fin,
189                 unsigned int fspi, unsigned int max, unsigned int *fres)
190 {
191         int i;
192
193         for (i = 2; i < max; i++)
194                 if (fspi * mxc_clkdivs[i] >= fin)
195                         break;
196
197         *fres = fin / mxc_clkdivs[i];
198         return i;
199 }
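
/*
 * Worked example (illustrative values): with fin = 48 MHz and fspi = 1 MHz,
 * the loop stops at the first table entry where 1 MHz * mxc_clkdivs[i] is
 * at least 48 MHz, i.e. i = 9 (divider 48), so the DR field gets 9 and
 * *fres = 1 MHz.
 */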
200
201 /* MX1, MX31, MX35, MX51 CSPI */
202 static unsigned int spi_imx_clkdiv_2(unsigned int fin,
203                 unsigned int fspi, unsigned int *fres)
204 {
205         int i, div = 4;
206
207         for (i = 0; i < 7; i++) {
208                 if (fspi * div >= fin)
209                         goto out;
210                 div <<= 1;
211         }
212
213 out:
214         *fres = fin / div;
215         return i;
216 }
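
/*
 * Worked example (illustrative values): with fin = 16 MHz and fspi = 1 MHz,
 * div is doubled from 4 until 1 MHz * div >= 16 MHz, which happens at
 * div = 16 after two doublings, so the function returns 2 and sets
 * *fres = 16 MHz / 16 = 1 MHz.
 */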
217
218 static int spi_imx_bytes_per_word(const int bits_per_word)
219 {
220         if (bits_per_word <= 8)
221                 return 1;
222         else if (bits_per_word <= 16)
223                 return 2;
224         else
225                 return 4;
226 }
227
228 static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
229                          struct spi_transfer *transfer)
230 {
231         struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
232
233         if (!use_dma || master->fallback)
234                 return false;
235
236         if (!master->dma_rx)
237                 return false;
238
239         if (spi_imx->slave_mode)
240                 return false;
241
242         if (transfer->len < spi_imx->devtype_data->fifo_size)
243                 return false;
244
245         spi_imx->dynamic_burst = 0;
246
247         return true;
248 }
249
250 #define MX51_ECSPI_CTRL         0x08
251 #define MX51_ECSPI_CTRL_ENABLE          (1 <<  0)
252 #define MX51_ECSPI_CTRL_XCH             (1 <<  2)
253 #define MX51_ECSPI_CTRL_SMC             (1 << 3)
254 #define MX51_ECSPI_CTRL_MODE_MASK       (0xf << 4)
255 #define MX51_ECSPI_CTRL_DRCTL(drctl)    ((drctl) << 16)
256 #define MX51_ECSPI_CTRL_POSTDIV_OFFSET  8
257 #define MX51_ECSPI_CTRL_PREDIV_OFFSET   12
258 #define MX51_ECSPI_CTRL_CS(cs)          ((cs) << 18)
259 #define MX51_ECSPI_CTRL_BL_OFFSET       20
260 #define MX51_ECSPI_CTRL_BL_MASK         (0xfff << 20)
261
262 #define MX51_ECSPI_CONFIG       0x0c
263 #define MX51_ECSPI_CONFIG_SCLKPHA(cs)   (1 << ((cs) +  0))
264 #define MX51_ECSPI_CONFIG_SCLKPOL(cs)   (1 << ((cs) +  4))
265 #define MX51_ECSPI_CONFIG_SBBCTRL(cs)   (1 << ((cs) +  8))
266 #define MX51_ECSPI_CONFIG_SSBPOL(cs)    (1 << ((cs) + 12))
267 #define MX51_ECSPI_CONFIG_SCLKCTL(cs)   (1 << ((cs) + 20))
268
269 #define MX51_ECSPI_INT          0x10
270 #define MX51_ECSPI_INT_TEEN             (1 <<  0)
271 #define MX51_ECSPI_INT_RREN             (1 <<  3)
272 #define MX51_ECSPI_INT_RDREN            (1 <<  4)
273
274 #define MX51_ECSPI_DMA          0x14
275 #define MX51_ECSPI_DMA_TX_WML(wml)      ((wml) & 0x3f)
276 #define MX51_ECSPI_DMA_RX_WML(wml)      (((wml) & 0x3f) << 16)
277 #define MX51_ECSPI_DMA_RXT_WML(wml)     (((wml) & 0x3f) << 24)
278
279 #define MX51_ECSPI_DMA_TEDEN            (1 << 7)
280 #define MX51_ECSPI_DMA_RXDEN            (1 << 23)
281 #define MX51_ECSPI_DMA_RXTDEN           (1 << 31)
282
283 #define MX51_ECSPI_STAT         0x18
284 #define MX51_ECSPI_STAT_RR              (1 <<  3)
285
286 #define MX51_ECSPI_TESTREG      0x20
287 #define MX51_ECSPI_TESTREG_LBC  BIT(31)
288
289 static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
290 {
291         unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
292 #ifdef __LITTLE_ENDIAN
293         unsigned int bytes_per_word;
294 #endif
295
296         if (spi_imx->rx_buf) {
297 #ifdef __LITTLE_ENDIAN
298                 bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
299                 if (bytes_per_word == 1)
300                         val = cpu_to_be32(val);
301                 else if (bytes_per_word == 2)
302                         val = (val << 16) | (val >> 16);
303 #endif
304                 *(u32 *)spi_imx->rx_buf = val;
305                 spi_imx->rx_buf += sizeof(u32);
306         }
307
308         spi_imx->remainder -= sizeof(u32);
309 }
310
311 static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
312 {
313         int unaligned;
314         u32 val;
315
316         unaligned = spi_imx->remainder % 4;
317
318         if (!unaligned) {
319                 spi_imx_buf_rx_swap_u32(spi_imx);
320                 return;
321         }
322
323         if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
324                 spi_imx_buf_rx_u16(spi_imx);
325                 return;
326         }
327
328         val = readl(spi_imx->base + MXC_CSPIRXDATA);
329
330         while (unaligned--) {
331                 if (spi_imx->rx_buf) {
332                         *(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
333                         spi_imx->rx_buf++;
334                 }
335                 spi_imx->remainder--;
336         }
337 }
338
339 static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
340 {
341         u32 val = 0;
342 #ifdef __LITTLE_ENDIAN
343         unsigned int bytes_per_word;
344 #endif
345
346         if (spi_imx->tx_buf) {
347                 val = *(u32 *)spi_imx->tx_buf;
348                 spi_imx->tx_buf += sizeof(u32);
349         }
350
351         spi_imx->count -= sizeof(u32);
352 #ifdef __LITTLE_ENDIAN
353         bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
354
355         if (bytes_per_word == 1)
356                 val = cpu_to_be32(val);
357         else if (bytes_per_word == 2)
358                 val = (val << 16) | (val >> 16);
359 #endif
360         writel(val, spi_imx->base + MXC_CSPITXDATA);
361 }
362
363 static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
364 {
365         int unaligned;
366         u32 val = 0;
367
368         unaligned = spi_imx->count % 4;
369
370         if (!unaligned) {
371                 spi_imx_buf_tx_swap_u32(spi_imx);
372                 return;
373         }
374
375         if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
376                 spi_imx_buf_tx_u16(spi_imx);
377                 return;
378         }
379
380         while (unaligned--) {
381                 if (spi_imx->tx_buf) {
382                         val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
383                         spi_imx->tx_buf++;
384                 }
385                 spi_imx->count--;
386         }
387
388         writel(val, spi_imx->base + MXC_CSPITXDATA);
389 }
390
391 static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
392 {
393         u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
394
395         if (spi_imx->rx_buf) {
396                 int n_bytes = spi_imx->slave_burst % sizeof(val);
397
398                 if (!n_bytes)
399                         n_bytes = sizeof(val);
400
401                 memcpy(spi_imx->rx_buf,
402                        ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
403
404                 spi_imx->rx_buf += n_bytes;
405                 spi_imx->slave_burst -= n_bytes;
406         }
407
408         spi_imx->remainder -= sizeof(u32);
409 }
410
411 static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
412 {
413         u32 val = 0;
414         int n_bytes = spi_imx->count % sizeof(val);
415
416         if (!n_bytes)
417                 n_bytes = sizeof(val);
418
419         if (spi_imx->tx_buf) {
420                 memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
421                        spi_imx->tx_buf, n_bytes);
422                 val = cpu_to_be32(val);
423                 spi_imx->tx_buf += n_bytes;
424         }
425
426         spi_imx->count -= n_bytes;
427
428         writel(val, spi_imx->base + MXC_CSPITXDATA);
429 }
430
431 /* MX51 eCSPI */
432 static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
433                                       unsigned int fspi, unsigned int *fres)
434 {
435         /*
436          * there are two 4-bit dividers, the pre-divider divides by
437          * $pre, the post-divider by 2^$post
438          */
439         unsigned int pre, post;
440         unsigned int fin = spi_imx->spi_clk;
441
442         fspi = min(fspi, fin);
443
444         post = fls(fin) - fls(fspi);
445         if (fin > fspi << post)
446                 post++;
447
448         /* now we have: (fin <= fspi << post) with post being minimal */
449
450         post = max(4U, post) - 4;
451         if (unlikely(post > 0xf)) {
452                 dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
453                                 fspi, fin);
454                 return 0xff;
455         }
456
457         pre = DIV_ROUND_UP(fin, fspi << post) - 1;
458
459         dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
460                         __func__, fin, fspi, post, pre);
461
462         /* Resulting frequency for the SCLK line. */
463         *fres = (fin / (pre + 1)) >> post;
464
465         return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
466                 (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
467 }
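
/*
 * Worked example (illustrative values): with fin = 60 MHz and fspi = 20 MHz,
 * fls() gives post = 26 - 25 = 1, bumped to 2 because 60 MHz > 20 MHz << 1;
 * the max(4, post) - 4 step folds it back to post = 0 and
 * pre = DIV_ROUND_UP(60 MHz, 20 MHz) - 1 = 2, so SCLK ends up at
 * (60 MHz / 3) >> 0 = 20 MHz, returned as pre = 2 and post = 0 in the
 * PREDIV/POSTDIV fields.
 */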
468
469 static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
470 {
471         unsigned val = 0;
472
473         if (enable & MXC_INT_TE)
474                 val |= MX51_ECSPI_INT_TEEN;
475
476         if (enable & MXC_INT_RR)
477                 val |= MX51_ECSPI_INT_RREN;
478
479         if (enable & MXC_INT_RDR)
480                 val |= MX51_ECSPI_INT_RDREN;
481
482         writel(val, spi_imx->base + MX51_ECSPI_INT);
483 }
484
485 static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
486 {
487         u32 reg;
488
489         reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
490         reg |= MX51_ECSPI_CTRL_XCH;
491         writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
492 }
493
494 static void mx51_disable_dma(struct spi_imx_data *spi_imx)
495 {
496         writel(0, spi_imx->base + MX51_ECSPI_DMA);
497 }
498
499 static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
500 {
501         u32 ctrl;
502
503         ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
504         ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
505         writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
506 }
507
508 static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
509                                       struct spi_message *msg)
510 {
511         struct spi_device *spi = msg->spi;
512         struct spi_transfer *xfer;
513         u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
514         u32 min_speed_hz = ~0U;
515         u32 testreg, delay;
516         u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
517
518         /* set Master or Slave mode */
519         if (spi_imx->slave_mode)
520                 ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
521         else
522                 ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
523
524         /*
525          * Enable SPI_RDY handling (falling edge/level triggered).
526          */
527         if (spi->mode & SPI_READY)
528                 ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
529
530         /* set chip select to use */
531         ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
532
533         /*
534          * The ctrl register must be written first; while the EN bit is not
535          * set, the other registers must not be written to.
536          */
537         writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
538
539         testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
540         if (spi->mode & SPI_LOOP)
541                 testreg |= MX51_ECSPI_TESTREG_LBC;
542         else
543                 testreg &= ~MX51_ECSPI_TESTREG_LBC;
544         writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
545
546         /*
547          * eCSPI burst completion by the chip select signal in slave mode
548          * is not functional on the i.MX53 SoC, so configure the SPI burst to
549          * complete when BURST_LENGTH + 1 bits have been received
550          */
551         if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
552                 cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
553         else
554                 cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
555
556         if (spi->mode & SPI_CPHA)
557                 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
558         else
559                 cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
560
561         if (spi->mode & SPI_CPOL) {
562                 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
563                 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
564         } else {
565                 cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
566                 cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
567         }
568
569         if (spi->mode & SPI_CS_HIGH)
570                 cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
571         else
572                 cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
573
574         writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
575
576         /*
577          * Wait until the changes in the configuration register CONFIGREG
578          * propagate into the hardware. It takes exactly one tick of the
579          * SCLK clock, but we will wait two SCLK cycles just to be sure. The
580          * time it takes for the hardware to apply the changes is
581          * noticeable if the SCLK clock runs very slowly. In such a case, if
582          * the polarity of SCLK should be inverted, the GPIO ChipSelect might
583          * be asserted before the SCLK polarity changes, which would disrupt
584          * the SPI communication as the device on the other end would consider
585          * the change of SCLK polarity as a clock tick already.
586          *
587          * Because spi_imx->spi_bus_clk is only set in bitbang prepare_message
588          * callback, iterate over all the transfers in spi_message, find the
589          * one with lowest bus frequency, and use that bus frequency for the
590          * delay calculation. In case all transfers have speed_hz == 0, then
591          * min_speed_hz is ~0 and the resulting delay is zero.
592          */
593         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
594                 if (!xfer->speed_hz)
595                         continue;
596                 min_speed_hz = min(xfer->speed_hz, min_speed_hz);
597         }
598
599         delay = (2 * 1000000) / min_speed_hz;
600         if (likely(delay < 10)) /* SCLK is faster than 200 kHz */
601                 udelay(delay);
602         else                    /* SCLK is _very_ slow */
603                 usleep_range(delay, delay + 10);
604
605         return 0;
606 }
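
/*
 * Example of the delay above (illustrative): if the slowest transfer in the
 * message runs at 100 kHz, delay = 2000000 / 100000 = 20 us and
 * usleep_range(20, 30) is used; at 1 MHz the delay is only 2 us, so udelay()
 * is used instead.
 */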
607
608 static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
609                                        struct spi_device *spi)
610 {
611         u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
612         u32 clk;
613
614         /* Clear BL field and set the right value */
615         ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
616         if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
617                 ctrl |= (spi_imx->slave_burst * 8 - 1)
618                         << MX51_ECSPI_CTRL_BL_OFFSET;
619         else
620                 ctrl |= (spi_imx->bits_per_word - 1)
621                         << MX51_ECSPI_CTRL_BL_OFFSET;
622
623         /* set clock speed */
624         ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
625                   0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
626         ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
627         spi_imx->spi_bus_clk = clk;
628
629         /*
630          * ERR009165: work in XHC mode instead of SMC as PIO on the chips
631          * before i.mx6ul.
632          */
633         if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
634                 ctrl |= MX51_ECSPI_CTRL_SMC;
635         else
636                 ctrl &= ~MX51_ECSPI_CTRL_SMC;
637
638         writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
639
640         return 0;
641 }
642
643 static void mx51_setup_wml(struct spi_imx_data *spi_imx)
644 {
645         u32 tx_wml = 0;
646
647         if (spi_imx->devtype_data->tx_glitch_fixed)
648                 tx_wml = spi_imx->wml;
649         /*
650          * Configure the DMA register: set up the watermark levels
651          * and enable the DMA requests.
652          */
653         writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
654                 MX51_ECSPI_DMA_TX_WML(tx_wml) |
655                 MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
656                 MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
657                 MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
658 }
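
/*
 * Illustration (assuming wml == 32 on a 64-entry FIFO): on a chip where
 * ERR009165 is not fixed, tx_wml stays 0, so the value written is
 * MX51_ECSPI_DMA_RX_WML(31) | MX51_ECSPI_DMA_TX_WML(0) |
 * MX51_ECSPI_DMA_RXT_WML(32) together with the TEDEN, RXDEN and RXTDEN
 * enable bits; on a fixed chip (e.g. i.MX6UL) TX_WML is 32 instead.
 */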
659
660 static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
661 {
662         return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
663 }
664
665 static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
666 {
667         /* drain receive buffer */
668         while (mx51_ecspi_rx_available(spi_imx))
669                 readl(spi_imx->base + MXC_CSPIRXDATA);
670 }
671
672 #define MX31_INTREG_TEEN        (1 << 0)
673 #define MX31_INTREG_RREN        (1 << 3)
674
675 #define MX31_CSPICTRL_ENABLE    (1 << 0)
676 #define MX31_CSPICTRL_MASTER    (1 << 1)
677 #define MX31_CSPICTRL_XCH       (1 << 2)
678 #define MX31_CSPICTRL_SMC       (1 << 3)
679 #define MX31_CSPICTRL_POL       (1 << 4)
680 #define MX31_CSPICTRL_PHA       (1 << 5)
681 #define MX31_CSPICTRL_SSCTL     (1 << 6)
682 #define MX31_CSPICTRL_SSPOL     (1 << 7)
683 #define MX31_CSPICTRL_BC_SHIFT  8
684 #define MX35_CSPICTRL_BL_SHIFT  20
685 #define MX31_CSPICTRL_CS_SHIFT  24
686 #define MX35_CSPICTRL_CS_SHIFT  12
687 #define MX31_CSPICTRL_DR_SHIFT  16
688
689 #define MX31_CSPI_DMAREG        0x10
690 #define MX31_DMAREG_RH_DEN      (1<<4)
691 #define MX31_DMAREG_TH_DEN      (1<<1)
692
693 #define MX31_CSPISTATUS         0x14
694 #define MX31_STATUS_RR          (1 << 3)
695
696 #define MX31_CSPI_TESTREG       0x1C
697 #define MX31_TEST_LBC           (1 << 14)
698
699 /* These functions also work for the i.MX35, but be aware that
700  * the i.MX35 has a slightly different register layout for bits
701  * we do not use here.
702  */
703 static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
704 {
705         unsigned int val = 0;
706
707         if (enable & MXC_INT_TE)
708                 val |= MX31_INTREG_TEEN;
709         if (enable & MXC_INT_RR)
710                 val |= MX31_INTREG_RREN;
711
712         writel(val, spi_imx->base + MXC_CSPIINT);
713 }
714
715 static void mx31_trigger(struct spi_imx_data *spi_imx)
716 {
717         unsigned int reg;
718
719         reg = readl(spi_imx->base + MXC_CSPICTRL);
720         reg |= MX31_CSPICTRL_XCH;
721         writel(reg, spi_imx->base + MXC_CSPICTRL);
722 }
723
724 static int mx31_prepare_message(struct spi_imx_data *spi_imx,
725                                 struct spi_message *msg)
726 {
727         return 0;
728 }
729
730 static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
731                                  struct spi_device *spi)
732 {
733         unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
734         unsigned int clk;
735
736         reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
737                 MX31_CSPICTRL_DR_SHIFT;
738         spi_imx->spi_bus_clk = clk;
739
740         if (is_imx35_cspi(spi_imx)) {
741                 reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
742                 reg |= MX31_CSPICTRL_SSCTL;
743         } else {
744                 reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
745         }
746
747         if (spi->mode & SPI_CPHA)
748                 reg |= MX31_CSPICTRL_PHA;
749         if (spi->mode & SPI_CPOL)
750                 reg |= MX31_CSPICTRL_POL;
751         if (spi->mode & SPI_CS_HIGH)
752                 reg |= MX31_CSPICTRL_SSPOL;
753         if (!spi->cs_gpiod)
754                 reg |= (spi->chip_select) <<
755                         (is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
756                                                   MX31_CSPICTRL_CS_SHIFT);
757
758         if (spi_imx->usedma)
759                 reg |= MX31_CSPICTRL_SMC;
760
761         writel(reg, spi_imx->base + MXC_CSPICTRL);
762
763         reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
764         if (spi->mode & SPI_LOOP)
765                 reg |= MX31_TEST_LBC;
766         else
767                 reg &= ~MX31_TEST_LBC;
768         writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
769
770         if (spi_imx->usedma) {
771                 /*
772                  * configure DMA requests when RXFIFO is half full and
773                  * when TXFIFO is half empty
774                  */
775                 writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
776                         spi_imx->base + MX31_CSPI_DMAREG);
777         }
778
779         return 0;
780 }
781
782 static int mx31_rx_available(struct spi_imx_data *spi_imx)
783 {
784         return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
785 }
786
787 static void mx31_reset(struct spi_imx_data *spi_imx)
788 {
789         /* drain receive buffer */
790         while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
791                 readl(spi_imx->base + MXC_CSPIRXDATA);
792 }
793
794 #define MX21_INTREG_RR          (1 << 4)
795 #define MX21_INTREG_TEEN        (1 << 9)
796 #define MX21_INTREG_RREN        (1 << 13)
797
798 #define MX21_CSPICTRL_POL       (1 << 5)
799 #define MX21_CSPICTRL_PHA       (1 << 6)
800 #define MX21_CSPICTRL_SSPOL     (1 << 8)
801 #define MX21_CSPICTRL_XCH       (1 << 9)
802 #define MX21_CSPICTRL_ENABLE    (1 << 10)
803 #define MX21_CSPICTRL_MASTER    (1 << 11)
804 #define MX21_CSPICTRL_DR_SHIFT  14
805 #define MX21_CSPICTRL_CS_SHIFT  19
806
807 static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
808 {
809         unsigned int val = 0;
810
811         if (enable & MXC_INT_TE)
812                 val |= MX21_INTREG_TEEN;
813         if (enable & MXC_INT_RR)
814                 val |= MX21_INTREG_RREN;
815
816         writel(val, spi_imx->base + MXC_CSPIINT);
817 }
818
819 static void mx21_trigger(struct spi_imx_data *spi_imx)
820 {
821         unsigned int reg;
822
823         reg = readl(spi_imx->base + MXC_CSPICTRL);
824         reg |= MX21_CSPICTRL_XCH;
825         writel(reg, spi_imx->base + MXC_CSPICTRL);
826 }
827
828 static int mx21_prepare_message(struct spi_imx_data *spi_imx,
829                                 struct spi_message *msg)
830 {
831         return 0;
832 }
833
834 static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
835                                  struct spi_device *spi)
836 {
837         unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
838         unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
839         unsigned int clk;
840
841         reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
842                 << MX21_CSPICTRL_DR_SHIFT;
843         spi_imx->spi_bus_clk = clk;
844
845         reg |= spi_imx->bits_per_word - 1;
846
847         if (spi->mode & SPI_CPHA)
848                 reg |= MX21_CSPICTRL_PHA;
849         if (spi->mode & SPI_CPOL)
850                 reg |= MX21_CSPICTRL_POL;
851         if (spi->mode & SPI_CS_HIGH)
852                 reg |= MX21_CSPICTRL_SSPOL;
853         if (!spi->cs_gpiod)
854                 reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;
855
856         writel(reg, spi_imx->base + MXC_CSPICTRL);
857
858         return 0;
859 }
860
861 static int mx21_rx_available(struct spi_imx_data *spi_imx)
862 {
863         return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
864 }
865
866 static void mx21_reset(struct spi_imx_data *spi_imx)
867 {
868         writel(1, spi_imx->base + MXC_RESET);
869 }
870
871 #define MX1_INTREG_RR           (1 << 3)
872 #define MX1_INTREG_TEEN         (1 << 8)
873 #define MX1_INTREG_RREN         (1 << 11)
874
875 #define MX1_CSPICTRL_POL        (1 << 4)
876 #define MX1_CSPICTRL_PHA        (1 << 5)
877 #define MX1_CSPICTRL_XCH        (1 << 8)
878 #define MX1_CSPICTRL_ENABLE     (1 << 9)
879 #define MX1_CSPICTRL_MASTER     (1 << 10)
880 #define MX1_CSPICTRL_DR_SHIFT   13
881
882 static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
883 {
884         unsigned int val = 0;
885
886         if (enable & MXC_INT_TE)
887                 val |= MX1_INTREG_TEEN;
888         if (enable & MXC_INT_RR)
889                 val |= MX1_INTREG_RREN;
890
891         writel(val, spi_imx->base + MXC_CSPIINT);
892 }
893
894 static void mx1_trigger(struct spi_imx_data *spi_imx)
895 {
896         unsigned int reg;
897
898         reg = readl(spi_imx->base + MXC_CSPICTRL);
899         reg |= MX1_CSPICTRL_XCH;
900         writel(reg, spi_imx->base + MXC_CSPICTRL);
901 }
902
903 static int mx1_prepare_message(struct spi_imx_data *spi_imx,
904                                struct spi_message *msg)
905 {
906         return 0;
907 }
908
909 static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
910                                 struct spi_device *spi)
911 {
912         unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
913         unsigned int clk;
914
915         reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
916                 MX1_CSPICTRL_DR_SHIFT;
917         spi_imx->spi_bus_clk = clk;
918
919         reg |= spi_imx->bits_per_word - 1;
920
921         if (spi->mode & SPI_CPHA)
922                 reg |= MX1_CSPICTRL_PHA;
923         if (spi->mode & SPI_CPOL)
924                 reg |= MX1_CSPICTRL_POL;
925
926         writel(reg, spi_imx->base + MXC_CSPICTRL);
927
928         return 0;
929 }
930
931 static int mx1_rx_available(struct spi_imx_data *spi_imx)
932 {
933         return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
934 }
935
936 static void mx1_reset(struct spi_imx_data *spi_imx)
937 {
938         writel(1, spi_imx->base + MXC_RESET);
939 }
940
941 static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
942         .intctrl = mx1_intctrl,
943         .prepare_message = mx1_prepare_message,
944         .prepare_transfer = mx1_prepare_transfer,
945         .trigger = mx1_trigger,
946         .rx_available = mx1_rx_available,
947         .reset = mx1_reset,
948         .fifo_size = 8,
949         .has_dmamode = false,
950         .dynamic_burst = false,
951         .has_slavemode = false,
952         .devtype = IMX1_CSPI,
953 };
954
955 static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
956         .intctrl = mx21_intctrl,
957         .prepare_message = mx21_prepare_message,
958         .prepare_transfer = mx21_prepare_transfer,
959         .trigger = mx21_trigger,
960         .rx_available = mx21_rx_available,
961         .reset = mx21_reset,
962         .fifo_size = 8,
963         .has_dmamode = false,
964         .dynamic_burst = false,
965         .has_slavemode = false,
966         .devtype = IMX21_CSPI,
967 };
968
969 static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
970         /* The i.MX27 CSPI shares its functions with the i.MX21 one */
971         .intctrl = mx21_intctrl,
972         .prepare_message = mx21_prepare_message,
973         .prepare_transfer = mx21_prepare_transfer,
974         .trigger = mx21_trigger,
975         .rx_available = mx21_rx_available,
976         .reset = mx21_reset,
977         .fifo_size = 8,
978         .has_dmamode = false,
979         .dynamic_burst = false,
980         .has_slavemode = false,
981         .devtype = IMX27_CSPI,
982 };
983
984 static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
985         .intctrl = mx31_intctrl,
986         .prepare_message = mx31_prepare_message,
987         .prepare_transfer = mx31_prepare_transfer,
988         .trigger = mx31_trigger,
989         .rx_available = mx31_rx_available,
990         .reset = mx31_reset,
991         .fifo_size = 8,
992         .has_dmamode = false,
993         .dynamic_burst = false,
994         .has_slavemode = false,
995         .devtype = IMX31_CSPI,
996 };
997
998 static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
999         /* The i.MX35 and later CSPIs share their functions with the i.MX31 one */
1000         .intctrl = mx31_intctrl,
1001         .prepare_message = mx31_prepare_message,
1002         .prepare_transfer = mx31_prepare_transfer,
1003         .trigger = mx31_trigger,
1004         .rx_available = mx31_rx_available,
1005         .reset = mx31_reset,
1006         .fifo_size = 8,
1007         .has_dmamode = true,
1008         .dynamic_burst = false,
1009         .has_slavemode = false,
1010         .devtype = IMX35_CSPI,
1011 };
1012
1013 static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
1014         .intctrl = mx51_ecspi_intctrl,
1015         .prepare_message = mx51_ecspi_prepare_message,
1016         .prepare_transfer = mx51_ecspi_prepare_transfer,
1017         .trigger = mx51_ecspi_trigger,
1018         .rx_available = mx51_ecspi_rx_available,
1019         .reset = mx51_ecspi_reset,
1020         .setup_wml = mx51_setup_wml,
1021         .disable_dma = mx51_disable_dma,
1022         .fifo_size = 64,
1023         .has_dmamode = true,
1024         .dynamic_burst = true,
1025         .has_slavemode = true,
1026         .disable = mx51_ecspi_disable,
1027         .devtype = IMX51_ECSPI,
1028 };
1029
1030 static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
1031         .intctrl = mx51_ecspi_intctrl,
1032         .prepare_message = mx51_ecspi_prepare_message,
1033         .prepare_transfer = mx51_ecspi_prepare_transfer,
1034         .trigger = mx51_ecspi_trigger,
1035         .rx_available = mx51_ecspi_rx_available,
1036         .disable_dma = mx51_disable_dma,
1037         .reset = mx51_ecspi_reset,
1038         .fifo_size = 64,
1039         .has_dmamode = true,
1040         .has_slavemode = true,
1041         .disable = mx51_ecspi_disable,
1042         .devtype = IMX53_ECSPI,
1043 };
1044
1045 static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
1046         .intctrl = mx51_ecspi_intctrl,
1047         .prepare_message = mx51_ecspi_prepare_message,
1048         .prepare_transfer = mx51_ecspi_prepare_transfer,
1049         .trigger = mx51_ecspi_trigger,
1050         .rx_available = mx51_ecspi_rx_available,
1051         .reset = mx51_ecspi_reset,
1052         .setup_wml = mx51_setup_wml,
1053         .fifo_size = 64,
1054         .has_dmamode = true,
1055         .dynamic_burst = true,
1056         .has_slavemode = true,
1057         .tx_glitch_fixed = true,
1058         .disable = mx51_ecspi_disable,
1059         .devtype = IMX51_ECSPI,
1060 };
1061
1062 static const struct of_device_id spi_imx_dt_ids[] = {
1063         { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
1064         { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
1065         { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
1066         { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
1067         { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
1068         { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
1069         { .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
1070         { .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
1071         { /* sentinel */ }
1072 };
1073 MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
1074
1075 static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
1076 {
1077         u32 ctrl;
1078
1079         ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
1080         ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
1081         ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
1082         writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
1083 }
1084
1085 static void spi_imx_push(struct spi_imx_data *spi_imx)
1086 {
1087         unsigned int burst_len;
1088
1089         /*
1090          * Reload the FIFO when the number of bytes remaining in the
1091          * current burst reaches 0. This only applies when bits_per_word is a
1092          * multiple of 8.
1093          */
1094         if (!spi_imx->remainder) {
1095                 if (spi_imx->dynamic_burst) {
1096
1097                         /* We need to deal with unaligned data first */
1098                         burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;
1099
1100                         if (!burst_len)
1101                                 burst_len = MX51_ECSPI_CTRL_MAX_BURST;
1102
1103                         spi_imx_set_burst_len(spi_imx, burst_len * 8);
1104
1105                         spi_imx->remainder = burst_len;
1106                 } else {
1107                         spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
1108                 }
1109         }
1110
1111         while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
1112                 if (!spi_imx->count)
1113                         break;
1114                 if (spi_imx->dynamic_burst &&
1115                     spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
1116                         break;
1117                 spi_imx->tx(spi_imx);
1118                 spi_imx->txfifo++;
1119         }
1120
1121         if (!spi_imx->slave_mode)
1122                 spi_imx->devtype_data->trigger(spi_imx);
1123 }
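
/*
 * Worked example of the dynamic burst handling above (illustrative): for a
 * 700-byte transfer, the first reload uses burst_len = 700 % 512 = 188, so
 * the burst length is set to 188 * 8 bits and up to DIV_ROUND_UP(188, 4) =
 * 47 FIFO words are pushed; the remaining 512 bytes then go out as one full
 * MX51_ECSPI_CTRL_MAX_BURST burst.
 */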
1124
1125 static irqreturn_t spi_imx_isr(int irq, void *dev_id)
1126 {
1127         struct spi_imx_data *spi_imx = dev_id;
1128
1129         while (spi_imx->txfifo &&
1130                spi_imx->devtype_data->rx_available(spi_imx)) {
1131                 spi_imx->rx(spi_imx);
1132                 spi_imx->txfifo--;
1133         }
1134
1135         if (spi_imx->count) {
1136                 spi_imx_push(spi_imx);
1137                 return IRQ_HANDLED;
1138         }
1139
1140         if (spi_imx->txfifo) {
1141                 /* No data left to push, but still waiting for RX data;
1142                  * enable the receive data available interrupt.
1143                  */
1144                 spi_imx->devtype_data->intctrl(
1145                                 spi_imx, MXC_INT_RR);
1146                 return IRQ_HANDLED;
1147         }
1148
1149         spi_imx->devtype_data->intctrl(spi_imx, 0);
1150         complete(&spi_imx->xfer_done);
1151
1152         return IRQ_HANDLED;
1153 }
1154
1155 static int spi_imx_dma_configure(struct spi_master *master)
1156 {
1157         int ret;
1158         enum dma_slave_buswidth buswidth;
1159         struct dma_slave_config rx = {}, tx = {};
1160         struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1161
1162         switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
1163         case 4:
1164                 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
1165                 break;
1166         case 2:
1167                 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
1168                 break;
1169         case 1:
1170                 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
1171                 break;
1172         default:
1173                 return -EINVAL;
1174         }
1175
1176         tx.direction = DMA_MEM_TO_DEV;
1177         tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
1178         tx.dst_addr_width = buswidth;
1179         tx.dst_maxburst = spi_imx->wml;
1180         ret = dmaengine_slave_config(master->dma_tx, &tx);
1181         if (ret) {
1182                 dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
1183                 return ret;
1184         }
1185
1186         rx.direction = DMA_DEV_TO_MEM;
1187         rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
1188         rx.src_addr_width = buswidth;
1189         rx.src_maxburst = spi_imx->wml;
1190         ret = dmaengine_slave_config(master->dma_rx, &rx);
1191         if (ret) {
1192                 dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
1193                 return ret;
1194         }
1195
1196         return 0;
1197 }
1198
1199 static int spi_imx_setupxfer(struct spi_device *spi,
1200                                  struct spi_transfer *t)
1201 {
1202         struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1203
1204         if (!t)
1205                 return 0;
1206
1207         if (!t->speed_hz) {
1208                 if (!spi->max_speed_hz) {
1209                         dev_err(&spi->dev, "no speed_hz provided!\n");
1210                         return -EINVAL;
1211                 }
1212                 dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
1213                 spi_imx->spi_bus_clk = spi->max_speed_hz;
1214         } else
1215                 spi_imx->spi_bus_clk = t->speed_hz;
1216
1217         spi_imx->bits_per_word = t->bits_per_word;
1218
1219         /*
1220          * Initialize the functions for transfer. To transfer non-byte-aligned
1221          * words, we have to use multiple word-size bursts; we can't use
1222          * dynamic_burst in that case.
1223          */
1224         if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
1225             !(spi->mode & SPI_CS_WORD) &&
1226             (spi_imx->bits_per_word == 8 ||
1227             spi_imx->bits_per_word == 16 ||
1228             spi_imx->bits_per_word == 32)) {
1229
1230                 spi_imx->rx = spi_imx_buf_rx_swap;
1231                 spi_imx->tx = spi_imx_buf_tx_swap;
1232                 spi_imx->dynamic_burst = 1;
1233
1234         } else {
1235                 if (spi_imx->bits_per_word <= 8) {
1236                         spi_imx->rx = spi_imx_buf_rx_u8;
1237                         spi_imx->tx = spi_imx_buf_tx_u8;
1238                 } else if (spi_imx->bits_per_word <= 16) {
1239                         spi_imx->rx = spi_imx_buf_rx_u16;
1240                         spi_imx->tx = spi_imx_buf_tx_u16;
1241                 } else {
1242                         spi_imx->rx = spi_imx_buf_rx_u32;
1243                         spi_imx->tx = spi_imx_buf_tx_u32;
1244                 }
1245                 spi_imx->dynamic_burst = 0;
1246         }
1247
1248         if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
1249                 spi_imx->usedma = true;
1250         else
1251                 spi_imx->usedma = false;
1252
1253         if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
1254                 spi_imx->rx = mx53_ecspi_rx_slave;
1255                 spi_imx->tx = mx53_ecspi_tx_slave;
1256                 spi_imx->slave_burst = t->len;
1257         }
1258
1259         spi_imx->devtype_data->prepare_transfer(spi_imx, spi);
1260
1261         return 0;
1262 }
1263
1264 static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
1265 {
1266         struct spi_master *master = spi_imx->bitbang.master;
1267
1268         if (master->dma_rx) {
1269                 dma_release_channel(master->dma_rx);
1270                 master->dma_rx = NULL;
1271         }
1272
1273         if (master->dma_tx) {
1274                 dma_release_channel(master->dma_tx);
1275                 master->dma_tx = NULL;
1276         }
1277 }
1278
1279 static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
1280                              struct spi_master *master)
1281 {
1282         int ret;
1283
1284         spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
1285
1286         /* Prepare for TX DMA: */
1287         master->dma_tx = dma_request_chan(dev, "tx");
1288         if (IS_ERR(master->dma_tx)) {
1289                 ret = PTR_ERR(master->dma_tx);
1290                 dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
1291                 master->dma_tx = NULL;
1292                 goto err;
1293         }
1294
1295         /* Prepare for RX : */
1296         master->dma_rx = dma_request_chan(dev, "rx");
1297         if (IS_ERR(master->dma_rx)) {
1298                 ret = PTR_ERR(master->dma_rx);
1299                 dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
1300                 master->dma_rx = NULL;
1301                 goto err;
1302         }
1303
1304         init_completion(&spi_imx->dma_rx_completion);
1305         init_completion(&spi_imx->dma_tx_completion);
1306         master->can_dma = spi_imx_can_dma;
1307         master->max_dma_len = MAX_SDMA_BD_BYTES;
1308         spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
1309                                          SPI_MASTER_MUST_TX;
1310
1311         return 0;
1312 err:
1313         spi_imx_sdma_exit(spi_imx);
1314         return ret;
1315 }
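
/*
 * DMA is only used when the controller's device-tree node provides "rx" and
 * "tx" channels, e.g. (illustrative snippet, the SDMA cells are SoC
 * specific):
 *
 *     dmas = <&sdma 3 7 1>, <&sdma 4 7 2>;
 *     dma-names = "rx", "tx";
 *
 * If either channel is missing, dma_request_chan() fails here and the
 * driver falls back to PIO transfers.
 */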
1316
1317 static void spi_imx_dma_rx_callback(void *cookie)
1318 {
1319         struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1320
1321         complete(&spi_imx->dma_rx_completion);
1322 }
1323
1324 static void spi_imx_dma_tx_callback(void *cookie)
1325 {
1326         struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1327
1328         complete(&spi_imx->dma_tx_completion);
1329 }
1330
1331 static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
1332 {
1333         unsigned long timeout = 0;
1334
1335         /* Time for the actual data transfer plus the HW-related CS change delay */
1336         timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
1337
1338         /* Add extra second for scheduler related activities */
1339         timeout += 1;
1340
1341         /* Double calculated timeout */
1342         return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
1343 }
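
/*
 * Example (illustrative): for a 4096-byte transfer at a 1 MHz bus clock the
 * integer division yields 0 seconds, the extra second makes it 1, and the
 * doubled value converts to a 2000 ms timeout in jiffies.
 */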
1344
1345 static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
1346                                 struct spi_transfer *transfer)
1347 {
1348         struct dma_async_tx_descriptor *desc_tx, *desc_rx;
1349         unsigned long transfer_timeout;
1350         unsigned long timeout;
1351         struct spi_master *master = spi_imx->bitbang.master;
1352         struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
1353         struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
1354         unsigned int bytes_per_word, i;
1355         int ret;
1356
1357         /* Get the right burst length from the last sg to ensure no tail data */
1358         bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
1359         for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
1360                 if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
1361                         break;
1362         }
1363         /* Use 1 as the wml if no suitable burst length was found */
1364         if (i == 0)
1365                 i = 1;
1366
1367         spi_imx->wml =  i;
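        /*
         * Example (illustrative): if the last RX sg entry is 96 bytes with
         * 8 bits_per_word on a 64-entry FIFO, the loop starts at i = 32 and
         * 96 % 32 == 0, so wml becomes 32.
         */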
1368
1369         ret = spi_imx_dma_configure(master);
1370         if (ret)
1371                 goto dma_failure_no_start;
1372
1373         if (!spi_imx->devtype_data->setup_wml) {
1374                 dev_err(spi_imx->dev, "No setup_wml()?\n");
1375                 ret = -EINVAL;
1376                 goto dma_failure_no_start;
1377         }
1378         spi_imx->devtype_data->setup_wml(spi_imx);
1379
1380         /*
1381          * The TX DMA setup starts the transfer, so make sure RX is configured
1382          * before TX.
1383          */
1384         desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
1385                                 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
1386                                 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1387         if (!desc_rx) {
1388                 ret = -EINVAL;
1389                 goto dma_failure_no_start;
1390         }
1391
1392         desc_rx->callback = spi_imx_dma_rx_callback;
1393         desc_rx->callback_param = (void *)spi_imx;
1394         dmaengine_submit(desc_rx);
1395         reinit_completion(&spi_imx->dma_rx_completion);
1396         dma_async_issue_pending(master->dma_rx);
1397
1398         desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
1399                                 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
1400                                 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1401         if (!desc_tx) {
1402                 dmaengine_terminate_all(master->dma_tx);
1403                 dmaengine_terminate_all(master->dma_rx);
1404                 return -EINVAL;
1405         }
1406
1407         desc_tx->callback = spi_imx_dma_tx_callback;
1408         desc_tx->callback_param = (void *)spi_imx;
1409         dmaengine_submit(desc_tx);
1410         reinit_completion(&spi_imx->dma_tx_completion);
1411         dma_async_issue_pending(master->dma_tx);
1412
1413         transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
1414
1415         /* Wait for the SDMA to finish the data transfer. */
1416         timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
1417                                                 transfer_timeout);
1418         if (!timeout) {
1419                 dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
1420                 dmaengine_terminate_all(master->dma_tx);
1421                 dmaengine_terminate_all(master->dma_rx);
1422                 return -ETIMEDOUT;
1423         }
1424
1425         timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
1426                                               transfer_timeout);
1427         if (!timeout) {
1428                 dev_err(&master->dev, "I/O Error in DMA RX\n");
1429                 spi_imx->devtype_data->reset(spi_imx);
1430                 dmaengine_terminate_all(master->dma_rx);
1431                 return -ETIMEDOUT;
1432         }
1433
1434         return transfer->len;
1435 /* fallback to pio */
1436 dma_failure_no_start:
1437         transfer->error |= SPI_TRANS_FAIL_NO_START;
1438         return ret;
1439 }
1440
1441 static int spi_imx_pio_transfer(struct spi_device *spi,
1442                                 struct spi_transfer *transfer)
1443 {
1444         struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1445         unsigned long transfer_timeout;
1446         unsigned long timeout;
1447
1448         spi_imx->tx_buf = transfer->tx_buf;
1449         spi_imx->rx_buf = transfer->rx_buf;
1450         spi_imx->count = transfer->len;
1451         spi_imx->txfifo = 0;
1452         spi_imx->remainder = 0;
1453
1454         reinit_completion(&spi_imx->xfer_done);
1455
1456         spi_imx_push(spi_imx);
1457
1458         spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
1459
1460         transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
1461
1462         timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
1463                                               transfer_timeout);
1464         if (!timeout) {
1465                 dev_err(&spi->dev, "I/O Error in PIO\n");
1466                 spi_imx->devtype_data->reset(spi_imx);
1467                 return -ETIMEDOUT;
1468         }
1469
1470         return transfer->len;
1471 }
1472
1473 static int spi_imx_pio_transfer_slave(struct spi_device *spi,
1474                                       struct spi_transfer *transfer)
1475 {
1476         struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1477         int ret = transfer->len;
1478
1479         if (is_imx53_ecspi(spi_imx) &&
1480             transfer->len > MX53_MAX_TRANSFER_BYTES) {
1481                 dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
1482                         MX53_MAX_TRANSFER_BYTES);
1483                 return -EMSGSIZE;
1484         }
1485
1486         spi_imx->tx_buf = transfer->tx_buf;
1487         spi_imx->rx_buf = transfer->rx_buf;
1488         spi_imx->count = transfer->len;
1489         spi_imx->txfifo = 0;
1490         spi_imx->remainder = 0;
1491
1492         reinit_completion(&spi_imx->xfer_done);
1493         spi_imx->slave_aborted = false;
1494
1495         spi_imx_push(spi_imx);
1496
1497         spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
1498
1499         if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
1500             spi_imx->slave_aborted) {
1501                 dev_dbg(&spi->dev, "interrupted\n");
1502                 ret = -EINTR;
1503         }
1504
1505         /* The eCSPI has a HW issue when working in slave mode:
1506          * after 64 words have been written to the TXFIFO, even once the
1507          * TXFIFO becomes empty, ECSPI_TXDATA keeps shifting out the last
1508          * word of data, so we have to disable the eCSPI when in slave mode
1509          * after the transfer completes
1510          */
1511         if (spi_imx->devtype_data->disable)
1512                 spi_imx->devtype_data->disable(spi_imx);
1513
1514         return ret;
1515 }
1516
1517 static int spi_imx_transfer(struct spi_device *spi,
1518                                 struct spi_transfer *transfer)
1519 {
1520         struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1521
1522         transfer->effective_speed_hz = spi_imx->spi_bus_clk;
1523
1524         /* flush rxfifo before transfer */
1525         while (spi_imx->devtype_data->rx_available(spi_imx))
1526                 readl(spi_imx->base + MXC_CSPIRXDATA);
1527
1528         if (spi_imx->slave_mode)
1529                 return spi_imx_pio_transfer_slave(spi, transfer);
1530
1531         if (spi_imx->usedma)
1532                 return spi_imx_dma_transfer(spi_imx, transfer);
1533
1534         return spi_imx_pio_transfer(spi, transfer);
1535 }
1536
1537 static int spi_imx_setup(struct spi_device *spi)
1538 {
1539         dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
1540                  spi->mode, spi->bits_per_word, spi->max_speed_hz);
1541
1542         return 0;
1543 }
1544
1545 static void spi_imx_cleanup(struct spi_device *spi)
1546 {
1547 }
1548
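/*
 * Hold a runtime-PM reference (and therefore keep the clocks enabled) for
 * the duration of the message; the reference is dropped again in
 * spi_imx_unprepare_message().
 */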
1549 static int
1550 spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
1551 {
1552         struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1553         int ret;
1554
1555         ret = pm_runtime_get_sync(spi_imx->dev);
1556         if (ret < 0) {
1557                 pm_runtime_put_noidle(spi_imx->dev);
1558                 dev_err(spi_imx->dev, "failed to enable clock\n");
1559                 return ret;
1560         }
1561
1562         ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
1563         if (ret) {
1564                 pm_runtime_mark_last_busy(spi_imx->dev);
1565                 pm_runtime_put_autosuspend(spi_imx->dev);
1566         }
1567
1568         return ret;
1569 }
1570
1571 static int
1572 spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
1573 {
1574         struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1575
1576         pm_runtime_mark_last_busy(spi_imx->dev);
1577         pm_runtime_put_autosuspend(spi_imx->dev);
1578         return 0;
1579 }
1580
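/*
 * Abort an in-flight slave transfer: flag the abort and complete xfer_done
 * so spi_imx_pio_transfer_slave() stops waiting and returns -EINTR.
 */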
1581 static int spi_imx_slave_abort(struct spi_master *master)
1582 {
1583         struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1584
1585         spi_imx->slave_aborted = true;
1586         complete(&spi_imx->xfer_done);
1587
1588         return 0;
1589 }
1590
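/*
 * Probe: allocate a master or slave controller depending on the "spi-slave"
 * property, map the registers, request the IRQ, enable the per/ipg clocks,
 * set up runtime PM and (where supported) DMA, then register the controller
 * through the spi_bitbang framework.
 */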
1591 static int spi_imx_probe(struct platform_device *pdev)
1592 {
1593         struct device_node *np = pdev->dev.of_node;
1594         struct spi_master *master;
1595         struct spi_imx_data *spi_imx;
1596         struct resource *res;
1597         int ret, irq, spi_drctl;
1598         const struct spi_imx_devtype_data *devtype_data =
1599                         of_device_get_match_data(&pdev->dev);
1600         bool slave_mode;
1601         u32 val;
1602
1603         slave_mode = devtype_data->has_slavemode &&
1604                         of_property_read_bool(np, "spi-slave");
1605         if (slave_mode)
1606                 master = spi_alloc_slave(&pdev->dev,
1607                                          sizeof(struct spi_imx_data));
1608         else
1609                 master = spi_alloc_master(&pdev->dev,
1610                                           sizeof(struct spi_imx_data));
1611         if (!master)
1612                 return -ENOMEM;
1613
1614         ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
1615         if ((ret < 0) || (spi_drctl >= 0x3)) {
1616                 /* '11' is reserved */
1617                 spi_drctl = 0;
1618         }
1619
1620         platform_set_drvdata(pdev, master);
1621
1622         master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
1623         master->bus_num = np ? -1 : pdev->id;
1624         master->use_gpio_descriptors = true;
1625
1626         spi_imx = spi_master_get_devdata(master);
1627         spi_imx->bitbang.master = master;
1628         spi_imx->dev = &pdev->dev;
1629         spi_imx->slave_mode = slave_mode;
1630
1631         spi_imx->devtype_data = devtype_data;
1632
1633         /*
1634          * Get the number of chip selects from device properties. The value
1635          * may come from the device tree or a board file; if it is not
1636          * defined, default to 3 chip selects, as all the legacy board
1637          * files use at most 3.
1638          */
1639         if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
1640                 master->num_chipselect = val;
1641         else
1642                 master->num_chipselect = 3;
1643
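        /*
         * Illustrative (hypothetical) device-tree fragment showing the
         * optional properties consumed in this probe path; the node name and
         * values below are examples only, not taken from a real board:
         *
         *     &ecspi1 {
         *             num-cs = <2>;
         *             fsl,spi-rdy-drctl = <1>;
         *             cs-gpios = <&gpio4 24 GPIO_ACTIVE_LOW>,
         *                        <&gpio4 25 GPIO_ACTIVE_LOW>;
         *             status = "okay";
         *     };
         */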
1644         spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
1645         spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
1646         spi_imx->bitbang.master->setup = spi_imx_setup;
1647         spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
1648         spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
1649         spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
1650         spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
1651         spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
1652                                              | SPI_NO_CS;
1653         if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
1654             is_imx53_ecspi(spi_imx))
1655                 spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
1656
1657         if (is_imx51_ecspi(spi_imx) &&
1658             device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
1659                 /*
1660                  * When using HW-CS, SPI_CS_WORD can be implemented by simply
1661                  * setting the burst length to the word size. This is
1662                  * considerably faster than manually controlling the CS.
1663                  */
1664                 spi_imx->bitbang.master->mode_bits |= SPI_CS_WORD;
1665
1666         spi_imx->spi_drctl = spi_drctl;
1667
1668         init_completion(&spi_imx->xfer_done);
1669
1670         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1671         spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
1672         if (IS_ERR(spi_imx->base)) {
1673                 ret = PTR_ERR(spi_imx->base);
1674                 goto out_master_put;
1675         }
1676         spi_imx->base_phys = res->start;
1677
1678         irq = platform_get_irq(pdev, 0);
1679         if (irq < 0) {
1680                 ret = irq;
1681                 goto out_master_put;
1682         }
1683
1684         ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
1685                                dev_name(&pdev->dev), spi_imx);
1686         if (ret) {
1687                 dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
1688                 goto out_master_put;
1689         }
1690
1691         spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1692         if (IS_ERR(spi_imx->clk_ipg)) {
1693                 ret = PTR_ERR(spi_imx->clk_ipg);
1694                 goto out_master_put;
1695         }
1696
1697         spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
1698         if (IS_ERR(spi_imx->clk_per)) {
1699                 ret = PTR_ERR(spi_imx->clk_per);
1700                 goto out_master_put;
1701         }
1702
1703         ret = clk_prepare_enable(spi_imx->clk_per);
1704         if (ret)
1705                 goto out_master_put;
1706
1707         ret = clk_prepare_enable(spi_imx->clk_ipg);
1708         if (ret)
1709                 goto out_put_per;
1710
1711         pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
1712         pm_runtime_use_autosuspend(spi_imx->dev);
1713         pm_runtime_get_noresume(spi_imx->dev);
1714         pm_runtime_set_active(spi_imx->dev);
1715         pm_runtime_enable(spi_imx->dev);
1716
1717         spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
1718         /*
1719          * DMA is only validated on i.mx35 and i.mx6 so far; this constraint
1720          * can be removed once it has been validated on other chips.
1721          */
1722         if (spi_imx->devtype_data->has_dmamode) {
1723                 ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
1724                 if (ret == -EPROBE_DEFER)
1725                         goto out_runtime_pm_put;
1726
1727                 if (ret < 0)
1728                         dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
1729                                 ret);
1730         }
1731
1732         spi_imx->devtype_data->reset(spi_imx);
1733
1734         spi_imx->devtype_data->intctrl(spi_imx, 0);
1735
1736         master->dev.of_node = pdev->dev.of_node;
1737         ret = spi_bitbang_start(&spi_imx->bitbang);
1738         if (ret) {
1739                 dev_err_probe(&pdev->dev, ret, "bitbang start failed\n");
1740                 goto out_bitbang_start;
1741         }
1742
1743         pm_runtime_mark_last_busy(spi_imx->dev);
1744         pm_runtime_put_autosuspend(spi_imx->dev);
1745
1746         return ret;
1747
1748 out_bitbang_start:
1749         if (spi_imx->devtype_data->has_dmamode)
1750                 spi_imx_sdma_exit(spi_imx);
1751 out_runtime_pm_put:
1752         pm_runtime_dont_use_autosuspend(spi_imx->dev);
1753         pm_runtime_set_suspended(&pdev->dev);
1754         pm_runtime_disable(spi_imx->dev);
1755
1756         clk_disable_unprepare(spi_imx->clk_ipg);
1757 out_put_per:
1758         clk_disable_unprepare(spi_imx->clk_per);
1759 out_master_put:
1760         spi_master_put(master);
1761
1762         return ret;
1763 }
1764
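/*
 * Remove path: unregister from the bitbang framework, resume the device so
 * the control register can be cleared, then tear down runtime PM, release
 * the DMA channels and drop the controller reference.
 */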
1765 static int spi_imx_remove(struct platform_device *pdev)
1766 {
1767         struct spi_master *master = platform_get_drvdata(pdev);
1768         struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1769         int ret;
1770
1771         spi_bitbang_stop(&spi_imx->bitbang);
1772
1773         ret = pm_runtime_get_sync(spi_imx->dev);
1774         if (ret < 0) {
1775                 pm_runtime_put_noidle(spi_imx->dev);
1776                 dev_err(spi_imx->dev, "failed to enable clock\n");
1777                 return ret;
1778         }
1779
1780         writel(0, spi_imx->base + MXC_CSPICTRL);
1781
1782         pm_runtime_dont_use_autosuspend(spi_imx->dev);
1783         pm_runtime_put_sync(spi_imx->dev);
1784         pm_runtime_disable(spi_imx->dev);
1785
1786         spi_imx_sdma_exit(spi_imx);
1787         spi_master_put(master);
1788
1789         return 0;
1790 }
1791
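/*
 * Runtime PM callbacks: the per and ipg clocks are gated while the
 * controller is idle and re-enabled before it is used again.
 */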
1792 static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
1793 {
1794         struct spi_master *master = dev_get_drvdata(dev);
1795         struct spi_imx_data *spi_imx;
1796         int ret;
1797
1798         spi_imx = spi_master_get_devdata(master);
1799
1800         ret = clk_prepare_enable(spi_imx->clk_per);
1801         if (ret)
1802                 return ret;
1803
1804         ret = clk_prepare_enable(spi_imx->clk_ipg);
1805         if (ret) {
1806                 clk_disable_unprepare(spi_imx->clk_per);
1807                 return ret;
1808         }
1809
1810         return 0;
1811 }
1812
1813 static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
1814 {
1815         struct spi_master *master = dev_get_drvdata(dev);
1816         struct spi_imx_data *spi_imx;
1817
1818         spi_imx = spi_master_get_devdata(master);
1819
1820         clk_disable_unprepare(spi_imx->clk_per);
1821         clk_disable_unprepare(spi_imx->clk_ipg);
1822
1823         return 0;
1824 }
1825
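/*
 * System sleep: switch the pins to their sleep state on suspend and back to
 * the default state on resume.
 */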
1826 static int __maybe_unused spi_imx_suspend(struct device *dev)
1827 {
1828         pinctrl_pm_select_sleep_state(dev);
1829         return 0;
1830 }
1831
1832 static int __maybe_unused spi_imx_resume(struct device *dev)
1833 {
1834         pinctrl_pm_select_default_state(dev);
1835         return 0;
1836 }
1837
1838 static const struct dev_pm_ops imx_spi_pm = {
1839         SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
1840                                 spi_imx_runtime_resume, NULL)
1841         SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
1842 };
1843
1844 static struct platform_driver spi_imx_driver = {
1845         .driver = {
1846                    .name = DRIVER_NAME,
1847                    .of_match_table = spi_imx_dt_ids,
1848                    .pm = &imx_spi_pm,
1849         },
1850         .probe = spi_imx_probe,
1851         .remove = spi_imx_remove,
1852 };
1853 module_platform_driver(spi_imx_driver);
1854
1855 MODULE_DESCRIPTION("i.MX SPI Controller driver");
1856 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1857 MODULE_LICENSE("GPL");
1858 MODULE_ALIAS("platform:" DRIVER_NAME);