drivers/spi/spi-tegra210-quad.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright (C) 2020 NVIDIA CORPORATION.
4
5 #include <linux/clk.h>
6 #include <linux/completion.h>
7 #include <linux/delay.h>
8 #include <linux/dmaengine.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/dmapool.h>
11 #include <linux/err.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/iopoll.h>
15 #include <linux/kernel.h>
16 #include <linux/kthread.h>
17 #include <linux/module.h>
18 #include <linux/platform_device.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/reset.h>
23 #include <linux/spi/spi.h>
24 #include <linux/acpi.h>
25 #include <linux/property.h>
26
27 #define QSPI_COMMAND1                           0x000
28 #define QSPI_BIT_LENGTH(x)                      (((x) & 0x1f) << 0)
29 #define QSPI_PACKED                             BIT(5)
30 #define QSPI_INTERFACE_WIDTH_MASK               (0x03 << 7)
31 #define QSPI_INTERFACE_WIDTH(x)                 (((x) & 0x03) << 7)
32 #define QSPI_INTERFACE_WIDTH_SINGLE             QSPI_INTERFACE_WIDTH(0)
33 #define QSPI_INTERFACE_WIDTH_DUAL               QSPI_INTERFACE_WIDTH(1)
34 #define QSPI_INTERFACE_WIDTH_QUAD               QSPI_INTERFACE_WIDTH(2)
35 #define QSPI_SDR_DDR_SEL                        BIT(9)
36 #define QSPI_TX_EN                              BIT(11)
37 #define QSPI_RX_EN                              BIT(12)
38 #define QSPI_CS_SW_VAL                          BIT(20)
39 #define QSPI_CS_SW_HW                           BIT(21)
40
41 #define QSPI_CS_POL_INACTIVE(n)                 (1 << (22 + (n)))
42 #define QSPI_CS_POL_INACTIVE_MASK               (0xF << 22)
43 #define QSPI_CS_SEL_0                           (0 << 26)
44 #define QSPI_CS_SEL_1                           (1 << 26)
45 #define QSPI_CS_SEL_2                           (2 << 26)
46 #define QSPI_CS_SEL_3                           (3 << 26)
47 #define QSPI_CS_SEL_MASK                        (3 << 26)
48 #define QSPI_CS_SEL(x)                          (((x) & 0x3) << 26)
49
50 #define QSPI_CONTROL_MODE_0                     (0 << 28)
51 #define QSPI_CONTROL_MODE_3                     (3 << 28)
52 #define QSPI_CONTROL_MODE_MASK                  (3 << 28)
53 #define QSPI_M_S                                BIT(30)
54 #define QSPI_PIO                                BIT(31)
55
56 #define QSPI_COMMAND2                           0x004
57 #define QSPI_TX_TAP_DELAY(x)                    (((x) & 0x3f) << 10)
58 #define QSPI_RX_TAP_DELAY(x)                    (((x) & 0xff) << 0)
59
60 #define QSPI_CS_TIMING1                         0x008
61 #define QSPI_SETUP_HOLD(setup, hold)            (((setup) << 4) | (hold))
62
63 #define QSPI_CS_TIMING2                         0x00c
64 #define CYCLES_BETWEEN_PACKETS_0(x)             (((x) & 0x1f) << 0)
65 #define CS_ACTIVE_BETWEEN_PACKETS_0             BIT(5)
66
67 #define QSPI_TRANS_STATUS                       0x010
68 #define QSPI_BLK_CNT(val)                       (((val) >> 0) & 0xffff)
69 #define QSPI_RDY                                BIT(30)
70
71 #define QSPI_FIFO_STATUS                        0x014
72 #define QSPI_RX_FIFO_EMPTY                      BIT(0)
73 #define QSPI_RX_FIFO_FULL                       BIT(1)
74 #define QSPI_TX_FIFO_EMPTY                      BIT(2)
75 #define QSPI_TX_FIFO_FULL                       BIT(3)
76 #define QSPI_RX_FIFO_UNF                        BIT(4)
77 #define QSPI_RX_FIFO_OVF                        BIT(5)
78 #define QSPI_TX_FIFO_UNF                        BIT(6)
79 #define QSPI_TX_FIFO_OVF                        BIT(7)
80 #define QSPI_ERR                                BIT(8)
81 #define QSPI_TX_FIFO_FLUSH                      BIT(14)
82 #define QSPI_RX_FIFO_FLUSH                      BIT(15)
83 #define QSPI_TX_FIFO_EMPTY_COUNT(val)           (((val) >> 16) & 0x7f)
84 #define QSPI_RX_FIFO_FULL_COUNT(val)            (((val) >> 23) & 0x7f)
85
86 #define QSPI_FIFO_ERROR                         (QSPI_RX_FIFO_UNF | \
87                                                  QSPI_RX_FIFO_OVF | \
88                                                  QSPI_TX_FIFO_UNF | \
89                                                  QSPI_TX_FIFO_OVF)
90 #define QSPI_FIFO_EMPTY                         (QSPI_RX_FIFO_EMPTY | \
91                                                  QSPI_TX_FIFO_EMPTY)
92
93 #define QSPI_TX_DATA                            0x018
94 #define QSPI_RX_DATA                            0x01c
95
96 #define QSPI_DMA_CTL                            0x020
97 #define QSPI_TX_TRIG(n)                         (((n) & 0x3) << 15)
98 #define QSPI_TX_TRIG_1                          QSPI_TX_TRIG(0)
99 #define QSPI_TX_TRIG_4                          QSPI_TX_TRIG(1)
100 #define QSPI_TX_TRIG_8                          QSPI_TX_TRIG(2)
101 #define QSPI_TX_TRIG_16                         QSPI_TX_TRIG(3)
102
103 #define QSPI_RX_TRIG(n)                         (((n) & 0x3) << 19)
104 #define QSPI_RX_TRIG_1                          QSPI_RX_TRIG(0)
105 #define QSPI_RX_TRIG_4                          QSPI_RX_TRIG(1)
106 #define QSPI_RX_TRIG_8                          QSPI_RX_TRIG(2)
107 #define QSPI_RX_TRIG_16                         QSPI_RX_TRIG(3)
108
109 #define QSPI_DMA_EN                             BIT(31)
110
111 #define QSPI_DMA_BLK                            0x024
112 #define QSPI_DMA_BLK_SET(x)                     (((x) & 0xffff) << 0)
113
114 #define QSPI_TX_FIFO                            0x108
115 #define QSPI_RX_FIFO                            0x188
116
117 #define QSPI_FIFO_DEPTH                         64
118
119 #define QSPI_INTR_MASK                          0x18c
120 #define QSPI_INTR_RX_FIFO_UNF_MASK              BIT(25)
121 #define QSPI_INTR_RX_FIFO_OVF_MASK              BIT(26)
122 #define QSPI_INTR_TX_FIFO_UNF_MASK              BIT(27)
123 #define QSPI_INTR_TX_FIFO_OVF_MASK              BIT(28)
124 #define QSPI_INTR_RDY_MASK                      BIT(29)
125 #define QSPI_INTR_RX_TX_FIFO_ERR                (QSPI_INTR_RX_FIFO_UNF_MASK | \
126                                                  QSPI_INTR_RX_FIFO_OVF_MASK | \
127                                                  QSPI_INTR_TX_FIFO_UNF_MASK | \
128                                                  QSPI_INTR_TX_FIFO_OVF_MASK)
129
130 #define QSPI_MISC_REG                           0x194
131 #define QSPI_NUM_DUMMY_CYCLE(x)                 (((x) & 0xff) << 0)
132 #define QSPI_DUMMY_CYCLES_MAX                   0xff
133
134 #define QSPI_CMB_SEQ_CMD                        0x19c
135 #define QSPI_COMMAND_VALUE_SET(x)               (((x) & 0xFF) << 0)
136
137 #define QSPI_CMB_SEQ_CMD_CFG                    0x1a0
138 #define QSPI_COMMAND_X1_X2_X4(x)                (((x) & 0x3) << 13)
139 #define QSPI_COMMAND_X1_X2_X4_MASK              (0x03 << 13)
140 #define QSPI_COMMAND_SDR_DDR                    BIT(12)
141 #define QSPI_COMMAND_SIZE_SET(x)                (((x) & 0xFF) << 0)
142
143 #define QSPI_GLOBAL_CONFIG                      0x1a4
144 #define QSPI_CMB_SEQ_EN                         BIT(0)
145
146 #define QSPI_CMB_SEQ_ADDR                       0x1a8
147 #define QSPI_ADDRESS_VALUE_SET(x)               (((x) & 0xFFFF) << 0)
148
149 #define QSPI_CMB_SEQ_ADDR_CFG                   0x1ac
150 #define QSPI_ADDRESS_X1_X2_X4(x)                (((x) & 0x3) << 13)
151 #define QSPI_ADDRESS_X1_X2_X4_MASK              (0x03 << 13)
152 #define QSPI_ADDRESS_SDR_DDR                    BIT(12)
153 #define QSPI_ADDRESS_SIZE_SET(x)                (((x) & 0xFF) << 0)
154
155 #define DATA_DIR_TX                             BIT(0)
156 #define DATA_DIR_RX                             BIT(1)
157
158 #define QSPI_DMA_TIMEOUT                        (msecs_to_jiffies(1000))
159 #define DEFAULT_QSPI_DMA_BUF_LEN                (64 * 1024)
160 #define CMD_TRANSFER                            0
161 #define ADDR_TRANSFER                           1
162 #define DATA_TRANSFER                           2
163
164 struct tegra_qspi_soc_data {
165         bool has_dma;
166         bool cmb_xfer_capable;
167         unsigned int cs_count;
168 };
169
170 struct tegra_qspi_client_data {
171         int tx_clk_tap_delay;
172         int rx_clk_tap_delay;
173 };
174
175 struct tegra_qspi {
176         struct device                           *dev;
177         struct spi_master                       *master;
178         /* lock to protect data accessed by irq */
179         spinlock_t                              lock;
180
181         struct clk                              *clk;
182         void __iomem                            *base;
183         phys_addr_t                             phys;
184         unsigned int                            irq;
185
186         u32                                     cur_speed;
187         unsigned int                            cur_pos;
188         unsigned int                            words_per_32bit;
189         unsigned int                            bytes_per_word;
190         unsigned int                            curr_dma_words;
191         unsigned int                            cur_direction;
192
193         unsigned int                            cur_rx_pos;
194         unsigned int                            cur_tx_pos;
195
196         unsigned int                            dma_buf_size;
197         unsigned int                            max_buf_size;
198         bool                                    is_curr_dma_xfer;
199
200         struct completion                       rx_dma_complete;
201         struct completion                       tx_dma_complete;
202
203         u32                                     tx_status;
204         u32                                     rx_status;
205         u32                                     status_reg;
206         bool                                    is_packed;
207         bool                                    use_dma;
208
209         u32                                     command1_reg;
210         u32                                     dma_control_reg;
211         u32                                     def_command1_reg;
212         u32                                     def_command2_reg;
213         u32                                     spi_cs_timing1;
214         u32                                     spi_cs_timing2;
215         u8                                      dummy_cycles;
216
217         struct completion                       xfer_completion;
218         struct spi_transfer                     *curr_xfer;
219
220         struct dma_chan                         *rx_dma_chan;
221         u32                                     *rx_dma_buf;
222         dma_addr_t                              rx_dma_phys;
223         struct dma_async_tx_descriptor          *rx_dma_desc;
224
225         struct dma_chan                         *tx_dma_chan;
226         u32                                     *tx_dma_buf;
227         dma_addr_t                              tx_dma_phys;
228         struct dma_async_tx_descriptor          *tx_dma_desc;
229         const struct tegra_qspi_soc_data        *soc_data;
230 };
231
232 static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
233 {
234         return readl(tqspi->base + offset);
235 }
236
237 static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
238 {
239         writel(value, tqspi->base + offset);
240
241         /* read back register to make sure that register writes completed */
242         if (offset != QSPI_TX_FIFO)
243                 readl(tqspi->base + QSPI_COMMAND1);
244 }
245
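/*
 * Clear stale state before a new transfer: write-1-to-clear any pending
 * transfer status, mask the RDY and FIFO error interrupts, and clear any
 * FIFO error flags left over from a previous transfer.
 */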
246 static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
247 {
248         u32 value;
249
250         /* write 1 to clear status register */
251         value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
252         tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);
253
254         value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
255         if (!(value & QSPI_INTR_RDY_MASK)) {
256                 value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
257                 tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
258         }
259
260         /* clear fifo status error if any */
261         value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
262         if (value & QSPI_ERR)
263                 tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
264 }
265
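/*
 * Pick packed or unpacked mode for the current transfer and size the next
 * chunk; returns the number of 32-bit FIFO words needed for that chunk.
 */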
266 static unsigned int
267 tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
268 {
269         unsigned int max_word, max_len, total_fifo_words;
270         unsigned int remain_len = t->len - tqspi->cur_pos;
271         unsigned int bits_per_word = t->bits_per_word;
272
273         tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
274
275         /*
276          * Tegra QSPI controller supports packed or unpacked mode transfers.
277          * Packed mode is used for transfers of 8, 16, or 32 bits per word
278          * that are at least 4 bytes long; all other transfers fall back to
279          * unpacked mode.
280          */
281
282         if ((bits_per_word == 8 || bits_per_word == 16 ||
283              bits_per_word == 32) && t->len > 3) {
284                 tqspi->is_packed = true;
285                 tqspi->words_per_32bit = 32 / bits_per_word;
286         } else {
287                 tqspi->is_packed = false;
288                 tqspi->words_per_32bit = 1;
289         }
290
291         if (tqspi->is_packed) {
292                 max_len = min(remain_len, tqspi->max_buf_size);
293                 tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
294                 total_fifo_words = (max_len + 3) / 4;
295         } else {
296                 max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
297                 max_word = min(max_word, tqspi->max_buf_size / 4);
298                 tqspi->curr_dma_words = max_word;
299                 total_fifo_words = max_word;
300         }
301
302         return total_fifo_words;
303 }
304
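/*
 * PIO TX: copy bytes from the client tx_buf into the TX FIFO, packing up to
 * four bytes per FIFO word in packed mode or one SPI word per FIFO word in
 * unpacked mode. Returns the number of SPI words written.
 */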
305 static unsigned int
306 tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
307 {
308         unsigned int written_words, fifo_words_left, count;
309         unsigned int len, tx_empty_count, max_n_32bit, i;
310         u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
311         u32 fifo_status;
312
313         fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
314         tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);
315
316         if (tqspi->is_packed) {
317                 fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
318                 written_words = min(fifo_words_left, tqspi->curr_dma_words);
319                 len = written_words * tqspi->bytes_per_word;
320                 max_n_32bit = DIV_ROUND_UP(len, 4);
321                 for (count = 0; count < max_n_32bit; count++) {
322                         u32 x = 0;
323
324                         for (i = 0; (i < 4) && len; i++, len--)
325                                 x |= (u32)(*tx_buf++) << (i * 8);
326                         tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
327                 }
328
329                 tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
330         } else {
331                 unsigned int write_bytes;
332                 u8 bytes_per_word = tqspi->bytes_per_word;
333
334                 max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
335                 written_words = max_n_32bit;
336                 len = written_words * tqspi->bytes_per_word;
337                 if (len > t->len - tqspi->cur_pos)
338                         len = t->len - tqspi->cur_pos;
339                 write_bytes = len;
340                 for (count = 0; count < max_n_32bit; count++) {
341                         u32 x = 0;
342
343                         for (i = 0; len && (i < bytes_per_word); i++, len--)
344                                 x |= (u32)(*tx_buf++) << (i * 8);
345                         tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
346                 }
347
348                 tqspi->cur_tx_pos += write_bytes;
349         }
350
351         return written_words;
352 }
353
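/*
 * PIO RX: drain the RX FIFO into the client rx_buf, unpacking FIFO words
 * according to the packed/unpacked layout. Returns the number of SPI words
 * read.
 */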
354 static unsigned int
355 tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
356 {
357         u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
358         unsigned int len, rx_full_count, count, i;
359         unsigned int read_words = 0;
360         u32 fifo_status, x;
361
362         fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
363         rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
364         if (tqspi->is_packed) {
365                 len = tqspi->curr_dma_words * tqspi->bytes_per_word;
366                 for (count = 0; count < rx_full_count; count++) {
367                         x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);
368
369                         for (i = 0; len && (i < 4); i++, len--)
370                                 *rx_buf++ = (x >> i * 8) & 0xff;
371                 }
372
373                 read_words += tqspi->curr_dma_words;
374                 tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
375         } else {
376                 u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
377                 u8 bytes_per_word = tqspi->bytes_per_word;
378                 unsigned int read_bytes;
379
380                 len = rx_full_count * bytes_per_word;
381                 if (len > t->len - tqspi->cur_pos)
382                         len = t->len - tqspi->cur_pos;
383                 read_bytes = len;
384                 for (count = 0; count < rx_full_count; count++) {
385                         x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;
386
387                         for (i = 0; len && (i < bytes_per_word); i++, len--)
388                                 *rx_buf++ = (x >> (i * 8)) & 0xff;
389                 }
390
391                 read_words += rx_full_count;
392                 tqspi->cur_rx_pos += read_bytes;
393         }
394
395         return read_words;
396 }
397
398 static void
399 tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
400 {
401         dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
402                                 tqspi->dma_buf_size, DMA_TO_DEVICE);
403
404         /*
405          * In packed mode, each word in FIFO may contain multiple packets
406          * based on bits per word. So all bytes in each FIFO word are valid.
407          *
408          * In unpacked mode, each FIFO word contains a single packet and,
409          * depending on the bits per word, any remaining bits in the FIFO
410          * word are ignored by the hardware.
411          */
412         if (tqspi->is_packed) {
413                 tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
414         } else {
415                 u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
416                 unsigned int i, count, consume, write_bytes;
417
418                 /*
419                  * Fill tx_dma_buf to contain single packet in each word based
420                  * on bits per word from SPI core tx_buf.
421                  */
422                 consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
423                 if (consume > t->len - tqspi->cur_pos)
424                         consume = t->len - tqspi->cur_pos;
425                 write_bytes = consume;
426                 for (count = 0; count < tqspi->curr_dma_words; count++) {
427                         u32 x = 0;
428
429                         for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
430                                 x |= (u32)(*tx_buf++) << (i * 8);
431                         tqspi->tx_dma_buf[count] = x;
432                 }
433
434                 tqspi->cur_tx_pos += write_bytes;
435         }
436
437         dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
438                                    tqspi->dma_buf_size, DMA_TO_DEVICE);
439 }
440
441 static void
442 tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
443 {
444         dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
445                                 tqspi->dma_buf_size, DMA_FROM_DEVICE);
446
447         if (tqspi->is_packed) {
448                 tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
449         } else {
450                 unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
451                 u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
452                 unsigned int i, count, consume, read_bytes;
453
454                 /*
455          * Each FIFO word contains a single data packet.
456                  * Skip invalid bits in each FIFO word based on bits per word
457                  * and align bytes while filling in SPI core rx_buf.
458                  */
459                 consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
460                 if (consume > t->len - tqspi->cur_pos)
461                         consume = t->len - tqspi->cur_pos;
462                 read_bytes = consume;
463                 for (count = 0; count < tqspi->curr_dma_words; count++) {
464                         u32 x = tqspi->rx_dma_buf[count] & rx_mask;
465
466                         for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
467                                 *rx_buf++ = (x >> (i * 8)) & 0xff;
468                 }
469
470                 tqspi->cur_rx_pos += read_bytes;
471         }
472
473         dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
474                                    tqspi->dma_buf_size, DMA_FROM_DEVICE);
475 }
476
477 static void tegra_qspi_dma_complete(void *args)
478 {
479         struct completion *dma_complete = args;
480
481         complete(dma_complete);
482 }
483
484 static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
485 {
486         dma_addr_t tx_dma_phys;
487
488         reinit_completion(&tqspi->tx_dma_complete);
489
490         if (tqspi->is_packed)
491                 tx_dma_phys = t->tx_dma;
492         else
493                 tx_dma_phys = tqspi->tx_dma_phys;
494
495         tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
496                                                          len, DMA_MEM_TO_DEV,
497                                                          DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
498
499         if (!tqspi->tx_dma_desc) {
500                 dev_err(tqspi->dev, "Unable to get TX descriptor\n");
501                 return -EIO;
502         }
503
504         tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
505         tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
506         dmaengine_submit(tqspi->tx_dma_desc);
507         dma_async_issue_pending(tqspi->tx_dma_chan);
508
509         return 0;
510 }
511
512 static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
513 {
514         dma_addr_t rx_dma_phys;
515
516         reinit_completion(&tqspi->rx_dma_complete);
517
518         if (tqspi->is_packed)
519                 rx_dma_phys = t->rx_dma;
520         else
521                 rx_dma_phys = tqspi->rx_dma_phys;
522
523         tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
524                                                          len, DMA_DEV_TO_MEM,
525                                                          DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
526
527         if (!tqspi->rx_dma_desc) {
528                 dev_err(tqspi->dev, "Unable to get RX descriptor\n");
529                 return -EIO;
530         }
531
532         tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
533         tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
534         dmaengine_submit(tqspi->rx_dma_desc);
535         dma_async_issue_pending(tqspi->rx_dma_chan);
536
537         return 0;
538 }
539
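/*
 * Flush both FIFOs and poll until the hardware reports them empty, using
 * the atomic poll variant when called from a context that cannot sleep.
 */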
540 static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
541 {
542         void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
543         u32 val;
544
545         val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
546         if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
547                 return 0;
548
549         val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
550         tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);
551
552         if (!atomic)
553                 return readl_relaxed_poll_timeout(addr, val,
554                                                   (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
555                                                   1000, 1000000);
556
557         return readl_relaxed_poll_timeout_atomic(addr, val,
558                                                  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
559                                                  1000, 1000000);
560 }
561
562 static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
563 {
564         u32 intr_mask;
565
566         intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
567         intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
568         tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
569 }
570
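/*
 * Map the client buffers for DMA. This is only used in packed mode, where
 * the hardware DMAs directly from/to the client buffer; the length is
 * rounded up to a whole number of 32-bit FIFO words.
 */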
571 static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
572 {
573         u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
574         u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
575         unsigned int len;
576
577         len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
578
579         if (t->tx_buf) {
580                 t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
581                 if (dma_mapping_error(tqspi->dev, t->tx_dma))
582                         return -ENOMEM;
583         }
584
585         if (t->rx_buf) {
586                 t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
587                 if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
588                         dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
589                         return -ENOMEM;
590                 }
591         }
592
593         return 0;
594 }
595
596 static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
597 {
598         unsigned int len;
599
600         len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
601
602         dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
603         dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
604 }
605
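/*
 * Program the DMA block count and FIFO trigger levels, configure and start
 * the dmaengine channels for the active directions, then set QSPI_DMA_EN to
 * kick off the hardware transfer.
 */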
606 static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
607 {
608         struct dma_slave_config dma_sconfig = { 0 };
609         unsigned int len;
610         u8 dma_burst;
611         int ret = 0;
612         u32 val;
613
614         if (tqspi->is_packed) {
615                 ret = tegra_qspi_dma_map_xfer(tqspi, t);
616                 if (ret < 0)
617                         return ret;
618         }
619
620         val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
621         tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
622
623         tegra_qspi_unmask_irq(tqspi);
624
625         if (tqspi->is_packed)
626                 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
627         else
628                 len = tqspi->curr_dma_words * 4;
629
630         /* set FIFO trigger levels and DMA burst size based on transfer length */
631         val = 0;
632         if (len & 0xf) {
633                 val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
634                 dma_burst = 1;
635         } else if (((len) >> 4) & 0x1) {
636                 val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
637                 dma_burst = 4;
638         } else {
639                 val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
640                 dma_burst = 8;
641         }
642
643         tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
644         tqspi->dma_control_reg = val;
645
646         dma_sconfig.device_fc = true;
647         if (tqspi->cur_direction & DATA_DIR_TX) {
648                 dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
649                 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
650                 dma_sconfig.dst_maxburst = dma_burst;
651                 ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
652                 if (ret < 0) {
653                         dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
654                         return ret;
655                 }
656
657                 tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
658                 ret = tegra_qspi_start_tx_dma(tqspi, t, len);
659                 if (ret < 0) {
660                         dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
661                         return ret;
662                 }
663         }
664
665         if (tqspi->cur_direction & DATA_DIR_RX) {
666                 dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
667                 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
668                 dma_sconfig.src_maxburst = dma_burst;
669                 ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
670                 if (ret < 0) {
671                         dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
672                         return ret;
673                 }
674
675                 dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
676                                            tqspi->dma_buf_size,
677                                            DMA_FROM_DEVICE);
678
679                 ret = tegra_qspi_start_rx_dma(tqspi, t, len);
680                 if (ret < 0) {
681                         dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
682                         if (tqspi->cur_direction & DATA_DIR_TX)
683                                 dmaengine_terminate_all(tqspi->tx_dma_chan);
684                         return ret;
685                 }
686         }
687
688         tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
689
690         tqspi->is_curr_dma_xfer = true;
691         tqspi->dma_control_reg = val;
692         val |= QSPI_DMA_EN;
693         tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
694
695         return ret;
696 }
697
698 static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
699 {
700         u32 val;
701         unsigned int cur_words;
702
703         if (qspi->cur_direction & DATA_DIR_TX)
704                 cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
705         else
706                 cur_words = qspi->curr_dma_words;
707
708         val = QSPI_DMA_BLK_SET(cur_words - 1);
709         tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);
710
711         tegra_qspi_unmask_irq(qspi);
712
713         qspi->is_curr_dma_xfer = false;
714         val = qspi->command1_reg;
715         val |= QSPI_PIO;
716         tegra_qspi_writel(qspi, val, QSPI_COMMAND1);
717
718         return 0;
719 }
720
721 static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
722 {
723         if (tqspi->tx_dma_buf) {
724                 dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
725                                   tqspi->tx_dma_buf, tqspi->tx_dma_phys);
726                 tqspi->tx_dma_buf = NULL;
727         }
728
729         if (tqspi->tx_dma_chan) {
730                 dma_release_channel(tqspi->tx_dma_chan);
731                 tqspi->tx_dma_chan = NULL;
732         }
733
734         if (tqspi->rx_dma_buf) {
735                 dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
736                                   tqspi->rx_dma_buf, tqspi->rx_dma_phys);
737                 tqspi->rx_dma_buf = NULL;
738         }
739
740         if (tqspi->rx_dma_chan) {
741                 dma_release_channel(tqspi->rx_dma_chan);
742                 tqspi->rx_dma_chan = NULL;
743         }
744 }
745
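/*
 * Request the "rx" and "tx" dmaengine channels and allocate bounce buffers
 * for unpacked-mode transfers. On any failure other than -EPROBE_DEFER the
 * driver falls back to PIO instead of failing the probe.
 */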
746 static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
747 {
748         struct dma_chan *dma_chan;
749         dma_addr_t dma_phys;
750         u32 *dma_buf;
751         int err;
752
753         dma_chan = dma_request_chan(tqspi->dev, "rx");
754         if (IS_ERR(dma_chan)) {
755                 err = PTR_ERR(dma_chan);
756                 goto err_out;
757         }
758
759         tqspi->rx_dma_chan = dma_chan;
760
761         dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
762         if (!dma_buf) {
763                 err = -ENOMEM;
764                 goto err_out;
765         }
766
767         tqspi->rx_dma_buf = dma_buf;
768         tqspi->rx_dma_phys = dma_phys;
769
770         dma_chan = dma_request_chan(tqspi->dev, "tx");
771         if (IS_ERR(dma_chan)) {
772                 err = PTR_ERR(dma_chan);
773                 goto err_out;
774         }
775
776         tqspi->tx_dma_chan = dma_chan;
777
778         dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
779         if (!dma_buf) {
780                 err = -ENOMEM;
781                 goto err_out;
782         }
783
784         tqspi->tx_dma_buf = dma_buf;
785         tqspi->tx_dma_phys = dma_phys;
786         tqspi->use_dma = true;
787
788         return 0;
789
790 err_out:
791         tegra_qspi_deinit_dma(tqspi);
792
793         if (err != -EPROBE_DEFER) {
794                 dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
795                 dev_err(tqspi->dev, "falling back to PIO\n");
796                 return 0;
797         }
798
799         return err;
800 }
801
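/*
 * Build the COMMAND1 value for this transfer: update the clock rate when
 * needed and set the SPI mode, chip select and bit length, programming the
 * TX/RX tap delays on the first transfer of a message. The returned value
 * is later completed with the packed-mode, direction and interface-width
 * bits.
 */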
802 static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
803                                          bool is_first_of_msg)
804 {
805         struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
806         struct tegra_qspi_client_data *cdata = spi->controller_data;
807         u32 command1, command2, speed = t->speed_hz;
808         u8 bits_per_word = t->bits_per_word;
809         u32 tx_tap = 0, rx_tap = 0;
810         int req_mode;
811
812         if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
813                 clk_set_rate(tqspi->clk, speed);
814                 tqspi->cur_speed = speed;
815         }
816
817         tqspi->cur_pos = 0;
818         tqspi->cur_rx_pos = 0;
819         tqspi->cur_tx_pos = 0;
820         tqspi->curr_xfer = t;
821
822         if (is_first_of_msg) {
823                 tegra_qspi_mask_clear_irq(tqspi);
824
825                 command1 = tqspi->def_command1_reg;
826                 command1 |= QSPI_CS_SEL(spi->chip_select);
827                 command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
828
829                 command1 &= ~QSPI_CONTROL_MODE_MASK;
830                 req_mode = spi->mode & 0x3;
831                 if (req_mode == SPI_MODE_3)
832                         command1 |= QSPI_CONTROL_MODE_3;
833                 else
834                         command1 |= QSPI_CONTROL_MODE_0;
835
836                 if (spi->mode & SPI_CS_HIGH)
837                         command1 |= QSPI_CS_SW_VAL;
838                 else
839                         command1 &= ~QSPI_CS_SW_VAL;
840                 tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
841
842                 if (cdata && cdata->tx_clk_tap_delay)
843                         tx_tap = cdata->tx_clk_tap_delay;
844
845                 if (cdata && cdata->rx_clk_tap_delay)
846                         rx_tap = cdata->rx_clk_tap_delay;
847
848                 command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
849                 if (command2 != tqspi->def_command2_reg)
850                         tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);
851
852         } else {
853                 command1 = tqspi->command1_reg;
854                 command1 &= ~QSPI_BIT_LENGTH(~0);
855                 command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
856         }
857
858         command1 &= ~QSPI_SDR_DDR_SEL;
859
860         return command1;
861 }
862
863 static int tegra_qspi_start_transfer_one(struct spi_device *spi,
864                                          struct spi_transfer *t, u32 command1)
865 {
866         struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
867         unsigned int total_fifo_words;
868         u8 bus_width = 0;
869         int ret;
870
871         total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
872
873         command1 &= ~QSPI_PACKED;
874         if (tqspi->is_packed)
875                 command1 |= QSPI_PACKED;
876         tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
877
878         tqspi->cur_direction = 0;
879
880         command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
881         if (t->rx_buf) {
882                 command1 |= QSPI_RX_EN;
883                 tqspi->cur_direction |= DATA_DIR_RX;
884                 bus_width = t->rx_nbits;
885         }
886
887         if (t->tx_buf) {
888                 command1 |= QSPI_TX_EN;
889                 tqspi->cur_direction |= DATA_DIR_TX;
890                 bus_width = t->tx_nbits;
891         }
892
893         command1 &= ~QSPI_INTERFACE_WIDTH_MASK;
894
895         if (bus_width == SPI_NBITS_QUAD)
896                 command1 |= QSPI_INTERFACE_WIDTH_QUAD;
897         else if (bus_width == SPI_NBITS_DUAL)
898                 command1 |= QSPI_INTERFACE_WIDTH_DUAL;
899         else
900                 command1 |= QSPI_INTERFACE_WIDTH_SINGLE;
901
902         tqspi->command1_reg = command1;
903
904         tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);
905
906         ret = tegra_qspi_flush_fifos(tqspi, false);
907         if (ret < 0)
908                 return ret;
909
910         if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
911                 ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
912         else
913                 ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);
914
915         return ret;
916 }
917
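/*
 * Per-device tap delays come from optional device properties. A minimal,
 * illustrative device tree snippet (values are examples only):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		nvidia,tx-clk-tap-delay = <0>;
 *		nvidia,rx-clk-tap-delay = <6>;
 *	};
 */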
918 static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
919 {
920         struct tegra_qspi_client_data *cdata;
921
922         cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
923         if (!cdata)
924                 return NULL;
925
926         device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
927                                  &cdata->tx_clk_tap_delay);
928         device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
929                                  &cdata->rx_clk_tap_delay);
930
931         return cdata;
932 }
933
934 static int tegra_qspi_setup(struct spi_device *spi)
935 {
936         struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
937         struct tegra_qspi_client_data *cdata = spi->controller_data;
938         unsigned long flags;
939         u32 val;
940         int ret;
941
942         ret = pm_runtime_resume_and_get(tqspi->dev);
943         if (ret < 0) {
944                 dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
945                 return ret;
946         }
947
948         if (!cdata) {
949                 cdata = tegra_qspi_parse_cdata_dt(spi);
950                 spi->controller_data = cdata;
951         }
952         spin_lock_irqsave(&tqspi->lock, flags);
953
954         /* keep the default CS state inactive */
955         val = tqspi->def_command1_reg;
956         val |= QSPI_CS_SEL(spi->chip_select);
957         if (spi->mode & SPI_CS_HIGH)
958                 val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select);
959         else
960                 val |= QSPI_CS_POL_INACTIVE(spi->chip_select);
961
962         tqspi->def_command1_reg = val;
963         tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
964
965         spin_unlock_irqrestore(&tqspi->lock, flags);
966
967         pm_runtime_put(tqspi->dev);
968
969         return 0;
970 }
971
972 static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
973 {
974         dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
975         dev_dbg(tqspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
976                 tegra_qspi_readl(tqspi, QSPI_COMMAND1),
977                 tegra_qspi_readl(tqspi, QSPI_COMMAND2));
978         dev_dbg(tqspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
979                 tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
980                 tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
981         dev_dbg(tqspi->dev, "INTR_MASK:   0x%08x | MISC:        0x%08x\n",
982                 tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
983                 tegra_qspi_readl(tqspi, QSPI_MISC_REG));
984         dev_dbg(tqspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
985                 tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
986                 tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
987 }
988
989 static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
990 {
991         dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
992         tegra_qspi_dump_regs(tqspi);
993         tegra_qspi_flush_fifos(tqspi, true);
994         if (device_reset(tqspi->dev) < 0)
995                 dev_warn_once(tqspi->dev, "device reset failed\n");
996 }
997
998 static void tegra_qspi_transfer_end(struct spi_device *spi)
999 {
1000         struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
1001         int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
1002
1003         if (cs_val)
1004                 tqspi->command1_reg |= QSPI_CS_SW_VAL;
1005         else
1006                 tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
1007         tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
1008         tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
1009 }
1010
1011 static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
1012 {
1013         u32 cmd_config = 0;
1014
1015         /* Build the command phase configuration */
1016         if (is_ddr)
1017                 cmd_config |= QSPI_COMMAND_SDR_DDR;
1018         else
1019                 cmd_config &= ~QSPI_COMMAND_SDR_DDR;
1020
1021         cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
1022         cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
1023
1024         return cmd_config;
1025 }
1026
1027 static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
1028 {
1029         u32 addr_config = 0;
1030
1031         /* Build the address phase configuration */
1032         is_ddr = 0;             /* only SDR mode is supported */
1033         bus_width = 0;          /* X1 mode only */
1034
1035         if (is_ddr)
1036                 addr_config |= QSPI_ADDRESS_SDR_DDR;
1037         else
1038                 addr_config &= ~QSPI_ADDRESS_SDR_DDR;
1039
1040         addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
1041         addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
1042
1043         return addr_config;
1044 }
1045
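/*
 * Combined sequence transfer: the message is expected to contain exactly
 * three transfers (command, address, data). The command and address phases
 * are programmed into the CMB_SEQ registers and the data phase then runs
 * as a normal PIO or DMA transfer.
 */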
1046 static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
1047                                         struct spi_message *msg)
1048 {
1049         bool is_first_msg = true;
1050         struct spi_transfer *xfer;
1051         struct spi_device *spi = msg->spi;
1052         u8 transfer_phase = 0;
1053         u32 cmd1 = 0, dma_ctl = 0;
1054         int ret = 0;
1055         u32 address_value = 0;
1056         u32 cmd_config = 0, addr_config = 0;
1057         u8 cmd_value = 0, val = 0;
1058
1059         /* Enable Combined sequence mode */
1060         val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
1061         val |= QSPI_CMB_SEQ_EN;
1062         tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
1063         /* Process individual transfer list */
1064         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1065                 switch (transfer_phase) {
1066                 case CMD_TRANSFER:
1067                         /* X1 SDR mode */
1068                         cmd_config = tegra_qspi_cmd_config(false, 0,
1069                                                            xfer->len);
1070                         cmd_value = *((const u8 *)(xfer->tx_buf));
1071                         break;
1072                 case ADDR_TRANSFER:
1073                         /* X1 SDR mode */
1074                         addr_config = tegra_qspi_addr_config(false, 0,
1075                                                              xfer->len);
1076                         address_value = *((const u32 *)(xfer->tx_buf));
1077                         break;
1078                 case DATA_TRANSFER:
1079                         /* Program Command, Address value in register */
1080                         tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
1081                         tegra_qspi_writel(tqspi, address_value,
1082                                           QSPI_CMB_SEQ_ADDR);
1083                         /* Program Command and Address config in register */
1084                         tegra_qspi_writel(tqspi, cmd_config,
1085                                           QSPI_CMB_SEQ_CMD_CFG);
1086                         tegra_qspi_writel(tqspi, addr_config,
1087                                           QSPI_CMB_SEQ_ADDR_CFG);
1088
1089                         reinit_completion(&tqspi->xfer_completion);
1090                         cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
1091                                                              is_first_msg);
1092                         ret = tegra_qspi_start_transfer_one(spi, xfer,
1093                                                             cmd1);
1094
1095                         if (ret < 0) {
1096                                 dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
1097                                         ret);
1098                                 return ret;
1099                         }
1100
1101                         is_first_msg = false;
1102                         ret = wait_for_completion_timeout(&tqspi->xfer_completion,
1103                                                           QSPI_DMA_TIMEOUT);
1105
1106                         if (WARN_ON(ret == 0)) {
1107                                 dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
1108                                         ret);
1109                                 if (tqspi->is_curr_dma_xfer &&
1110                                     (tqspi->cur_direction & DATA_DIR_TX))
1111                                         dmaengine_terminate_all(tqspi->tx_dma_chan);
1113
1114                                 if (tqspi->is_curr_dma_xfer &&
1115                                     (tqspi->cur_direction & DATA_DIR_RX))
1116                                         dmaengine_terminate_all(tqspi->rx_dma_chan);
1118
1119                                 /* Abort transfer by resetting pio/dma bit */
1120                                 if (!tqspi->is_curr_dma_xfer) {
1121                                         cmd1 = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
1122                                         cmd1 &= ~QSPI_PIO;
1123                                         tegra_qspi_writel(tqspi, cmd1, QSPI_COMMAND1);
1128                                 } else {
1129                                         dma_ctl = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
1132                                         dma_ctl &= ~QSPI_DMA_EN;
1133                                         tegra_qspi_writel(tqspi, dma_ctl,
1134                                                           QSPI_DMA_CTL);
1135                                 }
1136
1137                                 /* Reset controller if timeout happens */
1138                                 if (device_reset(tqspi->dev) < 0)
1139                                         dev_warn_once(tqspi->dev,
1140                                                       "device reset failed\n");
1141                                 ret = -EIO;
1142                                 goto exit;
1143                         }
1144
1145                         if (tqspi->tx_status ||  tqspi->rx_status) {
1146                                 dev_err(tqspi->dev, "QSPI Transfer failed\n");
1147                                 tqspi->tx_status = 0;
1148                                 tqspi->rx_status = 0;
1149                                 ret = -EIO;
1150                                 goto exit;
1151                         }
1152                         break;
1153                 default:
1154                         ret = -EINVAL;
1155                         goto exit;
1156                 }
1157                 msg->actual_length += xfer->len;
1158                 transfer_phase++;
1159         }
1160
1161 exit:
1162         msg->status = ret;
1163
1164         return ret;
1165 }
1166
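/*
 * Standard (non-combined) message handling: each transfer runs on its own,
 * except that a following dummy-data transfer is folded into the current
 * transfer as dummy clock cycles when it fits in QSPI_MISC_REG.
 */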
1167 static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
1168                                             struct spi_message *msg)
1169 {
1170         struct spi_device *spi = msg->spi;
1171         struct spi_transfer *transfer;
1172         bool is_first_msg = true;
1173         int ret = 0, val = 0;
1174
1175         msg->status = 0;
1176         msg->actual_length = 0;
1177         tqspi->tx_status = 0;
1178         tqspi->rx_status = 0;
1179
1180         /* Disable Combined sequence mode */
1181         val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
1182         val &= ~QSPI_CMB_SEQ_EN;
1183         tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
1184         list_for_each_entry(transfer, &msg->transfers, transfer_list) {
1185                 struct spi_transfer *xfer = transfer;
1186                 u8 dummy_bytes = 0;
1187                 u32 cmd1;
1188
1189                 tqspi->dummy_cycles = 0;
1190                 /*
1191                  * Tegra QSPI hardware supports dummy bytes transfer after actual transfer
1192                  * bytes based on programmed dummy clock cycles in the QSPI_MISC register.
1193                  * So, check if the next transfer is dummy data transfer and program dummy
1194                  * clock cycles along with the current transfer and skip next transfer.
1195                  */
1196                 if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
1197                         struct spi_transfer *next_xfer;
1198
1199                         next_xfer = list_next_entry(xfer, transfer_list);
1200                         if (next_xfer->dummy_data) {
1201                                 u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;
1202
1203                                 if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
1204                                         tqspi->dummy_cycles = dummy_cycles;
1205                                         dummy_bytes = next_xfer->len;
1206                                         transfer = next_xfer;
1207                                 }
1208                         }
1209                 }
1210
1211                 reinit_completion(&tqspi->xfer_completion);
1212
1213                 cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
1214
1215                 ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
1216                 if (ret < 0) {
1217                         dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
1218                         goto complete_xfer;
1219                 }
1220
1221                 ret = wait_for_completion_timeout(&tqspi->xfer_completion,
1222                                                   QSPI_DMA_TIMEOUT);
1223                 if (WARN_ON(ret == 0)) {
1224                         dev_err(tqspi->dev, "transfer timeout\n");
1225                         if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
1226                                 dmaengine_terminate_all(tqspi->tx_dma_chan);
1227                         if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
1228                                 dmaengine_terminate_all(tqspi->rx_dma_chan);
1229                         tegra_qspi_handle_error(tqspi);
1230                         ret = -EIO;
1231                         goto complete_xfer;
1232                 }
1233
1234                 if (tqspi->tx_status ||  tqspi->rx_status) {
1235                         tegra_qspi_handle_error(tqspi);
1236                         ret = -EIO;
1237                         goto complete_xfer;
1238                 }
1239
1240                 msg->actual_length += xfer->len + dummy_bytes;
1241
1242 complete_xfer:
1243                 if (ret < 0) {
1244                         tegra_qspi_transfer_end(spi);
1245                         spi_transfer_delay_exec(xfer);
1246                         goto exit;
1247                 }
1248
1249                 if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
1250                         /* de-activate CS after last transfer only when cs_change is not set */
1251                         if (!xfer->cs_change) {
1252                                 tegra_qspi_transfer_end(spi);
1253                                 spi_transfer_delay_exec(xfer);
1254                         }
1255                 } else if (xfer->cs_change) {
1256                         /* de-activate CS between transfers only when cs_change is set */
1257                         tegra_qspi_transfer_end(spi);
1258                         spi_transfer_delay_exec(xfer);
1259                 }
1260         }
1261
1262         ret = 0;
1263 exit:
1264         msg->status = ret;
1265
1266         return ret;
1267 }
1268
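/*
 * A message qualifies for combined sequence mode only on SoCs that support
 * it and only when it has exactly three transfers: a command of at most
 * 2 bytes, an address of 3 or 4 bytes, and a data phase of at most four
 * times the FIFO depth on a controller with DMA support.
 */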
1269 static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
1270                                         struct spi_message *msg)
1271 {
1272         int transfer_count = 0;
1273         struct spi_transfer *xfer;
1274
1275         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1276                 transfer_count++;
1277         }
1278         if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
1279                 return false;
1280         xfer = list_first_entry(&msg->transfers, typeof(*xfer),
1281                                 transfer_list);
1282         if (xfer->len > 2)
1283                 return false;
1284         xfer = list_next_entry(xfer, transfer_list);
1285         if (xfer->len > 4 || xfer->len < 3)
1286                 return false;
1287         xfer = list_next_entry(xfer, transfer_list);
1288         if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
1289                 return false;
1290
1291         return true;
1292 }
1293
1294 static int tegra_qspi_transfer_one_message(struct spi_master *master,
1295                                            struct spi_message *msg)
1296 {
1297         struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1298         int ret;
1299
1300         if (tegra_qspi_validate_cmb_seq(tqspi, msg))
1301                 ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
1302         else
1303                 ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
1304
1305         spi_finalize_current_message(master);
1306
1307         return ret;
1308 }
1309
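/*
 * Interrupt path for PIO transfers: on error, report and reset; otherwise
 * drain/refill the FIFOs and either complete the transfer or start the
 * next chunk.
 */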
1310 static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
1311 {
1312         struct spi_transfer *t = tqspi->curr_xfer;
1313         unsigned long flags;
1314
1315         spin_lock_irqsave(&tqspi->lock, flags);
1316
1317         if (tqspi->tx_status ||  tqspi->rx_status) {
1318                 tegra_qspi_handle_error(tqspi);
1319                 complete(&tqspi->xfer_completion);
1320                 goto exit;
1321         }
1322
1323         if (tqspi->cur_direction & DATA_DIR_RX)
1324                 tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
1325
1326         if (tqspi->cur_direction & DATA_DIR_TX)
1327                 tqspi->cur_pos = tqspi->cur_tx_pos;
1328         else
1329                 tqspi->cur_pos = tqspi->cur_rx_pos;
1330
1331         if (tqspi->cur_pos == t->len) {
1332                 complete(&tqspi->xfer_completion);
1333                 goto exit;
1334         }
1335
1336         tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1337         tegra_qspi_start_cpu_based_transfer(tqspi, t);
1338 exit:
1339         spin_unlock_irqrestore(&tqspi->lock, flags);
1340         return IRQ_HANDLED;
1341 }
1342
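     /*
      * Interrupt-thread handler for DMA transfers: wait for the TX/RX DMA
      * completions, copy received data to the client buffer, and either
      * complete the transfer or continue it with another DMA or PIO chunk.
      */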
1343 static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
1344 {
1345         struct spi_transfer *t = tqspi->curr_xfer;
1346         unsigned int total_fifo_words;
1347         unsigned long flags;
1348         long wait_status;
1349         int err = 0;
1350
1351         if (tqspi->cur_direction & DATA_DIR_TX) {
1352                 if (tqspi->tx_status) {
1353                         dmaengine_terminate_all(tqspi->tx_dma_chan);
1354                         err += 1;
1355                 } else {
1356                         wait_status = wait_for_completion_interruptible_timeout(
1357                                 &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
1358                         if (wait_status <= 0) {
1359                                 dmaengine_terminate_all(tqspi->tx_dma_chan);
1360                                 dev_err(tqspi->dev, "failed TX DMA transfer\n");
1361                                 err += 1;
1362                         }
1363                 }
1364         }
1365
1366         if (tqspi->cur_direction & DATA_DIR_RX) {
1367                 if (tqspi->rx_status) {
1368                         dmaengine_terminate_all(tqspi->rx_dma_chan);
1369                         err += 2;
1370                 } else {
1371                         wait_status = wait_for_completion_interruptible_timeout(
1372                                 &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
1373                         if (wait_status <= 0) {
1374                                 dmaengine_terminate_all(tqspi->rx_dma_chan);
1375                                 dev_err(tqspi->dev, "failed RX DMA transfer\n");
1376                                 err += 2;
1377                         }
1378                 }
1379         }
1380
1381         spin_lock_irqsave(&tqspi->lock, flags);
1382
1383         if (err) {
1384                 tegra_qspi_dma_unmap_xfer(tqspi, t);
1385                 tegra_qspi_handle_error(tqspi);
1386                 complete(&tqspi->xfer_completion);
1387                 goto exit;
1388         }
1389
1390         if (tqspi->cur_direction & DATA_DIR_RX)
1391                 tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
1392
1393         if (tqspi->cur_direction & DATA_DIR_TX)
1394                 tqspi->cur_pos = tqspi->cur_tx_pos;
1395         else
1396                 tqspi->cur_pos = tqspi->cur_rx_pos;
1397
1398         if (tqspi->cur_pos == t->len) {
1399                 tegra_qspi_dma_unmap_xfer(tqspi, t);
1400                 complete(&tqspi->xfer_completion);
1401                 goto exit;
1402         }
1403
1404         tegra_qspi_dma_unmap_xfer(tqspi, t);
1405
1406         /* continue with the remainder of the current transfer */
1407         total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1408         if (total_fifo_words > QSPI_FIFO_DEPTH)
1409                 err = tegra_qspi_start_dma_based_transfer(tqspi, t);
1410         else
1411                 err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
1412
1413 exit:
1414         spin_unlock_irqrestore(&tqspi->lock, flags);
1415         return IRQ_HANDLED;
1416 }
1417
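     /*
      * Threaded IRQ handler: latch the FIFO status, note any TX/RX FIFO
      * underflow/overflow errors, clear the interrupt and hand off to the
      * PIO or DMA handler.
      */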
1418 static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
1419 {
1420         struct tegra_qspi *tqspi = context_data;
1421
1422         tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
1423
1424         if (tqspi->cur_direction & DATA_DIR_TX)
1425                 tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
1426
1427         if (tqspi->cur_direction & DATA_DIR_RX)
1428                 tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
1429
1430         tegra_qspi_mask_clear_irq(tqspi);
1431
1432         if (!tqspi->is_curr_dma_xfer)
1433                 return handle_cpu_based_xfer(tqspi);
1434
1435         return handle_dma_based_xfer(tqspi);
1436 }
1437
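     /* Per-SoC capabilities: DMA support, combined-sequence support and chip-select count. */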
1438 static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
1439         .has_dma = true,
1440         .cmb_xfer_capable = false,
1441         .cs_count = 1,
1442 };
1443
1444 static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
1445         .has_dma = true,
1446         .cmb_xfer_capable = true,
1447         .cs_count = 1,
1448 };
1449
1450 static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
1451         .has_dma = false,
1452         .cmb_xfer_capable = true,
1453         .cs_count = 1,
1454 };
1455
1456 static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
1457         .has_dma = false,
1458         .cmb_xfer_capable = true,
1459         .cs_count = 4,
1460 };
1461
1462 static const struct of_device_id tegra_qspi_of_match[] = {
1463         {
1464                 .compatible = "nvidia,tegra210-qspi",
1465                 .data       = &tegra210_qspi_soc_data,
1466         }, {
1467                 .compatible = "nvidia,tegra186-qspi",
1468                 .data       = &tegra186_qspi_soc_data,
1469         }, {
1470                 .compatible = "nvidia,tegra194-qspi",
1471                 .data       = &tegra186_qspi_soc_data,
1472         }, {
1473                 .compatible = "nvidia,tegra234-qspi",
1474                 .data       = &tegra234_qspi_soc_data,
1475         }, {
1476                 .compatible = "nvidia,tegra241-qspi",
1477                 .data       = &tegra241_qspi_soc_data,
1478         },
1479         {}
1480 };
1481
1482 MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
1483
1484 #ifdef CONFIG_ACPI
1485 static const struct acpi_device_id tegra_qspi_acpi_match[] = {
1486         {
1487                 .id = "NVDA1213",
1488                 .driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
1489         }, {
1490                 .id = "NVDA1313",
1491                 .driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
1492         }, {
1493                 .id = "NVDA1413",
1494                 .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
1495         }, {
1496                 .id = "NVDA1513",
1497                 .driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
1498         },
1499         {}
1500 };
1501
1502 MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
1503 #endif
1504
1505 static int tegra_qspi_probe(struct platform_device *pdev)
1506 {
1507         struct spi_master       *master;
1508         struct tegra_qspi       *tqspi;
1509         struct resource         *r;
1510         int ret, qspi_irq;
1511         int bus_num;
1512
1513         master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
1514         if (!master)
1515                 return -ENOMEM;
1516
1517         platform_set_drvdata(pdev, master);
1518         tqspi = spi_master_get_devdata(master);
1519
1520         master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
1521                             SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
1522         master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
1523         master->setup = tegra_qspi_setup;
1524         master->transfer_one_message = tegra_qspi_transfer_one_message;
1525         master->num_chipselect = 1;
1526         master->auto_runtime_pm = true;
1527
1528         bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
1529         if (bus_num >= 0)
1530                 master->bus_num = bus_num;
1531
1532         tqspi->master = master;
1533         tqspi->dev = &pdev->dev;
1534         spin_lock_init(&tqspi->lock);
1535
1536         tqspi->soc_data = device_get_match_data(&pdev->dev);
1537         master->num_chipselect = tqspi->soc_data->cs_count;
1538         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1539         tqspi->base = devm_ioremap_resource(&pdev->dev, r);
1540         if (IS_ERR(tqspi->base))
1541                 return PTR_ERR(tqspi->base);
1542
1543         tqspi->phys = r->start;
1544         qspi_irq = platform_get_irq(pdev, 0);
1545         if (qspi_irq < 0)
1546                 return qspi_irq;
1547         tqspi->irq = qspi_irq;
1548
1549         if (!has_acpi_companion(tqspi->dev)) {
1550                 tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
1551                 if (IS_ERR(tqspi->clk)) {
1552                         ret = PTR_ERR(tqspi->clk);
1553                         dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
1554                         return ret;
1555                 }
1556
1557         }
1558
1559         tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
1560         tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
1561
1562         ret = tegra_qspi_init_dma(tqspi);
1563         if (ret < 0)
1564                 return ret;
1565
1566         if (tqspi->use_dma)
1567                 tqspi->max_buf_size = tqspi->dma_buf_size;
1568
1569         init_completion(&tqspi->tx_dma_complete);
1570         init_completion(&tqspi->rx_dma_complete);
1571         init_completion(&tqspi->xfer_completion);
1572
1573         pm_runtime_enable(&pdev->dev);
1574         ret = pm_runtime_resume_and_get(&pdev->dev);
1575         if (ret < 0) {
1576                 dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
1577                 goto exit_pm_disable;
1578         }
1579
1580         if (device_reset(tqspi->dev) < 0)
1581                 dev_warn_once(tqspi->dev, "device reset failed\n");
1582
1583         tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
1584         tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
1585         tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
1586         tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
1587         tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
1588
1589         pm_runtime_put(&pdev->dev);
1590
1591         ret = request_threaded_irq(tqspi->irq, NULL,
1592                                    tegra_qspi_isr_thread, IRQF_ONESHOT,
1593                                    dev_name(&pdev->dev), tqspi);
1594         if (ret < 0) {
1595                 dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
1596                 goto exit_pm_disable;
1597         }
1598
1599         master->dev.of_node = pdev->dev.of_node;
1600         ret = spi_register_master(master);
1601         if (ret < 0) {
1602                 dev_err(&pdev->dev, "failed to register master: %d\n", ret);
1603                 goto exit_free_irq;
1604         }
1605
1606         return 0;
1607
1608 exit_free_irq:
1609         free_irq(qspi_irq, tqspi);
1610 exit_pm_disable:
1611         pm_runtime_force_suspend(&pdev->dev);
1612         tegra_qspi_deinit_dma(tqspi);
1613         return ret;
1614 }
1615
1616 static int tegra_qspi_remove(struct platform_device *pdev)
1617 {
1618         struct spi_master *master = platform_get_drvdata(pdev);
1619         struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1620
1621         spi_unregister_master(master);
1622         free_irq(tqspi->irq, tqspi);
1623         pm_runtime_force_suspend(&pdev->dev);
1624         tegra_qspi_deinit_dma(tqspi);
1625
1626         return 0;
1627 }
1628
1629 static int __maybe_unused tegra_qspi_suspend(struct device *dev)
1630 {
1631         struct spi_master *master = dev_get_drvdata(dev);
1632
1633         return spi_master_suspend(master);
1634 }
1635
1636 static int __maybe_unused tegra_qspi_resume(struct device *dev)
1637 {
1638         struct spi_master *master = dev_get_drvdata(dev);
1639         struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1640         int ret;
1641
1642         ret = pm_runtime_resume_and_get(dev);
1643         if (ret < 0) {
1644                 dev_err(dev, "failed to get runtime PM: %d\n", ret);
1645                 return ret;
1646         }
1647
1648         tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
1649         tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
1650         pm_runtime_put(dev);
1651
1652         return spi_master_resume(master);
1653 }
1654
1655 static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
1656 {
1657         struct spi_master *master = dev_get_drvdata(dev);
1658         struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1659
1660         /* Runtime PM is disabled with ACPI */
1661         if (has_acpi_companion(tqspi->dev))
1662                 return 0;
1663         /* Flush all writes in the PPSB queue by reading back */
1664         tegra_qspi_readl(tqspi, QSPI_COMMAND1);
1665
1666         clk_disable_unprepare(tqspi->clk);
1667
1668         return 0;
1669 }
1670
1671 static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
1672 {
1673         struct spi_master *master = dev_get_drvdata(dev);
1674         struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1675         int ret;
1676
1677         /* Runtime PM is disabled with ACPI */
1678         if (has_acpi_companion(tqspi->dev))
1679                 return 0;
1680         ret = clk_prepare_enable(tqspi->clk);
1681         if (ret < 0)
1682                 dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
1683
1684         return ret;
1685 }
1686
1687 static const struct dev_pm_ops tegra_qspi_pm_ops = {
1688         SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
1689         SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
1690 };
1691
1692 static struct platform_driver tegra_qspi_driver = {
1693         .driver = {
1694                 .name           = "tegra-qspi",
1695                 .pm             = &tegra_qspi_pm_ops,
1696                 .of_match_table = tegra_qspi_of_match,
1697                 .acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
1698         },
1699         .probe =        tegra_qspi_probe,
1700         .remove =       tegra_qspi_remove,
1701 };
1702 module_platform_driver(tegra_qspi_driver);
1703
1704 MODULE_ALIAS("platform:qspi-tegra");
1705 MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
1706 MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
1707 MODULE_LICENSE("GPL v2");