// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale QuadSPI driver.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Copyright (C) 2018 Bootlin
 * Copyright (C) 2018 exceet electronics GmbH
 * Copyright (C) 2018 Kontron Electronics GmbH
 * Copyright 2019-2020 NXP
 *
 * This driver is a ported version of the Linux Freescale QSPI driver taken
 * from the v5.5-rc1 tag, which carries the following information.
 *
 * Transition to SPI MEM interface:
 * Authors:
 *     Boris Brezillon <bbrezillon@kernel.org>
 *     Frieder Schrempf <frieder.schrempf@kontron.de>
 *     Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
 *     Suresh Gupta <suresh.gupta@nxp.com>
 *
 * Based on the original fsl-quadspi.c spi-nor driver.
 * Transition to spi-mem in spi-fsl-qspi.c
 */

#include <common.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <log.h>
#include <spi.h>
#include <spi-mem.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
#include <linux/sizes.h>
#include <linux/iopoll.h>
#include <linux/err.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * The driver only uses one single LUT entry, that is updated on
 * each call of exec_op(). Index 0 is preset at boot with a basic
 * read operation, so let's use the last entry (15).
 */
#define SEQID_LUT		15
#define SEQID_LUT_AHB		14

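/*
 * Note (illustrative, derived from the LUT register layout below): each
 * sequence occupies four 32-bit LUT registers, so SEQID_LUT (15) lives at
 * QUADSPI_LUT_BASE + 15 * 16 = 0x400..0x40c and SEQID_LUT_AHB (14) at
 * 0x3f0..0x3fc.
 */
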
/* Registers used by the driver */
#define QUADSPI_MCR			0x00
#define QUADSPI_MCR_RESERVED_MASK	GENMASK(19, 16)
#define QUADSPI_MCR_MDIS_MASK		BIT(14)
#define QUADSPI_MCR_CLR_TXF_MASK	BIT(11)
#define QUADSPI_MCR_CLR_RXF_MASK	BIT(10)
#define QUADSPI_MCR_DDR_EN_MASK		BIT(7)
#define QUADSPI_MCR_END_CFG_MASK	GENMASK(3, 2)
#define QUADSPI_MCR_SWRSTHD_MASK	BIT(1)
#define QUADSPI_MCR_SWRSTSD_MASK	BIT(0)

#define QUADSPI_IPCR			0x08
#define QUADSPI_IPCR_SEQID(x)		((x) << 24)

#define QUADSPI_FLSHCR			0x0c
#define QUADSPI_FLSHCR_TCSS_MASK	GENMASK(3, 0)
#define QUADSPI_FLSHCR_TCSH_MASK	GENMASK(11, 8)
#define QUADSPI_FLSHCR_TDH_MASK		GENMASK(17, 16)

#define QUADSPI_BUF3CR			0x1c
#define QUADSPI_BUF3CR_ALLMST_MASK	BIT(31)
#define QUADSPI_BUF3CR_ADATSZ(x)	((x) << 8)
#define QUADSPI_BUF3CR_ADATSZ_MASK	GENMASK(15, 8)

#define QUADSPI_BFGENCR			0x20
#define QUADSPI_BFGENCR_SEQID(x)	((x) << 12)

#define QUADSPI_BUF0IND			0x30
#define QUADSPI_BUF1IND			0x34
#define QUADSPI_BUF2IND			0x38
#define QUADSPI_SFAR			0x100

#define QUADSPI_SMPR			0x108
#define QUADSPI_SMPR_DDRSMP_MASK	GENMASK(18, 16)
#define QUADSPI_SMPR_FSDLY_MASK		BIT(6)
#define QUADSPI_SMPR_FSPHS_MASK		BIT(5)
#define QUADSPI_SMPR_HSENA_MASK		BIT(0)

#define QUADSPI_RBCT			0x110
#define QUADSPI_RBCT_WMRK_MASK		GENMASK(4, 0)
#define QUADSPI_RBCT_RXBRD_USEIPS	BIT(8)

#define QUADSPI_TBDR			0x154

#define QUADSPI_SR			0x15c
#define QUADSPI_SR_IP_ACC_MASK		BIT(1)
#define QUADSPI_SR_AHB_ACC_MASK		BIT(2)

#define QUADSPI_FR			0x160
#define QUADSPI_FR_TFF_MASK		BIT(0)

#define QUADSPI_RSER			0x164
#define QUADSPI_RSER_TFIE		BIT(0)

#define QUADSPI_SPTRCLR			0x16c
#define QUADSPI_SPTRCLR_IPPTRC		BIT(8)
#define QUADSPI_SPTRCLR_BFPTRC		BIT(0)

#define QUADSPI_SFA1AD			0x180
#define QUADSPI_SFA2AD			0x184
#define QUADSPI_SFB1AD			0x188
#define QUADSPI_SFB2AD			0x18c
#define QUADSPI_RBDR(x)			(0x200 + ((x) * 4))

#define QUADSPI_LUTKEY			0x300
#define QUADSPI_LUTKEY_VALUE		0x5AF05AF0

#define QUADSPI_LCKCR			0x304
#define QUADSPI_LCKER_LOCK		BIT(0)
#define QUADSPI_LCKER_UNLOCK		BIT(1)

#define QUADSPI_LUT_BASE		0x310
#define QUADSPI_LUT_OFFSET		(SEQID_LUT * 4 * 4)
#define QUADSPI_LUT_REG(idx) \
	(QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)

#define QUADSPI_AHB_LUT_OFFSET		(SEQID_LUT_AHB * 4 * 4)
#define QUADSPI_AHB_LUT_REG(idx) \
	(QUADSPI_LUT_BASE + QUADSPI_AHB_LUT_OFFSET + (idx) * 4)

/* Instruction set for the LUT register */
#define LUT_STOP		0
#define LUT_CMD			1
#define LUT_ADDR		2
#define LUT_DUMMY		3
#define LUT_MODE		4
#define LUT_MODE2		5
#define LUT_MODE4		6
#define LUT_FSL_READ		7
#define LUT_FSL_WRITE		8
#define LUT_JMP_ON_CS		9
#define LUT_ADDR_DDR		10
#define LUT_MODE_DDR		11
#define LUT_MODE2_DDR		12
#define LUT_MODE4_DDR		13
#define LUT_FSL_READ_DDR	14
#define LUT_FSL_WRITE_DDR	15
#define LUT_DATA_LEARN		16

/*
 * The PAD definitions for LUT register.
 *
 * The pad stands for the number of IO lines [0:3].
 * For example, the quad read needs four IO lines,
 * so you should use LUT_PAD(4).
 */
#define LUT_PAD(x) (fls(x) - 1)

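/*
 * Worked values (these follow directly from fls()): LUT_PAD(1) = 0 for a
 * single wire, LUT_PAD(2) = 1 for dual, LUT_PAD(4) = 2 for quad.
 */
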
/*
 * Macro for constructing the LUT entries with the following
 * register layout:
 *
 *  ---------------------------------------------------
 *  | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
 *  ---------------------------------------------------
 */
#define LUT_DEF(idx, ins, pad, opr) \
	((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))

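/*
 * Illustrative expansion (not from the original source): a quad-output fast
 * read opcode 0x6b sent on a single wire encodes as
 * LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x6b) = (1 << 10) | (0 << 8) | 0x6b = 0x46b
 * in the low half-word; an odd idx shifts the same pattern into the upper
 * half-word of the LUT register.
 */
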
/* Controller needs driver to swap endianness */
#define QUADSPI_QUIRK_SWAP_ENDIAN	BIT(0)

/* Controller needs 4x internal clock */
#define QUADSPI_QUIRK_4X_INT_CLK	BIT(1)

/*
 * TKT253890, the controller needs the driver to fill the txfifo with
 * 16 bytes at least to trigger a data transfer, even though the extra
 * data won't be transferred.
 */
#define QUADSPI_QUIRK_TKT253890		BIT(2)

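/*
 * See fsl_qspi_fill_txfifo() below: when this quirk is set, the TX FIFO is
 * padded with zero words until at least 16 bytes have been written after
 * the payload.
 */
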
/* TKT245618, the controller cannot wake up from wait mode */
#define QUADSPI_QUIRK_TKT245618		BIT(3)

/*
 * Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
 * internally. No need to add it when setting SFXXAD and SFAR registers.
 */
#define QUADSPI_QUIRK_BASE_INTERNAL	BIT(4)

/*
 * Controller uses TDH bits in register QUADSPI_FLSHCR.
 * They need to be set in accordance with the DDR/SDR mode.
 */
#define QUADSPI_QUIRK_USE_TDH_SETTING	BIT(5)

/*
 * Controller only has two chip selects on flash A; there is no flash B port.
 */
#define QUADSPI_QUIRK_SINGLE_BUS	BIT(6)

struct fsl_qspi_devtype_data {
	unsigned int rxfifo;
	unsigned int txfifo;
	unsigned int ahb_buf_size;
	u32 quirks;
	bool little_endian;
};

static const struct fsl_qspi_devtype_data vybrid_data = {
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data imx6sx_data = {
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data imx7d_data = {
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
		  QUADSPI_QUIRK_USE_TDH_SETTING,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data imx6ul_data = {
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
		  QUADSPI_QUIRK_USE_TDH_SETTING,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data imx7ulp_data = {
	.ahb_buf_size = SZ_128,
	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
		  QUADSPI_QUIRK_USE_TDH_SETTING | QUADSPI_QUIRK_SINGLE_BUS,
	.little_endian = true,
};

static const struct fsl_qspi_devtype_data ls1021a_data = {
	.ahb_buf_size = SZ_1K,
	.quirks = 0,
	.little_endian = false,
};

static const struct fsl_qspi_devtype_data ls2080a_data = {
	.ahb_buf_size = SZ_1K,
	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
	.little_endian = true,
};

struct fsl_qspi {
	void __iomem *iobase;
	void __iomem *ahb_addr;
	u32 memmap_phy;
	u32 memmap_size;
	const struct fsl_qspi_devtype_data *devtype_data;
	int selected;
	struct udevice *dev;
};

static inline int needs_swap_endian(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
}

static inline int needs_4x_clock(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
}

static inline int needs_fill_txfifo(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
}

static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
}

static inline int needs_amba_base_offset(struct fsl_qspi *q)
{
	return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
}

static inline int needs_tdh_setting(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
}

static inline int needs_single_bus(struct fsl_qspi *q)
{
	return q->devtype_data->quirks & QUADSPI_QUIRK_SINGLE_BUS;
}

/*
 * An IC bug makes it necessary to rearrange the 32-bit data.
 * Later chips, such as IMX6SLX, have fixed this bug.
 */
static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
{
	return needs_swap_endian(q) ? __swab32(a) : a;
}

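/*
 * For example (illustrative only): on a controller with
 * QUADSPI_QUIRK_SWAP_ENDIAN set, a CPU word 0x11223344 is written to the
 * TX FIFO as 0x44332211, and RX FIFO words are byte-swapped back the same
 * way before being copied into the caller's buffer.
 */
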
/*
 * R/W functions for big- or little-endian registers:
 * The QSPI controller's endianness is independent of
 * the CPU core's endianness. So far, although the CPU
 * core is little-endian, the QSPI controller can use
 * big-endian or little-endian.
 */
static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
{
	if (q->devtype_data->little_endian)
		out_le32(addr, val);
	else
		out_be32(addr, val);
}

static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
{
	if (q->devtype_data->little_endian)
		return in_le32(addr);

	return in_be32(addr);
}

static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
{
	switch (width) {
	case 1:
	case 2:
	case 4:
		return 0;
	}

	return -ENOTSUPP;
}

static bool fsl_qspi_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct fsl_qspi *q = dev_get_priv(slave->dev->parent);
	int ret;

	ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);

	if (op->addr.nbytes)
		ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);

	if (op->dummy.nbytes)
		ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);

	if (op->data.nbytes)
		ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);

	if (ret)
		return false;

	/*
	 * The number of instructions needed for the op needs
	 * to fit into a single LUT entry.
	 */
	if (op->addr.nbytes +
	    (op->dummy.nbytes ? 1 : 0) +
	    (op->data.nbytes ? 1 : 0) > 6)
		return false;

	/* Max 64 dummy clock cycles supported */
	if (op->dummy.nbytes &&
	    (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
		return false;

	/* Max data length, check controller limits and alignment */
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    (op->data.nbytes > q->devtype_data->ahb_buf_size ||
	     (op->data.nbytes > q->devtype_data->rxfifo - 4 &&
	      !IS_ALIGNED(op->data.nbytes, 8))))
		return false;

	if (op->data.dir == SPI_MEM_DATA_OUT &&
	    op->data.nbytes > q->devtype_data->txfifo)
		return false;

	return spi_mem_default_supports_op(slave, op);
}

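/*
 * Illustrative example (not from the original source): a 1-1-4 quad-output
 * fast read (opcode 0x6b, 3 address bytes, 1 dummy byte) passes the checks
 * above: 3 + 1 + 1 = 5 instruction slots <= 6, and one dummy byte on a
 * single line amounts to 8 dummy clock cycles <= 64.
 */
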
static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
				 const struct spi_mem_op *op)
{
	void __iomem *base = q->iobase;
	u32 lutval[4] = {};
	int lutidx = 1, i;

	lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
			     op->cmd.opcode);

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (op->addr.nbytes) {
			lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
						      LUT_PAD(op->addr.buswidth),
						      (op->addr.nbytes == 4) ? 0x20 : 0x18);
			lutidx++;
		}
	} else {
		/*
		 * For some unknown reason, using LUT_ADDR doesn't work in some
		 * cases (at least with only one byte long addresses), so
		 * let's use LUT_MODE to write the address bytes one by one.
		 */
		for (i = 0; i < op->addr.nbytes; i++) {
			u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes -
							    i - 1));

			lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
						      LUT_PAD(op->addr.buswidth),
						      addrbyte);
			lutidx++;
		}
	}

	if (op->dummy.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
					      LUT_PAD(op->dummy.buswidth),
					      op->dummy.nbytes * 8 /
					      op->dummy.buswidth);
		lutidx++;
	}

	if (op->data.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx,
					      op->data.dir == SPI_MEM_DATA_IN ?
					      LUT_FSL_READ : LUT_FSL_WRITE,
					      LUT_PAD(op->data.buswidth),
					      0);
		lutidx++;
	}

	lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);

	/* unlock LUT */
	qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
	qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);

	dev_dbg(q->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n",
		op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]);

	/* fill LUT */
	for (i = 0; i < ARRAY_SIZE(lutval); i++)
		qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN &&
		    op->data.nbytes > (q->devtype_data->rxfifo - 4)) {
			for (i = 0; i < ARRAY_SIZE(lutval); i++)
				qspi_writel(q, lutval[i],
					    base + QUADSPI_AHB_LUT_REG(i));
		}
	}

	/* lock LUT */
	qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
	qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
}

/*
 * If we have changed the content of the flash by writing or erasing, or if we
 * read from flash with a different offset into the page buffer, we need to
 * invalidate the AHB buffer. If we do not do so, we may read out the wrong
 * data. The spec tells us to reset the AHB domain and Serial Flash domain at
 * the same time.
 */
static void fsl_qspi_invalidate(struct fsl_qspi *q)
{
	u32 reg;

	reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
	reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
	qspi_writel(q, reg, q->iobase + QUADSPI_MCR);

	/*
	 * The minimum delay: 1 AHB + 2 SFCK clocks.
	 * Delay 1 us is enough.
	 */
	udelay(1);

	reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
	qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}

static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_slave *slave)
{
	struct dm_spi_slave_plat *plat =
		dev_get_parent_plat(slave->dev);

	if (q->selected == plat->cs)
		return;

	q->selected = plat->cs;
	fsl_qspi_invalidate(q);
}

static u32 fsl_qspi_memsize_per_cs(struct fsl_qspi *q)
{
	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (needs_single_bus(q))
			return q->memmap_size / 2;
		else
			return q->memmap_size / 4;
	} else {
		return ALIGN(q->devtype_data->ahb_buf_size, 0x400);
	}
}

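/*
 * Worked example (hypothetical numbers): with CONFIG_FSL_QSPI_AHB_FULL_MAP
 * and a 256 MiB AHB window, each of the four chip selects gets 64 MiB (or
 * 128 MiB each for the two chip selects of a single-bus controller).
 * Without the full map, every chip select only gets ahb_buf_size rounded up
 * to a 1 KiB boundary.
 */
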
static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
{
	void __iomem *ahb_read_addr = q->ahb_addr;

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (op->addr.nbytes)
			ahb_read_addr += op->addr.val;
	}

	memcpy_fromio(op->data.buf.in,
		      ahb_read_addr + q->selected * fsl_qspi_memsize_per_cs(q),
		      op->data.nbytes);
}

static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
				 const struct spi_mem_op *op)
{
	void __iomem *base = q->iobase;
	int i;
	u32 val;

	for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
		memcpy(&val, op->data.buf.out + i, 4);
		val = fsl_qspi_endian_xchg(q, val);
		qspi_writel(q, val, base + QUADSPI_TBDR);
	}

	if (i < op->data.nbytes) {
		memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
		val = fsl_qspi_endian_xchg(q, val);
		qspi_writel(q, val, base + QUADSPI_TBDR);
	}

	if (needs_fill_txfifo(q)) {
		for (i = op->data.nbytes; i < 16; i += 4)
			qspi_writel(q, 0, base + QUADSPI_TBDR);
	}
}

static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
				 const struct spi_mem_op *op)
{
	void __iomem *base = q->iobase;
	int i;
	u8 *buf = op->data.buf.in;
	u32 val;

	for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
		val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
		val = fsl_qspi_endian_xchg(q, val);
		memcpy(buf + i, &val, 4);
	}

	if (i < op->data.nbytes) {
		val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
		val = fsl_qspi_endian_xchg(q, val);
		memcpy(buf + i, &val, op->data.nbytes - i);
	}
}

static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
				    u32 mask, u32 delay_us, u32 timeout_us)
{
	u32 reg;

	if (!q->devtype_data->little_endian)
		mask = (u32)cpu_to_be32(mask);

	return readl_poll_timeout(base, reg, !(reg & mask), timeout_us);
}

static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
{
	void __iomem *base = q->iobase;
	int err = 0;

	/*
	 * Always start the sequence at the same index since we update
	 * the LUT at each exec_op() call. Also specify the DATA length,
	 * since it has not been specified in the LUT.
	 */
	qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
		    base + QUADSPI_IPCR);

	/* wait for the controller to be ready */
	err = fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR,
				       (QUADSPI_SR_IP_ACC_MASK |
					QUADSPI_SR_AHB_ACC_MASK),
				       10, 1000);

	if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
		fsl_qspi_read_rxfifo(q, op);

	return err;
}

static int fsl_qspi_exec_op(struct spi_slave *slave,
			    const struct spi_mem_op *op)
{
	struct fsl_qspi *q = dev_get_priv(slave->dev->parent);
	void __iomem *base = q->iobase;
	u32 addr_offset = 0;
	int err = 0;

	/* wait for the controller to be ready */
	fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
				 QUADSPI_SR_AHB_ACC_MASK), 10, 1000);

	fsl_qspi_select_mem(q, slave);

	if (needs_amba_base_offset(q))
		addr_offset = q->memmap_phy;

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
		if (op->addr.nbytes)
			addr_offset += op->addr.val;
	}

	qspi_writel(q,
		    q->selected * fsl_qspi_memsize_per_cs(q) + addr_offset,
		    base + QUADSPI_SFAR);

	qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
		    QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
		    base + QUADSPI_MCR);

	qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
		    base + QUADSPI_SPTRCLR);

	fsl_qspi_prepare_lut(q, op);

	/*
	 * If we have large chunks of data, we read them through the AHB bus
	 * by accessing the mapped memory. In all other cases we use
	 * IP commands to access the flash.
	 */
	if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
	    op->data.dir == SPI_MEM_DATA_IN) {
		fsl_qspi_read_ahb(q, op);
	} else {
		qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
			    QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);

		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
			fsl_qspi_fill_txfifo(q, op);

		err = fsl_qspi_do_op(q, op);
	}

	/* Invalidate the data in the AHB buffer. */
	fsl_qspi_invalidate(q);

	return err;
}

static int fsl_qspi_adjust_op_size(struct spi_slave *slave,
				   struct spi_mem_op *op)
{
	struct fsl_qspi *q = dev_get_priv(slave->dev->parent);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		if (op->data.nbytes > q->devtype_data->txfifo)
			op->data.nbytes = q->devtype_data->txfifo;
	} else {
		if (op->data.nbytes > q->devtype_data->ahb_buf_size)
			op->data.nbytes = q->devtype_data->ahb_buf_size;
		else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
			op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
	}

	return 0;
}

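/*
 * Illustrative effect (hypothetical sizes): on a controller with a 1 KiB AHB
 * buffer and a 128-byte RX FIFO, a 4 KiB read request is first clamped to
 * 1 KiB here; a 300-byte read (larger than rxfifo - 4 but not 8-byte
 * aligned) is trimmed down to 296 bytes so the AHB path can be used.
 */
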
static int fsl_qspi_default_setup(struct fsl_qspi *q)
{
	void __iomem *base = q->iobase;
	u32 reg, addr_offset = 0, memsize_cs;

	/* Reset the module */
	qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
		    base + QUADSPI_MCR);
	udelay(1);

	/* Disable the module */
	qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
		    base + QUADSPI_MCR);

	/*
	 * Previous boot stages (BootROM, bootloader) might have used DDR
	 * mode and did not clear the TDH bits. As we currently use SDR mode
	 * only, clear the TDH bits if necessary.
	 */
	if (needs_tdh_setting(q))
		qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
			    ~QUADSPI_FLSHCR_TDH_MASK,
			    base + QUADSPI_FLSHCR);

	reg = qspi_readl(q, base + QUADSPI_SMPR);
	qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
			| QUADSPI_SMPR_FSPHS_MASK
			| QUADSPI_SMPR_HSENA_MASK
			| QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);

	/* We only use the buffer3 for AHB read */
	qspi_writel(q, 0, base + QUADSPI_BUF0IND);
	qspi_writel(q, 0, base + QUADSPI_BUF1IND);
	qspi_writel(q, 0, base + QUADSPI_BUF2IND);

	if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP))
		qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT_AHB),
			    q->iobase + QUADSPI_BFGENCR);
	else
		qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
			    q->iobase + QUADSPI_BFGENCR);

	qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
	qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
		    QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
		    base + QUADSPI_BUF3CR);

	if (needs_amba_base_offset(q))
		addr_offset = q->memmap_phy;

	/*
	 * In HW there can be a maximum of four chips on two buses with
	 * two chip selects on each bus. We use four chip selects in SW
	 * to differentiate between the four chips.
	 * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD,
	 * SFB2AD accordingly.
	 */
	memsize_cs = fsl_qspi_memsize_per_cs(q);
	qspi_writel(q, memsize_cs + addr_offset,
		    base + QUADSPI_SFA1AD);
	qspi_writel(q, memsize_cs * 2 + addr_offset,
		    base + QUADSPI_SFA2AD);
	if (!needs_single_bus(q)) {
		qspi_writel(q, memsize_cs * 3 + addr_offset,
			    base + QUADSPI_SFB1AD);
		qspi_writel(q, memsize_cs * 4 + addr_offset,
			    base + QUADSPI_SFB2AD);
	}

	q->selected = -1;

	/* Enable the module */
	qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
		    base + QUADSPI_MCR);

	return 0;
}

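/*
 * Illustrative mapping (hypothetical base address): with memsize_cs = 64 MiB
 * and an AMBA base of 0x60000000 that is not added internally by the
 * controller, the top boundaries become SFA1AD = 0x64000000,
 * SFA2AD = 0x68000000, SFB1AD = 0x6c000000 and SFB2AD = 0x70000000,
 * i.e. one contiguous window per chip select.
 */
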
static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
	.adjust_op_size = fsl_qspi_adjust_op_size,
	.supports_op = fsl_qspi_supports_op,
	.exec_op = fsl_qspi_exec_op,
};

static int fsl_qspi_probe(struct udevice *bus)
{
	struct dm_spi_bus *dm_bus = dev_get_uclass_priv(bus);
	struct fsl_qspi *q = dev_get_priv(bus);
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(bus);
	struct fdt_resource res;
	int ret;

	q->dev = bus;
	q->devtype_data = (struct fsl_qspi_devtype_data *)
			   dev_get_driver_data(bus);

	/* find the resources */
	ret = fdt_get_named_resource(blob, node, "reg", "reg-names", "QuadSPI",
				     &res);
	if (ret) {
		dev_err(bus, "Can't get regs base addresses (ret = %d)!\n", ret);
		return -ENOMEM;
	}

	q->iobase = map_physmem(res.start, res.end - res.start, MAP_NOCACHE);

	ret = fdt_get_named_resource(blob, node, "reg", "reg-names",
				     "QuadSPI-memory", &res);
	if (ret) {
		dev_err(bus, "Can't get AMBA base addresses (ret = %d)!\n", ret);
		return -ENOMEM;
	}

	q->ahb_addr = map_physmem(res.start, res.end - res.start, MAP_NOCACHE);
	q->memmap_phy = res.start;
	q->memmap_size = res.end - res.start;

	dm_bus->max_hz = fdtdec_get_int(blob, node, "spi-max-frequency",
					66000000);

	fsl_qspi_default_setup(q);

	return 0;
}

static int fsl_qspi_xfer(struct udevice *dev, unsigned int bitlen,
			 const void *dout, void *din, unsigned long flags)
{
	return 0;
}

static int fsl_qspi_claim_bus(struct udevice *dev)
{
	return 0;
}

static int fsl_qspi_release_bus(struct udevice *dev)
{
	return 0;
}

static int fsl_qspi_set_speed(struct udevice *bus, uint speed)
{
	return 0;
}

static int fsl_qspi_set_mode(struct udevice *bus, uint mode)
{
	return 0;
}

static const struct dm_spi_ops fsl_qspi_ops = {
	.claim_bus	= fsl_qspi_claim_bus,
	.release_bus	= fsl_qspi_release_bus,
	.xfer		= fsl_qspi_xfer,
	.set_speed	= fsl_qspi_set_speed,
	.set_mode	= fsl_qspi_set_mode,
	.mem_ops	= &fsl_qspi_mem_ops,
};

static const struct udevice_id fsl_qspi_ids[] = {
	{ .compatible = "fsl,vf610-qspi", .data = (ulong)&vybrid_data, },
	{ .compatible = "fsl,imx6sx-qspi", .data = (ulong)&imx6sx_data, },
	{ .compatible = "fsl,imx6ul-qspi", .data = (ulong)&imx6ul_data, },
	{ .compatible = "fsl,imx7d-qspi", .data = (ulong)&imx7d_data, },
	{ .compatible = "fsl,imx7ulp-qspi", .data = (ulong)&imx7ulp_data, },
	{ .compatible = "fsl,ls1021a-qspi", .data = (ulong)&ls1021a_data, },
	{ .compatible = "fsl,ls1088a-qspi", .data = (ulong)&ls2080a_data, },
	{ .compatible = "fsl,ls2080a-qspi", .data = (ulong)&ls2080a_data, },
	{ }
};

U_BOOT_DRIVER(fsl_qspi) = {
	.name	= "fsl_qspi",
	.id	= UCLASS_SPI,
	.of_match = fsl_qspi_ids,
	.ops	= &fsl_qspi_ops,
	.priv_auto = sizeof(struct fsl_qspi),
	.probe	= fsl_qspi_probe,
};