// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Xilinx
 *
 * Xilinx ZynqMP Generic Quad-SPI(QSPI) controller driver(master mode only)
 */
#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/arch/sys_proto.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <clk.h>
#include <dm.h>
#include <memalign.h>
#include <spi.h>
#include <spi-mem.h>
#include <ubi_uboot.h>
#include <wait_bit.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <zynqmp_firmware.h>
#define GQSPI_GFIFO_STRT_MODE_MASK	BIT(29)
#define GQSPI_CONFIG_MODE_EN_MASK	(3 << 30)
#define GQSPI_CONFIG_DMA_MODE		(2 << 30)
#define GQSPI_CONFIG_CPHA_MASK		BIT(2)
#define GQSPI_CONFIG_CPOL_MASK		BIT(1)
/*
 * QSPI Interrupt Registers bit Masks
 *
 * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
 * bit definitions.
 */
#define GQSPI_IXR_TXNFULL_MASK		0x00000004 /* QSPI TX FIFO not full */
#define GQSPI_IXR_TXFULL_MASK		0x00000008 /* QSPI TX FIFO is full */
#define GQSPI_IXR_TXFIFOEMPTY_MASK	0x00000100 /* QSPI TX FIFO is Empty */
#define GQSPI_IXR_RXNEMTY_MASK		0x00000010 /* QSPI RX FIFO Not Empty */
#define GQSPI_IXR_GFEMTY_MASK		0x00000080 /* QSPI Generic FIFO Empty */
#define GQSPI_IXR_GFNFULL_MASK		0x00000200 /* QSPI GENFIFO not full */
#define GQSPI_IXR_ALL_MASK		(GQSPI_IXR_TXNFULL_MASK | \
					 GQSPI_IXR_RXNEMTY_MASK)
/*
 * QSPI Enable Register bit Masks
 *
 * This register is used to enable or disable the QSPI controller
 */
#define GQSPI_ENABLE_ENABLE_MASK	0x00000001 /* QSPI Enable Bit Mask */
#define GQSPI_GFIFO_LOW_BUS		BIT(14)
#define GQSPI_GFIFO_CS_LOWER		BIT(12)
#define GQSPI_GFIFO_UP_BUS		BIT(15)
#define GQSPI_GFIFO_CS_UPPER		BIT(13)
#define GQSPI_SPI_MODE_QSPI		(3 << 10)
#define GQSPI_SPI_MODE_SPI		BIT(10)
#define GQSPI_SPI_MODE_DUAL_SPI		(2 << 10)
#define GQSPI_IMD_DATA_CS_ASSERT	5
#define GQSPI_IMD_DATA_CS_DEASSERT	5
#define GQSPI_GFIFO_TX			BIT(16)
#define GQSPI_GFIFO_RX			BIT(17)
#define GQSPI_GFIFO_STRIPE_MASK		BIT(18)
#define GQSPI_GFIFO_IMD_MASK		0xFF
#define GQSPI_GFIFO_EXP_MASK		BIT(9)
#define GQSPI_GFIFO_DATA_XFR_MASK	BIT(8)
#define GQSPI_STRT_GEN_FIFO		BIT(28)
#define GQSPI_GEN_FIFO_STRT_MOD		BIT(29)
#define GQSPI_GFIFO_WP_HOLD		BIT(19)
#define GQSPI_BAUD_DIV_MASK		(7 << 3)
#define GQSPI_DFLT_BAUD_RATE_DIV	BIT(3)
#define GQSPI_GFIFO_ALL_INT_MASK	0xFBE
#define GQSPI_DMA_DST_I_STS_DONE	BIT(1)
#define GQSPI_DMA_DST_I_STS_MASK	0xFE

#define GQSPI_GFIFO_SELECT		BIT(0)
#define GQSPI_FIFO_THRESHOLD		1
#define GQSPI_GENFIFO_THRESHOLD		31

#define SPI_XFER_ON_BOTH		0
#define SPI_XFER_ON_LOWER		1
#define SPI_XFER_ON_UPPER		2

#define GQSPI_DMA_ALIGN			0x4
#define GQSPI_MAX_BAUD_RATE_VAL		7
#define GQSPI_DFLT_BAUD_RATE_VAL	2

#define GQSPI_TIMEOUT			100000000

#define GQSPI_BAUD_DIV_SHIFT		2
#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT	5
#define GQSPI_LPBK_DLY_ADJ_DLY_1	0x2
#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT	3
#define GQSPI_LPBK_DLY_ADJ_DLY_0	0x3
#define GQSPI_USE_DATA_DLY		0x1
#define GQSPI_USE_DATA_DLY_SHIFT	31
#define GQSPI_DATA_DLY_ADJ_VALUE	0x2
#define GQSPI_DATA_DLY_ADJ_SHIFT	28
#define TAP_DLY_BYPASS_LQSPI_RX_VALUE	0x1
#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT	2
#define GQSPI_DATA_DLY_ADJ_OFST		0x000001F8
#define IOU_TAPDLY_BYPASS_OFST		!IS_ENABLED(CONFIG_ARCH_VERSAL) ? \
					0xFF180390 : 0xF103003C
#define GQSPI_LPBK_DLY_ADJ_LPBK_MASK	0x00000020
#define GQSPI_FREQ_37_5MHZ		37500000
#define GQSPI_FREQ_40MHZ		40000000
#define GQSPI_FREQ_100MHZ		100000000
#define GQSPI_FREQ_150MHZ		150000000
#define IOU_TAPDLY_BYPASS_MASK		0x7

#define GQSPI_REG_OFFSET		0x100
#define GQSPI_DMA_REG_OFFSET		0x800
/* QSPI register offsets */
struct zynqmp_qspi_regs {
	u32 confr;	/* 0x00 */
	u32 isr;	/* 0x04 */
	u32 ier;	/* 0x08 */
	u32 idisr;	/* 0x0C */
	u32 imaskr;	/* 0x10 */
	u32 enbr;	/* 0x14 */
	u32 dr;		/* 0x18 */
	u32 txd0r;	/* 0x1C */
	u32 drxr;	/* 0x20 */
	u32 sicr;	/* 0x24 */
	u32 txftr;	/* 0x28 */
	u32 rxftr;	/* 0x2C */
	u32 gpior;	/* 0x30 */
	u32 reserved0;	/* 0x34 */
	u32 lpbkdly;	/* 0x38 */
	u32 reserved1;	/* 0x3C */
	u32 genfifo;	/* 0x40 */
	u32 gqspisel;	/* 0x44 */
	u32 reserved2;	/* 0x48 */
	u32 gqfifoctrl;	/* 0x4C */
	u32 gqfthr;	/* 0x50 */
	u32 gqpollcfg;	/* 0x54 */
	u32 gqpollto;	/* 0x58 */
	u32 gqxfersts;	/* 0x5C */
	u32 gqfifosnap;	/* 0x60 */
	u32 gqrxcpy;	/* 0x64 */
	u32 reserved3[36];	/* 0x68 */
	u32 gqspidlyadj;	/* 0xF8 */
};
struct zynqmp_qspi_dma_regs {
	u32 dmadst;	/* 0x00 */
	u32 dmasize;	/* 0x04 */
	u32 dmasts;	/* 0x08 */
	u32 dmactrl;	/* 0x0C */
	u32 reserved0;	/* 0x10 */
	u32 dmaisr;	/* 0x14 */
	u32 dmaier;	/* 0x18 */
	u32 dmaidr;	/* 0x1C */
	u32 dmaimr;	/* 0x20 */
	u32 dmactrl2;	/* 0x24 */
	u32 dmadstmsb;	/* 0x28 */
};
struct zynqmp_qspi_plat {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	u32 frequency;	/* input frequency */
	u32 speed_hz;
	unsigned int io_mode;
};
struct zynqmp_qspi_priv {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	const void *tx_buf;
	void *rx_buf;
	unsigned int len;
	unsigned int io_mode;
	int bytes_to_transfer;
	int bytes_to_receive;
	const struct spi_mem_op *op;
};
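/*
 * Read the controller base address from the device tree; the QSPI and DMA
 * register blocks sit at fixed offsets from it. The "has-io-mode" property
 * selects polled IO transfers instead of DMA.
 */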
static int zynqmp_qspi_of_to_plat(struct udevice *bus)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);

	debug("%s\n", __func__);

	plat->regs = (struct zynqmp_qspi_regs *)(dev_read_addr(bus) +
						 GQSPI_REG_OFFSET);
	plat->dma_regs = (struct zynqmp_qspi_dma_regs *)
			  (dev_read_addr(bus) + GQSPI_DMA_REG_OFFSET);

	plat->io_mode = dev_read_bool(bus, "has-io-mode");

	return 0;
}
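/*
 * One-time controller setup: select the Generic FIFO interface, disable and
 * clear all interrupts, program the FIFO thresholds and the default baud
 * divisor, enable manual-start mode and, unless IO mode is requested, DMA
 * mode, then enable the controller.
 */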
static void zynqmp_qspi_init_hw(struct zynqmp_qspi_priv *priv)
{
	u32 config_reg;
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_GFIFO_SELECT, &regs->gqspisel);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->idisr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->txftr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->rxftr);
	writel(GQSPI_GENFIFO_THRESHOLD, &regs->gqfthr);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->isr);
	writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	config_reg = readl(&regs->confr);
	config_reg &= ~(GQSPI_GFIFO_STRT_MODE_MASK |
			GQSPI_CONFIG_MODE_EN_MASK);
	config_reg |= GQSPI_GFIFO_WP_HOLD | GQSPI_DFLT_BAUD_RATE_DIV;
	config_reg |= GQSPI_GFIFO_STRT_MODE_MASK;
	if (!priv->io_mode)
		config_reg |= GQSPI_CONFIG_DMA_MODE;

	writel(config_reg, &regs->confr);

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
}
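/*
 * Build the GENFIFO bus-select field: this driver always transfers on the
 * lower bus with the lower chip select.
 */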
static u32 zynqmp_qspi_bus_select(struct zynqmp_qspi_priv *priv)
{
	u32 gqspi_fifo_reg = 0;

	gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS |
			 GQSPI_GFIFO_CS_LOWER;

	return gqspi_fifo_reg;
}
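/* Map an spi-mem bus width (1, 2 or 4 lines) to the GENFIFO SPI mode field. */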
static u32 zynqmp_qspi_genfifo_mode(u8 buswidth)
{
	switch (buswidth) {
	case 1:
		return GQSPI_SPI_MODE_SPI;
	case 2:
		return GQSPI_SPI_MODE_DUAL_SPI;
	case 4:
		return GQSPI_SPI_MODE_QSPI;
	default:
		debug("Unsupported bus width %u\n", buswidth);
		return GQSPI_SPI_MODE_SPI;
	}
}
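/*
 * Write one entry to the Generic FIFO, trigger a manual start and wait for
 * the GENFIFO-empty status so the next entry can be queued safely.
 */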
static void zynqmp_qspi_fill_gen_fifo(struct zynqmp_qspi_priv *priv,
				      u32 gqspi_fifo_reg)
{
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 config_reg, ier;
	int ret = 0;

	writel(gqspi_fifo_reg, &regs->genfifo);

	config_reg = readl(&regs->confr);
	/* Manual start if needed */
	config_reg |= GQSPI_STRT_GEN_FIFO;
	writel(config_reg, &regs->confr);

	/* Enable interrupts */
	ier = readl(&regs->ier);
	ier |= GQSPI_IXR_GFEMTY_MASK;
	writel(ier, &regs->ier);

	/* Wait until the gen fifo is empty to write the new command */
	ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_GFEMTY_MASK, 1,
				GQSPI_TIMEOUT, 1);
	if (ret)
		printf("%s Timeout\n", __func__);
}
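/*
 * Assert or de-assert the (lower) chip select by queueing a GENFIFO entry
 * whose immediate field provides the CS setup/hold delay.
 */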
static void zynqmp_qspi_chipselect(struct zynqmp_qspi_priv *priv, int is_on)
{
	u32 gqspi_fifo_reg = 0;

	if (is_on) {
		gqspi_fifo_reg = zynqmp_qspi_bus_select(priv);
		gqspi_fifo_reg |= GQSPI_SPI_MODE_SPI |
				  GQSPI_IMD_DATA_CS_ASSERT;
	} else {
		gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS;
		gqspi_fifo_reg |= GQSPI_IMD_DATA_CS_DEASSERT;
	}

	debug("GFIFO_CMD_CS: 0x%x\n", gqspi_fifo_reg);

	zynqmp_qspi_fill_gen_fifo(priv, gqspi_fifo_reg);
}
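/*
 * Program the receive tap-delay/loopback compensation for the SCLK rate that
 * results from the given baud-rate divisor. On ZynqMP the IOU_TAPDLY_BYPASS
 * register is written through firmware (zynqmp_mmio_write()); on Versal and
 * Versal NET it is written directly, and the frequency thresholds differ
 * slightly.
 */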
void zynqmp_qspi_set_tapdelay(struct udevice *bus, u32 baudrateval)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
	u32 reqhz = 0;

	clk_rate = plat->frequency;
	reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));

	debug("%s, req_hz:%d, clk_rate:%d, baudrateval:%d\n",
	      __func__, reqhz, clk_rate, baudrateval);

	if (!(IS_ENABLED(CONFIG_ARCH_VERSAL) ||
	      IS_ENABLED(CONFIG_ARCH_VERSAL_NET))) {
		if (reqhz <= GQSPI_FREQ_40MHZ) {
			tapdlybypass = TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
					TAP_DLY_BYPASS_LQSPI_RX_SHIFT;
		} else if (reqhz <= GQSPI_FREQ_100MHZ) {
			tapdlybypass = TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
					TAP_DLY_BYPASS_LQSPI_RX_SHIFT;
			lpbkdlyadj = GQSPI_LPBK_DLY_ADJ_LPBK_MASK;
			datadlyadj = (GQSPI_USE_DATA_DLY <<
				      GQSPI_USE_DATA_DLY_SHIFT) |
				     (GQSPI_DATA_DLY_ADJ_VALUE <<
				      GQSPI_DATA_DLY_ADJ_SHIFT);
		} else if (reqhz <= GQSPI_FREQ_150MHZ) {
			lpbkdlyadj = GQSPI_LPBK_DLY_ADJ_LPBK_MASK |
				     GQSPI_LPBK_DLY_ADJ_DLY_0;
		}
		zynqmp_mmio_write(IOU_TAPDLY_BYPASS_OFST,
				  IOU_TAPDLY_BYPASS_MASK, tapdlybypass);
	} else {
		if (reqhz <= GQSPI_FREQ_37_5MHZ) {
			tapdlybypass = TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
					TAP_DLY_BYPASS_LQSPI_RX_SHIFT;
		} else if (reqhz <= GQSPI_FREQ_100MHZ) {
			tapdlybypass = TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
					TAP_DLY_BYPASS_LQSPI_RX_SHIFT;
			lpbkdlyadj = GQSPI_LPBK_DLY_ADJ_LPBK_MASK;
			datadlyadj = GQSPI_USE_DATA_DLY <<
				     GQSPI_USE_DATA_DLY_SHIFT;
		} else if (reqhz <= GQSPI_FREQ_150MHZ) {
			lpbkdlyadj = GQSPI_LPBK_DLY_ADJ_LPBK_MASK |
				     (GQSPI_LPBK_DLY_ADJ_DLY_1 <<
				      GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT);
		}
		writel(tapdlybypass, IOU_TAPDLY_BYPASS_OFST);
	}

	writel(lpbkdlyadj, &regs->lpbkdly);
	writel(datadlyadj, &regs->gqspidlyadj);
}
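/*
 * Select the smallest baud-rate divisor (2 << baud_rate_val) that keeps SCLK
 * at or below the requested speed, program it and update the tap delays to
 * match.
 */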
static int zynqmp_qspi_set_speed(struct udevice *bus, uint speed)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;
	u8 baud_rate_val = 0;

	debug("%s\n", __func__);
	if (speed > plat->frequency)
		speed = plat->frequency;

	if (plat->speed_hz != speed) {
		/* Set the clock frequency */
		/* If speed == 0, default to lowest speed */
		while ((baud_rate_val < 8) &&
		       ((plat->frequency /
			(2 << baud_rate_val)) > speed))
			baud_rate_val++;

		if (baud_rate_val > GQSPI_MAX_BAUD_RATE_VAL)
			baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;

		plat->speed_hz = plat->frequency / (2 << baud_rate_val);

		confr = readl(&regs->confr);
		confr &= ~GQSPI_BAUD_DIV_MASK;
		confr |= (baud_rate_val << 3);
		writel(confr, &regs->confr);

		zynqmp_qspi_set_tapdelay(bus, baud_rate_val);
	}
	debug("regs=%p, speed=%d\n", priv->regs, plat->speed_hz);

	return 0;
}
static int zynqmp_qspi_probe(struct udevice *bus)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct clk clk;
	unsigned long clock;
	int ret;

	debug("%s: bus:%p, priv:%p\n", __func__, bus, priv);

	priv->regs = plat->regs;
	priv->dma_regs = plat->dma_regs;
	priv->io_mode = plat->io_mode;

	ret = clk_get_by_index(bus, 0, &clk);
	if (ret < 0) {
		dev_err(bus, "failed to get clock\n");
		return ret;
	}

	clock = clk_get_rate(&clk);
	if (IS_ERR_VALUE(clock)) {
		dev_err(bus, "failed to get rate\n");
		return clock;
	}
	debug("%s: CLK %ld\n", __func__, clock);

	ret = clk_enable(&clk);
	if (ret) {
		dev_err(bus, "failed to enable clock\n");
		return ret;
	}
	plat->frequency = clock;
	plat->speed_hz = plat->frequency / 2;

	/* init the zynq spi hw */
	zynqmp_qspi_init_hw(priv);

	return 0;
}
static int zynqmp_qspi_set_mode(struct udevice *bus, uint mode)
{
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;

	debug("%s\n", __func__);
	/* Set the SPI Clock phase and polarities */
	confr = readl(&regs->confr);
	confr &= ~(GQSPI_CONFIG_CPHA_MASK | GQSPI_CONFIG_CPOL_MASK);

	if (mode & SPI_CPHA)
		confr |= GQSPI_CONFIG_CPHA_MASK;
	if (mode & SPI_CPOL)
		confr |= GQSPI_CONFIG_CPOL_MASK;

	writel(confr, &regs->confr);

	return 0;
}
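/*
 * Copy 'size' bytes from priv->tx_buf into the TX FIFO: full 32-bit words
 * first, with the final partial word padded with ones, then wait for the
 * FIFO to drain.
 */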
static int zynqmp_qspi_fill_tx_fifo(struct zynqmp_qspi_priv *priv, u32 size)
{
	u32 data;
	int ret = 0;
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 *buf = (u32 *)priv->tx_buf;
	u32 len = size;

	debug("TxFIFO: 0x%x, size: 0x%x\n", readl(&regs->isr), size);

	while (size) {
		ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXNFULL_MASK, 1,
					GQSPI_TIMEOUT, 1);
		if (ret) {
			printf("%s: Timeout\n", __func__);
			return ret;
		}

		if (size >= 4) {
			writel(*buf, &regs->txd0r);
			buf++;
			size -= 4;
		} else {
			switch (size) {
			case 1:
				data = *((u8 *)buf);
				buf += 1;
				data |= GENMASK(31, 8);
				break;
			case 2:
				data = *((u16 *)buf);
				buf += 2;
				data |= GENMASK(31, 16);
				break;
			case 3:
				data = *buf;
				buf += 3;
				data |= GENMASK(31, 24);
				break;
			}
			writel(data, &regs->txd0r);
			size = 0;
		}
	}

	ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXFIFOEMPTY_MASK, 1,
				GQSPI_TIMEOUT, 1);
	if (ret) {
		printf("%s: Timeout\n", __func__);
		return ret;
	}

	priv->tx_buf += len;
	return 0;
}
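/*
 * Queue the command phase of an spi-mem operation: the opcode, each address
 * byte (most significant first) and any dummy cycles, one GENFIFO entry per
 * item.
 */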
static void zynqmp_qspi_genfifo_cmd(struct zynqmp_qspi_priv *priv)
{
	const struct spi_mem_op *op = priv->op;
	u32 gen_fifo_cmd;
	u8 i, dummy_cycles, addr;

	/* Send opcode */
	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->cmd.buswidth);
	gen_fifo_cmd |= GQSPI_GFIFO_TX;
	gen_fifo_cmd |= op->cmd.opcode;
	zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

	/* Send address */
	for (i = 0; i < op->addr.nbytes; i++) {
		addr = op->addr.val >> (8 * (op->addr.nbytes - i - 1));

		gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
		gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->addr.buswidth);
		gen_fifo_cmd |= GQSPI_GFIFO_TX;
		gen_fifo_cmd |= addr;

		debug("GFIFO_CMD_Cmd = 0x%x\n", gen_fifo_cmd);

		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
	}

	/* Send dummy cycles */
	if (op->dummy.nbytes) {
		dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

		gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
		gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->dummy.buswidth);
		gen_fifo_cmd &= ~(GQSPI_GFIFO_TX | GQSPI_GFIFO_RX);
		gen_fifo_cmd |= GQSPI_GFIFO_DATA_XFR_MASK;
		gen_fifo_cmd |= dummy_cycles;
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
	}
}
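/*
 * The GENFIFO immediate field is only 8 bits wide. For transfers longer than
 * 255 bytes return the largest power-of-two chunk that fits (exponent mode)
 * and reduce priv->len by it; shorter residues are sent as a plain byte
 * count.
 */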
static u32 zynqmp_qspi_calc_exp(struct zynqmp_qspi_priv *priv,
				u32 *gen_fifo_cmd)
{
	u32 expval = 8;
	u32 len;

	while (1) {
		if (priv->len > 255) {
			if (priv->len & (1 << expval)) {
				*gen_fifo_cmd &= ~GQSPI_GFIFO_IMD_MASK;
				*gen_fifo_cmd |= GQSPI_GFIFO_EXP_MASK;
				*gen_fifo_cmd |= expval;
				priv->len -= (1 << expval);
				return expval;
			}
			expval++;
		} else {
			*gen_fifo_cmd &= ~(GQSPI_GFIFO_IMD_MASK |
					   GQSPI_GFIFO_EXP_MASK);
			*gen_fifo_cmd |= (u8)priv->len;
			len = (u8)priv->len;
			priv->len = 0;
			return len;
		}
	}
}
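/*
 * Transmit data phase: queue one TX GENFIFO entry per chunk and feed the TX
 * FIFO with the corresponding number of bytes until priv->len is exhausted.
 */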
static int zynqmp_qspi_genfifo_fill_tx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 len;
	int ret = 0;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
	gen_fifo_cmd |= GQSPI_GFIFO_TX | GQSPI_GFIFO_DATA_XFR_MASK;

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_TX:0x%x\n", gen_fifo_cmd);

		if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
			ret = zynqmp_qspi_fill_tx_fifo(priv, 1 << len);
		else
			ret = zynqmp_qspi_fill_tx_fifo(priv, len);

		if (ret)
			return ret;
	}

	return ret;
}
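/*
 * Polled (IO mode) receive: queue the RX GENFIFO entry for each chunk, then
 * drain the RX FIFO word by word into the destination buffer, copying any
 * trailing partial word with memcpy.
 */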
static int zynqmp_qspi_start_io(struct zynqmp_qspi_priv *priv,
				u32 gen_fifo_cmd, u32 *buf)
{
	u32 len;
	u32 actuallen = priv->len;
	u32 config_reg, ier, isr;
	u32 timeout = GQSPI_TIMEOUT;
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 last_bits;
	u32 *traverse = buf;

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		/* If exponent bit is set, reset immediate to be 2^len */
		if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
			priv->bytes_to_receive = (1 << len);
		else
			priv->bytes_to_receive = len;
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
		debug("GFIFO_CMD_RX:0x%x\n", gen_fifo_cmd);

		config_reg = readl(&regs->confr);
		config_reg |= GQSPI_STRT_GEN_FIFO;
		writel(config_reg, &regs->confr);
		/* Enable RX interrupts for IO mode */
		ier = readl(&regs->ier);
		ier |= GQSPI_IXR_ALL_MASK;
		writel(ier, &regs->ier);
		while (priv->bytes_to_receive && timeout) {
			isr = readl(&regs->isr);
			if (isr & GQSPI_IXR_RXNEMTY_MASK) {
				if (priv->bytes_to_receive >= 4) {
					*traverse = readl(&regs->drxr);
					traverse++;
					priv->bytes_to_receive -= 4;
				} else {
					last_bits = readl(&regs->drxr);
					memcpy(traverse, &last_bits,
					       priv->bytes_to_receive);
					priv->bytes_to_receive = 0;
				}
				timeout = GQSPI_TIMEOUT;
			} else {
				timeout--;
			}
		}

		debug("buf:0x%lx, rxbuf:0x%lx, *buf:0x%x len: 0x%x\n",
		      (unsigned long)buf, (unsigned long)priv->rx_buf,
		      *buf, actuallen);
		if (!timeout) {
			printf("IO timeout: %d\n", readl(&regs->isr));
			return -1;
		}
	}

	return 0;
}
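/*
 * DMA receive: point the destination DMA at the buffer, queue the RX GENFIFO
 * entries and wait for the DMA-done status; requests of 512 MiB or more are
 * read in chunks.
 */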
static int zynqmp_qspi_start_dma(struct zynqmp_qspi_priv *priv,
				 u32 gen_fifo_cmd, u32 *buf)
{
	unsigned long addr;
	u32 size;
	u32 actuallen = priv->len;
	u32 totallen = priv->len;
	int ret = 0;
	struct zynqmp_qspi_dma_regs *dma_regs = priv->dma_regs;

	while (totallen) {
		if (totallen >= SZ_512M)
			priv->len = SZ_256M;
		else
			priv->len = totallen;

		totallen -= priv->len; /* Save remaining bytes length to read */
		actuallen = priv->len; /* Actual number of bytes reading */

		writel((unsigned long)buf, &dma_regs->dmadst);
		writel(roundup(priv->len, GQSPI_DMA_ALIGN), &dma_regs->dmasize);
		writel(GQSPI_DMA_DST_I_STS_MASK, &dma_regs->dmaier);
		addr = (unsigned long)buf;
		size = roundup(priv->len, GQSPI_DMA_ALIGN);
		flush_dcache_range(addr, addr + size);

		while (priv->len) {
			zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
			zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

			debug("GFIFO_CMD_RX:0x%x\n", gen_fifo_cmd);
		}

		ret = wait_for_bit_le32(&dma_regs->dmaisr,
					GQSPI_DMA_DST_I_STS_DONE, 1,
					GQSPI_TIMEOUT, 1);
		if (ret) {
			printf("DMA Timeout:0x%x\n", readl(&dma_regs->dmaisr));
			return -ETIMEDOUT;
		}

		writel(GQSPI_DMA_DST_I_STS_DONE, &dma_regs->dmaisr);

		debug("buf:0x%lx, rxbuf:0x%lx, *buf:0x%x len: 0x%x\n",
		      (unsigned long)buf, (unsigned long)priv->rx_buf, *buf,
		      actuallen);

		if (buf != priv->rx_buf)
			memcpy(priv->rx_buf, buf, actuallen);

		buf = (u32 *)((u8 *)buf + actuallen);
		priv->rx_buf = (u8 *)priv->rx_buf + actuallen;
	}

	return 0;
}
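/*
 * Receive data phase: use the caller's buffer directly when it is 4-byte
 * aligned with a length that is a multiple of 4 bytes (or when IO mode is
 * selected), otherwise bounce the DMA through a cache-aligned temporary
 * buffer and copy the result out.
 */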
static int zynqmp_qspi_genfifo_fill_rx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 *buf;
	u32 actuallen = priv->len;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
	gen_fifo_cmd |= GQSPI_GFIFO_RX | GQSPI_GFIFO_DATA_XFR_MASK;

	/*
	 * Check if receive buffer is aligned to 4 byte and length
	 * is multiples of four byte as we are using dma to receive.
	 */
	if ((!((unsigned long)priv->rx_buf & (GQSPI_DMA_ALIGN - 1)) &&
	     !(actuallen % GQSPI_DMA_ALIGN)) || priv->io_mode) {
		buf = (u32 *)priv->rx_buf;
		if (priv->io_mode)
			return zynqmp_qspi_start_io(priv, gen_fifo_cmd, buf);
		else
			return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len, GQSPI_DMA_ALIGN));
	buf = (u32 *)tmp;
	return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
}
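/* Enable the controller while the bus is claimed, disable it on release. */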
static int zynqmp_qspi_claim_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}

static int zynqmp_qspi_release_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}
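/*
 * spi-mem exec_op hook: assert the chip select, send the command, address
 * and dummy phases, run the data phase in the requested direction, then
 * de-assert the chip select.
 */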
static int zynqmp_qspi_exec_op(struct spi_slave *slave,
			       const struct spi_mem_op *op)
{
	struct zynqmp_qspi_priv *priv = dev_get_priv(slave->dev->parent);
	int ret = 0;

	priv->op = op;
	priv->tx_buf = op->data.buf.out;
	priv->rx_buf = op->data.buf.in;
	priv->len = op->data.nbytes;

	zynqmp_qspi_chipselect(priv, 1);

	/* Send opcode, addr, dummy */
	zynqmp_qspi_genfifo_cmd(priv);

	/* Request the transfer */
	if (op->data.dir == SPI_MEM_DATA_IN)
		ret = zynqmp_qspi_genfifo_fill_rx(priv);
	else if (op->data.dir == SPI_MEM_DATA_OUT)
		ret = zynqmp_qspi_genfifo_fill_tx(priv);

	zynqmp_qspi_chipselect(priv, 0);

	return ret;
}
static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
	.exec_op = zynqmp_qspi_exec_op,
};

static const struct dm_spi_ops zynqmp_qspi_ops = {
	.claim_bus	= zynqmp_qspi_claim_bus,
	.release_bus	= zynqmp_qspi_release_bus,
	.set_speed	= zynqmp_qspi_set_speed,
	.set_mode	= zynqmp_qspi_set_mode,
	.mem_ops	= &zynqmp_qspi_mem_ops,
};

static const struct udevice_id zynqmp_qspi_ids[] = {
	{ .compatible = "xlnx,zynqmp-qspi-1.0" },
	{ .compatible = "xlnx,versal-qspi-1.0" },
	{ }
};

U_BOOT_DRIVER(zynqmp_qspi) = {
	.name		= "zynqmp_qspi",
	.id		= UCLASS_SPI,
	.of_match	= zynqmp_qspi_ids,
	.ops		= &zynqmp_qspi_ops,
	.of_to_plat	= zynqmp_qspi_of_to_plat,
	.plat_auto	= sizeof(struct zynqmp_qspi_plat),
	.priv_auto	= sizeof(struct zynqmp_qspi_priv),
	.probe		= zynqmp_qspi_probe,
};