// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 MediaTek Inc. All Rights Reserved.
 *
 * Author: SkyLake.Huang <skylake.huang@mediatek.com>
 */
#include <clk.h>
#include <div64.h>
#include <dm.h>
#include <spi.h>
#include <spi-mem.h>
#include <stdbool.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/pinctrl.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
/* Register map of the MTK SPI master (SPIM) controller */
#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_IRQ_REG			0x001c
#define SPI_STATUS_REG			0x0020
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

/* SPI_CFG0: clock/CS timing fields (legacy 8-bit layout) */
#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
/* enhance_timing layout: 16-bit CS hold/setup in SPI_CFG0 */
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

/* SPI_CFG1: CS idle time, packet length/loop, get-tick delay */
#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICKDLY_OFFSET	29

#define SPI_CFG1_GET_TICKDLY_MASK	GENMASK(31, 29)
#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)

/* SPI_CFG2 (enhance_timing): 16-bit SCK high/low time */
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16
#define SPI_CFG2_SCK_HIGH_MASK		GENMASK(15, 0)
#define SPI_CFG2_SCK_LOW_MASK		GENMASK(31, 16)

/* SPI_CMD: control bits */
#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

/* Map bus width (1/2/4) to the 2-bit pin-mode field: 1->0, 2->1, 4->2 */
#define PIN_MODE_CFG(x)			((x) / 2)

/* SPI_CFG3 (IPM): spi-mem op layout (cmd/addr/dummy lengths, duplex) */
#define SPI_CFG3_IPM_PIN_MODE_OFFSET	0
#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
#define SPI_CFG3_IPM_DUMMY_BYTELEN_OFFSET 16

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)
#define SPI_CFG3_IPM_DUMMY_BYTELEN_MASK	GENMASK(19, 16)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

/* Controller states */
#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

#define MTK_SPI_32BITS_MASK		0xffffffff

#define DMA_ADDR_EXT_BITS		36
#define DMA_ADDR_DEF_BITS		32

/* Convert clkcnt clock cycles at freq (Hz) to microseconds, rounding up */
#define CLK_TO_US(freq, clkcnt)		DIV_ROUND_UP((clkcnt), (freq) / 1000000)
/**
 * struct mtk_spim_capability - per-compatible hardware feature flags
 * @enhance_timing: IC supports the enhanced timing registers (SPI_CFG2 and
 *                  the 16-bit CS hold/setup layout) for better time accuracy
 * @dma_ext: IC supports DMA address extension beyond 32 bits
 * @ipm_design: IPM IP design; improves some features and supports dual/quad
 * @support_quad: whether quad mode is supported
 *
 * All flags are read from the device tree in mtk_spim_get_attr().
 */
struct mtk_spim_capability {
	bool enhance_timing;
	bool dma_ext;
	bool ipm_design;
	bool support_quad;
};
135 /* struct mtk_spim_priv
136 * @base: Base address of the spi controller
137 * @state: Controller state
138 * @sel_clk: Pad clock
139 * @spi_clk: Core clock
140 * @xfer_len: Current length of data for transfer
141 * @hw_cap: Controller capabilities
142 * @tick_dly: Used to postpone SPI sampling time
143 * @sample_sel: Sample edge of MISO
144 * @dev: udevice of this spi controller
145 * @tx_dma: Tx DMA address
146 * @rx_dma: Rx DMA address
148 struct mtk_spim_priv {
151 struct clk sel_clk, spi_clk;
153 struct mtk_spim_capability hw_cap;
162 static void mtk_spim_reset(struct mtk_spim_priv *priv)
164 /* set the software reset bit in SPI_CMD_REG. */
165 setbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
166 clrbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
169 static int mtk_spim_hw_init(struct spi_slave *slave)
171 struct udevice *bus = dev_get_parent(slave->dev);
172 struct mtk_spim_priv *priv = dev_get_priv(bus);
176 cpha = slave->mode & SPI_CPHA ? 1 : 0;
177 cpol = slave->mode & SPI_CPOL ? 1 : 0;
179 if (priv->hw_cap.enhance_timing) {
180 if (priv->hw_cap.ipm_design) {
181 /* CFG3 reg only used for spi-mem,
182 * here write to default value
184 writel(0x0, priv->base + SPI_CFG3_IPM_REG);
185 clrsetbits_le32(priv->base + SPI_CMD_REG,
186 SPI_CMD_IPM_GET_TICKDLY_MASK,
188 SPI_CMD_IPM_GET_TICKDLY_OFFSET);
190 clrsetbits_le32(priv->base + SPI_CFG1_REG,
191 SPI_CFG1_GET_TICKDLY_MASK,
193 SPI_CFG1_GET_TICKDLY_OFFSET);
197 reg_val = readl(priv->base + SPI_CMD_REG);
198 if (priv->hw_cap.ipm_design) {
199 /* SPI transfer without idle time until packet length done */
200 reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
201 if (slave->mode & SPI_LOOP)
202 reg_val |= SPI_CMD_IPM_SPIM_LOOP;
204 reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
208 reg_val |= SPI_CMD_CPHA;
210 reg_val &= ~SPI_CMD_CPHA;
212 reg_val |= SPI_CMD_CPOL;
214 reg_val &= ~SPI_CMD_CPOL;
216 /* set the mlsbx and mlsbtx */
217 if (slave->mode & SPI_LSB_FIRST) {
218 reg_val &= ~SPI_CMD_TXMSBF;
219 reg_val &= ~SPI_CMD_RXMSBF;
221 reg_val |= SPI_CMD_TXMSBF;
222 reg_val |= SPI_CMD_RXMSBF;
225 /* do not reverse tx/rx endian */
226 reg_val &= ~SPI_CMD_TX_ENDIAN;
227 reg_val &= ~SPI_CMD_RX_ENDIAN;
229 if (priv->hw_cap.enhance_timing) {
230 /* set CS polarity */
231 if (slave->mode & SPI_CS_HIGH)
232 reg_val |= SPI_CMD_CS_POL;
234 reg_val &= ~SPI_CMD_CS_POL;
236 if (priv->sample_sel)
237 reg_val |= SPI_CMD_SAMPLE_SEL;
239 reg_val &= ~SPI_CMD_SAMPLE_SEL;
242 /* disable dma mode */
243 reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
245 /* disable deassert mode */
246 reg_val &= ~SPI_CMD_DEASSERT;
248 writel(reg_val, priv->base + SPI_CMD_REG);
253 static void mtk_spim_prepare_transfer(struct mtk_spim_priv *priv,
256 u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
258 spi_clk_hz = clk_get_rate(&priv->spi_clk);
259 if (speed_hz <= spi_clk_hz / 4)
260 div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
264 sck_time = (div + 1) / 2;
265 cs_time = sck_time * 2;
267 if (priv->hw_cap.enhance_timing) {
268 reg_val = ((sck_time - 1) & 0xffff)
269 << SPI_CFG2_SCK_HIGH_OFFSET;
270 reg_val |= ((sck_time - 1) & 0xffff)
271 << SPI_CFG2_SCK_LOW_OFFSET;
272 writel(reg_val, priv->base + SPI_CFG2_REG);
274 reg_val = ((cs_time - 1) & 0xffff)
275 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET;
276 reg_val |= ((cs_time - 1) & 0xffff)
277 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET;
278 writel(reg_val, priv->base + SPI_CFG0_REG);
280 reg_val = ((sck_time - 1) & 0xff)
281 << SPI_CFG0_SCK_HIGH_OFFSET;
282 reg_val |= ((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET;
283 reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET;
284 reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET;
285 writel(reg_val, priv->base + SPI_CFG0_REG);
288 reg_val = readl(priv->base + SPI_CFG1_REG);
289 reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
290 reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET;
291 writel(reg_val, priv->base + SPI_CFG1_REG);
295 * mtk_spim_setup_packet() - setup packet format.
296 * @priv: controller priv
298 * This controller sents/receives data in packets. The packet size is
301 * This function calculates the maximum packet size available for current
302 * data, and calculates the number of packets required to sent/receive data
303 * as much as possible.
305 static void mtk_spim_setup_packet(struct mtk_spim_priv *priv)
307 u32 packet_size, packet_loop, reg_val;
309 /* Calculate maximum packet size */
310 if (priv->hw_cap.ipm_design)
311 packet_size = min_t(u32,
313 MTK_SPI_IPM_PACKET_SIZE);
315 packet_size = min_t(u32,
317 MTK_SPI_PACKET_SIZE);
319 /* Calculates number of packets to sent/receive */
320 packet_loop = priv->xfer_len / packet_size;
322 reg_val = readl(priv->base + SPI_CFG1_REG);
323 if (priv->hw_cap.ipm_design)
324 reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
326 reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
328 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
330 reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
332 reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
334 writel(reg_val, priv->base + SPI_CFG1_REG);
337 static void mtk_spim_enable_transfer(struct mtk_spim_priv *priv)
341 cmd = readl(priv->base + SPI_CMD_REG);
342 if (priv->state == MTK_SPI_IDLE)
345 cmd |= SPI_CMD_RESUME;
346 writel(cmd, priv->base + SPI_CMD_REG);
349 static bool mtk_spim_supports_op(struct spi_slave *slave,
350 const struct spi_mem_op *op)
352 struct udevice *bus = dev_get_parent(slave->dev);
353 struct mtk_spim_priv *priv = dev_get_priv(bus);
355 if (op->cmd.buswidth == 0 || op->cmd.buswidth > 4 ||
356 op->addr.buswidth > 4 || op->dummy.buswidth > 4 ||
357 op->data.buswidth > 4)
360 if (!priv->hw_cap.support_quad && (op->cmd.buswidth > 2 ||
361 op->addr.buswidth > 2 || op->dummy.buswidth > 2 ||
362 op->data.buswidth > 2))
365 if (op->addr.nbytes && op->dummy.nbytes &&
366 op->addr.buswidth != op->dummy.buswidth)
369 if (op->addr.nbytes + op->dummy.nbytes > 16)
372 if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
373 if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
374 MTK_SPI_IPM_PACKET_LOOP ||
375 op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
382 static void mtk_spim_setup_dma_xfer(struct mtk_spim_priv *priv,
383 const struct spi_mem_op *op)
385 writel((u32)(priv->tx_dma & MTK_SPI_32BITS_MASK),
386 priv->base + SPI_TX_SRC_REG);
388 if (priv->hw_cap.dma_ext)
389 writel((u32)(priv->tx_dma >> 32),
390 priv->base + SPI_TX_SRC_REG_64);
392 if (op->data.dir == SPI_MEM_DATA_IN) {
393 writel((u32)(priv->rx_dma & MTK_SPI_32BITS_MASK),
394 priv->base + SPI_RX_DST_REG);
396 if (priv->hw_cap.dma_ext)
397 writel((u32)(priv->rx_dma >> 32),
398 priv->base + SPI_RX_DST_REG_64);
402 static int mtk_spim_transfer_wait(struct spi_slave *slave,
403 const struct spi_mem_op *op)
405 struct udevice *bus = dev_get_parent(slave->dev);
406 struct mtk_spim_priv *priv = dev_get_priv(bus);
407 u32 sck_l, sck_h, spi_bus_clk, clk_count, reg;
411 if (op->data.dir == SPI_MEM_NO_DATA)
414 clk_count = op->data.nbytes;
416 spi_bus_clk = clk_get_rate(&priv->spi_clk);
417 sck_l = readl(priv->base + SPI_CFG2_REG) >> SPI_CFG2_SCK_LOW_OFFSET;
418 sck_h = readl(priv->base + SPI_CFG2_REG) & SPI_CFG2_SCK_HIGH_MASK;
419 do_div(spi_bus_clk, sck_l + sck_h + 2);
421 us = CLK_TO_US(spi_bus_clk, clk_count * 8);
422 us += 1000 * 1000; /* 1s tolerance */
427 ret = readl_poll_timeout(priv->base + SPI_STATUS_REG, reg,
430 dev_err(priv->dev, "transfer timeout, val: 0x%lx\n", us);
437 static int mtk_spim_exec_op(struct spi_slave *slave,
438 const struct spi_mem_op *op)
440 struct udevice *bus = dev_get_parent(slave->dev);
441 struct mtk_spim_priv *priv = dev_get_priv(bus);
442 u32 reg_val, nio = 1, tx_size;
447 mtk_spim_reset(priv);
448 mtk_spim_hw_init(slave);
449 mtk_spim_prepare_transfer(priv, slave->max_hz);
451 reg_val = readl(priv->base + SPI_CFG3_IPM_REG);
452 /* opcode byte len */
453 reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
454 reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
456 /* addr & dummy byte len */
457 if (op->addr.nbytes || op->dummy.nbytes)
458 reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
459 SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
462 if (!op->data.nbytes) {
463 reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
464 writel(0, priv->base + SPI_CFG1_REG);
466 reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
467 priv->xfer_len = op->data.nbytes;
468 mtk_spim_setup_packet(priv);
471 if (op->addr.nbytes || op->dummy.nbytes) {
472 if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
473 reg_val |= SPI_CFG3_IPM_XMODE_EN;
475 reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
478 if (op->addr.buswidth == 2 ||
479 op->dummy.buswidth == 2 ||
480 op->data.buswidth == 2)
482 else if (op->addr.buswidth == 4 ||
483 op->dummy.buswidth == 4 ||
484 op->data.buswidth == 4)
487 reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
488 reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
490 reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
491 if (op->data.dir == SPI_MEM_DATA_IN)
492 reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
494 reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
495 writel(reg_val, priv->base + SPI_CFG3_IPM_REG);
497 tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
498 if (op->data.dir == SPI_MEM_DATA_OUT)
499 tx_size += op->data.nbytes;
501 tx_size = max(tx_size, (u32)32);
503 /* Fill up tx data */
504 tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL);
510 tx_tmp_buf[0] = op->cmd.opcode;
512 if (op->addr.nbytes) {
513 for (i = 0; i < op->addr.nbytes; i++)
514 tx_tmp_buf[i + 1] = op->addr.val >>
515 (8 * (op->addr.nbytes - i - 1));
518 if (op->dummy.nbytes)
519 memset(tx_tmp_buf + op->addr.nbytes + 1, 0xff,
522 if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
523 memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
524 op->data.buf.out, op->data.nbytes);
525 /* Finish filling up tx data */
527 priv->tx_dma = dma_map_single(tx_tmp_buf, tx_size, DMA_TO_DEVICE);
528 if (dma_mapping_error(priv->dev, priv->tx_dma)) {
533 if (op->data.dir == SPI_MEM_DATA_IN) {
534 if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
535 rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL);
541 rx_tmp_buf = op->data.buf.in;
544 priv->rx_dma = dma_map_single(rx_tmp_buf, op->data.nbytes,
546 if (dma_mapping_error(priv->dev, priv->rx_dma)) {
552 reg_val = readl(priv->base + SPI_CMD_REG);
553 reg_val |= SPI_CMD_TX_DMA;
554 if (op->data.dir == SPI_MEM_DATA_IN)
555 reg_val |= SPI_CMD_RX_DMA;
557 writel(reg_val, priv->base + SPI_CMD_REG);
559 mtk_spim_setup_dma_xfer(priv, op);
561 mtk_spim_enable_transfer(priv);
563 /* Wait for the interrupt. */
564 ret = mtk_spim_transfer_wait(slave, op);
568 if (op->data.dir == SPI_MEM_DATA_IN &&
569 !IS_ALIGNED((size_t)op->data.buf.in, 4))
570 memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
573 /* spi disable dma */
574 reg_val = readl(priv->base + SPI_CMD_REG);
575 reg_val &= ~SPI_CMD_TX_DMA;
576 if (op->data.dir == SPI_MEM_DATA_IN)
577 reg_val &= ~SPI_CMD_RX_DMA;
578 writel(reg_val, priv->base + SPI_CMD_REG);
580 writel(0, priv->base + SPI_TX_SRC_REG);
581 writel(0, priv->base + SPI_RX_DST_REG);
583 if (op->data.dir == SPI_MEM_DATA_IN)
584 dma_unmap_single(priv->rx_dma,
585 op->data.nbytes, DMA_FROM_DEVICE);
587 if (op->data.dir == SPI_MEM_DATA_IN &&
588 !IS_ALIGNED((size_t)op->data.buf.in, 4))
591 dma_unmap_single(priv->tx_dma,
592 tx_size, DMA_TO_DEVICE);
599 static int mtk_spim_adjust_op_size(struct spi_slave *slave,
600 struct spi_mem_op *op)
604 if (!op->data.nbytes)
607 if (op->data.dir != SPI_MEM_NO_DATA) {
608 opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
609 if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
610 op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
611 /* force data buffer dma-aligned. */
612 op->data.nbytes -= op->data.nbytes % 4;
619 static int mtk_spim_get_attr(struct mtk_spim_priv *priv, struct udevice *dev)
623 priv->hw_cap.enhance_timing = dev_read_bool(dev, "enhance_timing");
624 priv->hw_cap.dma_ext = dev_read_bool(dev, "dma_ext");
625 priv->hw_cap.ipm_design = dev_read_bool(dev, "ipm_design");
626 priv->hw_cap.support_quad = dev_read_bool(dev, "support_quad");
628 ret = dev_read_u32(dev, "tick_dly", &priv->tick_dly);
630 dev_err(priv->dev, "tick dly not set.\n");
632 ret = dev_read_u32(dev, "sample_sel", &priv->sample_sel);
634 dev_err(priv->dev, "sample sel not set.\n");
639 static int mtk_spim_probe(struct udevice *dev)
641 struct mtk_spim_priv *priv = dev_get_priv(dev);
644 priv->base = (void __iomem *)devfdt_get_addr(dev);
648 mtk_spim_get_attr(priv, dev);
650 ret = clk_get_by_name(dev, "sel-clk", &priv->sel_clk);
652 dev_err(dev, "failed to get sel-clk\n");
656 ret = clk_get_by_name(dev, "spi-clk", &priv->spi_clk);
658 dev_err(dev, "failed to get spi-clk\n");
662 clk_enable(&priv->sel_clk);
663 clk_enable(&priv->spi_clk);
668 static int mtk_spim_set_speed(struct udevice *dev, uint speed)
673 static int mtk_spim_set_mode(struct udevice *dev, uint mode)
678 static const struct spi_controller_mem_ops mtk_spim_mem_ops = {
679 .adjust_op_size = mtk_spim_adjust_op_size,
680 .supports_op = mtk_spim_supports_op,
681 .exec_op = mtk_spim_exec_op
684 static const struct dm_spi_ops mtk_spim_ops = {
685 .mem_ops = &mtk_spim_mem_ops,
686 .set_speed = mtk_spim_set_speed,
687 .set_mode = mtk_spim_set_mode,
690 static const struct udevice_id mtk_spim_ids[] = {
691 { .compatible = "mediatek,ipm-spi" },
695 U_BOOT_DRIVER(mtk_spim) = {
698 .of_match = mtk_spim_ids,
699 .ops = &mtk_spim_ops,
700 .priv_auto = sizeof(struct mtk_spim_priv),
701 .probe = mtk_spim_probe,