1 // SPDX-License-Identifier: GPL-2.0+
3 // Freescale i.MX7ULP LPSPI driver
5 // Copyright 2016 Freescale Semiconductor, Inc.
6 // Copyright 2018 NXP Semiconductors
9 #include <linux/completion.h>
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/interrupt.h>
16 #include <linux/irq.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/dma/imx-dma.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/slab.h>
25 #include <linux/spi/spi.h>
26 #include <linux/spi/spi_bitbang.h>
27 #include <linux/types.h>
#define DRIVER_NAME "fsl_lpspi"

/* Runtime-PM autosuspend delay, in milliseconds. */
#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */

/* The maximum bytes that edma can transfer once.*/
#define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1)

/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID	0x0
#define IMX7ULP_PARAM	0x4	/* read in probe for FIFO sizes / CS count */
#define IMX7ULP_CR	0x10	/* control */
#define IMX7ULP_SR	0x14	/* status (flags are write-1-to-clear) */
#define IMX7ULP_IER	0x18	/* interrupt enable */
#define IMX7ULP_DER	0x1c	/* DMA request enable */
#define IMX7ULP_CFGR0	0x20
#define IMX7ULP_CFGR1	0x24
#define IMX7ULP_DMR0	0x30
#define IMX7ULP_DMR1	0x34
#define IMX7ULP_CCR	0x40	/* clock configuration (dividers/delays) */
#define IMX7ULP_FCR	0x58	/* FIFO watermark control */
#define IMX7ULP_FSR	0x5c	/* FIFO status */
#define IMX7ULP_TCR	0x60	/* transmit command */
#define IMX7ULP_TDR	0x64	/* transmit data */
#define IMX7ULP_RSR	0x70	/* receive status */
#define IMX7ULP_RDR	0x74	/* receive data */

/* General control register field define */
#define SR_MBF		BIT(24)	/* module busy flag */
#define SR_TCF		BIT(10)	/* transfer complete flag */
#define IER_TCIE	BIT(10)
#define IER_FCIE	BIT(9)	/* frame complete interrupt enable */
#define IER_RDIE	BIT(1)	/* receive data interrupt enable */
#define IER_TDIE	BIT(0)	/* transmit data interrupt enable */
#define DER_RDDE	BIT(1)	/* RX DMA request enable */
#define DER_TDDE	BIT(0)	/* TX DMA request enable */
#define CFGR1_PCSCFG	BIT(27)
#define CFGR1_PINCFG	(BIT(24)|BIT(25))
#define CFGR1_PCSPOL	BIT(8)	/* chip-select active-high polarity */
#define CFGR1_NOSTALL	BIT(3)
#define CFGR1_HOST	BIT(0)
#define FSR_TXCOUNT	(0xFF)	/* TX FIFO word-count field mask */
#define RSR_RXEMPTY	BIT(1)	/* RX FIFO empty */
#define TCR_CPOL	BIT(31)
#define TCR_CPHA	BIT(30)
#define TCR_CONT	BIT(21)	/* continuous transfer: keep CS asserted */
#define TCR_CONTC	BIT(20)	/* continuing command within a CONT run */
#define TCR_RXMSK	BIT(19)
#define TCR_TXMSK	BIT(18)
/* Per-controller driver state (additional members exist outside this view). */
struct fsl_lpspi_data {
	unsigned long base_phys;		/* physical base, used for DMA slave addresses */
	void (*tx)(struct fsl_lpspi_data *);	/* FIFO-fill helper for current word size */
	void (*rx)(struct fsl_lpspi_data *);	/* FIFO-drain helper for current word size */
	struct lpspi_config config;		/* cached per-transfer configuration */
	struct completion xfer_done;		/* signalled by ISR on PIO frame complete */
	struct completion dma_rx_completion;	/* signalled by RX DMA callback */
	struct completion dma_tx_completion;	/* signalled by TX DMA callback */
/* Device-tree match table. */
static const struct of_device_id fsl_lpspi_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-spi", },
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
/*
 * Generate a typed RX helper: pop one word from RDR and, when a
 * receive buffer is attached, store it there and advance the pointer.
 */
#define LPSPI_BUF_RX(type)						\
static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);	\
	if (fsl_lpspi->rx_buf) {					\
		*(type *)fsl_lpspi->rx_buf = val;			\
		fsl_lpspi->rx_buf += sizeof(type);			\
/*
 * Generate a typed TX helper: take one word from the transmit buffer
 * (when present) and advance it, account for the bytes consumed, then
 * push the word into TDR.
 */
#define LPSPI_BUF_TX(type)						\
static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
	if (fsl_lpspi->tx_buf) {					\
		val = *(type *)fsl_lpspi->tx_buf;			\
		fsl_lpspi->tx_buf += sizeof(type);			\
	fsl_lpspi->remain -= sizeof(type);				\
	writel(val, fsl_lpspi->base + IMX7ULP_TDR);			\
/* Program the interrupt-enable register; callers pass 0 to mask everything. */
static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
	writel(enable, fsl_lpspi->base + IMX7ULP_IER);
/* Number of bytes needed to carry one word of @bpw bits. */
static int fsl_lpspi_bytes_per_word(const int bpw)
	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
/*
 * spi_controller->can_dma: a transfer may use DMA only when channels
 * were acquired and the bytes-per-word value is one the switch below
 * accepts.
 */
static bool fsl_lpspi_can_dma(struct spi_controller *controller,
			      struct spi_device *spi,
			      struct spi_transfer *transfer)
	unsigned int bytes_per_word;

	/* No RX channel means DMA setup failed or was skipped. */
	if (!controller->dma_rx)

	bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);

	switch (bytes_per_word) {
/* Runtime-resume the device (clocks on) before a message batch starts. */
static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
/* Drop the runtime-PM reference (autosuspend) once the batch is done. */
static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	pm_runtime_mark_last_busy(fsl_lpspi->dev);
	pm_runtime_put_autosuspend(fsl_lpspi->dev);
/*
 * Fill the TX FIFO from the transmit buffer.  If everything fit,
 * arm the frame-complete interrupt to detect the end of the transfer;
 * otherwise re-arm the TX-data interrupt so the ISR refills later.
 */
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
	/* Words currently queued in the TX FIFO (FSR low byte). */
	txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;

	while (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->remain)
		fsl_lpspi->tx(fsl_lpspi);

	/* FIFO not full: all remaining data has been queued. */
	if (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->is_target) {
			temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
			writel(temp, fsl_lpspi->base + IMX7ULP_TCR);

		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
		fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
/* Drain every word currently held in the RX FIFO. */
static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
	while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
		fsl_lpspi->rx(fsl_lpspi);
/*
 * Build and program the Transmit Command Register: frame size, SPI
 * mode bits, chip select and (host mode) prescaler / CS continuation.
 */
static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
	temp |= fsl_lpspi->config.bpw - 1;		/* FRAMESZ is bits-1 */
	temp |= (fsl_lpspi->config.mode & 0x3) << 30;	/* CPOL/CPHA into bits 31:30 */
	temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
	if (!fsl_lpspi->is_target) {
		temp |= fsl_lpspi->config.prescale << 27;
		/*
		 * Set TCR_CONT will keep SS asserted after current transfer.
		 * For the first transfer, clear TCR_CONTC to assert SS.
		 * For subsequent transfer, set TCR_CONTC to keep SS asserted.
		 */
		if (!fsl_lpspi->usedma) {
			if (fsl_lpspi->is_first_byte)

	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);

	dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
/*
 * Program FCR FIFO watermarks.  In PIO mode the half-watermark value
 * is mirrored into the upper half-word as well; in DMA mode only the
 * low field is written.
 */
static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
	if (!fsl_lpspi->usedma)
		temp = fsl_lpspi->watermark >> 1 |
		       (fsl_lpspi->watermark >> 1) << 16;
		temp = fsl_lpspi->watermark >> 1;

	writel(temp, fsl_lpspi->base + IMX7ULP_FCR);

	dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
/*
 * Derive a prescaler and SCK divider from the peripheral clock for the
 * requested speed and program the clock-configuration register.
 */
static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
	struct lpspi_config config = fsl_lpspi->config;
	unsigned int perclk_rate, scldiv;

	perclk_rate = clk_get_rate(fsl_lpspi->clk_per);

	/* A zero speed would divide by zero below. */
	if (!config.speed_hz) {
		dev_err(fsl_lpspi->dev,
			"error: the transmission speed provided is 0!\n");

	/* SCK can be at most half the peripheral clock. */
	if (config.speed_hz > perclk_rate / 2) {
		dev_err(fsl_lpspi->dev,
			"per-clk should be at least two times of transfer speed");

	/* Pick the smallest prescaler giving a representable divider. */
	for (prescale = 0; prescale < 8; prescale++) {
		scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2;
			fsl_lpspi->config.prescale = prescale;

	/* Pack the divider and the derived delay fields into CCR. */
	writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
	       fsl_lpspi->base + IMX7ULP_CCR);

	dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n",
		perclk_rate, config.speed_hz, prescale, scldiv);
/*
 * Configure both DMA channels with a bus width matching the current
 * bytes-per-word setting, pointing them at the TDR/RDR data registers.
 */
static int fsl_lpspi_dma_configure(struct spi_controller *controller)
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/* TX: memory -> TDR */
	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
	tx.dst_addr_width = buswidth;

	ret = dmaengine_slave_config(controller->dma_tx, &tx);
		dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",

	/* RX: RDR -> memory */
	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
	rx.src_addr_width = buswidth;

	ret = dmaengine_slave_config(controller->dma_rx, &rx);
		dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
/*
 * Push the cached configuration into the hardware: bitrate (host mode
 * only), watermarks and CFGR1 mode bits, then flush the FIFOs, enable
 * the module, and enable DMA requests when DMA is in use.
 */
static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
	if (!fsl_lpspi->is_target) {
		ret = fsl_lpspi_set_bitrate(fsl_lpspi);

	fsl_lpspi_set_watermark(fsl_lpspi);

	if (!fsl_lpspi->is_target)

	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
		temp |= CFGR1_PCSPOL;	/* active-high chip select */
	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);

	/* Reset both FIFOs and enable the module. */
	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
	temp |= CR_RRF | CR_RTF | CR_MEN;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	if (fsl_lpspi->usedma)
		temp = DER_TDDE | DER_RDDE;
	writel(temp, fsl_lpspi->base + IMX7ULP_DER);
/*
 * Cache per-transfer parameters (mode, word size, speed, chip select),
 * select matching FIFO helper functions and watermark, decide PIO vs
 * DMA, then program the hardware via fsl_lpspi_config().
 */
static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
				    struct spi_device *spi,
				    struct spi_transfer *t)
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(spi->controller);

	fsl_lpspi->config.mode = spi->mode;
	fsl_lpspi->config.bpw = t->bits_per_word;
	fsl_lpspi->config.speed_hz = t->speed_hz;
	if (fsl_lpspi->is_only_cs1)
		fsl_lpspi->config.chip_select = 1;	/* board wires only CS1 */
		fsl_lpspi->config.chip_select = spi_get_chipselect(spi, 0);

	/* Fall back to device defaults when the transfer left fields at 0. */
	if (!fsl_lpspi->config.speed_hz)
		fsl_lpspi->config.speed_hz = spi->max_speed_hz;
	if (!fsl_lpspi->config.bpw)
		fsl_lpspi->config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (fsl_lpspi->config.bpw <= 8) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
	} else if (fsl_lpspi->config.bpw <= 16) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;

	/* Watermark is the transfer length, capped at one FIFO's depth. */
	if (t->len <= fsl_lpspi->txfifosize)
		fsl_lpspi->watermark = t->len;
		fsl_lpspi->watermark = fsl_lpspi->txfifosize;

	if (fsl_lpspi_can_dma(controller, spi, t))
		fsl_lpspi->usedma = true;
		fsl_lpspi->usedma = false;

	return fsl_lpspi_config(fsl_lpspi);
/*
 * spi_controller->target_abort: mark the transfer aborted and wake the
 * waiter — the PIO completion, or both DMA completions in DMA mode.
 */
static int fsl_lpspi_target_abort(struct spi_controller *controller)
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi->target_aborted = true;
	if (!fsl_lpspi->usedma)
		complete(&fsl_lpspi->xfer_done);
		complete(&fsl_lpspi->dma_tx_completion);
		complete(&fsl_lpspi->dma_rx_completion);
/*
 * Wait for a PIO transfer to finish.  Target mode waits interruptibly
 * (an abort can arrive at any time); host mode uses a 1 s timeout.
 */
static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	if (fsl_lpspi->is_target) {
		if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev, "interrupted\n");
		if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
			dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
/*
 * Quiesce the controller after a transfer: mask interrupts in PIO
 * mode, clear all status flags, then flush the FIFOs with the module
 * disabled.
 */
static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
	if (!fsl_lpspi->usedma) {
		/* Disable all interrupt */
		fsl_lpspi_intctrl(fsl_lpspi, 0);

	/* W1C for all flags in SR */
	writel(temp, fsl_lpspi->base + IMX7ULP_SR);

	/* Clear FIFO and disable module */
	temp = CR_RRF | CR_RTF;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);
521 static void fsl_lpspi_dma_rx_callback(void *cookie)
523 struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
525 complete(&fsl_lpspi->dma_rx_completion);
528 static void fsl_lpspi_dma_tx_callback(void *cookie)
530 struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
532 complete(&fsl_lpspi->dma_tx_completion);
/*
 * Estimate a jiffies timeout for a DMA transfer of the given size at
 * the configured speed, padded generously for scheduling latency.
 */
static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
	unsigned long timeout = 0;

	/* Time with actual data transfer and CS change delay related to HW */
	timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;

	/* Add extra second for scheduler related activities */

	/* Double calculated timeout */
	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
/*
 * Run one transfer over eDMA: prepare and issue the RX descriptor
 * before TX (RX must be armed before data starts flowing), then wait
 * for both channels — with a calculated timeout in host mode, or
 * interruptibly (abortable) in target mode.  Any failure terminates
 * both channels and resets the controller.
 */
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
				  struct fsl_lpspi_data *fsl_lpspi,
				  struct spi_transfer *transfer)
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	ret = fsl_lpspi_dma_configure(controller);

	desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	desc_rx->callback = fsl_lpspi_dma_rx_callback;
	desc_rx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_rx);
	reinit_completion(&fsl_lpspi->dma_rx_completion);
	dma_async_issue_pending(controller->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		/* TX prep failed: tear down the already-issued TX channel. */
		dmaengine_terminate_all(controller->dma_tx);

	desc_tx->callback = fsl_lpspi_dma_tx_callback;
	desc_tx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_tx);
	reinit_completion(&fsl_lpspi->dma_tx_completion);
	dma_async_issue_pending(controller->dma_tx);

	fsl_lpspi->target_aborted = false;

	if (!fsl_lpspi->is_target) {
		transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,

		/* Wait eDMA to finish the data transfer.*/
		timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
			dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);

		timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
			dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);

		/* Target mode: no timeout; a target_abort wakes us instead. */
		if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA TX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);

		if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA RX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);

	fsl_lpspi_reset(fsl_lpspi);
/* Release whichever DMA channels were acquired; safe to call twice. */
static void fsl_lpspi_dma_exit(struct spi_controller *controller)
	if (controller->dma_rx) {
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;

	if (controller->dma_tx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
/*
 * Request the "tx" and "rx" DMA channels and wire up the controller's
 * DMA hooks; partially acquired channels are released on failure.
 */
static int fsl_lpspi_dma_init(struct device *dev,
			      struct fsl_lpspi_data *fsl_lpspi,
			      struct spi_controller *controller)
	/* Prepare for TX DMA: */
	controller->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(controller->dma_tx)) {
		ret = PTR_ERR(controller->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		controller->dma_tx = NULL;

	/* Prepare for RX DMA: */
	controller->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(controller->dma_rx)) {
		ret = PTR_ERR(controller->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		controller->dma_rx = NULL;

	init_completion(&fsl_lpspi->dma_rx_completion);
	init_completion(&fsl_lpspi->dma_tx_completion);
	controller->can_dma = fsl_lpspi_can_dma;
	controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;

	/* Error path: drop any channel acquired so far. */
	fsl_lpspi_dma_exit(controller);
/*
 * Run one transfer in PIO mode: prime the TX FIFO, let the ISR drive
 * the rest, wait for completion, then always reset the controller.
 */
static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
				  struct spi_transfer *t)
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi->tx_buf = t->tx_buf;
	fsl_lpspi->rx_buf = t->rx_buf;
	fsl_lpspi->remain = t->len;

	reinit_completion(&fsl_lpspi->xfer_done);
	fsl_lpspi->target_aborted = false;

	/* Kick the transfer off; the ISR keeps the FIFO fed from here. */
	fsl_lpspi_write_tx_fifo(fsl_lpspi);

	ret = fsl_lpspi_wait_for_completion(controller);

	fsl_lpspi_reset(fsl_lpspi);
/*
 * spi_controller->transfer_one: configure the hardware for @t, issue
 * the command word, then run the transfer via DMA or PIO.
 */
static int fsl_lpspi_transfer_one(struct spi_controller *controller,
				  struct spi_device *spi,
				  struct spi_transfer *t)
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi->is_first_byte = true;	/* affects TCR_CONTC in set_cmd */
	ret = fsl_lpspi_setup_transfer(controller, spi, t);

	fsl_lpspi_set_cmd(fsl_lpspi);
	fsl_lpspi->is_first_byte = false;

	if (fsl_lpspi->usedma)
		ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
		ret = fsl_lpspi_pio_transfer(controller, t);
/*
 * Interrupt handler for PIO transfers: drain the RX FIFO, refill the
 * TX FIFO on TX-data events, and signal completion on frame-complete.
 */
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
	u32 temp_SR, temp_IER;
	struct fsl_lpspi_data *fsl_lpspi = dev_id;

	temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
	fsl_lpspi_intctrl(fsl_lpspi, 0);	/* mask while handling */
	temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);

	fsl_lpspi_read_rx_fifo(fsl_lpspi);

	if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
		fsl_lpspi_write_tx_fifo(fsl_lpspi);

	/* Still busy or words left in the TX FIFO: wait for frame complete. */
	if (temp_SR & SR_MBF ||
	    readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);

	if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		complete(&fsl_lpspi->xfer_done);
/* Runtime-PM resume: enable both clocks, unwinding per-clk on failure. */
static int fsl_lpspi_runtime_resume(struct device *dev)
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;

	fsl_lpspi = spi_controller_get_devdata(controller);

	ret = clk_prepare_enable(fsl_lpspi->clk_per);

	ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
		clk_disable_unprepare(fsl_lpspi->clk_per);
/* Runtime-PM suspend: gate both clocks. */
static int fsl_lpspi_runtime_suspend(struct device *dev)
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;

	fsl_lpspi = spi_controller_get_devdata(controller);

	clk_disable_unprepare(fsl_lpspi->clk_per);
	clk_disable_unprepare(fsl_lpspi->clk_ipg);
/* Enable runtime PM with a short autosuspend delay. */
static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
	struct device *dev = fsl_lpspi->dev;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
/*
 * Probe: allocate a host controller (or target, when the DT node has
 * "spi-slave"), map registers, acquire IRQ and clocks, read FIFO
 * geometry from PARAM, optionally set up DMA, then register the
 * controller.
 */
static int fsl_lpspi_probe(struct platform_device *pdev)
	struct fsl_lpspi_data *fsl_lpspi;
	struct spi_controller *controller;
	struct resource *res;

	is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
		controller = spi_alloc_target(&pdev->dev,
					      sizeof(struct fsl_lpspi_data));
		controller = spi_alloc_host(&pdev->dev,
					    sizeof(struct fsl_lpspi_data));

	platform_set_drvdata(pdev, controller);

	fsl_lpspi = spi_controller_get_devdata(controller);
	fsl_lpspi->dev = &pdev->dev;
	fsl_lpspi->is_target = is_target;
	fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
						       "fsl,spi-only-use-cs1-sel");

	init_completion(&fsl_lpspi->xfer_done);

	fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(fsl_lpspi->base)) {
		ret = PTR_ERR(fsl_lpspi->base);
		goto out_controller_put;
	fsl_lpspi->base_phys = res->start;	/* kept for DMA slave config */

	irq = platform_get_irq(pdev, 0);
		goto out_controller_put;

	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
			       dev_name(&pdev->dev), fsl_lpspi);
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_controller_put;

	fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(fsl_lpspi->clk_per)) {
		ret = PTR_ERR(fsl_lpspi->clk_per);
		goto out_controller_put;

	fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fsl_lpspi->clk_ipg)) {
		ret = PTR_ERR(fsl_lpspi->clk_ipg);
		goto out_controller_put;

	/* enable the clock */
	ret = fsl_lpspi_init_rpm(fsl_lpspi);
		goto out_controller_put;

	ret = pm_runtime_get_sync(fsl_lpspi->dev);
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");

	/* FIFO depths are encoded as powers of two in PARAM. */
	temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
	fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
	fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
	if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
		/* No "num-cs" in DT: imx93 can read the count from PARAM. */
		if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx93-spi"))
			num_cs = ((temp >> 16) & 0xf);

	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	controller->transfer_one = fsl_lpspi_transfer_one;
	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	controller->dev.of_node = pdev->dev.of_node;
	controller->bus_num = pdev->id;
	controller->num_chipselect = num_cs;
	controller->target_abort = fsl_lpspi_target_abort;
	if (!fsl_lpspi->is_target)
		controller->use_gpio_descriptors = true;

	/* DMA is optional: anything but probe-defer falls back to PIO. */
	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
	if (ret == -EPROBE_DEFER)
	dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);

	/*
	 * disable LPSPI module IRQ when enable DMA mode successfully,
	 * to prevent the unexpected LPSPI module IRQ events.
	 */

	ret = devm_spi_register_controller(&pdev->dev, controller);
		dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");

	pm_runtime_mark_last_busy(fsl_lpspi->dev);
	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	/* Error unwind paths below. */
	fsl_lpspi_dma_exit(controller);

	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_put_sync(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);

	spi_controller_put(controller);
/* Platform remove: release DMA channels and disable runtime PM. */
static void fsl_lpspi_remove(struct platform_device *pdev)
	struct spi_controller *controller = platform_get_drvdata(pdev);
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi_dma_exit(controller);

	pm_runtime_disable(fsl_lpspi->dev);
965 static int __maybe_unused fsl_lpspi_suspend(struct device *dev)
967 pinctrl_pm_select_sleep_state(dev);
968 return pm_runtime_force_suspend(dev);
/* System resume: force runtime-resume, then restore default pinctrl state. */
static int __maybe_unused fsl_lpspi_resume(struct device *dev)
	ret = pm_runtime_force_resume(dev);
		dev_err(dev, "Error in resume: %d\n", ret);

	pinctrl_pm_select_default_state(dev);
/* Runtime and system-sleep PM callbacks. */
static const struct dev_pm_ops fsl_lpspi_pm_ops = {
	SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
			   fsl_lpspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
/* Platform driver glue. */
static struct platform_driver fsl_lpspi_driver = {
		.of_match_table = fsl_lpspi_dt_ids,
		.pm = &fsl_lpspi_pm_ops,
	.probe = fsl_lpspi_probe,
	.remove_new = fsl_lpspi_remove,
module_platform_driver(fsl_lpspi_driver);

MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_LICENSE("GPL");