// SPDX-License-Identifier: GPL-2.0-only
/*
 * Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_dev related */
struct chip_data {
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	void (*cs_control)(u32 command);
};
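
/*
 * One instance of struct chip_data is kept per attached spi_device: it is
 * allocated on the first dw_spi_setup() call, stored via spi_set_ctldata()
 * and freed again in dw_spi_cleanup().
 */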

#ifdef CONFIG_DEBUG_FS
#define SPI_REGS_BUFSIZE	1024
static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dw_spi *dws = file->private_data;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"%s registers:\n", dev_name(&dws->master->dev));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}

static const struct file_operations dw_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= dw_spi_show_regs,
	.llseek		= default_llseek,
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	debugfs_create_file("registers", S_IFREG | S_IRUGO,
		dws->debugfs, (void *)dws, &dw_spi_regs_ops);
	return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct chip_data *chip = spi_get_ctldata(spi);

	if (chip && chip->cs_control)
		chip->cs_control(enable);

	if (!enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
	else if (dws->cs_override)
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);

/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch: we thought of using
	 * (dws->fifo_len - rxflr - txflr) as the maximum for tx, but it
	 * doesn't cover the data that has already left the tx/rx FIFOs and
	 * sits in the shift registers, so a software-side limit is applied
	 * instead.
	 */
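	/*
	 * Worked example (illustrative numbers, not from the original code):
	 * with fifo_len = 16 and n_bytes = 1, if 24 bytes are still to be
	 * received but only 16 remain to be sent, then 8 words are already
	 * in flight, rxtx_gap = 8, and at most fifo_len - rxtx_gap = 8 new
	 * TX entries may be queued before the RX side could overflow.
	 */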
	rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
			/ dws->n_bytes;

	return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;

	return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
}

static void dw_writer(struct dw_spi *dws)
{
	u32 max = tx_max(dws);
	u16 txw = 0;

	while (max--) {
		/* Set the tx word if the transfer's original "tx" is not null */
		if (dws->tx_end - dws->len) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else
				txw = *(u16 *)(dws->tx);
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		dws->tx += dws->n_bytes;
	}
}

static void dw_reader(struct dw_spi *dws)
{
	u32 max = rx_max(dws);
	u16 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		/* Care rx only if the transfer's original "rx" is not null */
		if (dws->rx_end - dws->len) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else
				*(u16 *)(dws->rx) = rxw;
		}
		dws->rx += dws->n_bytes;
	}
}
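
/*
 * Note that the controller always clocks full duplex: for a transfer with no
 * tx_buf, dw_writer() pushes dummy 0x0000 words, and for a transfer with no
 * rx_buf, dw_reader() still drains the RX FIFO and discards the data.
 */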

static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
}

static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		dw_readl(dws, DW_SPI_ICR);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}
	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Re-enable the TX irq; it is masked for good once RX has finished */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *master = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

	if (!master->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

/* Must be called inside pump_transfers() */
static int poll_transfer(struct dw_spi *dws)
{
	do {
		dw_writer(dws);
		dw_reader(dws);
		cpu_relax();
	} while (dws->rx_end > dws->rx);

	return 0;
}
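
/*
 * Poll mode busy-waits in the caller's context instead of using the TXEI
 * interrupt; it is selected per device through the poll_mode flag passed in
 * via struct dw_spi_chip (see dw_spi_setup() below).
 */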

static int dw_spi_transfer_one(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct chip_data *chip = spi_get_ctldata(spi);
	u8 imask = 0;
	u16 txlevel = 0;
	u32 cr0;
	int ret;

	dws->dma_mapped = 0;

	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->len = transfer->len;

	spi_enable_chip(dws, 0);

	/* Handle per transfer options for bpw and speed */
	if (transfer->speed_hz != dws->current_freq) {
		if (transfer->speed_hz != chip->speed_hz) {
			/* clk_div doesn't support odd numbers */
			chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
			chip->speed_hz = transfer->speed_hz;
		}
		dws->current_freq = transfer->speed_hz;
		spi_set_clk(dws, chip->clk_div);
	}
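
	/*
	 * Example for the divider above (illustrative numbers): with
	 * dws->max_freq = 100 MHz and a requested 12 MHz clock,
	 * DIV_ROUND_UP() yields 9, and (9 + 1) & 0xfffe rounds that up to
	 * the even divider 10, i.e. an actual SCLK of 10 MHz, so the bus is
	 * never driven faster than requested.
	 */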

	dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
	dws->dma_width = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);

	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	cr0 = (transfer->bits_per_word - 1)
		| (chip->type << SPI_FRF_OFFSET)
		| ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
		   (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) |
		   (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET))
		| (chip->tmode << SPI_TMOD_OFFSET);

	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (chip->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;

		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}

	dw_writel(dws, DW_SPI_CTRL0, cr0);

	/* Check if current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);

	/*
	 * Interrupt mode: only the TXEI IRQ needs to be set, as TX and RX
	 * always happen synchronously.
	 */
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret < 0) {
			spi_enable_chip(dws, 1);
			return ret;
		}
	} else if (!chip->poll_mode) {
		txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
		dw_writel(dws, DW_SPI_TXFLTR, txlevel);
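
		/*
		 * TXEI fires once the TX FIFO level drops to the threshold
		 * programmed above (half the FIFO, or the whole transfer if
		 * it is shorter), and interrupt_transfer() then tops the
		 * FIFO up again from dws->tx.
		 */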
		/* Set the interrupt mask */
		imask |= SPI_INT_TXEI | SPI_INT_TXOI |
			 SPI_INT_RXUI | SPI_INT_RXOI;
		spi_umask_intr(dws, imask);

		dws->transfer_handler = interrupt_transfer;
	}

	spi_enable_chip(dws, 1);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_transfer(dws, transfer);
		if (ret < 0)
			return ret;
	}

	if (chip->poll_mode)
		return poll_transfer(dws);

	return 1;
}
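
/*
 * Returning 1 from dw_spi_transfer_one() tells the SPI core that the transfer
 * is still in flight and will be completed asynchronously via
 * spi_finalize_current_transfer() (from interrupt_transfer() or the DMA
 * completion path), whereas poll_transfer() returns 0 once everything has
 * already been clocked out.
 */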

static void dw_spi_handle_err(struct spi_controller *master,
		struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	spi_reset_chip(dws);
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	/*
	 * Protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it.
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;

		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;
	}

	chip->tmode = SPI_TMOD_TR;

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_reset_chip(dws);

	/*
	 * Try to detect the FIFO depth if it has not been set by the
	 * interface driver; per the HW spec the depth can be 2 to 256.
	 */
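	/*
	 * The probe below relies on TXFLTR only latching threshold values
	 * smaller than the FIFO depth: thresholds are written in increasing
	 * order until one fails to read back, and that first rejected value
	 * equals the depth.
	 */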
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFLTR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/* Enable the HW fixup for explicit CS deselect on Amazon's Alpine chips */
	if (dws->cs_override)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
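
/*
 * Typical usage from an interface driver (a hedged sketch only; the probe
 * glue and variable names here are hypothetical, the dw_spi fields come from
 * spi-dw.h):
 *
 *	dws->regs = devm_ioremap_resource(&pdev->dev, res);
 *	dws->paddr = res->start;
 *	dws->irq = platform_get_irq(pdev, 0);
 *	dws->max_freq = clk_get_rate(clk);
 *	dws->bus_num = pdev->id;
 *	dws->num_cs = 4;
 *	ret = dw_spi_add_host(&pdev->dev, dws);
 *
 * dw_spi_remove_host() reverses this on driver removal.
 */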

int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *master;
	int ret;

	BUG_ON(dws == NULL);

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	spi_controller_set_devdata(master, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  master);
	if (ret < 0) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	master->use_gpio_descriptors = true;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	master->max_speed_hz = dws->max_freq;
	master->dev.of_node = dev->of_node;
	master->dev.fwnode = dev->fwnode;
	master->flags = SPI_MASTER_GPIO_SS;
	master->auto_runtime_pm = true;

	/* An interface driver may override the default chip-select handler */
	if (dws->set_cs)
		master->set_cs = dws->set_cs;

	/* Basic HW init */
	spi_hw_init(dev, dws);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
		} else {
			master->can_dma = dws->dma_ops->can_dma;
		}
	}

	ret = devm_spi_register_controller(dev, master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, master);
err_free_master:
	spi_controller_put(master);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);

void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->master);
	if (ret)
		return ret;

	spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
	spi_hw_init(&dws->master->dev, dws);
	return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");