// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define RX_BURST_LEVEL	16
#define TX_BURST_LEVEL	16
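
/*
 * Channel filter for dma_request_channel(): only accept a channel that
 * belongs to the DMA device described by the dw_dma_slave parameter.
 */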
static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}
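
/*
 * Pick the Rx/Tx DMA burst lengths from the channel capabilities (falling
 * back to the 16-word defaults above), bound them by half the FIFO depth
 * and program the DMA request threshold registers accordingly.
 */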
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = TX_BURST_LEVEL;

	/*
	 * Having the Rx DMA channel serviced with a higher priority than the
	 * Tx DMA channel might not be enough to provide a well balanced
	 * DMA-based SPI transfer interface. There may still be moments when
	 * the Tx DMA channel is occasionally handled faster than the Rx DMA
	 * channel. That in turn will eventually cause an SPI Rx FIFO overflow
	 * if the SPI bus speed is high enough to fill the SPI Rx FIFO before
	 * it is cleared by the Rx DMA channel. To fix the problem, the Tx DMA
	 * activity is intentionally slowed down by limiting the SPI Tx FIFO
	 * depth to a value twice the Tx burst length.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}
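
/*
 * Cache the lesser of the Tx/Rx max_sg_burst capabilities; zero means both
 * channels can traverse a whole SG list in hardware without re-submission.
 */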
static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
{
	struct dma_slave_caps tx = {0}, rx = {0};

	dma_get_slave_caps(dws->txchan, &tx);
	dma_get_slave_caps(dws->rxchan, &rx);

	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
	else if (tx.max_sg_burst > 0)
		dws->dma_sg_burst = tx.max_sg_burst;
	else if (rx.max_sg_burst > 0)
		dws->dma_sg_burst = rx.max_sg_burst;
	else
		dws->dma_sg_burst = 0;
}
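
/*
 * Intel Medfield specific init: the DMA channels live on a dedicated PCI
 * DMA controller, so they are looked up by PCI device and filtered manually
 * instead of being taken from the generic DMA bindings.
 */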
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller; currently it can only
	 * be the DMA controller of Medfield.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);
	dw_spi_dma_maxburst_init(dws);
	dw_spi_dma_sg_burst_init(dws);

	pci_dev_put(dma_dev);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	pci_dev_put(dma_dev);

	return -EBUSY;
}

static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);
	dw_spi_dma_maxburst_init(dws);
	dw_spi_dma_sg_burst_init(dws);

	return 0;
}
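
/* Quiesce and release both DMA channels on controller teardown. */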
static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}
}

static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	dw_spi_check_status(dws, false);

	complete(&dws->dma_completion);

	return IRQ_HANDLED;
}
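
/* Use DMA only for transfers that don't fit entirely into the SSI FIFO. */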
static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}
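
/*
 * Wait for the DMA completion with a timeout derived from the transfer
 * length and the bus speed: the nominal transfer time is doubled plus an
 * extra 200 ms of slack.
 */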
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
	unsigned long long ms;

	ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, speed);
	ms += ms + 200;
	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));
	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}
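
/*
 * Poll until the Tx FIFO drains; each retry waits roughly the number of
 * SCLK cycles needed to shift out the words still left in the FIFO.
 */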
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = SPI_WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Tx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}
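
/*
 * Slave config for the Tx channel: memory-to-device towards the SSI data
 * register, using the Tx burst length chosen at init time and a destination
 * width matching the transfer word size.
 */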
static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}
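
/*
 * Prepare and submit a Tx slave-SG descriptor; on a submission error the
 * channel is terminated so no half-initialized descriptor is left queued.
 */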
static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;
	int ret;

	txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return -ENOMEM;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	cookie = dmaengine_submit(txdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->txchan);
		return ret;
	}

	set_bit(TX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}
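
/*
 * Poll until the DMA engine drains the Rx FIFO. The per-retry delay below is
 * sized from the FIFO occupancy and the APB/SSI clock; with, say, a 100 MHz
 * reference clock and 16 entries left (made-up figures) it evaluates to
 * 4 * 10 ns * 16 = 640 ns.
 */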
static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still fetching data, but if it
	 * is, give it some reasonable time. The timeout calculation is based
	 * on the synchronous APB/SSI reference clock rate, the number of data
	 * entries left in the Rx FIFO, and the number of clock periods
	 * normally needed for a single APB read/write transaction without the
	 * PREADY signal utilized (which is true for the DW APB SSI
	 * controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Rx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}
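
/*
 * Slave config for the Rx channel: device-to-memory from the SSI data
 * register, mirroring the Tx configuration above.
 */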
static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *rxdesc;
	dma_cookie_t cookie;
	int ret;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return -ENOMEM;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	cookie = dmaengine_submit(rxdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->rxchan);
		return ret;
	}

	set_bit(RX_BUSY, &dws->dma_chan_busy);

	return 0;
}
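
/*
 * Per-transfer setup: configure the channels, enable the controller's DMA
 * handshaking interface in DMACR and unmask the FIFO error interrupts so an
 * overrun/underrun terminates the transfer via the DMA completion handler.
 */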
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

static int dw_spi_dma_transfer_all(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int ret;

	/* Submit the DMA Tx transfer */
	ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
	if (ret)
		goto err_clear_dmac;

	/* Submit the DMA Rx transfer if required */
	if (xfer->rx_buf) {
		ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
					   xfer->rx_sg.nents);
		if (ret)
			goto err_clear_dmac;

		/* Rx must be started before Tx so no inbound data is lost */
		dma_async_issue_pending(dws->rxchan);
	}

	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

/*
 * If at least one of the requested DMA channels doesn't support the hardware
 * accelerated SG-list traversal, the DMA driver will most likely work around
 * that by performing IRQ-based SG-list entries resubmission. That can cause a
 * problem if the Tx DMA channel is recharged and re-executed before the Rx
 * DMA channel. Due to the non-deterministic IRQ-handler execution latency,
 * the Tx DMA channel will start pushing data to the SPI bus before the Rx DMA
 * channel is even reinitialized with the next inbound SG-list entry. By doing
 * so the Tx DMA channel will implicitly start filling the DW APB SSI Rx FIFO
 * up, which, while the Rx DMA channel is being recharged and re-executed,
 * will eventually overflow.
 *
 * In order to solve the problem we have to feed the DMA engine with SG-list
 * entries one-by-one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
 * and prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
 * lists may have different numbers of entries of different lengths (though
 * the total length should match), let's virtually split the SG-lists into a
 * set of DMA transfers, each with a length equal to the minimum of the
 * ordered SG-entry lengths. An ASCII sketch of the implemented algorithm:
 *
 *                  xfer->len
 *                |___________|
 * tx_sg list:    |___|____|__|
 * rx_sg list:    |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note: for this workaround to solve the denoted problem, the DMA engine
 * driver should properly initialize the max_sg_burst capability and set the
 * DMA device max segment size parameter to the maximum data block size the
 * DMA engine supports.
 */
static int dw_spi_dma_transfer_one(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
	unsigned int tx_len = 0, rx_len = 0;
	unsigned int base, len;
	int ret;

	sg_init_table(&tx_tmp, 1);
	sg_init_table(&rx_tmp, 1);

	for (base = 0, len = 0; base < xfer->len; base += len) {
		/* Fetch next Tx DMA data chunk */
		if (!tx_len) {
			tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
			sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
			tx_len = sg_dma_len(tx_sg);
		}

		/* Fetch next Rx DMA data chunk */
		if (!rx_len) {
			rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
			sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
			rx_len = sg_dma_len(rx_sg);
		}

		len = min(tx_len, rx_len);

		sg_dma_len(&tx_tmp) = len;
		sg_dma_len(&rx_tmp) = len;

		/* Submit DMA Tx transfer */
		ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
		if (ret)
			break;

		/* Submit DMA Rx transfer */
		ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
		if (ret)
			break;

		/* Rx must be started before Tx so no inbound data is lost */
		dma_async_issue_pending(dws->rxchan);

		dma_async_issue_pending(dws->txchan);

		/*
		 * Here we only need to wait for the DMA transfer to be
		 * finished since the SPI controller is kept enabled during
		 * the procedure this loop implements and there is no risk of
		 * losing data left in the Tx/Rx FIFOs.
		 */
		ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
		if (ret)
			break;

		reinit_completion(&dws->dma_completion);

		sg_dma_address(&tx_tmp) += len;
		sg_dma_address(&rx_tmp) += len;
		tx_len -= len;
		rx_len -= len;
	}

	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned int nents;
	int ret;

	nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

	/*
	 * Execute a normal DMA-based transfer (which submits the Rx and Tx SG
	 * lists directly to the DMA engine at once) if either full hardware
	 * accelerated SG-list traversal is supported by both channels, or a
	 * Tx-only SPI transfer is requested, or the DMA engine is capable of
	 * handling both SG lists on a hardware accelerated basis.
	 */
	if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
		ret = dw_spi_dma_transfer_all(dws, xfer);
	else
		ret = dw_spi_dma_transfer_one(dws, xfer);
	if (ret)
		return ret;

	if (dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}
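
/*
 * Abort any in-flight DMA activity (e.g. on error or timeout) and clear the
 * busy flags so the next transfer starts from a clean state.
 */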
static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);
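
/*
 * Illustrative usage sketch (an assumption, not part of this file): a DW SSI
 * core/glue driver is expected to point dws->dma_addr at the controller data
 * register and pick one of the exported setup helpers before registering the
 * host, roughly like:
 *
 *	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
 *	dw_spi_dma_setup_generic(dws);
 *
 * The exact call sites live in the DW SPI core/glue drivers and may differ.
 */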