/* RSPI on SH only */
#define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */
#define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 4-wire) */
-/* QSPI on R-Car M2 only */
+/* QSPI on R-Car Gen2 only */
#define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */
#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
wake_up_interruptible(&rspi->wait);
}
-static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
+static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
+ struct sg_table *rx)
{
- struct dma_async_tx_descriptor *desc;
+ struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ u8 irq_mask = 0;
+ unsigned int other_irq = 0;
+ dma_cookie_t cookie;
int ret;
- desc = dmaengine_prep_slave_sg(rspi->master->dma_tx, t->tx_sg.sgl,
- t->tx_sg.nents, DMA_TO_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc)
- return -EIO;
+ /* First prepare and submit the DMA request(s), as this may fail */
+ if (rx) {
+ desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ ret = -EAGAIN;
+ goto no_dma_rx;
+ }
+
+ desc_rx->callback = rspi_dma_complete;
+ desc_rx->callback_param = rspi;
+ cookie = dmaengine_submit(desc_rx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_rx;
+ }
+
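+ /* The RX DMA request is gated by SPRIE, like the RX interrupt */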
+ irq_mask |= SPCR_SPRIE;
+ }
+
+ if (tx) {
+ desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
+ tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ ret = -EAGAIN;
+ goto no_dma_tx;
+ }
+
+ if (rx) {
+ /* No TX callback: RX completion signals the end of the transfer */
+ desc_tx->callback = NULL;
+ } else {
+ desc_tx->callback = rspi_dma_complete;
+ desc_tx->callback_param = rspi;
+ }
+ cookie = dmaengine_submit(desc_tx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_tx;
+ }
+
+ irq_mask |= SPCR_SPTIE;
+ }
/*
- * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
+ * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
- * called. So, this driver disables the IRQ while DMA transfer.
+ * called. So, this driver disables the IRQs for the duration of the DMA transfer.
*/
- disable_irq(rspi->tx_irq);
+ if (tx)
+ disable_irq(other_irq = rspi->tx_irq);
+ if (rx && rspi->rx_irq != other_irq)
+ disable_irq(rspi->rx_irq);
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
- rspi_enable_irq(rspi, SPCR_SPTIE);
+ rspi_enable_irq(rspi, irq_mask);
rspi->dma_callbacked = 0;
- desc->callback = rspi_dma_complete;
- desc->callback_param = rspi;
- dmaengine_submit(desc);
- dma_async_issue_pending(rspi->master->dma_tx);
+ /* Now start DMA */
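+ /* RX is started first, so reception is ready before TX pushes data */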
+ if (rx)
+ dma_async_issue_pending(rspi->master->dma_rx);
+ if (tx)
+ dma_async_issue_pending(rspi->master->dma_tx);
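+ /* Wait up to one second (HZ jiffies) for the completion callback */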
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
- if (ret > 0 && rspi->dma_callbacked)
- ret = 0;
- else if (!ret)
- ret = -ETIMEDOUT;
- rspi_disable_irq(rspi, SPCR_SPTIE);
+ if (ret > 0 && rspi->dma_callbacked) {
+ ret = 0;
+ } else {
+ if (!ret) {
+ dev_err(&rspi->master->dev, "DMA timeout\n");
+ ret = -ETIMEDOUT;
+ }
+ /* Cancel DMA on timeout, and also when the wait was interrupted */
+ if (tx)
+ dmaengine_terminate_all(rspi->master->dma_tx);
+ if (rx)
+ dmaengine_terminate_all(rspi->master->dma_rx);
+ }
+
+ rspi_disable_irq(rspi, irq_mask);
+
- enable_irq(rspi->tx_irq);
+ if (tx)
+ enable_irq(rspi->tx_irq);
+ if (rx && rspi->rx_irq != other_irq)
+ enable_irq(rspi->rx_irq);
+
+ return ret;
+
+no_dma_tx:
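+ /* TX setup failed after RX was already submitted: cancel the RX request */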
+ if (rx)
+ dmaengine_terminate_all(rspi->master->dma_rx);
+no_dma_rx:
+ if (ret == -EAGAIN) {
+ pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+ dev_driver_string(&rspi->master->dev),
+ dev_name(&rspi->master->dev));
+ }
return ret;
}
rspi_write8(rspi, 0, QSPI_SPBFCR);
}
-static int rspi_send_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
-{
- struct dma_async_tx_descriptor *desc_tx, *desc_rx;
- int ret;
-
- /* prepare transmit transfer */
- desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, t->tx_sg.sgl,
- t->tx_sg.nents, DMA_TO_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_tx)
- return -EIO;
-
- /* prepare receive transfer */
- desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, t->rx_sg.sgl,
- t->rx_sg.nents, DMA_FROM_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_rx)
- return -EIO;
-
- rspi_receive_init(rspi);
-
- /*
- * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
- * called. So, this driver disables the IRQ while DMA transfer.
- */
- disable_irq(rspi->tx_irq);
- if (rspi->rx_irq != rspi->tx_irq)
- disable_irq(rspi->rx_irq);
-
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
- rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
- rspi->dma_callbacked = 0;
-
- desc_rx->callback = rspi_dma_complete;
- desc_rx->callback_param = rspi;
- dmaengine_submit(desc_rx);
- dma_async_issue_pending(rspi->master->dma_rx);
-
- desc_tx->callback = NULL; /* No callback */
- dmaengine_submit(desc_tx);
- dma_async_issue_pending(rspi->master->dma_tx);
-
- ret = wait_event_interruptible_timeout(rspi->wait,
- rspi->dma_callbacked, HZ);
- if (ret > 0 && rspi->dma_callbacked)
- ret = 0;
- else if (!ret)
- ret = -ETIMEDOUT;
- rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
-
- enable_irq(rspi->tx_irq);
- if (rspi->rx_irq != rspi->tx_irq)
- enable_irq(rspi->rx_irq);
-
- return ret;
-}
-
static bool __rspi_can_dma(const struct rspi_data *rspi,
const struct spi_transfer *xfer)
{
return xfer->len > rspi->ops->fifo_size;
}
-static int rspi_transfer_out_in(struct rspi_data *rspi,
+static int rspi_common_transfer(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
- u8 spcr;
int ret;
- spcr = rspi_read8(rspi, RSPI_SPCR);
- if (xfer->rx_buf) {
- rspi_receive_init(rspi);
- spcr &= ~SPCR_TXMD;
- } else {
- spcr |= SPCR_TXMD;
+ if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
+ ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
+ xfer->rx_buf ? &xfer->rx_sg : NULL);
+ if (ret != -EAGAIN)
+ return ret;
}
- rspi_write8(rspi, spcr, RSPI_SPCR);
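+ /* No usable DMA (or it returned -EAGAIN): do the transfer by PIO */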
ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
if (ret < 0)
return ret;

/* Wait for the last transmission */
rspi_wait_for_tx_empty(rspi);

return 0;
}
static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *xfer)
{
struct rspi_data *rspi = spi_master_get_devdata(master);
+ u8 spcr;
- if (!master->can_dma || !__rspi_can_dma(rspi, xfer))
- return rspi_transfer_out_in(rspi, xfer);
-
- if (xfer->rx_buf)
- return rspi_send_receive_dma(rspi, xfer);
- else
- return rspi_send_dma(rspi, xfer);
-}
-
-static int rspi_rz_transfer_out_in(struct rspi_data *rspi,
- struct spi_transfer *xfer)
-{
- int ret;
-
- rspi_rz_receive_init(rspi);
-
- ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
- if (ret < 0)
- return ret;
-
- /* Wait for the last transmission */
- rspi_wait_for_tx_empty(rspi);
+ spcr = rspi_read8(rspi, RSPI_SPCR);
+ if (xfer->rx_buf) {
+ rspi_receive_init(rspi);
+ spcr &= ~SPCR_TXMD;
+ } else {
+ spcr |= SPCR_TXMD;
+ }
+ rspi_write8(rspi, spcr, RSPI_SPCR);
- return 0;
+ return rspi_common_transfer(rspi, xfer);
}
static int rspi_rz_transfer_one(struct spi_master *master,
{
struct rspi_data *rspi = spi_master_get_devdata(master);
- return rspi_rz_transfer_out_in(rspi, xfer);
+ rspi_rz_receive_init(rspi);
+
+ return rspi_common_transfer(rspi, xfer);
}
static int qspi_transfer_out_in(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
- int ret;
-
qspi_receive_init(rspi);
- ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
- if (ret < 0)
- return ret;
-
- /* Wait for the last transmission */
- rspi_wait_for_tx_empty(rspi);
-
- return 0;
+ return rspi_common_transfer(rspi, xfer);
}
static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
{
int ret;
+ if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
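+ /* Fall back to PIO */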
ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
if (ret < 0)
return ret;
static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
{
+ if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
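+ /* Fall back to PIO */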
return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
}
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- chan = dma_request_channel(mask, shdma_chan_filter,
- (void *)(unsigned long)id);
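+ /* Prefer a DT-supplied "tx"/"rx" channel, else use the legacy filter */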
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)(unsigned long)id, dev,
+ dir == DMA_MEM_TO_DEV ? "tx" : "rx");
if (!chan) {
- dev_warn(dev, "dma_request_channel failed\n");
+ dev_warn(dev, "dma_request_slave_channel_compat failed\n");
return NULL;
}
memset(&cfg, 0, sizeof(cfg));
cfg.slave_id = id;
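+ /* The SPI data register is byte-accessed, so use a one-byte DMA width */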
cfg.direction = dir;
- if (dir == DMA_MEM_TO_DEV)
+ if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
- else
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else {
cfg.src_addr = port_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
const struct resource *res)
{
const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
+ unsigned int dma_tx_id, dma_rx_id;
+
+ if (dev->of_node) {
+ /* In the OF case we will get the slave IDs from the DT */
+ dma_tx_id = 0;
+ dma_rx_id = 0;
+ } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
+ dma_tx_id = rspi_pd->dma_tx_id;
+ dma_rx_id = rspi_pd->dma_rx_id;
+ } else {
+ /* No DMA configured: not an error, the driver falls back to PIO */
+ return 0;
+ }
- if (!rspi_pd || !rspi_pd->dma_rx_id || !rspi_pd->dma_tx_id)
- return 0; /* The driver assumes no error. */
-
- master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM,
- rspi_pd->dma_rx_id,
+ master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
res->start + RSPI_SPDR);
- if (!master->dma_rx)
+ if (!master->dma_tx)
return -ENODEV;
- master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV,
- rspi_pd->dma_tx_id,
+ master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
res->start + RSPI_SPDR);
- if (!master->dma_tx) {
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (!master->dma_rx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
return -ENODEV;
}
return 0;
}
-static void rspi_release_dma(struct rspi_data *rspi)
+static void rspi_release_dma(struct spi_master *master)
{
- if (rspi->master->dma_tx)
- dma_release_channel(rspi->master->dma_tx);
- if (rspi->master->dma_rx)
- dma_release_channel(rspi->master->dma_rx);
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
}
static int rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = platform_get_drvdata(pdev);
- rspi_release_dma(rspi);
+ rspi_release_dma(rspi->master);
pm_runtime_disable(&pdev->dev);
return 0;
irq_handler_t handler, const char *suffix,
void *dev_id)
{
- const char *base = dev_name(dev);
- size_t len = strlen(base) + strlen(suffix) + 2;
- char *name = devm_kzalloc(dev, len, GFP_KERNEL);
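+ /* Build the IRQ name "<device>:<suffix>", e.g. "e6b10000.spi:rx" */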
+ const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
+ dev_name(dev), suffix);
if (!name)
return -ENOMEM;
- snprintf(name, len, "%s:%s", base, suffix);
+
return devm_request_irq(dev, irq, handler, 0, name, dev_id);
}
master->num_chipselect = rspi_pd->num_chipselect;
else
master->num_chipselect = 2; /* default */
- };
+ }
/* ops parameter check */
if (!ops->set_config_register) {
return 0;
error3:
- rspi_release_dma(rspi);
+ rspi_release_dma(master);
error2:
pm_runtime_disable(&pdev->dev);
error1: