unsigned int count;
unsigned long flags;
- if (!data) {
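+ /* A PIO IRQ while a DMA channel is allocated indicates a driver bug */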
+ if (host->chan_tx || host->chan_rx) {
+ pr_err("PIO IRQ in DMA mode!\n");
+ return;
+ } else if (!data) {
pr_debug("Spurious PIO IRQ\n");
return;
}
if (host->data->flags & MMC_DATA_READ) {
if (!host->chan_rx)
enable_mmc_irqs(host, TMIO_MASK_READOP);
+ else
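+ /* defer dma_async_issue_pending() to the dma_issue tasklet */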
+ tasklet_schedule(&host->dma_issue);
} else {
if (!host->chan_tx)
enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
else
tasklet_schedule(&host->dma_issue);
}
-static void tmio_dma_complete(void *arg)
-{
- struct tmio_mmc_host *host = arg;
-
- dev_dbg(&host->pdev->dev, "Command completed\n");
-
- if (!host->data)
- dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
- else
- enable_mmc_irqs(host, TMIO_STAT_DATAEND);
-}
-
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
goto pio;
}
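+ /* The data is moved by DMA, keep the PIO RX-ready IRQ masked */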
+ disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
/* The only sg element can be unaligned, use our bounce buffer then */
if (!aligned) {
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_FROM_DEVICE, DMA_CTRL_ACK);
- if (desc) {
- desc->callback = tmio_dma_complete;
- desc->callback_param = host;
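+ /* No completion callback: the DATAEND interrupt reports completion now */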
+ if (desc)
cookie = dmaengine_submit(desc);
- dma_async_issue_pending(chan);
- }
+
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
goto pio;
}
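+ /* Likewise mask the PIO TX-request IRQ while DMA moves the data */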
+ disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
/* The only sg element can be unaligned, use our bounce buffer then */
if (!aligned) {
unsigned long flags;
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_TO_DEVICE, DMA_CTRL_ACK);
- if (desc) {
- desc->callback = tmio_dma_complete;
- desc->callback_param = host;
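+ /* As on the RX side, completion is reported by the DATAEND IRQ */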
+ if (desc)
cookie = dmaengine_submit(desc);
- }
+
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
static void tmio_issue_tasklet_fn(unsigned long priv)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
- struct dma_chan *chan = host->chan_tx;
+ struct dma_chan *chan = NULL;
+
+ spin_lock_irq(&host->lock);
+
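+ /* host->data may change under us, sample it with the lock held */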
+ if (host->data) {
+ if (host->data->flags & MMC_DATA_READ)
+ chan = host->chan_rx;
+ else
+ chan = host->chan_tx;
+ }
+
+ spin_unlock_irq(&host->lock);
- dma_async_issue_pending(chan);
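+ /* Unmask DATAEND before starting DMA, so completion cannot be missed */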
+ enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+
+ if (chan)
+ dma_async_issue_pending(chan);
}
static void tmio_tasklet_fn(unsigned long arg)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
- unsigned long flags;
- spin_lock_irqsave(&host->lock, flags);
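+ /* Tasklets run in softirq context with IRQs enabled, no need to save flags */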
+ spin_lock_irq(&host->lock);
if (!host->data)
goto out;
tmio_mmc_do_data_irq(host);
out:
- spin_unlock_irqrestore(&host->lock, flags);
+ spin_unlock_irq(&host->lock);
}
/* It might be necessary to make filter MFD specific */