return;
}
+	/* Initialize the pre-request cookie */
+ host->next_data.cookie = 1;
+
/* Try to acquire a generic DMA engine slave channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
dir = DMA_FROM_DEVICE;
}
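The new state lives in a small per-host structure. The matching header change is not part of this excerpt; below is a minimal sketch of what mmci.h is assumed to gain, inferred from the fields used later in this patch (next_data, dma_desc_current, and the dma_desc/dma_chan/cookie triple):

/* Assumed companion change to mmci.h -- inferred, not shown in this diff */
struct mmci_host_next {
	struct dma_async_tx_descriptor	*dma_desc;	/* descriptor built by pre_req() */
	struct dma_chan			*dma_chan;	/* channel it was built for */
	s32				cookie;		/* mirrors data->host_cookie */
};

/* ...plus two new members inside struct mmci_host: */
	struct dma_async_tx_descriptor	*dma_desc_current;
	struct mmci_host_next		next_data;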
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ if (!data->host_cookie)
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
/*
* Use of DMA with scatter-gather is impossible.
dmaengine_terminate_all(host->dma_current);
}
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct mmci_host_next *next)
{
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
};
- struct mmc_data *data = host->data;
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
int nr_sg;
- host->dma_current = NULL;
+	/* Already prepared by pre_req() and claimed for this request? */
+	if (data->host_cookie && !next &&
+	    host->dma_current && host->dma_desc_current)
+		return 0;
+
+ if (!next) {
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+ }
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_FROM_DEVICE;
return -EINVAL;
/* If less than or equal to the fifo size, don't bother with DMA */
- if (host->size <= variant->fifosize)
+ if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
device = chan->device;
if (!desc)
goto unmap_exit;
- /* Okay, go for it. */
- host->dma_current = chan;
+ if (next) {
+ next->dma_chan = chan;
+ next->dma_desc = desc;
+ } else {
+ host->dma_current = chan;
+ host->dma_desc_current = desc;
+ }
+
+ return 0;
+ unmap_exit:
+	/* Never terminate a channel that may be running the current job */
+	if (!next)
+		dmaengine_terminate_all(chan);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ return -ENOMEM;
+}
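The hunk elides the middle of mmci_dma_prep_data() (channel pick, slave config, mapping, descriptor build). It follows the standard dmaengine slave pattern of this era; a hedged sketch, with flags and error handling as assumptions rather than quotes from the patch:

/* Sketch of the elided middle, assuming the usual dmaengine slave flow */
chan = (data->flags & MMC_DATA_READ) ?
	host->dma_rx_channel : host->dma_tx_channel;
if (!chan)
	return -EINVAL;

device = chan->device;
dmaengine_slave_config(chan, &conf);
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
if (nr_sg == 0)
	return -EINVAL;

desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
				    conf.direction,
				    DMA_CTRL_ACK | DMA_PREP_INTERRUPT);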
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ int ret;
+ struct mmc_data *data = host->data;
+
+	ret = mmci_dma_prep_data(host, data, NULL);
+ if (ret)
+ return ret;
+
+ /* Okay, go for it. */
dev_vdbg(mmc_dev(host->mmc),
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
- dmaengine_submit(desc);
- dma_async_issue_pending(chan);
+ dmaengine_submit(host->dma_desc_current);
+ dma_async_issue_pending(host->dma_current);
datactrl |= MCI_DPSM_DMAENABLE;
writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
host->base + MMCIMASK0);
return 0;
+}
-unmap_exit:
- dmaengine_terminate_all(chan);
- dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
- return -ENOMEM;
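Note that dmaengine_submit() hands back a dma_cookie_t of its own; it is unrelated to data->host_cookie and the driver ignores it. For reference, the submit/issue idiom used above, with an error check added as a hypothetical extra (the patch itself does not check):

dma_cookie_t dma_cookie;

dma_cookie = dmaengine_submit(host->dma_desc_current);	/* queue descriptor */
if (dma_submit_error(dma_cookie))			/* hypothetical check */
	return -EIO;
dma_async_issue_pending(host->dma_current);		/* kick the engine */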
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+ struct mmci_host_next *next = &host->next_data;
+
+	if (data->host_cookie && data->host_cookie != next->cookie) {
+		dev_warn(mmc_dev(host->mmc),
+			 "%s: invalid cookie: data->host_cookie %d, next->cookie %d\n",
+			 __func__, data->host_cookie, next->cookie);
+		data->host_cookie = 0;
+	}
+
+ if (!data->host_cookie)
+ return;
+
+ host->dma_desc_current = next->dma_desc;
+ host->dma_current = next->dma_chan;
+
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
}
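mmci_get_next_data() moves ownership of the prepared pair into the current-request pointers and clears next_data, so one descriptor can never be issued twice. A hypothetical bring-up assertion (not in the patch) that would make the invariant loud:

/* Hypothetical debug aid: a valid cookie must come with a complete pair */
WARN_ON(data->host_cookie &&
	(!host->next_data.dma_desc || !host->next_data.dma_chan));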
+
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ struct mmci_host_next *nd = &host->next_data;
+
+ if (!data)
+ return;
+
+	/* Already prepared (stale cookie)?  Invalidate instead of re-preparing */
+	if (data->host_cookie) {
+		data->host_cookie = 0;
+		return;
+	}
+
+	/* Only prepare when a DMA channel exists for this direction */
+	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
+	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
+		if (mmci_dma_prep_data(host, data, nd)) {
+			data->host_cookie = 0;
+		} else {
+			/* Keep the cookie positive: 0 means "not prepared" */
+			if (++nd->cookie < 0)
+				nd->cookie = 1;
+			data->host_cookie = nd->cookie;
+		}
+	}
+}
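pre_req() only pays off if the core invokes it while the previous transfer is still in flight. The core-side hooks come from the companion mmc core patch; sketched here from that series for context, not quoted from this diff:

/* Core-side callers (companion mmc core patch, sketched) */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq, is_first_req);
}

static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}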
+
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ struct dma_chan *chan;
+ enum dma_data_direction dir;
+
+ if (!data)
+ return;
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ dir = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+	/* Only clean up when a DMA channel exists for this direction */
+ if (chan) {
+ if (err)
+ dmaengine_terminate_all(chan);
+ if (err || data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, dir);
+		data->host_cookie = 0;
+ }
+}
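Taken together, the cookie ties the three hooks into one lifecycle: pre_req() stamps a positive cookie and maps the sglist, mmci_get_next_data() claims the descriptor when the cookie matches, and post_req() unmaps and clears it. A worked trace for two back-to-back requests A and B (cookie values illustrative):

/*
 * pre_req(B)          B->host_cookie = 2; B's sglist mapped, desc cached
 * A completes         post_req(A, 0): unmap A, A->host_cookie = 0
 * request(B) issued   mmci_get_next_data(): cookie 2 matches, desc claimed
 * B completes         post_req(B, 0): unmap B, B->host_cookie = 0
 * error on B instead  post_req(B, err): terminate channel, then unmap
 */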
+
#else
/* Blank functions if the DMA engine is not available */
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}
{
return -ENOSYS;
}
+
+#define mmci_pre_request NULL
+#define mmci_post_request NULL
+
#endif
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
host->mrq = mrq;
+ if (mrq->data)
+ mmci_get_next_data(host, mrq->data);
+
if (mrq->data && mrq->data->flags & MMC_DATA_READ)
mmci_start_data(host, mrq->data);
static const struct mmc_host_ops mmci_ops = {
.request = mmci_request,
+ .pre_req = mmci_pre_request,
+ .post_req = mmci_post_request,
.set_ios = mmci_set_ios,
.get_ro = mmci_get_ro,
.get_cd = mmci_get_cd,
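With the hooks wired into mmci_ops, the core can overlap preparation of request N+1 with the transfer of request N. A simplified, hypothetical view of the resulting calling pattern (names approximate the core's async request flow; not code from this patch):

/* Hypothetical, simplified core pattern enabled by pre_req/post_req */
mmc_pre_req(host, next_mrq, cur_mrq == NULL);		/* map/prep N+1 early */
if (cur_mrq) {
	wait_for_completion(&cur_mrq->completion);	/* finish N */
	mmc_post_req(host, cur_mrq, cur_mrq->cmd->error); /* unmap N */
}
mmc_start_request(host, next_mrq);			/* N+1 starts with no prep gap */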