host->data = data;
host->size = data->blksz * data->blocks;
- host->data_xfered = 0;
- host->blockend = false;
- host->dataend = false;
-
- mmci_init_sg(host, data);
+ data->bytes_xfered = 0;
clks = (unsigned long long)data->timeout_ns * host->cclk;
do_div(clks, 1000000000UL);
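The two lines above turn the card's data timeout, given in nanoseconds, into a count of card-clock cycles; do_div() performs the 64-bit division in place. Below is a minimal standalone sketch of the same arithmetic, with made-up timeout and clock values that are not taken from the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: 100 ms card timeout, 26 MHz card clock */
	uint64_t timeout_ns = 100ULL * 1000 * 1000;
	uint64_t cclk = 26ULL * 1000 * 1000;

	/* cycles = timeout_ns * cclk / 10^9, kept in 64 bits to avoid overflow */
	uint64_t clks = timeout_ns * cclk / 1000000000ULL;

	printf("data timeout = %llu clock cycles\n", (unsigned long long)clks);
	return 0;
}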
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
- struct variant_data *variant = host->variant;
-
/* First check for errors */
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
- dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
- if (status & MCI_DATACRCFAIL)
- data->error = -EILSEQ;
- else if (status & MCI_DATATIMEOUT)
- data->error = -ETIMEDOUT;
- else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
- data->error = -EIO;
-
- /* Force-complete the transaction */
- host->blockend = true;
- host->dataend = true;
-
- /*
- * We hit an error condition. Ensure that any data
- * partially written to a page is properly coherent.
- */
- if (data->flags & MMC_DATA_READ) {
- struct sg_mapping_iter *sg_miter = &host->sg_miter;
- unsigned long flags;
-
- local_irq_save(flags);
- if (sg_miter_next(sg_miter)) {
- flush_dcache_page(sg_miter->page);
- sg_miter_stop(sg_miter);
- }
- local_irq_restore(flags);
- }
+ u32 remain, success;
+
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
+
+ /*
+ * Calculate how far we are into the transfer. Note that
+ * the data counter gives the number of bytes transferred
+ * on the MMC bus, not on the host side. On reads, this
+ * can be as much as a FIFO-worth of data ahead. This
+ * matters for FIFO overruns only.
+ */
+ remain = readl(host->base + MMCIDATACNT);
+ success = data->blksz * data->blocks - remain;
+
+ dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+ status, success);
+ if (status & MCI_DATACRCFAIL) {
+ /* Last block was not successful */
+ success -= 1;
+ data->error = -EILSEQ;
+ } else if (status & MCI_DATATIMEOUT) {
+ data->error = -ETIMEDOUT;
+ } else if (status & MCI_TXUNDERRUN) {
+ data->error = -EIO;
+ } else if (status & MCI_RXOVERRUN) {
+ if (success > host->variant->fifosize)
+ success -= host->variant->fifosize;
+ else
+ success = 0;
+ data->error = -EIO;
+ }
+ data->bytes_xfered = round_down(success, data->blksz);
}
- /*
- * On ARM variants in PIO mode, MCI_DATABLOCKEND
- * is always sent first, and we increase the
- * transfered number of bytes for that IRQ. Then
- * MCI_DATAEND follows and we conclude the transaction.
- *
- * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
- * doesn't seem to immediately clear from the status,
- * so we can't use it keep count when only one irq is
- * used because the irq will hit for other reasons, and
- * then the flag is still up. So we use the MCI_DATAEND
- * IRQ at the end of the entire transfer because
- * MCI_DATABLOCKEND is broken.
- *
- * In the U300, the IRQs can arrive out-of-order,
- * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
- * so for this case we use the flags "blockend" and
- * "dataend" to make sure both IRQs have arrived before
- * concluding the transaction. (This does not apply
- * to the Ux500 which doesn't fire MCI_DATABLOCKEND
- * at all.) In DMA mode it suffers from the same problem
- * as the Ux500.
- */
- if (status & MCI_DATABLOCKEND) {
- /*
- * Just being a little over-cautious, we do not
- * use this progressive update if the hardware blockend
- * flag is unreliable: since it can stay high between
- * IRQs it will corrupt the transfer counter.
- */
- if (!variant->broken_blockend)
- host->data_xfered += data->blksz;
- host->blockend = true;
- }
+ if (status & MCI_DATABLOCKEND)
+ dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
- if (status & MCI_DATAEND)
- host->dataend = true;
-
- /*
- * On variants with broken blockend we shall only wait for dataend,
- * on others we must sync with the blockend signal since they can
- * appear out-of-order.
- */
- if (host->dataend && (host->blockend || variant->broken_blockend)) {
+ if (status & MCI_DATAEND || data->error) {
+ if (dma_inprogress(host))
+ mmci_dma_unmap(host, data);
mmci_stop_data(host);
- /* Reset these flags */
- host->blockend = false;
- host->dataend = false;
-
- /*
- * Variants with broken blockend flags need to handle the
- * end of the entire transfer here.
- */
- if (variant->broken_blockend && !data->error)
- host->data_xfered += data->blksz * data->blocks;
+ if (!data->error)
+ /* The error clause is handled above, success! */
+ data->bytes_xfered = data->blksz * data->blocks;
if (!data->stop) {
mmci_request_end(host, data->mrq);
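For reference, here is a minimal standalone sketch of the byte accounting the rewritten error path performs. The block count, counter readout, and FIFO depth are made up and this is not driver code: bytes counted by MMCIDATACNT went over the MMC bus, a CRC failure invalidates the last partial block, an RX overrun means up to a FIFO-worth of counted bytes never reached memory, and only whole blocks are reported back through data->bytes_xfered:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's round_down() for the power-of-two block sizes used here */
static uint32_t round_down_blk(uint32_t n, uint32_t blksz)
{
	return n - (n % blksz);
}

int main(void)
{
	uint32_t blksz = 512, blocks = 8;   /* hypothetical 4 KiB transfer */
	uint32_t remain = 700;              /* pretend MMCIDATACNT readout */
	uint32_t fifosize = 64;             /* pretend variant FIFO depth */
	int crc_fail = 1, rx_overrun = 0;   /* pretend error status bits */

	uint32_t success = blksz * blocks - remain;

	if (crc_fail) {
		/* The last, partially transferred block cannot be trusted */
		success -= 1;
	} else if (rx_overrun) {
		/* Up to a FIFO-worth of counted bytes never reached memory */
		success = success > fifosize ? success - fifosize : 0;
	}

	/* Only whole blocks may be reported as transferred */
	printf("bytes_xfered = %u\n", (unsigned)round_down_blk(success, blksz));
	return 0;
}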