/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet.)
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
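
/*
 * Acknowledging is separate from masking: CTL_IRQ_MASK gates delivery, while
 * writing CTL_STATUS clears latched events. Judging by the ~i below, a status
 * bit is cleared by writing 0 to it, so all other bits must be written as 1.
 */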
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;

	return --host->sg_len;
}
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));
		host->sdio_irq_enabled = true;

		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}
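
/*
 * The clock divider select lives in the low byte of CTL_SD_CARD_CLK_CTL: the
 * loop below starts at f_min (f_max / 512, divider select 0x80) and shifts
 * the select right once for every doubling of the target clock. Bit 8
 * (0x100) gates the clock and is handled in tmio_mmc_clk_{start,stop}(). The
 * set_clk_div() hook presumably drives an additional platform-level divider;
 * the meaning of its single-bit argument is platform-specific.
 */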
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
		unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
	msleep(10);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	/* implicit BUG_ON(!res) */
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;
	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;
	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;
	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
/* Completes host->mrq; takes host->lock itself, so call without it held */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
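
/*
 * These bits are OR'd into the command opcode to form the 16-bit value that
 * tmio_mmc_start_command() writes to CTL_SD_CMD when firing off a command.
 */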
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be OK commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;
	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}
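
/*
 * The DMA path may substitute a single bounce scatterlist entry for an SG
 * list it cannot map directly; after a DMA read, the data then has to be
 * copied back from the bounce buffer into the caller's original buffer.
 */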
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;
	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * at the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set. In these cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
				      int *ireg, int *status)
{
	*status = sd_ctrl_read32(host, CTL_STATUS);
	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(*status);
	pr_debug_status(*ireg);
}
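
/*
 * Only schedule card detection if the card state visibly changed (remove
 * seen while a card is registered, or insert seen while none is) and no
 * detect work is already pending; the 100ms delay presumably lets the
 * mechanical contact settle before the card is probed.
 */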
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				      int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}
irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_card_detect_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				  int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			TMIO_STAT_CMDRESPEND |
			TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		return true;
	}

	return false;
}

irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_sdcard_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
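
/*
 * SDIO interrupts live in a separate status register. Writing the status
 * back with the TMIO_SDIO_MASK_ALL bits cleared acknowledges the serviced
 * events, and only the card's IOIRQ is forwarded to the MMC core.
 */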
irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return IRQ_HANDLED;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	pr_debug("MMC IRQ begin\n");

	tmio_mmc_card_irq_status(host, &ireg, &status);
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
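
/*
 * host->mrq doubles as the request state: NULL means idle, a real pointer
 * means a request is in flight, and an ERR_PTR() marks the controller as
 * temporarily claimed by .set_ios(). The delayed reset work armed below
 * serves as a 2 second watchdog against lost hardware interrupts.
 */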
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_get_sync(mmc_dev(mmc));

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	int ret;

	if (!pdata->clk_enable)
		return -ENOTSUPP;

	ret = pdata->clk_enable(host->pdev, &mmc->f_max);
	if (!ret)
		mmc->f_min = mmc->f_max / 512;

	return ret;
}
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() is returning void, so, no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems VccQ should be switched on after Vcc; this is also what the
	 * omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}
static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}
static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				unsigned char bus_width)
{
	switch (bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}
}
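
/*
 * The 0x80e0 / 0x00e0 values above are inherited magic numbers: the two
 * writes differ only in bit 15, which therefore appears to select 1-bit vs
 * 4-bit bus width; the meaning of the low option bits is not documented here.
 */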
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(mmc_dev(mmc));

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_clk_start(host);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_clk_start(host);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u\n",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);
	if (ret >= 0)
		return ret;

	pm_runtime_get_sync(mmc_dev(mmc));
	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return ret;
}
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * Try again later: there is a possibility that the regulator has not
	 * been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}
static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct tmio_mmc_data *pdata)
{
	const struct device_node *np = pdev->dev.of_node;
	if (!np)
		return;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
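
/*
 * Only the Toshiba-specific write-protect property is handled above; the
 * generic MMC device-tree bindings are parsed via mmc_of_parse() in
 * tmio_mmc_host_probe() below.
 */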
int tmio_mmc_host_probe(struct tmio_mmc_host **host,
			struct platform_device *pdev,
			struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		pdata->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto host_free;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		goto host_free;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  mmc->caps & MMC_CAP_NONREMOVABLE ||
				  mmc->slot.cd_irq >= 0);

	if (tmio_mmc_clk_update(_host) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
		sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
	}

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret < 0) {
		tmio_mmc_host_remove(_host);
		return ret;
	}

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret < 0) {
			tmio_mmc_host_remove(_host);
			return ret;
		}
	}

	*host = _host;

	return 0;

host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
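
/*
 * Runtime PM callbacks: suspend masks all card IRQs and gates both the SD
 * clock and, via the platform clk_disable() hook, the IP clock. Resume
 * resets and reprograms the controller, restoring the cached card clock,
 * since register state is presumably lost while the domain is powered down.
 */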
#ifdef CONFIG_PM_RUNTIME
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	if (host->pdata->clk_disable)
		host->pdata->clk_disable(host->pdev);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_clk_update(host);

	if (host->clk_cache) {
		tmio_mmc_set_clock(host, host->clk_cache);
		tmio_mmc_clk_start(host);
	}

	tmio_mmc_enable_dma(host, true);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");