/*
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * Copyright (C) 2016 Sang Engineering, Wolfram Sang
 * Copyright (C) 2015-16 Renesas Electronics Corporation
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver
 * (4-bit support), with further 4-bit support from a later datasheet.
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
                                      struct mmc_data *data)
{
        if (host->dma_ops)
                host->dma_ops->start(host, data);
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
        if (host->dma_ops)
                host->dma_ops->enable(host, enable);
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
                                        struct tmio_mmc_data *pdata)
{
        if (host->dma_ops) {
                host->dma_ops->request(host, pdata);
        } else {
                host->chan_tx = NULL;
                host->chan_rx = NULL;
        }
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->dma_ops)
                host->dma_ops->release(host);
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
        if (host->dma_ops)
                host->dma_ops->abort(host);
}

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
        sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL(tmio_mmc_enable_mmc_irqs);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
        sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL(tmio_mmc_disable_mmc_irqs);

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
        host->sg_len = data->sg_len;
        host->sg_ptr = data->sg;
        host->sg_orig = data->sg;
        host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
        host->sg_ptr = sg_next(host->sg_ptr);
        host->sg_off = 0;
        return --host->sg_len;
}

#define CMDREQ_TIMEOUT  5000
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
        do { \
                if (status & TMIO_STAT_##a) { \
                        if ((i)++) \
                                printk(" | "); \
                        printk(#a); \
                } \
        } while (0)

static void pr_debug_status(u32 status)
{
        int i = 0;

        pr_debug("status: %08x = ", status);
        STATUS_TO_TEXT(CARD_REMOVE, status, i);
        STATUS_TO_TEXT(CARD_INSERT, status, i);
        STATUS_TO_TEXT(SIGSTATE, status, i);
        STATUS_TO_TEXT(WRPROTECT, status, i);
        STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
        STATUS_TO_TEXT(CARD_INSERT_A, status, i);
        STATUS_TO_TEXT(SIGSTATE_A, status, i);
        STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
        STATUS_TO_TEXT(STOPBIT_ERR, status, i);
        STATUS_TO_TEXT(ILL_FUNC, status, i);
        STATUS_TO_TEXT(CMD_BUSY, status, i);
        STATUS_TO_TEXT(CMDRESPEND, status, i);
        STATUS_TO_TEXT(DATAEND, status, i);
        STATUS_TO_TEXT(CRCFAIL, status, i);
        STATUS_TO_TEXT(DATATIMEOUT, status, i);
        STATUS_TO_TEXT(CMDTIMEOUT, status, i);
        STATUS_TO_TEXT(RXOVERFLOW, status, i);
        STATUS_TO_TEXT(TXUNDERRUN, status, i);
        STATUS_TO_TEXT(RXRDY, status, i);
        STATUS_TO_TEXT(TXRQ, status, i);
        STATUS_TO_TEXT(ILL_ACCESS, status, i);
        printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);

        if (enable && !host->sdio_irq_enabled) {
                u16 sdio_status;

                /* Keep device active while SDIO irq is enabled */
                pm_runtime_get_sync(mmc_dev(mmc));

                host->sdio_irq_enabled = true;
                host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
                                        ~TMIO_SDIO_STAT_IOIRQ;

                /* Clear obsolete interrupts before enabling */
                sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
                if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
                        sdio_status |= TMIO_SDIO_SETBITS_MASK;
                sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

                sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
        } else if (!enable && host->sdio_irq_enabled) {
                host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
                sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

                host->sdio_irq_enabled = false;
                pm_runtime_mark_last_busy(mmc_dev(mmc));
                pm_runtime_put_autosuspend(mmc_dev(mmc));
        }
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10);

        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
                sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
                msleep(10);
        }
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
                sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
                msleep(10);
        }

        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10);
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
                               unsigned int new_clock)
{
        u32 clk = 0, clock;

        if (new_clock == 0) {
                tmio_mmc_clk_stop(host);
                return;
        }

        if (host->clk_update)
                clock = host->clk_update(host, new_clock) / 512;
        else
                clock = host->mmc->f_min;

        for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
                clock <<= 1;

        /* The 1/1 clock is an option */
        if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
                clk |= 0xff;

        if (host->set_clk_div)
                host->set_clk_div(host->pdev, (clk >> 22) & 1);

        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
                        sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
        if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
                msleep(10);

        tmio_mmc_clk_start(host);
}
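
/*
 * Worked example for the divider search in tmio_mmc_set_clock() (numbers
 * are illustrative, not from a datasheet): assume clk_update() reports a
 * 97.5 MHz base clock, so 'clock' starts at 97500000 / 512 = ~190 kHz and
 * 'clk' at 0x80000080, the divide-by-512 pattern. Each iteration doubles
 * 'clock' and shifts the divider pattern right once. For new_clock =
 * 25 MHz the loop stops at 'clock' = ~24.4 MHz, i.e. divide-by-4: the
 * fastest setting that does not exceed the requested rate.
 */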
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
        /* FIXME - should we set stop clock reg here */
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
                sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
        msleep(10);
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
                sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
        msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
        struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
                                                  delayed_reset_work.work);
        struct mmc_request *mrq;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        mrq = host->mrq;

        /*
         * Is the request already finished? Since we use a non-blocking
         * cancel_delayed_work(), it can happen that a .set_ios() call
         * preempts us, so we have to check for IS_ERR(host->mrq).
         */
        if (IS_ERR_OR_NULL(mrq)
            || time_is_after_jiffies(host->last_req_ts +
                                     msecs_to_jiffies(CMDREQ_TIMEOUT))) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        dev_warn(&host->pdev->dev,
                 "timeout waiting for hardware interrupt (CMD%u)\n",
                 mrq->cmd->opcode);

        if (host->data)
                host->data->error = -ETIMEDOUT;
        else if (host->cmd)
                host->cmd->error = -ETIMEDOUT;
        else
                mrq->cmd->error = -ETIMEDOUT;

        host->cmd = NULL;
        host->data = NULL;
        host->force_pio = false;

        spin_unlock_irqrestore(&host->lock, flags);

        tmio_mmc_reset(host);

        /* Ready for new calls */
        host->mrq = NULL;

        tmio_mmc_abort_dma(host);
        mmc_request_done(host->mmc, mrq);
}
/* Called from the done work; takes host->lock itself */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
        struct mmc_request *mrq;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        mrq = host->mrq;
        if (IS_ERR_OR_NULL(mrq)) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->cmd = NULL;
        host->data = NULL;
        host->force_pio = false;

        cancel_delayed_work(&host->delayed_reset_work);

        host->mrq = NULL;
        spin_unlock_irqrestore(&host->lock, flags);

        if (mrq->cmd->error || (mrq->data && mrq->data->error))
                tmio_mmc_abort_dma(host);

        if (host->check_scc_error)
                host->check_scc_error(host);

        mmc_request_done(host->mmc, mrq);
}
static void tmio_mmc_done_work(struct work_struct *work)
{
        struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
                                                  done);
        tmio_mmc_finish_request(host);
}
/*
 * These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme.
 */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
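
/*
 * Example encoding derived from the definitions above: a multi-block read
 * with an R1 response (e.g. CMD18, READ_MULTIPLE_BLOCK) is issued as
 *
 *	c = 18 | RESP_R1 | DATA_PRESENT | TRANSFER_READ | TRANSFER_MULTI;
 *
 * i.e. the command index sits in the low bits and the mode bits are OR'ed
 * on top before tmio_mmc_start_command() writes 'c' to CTL_SD_CMD.
 */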
static int tmio_mmc_start_command(struct tmio_mmc_host *host,
                                  struct mmc_command *cmd)
{
        struct mmc_data *data = host->data;
        int c = cmd->opcode;
        u32 irq_mask = TMIO_MASK_CMD;

        /* CMD12 is handled by hardware */
        if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
                sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_STP);
                return 0;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE: c |= RESP_NONE; break;
        case MMC_RSP_R1:
        case MMC_RSP_R1_NO_CRC:
                           c |= RESP_R1;   break;
        case MMC_RSP_R1B:  c |= RESP_R1B;  break;
        case MMC_RSP_R2:   c |= RESP_R2;   break;
        case MMC_RSP_R3:   c |= RESP_R3;   break;
        default:
                pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
                return -EINVAL;
        }

        host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
        if (data) {
                c |= DATA_PRESENT;
                if (data->blocks > 1) {
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
                        c |= TRANSFER_MULTI;

                        /*
                         * Disable auto CMD12 at IO_RW_EXTENDED when
                         * doing multiple block transfers
                         */
                        if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
                            (cmd->opcode == SD_IO_RW_EXTENDED))
                                c |= NO_CMD12_ISSUE;
                }
                if (data->flags & MMC_DATA_READ)
                        c |= TRANSFER_READ;
        }

        if (!host->native_hotplug)
                irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
        tmio_mmc_enable_mmc_irqs(host, irq_mask);

        /* Fire off the command */
        sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
        sd_ctrl_write16(host, CTL_SD_CMD, c);

        return 0;
}
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
                                   unsigned short *buf,
                                   unsigned int count)
{
        int is_read = host->data->flags & MMC_DATA_READ;
        u8  *buf8;

        /*
         * Transfer the data
         */
        if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
                u8 data[4] = { };

                if (is_read)
                        sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
                                           count >> 2);
                else
                        sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
                                            count >> 2);

                /* if count was a multiple of 4 */
                if (!(count & 0x3))
                        return;

                /*
                 * The remainder sits (count & ~3) bytes in; 'buf' is a u16
                 * pointer, so 'buf + (count >> 2)' would advance by only
                 * half that distance.
                 */
                buf8 = (u8 *)buf + (count & ~0x3);
                count %= 4;

                if (is_read) {
                        sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
                                           (u32 *)data, 1);
                        memcpy(buf8, data, count);
                } else {
                        memcpy(data, buf8, count);
                        sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
                                            (u32 *)data, 1);
                }

                return;
        }

        if (is_read)
                sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
        else
                sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

        /* if count was an even number */
        if (!(count & 0x1))
                return;

        /* if count was an odd number */
        buf8 = (u8 *)(buf + (count >> 1));

        /*
         * FIXME
         *
         * driver and this function are assuming that
         * it is used as little endian
         */
        if (is_read)
                *buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
        else
                sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}
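
/*
 * Worked example for tmio_mmc_transfer_data() (hypothetical length): with
 * count = 6 on the 32-bit port, one whole word moves through the _rep
 * call (count >> 2 == 1) and the trailing 2 bytes (count % 4) are staged
 * in the 'data' bounce array, so the FIFO always sees full-width accesses
 * while memory is touched only for the bytes that actually exist.
 */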
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;

        if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
                pr_err("PIO IRQ in DMA mode!\n");
                return;
        } else if (!data) {
                pr_debug("Spurious PIO IRQ\n");
                return;
        }

        sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
        buf = (unsigned short *)(sg_virt + host->sg_off);

        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
                count = data->blksz;

        pr_debug("count: %08x offset: %08x flags %08x\n",
                 count, host->sg_off, data->flags);

        /* Transfer the data */
        tmio_mmc_transfer_data(host, buf, count);

        host->sg_off += count;

        tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
}
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
        if (host->sg_ptr == &host->bounce_sg) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

                memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
        }
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        struct mmc_command *stop;

        host->data = NULL;

        if (!data) {
                dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
                return;
        }
        stop = data->stop;

        /* FIXME - return correct transfer count on errors */
        if (!data->error)
                data->bytes_xfered = data->blocks * data->blksz;
        else
                data->bytes_xfered = 0;

        pr_debug("Completed data request\n");

        /*
         * FIXME: other drivers allow an optional stop command of any given type
         * which we don't do, as the chip can auto generate them.
         * Perhaps we can be smarter about when to use auto CMD12 and
         * only issue the auto request when we know this is the desired
         * stop command, allowing fallback to the stop command the
         * upper layers expect. For now, we do what works.
         */

        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx && !host->force_pio)
                        tmio_mmc_check_bounce_buffer(host);
                dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
                        host->mrq);
        } else {
                dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
                        host->mrq);
        }

        if (stop) {
                if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
                        dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
                                stop->opcode, stop->arg);

                /* fill in response from auto CMD12 */
                stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

                sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
        }

        schedule_work(&host->done);
}
EXPORT_SYMBOL(tmio_mmc_do_data_irq);
static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
        struct mmc_data *data;

        spin_lock(&host->lock);
        data = host->data;

        if (!data)
                goto out;

        if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
            stat & TMIO_STAT_TXUNDERRUN)
                data->error = -EILSEQ;
        if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
                u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
                bool done = false;

                /*
                 * Has all data been written out yet? Testing on SuperH showed
                 * that in most cases the first interrupt already comes with
                 * the BUSY status bit clear, but on some operations, like
                 * mount or at the beginning of a write / sync / umount, there
                 * is one DATAEND interrupt with the BUSY bit set. In these
                 * cases waiting for one more interrupt fixes the problem.
                 */
                if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
                        if (status & TMIO_STAT_SCLKDIVEN)
                                done = true;
                } else {
                        if (!(status & TMIO_STAT_CMD_BUSY))
                                done = true;
                }

                if (done) {
                        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                        complete(&host->dma_dataend);
                }
        } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
                tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                complete(&host->dma_dataend);
        } else {
                tmio_mmc_do_data_irq(host);
                tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
        }
out:
        spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
                             unsigned int stat)
{
        struct mmc_command *cmd = host->cmd;
        int i, addr;

        spin_lock(&host->lock);

        if (!host->cmd) {
                pr_debug("Spurious CMD irq\n");
                goto out;
        }

        /* This controller is sicker than the PXA one. Not only do we need to
         * drop the top 8 bits of the first response word, we also need to
         * modify the order of the response for short response command types.
         */
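        /*
         * Worked example for an R2 (136-bit) response: the loop below fills
         * resp[3]..resp[0] from the response registers; the shuffle that
         * follows shifts each word left by 8 and pulls in the top byte of
         * the next one, e.g.
         *
         *	resp[0] = (resp[0] << 8) | (resp[1] >> 24);
         *
         * so the CID/CSD bits end up aligned the way the MMC core expects.
         */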
        for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
                cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
                cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
                cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
                cmd->resp[3] <<= 8;
        } else if (cmd->flags & MMC_RSP_R3) {
                cmd->resp[0] = cmd->resp[3];
        }

        if (stat & TMIO_STAT_CMDTIMEOUT)
                cmd->error = -ETIMEDOUT;
        else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
                 stat & TMIO_STAT_STOPBIT_ERR ||
                 stat & TMIO_STAT_CMD_IDX_ERR)
                cmd->error = -EILSEQ;

        /* If there is data to handle we enable data IRQs here, and
         * we will ultimately finish the request in the data_end handler.
         * If there's no data or we encountered an error, finish now.
         */
        if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
                if (host->data->flags & MMC_DATA_READ) {
                        if (host->force_pio || !host->chan_rx)
                                tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
                        else
                                tasklet_schedule(&host->dma_issue);
                } else {
                        if (host->force_pio || !host->chan_tx)
                                tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
                        else
                                tasklet_schedule(&host->dma_issue);
                }
        } else {
                schedule_work(&host->done);
        }

out:
        spin_unlock(&host->lock);
}
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
                                       int ireg, int status)
{
        struct mmc_host *mmc = host->mmc;

        /* Card insert / remove attempts */
        if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
                        TMIO_STAT_CARD_REMOVE);
                if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
                     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
                    !work_pending(&mmc->detect.work))
                        mmc_detect_change(host->mmc, msecs_to_jiffies(100));
                return true;
        }

        return false;
}
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
                                  int ireg, int status)
{
        /* Command completion */
        if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
                tmio_mmc_ack_mmc_irqs(host,
                             TMIO_STAT_CMDRESPEND |
                             TMIO_STAT_CMDTIMEOUT);
                tmio_mmc_cmd_irq(host, status);
                return true;
        }

        /* Data transfer */
        if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
                tmio_mmc_pio_irq(host);
                return true;
        }

        /* Data transfer completion */
        if (ireg & TMIO_STAT_DATAEND) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
                tmio_mmc_data_irq(host, status);
                return true;
        }

        return false;
}
static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
        struct mmc_host *mmc = host->mmc;
        struct tmio_mmc_data *pdata = host->pdata;
        unsigned int ireg, status;
        unsigned int sdio_status;

        if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
                return;

        status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
        ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

        sdio_status = status & ~TMIO_SDIO_MASK_ALL;
        if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
                sdio_status |= TMIO_SDIO_SETBITS_MASK;

        sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

        if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
                mmc_signal_sdio_irq(mmc);
}
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
        struct tmio_mmc_host *host = devid;
        unsigned int ireg, status;

        status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
        ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

        pr_debug_status(status);
        pr_debug_status(ireg);

        /* Clear the status except the interrupt status */
        sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

        if (__tmio_mmc_card_detect_irq(host, ireg, status))
                return IRQ_HANDLED;
        if (__tmio_mmc_sdcard_irq(host, ireg, status))
                return IRQ_HANDLED;

        __tmio_mmc_sdio_irq(host);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
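
/*
 * Glue drivers wire this handler up themselves; a minimal sketch, assuming
 * a platform device whose first IRQ resource belongs to this controller
 * (the variable names are illustrative):
 *
 *	irq = platform_get_irq(pdev, 0);
 *	ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
 *			       dev_name(&pdev->dev), host);
 */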
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
                               struct mmc_data *data)
{
        struct tmio_mmc_data *pdata = host->pdata;

        pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
                 data->blksz, data->blocks);

        /* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
            host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
                int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

                if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
                        pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
                               mmc_hostname(host->mmc), data->blksz);
                        return -EINVAL;
                }
        }

        tmio_mmc_init_sg(host, data);
        host->data = data;

        /* Set transfer length / blocksize */
        sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
        sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

        tmio_mmc_start_dma(host, data);

        return 0;
}
static void tmio_mmc_hw_reset(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);

        if (host->hw_reset)
                host->hw_reset(host);
}
static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        int i, ret = 0;

        if (!host->init_tuning || !host->select_tuning)
                /* Tuning is not supported */
                goto out;

        host->tap_num = host->init_tuning(host);
        if (!host->tap_num)
                /* Tuning is not supported */
                goto out;

        if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
                dev_warn_once(&host->pdev->dev,
                        "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
                goto out;
        }

        bitmap_zero(host->taps, host->tap_num * 2);

        /* Issue CMD19 twice for each tap */
        for (i = 0; i < 2 * host->tap_num; i++) {
                if (host->prepare_tuning)
                        host->prepare_tuning(host, i % host->tap_num);

                ret = mmc_send_tuning(mmc, opcode, NULL);
                if (ret && ret != -EILSEQ)
                        goto out;
                if (ret == 0)
                        set_bit(i, host->taps);

                mdelay(1);
        }

        ret = host->select_tuning(host);

out:
        if (ret < 0) {
                dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
                tmio_mmc_hw_reset(mmc);
        }

        return ret;
}
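
/*
 * Sketch of how a select_tuning() callback typically consumes host->taps
 * (illustrative only; the real policy lives in the platform glue): since
 * the loop above samples each tap twice, bit i and bit (i + tap_num) both
 * describe tap (i % tap_num), and a tap is usually treated as good only
 * if both probes passed:
 *
 *	for (i = 0; i < host->tap_num; i++)
 *		ok = test_bit(i, host->taps) &&
 *		     test_bit(i + host->tap_num, host->taps);
 */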
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&host->lock, flags);

        if (host->mrq) {
                pr_debug("request not null\n");
                if (IS_ERR(host->mrq)) {
                        spin_unlock_irqrestore(&host->lock, flags);
                        mrq->cmd->error = -EAGAIN;
                        mmc_request_done(mmc, mrq);
                        return;
                }
        }

        host->last_req_ts = jiffies;
        wmb();
        host->mrq = mrq;

        spin_unlock_irqrestore(&host->lock, flags);

        if (mrq->data) {
                ret = tmio_mmc_start_data(host, mrq->data);
                if (ret)
                        goto fail;
        }

        ret = tmio_mmc_start_command(host, mrq->cmd);
        if (!ret) {
                schedule_delayed_work(&host->delayed_reset_work,
                                      msecs_to_jiffies(CMDREQ_TIMEOUT));
                return;
        }

fail:
        host->force_pio = false;
        host->mrq = NULL;
        mrq->cmd->error = ret;
        mmc_request_done(mmc, mrq);
}
static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
        if (!host->clk_enable)
                return -ENOTSUPP;

        return host->clk_enable(host);
}

static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
        if (host->clk_disable)
                host->clk_disable(host);
}
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
        struct mmc_host *mmc = host->mmc;
        int ret = 0;

        /* .set_ios() returns void, so there is no chance to report an error */

        if (host->set_pwr)
                host->set_pwr(host->pdev, 1);

        if (!IS_ERR(mmc->supply.vmmc)) {
                ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
                /*
                 * Attention: empiric value. With a b43 WiFi SDIO card this
                 * delay proved necessary for reliable card-insertion probing.
                 * 100us were not enough. Is this the same 140us delay, as in
                 * tmio_mmc_set_ios()?
                 */
                udelay(200);
        }
        /*
         * It seems VccQ should be switched on after Vcc; this is also what
         * the omap_hsmmc.c driver does.
         */
        if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
                ret = regulator_enable(mmc->supply.vqmmc);
                udelay(200);
        }

        if (ret < 0)
                dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
                        ret);
}
static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
        struct mmc_host *mmc = host->mmc;

        if (!IS_ERR(mmc->supply.vqmmc))
                regulator_disable(mmc->supply.vqmmc);

        if (!IS_ERR(mmc->supply.vmmc))
                mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

        if (host->set_pwr)
                host->set_pwr(host->pdev, 0);
}
static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
                                   unsigned char bus_width)
{
        u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
                                & ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

        /* reg now applies to MMC_BUS_WIDTH_4 */
        if (bus_width == MMC_BUS_WIDTH_1)
                reg |= CARD_OPT_WIDTH;
        else if (bus_width == MMC_BUS_WIDTH_8)
                reg |= CARD_OPT_WIDTH8;

        sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but
 * as MMC won't run that fast, it has to be clocked at 12MHz, which is the
 * next slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct device *dev = &host->pdev->dev;
        unsigned long flags;

        mutex_lock(&host->ios_lock);

        spin_lock_irqsave(&host->lock, flags);
        if (host->mrq) {
                if (IS_ERR(host->mrq)) {
                        dev_dbg(dev,
                                "%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
                                current->comm, task_pid_nr(current),
                                ios->clock, ios->power_mode);
                        host->mrq = ERR_PTR(-EINTR);
                } else {
                        dev_dbg(dev,
                                "%s.%d: CMD%u active since %lu, now %lu!\n",
                                current->comm, task_pid_nr(current),
                                host->mrq->cmd->opcode, host->last_req_ts, jiffies);
                }
                spin_unlock_irqrestore(&host->lock, flags);

                mutex_unlock(&host->ios_lock);
                return;
        }

        host->mrq = ERR_PTR(-EBUSY);

        spin_unlock_irqrestore(&host->lock, flags);

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                tmio_mmc_power_off(host);
                tmio_mmc_clk_stop(host);
                break;
        case MMC_POWER_UP:
                tmio_mmc_power_on(host, ios->vdd);
                tmio_mmc_set_clock(host, ios->clock);
                tmio_mmc_set_bus_width(host, ios->bus_width);
                break;
        case MMC_POWER_ON:
                tmio_mmc_set_clock(host, ios->clock);
                tmio_mmc_set_bus_width(host, ios->bus_width);
                break;
        }

        /* Let things settle. Delay taken from the winCE driver */
        udelay(140);
        if (PTR_ERR(host->mrq) == -EINTR)
                dev_dbg(&host->pdev->dev,
                        "%s.%d: IOS interrupted: clk %u, mode %u",
                        current->comm, task_pid_nr(current),
                        ios->clock, ios->power_mode);
        host->mrq = NULL;

        host->clk_cache = ios->clock;

        mutex_unlock(&host->ios_lock);
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct tmio_mmc_data *pdata = host->pdata;
        int ret = mmc_gpio_get_ro(mmc);

        if (ret >= 0)
                return ret;

        ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
                (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));

        return ret;
}
static int tmio_multi_io_quirk(struct mmc_card *card,
                               unsigned int direction, int blk_size)
{
        struct tmio_mmc_host *host = mmc_priv(card->host);

        if (host->multi_io_quirk)
                return host->multi_io_quirk(card, direction, blk_size);

        return blk_size;
}
static struct mmc_host_ops tmio_mmc_ops = {
        .request        = tmio_mmc_request,
        .set_ios        = tmio_mmc_set_ios,
        .get_ro         = tmio_mmc_get_ro,
        .get_cd         = mmc_gpio_get_cd,
        .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
        .multi_io_quirk = tmio_multi_io_quirk,
        .hw_reset       = tmio_mmc_hw_reset,
        .execute_tuning = tmio_mmc_execute_tuning,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
        struct tmio_mmc_data *pdata = host->pdata;
        struct mmc_host *mmc = host->mmc;

        mmc_regulator_get_supply(mmc);

        /* use ocr_mask if no regulator */
        if (!mmc->ocr_avail)
                mmc->ocr_avail = pdata->ocr_mask;

        /*
         * Try again later: there is a possibility that the regulator has
         * not been probed yet.
         */
        if (!mmc->ocr_avail)
                return -EPROBE_DEFER;

        return 0;
}
static void tmio_mmc_of_parse(struct platform_device *pdev,
                              struct tmio_mmc_data *pdata)
{
        const struct device_node *np = pdev->dev.of_node;

        if (!np)
                return;

        if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
                pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev)
{
        struct tmio_mmc_host *host;
        struct mmc_host *mmc;

        mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
        if (!mmc)
                return NULL;

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->pdev = pdev;

        return host;
}
EXPORT_SYMBOL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
        mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_free);
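
/*
 * A minimal probe sketch for a glue driver using the two helpers above
 * (illustrative; pdata setup, clocks and IRQ wiring are the glue driver's
 * own, and a NULL dma_ops means PIO only):
 *
 *	host = tmio_mmc_host_alloc(pdev);
 *	if (!host)
 *		return -ENOMEM;
 *	ret = tmio_mmc_host_probe(host, pdata, NULL);
 *	if (ret)
 *		tmio_mmc_host_free(host);
 */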
int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
                        struct tmio_mmc_data *pdata,
                        const struct tmio_mmc_dma_ops *dma_ops)
{
        struct platform_device *pdev = _host->pdev;
        struct mmc_host *mmc = _host->mmc;
        struct resource *res_ctl;
        int ret;
        u32 irq_mask = TMIO_MASK_CMD;

        tmio_mmc_of_parse(pdev, pdata);

        if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
                _host->write16_hook = NULL;

        res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res_ctl)
                return -EINVAL;

        ret = mmc_of_parse(mmc);
        if (ret < 0)
                return ret;

        _host->pdata = pdata;
        platform_set_drvdata(pdev, mmc);

        _host->set_pwr = pdata->set_pwr;
        _host->set_clk_div = pdata->set_clk_div;

        ret = tmio_mmc_init_ocr(_host);
        if (ret < 0)
                return ret;

        _host->ctl = devm_ioremap(&pdev->dev,
                                  res_ctl->start, resource_size(res_ctl));
        if (!_host->ctl)
                return -ENOMEM;

        tmio_mmc_ops.card_busy = _host->card_busy;
        tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
        mmc->ops = &tmio_mmc_ops;

        mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
        mmc->caps2 |= pdata->capabilities2;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
                mmc->max_segs;
        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
        mmc->max_seg_size = mmc->max_req_size;

        _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
                                  mmc->caps & MMC_CAP_NEEDS_POLL ||
                                  !mmc_card_is_removable(mmc));
        /*
         * On Gen2+, eMMC with NONREMOVABLE currently fails because native
         * hotplug gets disabled. It seems RuntimePM related yet we need further
         * research. Since we are planning a PM overhaul anyway, let's enforce
         * for now the device being active by enabling native hotplug always.
         */
        if (pdata->flags & TMIO_MMC_MIN_RCAR2)
                _host->native_hotplug = true;

        if (tmio_mmc_clk_enable(_host) < 0) {
                mmc->f_max = pdata->hclk;
                mmc->f_min = mmc->f_max / 512;
        }

        /*
         * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
         * looping forever...
         */
        if (mmc->f_min == 0)
                return -EINVAL;

        /*
         * While using internal tmio hardware logic for card detection, we need
         * to ensure it stays powered for it to work.
         */
        if (_host->native_hotplug)
                pm_runtime_get_noresume(&pdev->dev);

        tmio_mmc_clk_stop(_host);
        tmio_mmc_reset(_host);

        _host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
        tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

        /* Unmask the IRQs we want to know about */
        if (!_host->chan_rx)
                irq_mask |= TMIO_MASK_READOP;
        if (!_host->chan_tx)
                irq_mask |= TMIO_MASK_WRITEOP;
        if (!_host->native_hotplug)
                irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

        _host->sdcard_irq_mask &= ~irq_mask;

        _host->sdio_irq_enabled = false;
        if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
                _host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
                sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
                sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0001);
        }

        spin_lock_init(&_host->lock);
        mutex_init(&_host->ios_lock);

        /* Init delayed work for request timeouts */
        INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
        INIT_WORK(&_host->done, tmio_mmc_done_work);

        /* See if we also get DMA */
        _host->dma_ops = dma_ops;
        tmio_mmc_request_dma(_host, pdata);

        pm_runtime_set_active(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = mmc_add_host(mmc);
        if (ret < 0) {
                tmio_mmc_host_remove(_host);
                return ret;
        }

        dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

        if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
                ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
                if (ret < 0) {
                        tmio_mmc_host_remove(_host);
                        return ret;
                }
                mmc_gpiod_request_cd_irq(mmc);
        }

        return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
        struct platform_device *pdev = host->pdev;
        struct mmc_host *mmc = host->mmc;

        if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
                sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

        if (!host->native_hotplug)
                pm_runtime_get_sync(&pdev->dev);

        dev_pm_qos_hide_latency_limit(&pdev->dev);

        mmc_remove_host(mmc);
        cancel_work_sync(&host->done);
        cancel_delayed_work_sync(&host->delayed_reset_work);
        tmio_mmc_release_dma(host);

        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct tmio_mmc_host *host = mmc_priv(mmc);

        tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

        if (host->clk_cache)
                tmio_mmc_clk_stop(host);

        tmio_mmc_clk_disable(host);

        return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
{
        return host->tap_num && mmc_can_retune(host->mmc);
}
int tmio_mmc_host_runtime_resume(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct tmio_mmc_host *host = mmc_priv(mmc);

        tmio_mmc_reset(host);
        tmio_mmc_clk_enable(host);

        if (host->clk_cache)
                tmio_mmc_set_clock(host, host->clk_cache);

        tmio_mmc_enable_dma(host, true);

        if (tmio_mmc_can_retune(host) && host->select_tuning(host))
                dev_warn(&host->pdev->dev, "Tuning selection failed\n");

        return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
#endif
MODULE_LICENSE("GPL v2");