2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/mmc.h>
33 #include <linux/mmc/sd.h>
34 #include <linux/mmc/sdio.h>
35 #include <linux/mmc/dw_mmc.h>
36 #include <linux/bitops.h>
37 #include <linux/regulator/consumer.h>
39 #include <linux/of_gpio.h>
40 #include <linux/mmc/slot-gpio.h>
44 /* Common flag combinations */
45 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
46 SDMMC_INT_HTO | SDMMC_INT_SBE | \
47 SDMMC_INT_EBE)
48 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
49 SDMMC_INT_RESP_ERR)
50 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
51 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
52 #define DW_MCI_SEND_STATUS 1
53 #define DW_MCI_RECV_STATUS 2
54 #define DW_MCI_DMA_THRESHOLD 16
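/*
 * Editor's note: the DMA threshold is in bytes. Transfers smaller than
 * this skip DMA entirely; dw_mci_pre_dma_transfer() below compares
 * data->blocks * data->blksz against it and falls back to PIO, since
 * descriptor setup overhead outweighs the copy cost for tiny requests.
 */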
56 #define DW_MCI_FREQ_MAX 200000000 /* unit: Hz */
57 #define DW_MCI_FREQ_MIN 400000 /* unit: Hz */
59 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 SDMMC_IDMAC_INT_TI)
64 struct idmac_desc_64addr {
65 u32 des0; /* Control Descriptor */
67 u32 des1; /* Reserved */
69 u32 des2; /* Buffer sizes */
70 #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
71 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
72 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
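/*
 * Editor's sketch of the des2 layout implied by the masks above: bits
 * [12:0] (0x1fff) hold the buffer 1 size, while bits [25:13]
 * (0x03ffe000) are preserved because they carry the buffer 2 size used
 * in dual-buffer mode.
 */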
74 u32 des3; /* Reserved */
76 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
77 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
79 u32 des6; /* Lower 32-bits of Next Descriptor Address */
80 u32 des7; /* Upper 32-bits of Next Descriptor Address */
81 };
83 struct idmac_desc {
84 __le32 des0; /* Control Descriptor */
85 #define IDMAC_DES0_DIC BIT(1)
86 #define IDMAC_DES0_LD BIT(2)
87 #define IDMAC_DES0_FD BIT(3)
88 #define IDMAC_DES0_CH BIT(4)
89 #define IDMAC_DES0_ER BIT(5)
90 #define IDMAC_DES0_CES BIT(30)
91 #define IDMAC_DES0_OWN BIT(31)
93 __le32 des1; /* Buffer sizes */
94 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
95 ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
97 __le32 des2; /* buffer 1 physical address */
99 __le32 des3; /* buffer 2 physical address */
100 };
102 /* Each descriptor can transfer up to 4KB of data in chained mode */
103 #define DW_MCI_DESC_DATA_LENGTH 0x1000
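/*
 * Editor's note: with 4KB per chained descriptor and a one-page ring
 * (dw_mci_idmac_init() below derives ring_size from PAGE_SIZE), the
 * IDMAC can map on the order of ring_size * 4KB per request.
 */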
105 static bool dw_mci_reset(struct dw_mci *host);
106 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
107 static int dw_mci_card_busy(struct mmc_host *mmc);
109 #if defined(CONFIG_DEBUG_FS)
110 static int dw_mci_req_show(struct seq_file *s, void *v)
112 struct dw_mci_slot *slot = s->private;
113 struct mmc_request *mrq;
114 struct mmc_command *cmd;
115 struct mmc_command *stop;
116 struct mmc_data *data;
118 /* Make sure we get a consistent snapshot */
119 spin_lock_bh(&slot->host->lock);
129 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
130 cmd->opcode, cmd->arg, cmd->flags,
131 cmd->resp[0], cmd->resp[1], cmd->resp[2],
132 cmd->resp[3], cmd->error);
134 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
135 data->bytes_xfered, data->blocks,
136 data->blksz, data->flags, data->error);
139 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
140 stop->opcode, stop->arg, stop->flags,
141 stop->resp[0], stop->resp[1], stop->resp[2],
142 stop->resp[3], stop->error);
145 spin_unlock_bh(&slot->host->lock);
150 static int dw_mci_req_open(struct inode *inode, struct file *file)
152 return single_open(file, dw_mci_req_show, inode->i_private);
155 static const struct file_operations dw_mci_req_fops = {
156 .owner = THIS_MODULE,
157 .open = dw_mci_req_open,
160 .release = single_release,
163 static int dw_mci_regs_show(struct seq_file *s, void *v)
struct dw_mci *host = s->private;
165 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
166 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
167 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
168 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
169 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
170 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
175 static int dw_mci_regs_open(struct inode *inode, struct file *file)
177 return single_open(file, dw_mci_regs_show, inode->i_private);
180 static const struct file_operations dw_mci_regs_fops = {
181 .owner = THIS_MODULE,
182 .open = dw_mci_regs_open,
185 .release = single_release,
188 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
190 struct mmc_host *mmc = slot->mmc;
191 struct dw_mci *host = slot->host;
195 root = mmc->debugfs_root;
199 node = debugfs_create_file("regs", S_IRUSR, root, host,
204 node = debugfs_create_file("req", S_IRUSR, root, slot,
209 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
213 node = debugfs_create_x32("pending_events", S_IRUSR, root,
214 (u32 *)&host->pending_events);
218 node = debugfs_create_x32("completed_events", S_IRUSR, root,
219 (u32 *)&host->completed_events);
226 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
228 #endif /* defined(CONFIG_DEBUG_FS) */
230 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
232 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
234 struct mmc_data *data;
235 struct dw_mci_slot *slot = mmc_priv(mmc);
236 struct dw_mci *host = slot->host;
239 cmd->error = -EINPROGRESS;
242 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
243 cmd->opcode == MMC_GO_IDLE_STATE ||
244 cmd->opcode == MMC_GO_INACTIVE_STATE ||
245 (cmd->opcode == SD_IO_RW_DIRECT &&
246 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
247 cmdr |= SDMMC_CMD_STOP;
248 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
249 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
251 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
254 /* Special bit makes CMD11 not die */
255 cmdr |= SDMMC_CMD_VOLT_SWITCH;
257 /* Change state to continue to handle CMD11 weirdness */
258 WARN_ON(slot->host->state != STATE_SENDING_CMD);
259 slot->host->state = STATE_SENDING_CMD11;
262 * We need to disable low power mode (automatic clock stop)
263 * while doing voltage switch so we don't confuse the card,
264 * since stopping the clock is a specific part of the UHS
265 * voltage change dance.
267 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
268 * unconditionally turned back on in dw_mci_setup_bus() if it's
269 * ever called with a non-zero clock. That shouldn't happen
270 * until the voltage change is all done.
272 clk_en_a = mci_readl(host, CLKENA);
273 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
274 mci_writel(host, CLKENA, clk_en_a);
275 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
276 SDMMC_CMD_PRV_DAT_WAIT, 0);
279 if (cmd->flags & MMC_RSP_PRESENT) {
280 /* We expect a response, so set this bit */
281 cmdr |= SDMMC_CMD_RESP_EXP;
282 if (cmd->flags & MMC_RSP_136)
283 cmdr |= SDMMC_CMD_RESP_LONG;
286 if (cmd->flags & MMC_RSP_CRC)
287 cmdr |= SDMMC_CMD_RESP_CRC;
291 cmdr |= SDMMC_CMD_DAT_EXP;
292 if (data->flags & MMC_DATA_WRITE)
293 cmdr |= SDMMC_CMD_DAT_WR;
296 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
297 cmdr |= SDMMC_CMD_USE_HOLD_REG;
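/*
 * Editor's note (per the Synopsys databook, stated here as background):
 * use_hold_reg routes the command and data through the hold register,
 * delaying them by one cclk_in cycle to satisfy the card's hold-time
 * requirement; slots that manage their own drive phase set
 * DW_MMC_CARD_NO_USE_HOLD to bypass it.
 */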
302 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
304 struct mmc_command *stop;
310 stop = &host->stop_abort;
312 memset(stop, 0, sizeof(struct mmc_command));
314 if (cmdr == MMC_READ_SINGLE_BLOCK ||
315 cmdr == MMC_READ_MULTIPLE_BLOCK ||
316 cmdr == MMC_WRITE_BLOCK ||
317 cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
318 cmdr == MMC_SEND_TUNING_BLOCK ||
319 cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
320 stop->opcode = MMC_STOP_TRANSMISSION;
322 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
323 } else if (cmdr == SD_IO_RW_EXTENDED) {
324 stop->opcode = SD_IO_RW_DIRECT;
325 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
326 ((cmd->arg >> 28) & 0x7);
327 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
332 cmdr = stop->opcode | SDMMC_CMD_STOP |
333 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
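/*
 * Editor's note on the SDIO abort built above: in the CMD52 argument,
 * bit 31 selects a write, bits 30:28 the function (0 = CCCR), bits
 * 25:9 the register address (SDIO_CCCR_ABORT), and the low bits carry
 * the function number recovered from the original CMD53 argument via
 * (cmd->arg >> 28) & 0x7.
 */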
338 static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
340 unsigned long timeout = jiffies + msecs_to_jiffies(500);
343 * Databook says that before issuing a new data transfer command
344 * we need to check to see if the card is busy. Data transfer commands
345 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
347 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
348 * expected.
350 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
351 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
352 while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
353 if (time_after(jiffies, timeout)) {
354 /* Command will fail; we'll pass error then */
355 dev_err(host->dev, "Busy; trying anyway\n");
363 static void dw_mci_start_command(struct dw_mci *host,
364 struct mmc_command *cmd, u32 cmd_flags)
368 "start command: ARGR=0x%08x CMDR=0x%08x\n",
369 cmd->arg, cmd_flags);
371 mci_writel(host, CMDARG, cmd->arg);
372 wmb(); /* drain writebuffer */
373 dw_mci_wait_while_busy(host, cmd_flags);
375 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
378 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
380 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
382 dw_mci_start_command(host, stop, host->stop_cmdr);
385 /* DMA interface functions */
386 static void dw_mci_stop_dma(struct dw_mci *host)
388 if (host->using_dma) {
389 host->dma_ops->stop(host);
390 host->dma_ops->cleanup(host);
393 /* Data transfer was stopped by the interrupt handler */
394 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
397 static int dw_mci_get_dma_dir(struct mmc_data *data)
399 if (data->flags & MMC_DATA_WRITE)
400 return DMA_TO_DEVICE;
402 return DMA_FROM_DEVICE;
405 static void dw_mci_dma_cleanup(struct dw_mci *host)
407 struct mmc_data *data = host->data;
409 if (data)
410 if (!data->host_cookie)
411 dma_unmap_sg(host->dev,
412 data->sg,
413 data->sg_len,
414 dw_mci_get_dma_dir(data));
417 static void dw_mci_idmac_reset(struct dw_mci *host)
419 u32 bmod = mci_readl(host, BMOD);
420 /* Software reset of DMA */
421 bmod |= SDMMC_IDMAC_SWRESET;
422 mci_writel(host, BMOD, bmod);
425 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
429 /* Disable and reset the IDMAC interface */
430 temp = mci_readl(host, CTRL);
431 temp &= ~SDMMC_CTRL_USE_IDMAC;
432 temp |= SDMMC_CTRL_DMA_RESET;
433 mci_writel(host, CTRL, temp);
435 /* Stop the IDMAC running */
436 temp = mci_readl(host, BMOD);
437 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
438 temp |= SDMMC_IDMAC_SWRESET;
439 mci_writel(host, BMOD, temp);
442 static void dw_mci_dmac_complete_dma(void *arg)
444 struct dw_mci *host = arg;
445 struct mmc_data *data = host->data;
447 dev_vdbg(host->dev, "DMA complete\n");
449 if ((host->use_dma == TRANS_MODE_EDMAC) &&
450 data && (data->flags & MMC_DATA_READ))
451 /* Invalidate cache after read */
452 dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
453 data->sg,
454 data->sg_len,
455 DMA_FROM_DEVICE);
457 host->dma_ops->cleanup(host);
460 * If the card was removed, data will be NULL. No point in trying to
461 * send the stop command or waiting for NBUSY in this case.
464 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
465 tasklet_schedule(&host->tasklet);
469 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
472 unsigned int desc_len;
475 if (host->dma_64bit_address == 1) {
476 struct idmac_desc_64addr *desc_first, *desc_last, *desc;
478 desc_first = desc_last = desc = host->sg_cpu;
480 for (i = 0; i < sg_len; i++) {
481 unsigned int length = sg_dma_len(&data->sg[i]);
483 u64 mem_addr = sg_dma_address(&data->sg[i]);
485 for ( ; length ; desc++) {
486 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
487 length : DW_MCI_DESC_DATA_LENGTH;
492 * Set the OWN bit and disable interrupts
493 * for this descriptor
495 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
499 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
501 /* Physical address to DMA to/from */
502 desc->des4 = mem_addr & 0xffffffff;
503 desc->des5 = mem_addr >> 32;
505 /* Update physical address for the next desc */
506 mem_addr += desc_len;
508 /* Save pointer to the last descriptor */
513 /* Set first descriptor */
514 desc_first->des0 |= IDMAC_DES0_FD;
516 /* Set last descriptor */
517 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
518 desc_last->des0 |= IDMAC_DES0_LD;
521 struct idmac_desc *desc_first, *desc_last, *desc;
523 desc_first = desc_last = desc = host->sg_cpu;
525 for (i = 0; i < sg_len; i++) {
526 unsigned int length = sg_dma_len(&data->sg[i]);
528 u32 mem_addr = sg_dma_address(&data->sg[i]);
530 for ( ; length ; desc++) {
531 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
532 length : DW_MCI_DESC_DATA_LENGTH;
537 * Set the OWN bit and disable interrupts
538 * for this descriptor
540 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
545 IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
547 /* Physical address to DMA to/from */
548 desc->des2 = cpu_to_le32(mem_addr);
550 /* Update physical address for the next desc */
551 mem_addr += desc_len;
553 /* Save pointer to the last descriptor */
558 /* Set first descriptor */
559 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
561 /* Set last descriptor */
562 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
564 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
567 wmb(); /* drain writebuffer */
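/*
 * Editor's note: the barrier above matters because the descriptors
 * live in memory that the IDMAC walks on its own; every des0..des7
 * update must be visible before dw_mci_idmac_start_dma() enables the
 * IDMAC and writes PLDMND.
 */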
570 static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
574 dw_mci_translate_sglist(host, host->data, sg_len);
576 /* Make sure to reset DMA in case we did PIO before this */
577 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
578 dw_mci_idmac_reset(host);
580 /* Select IDMAC interface */
581 temp = mci_readl(host, CTRL);
582 temp |= SDMMC_CTRL_USE_IDMAC;
583 mci_writel(host, CTRL, temp);
585 /* drain writebuffer */
588 /* Enable the IDMAC */
589 temp = mci_readl(host, BMOD);
590 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
591 mci_writel(host, BMOD, temp);
593 /* Start it running */
594 mci_writel(host, PLDMND, 1);
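/*
 * Editor's note (databook behaviour, stated as background): writing
 * any value to the poll-demand register makes the IDMAC re-fetch the
 * current descriptor and start the transfer.
 */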
599 static int dw_mci_idmac_init(struct dw_mci *host)
603 if (host->dma_64bit_address == 1) {
604 struct idmac_desc_64addr *p;
605 /* Number of descriptors in the ring buffer */
606 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
608 /* Forward link the descriptor list */
609 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
611 p->des6 = (host->sg_dma +
612 (sizeof(struct idmac_desc_64addr) *
613 (i + 1))) & 0xffffffff;
615 p->des7 = (u64)(host->sg_dma +
616 (sizeof(struct idmac_desc_64addr) *
618 /* Initialize reserved and buffer size fields to "0" */
624 /* Set the last descriptor as the end-of-ring descriptor */
625 p->des6 = host->sg_dma & 0xffffffff;
626 p->des7 = (u64)host->sg_dma >> 32;
627 p->des0 = IDMAC_DES0_ER;
630 struct idmac_desc *p;
631 /* Number of descriptors in the ring buffer */
632 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
634 /* Forward link the descriptor list */
635 for (i = 0, p = host->sg_cpu;
636 i < host->ring_size - 1;
638 p->des3 = cpu_to_le32(host->sg_dma +
639 (sizeof(struct idmac_desc) * (i + 1)));
643 /* Set the last descriptor as the end-of-ring descriptor */
644 p->des3 = cpu_to_le32(host->sg_dma);
645 p->des0 = cpu_to_le32(IDMAC_DES0_ER);
648 dw_mci_idmac_reset(host);
650 if (host->dma_64bit_address == 1) {
651 /* Mask out interrupts - get Tx & Rx complete only */
652 mci_writel(host, IDSTS64, IDMAC_INT_CLR);
653 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
654 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
656 /* Set the descriptor base address */
657 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
658 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
661 /* Mask out interrupts - get Tx & Rx complete only */
662 mci_writel(host, IDSTS, IDMAC_INT_CLR);
663 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
664 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
666 /* Set the descriptor base address */
667 mci_writel(host, DBADDR, host->sg_dma);
673 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
674 .init = dw_mci_idmac_init,
675 .start = dw_mci_idmac_start_dma,
676 .stop = dw_mci_idmac_stop_dma,
677 .complete = dw_mci_dmac_complete_dma,
678 .cleanup = dw_mci_dma_cleanup,
681 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
683 dmaengine_terminate_async(host->dms->ch);
686 static int dw_mci_edmac_start_dma(struct dw_mci *host,
689 struct dma_slave_config cfg;
690 struct dma_async_tx_descriptor *desc = NULL;
691 struct scatterlist *sgl = host->data->sg;
692 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
693 u32 sg_elems = host->data->sg_len;
695 u32 fifo_offset = host->fifo_reg - host->regs;
698 /* Set external dma config: burst size, burst width */
699 cfg.dst_addr = host->phy_regs + fifo_offset;
700 cfg.src_addr = cfg.dst_addr;
701 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
702 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
704 /* Match burst msize with external dma config */
705 fifoth_val = mci_readl(host, FIFOTH);
706 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
707 cfg.src_maxburst = cfg.dst_maxburst;
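/*
 * Editor's note: the assumption here (consistent with SDMMC_SET_FIFOTH
 * usage elsewhere in this file) is that FIFOTH bits [30:28] hold the
 * DMA multiple-transaction size (MSIZE) as an index into the same
 * 1/4/8/.../256 table, so the slave-DMA burst size stays in lockstep
 * with the host FIFO watermarks.
 */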
709 if (host->data->flags & MMC_DATA_WRITE)
710 cfg.direction = DMA_MEM_TO_DEV;
712 cfg.direction = DMA_DEV_TO_MEM;
714 ret = dmaengine_slave_config(host->dms->ch, &cfg);
716 dev_err(host->dev, "Failed to config edmac.\n");
720 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
721 sg_len, cfg.direction,
722 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
724 dev_err(host->dev, "Can't prepare slave sg.\n");
728 /* Set dw_mci_dmac_complete_dma as callback */
729 desc->callback = dw_mci_dmac_complete_dma;
730 desc->callback_param = (void *)host;
731 dmaengine_submit(desc);
733 /* Flush cache before write */
734 if (host->data->flags & MMC_DATA_WRITE)
735 dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
736 sg_elems, DMA_TO_DEVICE);
738 dma_async_issue_pending(host->dms->ch);
743 static int dw_mci_edmac_init(struct dw_mci *host)
745 /* Request external dma channel */
746 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
750 host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
751 if (!host->dms->ch) {
752 dev_err(host->dev, "Failed to get external DMA channel.\n");
761 static void dw_mci_edmac_exit(struct dw_mci *host)
765 dma_release_channel(host->dms->ch);
766 host->dms->ch = NULL;
773 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
774 .init = dw_mci_edmac_init,
775 .exit = dw_mci_edmac_exit,
776 .start = dw_mci_edmac_start_dma,
777 .stop = dw_mci_edmac_stop_dma,
778 .complete = dw_mci_dmac_complete_dma,
779 .cleanup = dw_mci_dma_cleanup,
782 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
783 struct mmc_data *data,
786 struct scatterlist *sg;
787 unsigned int i, sg_len;
789 if (!next && data->host_cookie)
790 return data->host_cookie;
793 * We don't do DMA on "complex" transfers, i.e. with
794 * non-word-aligned buffers or lengths. Also, we don't bother
795 * with all the DMA setup overhead for short transfers.
797 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
803 for_each_sg(data->sg, sg, data->sg_len, i) {
804 if (sg->offset & 3 || sg->length & 3)
808 sg_len = dma_map_sg(host->dev,
809 data->sg,
810 data->sg_len,
811 dw_mci_get_dma_dir(data));
816 data->host_cookie = sg_len;
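/*
 * Editor's note on the host_cookie protocol: dw_mci_pre_req() maps the
 * scatterlist ahead of time and stores the mapped entry count here;
 * when the request is started, the early return at the top of this
 * function reuses that mapping, and dw_mci_post_req() (or
 * dw_mci_dma_cleanup() for unprepared requests) unmaps it and clears
 * the cookie.
 */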
821 static void dw_mci_pre_req(struct mmc_host *mmc,
822 struct mmc_request *mrq,
825 struct dw_mci_slot *slot = mmc_priv(mmc);
826 struct mmc_data *data = mrq->data;
828 if (!slot->host->use_dma || !data)
831 if (data->host_cookie) {
832 data->host_cookie = 0;
836 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
837 data->host_cookie = 0;
840 static void dw_mci_post_req(struct mmc_host *mmc,
841 struct mmc_request *mrq,
844 struct dw_mci_slot *slot = mmc_priv(mmc);
845 struct mmc_data *data = mrq->data;
847 if (!slot->host->use_dma || !data)
850 if (data->host_cookie)
851 dma_unmap_sg(slot->host->dev,
852 data->sg,
853 data->sg_len,
854 dw_mci_get_dma_dir(data));
855 data->host_cookie = 0;
858 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
860 unsigned int blksz = data->blksz;
861 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
862 u32 fifo_width = 1 << host->data_shift;
863 u32 blksz_depth = blksz / fifo_width, fifoth_val;
864 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
865 int idx = ARRAY_SIZE(mszs) - 1;
867 /* PIO should skip this scenario */
871 tx_wmark = (host->fifo_depth) / 2;
872 tx_wmark_invers = host->fifo_depth - tx_wmark;
876 * if blksz is not a multiple of the FIFO width
878 if (blksz % fifo_width) {
885 if (!((blksz_depth % mszs[idx]) ||
886 (tx_wmark_invers % mszs[idx]))) {
888 rx_wmark = mszs[idx] - 1;
893 * If idx is '0', it won't be tried
894 * Thus, initial values are used
897 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
898 mci_writel(host, FIFOTH, fifoth_val);
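/*
 * Editor's worked example (assuming a 32-bit FIFO of depth 128 and
 * blksz = 512, and that msize is set to the matching index): then
 * blksz_depth = 128, tx_wmark = 64 and tx_wmark_invers = 64; scanning
 * mszs[] downward, 256 and 128 fail the divisibility test but 64
 * passes, giving msize = 5 and rx_wmark = 63.
 */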
901 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
903 unsigned int blksz = data->blksz;
904 u32 blksz_depth, fifo_depth;
907 WARN_ON(!(data->flags & MMC_DATA_READ));
910 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
911 * in the FIFO region, so we really shouldn't access it).
913 if (host->verid < DW_MMC_240A)
916 if (host->timing != MMC_TIMING_MMC_HS200 &&
917 host->timing != MMC_TIMING_MMC_HS400 &&
918 host->timing != MMC_TIMING_UHS_SDR104)
921 blksz_depth = blksz / (1 << host->data_shift);
922 fifo_depth = host->fifo_depth;
924 if (blksz_depth > fifo_depth)
928 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
929 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
930 * Currently just choose blksz.
933 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
937 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
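/*
 * Editor's note: the card-read threshold ensures a block is only
 * clocked out of the card when at least thld_size bytes of FIFO space
 * are free, so the fast timings checked above (HS200/HS400/SDR104)
 * cannot overrun the FIFO mid-block; for other timings it is disabled.
 */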
940 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
942 unsigned long irqflags;
948 /* If we don't have a channel, we can't do DMA */
952 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
954 host->dma_ops->stop(host);
960 if (host->use_dma == TRANS_MODE_IDMAC)
962 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
963 (unsigned long)host->sg_cpu,
964 (unsigned long)host->sg_dma,
965 sg_len);
968 * Decide the MSIZE and RX/TX Watermark.
969 * If the current block size is the same as the previous one,
970 * there is no need to update FIFOTH.
972 if (host->prev_blksz != data->blksz)
973 dw_mci_adjust_fifoth(host, data);
975 /* Enable the DMA interface */
976 temp = mci_readl(host, CTRL);
977 temp |= SDMMC_CTRL_DMA_ENABLE;
978 mci_writel(host, CTRL, temp);
980 /* Disable RX/TX IRQs, let DMA handle it */
981 spin_lock_irqsave(&host->irq_lock, irqflags);
982 temp = mci_readl(host, INTMASK);
983 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
984 mci_writel(host, INTMASK, temp);
985 spin_unlock_irqrestore(&host->irq_lock, irqflags);
987 if (host->dma_ops->start(host, sg_len)) {
988 /* We can't do DMA */
989 dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
996 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
998 unsigned long irqflags;
999 int flags = SG_MITER_ATOMIC;
1002 data->error = -EINPROGRESS;
1004 WARN_ON(host->data);
1008 if (data->flags & MMC_DATA_READ) {
1009 host->dir_status = DW_MCI_RECV_STATUS;
1010 dw_mci_ctrl_rd_thld(host, data);
1012 host->dir_status = DW_MCI_SEND_STATUS;
1015 if (dw_mci_submit_data_dma(host, data)) {
1016 if (host->data->flags & MMC_DATA_READ)
1017 flags |= SG_MITER_TO_SG;
1019 flags |= SG_MITER_FROM_SG;
1021 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1022 host->sg = data->sg;
1023 host->part_buf_start = 0;
1024 host->part_buf_count = 0;
1026 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1028 spin_lock_irqsave(&host->irq_lock, irqflags);
1029 temp = mci_readl(host, INTMASK);
1030 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1031 mci_writel(host, INTMASK, temp);
1032 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1034 temp = mci_readl(host, CTRL);
1035 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1036 mci_writel(host, CTRL, temp);
1039 * Use the initial fifoth_val for PIO mode.
1040 * If the next issued data may be transferred by DMA mode,
1041 * prev_blksz should be invalidated.
1043 mci_writel(host, FIFOTH, host->fifoth_val);
1044 host->prev_blksz = 0;
1047 * Keep the current block size.
1048 * It will be used to decide whether to update
1049 * fifoth register next time.
1051 host->prev_blksz = data->blksz;
1055 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1057 struct dw_mci *host = slot->host;
1058 unsigned long timeout = jiffies + msecs_to_jiffies(500);
1059 unsigned int cmd_status = 0;
1061 mci_writel(host, CMDARG, arg);
1062 wmb(); /* drain writebuffer */
1063 dw_mci_wait_while_busy(host, cmd);
1064 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
1066 while (time_before(jiffies, timeout)) {
1067 cmd_status = mci_readl(host, CMD);
1068 if (!(cmd_status & SDMMC_CMD_START))
1071 dev_err(&slot->mmc->class_dev,
1072 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1073 cmd, arg, cmd_status);
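/*
 * Editor's note: SDMMC_CMD_START is self-clearing; the controller
 * drops it once it has accepted the command (a clock-update command
 * like this never goes out on the bus), which is what the polling loop
 * above waits for.
 */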
1076 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1078 struct dw_mci *host = slot->host;
1079 unsigned int clock = slot->clock;
1082 u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
1084 /* We must continue to set bit 28 in CMD until the change is complete */
1085 if (host->state == STATE_WAITING_CMD11_DONE)
1086 sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
1089 mci_writel(host, CLKENA, 0);
1090 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1091 } else if (clock != host->current_speed || force_clkinit) {
1092 div = host->bus_hz / clock;
1093 if (host->bus_hz % clock && host->bus_hz > clock)
1095 * move the + 1 after the divide to prevent
1096 * over-clocking the card.
1100 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1102 if ((clock << div) != slot->__clk_old || force_clkinit)
1103 dev_info(&slot->mmc->class_dev,
1104 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
1105 slot->id, host->bus_hz, clock,
1106 div ? ((host->bus_hz / div) >> 1) :
1110 mci_writel(host, CLKENA, 0);
1111 mci_writel(host, CLKSRC, 0);
1114 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1116 /* set clock to desired speed */
1117 mci_writel(host, CLKDIV, div);
1120 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1122 /* enable clock; only low power if no SDIO */
1123 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1124 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
1125 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1126 mci_writel(host, CLKENA, clk_en_a);
1129 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1131 /* remember the requested clock with the clock divider reflected */
1132 slot->__clk_old = clock << div;
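/*
 * Editor's note on the divider math: CLKDIV holds half the divider
 * (the controller divides by 2 * CLKDIV, with 0 meaning bypass).
 * Example, assuming bus_hz = 100 MHz and a 400 kHz request: div = 250
 * rounds up to CLKDIV = 125, so the card sees 100 MHz / (2 * 125) =
 * 400 kHz, matching the "(host->bus_hz / div) >> 1" rate printed above.
 */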
1135 host->current_speed = clock;
1137 /* Set the current slot bus width */
1138 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1141 static void __dw_mci_start_request(struct dw_mci *host,
1142 struct dw_mci_slot *slot,
1143 struct mmc_command *cmd)
1145 struct mmc_request *mrq;
1146 struct mmc_data *data;
1151 host->cur_slot = slot;
1154 host->pending_events = 0;
1155 host->completed_events = 0;
1156 host->cmd_status = 0;
1157 host->data_status = 0;
1158 host->dir_status = 0;
1162 mci_writel(host, TMOUT, 0xFFFFFFFF);
1163 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1164 mci_writel(host, BLKSIZ, data->blksz);
1167 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1169 /* this is the first command, send the initialization clock */
1170 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1171 cmdflags |= SDMMC_CMD_INIT;
1174 dw_mci_submit_data(host, data);
1175 wmb(); /* drain writebuffer */
1178 dw_mci_start_command(host, cmd, cmdflags);
1180 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
1181 unsigned long irqflags;
1184 * Databook says to fail after 2ms w/ no response, but evidence
1185 * shows that sometimes the cmd11 interrupt takes over 130ms.
1186 * We'll set to 500ms, plus an extra jiffy just in case jiffies
1187 * is just about to roll over.
1189 * We do this whole thing under spinlock and only if the
1190 * command hasn't already completed (indicating that the irq
1191 * already ran so we don't want the timeout).
1193 spin_lock_irqsave(&host->irq_lock, irqflags);
1194 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1195 mod_timer(&host->cmd11_timer,
1196 jiffies + msecs_to_jiffies(500) + 1);
1197 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1201 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
1203 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
1206 static void dw_mci_start_request(struct dw_mci *host,
1207 struct dw_mci_slot *slot)
1209 struct mmc_request *mrq = slot->mrq;
1210 struct mmc_command *cmd;
1212 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1213 __dw_mci_start_request(host, slot, cmd);
1216 /* must be called with host->lock held */
1217 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1218 struct mmc_request *mrq)
1220 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1225 if (host->state == STATE_WAITING_CMD11_DONE) {
1226 dev_warn(&slot->mmc->class_dev,
1227 "Voltage change didn't complete\n");
1229 * this case isn't expected to happen, so we can
1230 * either crash here or just try to continue on
1231 * in the closest possible state
1233 host->state = STATE_IDLE;
1236 if (host->state == STATE_IDLE) {
1237 host->state = STATE_SENDING_CMD;
1238 dw_mci_start_request(host, slot);
1240 list_add_tail(&slot->queue_node, &host->queue);
1244 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1246 struct dw_mci_slot *slot = mmc_priv(mmc);
1247 struct dw_mci *host = slot->host;
1252 * The check for card presence and queueing of the request must be
1253 * atomic, otherwise the card could be removed in between and the
1254 * request wouldn't fail until another card was inserted.
1256 spin_lock_bh(&host->lock);
1258 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1259 spin_unlock_bh(&host->lock);
1260 mrq->cmd->error = -ENOMEDIUM;
1261 mmc_request_done(mmc, mrq);
1265 dw_mci_queue_request(host, slot, mrq);
1267 spin_unlock_bh(&host->lock);
1270 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1272 struct dw_mci_slot *slot = mmc_priv(mmc);
1273 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1277 switch (ios->bus_width) {
1278 case MMC_BUS_WIDTH_4:
1279 slot->ctype = SDMMC_CTYPE_4BIT;
1281 case MMC_BUS_WIDTH_8:
1282 slot->ctype = SDMMC_CTYPE_8BIT;
1285 /* set default 1 bit mode */
1286 slot->ctype = SDMMC_CTYPE_1BIT;
1289 regs = mci_readl(slot->host, UHS_REG);
1292 if (ios->timing == MMC_TIMING_MMC_DDR52 ||
1293 ios->timing == MMC_TIMING_UHS_DDR50 ||
1294 ios->timing == MMC_TIMING_MMC_HS400)
1295 regs |= ((0x1 << slot->id) << 16);
1297 regs &= ~((0x1 << slot->id) << 16);
1299 mci_writel(slot->host, UHS_REG, regs);
1300 slot->host->timing = ios->timing;
1303 * Use mirror of ios->clock to prevent race with mmc
1304 * core ios update when finding the minimum.
1306 slot->clock = ios->clock;
1308 if (drv_data && drv_data->set_ios)
1309 drv_data->set_ios(slot->host, ios);
1311 switch (ios->power_mode) {
1313 if (!IS_ERR(mmc->supply.vmmc)) {
1314 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1317 dev_err(slot->host->dev,
1318 "failed to enable vmmc regulator\n");
1319 /* return if we failed to turn on vmmc */
1323 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1324 regs = mci_readl(slot->host, PWREN);
1325 regs |= (1 << slot->id);
1326 mci_writel(slot->host, PWREN, regs);
1329 if (!slot->host->vqmmc_enabled) {
1330 if (!IS_ERR(mmc->supply.vqmmc)) {
1331 ret = regulator_enable(mmc->supply.vqmmc);
1333 dev_err(slot->host->dev,
1334 "failed to enable vqmmc\n");
1336 slot->host->vqmmc_enabled = true;
1339 /* Keep track so we don't reset again */
1340 slot->host->vqmmc_enabled = true;
1343 /* Reset our state machine after powering on */
1344 dw_mci_ctrl_reset(slot->host,
1345 SDMMC_CTRL_ALL_RESET_FLAGS);
1348 /* Adjust clock / bus width after power is up */
1349 dw_mci_setup_bus(slot, false);
1353 /* Turn clock off before power goes down */
1354 dw_mci_setup_bus(slot, false);
1356 if (!IS_ERR(mmc->supply.vmmc))
1357 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1359 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
1360 regulator_disable(mmc->supply.vqmmc);
1361 slot->host->vqmmc_enabled = false;
1363 regs = mci_readl(slot->host, PWREN);
1364 regs &= ~(1 << slot->id);
1365 mci_writel(slot->host, PWREN, regs);
1371 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1372 slot->host->state = STATE_IDLE;
1375 static int dw_mci_card_busy(struct mmc_host *mmc)
1377 struct dw_mci_slot *slot = mmc_priv(mmc);
1381 * Check the busy bit which is low when DAT[3:0]
1382 * (the data lines) are 0000
1384 status = mci_readl(slot->host, STATUS);
1386 return !!(status & SDMMC_STATUS_BUSY);
1389 static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1391 struct dw_mci_slot *slot = mmc_priv(mmc);
1392 struct dw_mci *host = slot->host;
1393 const struct dw_mci_drv_data *drv_data = host->drv_data;
1395 u32 v18 = SDMMC_UHS_18V << slot->id;
1398 if (drv_data && drv_data->switch_voltage)
1399 return drv_data->switch_voltage(mmc, ios);
1402 * Program the voltage. Note that some instances of dw_mmc may use
1403 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
1404 * does no harm but you need to set the regulator directly. Try both.
1406 uhs = mci_readl(host, UHS_REG);
1407 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1412 if (!IS_ERR(mmc->supply.vqmmc)) {
1413 ret = mmc_regulator_set_vqmmc(mmc, ios);
1416 dev_dbg(&mmc->class_dev,
1417 "Regulator set error %d - %s V\n",
1418 ret, uhs & v18 ? "1.8" : "3.3");
1422 mci_writel(host, UHS_REG, uhs);
1427 static int dw_mci_get_ro(struct mmc_host *mmc)
1430 struct dw_mci_slot *slot = mmc_priv(mmc);
1431 int gpio_ro = mmc_gpio_get_ro(mmc);
1433 /* Use platform get_ro function, else try on-board write protect */
1435 read_only = gpio_ro;
1438 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1440 dev_dbg(&mmc->class_dev, "card is %s\n",
1441 read_only ? "read-only" : "read-write");
1446 static int dw_mci_get_cd(struct mmc_host *mmc)
1449 struct dw_mci_slot *slot = mmc_priv(mmc);
1450 struct dw_mci *host = slot->host;
1451 int gpio_cd = mmc_gpio_get_cd(mmc);
1453 /* Use platform get_cd function, else try onboard card detect */
1454 if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
1456 else if (gpio_cd >= 0)
1459 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1462 spin_lock_bh(&host->lock);
1464 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1465 dev_dbg(&mmc->class_dev, "card is present\n");
1467 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1468 dev_dbg(&mmc->class_dev, "card is not present\n");
1470 spin_unlock_bh(&host->lock);
1475 static void dw_mci_hw_reset(struct mmc_host *mmc)
1477 struct dw_mci_slot *slot = mmc_priv(mmc);
1478 struct dw_mci *host = slot->host;
1481 if (host->use_dma == TRANS_MODE_IDMAC)
1482 dw_mci_idmac_reset(host);
1484 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
1485 SDMMC_CTRL_FIFO_RESET))
1489 * According to eMMC spec, card reset procedure:
1490 * tRstW >= 1us: RST_n pulse width
1491 * tRSCA >= 200us: RST_n to Command time
1492 * tRSTH >= 1us: RST_n high period
1494 reset = mci_readl(host, RST_N);
1495 reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
1496 mci_writel(host, RST_N, reset);
1498 reset |= SDMMC_RST_HWACTIVE << slot->id;
1499 mci_writel(host, RST_N, reset);
1500 usleep_range(200, 300);
1503 static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
1505 struct dw_mci_slot *slot = mmc_priv(mmc);
1506 struct dw_mci *host = slot->host;
1509 * Low power mode will stop the card clock when idle. According to the
1510 * description of the CLKENA register we should disable low power mode
1511 * for SDIO cards if we need SDIO interrupts to work.
1513 if (mmc->caps & MMC_CAP_SDIO_IRQ) {
1514 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1518 clk_en_a_old = mci_readl(host, CLKENA);
1520 if (card->type == MMC_TYPE_SDIO ||
1521 card->type == MMC_TYPE_SD_COMBO) {
1522 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1523 clk_en_a = clk_en_a_old & ~clken_low_pwr;
1525 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1526 clk_en_a = clk_en_a_old | clken_low_pwr;
1529 if (clk_en_a != clk_en_a_old) {
1530 mci_writel(host, CLKENA, clk_en_a);
1531 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1532 SDMMC_CMD_PRV_DAT_WAIT, 0);
1537 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1539 struct dw_mci_slot *slot = mmc_priv(mmc);
1540 struct dw_mci *host = slot->host;
1541 unsigned long irqflags;
1544 spin_lock_irqsave(&host->irq_lock, irqflags);
1546 /* Enable/disable Slot Specific SDIO interrupt */
1547 int_mask = mci_readl(host, INTMASK);
1549 int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1551 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1552 mci_writel(host, INTMASK, int_mask);
1554 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1557 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1559 struct dw_mci_slot *slot = mmc_priv(mmc);
1560 struct dw_mci *host = slot->host;
1561 const struct dw_mci_drv_data *drv_data = host->drv_data;
1564 if (drv_data && drv_data->execute_tuning)
1565 err = drv_data->execute_tuning(slot, opcode);
1569 static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1570 struct mmc_ios *ios)
1572 struct dw_mci_slot *slot = mmc_priv(mmc);
1573 struct dw_mci *host = slot->host;
1574 const struct dw_mci_drv_data *drv_data = host->drv_data;
1576 if (drv_data && drv_data->prepare_hs400_tuning)
1577 return drv_data->prepare_hs400_tuning(host, ios);
1582 static const struct mmc_host_ops dw_mci_ops = {
1583 .request = dw_mci_request,
1584 .pre_req = dw_mci_pre_req,
1585 .post_req = dw_mci_post_req,
1586 .set_ios = dw_mci_set_ios,
1587 .get_ro = dw_mci_get_ro,
1588 .get_cd = dw_mci_get_cd,
1589 .hw_reset = dw_mci_hw_reset,
1590 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1591 .execute_tuning = dw_mci_execute_tuning,
1592 .card_busy = dw_mci_card_busy,
1593 .start_signal_voltage_switch = dw_mci_switch_voltage,
1594 .init_card = dw_mci_init_card,
1595 .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
1598 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1599 __releases(&host->lock)
1600 __acquires(&host->lock)
1602 struct dw_mci_slot *slot;
1603 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1605 WARN_ON(host->cmd || host->data);
1607 host->cur_slot->mrq = NULL;
1609 if (!list_empty(&host->queue)) {
1610 slot = list_entry(host->queue.next,
1611 struct dw_mci_slot, queue_node);
1612 list_del(&slot->queue_node);
1613 dev_vdbg(host->dev, "list not empty: %s is next\n",
1614 mmc_hostname(slot->mmc));
1615 host->state = STATE_SENDING_CMD;
1616 dw_mci_start_request(host, slot);
1618 dev_vdbg(host->dev, "list empty\n");
1620 if (host->state == STATE_SENDING_CMD11)
1621 host->state = STATE_WAITING_CMD11_DONE;
1623 host->state = STATE_IDLE;
1626 spin_unlock(&host->lock);
1627 mmc_request_done(prev_mmc, mrq);
1628 spin_lock(&host->lock);
1631 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1633 u32 status = host->cmd_status;
1635 host->cmd_status = 0;
1637 /* Read the response from the card (up to 16 bytes) */
1638 if (cmd->flags & MMC_RSP_PRESENT) {
1639 if (cmd->flags & MMC_RSP_136) {
1640 cmd->resp[3] = mci_readl(host, RESP0);
1641 cmd->resp[2] = mci_readl(host, RESP1);
1642 cmd->resp[1] = mci_readl(host, RESP2);
1643 cmd->resp[0] = mci_readl(host, RESP3);
1645 cmd->resp[0] = mci_readl(host, RESP0);
1652 if (status & SDMMC_INT_RTO)
1653 cmd->error = -ETIMEDOUT;
1654 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1655 cmd->error = -EILSEQ;
1656 else if (status & SDMMC_INT_RESP_ERR)
1664 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1666 u32 status = host->data_status;
1668 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1669 if (status & SDMMC_INT_DRTO) {
1670 data->error = -ETIMEDOUT;
1671 } else if (status & SDMMC_INT_DCRC) {
1672 data->error = -EILSEQ;
1673 } else if (status & SDMMC_INT_EBE) {
1674 if (host->dir_status ==
1675 DW_MCI_SEND_STATUS) {
1677 * No data CRC status was returned.
1678 * The number of bytes transferred
1679 * will be exaggerated in PIO mode.
1681 data->bytes_xfered = 0;
1682 data->error = -ETIMEDOUT;
1683 } else if (host->dir_status ==
1684 DW_MCI_RECV_STATUS) {
1688 /* SDMMC_INT_SBE is included */
1692 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1695 * After an error, there may be data lingering
1696 * in the FIFO
1700 data->bytes_xfered = data->blocks * data->blksz;
1707 static void dw_mci_set_drto(struct dw_mci *host)
1709 unsigned int drto_clks;
1710 unsigned int drto_ms;
1712 drto_clks = mci_readl(host, TMOUT) >> 8;
1713 drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
1715 /* add a bit of spare time */
1718 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
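/*
 * Editor's note: TMOUT[31:8] holds the data timeout in card-clock
 * cycles (hence the >> 8 above); dividing by bus_hz / 1000 converts it
 * to milliseconds for this software backup timer, which covers
 * controllers with the DW_MCI_QUIRK_BROKEN_DTO quirk.
 */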
1721 static void dw_mci_tasklet_func(unsigned long priv)
1723 struct dw_mci *host = (struct dw_mci *)priv;
1724 struct mmc_data *data;
1725 struct mmc_command *cmd;
1726 struct mmc_request *mrq;
1727 enum dw_mci_state state;
1728 enum dw_mci_state prev_state;
1731 spin_lock(&host->lock);
1733 state = host->state;
1742 case STATE_WAITING_CMD11_DONE:
1745 case STATE_SENDING_CMD11:
1746 case STATE_SENDING_CMD:
1747 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1748 &host->pending_events))
1753 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1754 err = dw_mci_command_complete(host, cmd);
1755 if (cmd == mrq->sbc && !err) {
1756 prev_state = state = STATE_SENDING_CMD;
1757 __dw_mci_start_request(host, host->cur_slot,
1762 if (cmd->data && err) {
1763 dw_mci_stop_dma(host);
1764 send_stop_abort(host, data);
1765 state = STATE_SENDING_STOP;
1769 if (!cmd->data || err) {
1770 dw_mci_request_end(host, mrq);
1774 prev_state = state = STATE_SENDING_DATA;
1777 case STATE_SENDING_DATA:
1779 * We could get a data error and never a transfer
1780 * complete so we'd better check for it here.
1782 * Note that we don't really care if we also got a
1783 * transfer complete; stopping the DMA and sending an
1784 * abort won't hurt.
1786 if (test_and_clear_bit(EVENT_DATA_ERROR,
1787 &host->pending_events)) {
1788 dw_mci_stop_dma(host);
1790 !(host->data_status & (SDMMC_INT_DRTO |
1792 send_stop_abort(host, data);
1793 state = STATE_DATA_ERROR;
1797 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1798 &host->pending_events)) {
1800 * Arm the data read timeout if all data-related interrupts
1801 * don't come within the given time in the reading data state.
1803 if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
1804 (host->dir_status == DW_MCI_RECV_STATUS))
1805 dw_mci_set_drto(host);
1809 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1812 * Handle an EVENT_DATA_ERROR that might have shown up
1813 * before the transfer completed. This might not have
1814 * been caught by the check above because the interrupt
1815 * could have gone off between the previous check and
1816 * the check for transfer complete.
1818 * Technically this ought not be needed assuming we
1819 * get a DATA_COMPLETE eventually (we'll notice the
1820 * error and end the request), but it shouldn't hurt.
1822 * This has the advantage of sending the stop command.
1824 if (test_and_clear_bit(EVENT_DATA_ERROR,
1825 &host->pending_events)) {
1826 dw_mci_stop_dma(host);
1828 !(host->data_status & (SDMMC_INT_DRTO |
1830 send_stop_abort(host, data);
1831 state = STATE_DATA_ERROR;
1834 prev_state = state = STATE_DATA_BUSY;
1838 case STATE_DATA_BUSY:
1839 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1840 &host->pending_events)) {
1842 * If the data error interrupt comes but the data over
1843 * interrupt doesn't come within the given time while
1844 * in the reading data state, arm the data read timeout.
1846 if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
1847 (host->dir_status == DW_MCI_RECV_STATUS))
1848 dw_mci_set_drto(host);
1853 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1854 err = dw_mci_data_complete(host, data);
1857 if (!data->stop || mrq->sbc) {
1858 if (mrq->sbc && data->stop)
1859 data->stop->error = 0;
1860 dw_mci_request_end(host, mrq);
1864 /* stop command for open-ended transfer */
1866 send_stop_abort(host, data);
1869 * If we don't have a command complete now we'll
1870 * never get one since we just reset everything;
1871 * better end the request.
1873 * If we do have a command complete we'll fall
1874 * through to the SENDING_STOP command and
1875 * everything will be peachy keen.
1877 if (!test_bit(EVENT_CMD_COMPLETE,
1878 &host->pending_events)) {
1880 dw_mci_request_end(host, mrq);
1886 * If err is non-zero, a stop/abort
1887 * command has already been issued.
1889 prev_state = state = STATE_SENDING_STOP;
1893 case STATE_SENDING_STOP:
1894 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1895 &host->pending_events))
1898 /* CMD error in data command */
1899 if (mrq->cmd->error && mrq->data)
1906 dw_mci_command_complete(host, mrq->stop);
1908 host->cmd_status = 0;
1910 dw_mci_request_end(host, mrq);
1913 case STATE_DATA_ERROR:
1914 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1915 &host->pending_events))
1918 state = STATE_DATA_BUSY;
1921 } while (state != prev_state);
1923 host->state = state;
1925 spin_unlock(&host->lock);
1929 /* push final bytes to part_buf, only use during push */
1930 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1932 memcpy((void *)&host->part_buf, buf, cnt);
1933 host->part_buf_count = cnt;
1936 /* append bytes to part_buf, only use during push */
1937 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1939 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1940 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1941 host->part_buf_count += cnt;
1945 /* pull first bytes from part_buf, only use during pull */
1946 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1948 cnt = min_t(int, cnt, host->part_buf_count);
1950 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1951 cnt);
1952 host->part_buf_count -= cnt;
1953 host->part_buf_start += cnt;
1958 /* pull final bytes from the part_buf, assuming it's just been filled */
1959 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1961 memcpy(buf, &host->part_buf, cnt);
1962 host->part_buf_start = cnt;
1963 host->part_buf_count = (1 << host->data_shift) - cnt;
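/*
 * Editor's note on part_buf mechanics: the FIFO is only accessed in
 * full host words (1 << data_shift bytes), so the push/pull helpers
 * stage leftover bytes of a buffer in part_buf and splice them into
 * the next call; part_buf_start and part_buf_count track the
 * unconsumed span.
 */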
1966 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1968 struct mmc_data *data = host->data;
1971 /* try and push anything in the part_buf */
1972 if (unlikely(host->part_buf_count)) {
1973 int len = dw_mci_push_part_bytes(host, buf, cnt);
1977 if (host->part_buf_count == 2) {
1978 mci_fifo_writew(host->fifo_reg, host->part_buf16);
1979 host->part_buf_count = 0;
1982 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1983 if (unlikely((unsigned long)buf & 0x1)) {
1985 u16 aligned_buf[64];
1986 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1987 int items = len >> 1;
1989 /* memcpy from input buffer into aligned buffer */
1990 memcpy(aligned_buf, buf, len);
1993 /* push data from aligned buffer into fifo */
1994 for (i = 0; i < items; ++i)
1995 mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2002 for (; cnt >= 2; cnt -= 2)
2003 mci_fifo_writew(host->fifo_reg, *pdata++);
2006 /* put anything remaining in the part_buf */
2008 dw_mci_set_part_bytes(host, buf, cnt);
2009 /* Push data if we have reached the expected data length */
2010 if ((data->bytes_xfered + init_cnt) ==
2011 (data->blksz * data->blocks))
2012 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2016 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2018 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2019 if (unlikely((unsigned long)buf & 0x1)) {
2021 /* pull data from fifo into aligned buffer */
2022 u16 aligned_buf[64];
2023 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2024 int items = len >> 1;
2027 for (i = 0; i < items; ++i)
2028 aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2029 /* memcpy from aligned buffer into output buffer */
2030 memcpy(buf, aligned_buf, len);
2039 for (; cnt >= 2; cnt -= 2)
2040 *pdata++ = mci_fifo_readw(host->fifo_reg);
2044 host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2045 dw_mci_pull_final_bytes(host, buf, cnt);
2049 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2051 struct mmc_data *data = host->data;
2054 /* try and push anything in the part_buf */
2055 if (unlikely(host->part_buf_count)) {
2056 int len = dw_mci_push_part_bytes(host, buf, cnt);
2060 if (host->part_buf_count == 4) {
2061 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2062 host->part_buf_count = 0;
2065 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2066 if (unlikely((unsigned long)buf & 0x3)) {
2068 u32 aligned_buf[32];
2069 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2070 int items = len >> 2;
2072 /* memcpy from input buffer into aligned buffer */
2073 memcpy(aligned_buf, buf, len);
2076 /* push data from aligned buffer into fifo */
2077 for (i = 0; i < items; ++i)
2078 mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
2085 for (; cnt >= 4; cnt -= 4)
2086 mci_fifo_writel(host->fifo_reg, *pdata++);
2089 /* put anything remaining in the part_buf */
2091 dw_mci_set_part_bytes(host, buf, cnt);
2092 /* Push data if we have reached the expected data length */
2093 if ((data->bytes_xfered + init_cnt) ==
2094 (data->blksz * data->blocks))
2095 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2099 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2101 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2102 if (unlikely((unsigned long)buf & 0x3)) {
2104 /* pull data from fifo into aligned buffer */
2105 u32 aligned_buf[32];
2106 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2107 int items = len >> 2;
2110 for (i = 0; i < items; ++i)
2111 aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2112 /* memcpy from aligned buffer into output buffer */
2113 memcpy(buf, aligned_buf, len);
2122 for (; cnt >= 4; cnt -= 4)
2123 *pdata++ = mci_fifo_readl(host->fifo_reg);
2127 host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2128 dw_mci_pull_final_bytes(host, buf, cnt);
2132 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2134 struct mmc_data *data = host->data;
2137 /* try and push anything in the part_buf */
2138 if (unlikely(host->part_buf_count)) {
2139 int len = dw_mci_push_part_bytes(host, buf, cnt);
2144 if (host->part_buf_count == 8) {
2145 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2146 host->part_buf_count = 0;
2149 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2150 if (unlikely((unsigned long)buf & 0x7)) {
2152 u64 aligned_buf[16];
2153 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2154 int items = len >> 3;
2156 /* memcpy from input buffer into aligned buffer */
2157 memcpy(aligned_buf, buf, len);
2160 /* push data from aligned buffer into fifo */
2161 for (i = 0; i < items; ++i)
2162 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
2169 for (; cnt >= 8; cnt -= 8)
2170 mci_fifo_writeq(host->fifo_reg, *pdata++);
2173 /* put anything remaining in the part_buf */
2175 dw_mci_set_part_bytes(host, buf, cnt);
2176 /* Push data if we have reached the expected data length */
2177 if ((data->bytes_xfered + init_cnt) ==
2178 (data->blksz * data->blocks))
2179 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2183 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 if (unlikely((unsigned long)buf & 0x7)) {
2188 /* pull data from fifo into aligned buffer */
2189 u64 aligned_buf[16];
2190 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2191 int items = len >> 3;
2194 for (i = 0; i < items; ++i)
2195 aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2197 /* memcpy from aligned buffer into output buffer */
2198 memcpy(buf, aligned_buf, len);
2207 for (; cnt >= 8; cnt -= 8)
2208 *pdata++ = mci_fifo_readq(host->fifo_reg);
2212 host->part_buf = mci_fifo_readq(host->fifo_reg);
2213 dw_mci_pull_final_bytes(host, buf, cnt);
2217 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2221 /* get remaining partial bytes */
2222 len = dw_mci_pull_part_bytes(host, buf, cnt);
2223 if (unlikely(len == cnt))
2228 /* get the rest of the data */
2229 host->pull_data(host, buf, cnt);
2232 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2234 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2236 unsigned int offset;
2237 struct mmc_data *data = host->data;
2238 int shift = host->data_shift;
2241 unsigned int remain, fcnt;
2244 if (!sg_miter_next(sg_miter))
2247 host->sg = sg_miter->piter.sg;
2248 buf = sg_miter->addr;
2249 remain = sg_miter->length;
2253 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2254 << shift) + host->part_buf_count;
2255 len = min(remain, fcnt);
2258 dw_mci_pull_data(host, (void *)(buf + offset), len);
2259 data->bytes_xfered += len;
2264 sg_miter->consumed = offset;
2265 status = mci_readl(host, MINTSTS);
2266 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2267 /* if the RXDR is ready, read again */
2268 } while ((status & SDMMC_INT_RXDR) ||
2269 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2272 if (!sg_miter_next(sg_miter))
2274 sg_miter->consumed = 0;
2276 sg_miter_stop(sg_miter);
2280 sg_miter_stop(sg_miter);
2282 smp_wmb(); /* drain writebuffer */
2283 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
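/*
 * Editor's summary of the PIO read flow: each RXDR interrupt (FIFO
 * above the RX watermark) drains SDMMC_GET_FCNT() words of reported
 * FIFO occupancy into the sg_miter mapping, and the final pass with
 * dto == true empties whatever remains below the watermark.
 */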
2286 static void dw_mci_write_data_pio(struct dw_mci *host)
2288 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2290 unsigned int offset;
2291 struct mmc_data *data = host->data;
2292 int shift = host->data_shift;
2295 unsigned int fifo_depth = host->fifo_depth;
2296 unsigned int remain, fcnt;
2299 if (!sg_miter_next(sg_miter))
2302 host->sg = sg_miter->piter.sg;
2303 buf = sg_miter->addr;
2304 remain = sg_miter->length;
2308 fcnt = ((fifo_depth -
2309 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2310 << shift) - host->part_buf_count;
2311 len = min(remain, fcnt);
2314 host->push_data(host, (void *)(buf + offset), len);
2315 data->bytes_xfered += len;
2320 sg_miter->consumed = offset;
2321 status = mci_readl(host, MINTSTS);
2322 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2323 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2326 if (!sg_miter_next(sg_miter))
2328 sg_miter->consumed = 0;
2330 sg_miter_stop(sg_miter);
2334 sg_miter_stop(sg_miter);
2336 smp_wmb(); /* drain writebuffer */
2337 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2340 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2342 if (!host->cmd_status)
2343 host->cmd_status = status;
2345 smp_wmb(); /* drain writebuffer */
2347 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2348 tasklet_schedule(&host->tasklet);
2351 static void dw_mci_handle_cd(struct dw_mci *host)
2355 for (i = 0; i < host->num_slots; i++) {
2356 struct dw_mci_slot *slot = host->slot[i];
2361 if (slot->mmc->ops->card_event)
2362 slot->mmc->ops->card_event(slot->mmc);
2363 mmc_detect_change(slot->mmc,
2364 msecs_to_jiffies(host->pdata->detect_delay_ms));
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			unsigned long irqflags;

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
				del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_OF
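/*
 * Illustrative (assumed) device-tree layout: each slot may be described
 * by a child node whose "reg" property carries the slot index, e.g.:
 *
 *	slot@0 {
 *		reg = <0>;
 *	};
 *
 * dw_mci_of_find_slot_node() matches that "reg" value against slot->id.
 */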
/* given a slot, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot)
{
	struct device *dev = slot->mmc->parent;
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot->id)
			return np;
	}
	return NULL;
}
static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(slot);

	if (!np)
		return;

	if (of_property_read_bool(np, "disable-wp")) {
		slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
		dev_warn(slot->mmc->parent,
			 "Slot quirk 'disable-wp' is deprecated\n");
	}
}
#else /* CONFIG_OF */
static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
{
}
#endif /* CONFIG_OF */
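
/*
 * Per-slot initialization. The optional "clock-freq-min-max" property
 * overrides the DW_MCI_FREQ_MIN/DW_MCI_FREQ_MAX defaults, e.g.
 * (illustrative values only):
 *
 *	clock-freq-min-max = <400000 200000000>;
 */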
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/* if there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	dw_mci_slot_of_parse(slot);

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
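
/*
 * Probe-time DMA selection: HCON[17:16] decides between the internal
 * IDMAC, an external DMA master (DW-DMA or generic), or PIO-only, and
 * any failure along the way falls back to PIO mode.
 */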
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;

	/*
	 * Check transfer mode from HCON[17:16].
	 * Clear up the ambiguous description in the dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to the DesignWare DMA Interface, the Generic DMA Interface
	 * has a simpler request/acknowledge handshake mechanism; both are
	 * regarded as external dma masters for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((of_property_count_strings(np, "dma-names") < 0) ||
		    (!of_find_property(np, "dmas", NULL)))
			goto no_dma;

		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}
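
/*
 * Set the requested reset bits in CTRL and poll (for up to 500 ms)
 * until the controller clears them; returns false on timeout.
 */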
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);
	return false;
}
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also recommended that we reset and reprogram idmac */
		dw_mci_idmac_reset(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
	return ret;
}
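
/*
 * Watchdog for the SD voltage-switch sequence: if the CMD11 interrupt
 * never arrives, fake a response timeout so the state machine unwinds.
 */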
static void dw_mci_cmd11_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static void dw_mci_dto_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If the DTO interrupt does NOT come in the sending-data
		 * state, we should notify the driver to terminate the
		 * current transfer and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		break;
	}
}
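
/*
 * Illustrative (assumed) device-tree fragment exercising the generic
 * properties parsed by dw_mci_parse_dt() below; the values are examples
 * only:
 *
 *	num-slots = <1>;
 *	fifo-depth = <0x80>;
 *	card-detect-delay = <200>;
 *	clock-frequency = <400000000>;
 */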
#ifdef CONFIG_OF
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find out the number of slots supported */
	of_property_read_u32(np, "num-slots", &pdata->num_slots);

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL)) {
		dev_info(dev, "supports-highspeed property is deprecated.\n");
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
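
/*
 * Enable the controller's card-detect interrupt only when at least one
 * slot actually needs it (i.e. no polling and no usable CD GPIO).
 */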
static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;
	int i;
	struct dw_mci_slot *slot;

	/*
	 * No need for the CD interrupt if any slot uses polling, or if
	 * every slot has a working card-detect GPIO.
	 */
	for (i = 0; i < host->num_slots; i++) {
		slot = host->slot[i];
		if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
			return;
		if (mmc_gpio_get_cd(slot->mmc) < 0)
			break;
	}
	if (i == host->num_slots)
		return;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
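
/*
 * Core probe path shared by the glue drivers: parse platform data/DT,
 * enable the bus and card interface clocks, size the FIFO, reset the
 * controller, set up DMA and the IRQ handler, then register each slot
 * with the MMC core.
 */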
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	setup_timer(&host->cmd11_timer,
		    dw_mci_cmd11_timer, (unsigned long)host);

	host->quirks = host->pdata->quirks;

	if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
		setup_timer(&host->dto_timer,
			    dw_mci_dto_timer, (unsigned long)host);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings:  RxMark = fifo_size / 2 - 1,
	 * TxMark = fifo_size / 2, DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->fifo_reg = host->regs + DATA_OFFSET;
	else
		host->fifo_reg = host->regs + DATA_240A_OFFSET;

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = 1;

	if (host->num_slots < 1 ||
	    host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
		dev_err(host->dev,
			"Platform data must supply correct num_slots.\n");
		ret = -ENODEV;
		goto err_dmaunmap;
	}

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
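
/*
 * Illustrative sketch (not part of this file) of how a platform glue
 * driver hands its resources to dw_mci_probe(); names and error
 * handling are elided, and "regs" is a hypothetical struct resource:
 *
 *	host->dev = &pdev->dev;
 *	host->irq = platform_get_irq(pdev, 0);
 *	host->regs = devm_ioremap_resource(&pdev->dev, regs);
 *	ret = dw_mci_probe(host);
 */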
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);

int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register,
	 * and invalidate prev_blksz with zero.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");