2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/dw_mmc.h>
34 #include <linux/bitops.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/workqueue.h>
38 #include <linux/of_gpio.h>
39 #include <linux/mmc/slot-gpio.h>
43 /* Common flag combinations */
/*
 * NOTE(review): the continuation lines of DW_MCI_DATA_ERROR_FLAGS and
 * DW_MCI_CMD_ERROR_FLAGS appear truncated in this extract (upstream they
 * also OR in SDMMC_INT_EBE and SDMMC_INT_RESP_ERR) -- confirm against the
 * complete source before building.
 */
44 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
45 SDMMC_INT_HTO | SDMMC_INT_SBE | \
47 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
49 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Values for host->dir_status: direction of the in-flight data transfer. */
51 #define DW_MCI_SEND_STATUS 1
52 #define DW_MCI_RECV_STATUS 2
/* Transfers smaller than this many bytes are done in PIO rather than DMA. */
53 #define DW_MCI_DMA_THRESHOLD 16
55 #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
56 #define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
58 #ifdef CONFIG_MMC_DW_IDMAC
/* All internal-DMAC interrupt status bits, used to ack everything at once. */
59 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
65 u32 des0; /* Control Descriptor */
/*
 * NOTE(review): the "struct idmac_desc {" opener (and the final
 * SDMMC_IDMAC_INT_TI term of IDMAC_INT_CLR) are not visible in this
 * extract; des0..des3 below are the in-memory layout of one internal-DMAC
 * hardware descriptor -- confirm against the complete source.
 */
66 #define IDMAC_DES0_DIC BIT(1)
67 #define IDMAC_DES0_LD BIT(2)
68 #define IDMAC_DES0_FD BIT(3)
69 #define IDMAC_DES0_CH BIT(4)
70 #define IDMAC_DES0_ER BIT(5)
71 #define IDMAC_DES0_CES BIT(30)
72 #define IDMAC_DES0_OWN BIT(31)
74 u32 des1; /* Buffer sizes */
/* Write the 13-bit buffer-1 size field of des1, preserving the other bits. */
75 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
76 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
78 u32 des2; /* buffer 1 physical address */
80 u32 des3; /* buffer 2 physical address */
82 #endif /* CONFIG_MMC_DW_IDMAC */
84 static const u8 tuning_blk_pattern_4bit[] = {
85 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
95 static const u8 tuning_blk_pattern_8bit[] = {
96 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
114 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
117 #if defined(CONFIG_DEBUG_FS)
118 static int dw_mci_req_show(struct seq_file *s, void *v)
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
137 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 cmd->opcode, cmd->arg, cmd->flags,
139 cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 cmd->resp[2], cmd->error);
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 stop->opcode, stop->arg, stop->flags,
149 stop->resp[0], stop->resp[1], stop->resp[2],
150 stop->resp[2], stop->error);
153 spin_unlock_bh(&slot->host->lock);
158 static int dw_mci_req_open(struct inode *inode, struct file *file)
160 return single_open(file, dw_mci_req_show, inode->i_private);
163 static const struct file_operations dw_mci_req_fops = {
164 .owner = THIS_MODULE,
165 .open = dw_mci_req_open,
168 .release = single_release,
171 static int dw_mci_regs_show(struct seq_file *s, void *v)
173 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
183 static int dw_mci_regs_open(struct inode *inode, struct file *file)
185 return single_open(file, dw_mci_regs_show, inode->i_private);
188 static const struct file_operations dw_mci_regs_fops = {
189 .owner = THIS_MODULE,
190 .open = dw_mci_regs_open,
193 .release = single_release,
196 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
198 struct mmc_host *mmc = slot->mmc;
199 struct dw_mci *host = slot->host;
203 root = mmc->debugfs_root;
207 node = debugfs_create_file("regs", S_IRUSR, root, host,
212 node = debugfs_create_file("req", S_IRUSR, root, slot,
217 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
221 node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 (u32 *)&host->pending_events);
226 node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 (u32 *)&host->completed_events);
234 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
236 #endif /* defined(CONFIG_DEBUG_FS) */
238 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
240 struct mmc_data *data;
241 struct dw_mci_slot *slot = mmc_priv(mmc);
242 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
244 cmd->error = -EINPROGRESS;
248 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
249 cmd->opcode == MMC_GO_IDLE_STATE ||
250 cmd->opcode == MMC_GO_INACTIVE_STATE ||
251 (cmd->opcode == SD_IO_RW_DIRECT &&
252 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
253 cmdr |= SDMMC_CMD_STOP;
254 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
255 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
257 if (cmd->flags & MMC_RSP_PRESENT) {
258 /* We expect a response, so set this bit */
259 cmdr |= SDMMC_CMD_RESP_EXP;
260 if (cmd->flags & MMC_RSP_136)
261 cmdr |= SDMMC_CMD_RESP_LONG;
264 if (cmd->flags & MMC_RSP_CRC)
265 cmdr |= SDMMC_CMD_RESP_CRC;
269 cmdr |= SDMMC_CMD_DAT_EXP;
270 if (data->flags & MMC_DATA_STREAM)
271 cmdr |= SDMMC_CMD_STRM_MODE;
272 if (data->flags & MMC_DATA_WRITE)
273 cmdr |= SDMMC_CMD_DAT_WR;
276 if (drv_data && drv_data->prepare_command)
277 drv_data->prepare_command(slot->host, &cmdr);
282 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
284 struct mmc_command *stop;
290 stop = &host->stop_abort;
292 memset(stop, 0, sizeof(struct mmc_command));
294 if (cmdr == MMC_READ_SINGLE_BLOCK ||
295 cmdr == MMC_READ_MULTIPLE_BLOCK ||
296 cmdr == MMC_WRITE_BLOCK ||
297 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
298 stop->opcode = MMC_STOP_TRANSMISSION;
300 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
301 } else if (cmdr == SD_IO_RW_EXTENDED) {
302 stop->opcode = SD_IO_RW_DIRECT;
303 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
304 ((cmd->arg >> 28) & 0x7);
305 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
310 cmdr = stop->opcode | SDMMC_CMD_STOP |
311 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
316 static void dw_mci_start_command(struct dw_mci *host,
317 struct mmc_command *cmd, u32 cmd_flags)
321 "start command: ARGR=0x%08x CMDR=0x%08x\n",
322 cmd->arg, cmd_flags);
324 mci_writel(host, CMDARG, cmd->arg);
327 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
330 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
332 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
333 dw_mci_start_command(host, stop, host->stop_cmdr);
336 /* DMA interface functions */
337 static void dw_mci_stop_dma(struct dw_mci *host)
339 if (host->using_dma) {
340 host->dma_ops->stop(host);
341 host->dma_ops->cleanup(host);
344 /* Data transfer was stopped by the interrupt handler */
345 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
348 static int dw_mci_get_dma_dir(struct mmc_data *data)
350 if (data->flags & MMC_DATA_WRITE)
351 return DMA_TO_DEVICE;
353 return DMA_FROM_DEVICE;
356 #ifdef CONFIG_MMC_DW_IDMAC
357 static void dw_mci_dma_cleanup(struct dw_mci *host)
359 struct mmc_data *data = host->data;
362 if (!data->host_cookie)
363 dma_unmap_sg(host->dev,
366 dw_mci_get_dma_dir(data));
369 static void dw_mci_idmac_reset(struct dw_mci *host)
371 u32 bmod = mci_readl(host, BMOD);
372 /* Software reset of DMA */
373 bmod |= SDMMC_IDMAC_SWRESET;
374 mci_writel(host, BMOD, bmod);
377 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
381 /* Disable and reset the IDMAC interface */
382 temp = mci_readl(host, CTRL);
383 temp &= ~SDMMC_CTRL_USE_IDMAC;
384 temp |= SDMMC_CTRL_DMA_RESET;
385 mci_writel(host, CTRL, temp);
387 /* Stop the IDMAC running */
388 temp = mci_readl(host, BMOD);
389 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
390 temp |= SDMMC_IDMAC_SWRESET;
391 mci_writel(host, BMOD, temp);
394 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
396 struct mmc_data *data = host->data;
398 dev_vdbg(host->dev, "DMA complete\n");
400 host->dma_ops->cleanup(host);
403 * If the card was removed, data will be NULL. No point in trying to
404 * send the stop command or waiting for NBUSY in this case.
407 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
408 tasklet_schedule(&host->tasklet);
412 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
416 struct idmac_desc *desc = host->sg_cpu;
418 for (i = 0; i < sg_len; i++, desc++) {
419 unsigned int length = sg_dma_len(&data->sg[i]);
420 u32 mem_addr = sg_dma_address(&data->sg[i]);
422 /* Set the OWN bit and disable interrupts for this descriptor */
423 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
426 IDMAC_SET_BUFFER1_SIZE(desc, length);
428 /* Physical address to DMA to/from */
429 desc->des2 = mem_addr;
432 /* Set first descriptor */
434 desc->des0 |= IDMAC_DES0_FD;
436 /* Set last descriptor */
437 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
438 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
439 desc->des0 |= IDMAC_DES0_LD;
444 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
448 dw_mci_translate_sglist(host, host->data, sg_len);
450 /* Select IDMAC interface */
451 temp = mci_readl(host, CTRL);
452 temp |= SDMMC_CTRL_USE_IDMAC;
453 mci_writel(host, CTRL, temp);
457 /* Enable the IDMAC */
458 temp = mci_readl(host, BMOD);
459 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
460 mci_writel(host, BMOD, temp);
462 /* Start it running */
463 mci_writel(host, PLDMND, 1);
466 static int dw_mci_idmac_init(struct dw_mci *host)
468 struct idmac_desc *p;
471 /* Number of descriptors in the ring buffer */
472 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
474 /* Forward link the descriptor list */
475 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
476 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
478 /* Set the last descriptor as the end-of-ring descriptor */
479 p->des3 = host->sg_dma;
480 p->des0 = IDMAC_DES0_ER;
482 dw_mci_idmac_reset(host);
484 /* Mask out interrupts - get Tx & Rx complete only */
485 mci_writel(host, IDSTS, IDMAC_INT_CLR);
486 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
489 /* Set the descriptor base address */
490 mci_writel(host, DBADDR, host->sg_dma);
494 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
495 .init = dw_mci_idmac_init,
496 .start = dw_mci_idmac_start_dma,
497 .stop = dw_mci_idmac_stop_dma,
498 .complete = dw_mci_idmac_complete_dma,
499 .cleanup = dw_mci_dma_cleanup,
501 #endif /* CONFIG_MMC_DW_IDMAC */
503 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
504 struct mmc_data *data,
507 struct scatterlist *sg;
508 unsigned int i, sg_len;
510 if (!next && data->host_cookie)
511 return data->host_cookie;
514 * We don't do DMA on "complex" transfers, i.e. with
515 * non-word-aligned buffers or lengths. Also, we don't bother
516 * with all the DMA setup overhead for short transfers.
518 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
524 for_each_sg(data->sg, sg, data->sg_len, i) {
525 if (sg->offset & 3 || sg->length & 3)
529 sg_len = dma_map_sg(host->dev,
532 dw_mci_get_dma_dir(data));
537 data->host_cookie = sg_len;
542 static void dw_mci_pre_req(struct mmc_host *mmc,
543 struct mmc_request *mrq,
546 struct dw_mci_slot *slot = mmc_priv(mmc);
547 struct mmc_data *data = mrq->data;
549 if (!slot->host->use_dma || !data)
552 if (data->host_cookie) {
553 data->host_cookie = 0;
557 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
558 data->host_cookie = 0;
561 static void dw_mci_post_req(struct mmc_host *mmc,
562 struct mmc_request *mrq,
565 struct dw_mci_slot *slot = mmc_priv(mmc);
566 struct mmc_data *data = mrq->data;
568 if (!slot->host->use_dma || !data)
571 if (data->host_cookie)
572 dma_unmap_sg(slot->host->dev,
575 dw_mci_get_dma_dir(data));
576 data->host_cookie = 0;
/*
 * Pick DMA burst size (MSIZE) and RX/TX watermarks for the FIFOTH register
 * based on the block size: the largest burst that divides both the
 * block-size-in-FIFO-words and the TX watermark complement.  Falls back to
 * MSIZE=1, rx_wmark=1 when the block size is not FIFO-width aligned.
 * Only meaningful with the internal DMAC.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
620 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
622 unsigned int blksz = data->blksz;
623 u32 blksz_depth, fifo_depth;
626 WARN_ON(!(data->flags & MMC_DATA_READ));
628 if (host->timing != MMC_TIMING_MMC_HS200 &&
629 host->timing != MMC_TIMING_UHS_SDR104)
632 blksz_depth = blksz / (1 << host->data_shift);
633 fifo_depth = host->fifo_depth;
635 if (blksz_depth > fifo_depth)
639 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
640 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
641 * Currently just choose blksz.
644 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
648 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
651 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
658 /* If we don't have a channel, we can't do DMA */
662 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
664 host->dma_ops->stop(host);
671 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
672 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
676 * Decide the MSIZE and RX/TX Watermark.
677 * If current block size is same with previous size,
678 * no need to update fifoth.
680 if (host->prev_blksz != data->blksz)
681 dw_mci_adjust_fifoth(host, data);
683 /* Enable the DMA interface */
684 temp = mci_readl(host, CTRL);
685 temp |= SDMMC_CTRL_DMA_ENABLE;
686 mci_writel(host, CTRL, temp);
688 /* Disable RX/TX IRQs, let DMA handle it */
689 temp = mci_readl(host, INTMASK);
690 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
691 mci_writel(host, INTMASK, temp);
693 host->dma_ops->start(host, sg_len);
698 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
702 data->error = -EINPROGRESS;
708 if (data->flags & MMC_DATA_READ) {
709 host->dir_status = DW_MCI_RECV_STATUS;
710 dw_mci_ctrl_rd_thld(host, data);
712 host->dir_status = DW_MCI_SEND_STATUS;
715 if (dw_mci_submit_data_dma(host, data)) {
716 int flags = SG_MITER_ATOMIC;
717 if (host->data->flags & MMC_DATA_READ)
718 flags |= SG_MITER_TO_SG;
720 flags |= SG_MITER_FROM_SG;
722 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
724 host->part_buf_start = 0;
725 host->part_buf_count = 0;
727 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
728 temp = mci_readl(host, INTMASK);
729 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
730 mci_writel(host, INTMASK, temp);
732 temp = mci_readl(host, CTRL);
733 temp &= ~SDMMC_CTRL_DMA_ENABLE;
734 mci_writel(host, CTRL, temp);
737 * Use the initial fifoth_val for PIO mode.
738 * If next issued data may be transfered by DMA mode,
739 * prev_blksz should be invalidated.
741 mci_writel(host, FIFOTH, host->fifoth_val);
742 host->prev_blksz = 0;
745 * Keep the current block size.
746 * It will be used to decide whether to update
747 * fifoth register next time.
749 host->prev_blksz = data->blksz;
753 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
755 struct dw_mci *host = slot->host;
756 unsigned long timeout = jiffies + msecs_to_jiffies(500);
757 unsigned int cmd_status = 0;
759 mci_writel(host, CMDARG, arg);
761 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
763 while (time_before(jiffies, timeout)) {
764 cmd_status = mci_readl(host, CMD);
765 if (!(cmd_status & SDMMC_CMD_START))
768 dev_err(&slot->mmc->class_dev,
769 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
770 cmd, arg, cmd_status);
/*
 * Program the card clock (CLKDIV/CLKENA/CLKSRC) and bus width (CTYPE) for
 * @slot.  Each clock-register update is latched into the controller by an
 * SDMMC_CMD_UPD_CLK pseudo-command (the mci_send_cmd() calls).
 *
 * NOTE(review): several lines of this function (the "if (!clock)" branch
 * head, the divider computation's braces and the "div"/"clk_en_a"
 * declarations) are missing from this extract -- compare with the complete
 * source before modifying.
 */
773 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
775 struct dw_mci *host = slot->host;
776 unsigned int clock = slot->clock;
/* clock == 0: gate the card clock entirely. */
781 mci_writel(host, CLKENA, 0);
783 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
784 } else if (clock != host->current_speed || force_clkinit) {
785 div = host->bus_hz / clock;
786 if (host->bus_hz % clock && host->bus_hz > clock)
788 * move the + 1 after the divide to prevent
789 * over-clocking the card.
793 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
/* Log only when the effective divided clock actually changes. */
795 if ((clock << div) != slot->__clk_old || force_clkinit)
796 dev_info(&slot->mmc->class_dev,
797 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
798 slot->id, host->bus_hz, clock,
799 div ? ((host->bus_hz / div) >> 1) :
/* Disable clock, program divider, then re-enable -- each latched by UPD_CLK. */
803 mci_writel(host, CLKENA, 0);
804 mci_writel(host, CLKSRC, 0);
808 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
810 /* set clock to desired speed */
811 mci_writel(host, CLKDIV, div);
815 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
817 /* enable clock; only low power if no SDIO */
818 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
819 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
820 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
821 mci_writel(host, CLKENA, clk_en_a);
825 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
827 /* keep the clock with reflecting clock dividor */
828 slot->__clk_old = clock << div;
831 host->current_speed = clock;
833 /* Set the current slot bus width */
834 mci_writel(host, CTYPE, (slot->ctype << slot->id));
/*
 * Start @cmd on @slot: reset per-request host state, program the data
 * registers (TMOUT/BYTCNT/BLKSIZ) when the command has a data stage, then
 * issue the command.  A stop/abort command word is precomputed into
 * host->stop_cmdr so the interrupt path can issue it without extra work.
 *
 * NOTE(review): the "mrq = slot->mrq;"/"host->mrq = mrq;" setup, the
 * "if (data)" guards and the "u32 cmdflags" declaration are missing from
 * this extract -- confirm against the complete source.
 */
837 static void __dw_mci_start_request(struct dw_mci *host,
838 struct dw_mci_slot *slot,
839 struct mmc_command *cmd)
841 struct mmc_request *mrq;
842 struct mmc_data *data;
847 host->cur_slot = slot;
/* Clear all per-request event and status tracking. */
850 host->pending_events = 0;
851 host->completed_events = 0;
852 host->cmd_status = 0;
853 host->data_status = 0;
854 host->dir_status = 0;
/* Data stage: maximum timeout, total byte count, block size. */
858 mci_writel(host, TMOUT, 0xFFFFFFFF);
859 mci_writel(host, BYTCNT, data->blksz*data->blocks);
860 mci_writel(host, BLKSIZ, data->blksz);
863 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
865 /* this is the first command, send the initialization clock */
866 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
867 cmdflags |= SDMMC_CMD_INIT;
870 dw_mci_submit_data(host, data);
874 dw_mci_start_command(host, cmd, cmdflags);
/* Precompute the stop/abort command word for the interrupt path. */
877 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
879 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
882 static void dw_mci_start_request(struct dw_mci *host,
883 struct dw_mci_slot *slot)
885 struct mmc_request *mrq = slot->mrq;
886 struct mmc_command *cmd;
888 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
889 __dw_mci_start_request(host, slot, cmd);
892 /* must be called with host->lock held */
893 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
894 struct mmc_request *mrq)
896 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
901 if (host->state == STATE_IDLE) {
902 host->state = STATE_SENDING_CMD;
903 dw_mci_start_request(host, slot);
905 list_add_tail(&slot->queue_node, &host->queue);
909 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
911 struct dw_mci_slot *slot = mmc_priv(mmc);
912 struct dw_mci *host = slot->host;
917 * The check for card presence and queueing of the request must be
918 * atomic, otherwise the card could be removed in between and the
919 * request wouldn't fail until another card was inserted.
921 spin_lock_bh(&host->lock);
923 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
924 spin_unlock_bh(&host->lock);
925 mrq->cmd->error = -ENOMEDIUM;
926 mmc_request_done(mmc, mrq);
930 dw_mci_queue_request(host, slot, mrq);
932 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus width (CTYPE), DDR mode (UHS_REG bit 16
 * per slot), timing, clock and power state requested by the mmc core.
 *
 * NOTE(review): the switch-case "break" statements, the MMC_POWER_UP /
 * MMC_POWER_OFF case labels and the "u32 regs" declaration are missing
 * from this extract -- confirm against the complete source.
 */
935 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
937 struct dw_mci_slot *slot = mmc_priv(mmc);
938 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
941 switch (ios->bus_width) {
942 case MMC_BUS_WIDTH_4:
943 slot->ctype = SDMMC_CTYPE_4BIT;
945 case MMC_BUS_WIDTH_8:
946 slot->ctype = SDMMC_CTYPE_8BIT;
949 /* set default 1 bit mode */
950 slot->ctype = SDMMC_CTYPE_1BIT;
/* DDR mode: per-slot bit in the upper half of UHS_REG. */
953 regs = mci_readl(slot->host, UHS_REG);
956 if (ios->timing == MMC_TIMING_MMC_DDR52)
957 regs |= ((0x1 << slot->id) << 16);
959 regs &= ~((0x1 << slot->id) << 16);
961 mci_writel(slot->host, UHS_REG, regs);
962 slot->host->timing = ios->timing;
965 * Use mirror of ios->clock to prevent race with mmc
966 * core ios update when finding the minimum.
968 slot->clock = ios->clock;
970 if (drv_data && drv_data->set_ios)
971 drv_data->set_ios(slot->host, ios);
973 /* Slot specific timing and width adjustment */
974 dw_mci_setup_bus(slot, false);
/* Power handling: per-slot bit in PWREN. */
976 switch (ios->power_mode) {
978 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
979 regs = mci_readl(slot->host, PWREN);
980 regs |= (1 << slot->id);
981 mci_writel(slot->host, PWREN, regs);
984 regs = mci_readl(slot->host, PWREN);
985 regs &= ~(1 << slot->id);
986 mci_writel(slot->host, PWREN, regs);
993 static int dw_mci_get_ro(struct mmc_host *mmc)
996 struct dw_mci_slot *slot = mmc_priv(mmc);
998 /* Use platform get_ro function, else try on board write protect */
999 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1001 else if (gpio_is_valid(slot->wp_gpio))
1002 read_only = gpio_get_value(slot->wp_gpio);
1005 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1007 dev_dbg(&mmc->class_dev, "card is %s\n",
1008 read_only ? "read-only" : "read-write");
1013 static int dw_mci_get_cd(struct mmc_host *mmc)
1016 struct dw_mci_slot *slot = mmc_priv(mmc);
1017 struct dw_mci_board *brd = slot->host->pdata;
1018 struct dw_mci *host = slot->host;
1019 int gpio_cd = mmc_gpio_get_cd(mmc);
1021 /* Use platform get_cd function, else try onboard card detect */
1022 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1024 else if (!IS_ERR_VALUE(gpio_cd))
1027 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1030 spin_lock_bh(&host->lock);
1032 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1033 dev_dbg(&mmc->class_dev, "card is present\n");
1035 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1036 dev_dbg(&mmc->class_dev, "card is not present\n");
1038 spin_unlock_bh(&host->lock);
1044 * Disable lower power mode.
1046 * Low power mode will stop the card clock when idle. According to the
1047 * description of the CLKENA register we should disable low power mode
1048 * for SDIO cards if we need SDIO interrupts to work.
1050 * This function is fast if low power mode is already disabled.
1052 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1054 struct dw_mci *host = slot->host;
1056 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1058 clk_en_a = mci_readl(host, CLKENA);
1060 if (clk_en_a & clken_low_pwr) {
1061 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1062 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1063 SDMMC_CMD_PRV_DAT_WAIT, 0);
1067 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1069 struct dw_mci_slot *slot = mmc_priv(mmc);
1070 struct dw_mci *host = slot->host;
1073 /* Enable/disable Slot Specific SDIO interrupt */
1074 int_mask = mci_readl(host, INTMASK);
1077 * Turn off low power mode if it was enabled. This is a bit of
1078 * a heavy operation and we disable / enable IRQs a lot, so
1079 * we'll leave low power mode disabled and it will get
1080 * re-enabled again in dw_mci_setup_bus().
1082 dw_mci_disable_low_power(slot);
1084 mci_writel(host, INTMASK,
1085 (int_mask | SDMMC_INT_SDIO(slot->id)));
1087 mci_writel(host, INTMASK,
1088 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1092 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1094 struct dw_mci_slot *slot = mmc_priv(mmc);
1095 struct dw_mci *host = slot->host;
1096 const struct dw_mci_drv_data *drv_data = host->drv_data;
1097 struct dw_mci_tuning_data tuning_data;
1100 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1101 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1102 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1103 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1104 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1105 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1106 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1110 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1111 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1112 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1115 "Undefined command(%d) for tuning\n", opcode);
1119 if (drv_data && drv_data->execute_tuning)
1120 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1124 static const struct mmc_host_ops dw_mci_ops = {
1125 .request = dw_mci_request,
1126 .pre_req = dw_mci_pre_req,
1127 .post_req = dw_mci_post_req,
1128 .set_ios = dw_mci_set_ios,
1129 .get_ro = dw_mci_get_ro,
1130 .get_cd = dw_mci_get_cd,
1131 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1132 .execute_tuning = dw_mci_execute_tuning,
1135 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1136 __releases(&host->lock)
1137 __acquires(&host->lock)
1139 struct dw_mci_slot *slot;
1140 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1142 WARN_ON(host->cmd || host->data);
1144 host->cur_slot->mrq = NULL;
1146 if (!list_empty(&host->queue)) {
1147 slot = list_entry(host->queue.next,
1148 struct dw_mci_slot, queue_node);
1149 list_del(&slot->queue_node);
1150 dev_vdbg(host->dev, "list not empty: %s is next\n",
1151 mmc_hostname(slot->mmc));
1152 host->state = STATE_SENDING_CMD;
1153 dw_mci_start_request(host, slot);
1155 dev_vdbg(host->dev, "list empty\n");
1156 host->state = STATE_IDLE;
1159 spin_unlock(&host->lock);
1160 mmc_request_done(prev_mmc, mrq);
1161 spin_lock(&host->lock);
1164 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1166 u32 status = host->cmd_status;
1168 host->cmd_status = 0;
1170 /* Read the response from the card (up to 16 bytes) */
1171 if (cmd->flags & MMC_RSP_PRESENT) {
1172 if (cmd->flags & MMC_RSP_136) {
1173 cmd->resp[3] = mci_readl(host, RESP0);
1174 cmd->resp[2] = mci_readl(host, RESP1);
1175 cmd->resp[1] = mci_readl(host, RESP2);
1176 cmd->resp[0] = mci_readl(host, RESP3);
1178 cmd->resp[0] = mci_readl(host, RESP0);
1185 if (status & SDMMC_INT_RTO)
1186 cmd->error = -ETIMEDOUT;
1187 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1188 cmd->error = -EILSEQ;
1189 else if (status & SDMMC_INT_RESP_ERR)
1195 /* newer ip versions need a delay between retries */
1196 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1203 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1205 u32 status = host->data_status;
1207 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1208 if (status & SDMMC_INT_DRTO) {
1209 data->error = -ETIMEDOUT;
1210 } else if (status & SDMMC_INT_DCRC) {
1211 data->error = -EILSEQ;
1212 } else if (status & SDMMC_INT_EBE) {
1213 if (host->dir_status ==
1214 DW_MCI_SEND_STATUS) {
1216 * No data CRC status was returned.
1217 * The number of bytes transferred
1218 * will be exaggerated in PIO mode.
1220 data->bytes_xfered = 0;
1221 data->error = -ETIMEDOUT;
1222 } else if (host->dir_status ==
1223 DW_MCI_RECV_STATUS) {
1227 /* SDMMC_INT_SBE is included */
1231 dev_err(host->dev, "data error, status 0x%08x\n", status);
1234 * After an error, there may be data lingering
1237 dw_mci_fifo_reset(host);
1239 data->bytes_xfered = data->blocks * data->blksz;
/*
 * Request state machine, run as a tasklet under host->lock.  It advances
 * host->state through SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP / DATA_ERROR based on the event bits set by the interrupt
 * handler in host->pending_events, looping until the state stops changing.
 *
 * NOTE(review): this extract is missing many lines of the original state
 * machine (the do { ... } opener, the "unsigned int err" declaration, the
 * cmd/data/mrq fetches, several break/goto-unlock statements and the
 * default case) -- compare with the complete source before modifying.
 */
1246 static void dw_mci_tasklet_func(unsigned long priv)
1248 struct dw_mci *host = (struct dw_mci *)priv;
1249 struct mmc_data *data;
1250 struct mmc_command *cmd;
1251 struct mmc_request *mrq;
1252 enum dw_mci_state state;
1253 enum dw_mci_state prev_state;
1256 spin_lock(&host->lock);
1258 state = host->state;
/* Command phase: wait for CMD_COMPLETE, then evaluate the response. */
1269 case STATE_SENDING_CMD:
1270 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1271 &host->pending_events))
1276 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1277 err = dw_mci_command_complete(host, cmd);
/* A successful CMD23 chains straight into the real command. */
1278 if (cmd == mrq->sbc && !err) {
1279 prev_state = state = STATE_SENDING_CMD;
1280 __dw_mci_start_request(host, host->cur_slot,
1285 if (cmd->data && err) {
1286 dw_mci_stop_dma(host);
1287 send_stop_abort(host, data);
1288 state = STATE_SENDING_STOP;
1292 if (!cmd->data || err) {
1293 dw_mci_request_end(host, mrq);
1297 prev_state = state = STATE_SENDING_DATA;
/* Data phase: abort on DATA_ERROR, else wait for XFER_COMPLETE. */
1300 case STATE_SENDING_DATA:
1301 if (test_and_clear_bit(EVENT_DATA_ERROR,
1302 &host->pending_events)) {
1303 dw_mci_stop_dma(host);
1304 send_stop_abort(host, data);
1305 state = STATE_DATA_ERROR;
1309 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1310 &host->pending_events))
1313 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1314 prev_state = state = STATE_DATA_BUSY;
/* Busy phase: wait for DATA_COMPLETE, then finish or send stop. */
1317 case STATE_DATA_BUSY:
1318 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1319 &host->pending_events))
1323 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1324 err = dw_mci_data_complete(host, data);
1327 if (!data->stop || mrq->sbc) {
1328 if (mrq->sbc && data->stop)
1329 data->stop->error = 0;
1330 dw_mci_request_end(host, mrq);
1334 /* stop command for open-ended transfer*/
1336 send_stop_abort(host, data);
1340 * If err has non-zero,
1341 * stop-abort command has been already issued.
1343 prev_state = state = STATE_SENDING_STOP;
/* Stop phase: wait for the stop command to complete, then end request. */
1347 case STATE_SENDING_STOP:
1348 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1349 &host->pending_events))
1352 /* CMD error in data command */
1353 if (mrq->cmd->error && mrq->data)
1354 dw_mci_fifo_reset(host);
1360 dw_mci_command_complete(host, mrq->stop);
1362 host->cmd_status = 0;
1364 dw_mci_request_end(host, mrq);
/* Error phase: drain the transfer, then re-enter DATA_BUSY handling. */
1367 case STATE_DATA_ERROR:
1368 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1369 &host->pending_events))
1372 state = STATE_DATA_BUSY;
1375 } while (state != prev_state);
1377 host->state = state;
1379 spin_unlock(&host->lock);
1383 /* push final bytes to part_buf, only use during push */
1384 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1386 memcpy((void *)&host->part_buf, buf, cnt);
1387 host->part_buf_count = cnt;
1390 /* append bytes to part_buf, only use during push */
1391 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1393 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1394 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1395 host->part_buf_count += cnt;
1399 /* pull first bytes from part_buf, only use during pull */
1400 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1402 cnt = min(cnt, (int)host->part_buf_count);
1404 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1406 host->part_buf_count -= cnt;
1407 host->part_buf_start += cnt;
1412 /* pull final bytes from the part_buf, assuming it's just been filled */
1413 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1415 memcpy(buf, &host->part_buf, cnt);
1416 host->part_buf_start = cnt;
1417 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit host data width: move @cnt bytes from @buf into
 * the data FIFO in u16 units.  Any leftover odd byte is parked in
 * host->part_buf via dw_mci_set_part_bytes(), and flushed early only when
 * the transfer has reached its expected total length.
 * NOTE(review): this listing is a sampled excerpt; some statements
 * (braces, pdata declaration) fall on lines not shown here.
 */
1420 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1422 struct mmc_data *data = host->data;
/* First drain any bytes left over from a previous partial push. */
1425 /* try and push anything in the part_buf */
1426 if (unlikely(host->part_buf_count)) {
1427 int len = dw_mci_push_part_bytes(host, buf, cnt);
1430 if (host->part_buf_count == 2) {
1431 mci_writew(host, DATA(host->data_offset),
1433 host->part_buf_count = 0;
/* Without efficient unaligned access, bounce through an aligned buffer. */
1436 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1437 if (unlikely((unsigned long)buf & 0x1)) {
1439 u16 aligned_buf[64];
/* cnt & -2 rounds down to a whole number of 16-bit words. */
1440 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1441 int items = len >> 1;
1443 /* memcpy from input buffer into aligned buffer */
1444 memcpy(aligned_buf, buf, len);
1447 /* push data from aligned buffer into fifo */
1448 for (i = 0; i < items; ++i)
1449 mci_writew(host, DATA(host->data_offset),
/* Fast path: buffer already 16-bit aligned, write words directly. */
1456 for (; cnt >= 2; cnt -= 2)
1457 mci_writew(host, DATA(host->data_offset), *pdata++);
1460 /* put anything remaining in the part_buf */
1462 dw_mci_set_part_bytes(host, buf, cnt);
1463 /* Push data if we have reached the expected data length */
1464 if ((data->bytes_xfered + init_cnt) ==
1465 (data->blksz * data->blocks))
1466 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit host data width: read @cnt bytes from the data
 * FIFO into @buf in u16 units; a final odd byte is served out of the last
 * FIFO word via host->part_buf16 / dw_mci_pull_final_bytes().
 */
1471 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1473 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Unaligned destination: bounce FIFO words through an aligned buffer. */
1474 if (unlikely((unsigned long)buf & 0x1)) {
1476 /* pull data from fifo into aligned buffer */
1477 u16 aligned_buf[64];
1478 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1479 int items = len >> 1;
1481 for (i = 0; i < items; ++i)
1482 aligned_buf[i] = mci_readw(host,
1483 DATA(host->data_offset));
1484 /* memcpy from aligned buffer into output buffer */
1485 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read 16-bit words straight into the buffer. */
1493 for (; cnt >= 2; cnt -= 2)
1494 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Trailing byte: latch one more FIFO word, hand out only what's needed. */
1498 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1499 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit host data width; same structure as
 * dw_mci_push_data16() but in u32 units (trailing 1-3 bytes go to
 * part_buf and are flushed when the transfer total is reached).
 */
1503 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1505 struct mmc_data *data = host->data;
1508 /* try and push anything in the part_buf */
1509 if (unlikely(host->part_buf_count)) {
1510 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* A full 32-bit word accumulated: write it out and reset the buffer. */
1513 if (host->part_buf_count == 4) {
1514 mci_writel(host, DATA(host->data_offset),
1516 host->part_buf_count = 0;
1519 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1520 if (unlikely((unsigned long)buf & 0x3)) {
1522 u32 aligned_buf[32];
/* cnt & -4 rounds down to a whole number of 32-bit words. */
1523 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1524 int items = len >> 2;
1526 /* memcpy from input buffer into aligned buffer */
1527 memcpy(aligned_buf, buf, len);
1530 /* push data from aligned buffer into fifo */
1531 for (i = 0; i < items; ++i)
1532 mci_writel(host, DATA(host->data_offset),
/* Aligned fast path. */
1539 for (; cnt >= 4; cnt -= 4)
1540 mci_writel(host, DATA(host->data_offset), *pdata++);
1543 /* put anything remaining in the part_buf */
1545 dw_mci_set_part_bytes(host, buf, cnt);
1546 /* Push data if we have reached the expected data length */
1547 if ((data->bytes_xfered + init_cnt) ==
1548 (data->blksz * data->blocks))
1549 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit host data width; mirror of dw_mci_pull_data16()
 * in u32 units, with the final 1-3 bytes served via host->part_buf32.
 */
1554 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1556 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1557 if (unlikely((unsigned long)buf & 0x3)) {
1559 /* pull data from fifo into aligned buffer */
1560 u32 aligned_buf[32];
1561 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1562 int items = len >> 2;
1564 for (i = 0; i < items; ++i)
1565 aligned_buf[i] = mci_readl(host,
1566 DATA(host->data_offset));
1567 /* memcpy from aligned buffer into output buffer */
1568 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read 32-bit words straight into the buffer. */
1576 for (; cnt >= 4; cnt -= 4)
1577 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing bytes come out of one final latched FIFO word. */
1581 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1582 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit host data width; same structure as the 16/32-bit
 * variants but in u64 units (trailing 1-7 bytes buffered in part_buf).
 */
1586 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1588 struct mmc_data *data = host->data;
1591 /* try and push anything in the part_buf */
1592 if (unlikely(host->part_buf_count)) {
1593 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* A full 64-bit word accumulated: write it out and reset the buffer. */
1597 if (host->part_buf_count == 8) {
1598 mci_writeq(host, DATA(host->data_offset),
1600 host->part_buf_count = 0;
1603 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1604 if (unlikely((unsigned long)buf & 0x7)) {
1606 u64 aligned_buf[16];
/* cnt & -8 rounds down to a whole number of 64-bit words. */
1607 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1608 int items = len >> 3;
1610 /* memcpy from input buffer into aligned buffer */
1611 memcpy(aligned_buf, buf, len);
1614 /* push data from aligned buffer into fifo */
1615 for (i = 0; i < items; ++i)
1616 mci_writeq(host, DATA(host->data_offset),
/* Aligned fast path. */
1623 for (; cnt >= 8; cnt -= 8)
1624 mci_writeq(host, DATA(host->data_offset), *pdata++);
1627 /* put anything remaining in the part_buf */
1629 dw_mci_set_part_bytes(host, buf, cnt);
1630 /* Push data if we have reached the expected data length */
1631 if ((data->bytes_xfered + init_cnt) ==
1632 (data->blksz * data->blocks))
1633 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit host data width; mirror of the 16/32-bit pull
 * helpers in u64 units, trailing bytes served via host->part_buf.
 */
1638 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1640 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1641 if (unlikely((unsigned long)buf & 0x7)) {
1643 /* pull data from fifo into aligned buffer */
1644 u64 aligned_buf[16];
1645 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1646 int items = len >> 3;
1648 for (i = 0; i < items; ++i)
1649 aligned_buf[i] = mci_readq(host,
1650 DATA(host->data_offset));
1651 /* memcpy from aligned buffer into output buffer */
1652 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read 64-bit words straight into the buffer. */
1660 for (; cnt >= 8; cnt -= 8)
1661 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Trailing bytes come out of one final latched FIFO word. */
1665 host->part_buf = mci_readq(host, DATA(host->data_offset));
1666 dw_mci_pull_final_bytes(host, buf, cnt);
1670 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1674 /* get remaining partial bytes */
1675 len = dw_mci_pull_part_bytes(host, buf, cnt);
1676 if (unlikely(len == cnt))
1681 /* get the rest of the data */
1682 host->pull_data(host, buf, cnt);
/*
 * PIO receive path. Walks the request's scatterlist with an sg_miter,
 * draining the FIFO (FCNT words + any part_buf leftovers) into each sg
 * chunk.  Loops while RXDR stays asserted, and on DTO (@dto) also drains
 * whatever FCNT still reports.  On sg exhaustion, marks XFER_COMPLETE.
 * NOTE(review): excerpt is sampled; labels/gotos between 1722-1736 are
 * not all visible here.
 */
1685 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1687 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1689 unsigned int offset;
1690 struct mmc_data *data = host->data;
1691 int shift = host->data_shift;
1694 unsigned int remain, fcnt;
1697 if (!sg_miter_next(sg_miter))
1700 host->sg = sg_miter->piter.sg;
1701 buf = sg_miter->addr;
1702 remain = sg_miter->length;
/* Bytes available = FIFO count scaled to bytes, plus buffered partials. */
1706 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1707 << shift) + host->part_buf_count;
1708 len = min(remain, fcnt);
1711 dw_mci_pull_data(host, (void *)(buf + offset), len);
1712 data->bytes_xfered += len;
1717 sg_miter->consumed = offset;
/* Re-sample interrupt status before acking RXDR to decide on another pass. */
1718 status = mci_readl(host, MINTSTS);
1719 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1720 /* if the RXDR is ready read again */
1721 } while ((status & SDMMC_INT_RXDR) ||
1722 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))))
1725 if (!sg_miter_next(sg_miter))
1727 sg_miter->consumed = 0;
1729 sg_miter_stop(sg_miter);
1733 sg_miter_stop(sg_miter);
1736 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO transmit path; mirror of dw_mci_read_data_pio(). Fills the FIFO
 * from each scatterlist chunk, bounded by the free FIFO space
 * (fifo_depth - FCNT) minus bytes already parked in part_buf, looping
 * while TXDR stays asserted.  On sg exhaustion, marks XFER_COMPLETE.
 */
1739 static void dw_mci_write_data_pio(struct dw_mci *host)
1741 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1743 unsigned int offset;
1744 struct mmc_data *data = host->data;
1745 int shift = host->data_shift;
1748 unsigned int fifo_depth = host->fifo_depth;
1749 unsigned int remain, fcnt;
1752 if (!sg_miter_next(sg_miter))
1755 host->sg = sg_miter->piter.sg;
1756 buf = sg_miter->addr;
1757 remain = sg_miter->length;
/* Free space = (depth - occupied words) in bytes, minus buffered partials. */
1761 fcnt = ((fifo_depth -
1762 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1763 << shift) - host->part_buf_count;
1764 len = min(remain, fcnt);
1767 host->push_data(host, (void *)(buf + offset), len);
1768 data->bytes_xfered += len;
1773 sg_miter->consumed = offset;
/* Re-sample status before acking TXDR to decide on another pass. */
1774 status = mci_readl(host, MINTSTS);
1775 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1776 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1779 if (!sg_miter_next(sg_miter))
1781 sg_miter->consumed = 0;
1783 sg_miter_stop(sg_miter);
1787 sg_miter_stop(sg_miter);
1790 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Latch a command-done interrupt: record the raw status (only if no
 * earlier status is pending), flag CMD_COMPLETE, and kick the tasklet
 * that runs the request state machine.
 */
1793 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1795 if (!host->cmd_status)
1796 host->cmd_status = status;
1800 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1801 tasklet_schedule(&host->tasklet);
/*
 * Top-level interrupt handler. Reads the masked interrupt status
 * (MINTSTS), acks each handled source in RINTSTS, records status for the
 * tasklet, and services PIO data (RXDR/TXDR) inline.  Also covers the
 * per-slot SDIO interrupts and, with internal IDMAC, the DMA completion
 * interrupts (IDSTS).
 */
1804 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1806 struct dw_mci *host = dev_id;
1810 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
/*
 * Quirk: controllers <= 2.10a with internal DMA can miss the DTO
 * interrupt; synthesize DATA_OVER from the FIFO count when needed.
 */
1813 * DTO fix - version 2.10a and below, and only if internal DMA
1816 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1818 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1819 pending |= SDMMC_INT_DATA_OVER;
1823 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1824 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1825 host->cmd_status = pending;
1827 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1830 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1831 /* if there is an error report DATA_ERROR */
1832 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1833 host->data_status = pending;
1835 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1836 tasklet_schedule(&host->tasklet);
1839 if (pending & SDMMC_INT_DATA_OVER) {
1840 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1841 if (!host->data_status)
1842 host->data_status = pending;
/* On receive, drain any residue left in the FIFO before completing. */
1844 if (host->dir_status == DW_MCI_RECV_STATUS) {
1845 if (host->sg != NULL)
1846 dw_mci_read_data_pio(host, true);
1848 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1849 tasklet_schedule(&host->tasklet);
1852 if (pending & SDMMC_INT_RXDR) {
1853 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1854 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1855 dw_mci_read_data_pio(host, false);
1858 if (pending & SDMMC_INT_TXDR) {
1859 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1860 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1861 dw_mci_write_data_pio(host);
1864 if (pending & SDMMC_INT_CMD_DONE) {
1865 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1866 dw_mci_cmd_interrupt(host, pending);
/* Card-detect: defer debounce/cleanup to the card workqueue. */
1869 if (pending & SDMMC_INT_CD) {
1870 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1871 queue_work(host->card_workqueue, &host->card_work);
1874 /* Handle SDIO Interrupts */
1875 for (i = 0; i < host->num_slots; i++) {
1876 struct dw_mci_slot *slot = host->slot[i];
1877 if (pending & SDMMC_INT_SDIO(i)) {
1878 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1879 mmc_signal_sdio_irq(slot->mmc);
1885 #ifdef CONFIG_MMC_DW_IDMAC
1886 /* Handle DMA interrupts */
1887 pending = mci_readl(host, IDSTS);
1888 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1889 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1890 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1891 host->dma_ops->complete(host);
/*
 * Card-detect workqueue handler. For each slot, compares the current
 * card-present state against the last seen state and, on a change,
 * fails any in-flight or queued request with -ENOMEDIUM (stopping DMA
 * and resetting the FIFO/IDMAC as needed) before notifying the MMC core
 * via mmc_detect_change().  Runs under host->lock while touching the
 * request state machine.
 */
1898 static void dw_mci_work_routine_card(struct work_struct *work)
1900 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1903 for (i = 0; i < host->num_slots; i++) {
1904 struct dw_mci_slot *slot = host->slot[i];
1905 struct mmc_host *mmc = slot->mmc;
1906 struct mmc_request *mrq;
1909 present = dw_mci_get_cd(mmc);
/* Loop until the observed state matches what we've recorded. */
1910 while (present != slot->last_detect_state) {
1911 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1912 present ? "inserted" : "removed");
1914 spin_lock_bh(&host->lock);
1916 /* Card change detected */
1917 slot->last_detect_state = present;
1919 /* Clean up queue if present */
1922 if (mrq == host->mrq) {
/* Active request: fail it according to how far the state machine got. */
1926 switch (host->state) {
1929 case STATE_SENDING_CMD:
1930 mrq->cmd->error = -ENOMEDIUM;
1934 case STATE_SENDING_DATA:
1935 mrq->data->error = -ENOMEDIUM;
1936 dw_mci_stop_dma(host);
1938 case STATE_DATA_BUSY:
1939 case STATE_DATA_ERROR:
1940 if (mrq->data->error == -EINPROGRESS)
1941 mrq->data->error = -ENOMEDIUM;
1943 case STATE_SENDING_STOP:
1945 mrq->stop->error = -ENOMEDIUM;
1949 dw_mci_request_end(host, mrq);
/* Queued-but-not-started request: fail it and complete directly. */
1951 list_del(&slot->queue_node);
1952 mrq->cmd->error = -ENOMEDIUM;
1954 mrq->data->error = -ENOMEDIUM;
1956 mrq->stop->error = -ENOMEDIUM;
1958 spin_unlock(&host->lock);
1959 mmc_request_done(slot->mmc, mrq);
1960 spin_lock(&host->lock);
1964 /* Power down slot */
1966 /* Clear down the FIFO */
1967 dw_mci_fifo_reset(host);
1968 #ifdef CONFIG_MMC_DW_IDMAC
1969 dw_mci_idmac_reset(host);
1974 spin_unlock_bh(&host->lock);
/* Re-read in case the card state changed again while we worked. */
1976 present = dw_mci_get_cd(mmc);
1979 mmc_detect_change(slot->mmc,
1980 msecs_to_jiffies(host->pdata->detect_delay_ms));
1985 /* given a slot id, find out the device node representing that slot */
/*
 * Returns the child DT node whose "reg" property equals @slot, by
 * walking the controller node's children.  Requires a valid of_node.
 */
1986 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1988 struct device_node *np;
1992 if (!dev || !dev->of_node)
1995 for_each_child_of_node(dev->of_node, np) {
1996 addr = of_get_property(np, "reg", &len);
/* Skip children with no usable "reg" cell. */
1997 if (!addr || (len < sizeof(int)))
1999 if (be32_to_cpup(addr) == slot)
/* Map per-slot DT boolean properties onto DW_MCI_SLOT_QUIRK_* flags. */
2005 static struct dw_mci_of_slot_quirks {
2008 } of_slot_quirks[] = {
2010 .quirk = "disable-wp",
2011 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk flags declared in the DT node for @slot by checking
 * each known property name; returns the OR of the matching quirk IDs.
 */
2015 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2017 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2022 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2023 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2024 quirks |= of_slot_quirks[idx].id;
2029 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Looks up "wp-gpios" in the slot's DT node and claims the GPIO via
 * devm (auto-released).  A missing property is not an error.
 */
2030 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2032 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2038 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2040 /* Having a missing entry is valid; return silently */
2041 if (!gpio_is_valid(gpio))
2044 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2045 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2052 /* find the cd gpio for a given slot */
/*
 * Looks up "cd-gpios" in the slot's DT node and registers it with the
 * MMC slot-gpio helper (mmc_gpio_request_cd) for card-detect.  A
 * missing property is not an error; a failed request only warns.
 */
2053 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2054 struct mmc_host *mmc)
2056 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2062 gpio = of_get_named_gpio(np, "cd-gpios", 0);
2064 /* Having a missing entry is valid; return silently */
2065 if (!gpio_is_valid(gpio))
2068 if (mmc_gpio_request_cd(mmc, gpio, 0))
2069 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2071 #else /* CONFIG_OF */
/* No-op stubs used when the kernel is built without device-tree support. */
2072 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2076 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2080 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2084 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2085 struct mmc_host *mmc)
2089 #endif /* CONFIG_OF */
/*
 * Allocate and register one mmc_host for slot @id: applies DT/pdata
 * frequency limits, capability flags, block-size limits (IDMAC vs PIO
 * defaults), WP/CD GPIOs, then mmc_add_host().  Returns 0 on success.
 * NOTE(review): excerpt is sampled; error-unwind labels are not all
 * visible here.
 */
2091 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2093 struct mmc_host *mmc;
2094 struct dw_mci_slot *slot;
2095 const struct dw_mci_drv_data *drv_data = host->drv_data;
2099 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2103 slot = mmc_priv(mmc);
2107 host->slot[id] = slot;
2109 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2111 mmc->ops = &dw_mci_ops;
/* DT "clock-freq-min-max" overrides the driver's default f_min/f_max. */
2112 if (of_property_read_u32_array(host->dev->of_node,
2113 "clock-freq-min-max", freq, 2)) {
2114 mmc->f_min = DW_MCI_FREQ_MIN;
2115 mmc->f_max = DW_MCI_FREQ_MAX;
2117 mmc->f_min = freq[0];
2118 mmc->f_max = freq[1];
2121 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2123 if (host->pdata->caps)
2124 mmc->caps = host->pdata->caps;
2126 if (host->pdata->pm_caps)
2127 mmc->pm_caps = host->pdata->pm_caps;
/* Controller instance id: "mshc" DT alias, else the platform device id. */
2129 if (host->dev->of_node) {
2130 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2134 ctrl_id = to_platform_device(host->dev)->id;
2136 if (drv_data && drv_data->caps)
2137 mmc->caps |= drv_data->caps[ctrl_id];
2139 if (host->pdata->caps2)
2140 mmc->caps2 = host->pdata->caps2;
2144 if (host->pdata->blk_settings) {
2145 mmc->max_segs = host->pdata->blk_settings->max_segs;
2146 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2147 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2148 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2149 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2151 /* Useful defaults if platform data is unset. */
2152 #ifdef CONFIG_MMC_DW_IDMAC
/* IDMAC: segment count/size bounded by the descriptor ring. */
2153 mmc->max_segs = host->ring_size;
2154 mmc->max_blk_size = 65536;
2155 mmc->max_blk_count = host->ring_size;
2156 mmc->max_seg_size = 0x1000;
2157 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2160 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2161 mmc->max_blk_count = 512;
2162 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2163 mmc->max_seg_size = mmc->max_req_size;
2164 #endif /* CONFIG_MMC_DW_IDMAC */
2167 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2168 dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
2170 ret = mmc_add_host(mmc);
2174 #if defined(CONFIG_DEBUG_FS)
2175 dw_mci_init_debugfs(slot);
2178 /* Card initially undetected */
2179 slot->last_detect_state = 0;
/*
 * Tear down one slot: unregister from the MMC core first (quiesces
 * requests), clear the host's slot pointer, then free the mmc_host.
 */
2188 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2190 /* Debugfs stuff is cleaned up by mmc core */
2191 mmc_remove_host(slot->mmc);
2192 slot->host->slot[id] = NULL;
2193 mmc_free_host(slot->mmc);
/*
 * Set up the DMA path: allocate a page of coherent memory for the
 * descriptor/sg area, pick the DMA interface (internal IDMAC when
 * CONFIG_MMC_DW_IDMAC, else platform-supplied ops), and initialize it.
 * Any failure falls back to PIO mode (no return value; host->use_dma is
 * presumably cleared on the fallback path — lines not visible here).
 */
2196 static void dw_mci_init_dma(struct dw_mci *host)
2198 /* Alloc memory for sg translation */
2199 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2200 &host->sg_dma, GFP_KERNEL);
2201 if (!host->sg_cpu) {
2202 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2207 /* Determine which DMA interface to use */
2208 #ifdef CONFIG_MMC_DW_IDMAC
2209 host->dma_ops = &dw_mci_idmac_ops;
2210 dev_info(host->dev, "Using internal DMA controller.\n");
/* A usable dma_ops must provide the full init/start/stop/cleanup set. */
2216 if (host->dma_ops->init && host->dma_ops->start &&
2217 host->dma_ops->stop && host->dma_ops->cleanup) {
2218 if (host->dma_ops->init(host)) {
2219 dev_err(host->dev, "%s: Unable to initialize "
2220 "DMA Controller.\n", __func__);
2224 dev_err(host->dev, "DMA initialization not found.\n");
2232 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the @reset bits in CTRL and poll (up to 500 ms) for the
 * hardware to self-clear them.  Returns true on success, false (after
 * logging) on timeout.
 */
2237 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2239 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2242 ctrl = mci_readl(host, CTRL);
2244 mci_writel(host, CTRL, ctrl);
2246 /* wait till resets clear */
2248 ctrl = mci_readl(host, CTRL);
2249 if (!(ctrl & reset))
2251 } while (time_before(jiffies, timeout));
2254 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Reset only the FIFO.  Stops the sg_miter first because the reset
 * raises an interrupt and the PIO paths key off host->sg.
 */
2260 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2263 * Reseting generates a block interrupt, hence setting
2264 * the scatter-gather pointer to NULL.
2267 sg_miter_stop(&host->sg_miter);
2271 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full reset: FIFO, controller and DMA blocks in one CTRL write. */
2274 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2276 return dw_mci_ctrl_reset(host,
2277 SDMMC_CTRL_FIFO_RESET |
2279 SDMMC_CTRL_DMA_RESET);
/* Map controller-level DT boolean properties onto DW_MCI_QUIRK_* flags. */
2283 static struct dw_mci_of_quirks {
2288 .quirk = "broken-cd",
2289 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board from the controller's device-tree node:
 * num-slots, quirk flags, fifo-depth, card-detect-delay,
 * clock-frequency, variant-specific parse_dt hook, and the
 * supports-highspeed capability.  Returns the pdata or an ERR_PTR.
 */
2293 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2295 struct dw_mci_board *pdata;
2296 struct device *dev = host->dev;
2297 struct device_node *np = dev->of_node;
2298 const struct dw_mci_drv_data *drv_data = host->drv_data;
2300 u32 clock_frequency;
2302 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2304 dev_err(dev, "could not allocate memory for pdata\n");
2305 return ERR_PTR(-ENOMEM);
2308 /* find out number of slots supported */
2309 if (of_property_read_u32(dev->of_node, "num-slots",
2310 &pdata->num_slots)) {
2311 dev_info(dev, "num-slots property not found, "
2312 "assuming 1 slot is available\n");
2313 pdata->num_slots = 1;
/* OR in every quirk whose DT property is present. */
2317 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2318 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2319 pdata->quirks |= of_quirks[idx].id;
2321 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2322 dev_info(dev, "fifo-depth property not found, using "
2323 "value of FIFOTH register as default\n")
2325 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2327 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2328 pdata->bus_hz = clock_frequency;
/* Give the SoC-specific variant driver a chance to parse its own props. */
2330 if (drv_data && drv_data->parse_dt) {
2331 ret = drv_data->parse_dt(host);
2333 return ERR_PTR(ret);
2336 if (of_find_property(np, "supports-highspeed", NULL))
2337 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2342 #else /* CONFIG_OF */
/* Without DT support there is nothing to parse; callers must use pdata. */
2343 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2345 return ERR_PTR(-EINVAL);
2347 #endif /* CONFIG_OF */
/*
 * Main probe: obtain pdata (from DT if not supplied), enable biu/ciu
 * clocks and optional vmmc regulator, detect the host data width from
 * HCON, reset the controller, set up DMA, FIFO thresholds and the DATA
 * register offset (changed in IP version 2.40a), install the interrupt
 * handler and initialize each slot.  Unwinds via the err_* labels in
 * reverse order of acquisition (labels mostly fall on lines not visible
 * in this excerpt).
 */
2349 int dw_mci_probe(struct dw_mci *host)
2351 const struct dw_mci_drv_data *drv_data = host->drv_data;
2352 int width, i, ret = 0;
2357 host->pdata = dw_mci_parse_dt(host);
2358 if (IS_ERR(host->pdata)) {
2359 dev_err(host->dev, "platform data not available\n");
2364 if (host->pdata->num_slots > 1) {
2366 "Platform data must supply num_slots.\n");
/* Bus interface clock is optional; only fail if enabling it fails. */
2370 host->biu_clk = devm_clk_get(host->dev, "biu");
2371 if (IS_ERR(host->biu_clk)) {
2372 dev_dbg(host->dev, "biu clock not available\n");
2374 ret = clk_prepare_enable(host->biu_clk);
2376 dev_err(host->dev, "failed to enable biu clock\n");
/* Card interface clock: fall back to pdata->bus_hz if absent. */
2381 host->ciu_clk = devm_clk_get(host->dev, "ciu");
2382 if (IS_ERR(host->ciu_clk)) {
2383 dev_dbg(host->dev, "ciu clock not available\n");
2384 host->bus_hz = host->pdata->bus_hz;
2386 ret = clk_prepare_enable(host->ciu_clk);
2388 dev_err(host->dev, "failed to enable ciu clock\n");
2392 if (host->pdata->bus_hz) {
2393 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2396 "Unable to set bus rate to %uHz\n",
2397 host->pdata->bus_hz);
2399 host->bus_hz = clk_get_rate(host->ciu_clk);
2402 if (!host->bus_hz) {
2404 "Platform data must supply bus speed\n");
2409 if (drv_data && drv_data->init) {
2410 ret = drv_data->init(host);
2413 "implementation specific init failed\n");
2418 if (drv_data && drv_data->setup_clock) {
2419 ret = drv_data->setup_clock(host);
2422 "implementation specific clock setup failed\n");
/* vmmc regulator is optional; only -EPROBE_DEFER aborts the probe. */
2427 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2428 if (IS_ERR(host->vmmc)) {
2429 ret = PTR_ERR(host->vmmc);
2430 if (ret == -EPROBE_DEFER)
2433 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2436 ret = regulator_enable(host->vmmc);
2438 if (ret != -EPROBE_DEFER)
2440 "regulator_enable fail: %d\n", ret);
2445 host->quirks = host->pdata->quirks;
2447 spin_lock_init(&host->lock);
2448 INIT_LIST_HEAD(&host->queue);
2451 * Get the host data width - this assumes that HCON has been set with
2452 * the correct values.
2454 i = (mci_readl(host, HCON) >> 7) & 0x7;
2456 host->push_data = dw_mci_push_data16;
2457 host->pull_data = dw_mci_pull_data16;
2459 host->data_shift = 1;
2460 } else if (i == 2) {
2461 host->push_data = dw_mci_push_data64;
2462 host->pull_data = dw_mci_pull_data64;
2464 host->data_shift = 3;
2466 /* Check for a reserved value, and warn if it is */
2468 "HCON reports a reserved host data width!\n"
2469 "Defaulting to 32-bit access.\n");
2470 host->push_data = dw_mci_push_data32;
2471 host->pull_data = dw_mci_pull_data32;
2473 host->data_shift = 2;
2476 /* Reset all blocks */
2477 if (!dw_mci_ctrl_all_reset(host))
2480 host->dma_ops = host->pdata->dma_ops;
2481 dw_mci_init_dma(host);
2483 /* Clear the interrupts for the host controller */
2484 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2485 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2487 /* Put in max timeout */
2488 mci_writel(host, TMOUT, 0xFFFFFFFF);
2491 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2492 * Tx Mark = fifo_size / 2 DMA Size = 8
2494 if (!host->pdata->fifo_depth) {
2496 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2497 * have been overwritten by the bootloader, just like we're
2498 * about to do, so if you know the value for your hardware, you
2499 * should put it in the platform data.
2501 fifo_size = mci_readl(host, FIFOTH);
2502 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2504 fifo_size = host->pdata->fifo_depth;
2506 host->fifo_depth = fifo_size;
2508 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2509 mci_writel(host, FIFOTH, host->fifoth_val);
2511 /* disable clock to CIU */
2512 mci_writel(host, CLKENA, 0);
2513 mci_writel(host, CLKSRC, 0);
2516 * In 2.40a spec, Data offset is changed.
2517 * Need to check the version-id and set data-offset for DATA register.
2519 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2520 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2522 if (host->verid < DW_MMC_240A)
2523 host->data_offset = DATA_OFFSET;
2525 host->data_offset = DATA_240A_OFFSET;
2527 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2528 host->card_workqueue = alloc_workqueue("dw-mci-card",
2530 if (!host->card_workqueue) {
2534 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2535 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2536 host->irq_flags, "dw-mci", host);
/* Slot count: from pdata if given, otherwise read out of HCON. */
2540 if (host->pdata->num_slots)
2541 host->num_slots = host->pdata->num_slots;
2543 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2546 * Enable interrupts for command done, data over, data empty, card det,
2547 * receive ready and error such as transmit, receive timeout, crc error
2549 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2550 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2551 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2552 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2553 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2555 dev_info(host->dev, "DW MMC controller at irq %d, "
2556 "%d bit host data width, "
2558 host->irq, width, fifo_size);
2560 /* We need at least one slot to succeed */
2561 for (i = 0; i < host->num_slots; i++) {
2562 ret = dw_mci_init_slot(host, i);
2564 dev_dbg(host->dev, "slot %d init failed\n", i);
2570 dev_info(host->dev, "%d slots initialized\n", init_slots);
2572 dev_dbg(host->dev, "attempted to initialize %d slots, "
2573 "but failed on all\n", host->num_slots);
2577 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2578 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind: release resources in reverse acquisition order. */
2583 destroy_workqueue(host->card_workqueue);
2586 if (host->use_dma && host->dma_ops->exit)
2587 host->dma_ops->exit(host);
2589 regulator_disable(host->vmmc);
2592 if (!IS_ERR(host->ciu_clk))
2593 clk_disable_unprepare(host->ciu_clk);
2596 if (!IS_ERR(host->biu_clk))
2597 clk_disable_unprepare(host->biu_clk);
2601 EXPORT_SYMBOL(dw_mci_probe);
/*
 * Driver teardown: mask/ack all interrupts, unregister every slot, gate
 * the card clock, then release workqueue, DMA, regulator and clocks —
 * the reverse of dw_mci_probe().
 */
2603 void dw_mci_remove(struct dw_mci *host)
2607 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2608 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2610 for (i = 0; i < host->num_slots; i++) {
2611 dev_dbg(host->dev, "remove slot %d\n", i);
2613 dw_mci_cleanup_slot(host->slot[i], i);
2616 /* disable clock to CIU */
2617 mci_writel(host, CLKENA, 0);
2618 mci_writel(host, CLKSRC, 0);
2620 destroy_workqueue(host->card_workqueue);
2622 if (host->use_dma && host->dma_ops->exit)
2623 host->dma_ops->exit(host);
2626 regulator_disable(host->vmmc);
2628 if (!IS_ERR(host->ciu_clk))
2629 clk_disable_unprepare(host->ciu_clk);
2631 if (!IS_ERR(host->biu_clk))
2632 clk_disable_unprepare(host->biu_clk);
2634 EXPORT_SYMBOL(dw_mci_remove);
2638 #ifdef CONFIG_PM_SLEEP
2640 * TODO: we should probably disable the clock to the card in the suspend path.
/* System suspend: currently only drops the vmmc regulator. */
2642 int dw_mci_suspend(struct dw_mci *host)
2645 regulator_disable(host->vmmc);
2649 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * System resume: re-enable vmmc, fully reset the controller, re-init
 * DMA, restore FIFOTH (and invalidate prev_blksz), reprogram timeout
 * and interrupt masks, then restore ios/bus setup for any slot that
 * kept power across suspend (MMC_PM_KEEP_POWER).
 */
2651 int dw_mci_resume(struct dw_mci *host)
2656 ret = regulator_enable(host->vmmc);
2659 "failed to enable regulator: %d\n", ret);
2664 if (!dw_mci_ctrl_all_reset(host)) {
2669 if (host->use_dma && host->dma_ops->init)
2670 host->dma_ops->init(host);
2673 * Restore the initial value at FIFOTH register
2674 * And Invalidate the prev_blksz with zero
2676 mci_writel(host, FIFOTH, host->fifoth_val);
2677 host->prev_blksz = 0;
2679 /* Put in max timeout */
2680 mci_writel(host, TMOUT, 0xFFFFFFFF);
2682 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2683 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2684 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2685 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2686 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2688 for (i = 0; i < host->num_slots; i++) {
2689 struct dw_mci_slot *slot = host->slot[i];
2692 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2693 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2694 dw_mci_setup_bus(slot, true);
2699 EXPORT_SYMBOL(dw_mci_resume);
2700 #endif /* CONFIG_PM_SLEEP */
/* Module init/exit: this core library only announces itself; the actual
 * device binding is done by the platform/PCI glue drivers that call
 * dw_mci_probe()/dw_mci_remove(). */
2702 static int __init dw_mci_init(void)
2704 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
2708 static void __exit dw_mci_exit(void)
2712 module_init(dw_mci_init);
2713 module_exit(dw_mci_exit);
2715 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2716 MODULE_AUTHOR("NXP Semiconductor VietNam");
2717 MODULE_AUTHOR("Imagination Technologies Ltd");
2718 MODULE_LICENSE("GPL v2");