2 * linux/drivers/mmc/host/mshci.c - Mobile Storage Host Controller Interface driver
4 * Copyright (C) 2011 Samsung Electronics, All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
12 #include <linux/delay.h>
13 #include <linux/highmem.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/slab.h>
17 #include <linux/scatterlist.h>
18 #include <linux/regulator/consumer.h>
20 #include <linux/leds.h>
22 #include <linux/mmc/host.h>
26 #define DRIVER_NAME "mshci"
28 #define DBG(f, x...) \
29 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
31 static unsigned int debug_quirks = 0;
33 static void mshci_prepare_data(struct mshci_host *, struct mmc_data *);
34 static void mshci_finish_data(struct mshci_host *);
36 static void mshci_send_command(struct mshci_host *, struct mmc_command *);
37 static void mshci_finish_command(struct mshci_host *);
/* Dump controller registers to the kernel log for diagnostics (body elided in this view). */
40 static void mshci_dumpregs(struct mshci_host *host)
46 /*****************************************************************************\
48 * Low level functions *
50 \*****************************************************************************/
/*
 * Read-modify-write the interrupt mask register: clear the bits in
 * 'clear', set the bits in 'set'.  (Modify step elided in this view.)
 */
52 static void mshci_clear_set_irqs(struct mshci_host *host, u32 clear, u32 set)
56 ier = mshci_readl(host, MSHCI_INTMSK);
59 mshci_writel(host, ier, MSHCI_INTMSK);
/* Enable (unmask) the given interrupt sources. */
62 static void mshci_unmask_irqs(struct mshci_host *host, u32 irqs)
64 mshci_clear_set_irqs(host, 0, irqs);
/* Disable (mask) the given interrupt sources. */
67 static void mshci_mask_irqs(struct mshci_host *host, u32 irqs)
69 mshci_clear_set_irqs(host, irqs, 0);
/* Mask or unmask the card-detect interrupt depending on 'enable'. */
72 static void mshci_set_card_detection(struct mshci_host *host, bool enable)
74 u32 irqs = INTMSK_CDETECT;
77 mshci_unmask_irqs(host, irqs);
79 mshci_mask_irqs(host, irqs);
/* Convenience wrapper: turn card-detect interrupts on. */
82 static void mshci_enable_card_detection(struct mshci_host *host)
84 mshci_set_card_detection(host, true);
/* Convenience wrapper: turn card-detect interrupts off. */
87 static void mshci_disable_card_detection(struct mshci_host *host)
89 mshci_set_card_detection(host, false);
/*
 * Assert the reset bits given in 'mask' via MSHCI_CTRL and wait for the
 * hardware to clear them.  On timeout, log and dump registers.
 * NOTE(review): only the DMA_RESET poll is visible here; the OR-in of
 * 'mask' and the timeout bookkeeping are elided from this view — confirm
 * the loop has a bounded exit before relying on it.
 */
92 static void mshci_reset(struct mshci_host *host, u8 mask)
97 ier = mshci_readl(host, MSHCI_CTRL);
100 mshci_writel(host, ier, MSHCI_CTRL);
101 while (mshci_readl(host, MSHCI_CTRL) & DMA_RESET) {
103 printk(KERN_ERR "%s: Reset never completed.\n",
104 mmc_hostname(host->mmc));
105 mshci_dumpregs(host);
/*
 * Bring the controller to a known state: full reset, clear all pending
 * raw interrupt status, then enable the standard set of command/data
 * interrupt sources.
 */
113 static void mshci_init(struct mshci_host *host)
115 mshci_reset(host, RESET_ALL);
117 /* clear interrupt status */
118 mshci_writel(host, INTMSK_ALL, MSHCI_RINTSTS);
120 mshci_clear_set_irqs(host, INTMSK_ALL,
121 INTMSK_CDETECT | INTMSK_RE |
122 INTMSK_CDONE | INTMSK_DTO | INTMSK_TXDR | INTMSK_RXDR |
123 INTMSK_RCRC | INTMSK_DCRC | INTMSK_RTO | INTMSK_DRTO |
124 INTMSK_HTO | INTMSK_FRUN | INTMSK_HLE | INTMSK_SBE |
/* Re-initialize the controller and re-arm card-detect interrupts. */
128 static void mshci_reinit(struct mshci_host *host)
131 mshci_enable_card_detection(host);
134 /*****************************************************************************\
138 \*****************************************************************************/
/*
 * PIO read path: drain the controller FIFO into the request's scatterlist
 * using an sg_miter under local_irq_save (SG_MITER_ATOMIC).
 * FIFO_COUNT lives at bits [..:17] of MSHCI_STATUS; each FIFO slot is
 * FIFO_WIDTH bytes.
 */
140 static void mshci_read_block_pio(struct mshci_host *host)
143 size_t fifo_cnt, len;
144 u32 uninitialized_var(scratch);
147 DBG("PIO reading\n");
149 fifo_cnt = (mshci_readl(host,MSHCI_STATUS)&FIFO_COUNT) >> 17;
150 fifo_cnt *= FIFO_WIDTH;
155 local_irq_save(flags);
158 if (!sg_miter_next(&host->sg_miter))
/* Copy at most one sg segment's worth per iteration. */
161 len = min(host->sg_miter.length, fifo_cnt);
164 host->sg_miter.consumed = len;
166 buf = host->sg_miter.addr;
169 scratch = mshci_readl(host, MSHCI_FIFODAT);
/* Tail handling: store a single byte, otherwise a whole 32-bit word. */
171 *buf = scratch & 0xFF;
173 *((u32*)buf) = scratch;
179 sg_miter_stop(&host->sg_miter);
181 local_irq_restore(flags);
/*
 * PIO write path: fill the controller FIFO from the scatterlist, packing
 * bytes into 32-bit words ('chunk' counts bytes accumulated in scratch).
 * host->data_transfered tracks total bytes written so a spurious TXDR
 * after the last segment can be distinguished from a missing DTO.
 */
184 static void mshci_write_block_pio(struct mshci_host *host)
187 size_t fifo_cnt, len, chunk;
191 DBG("PIO writing\n");
195 fifo_cnt *= FIFO_WIDTH;
199 local_irq_save(flags);
202 if (!sg_miter_next(&host->sg_miter)) {
204 /* Even though transfer is complete,
205 * TXDR interrupt occurs again.
206 * So, it has to check that it has really
207 * no next sg buffer or just DTO interrupt
208 * has not occured yet.
211 if (( host->data->blocks * host->data->blksz ) ==
212 host->data_transfered )
213 break; /* transfer done but DTO not yet */
216 len = min(host->sg_miter.length, fifo_cnt);
219 host->sg_miter.consumed = len;
220 host->data_transfered += len;
222 buf = (host->sg_miter.addr);
/* Pack the next byte into the word being assembled. */
225 scratch |= (u32)*buf << (chunk * 8);
/* Push a full word, or the final partial word when input is exhausted. */
231 if ((chunk == 4) || ((len == 0) && (fifo_cnt == 0))) {
232 mshci_writel(host, scratch, MSHCI_FIFODAT);
239 sg_miter_stop(&host->sg_miter);
241 local_irq_restore(flags);
/* Dispatch one PIO service round to the read or write helper. */
244 static void mshci_transfer_pio(struct mshci_host *host)
248 if (host->blocks == 0)
251 if (host->data->flags & MMC_DATA_READ)
252 mshci_read_block_pio(host);
254 mshci_write_block_pio(host);
256 DBG("PIO transfer complete.\n");
/*
 * Fill one IDMAC descriptor: des[0]=flags, des[1]=length, des[2]=buffer
 * address, des[3]=physical address of the NEXT descriptor (chained mode).
 * NOTE(review): the (u32) cast of desc_phy assumes 32-bit DMA addresses —
 * would truncate on a 64-bit/LPAE configuration; confirm platform limits.
 */
259 static void mshci_set_mdma_desc(u8 *desc_vir, u8 *desc_phy,
260 u32 des0, u32 des1, u32 des2)
262 ((struct mshci_idmac *)(desc_vir))->des[0] = des0;
263 ((struct mshci_idmac *)(desc_vir))->des[1] = des1;
264 ((struct mshci_idmac *)(desc_vir))->des[2] = des2;
265 ((struct mshci_idmac *)(desc_vir))->des[3] = (u32)desc_phy +
266 sizeof(struct mshci_idmac);
/*
 * Build the IDMAC descriptor chain for 'data' before starting a DMA
 * transfer: DMA-map the scatterlist, map the descriptor buffer to learn
 * its bus address, write one descriptor per sg entry (FS on the first,
 * LD on the last), then map the descriptor buffer again to flush the
 * CPU-written descriptors to memory.
 * NOTE(review): the second dma_map_single() of host->idma_desc is issued
 * without unmapping the first — on non-coherent platforms this leaks a
 * mapping per request; dma_sync_single_for_device() would be the usual
 * idiom.  Confirm against the platform's DMA ops.
 */
269 static int mshci_mdma_table_pre(struct mshci_host *host,
270 struct mmc_data *data)
274 u8 *desc_vir, *desc_phy;
278 struct scatterlist *sg;
281 u32 size_idmac = sizeof(struct mshci_idmac);
283 if (data->flags & MMC_DATA_READ)
284 direction = DMA_FROM_DEVICE;
286 direction = DMA_TO_DEVICE;
288 host->sg_count = dma_map_sg(mmc_dev(host->mmc),
289 data->sg, data->sg_len, direction);
290 if (host->sg_count == 0)
293 desc_vir = host->idma_desc;
295 /* to know phy address */
296 host->idma_addr = dma_map_single(mmc_dev(host->mmc),
297 host->idma_desc, 128 * size_idmac, DMA_TO_DEVICE);
298 if (dma_mapping_error(mmc_dev(host->mmc), host->idma_addr))
/* Descriptors must be 4-byte aligned for the IDMAC. */
300 BUG_ON(host->idma_addr & 0x3);
302 desc_phy = (u8 *)host->idma_addr;
304 for_each_sg(data->sg, sg, host->sg_count, i) {
305 addr = sg_dma_address(sg);
306 len = sg_dma_len(sg);
/* OWN+CH on every descriptor; FS (first segment) only on the first. */
309 des_flag = (MSHCI_IDMAC_OWN|MSHCI_IDMAC_CH);
310 des_flag |= (i==0) ? MSHCI_IDMAC_FS:0;
312 mshci_set_mdma_desc(desc_vir, desc_phy, des_flag, len, addr);
313 desc_vir += size_idmac;
314 desc_phy += size_idmac;
317 * If this triggers then we have a calculation bug
320 WARN_ON((desc_vir - host->idma_desc) > 128 * size_idmac);
324 * Add a terminating flag.
326 ((struct mshci_idmac *)(desc_vir-size_idmac))->des[0] |= MSHCI_IDMAC_LD;
328 /* it has to dma map again to resync vir data to phy data */
329 host->idma_addr = dma_map_single(mmc_dev(host->mmc),
330 host->idma_desc, 128 * size_idmac, DMA_TO_DEVICE);
331 if (dma_mapping_error(mmc_dev(host->mmc), host->idma_addr))
333 BUG_ON(host->idma_addr & 0x3);
/* Error path: undo the scatterlist mapping. */
338 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
339 data->sg_len, direction);
/*
 * Tear down the DMA mappings made by mshci_mdma_table_pre() once the
 * transfer has finished.
 * NOTE(review): the unmap size (128 * 2 + 1) * 4 matches the kmalloc in
 * mshci_add_host() but NOT the 128 * sizeof(struct mshci_idmac) size used
 * at map time — these should agree; verify sizeof(struct mshci_idmac).
 */
344 static void mshci_idma_table_post(struct mshci_host *host,
345 struct mmc_data *data)
349 if (data->flags & MMC_DATA_READ)
350 direction = DMA_FROM_DEVICE;
352 direction = DMA_TO_DEVICE;
354 dma_unmap_single(mmc_dev(host->mmc), host->idma_addr,
355 (128 * 2 + 1) * 4, DMA_TO_DEVICE);
357 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
358 data->sg_len, direction);
/* Data timeout for MSHCI_TMOUT; currently pinned to the maximum value. */
361 static u32 mshci_calc_timeout(struct mshci_host *host, struct mmc_data *data)
363 return 0xffffffff; /* this value SHOULD be optimized */
/*
 * Select the interrupt set for the upcoming transfer: DMA-driven
 * transfers mask the PIO drain/fill interrupts, PIO transfers enable
 * TXDR/RXDR.  In DDR mode (MSHCI_UHS_REG bit 16) spurious DCRC events
 * are worked around by masking INTMSK_DCRC.
 *
 * FIX(review): both DDR checks used a logical '&&' against (1 << 16),
 * which is always true, so the workaround was applied unconditionally.
 * A bitwise '&' tests the actual DDR-mode bit, matching the
 * (0x1<<16) write to MSHCI_UHS_REG in mshci_set_ios().
 */
366 static void mshci_set_transfer_irqs(struct mshci_host *host)
368 u32 dma_irqs = INTMSK_DMA;
369 u32 pio_irqs = INTMSK_TXDR | INTMSK_RXDR;
371 if (host->flags & MSHCI_REQ_USE_DMA) {
372 /* Next codes are the W/A for DDR */
373 if(mshci_readl(host, MSHCI_UHS_REG) & (1 << 16))
374 dma_irqs |= INTMSK_DCRC;
375 /* clear interrupts for PIO */
376 mshci_clear_set_irqs(host, dma_irqs, 0);
378 /* Next codes are the W/A for DDR */
379 if(mshci_readl(host, MSHCI_UHS_REG) & (1 << 16))
380 mshci_clear_set_irqs(host, INTMSK_DCRC, pio_irqs);
382 mshci_clear_set_irqs(host, 0, pio_irqs);
/*
 * Prepare the controller for a data transfer: program the timeout,
 * reset the FIFO, decide DMA vs PIO (IDMAC requires 4-byte aligned
 * sg offsets and lengths), build descriptors / start the sg_miter,
 * arm the matching interrupt set and program block size/byte count.
 */
386 static void mshci_prepare_data(struct mshci_host *host, struct mmc_data *data)
396 BUG_ON(data->blksz * data->blocks > (host->mmc->max_req_size *
397 host->mmc->max_hw_segs));
398 BUG_ON(data->blksz > host->mmc->max_blk_size);
399 BUG_ON(data->blocks > 400000);
402 host->data_early = 0;
404 count = mshci_calc_timeout(host, data);
405 mshci_writel(host, count, MSHCI_TMOUT);
407 mshci_reset(host, FIFO_RESET);
409 if (host->flags & (MSHCI_USE_IDMA))
410 host->flags |= MSHCI_REQ_USE_DMA;
413 * FIXME: This doesn't account for merging when mapping the
416 if (host->flags & MSHCI_REQ_USE_DMA) {
417 /* mshc's IDMAC can't transfer data that is not aligned
418 * or has length not divided by 4 byte. */
420 struct scatterlist *sg;
422 for_each_sg(data->sg, sg, data->sg_len, i) {
423 if (sg->length & 0x3) {
424 DBG("Reverting to PIO because of "
425 "transfer size (%d)\n",
427 host->flags &= ~MSHCI_REQ_USE_DMA;
429 } else if (sg->offset & 0x3) {
430 DBG("Reverting to PIO because of "
432 host->flags &= ~MSHCI_REQ_USE_DMA;
438 if (host->flags & MSHCI_REQ_USE_DMA) {
439 ret = mshci_mdma_table_pre(host, data);
442 * This only happens when someone fed
443 * us an invalid request.
446 host->flags &= ~MSHCI_REQ_USE_DMA;
/* Program the descriptor base address for the IDMAC. */
448 mshci_writel(host, host->idma_addr,
453 if (host->flags & MSHCI_REQ_USE_DMA) {
454 /* enable DMA, IDMA interrupts and IDMAC */
455 mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) |
456 ENABLE_IDMAC|DMA_ENABLE),MSHCI_CTRL);
457 mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
458 (BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)),
460 mshci_writel(host, INTMSK_IDMAC_ERROR, MSHCI_IDINTEN);
463 if (!(host->flags & MSHCI_REQ_USE_DMA)) {
/* PIO fallback: walk the sg list with an atomic sg_miter. */
466 flags = SG_MITER_ATOMIC;
467 if (host->data->flags & MMC_DATA_READ)
468 flags |= SG_MITER_TO_SG;
470 flags |= SG_MITER_FROM_SG;
472 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
473 host->blocks = data->blocks;
/* NOTE(review): KERN_ERR for an informational message — likely debug leftover. */
475 printk(KERN_ERR "it starts transfer on PIO\n");
477 /* set transfered data as 0. this value only uses for PIO write */
478 host->data_transfered = 0;
479 mshci_set_transfer_irqs(host);
481 mshci_writel(host, data->blksz, MSHCI_BLKSIZ);
482 mshci_writel(host, (data->blocks * data->blksz), MSHCI_BYTCNT);
/*
 * Compose the data-related command flags (data-expected, write direction,
 * stream mode) to be OR-ed into the CMD register value.
 */
485 static u32 mshci_set_transfer_mode(struct mshci_host *host,
486 struct mmc_data *data)
494 WARN_ON(!host->data);
496 /* this cmd has data to transmit */
497 ret |= CMD_DATA_EXP_BIT;
499 if (data->flags & MMC_DATA_WRITE)
501 if (data->flags & MMC_DATA_STREAM)
502 ret |= CMD_TRANSMODE_BIT;
/*
 * Complete the current data transfer: tear down DMA state if it was
 * used, record bytes_xfered (0 on error after a DMA reset, full length
 * on success), issue the stop command if present, and schedule the
 * finish tasklet.
 */
507 static void mshci_finish_data(struct mshci_host *host)
509 struct mmc_data *data;
516 if (host->flags & MSHCI_REQ_USE_DMA) {
517 mshci_idma_table_post(host, data);
518 /* disable IDMAC and DMA interrupt */
519 mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) &
520 ~(DMA_ENABLE|ENABLE_IDMAC)), MSHCI_CTRL);
521 /* mask all interrupt source of IDMAC */
522 mshci_writel(host, 0x0, MSHCI_IDINTEN);
/* Error path: reset the DMA engine and report nothing transferred. */
526 mshci_reset(host, DMA_RESET);
527 data->bytes_xfered = 0;
530 data->bytes_xfered = data->blksz * data->blocks;
533 mshci_send_command(host, data->stop);
535 tasklet_schedule(&host->finish_tasklet);
/*
 * Gate the card clock on or off: write CLKENA, then issue a
 * CMD_ONLY_CLK "update clock" command and poll until the controller
 * clears CMD_STRT_BIT (bounded by loop_count).
 */
538 static void mshci_clock_onoff(struct mshci_host *host, bool val)
540 volatile u32 loop_count = 0x100000;
543 mshci_writel(host, CLK_ENABLE, MSHCI_CLKENA);
545 mshci_writel(host, CLK_DISABLE, MSHCI_CLKENA);
547 mshci_writel(host, 0, MSHCI_CMD);
548 mshci_writel(host, CMD_ONLY_CLK, MSHCI_CMD);
551 if (!(mshci_readl(host, MSHCI_CMD) & CMD_STRT_BIT))
554 } while (loop_count);
556 if (loop_count == 0) {
557 printk(KERN_ERR "%s: Clock %s has been failed.\n "
558 , mmc_hostname(host->mmc),val ? "ON":"OFF");
/*
 * Issue a command to the card: prepare any attached data transfer,
 * program the argument, build the CMD register flags from the MMC
 * response flags, and start the command.  Global interrupts are gated
 * off around the CMD write; a 10 s software watchdog guards completion.
 */
562 static void mshci_send_command(struct mshci_host *host, struct mmc_command *cmd)
568 /* disable interrupt before issuing cmd to the card. */
569 mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) & ~INT_ENABLE),
572 mod_timer(&host->timer, jiffies + 10 * HZ);
576 mshci_prepare_data(host, cmd->data);
578 mshci_writel(host, cmd->arg, MSHCI_CMDARG);
580 flags = mshci_set_transfer_mode(host, cmd->data);
/* R2-with-busy is not representable on this controller. */
582 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
583 printk(KERN_ERR "%s: Unsupported response type!\n",
584 mmc_hostname(host->mmc));
585 cmd->error = -EINVAL;
586 tasklet_schedule(&host->finish_tasklet);
590 if (cmd->flags & MMC_RSP_PRESENT) {
591 flags |= CMD_RESP_EXP_BIT;
592 if (cmd->flags & MMC_RSP_136)
593 flags |= CMD_RESP_LENGTH_BIT;
595 if (cmd->flags & MMC_RSP_CRC)
596 flags |= CMD_CHECK_CRC_BIT;
597 flags |= (cmd->opcode | CMD_STRT_BIT | CMD_WAIT_PRV_DAT_BIT);
/* Warn if the previous command is still in flight. */
599 ret = mshci_readl(host, MSHCI_CMD);
600 if (ret & CMD_STRT_BIT)
601 printk(KERN_ERR "CMD busy. current cmd %d. last cmd reg 0x%x\n",
604 mshci_writel(host, flags, MSHCI_CMD);
606 /* enable interrupt upon it sends a command to the card. */
607 mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
/*
 * Read back the command response (RESP0..3 are stored in reverse order
 * for 136-bit responses), clear the error, then either finish early
 * data or — for a data-less command — schedule the finish tasklet.
 */
611 static void mshci_finish_command(struct mshci_host *host)
615 BUG_ON(host->cmd == NULL);
617 if (host->cmd->flags & MMC_RSP_PRESENT) {
618 if (host->cmd->flags & MMC_RSP_136) {
620 * response data are overturned.
622 for (i = 0;i < 4;i++) {
623 host->cmd->resp[i] = mshci_readl(host,
624 MSHCI_RESP0 + (3-i)*4);
627 host->cmd->resp[0] = mshci_readl(host, MSHCI_RESP0);
631 host->cmd->error = 0;
633 /* if data interrupt occurs earlier than command interrupt */
634 if (host->data && host->data_early)
635 mshci_finish_data(host);
637 if (!host->cmd->data)
638 tasklet_schedule(&host->finish_tasklet);
643 static void mshci_set_clock(struct mshci_host *host, unsigned int clock)
646 volatile u32 loop_count;
648 if (clock == host->clock)
651 /* befor changing clock. clock needs to be off. */
652 mshci_clock_onoff(host, CLK_DISABLE);
657 if (clock >= host->max_clk) {
660 for (div = 1;div < 255;div++) {
661 if ((host->max_clk / (div<<1)) <= clock)
666 mshci_writel(host, div, MSHCI_CLKDIV);
668 mshci_writel(host, 0, MSHCI_CMD);
669 mshci_writel(host, CMD_ONLY_CLK, MSHCI_CMD);
670 loop_count = 0x10000;
673 if (!(mshci_readl(host, MSHCI_CMD) & CMD_STRT_BIT))
678 if (loop_count == 0) {
679 printk(KERN_ERR "%s: Changing clock has been failed.\n "
680 , mmc_hostname(host->mmc));
682 mshci_writel(host, mshci_readl(host, MSHCI_CMD)&(~CMD_SEND_CLK_ONLY),
685 mshci_clock_onoff(host, CLK_ENABLE);
/*
 * Switch card power via MSHCI_PWREN; power == (unsigned short)-1 means
 * "off".  No-op when the requested state matches the cached host->pwr.
 */
691 static void mshci_set_power(struct mshci_host *host, unsigned short power)
695 if (power == (unsigned short)-1)
698 if (host->pwr == pwr)
704 mshci_writel(host, 0x0, MSHCI_PWREN);
706 mshci_writel(host, 0x1, MSHCI_PWREN);
709 /*****************************************************************************\
713 \*****************************************************************************/
/*
 * mmc_host_ops.request entry point: under the host lock, determine card
 * presence (CDETECT bit clear == present) and either fail the request
 * with -ENOMEDIUM or issue its first command.
 *
 * FIX(review): the quirk test used logical '&&', which is true for ANY
 * non-zero quirk mask, not just MSHCI_QUIRK_BROKEN_CARD_DETECTION.
 * Bitwise '&' tests the intended flag, consistent with the other quirk
 * checks in this file (e.g. mshci_get_ro()).
 */
715 static void mshci_request(struct mmc_host *mmc, struct mmc_request *mrq)
717 struct mshci_host *host;
721 host = mmc_priv(mmc);
723 spin_lock_irqsave(&host->lock, flags);
725 WARN_ON(host->mrq != NULL);
729 present = !(mshci_readl(host, MSHCI_CDETECT) & CARD_PRESENT);
731 if (host->quirks & MSHCI_QUIRK_BROKEN_CARD_DETECTION)
734 if (!present || host->flags & MSHCI_DEVICE_DEAD) {
735 host->mrq->cmd->error = -ENOMEDIUM;
736 tasklet_schedule(&host->finish_tasklet);
738 mshci_send_command(host, mrq->cmd);
742 spin_unlock_irqrestore(&host->lock, flags);
/*
 * mmc_host_ops.set_ios: apply clock, power and bus-width settings.
 * CTYPE selects 1/4/8-bit bus; DDR widths additionally set bit 16 of
 * MSHCI_UHS_REG (the bit tested by the DDR workaround in
 * mshci_set_transfer_irqs()).
 */
745 static void mshci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
747 struct mshci_host *host;
750 host = mmc_priv(mmc);
752 spin_lock_irqsave(&host->lock, flags);
754 if (host->flags & MSHCI_DEVICE_DEAD)
757 if (ios->power_mode == MMC_POWER_OFF)
760 if (host->ops->set_ios)
761 host->ops->set_ios(host, ios);
763 mshci_set_clock(host, ios->clock);
765 if (ios->power_mode == MMC_POWER_OFF)
766 mshci_set_power(host, -1);
768 mshci_set_power(host, ios->vdd);
770 if (ios->bus_width == MMC_BUS_WIDTH_8)
771 mshci_writel(host, (0x1<<16), MSHCI_CTYPE);
772 else if (ios->bus_width == MMC_BUS_WIDTH_4)
773 mshci_writel(host, (0x1<<0), MSHCI_CTYPE);
774 else if (ios->bus_width == MMC_BUS_WIDTH_8_DDR) {
775 mshci_writel(host, (0x1<<16), MSHCI_CTYPE);
776 mshci_writel(host, (0x1<<16), MSHCI_UHS_REG);
777 } else if (ios->bus_width == MMC_BUS_WIDTH_4_DDR) {
778 mshci_writel(host, (0x1<<0), MSHCI_CTYPE);
779 mshci_writel(host, (0x1<<16), MSHCI_UHS_REG);
781 mshci_writel(host, (0x0<<0), MSHCI_CTYPE);
784 spin_unlock_irqrestore(&host->lock, flags);
/*
 * mmc_host_ops.get_ro: report write-protect state.  Quirks allow forcing
 * writability or delegating to a platform get_ro callback; otherwise the
 * controller's WRTPRT register is consulted.
 */
787 static int mshci_get_ro(struct mmc_host *mmc)
789 struct mshci_host *host;
795 host = mmc_priv(mmc);
797 spin_lock_irqsave(&host->lock, flags);
799 if (host->quirks & MSHCI_QUIRK_ALWAYS_WRITABLE)
801 else if (host->quirks & MSHCI_QUIRK_NO_WP_BIT)
802 wrtprt = host->ops->get_ro(mmc) ? 0:WRTPRT_ON;
803 else if (host->flags & MSHCI_DEVICE_DEAD)
806 wrtprt = mshci_readl(host, MSHCI_WRTPRT);
808 spin_unlock_irqrestore(&host->lock, flags);
810 return (wrtprt & WRTPRT_ON);
/* mmc_host_ops.enable_sdio_irq: (un)mask the SDIO card interrupt under the host lock. */
813 static void mshci_enable_sdio_irq(struct mmc_host *mmc, int enable)
815 struct mshci_host *host;
818 host = mmc_priv(mmc);
820 spin_lock_irqsave(&host->lock, flags);
822 if (host->flags & MSHCI_DEVICE_DEAD)
826 mshci_unmask_irqs(host, SDIO_INT_ENABLE);
828 mshci_mask_irqs(host, SDIO_INT_ENABLE);
832 spin_unlock_irqrestore(&host->lock, flags);
/* MMC core callbacks; .get_ro may be overridden in mshci_add_host(). */
835 static struct mmc_host_ops mshci_ops = {
836 .request = mshci_request,
837 .set_ios = mshci_set_ios,
838 .get_ro = mshci_get_ro,
839 .enable_sdio_irq = mshci_enable_sdio_irq,
842 /*****************************************************************************\
846 \*****************************************************************************/
/*
 * Card-detect tasklet: if a card-detect event interrupted an in-flight
 * request, fail it with -ENOMEDIUM and reset the controller, then tell
 * the MMC core to rescan after a debounce delay.
 *
 * FIX(review): '!' binds tighter than '&', so the original condition
 * computed (!readl) & CARD_PRESENT — only true when the whole CDETECT
 * register read as zero.  Parenthesizing restores the intended bit test
 * and matches the 'present' computation in mshci_request().
 * NOTE(review): per that computation, bit clear == card present; confirm
 * the desired polarity of this branch against the elided inner lines.
 */
848 static void mshci_tasklet_card(unsigned long param)
850 struct mshci_host *host;
853 host = (struct mshci_host*)param;
855 spin_lock_irqsave(&host->lock, flags);
857 if (!(mshci_readl(host, MSHCI_CDETECT) & CARD_PRESENT)) {
859 printk(KERN_ERR "%s: Card removed during transfer!\n",
860 mmc_hostname(host->mmc));
861 printk(KERN_ERR "%s: Resetting controller.\n",
862 mmc_hostname(host->mmc));
864 host->mrq->cmd->error = -ENOMEDIUM;
865 tasklet_schedule(&host->finish_tasklet);
869 spin_unlock_irqrestore(&host->lock, flags);
871 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
/*
 * Request-completion tasklet: cancel the software watchdog, reset the
 * controller's internal state machines if the request ended in error,
 * then hand the finished request back to the MMC core (outside the lock).
 */
874 static void mshci_tasklet_finish(unsigned long param)
876 struct mshci_host *host;
878 struct mmc_request *mrq;
880 host = (struct mshci_host*)param;
882 spin_lock_irqsave(&host->lock, flags);
884 del_timer(&host->timer);
889 * The controller needs a reset of internal state machines
890 * upon error conditions.
892 if (!(host->flags & MSHCI_DEVICE_DEAD) &&
894 (mrq->data && (mrq->data->error ||
895 (mrq->data->stop && mrq->data->stop->error))))) {
897 /* Spec says we should do both at the same time, but Ricoh
898 controllers do not like that. */
899 mshci_reset(host, FIFO_RESET);
907 spin_unlock_irqrestore(&host->lock, flags);
909 mmc_request_done(host->mmc, mrq);
/*
 * 10-second software watchdog (armed in mshci_send_command): on expiry,
 * dump registers and fail whichever stage (data, command, or whole
 * request) is still pending with -ETIMEDOUT.
 */
912 static void mshci_timeout_timer(unsigned long data)
914 struct mshci_host *host;
917 host = (struct mshci_host*)data;
919 spin_lock_irqsave(&host->lock, flags);
922 printk(KERN_ERR "%s: Timeout waiting for hardware "
923 "interrupt.\n", mmc_hostname(host->mmc));
924 mshci_dumpregs(host);
927 host->data->error = -ETIMEDOUT;
928 mshci_finish_data(host);
931 host->cmd->error = -ETIMEDOUT;
933 host->mrq->cmd->error = -ETIMEDOUT;
935 tasklet_schedule(&host->finish_tasklet);
940 spin_unlock_irqrestore(&host->lock, flags);
943 /*****************************************************************************\
945 * Interrupt handling *
947 \*****************************************************************************/
/*
 * Command-phase interrupt handler: map response-timeout / CRC / response
 * errors onto cmd->error; on error schedule the finish tasklet, on
 * command-done complete the command.
 */
949 static void mshci_cmd_irq(struct mshci_host *host, u32 intmask)
951 BUG_ON(intmask == 0);
/* Spurious command interrupt with no command outstanding. */
954 printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
955 "though no command operation was in progress.\n",
956 mmc_hostname(host->mmc), (unsigned)intmask);
957 mshci_dumpregs(host);
961 if (intmask & INTMSK_RTO)
962 host->cmd->error = -ETIMEDOUT;
963 else if (intmask & (INTMSK_RCRC | INTMSK_RE))
964 host->cmd->error = -EILSEQ;
966 if (host->cmd->error) {
967 tasklet_schedule(&host->finish_tasklet);
971 if (intmask & INTMSK_CDONE)
972 mshci_finish_command(host);
/*
 * Data-phase interrupt handler for both sources: MINT (controller) and
 * IDMAC status.  Maps timeout/CRC/FIFO-run/bus errors to data->error,
 * services PIO drain/fill, and completes the transfer on DTO — possibly
 * deferring via data_early if the command has not yet completed.
 */
975 static void mshci_data_irq(struct mshci_host *host, u32 intmask, u8 intr_src)
977 BUG_ON(intmask == 0);
981 * The "data complete" interrupt is also used to
982 * indicate that a busy state has ended. See comment
983 * above in mshci_cmd_irq().
985 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
986 if (intmask & INTMSK_DTO) {
987 mshci_finish_command(host);
/* Spurious data interrupt with no data transfer outstanding.
 * NOTE(review): the label logic looks inverted — intr_src nonzero prints
 * "MINT" but INT_SRC_MINT's value is not visible here; confirm. */
992 printk(KERN_ERR "%s: Got data interrupt 0x%08x from %s "
993 "even though no data operation was in progress.\n",
994 mmc_hostname(host->mmc), (unsigned)intmask,
995 intr_src ? "MINT":"IDMAC");
996 mshci_dumpregs(host);
1000 if (intr_src == INT_SRC_MINT) {
1001 if (intmask & INTMSK_DATA_TIMEOUT)
1002 host->data->error = -ETIMEDOUT;
1003 else if (intmask & INTMSK_DATA_CRC)
1004 host->data->error = -EILSEQ;
1005 else if (intmask & INTMSK_FRUN) {
1006 printk(KERN_ERR "%s: FIFO underrun/overrun error\n",
1007 mmc_hostname(host->mmc));
1008 host->data->error = -EIO;
1011 if (intmask & (IDSTS_FBE | IDSTS_CES | IDSTS_DU)) {
1012 printk(KERN_ERR "%s: Fatal Bus error on DMA\n",
1013 mmc_hostname(host->mmc));
1014 host->data->error = -EIO;
1018 if (host->data->error)
1019 mshci_finish_data(host);
/* PIO service: RXDR/DTO drives reads, TXDR drives writes. */
1021 if (!(host->flags & MSHCI_REQ_USE_DMA) &&
1022 (((host->data->flags & MMC_DATA_READ)&&
1023 (intmask & (INTMSK_RXDR | INTMSK_DTO))) ||
1024 ((host->data->flags & MMC_DATA_WRITE)&&
1025 (intmask & (INTMSK_TXDR)))))
1026 mshci_transfer_pio(host);
1028 if (intmask & INTMSK_DTO) {
1031 * Data managed to finish before the
1032 * command completed. Make sure we do
1033 * things in the proper order.
1035 host->data_early = 1;
1037 mshci_finish_data(host);
/*
 * Top-level interrupt handler.  Reads MINTSTS; an empty/all-ones read is
 * treated as an IDMAC interrupt (IDSTS).  Acknowledges status, fans out
 * to card-detect, command, data and SDIO handling, and spin-waits for
 * the paired CDONE/DTO when a timeout error arrives first (hardware
 * delivers the completion interrupt after the error interrupt).
 */
1043 static irqreturn_t mshci_irq(int irq, void *dev_id)
1046 struct mshci_host* host = dev_id;
1051 spin_lock(&host->lock);
1053 intmask = mshci_readl(host, MSHCI_MINTSTS);
1055 if (!intmask || intmask == 0xffffffff) {
1056 /* check if there is a interrupt for IDMAC */
1057 intmask = mshci_readl(host, MSHCI_IDSTS);
1059 mshci_writel(host, intmask,MSHCI_IDSTS);
1060 mshci_data_irq(host, intmask, INT_SRC_IDMAC);
1061 result = IRQ_HANDLED;
1067 DBG("*** %s got interrupt: 0x%08x\n",
1068 mmc_hostname(host->mmc), intmask);
/* Acknowledge everything we are about to handle. */
1070 mshci_writel(host, intmask, MSHCI_RINTSTS);
1072 if (intmask & (INTMSK_CDETECT))
1073 tasklet_schedule(&host->card_tasklet);
1075 intmask &= ~INTMSK_CDETECT;
1077 if (intmask & CMD_STATUS) {
1078 if ( !(intmask & INTMSK_CDONE) && (intmask & INTMSK_RTO)) {
1080 * when a error about command timeout occurs,
1081 * cmd done intr comes together.
1082 * cmd done intr comes later than error intr.
1083 * so, it has to wait for cmd done intr.
1085 while ( --timeout &&
1086 !(mshci_readl(host, MSHCI_MINTSTS)
1089 printk(KERN_ERR"*** %s time out for\
1091 mmc_hostname(host->mmc));
1093 mshci_writel(host, INTMSK_CDONE,
1095 mshci_cmd_irq(host, intmask & CMD_STATUS);
1097 mshci_cmd_irq(host, intmask & CMD_STATUS);
1101 if (intmask & DATA_STATUS) {
1102 if ( !(intmask & INTMSK_DTO) && (intmask & INTMSK_DRTO)) {
1104 * when a error about data timout occurs,
1105 * DTO intr comes together.
1106 * DTO intr comes later than error intr.
1107 * so, it has to wait for DTO intr.
1109 while ( --timeout &&
1110 !(mshci_readl(host, MSHCI_MINTSTS)
1113 printk(KERN_ERR"*** %s time out for\
1115 mmc_hostname(host->mmc));
1117 mshci_writel(host, INTMSK_DTO,
1119 mshci_data_irq(host, intmask & DATA_STATUS,INT_SRC_MINT);
1121 mshci_data_irq(host, intmask & DATA_STATUS,INT_SRC_MINT);
1125 intmask &= ~(CMD_STATUS | DATA_STATUS);
1127 if (intmask & SDIO_INT_ENABLE)
1130 intmask &= ~SDIO_INT_ENABLE;
/* Anything left over is unexpected — log and dump. */
1133 printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
1134 mmc_hostname(host->mmc), intmask);
1135 mshci_dumpregs(host);
1138 result = IRQ_HANDLED;
1142 spin_unlock(&host->lock);
1145 * We have to delay this as it calls back into the driver.
1148 mmc_signal_sdio_irq(host->mmc);
1153 /*****************************************************************************\
1157 \*****************************************************************************/
/*
 * Suspend: stop card-detect, suspend the MMC core, release the IRQ and
 * cut the vmmc regulator.
 */
1161 int mshci_suspend_host(struct mshci_host *host, pm_message_t state)
1165 mshci_disable_card_detection(host);
1167 ret = mmc_suspend_host(host->mmc);
1171 free_irq(host->irq, host);
1174 ret = regulator_disable(host->vmmc);
1179 EXPORT_SYMBOL_GPL(mshci_suspend_host);
/*
 * Resume: re-enable vmmc, re-enable platform DMA if IDMA is in use,
 * re-request the IRQ, resume the MMC core and re-arm card detect.
 */
1181 int mshci_resume_host(struct mshci_host *host)
1186 ret = regulator_enable(host->vmmc);
1191 if (host->flags & (MSHCI_USE_IDMA)) {
1192 if (host->ops->enable_dma)
1193 host->ops->enable_dma(host);
1196 ret = request_irq(host->irq, mshci_irq, IRQF_SHARED,
1197 mmc_hostname(host->mmc), host);
1204 ret = mmc_resume_host(host->mmc);
1205 mshci_enable_card_detection(host);
1210 EXPORT_SYMBOL_GPL(mshci_resume_host);
1212 #endif /* CONFIG_PM */
1214 /*****************************************************************************\
1216 * Device allocation/registration *
1218 \*****************************************************************************/
/*
 * Allocate an mmc_host with an mshci_host (plus priv_size bytes of
 * caller-private data) embedded in its private area.
 */
1220 struct mshci_host *mshci_alloc_host(struct device *dev,
1223 struct mmc_host *mmc;
1224 struct mshci_host *host;
1226 WARN_ON(dev == NULL);
1228 mmc = mmc_alloc_host(sizeof(struct mshci_host) + priv_size, dev);
1230 return ERR_PTR(-ENOMEM);
1232 host = mmc_priv(mmc);
1238 EXPORT_SYMBOL_GPL(mshci_alloc_host);
/*
 * Read the hardware FIFO depth from FIFOTH and program RX/TX watermarks
 * at half depth with an 8-transfer burst size (MSIZE_8).
 * NOTE(review): both the "RX" and "WX" values in the printk are derived
 * with a >>16 shift — one of them is presumably meant to use the TX
 * field's shift; confirm against the RX_WMARK/TX_WMARK definitions.
 */
1240 static void mshci_fifo_init(struct mshci_host *host)
1242 int fifo_val, fifo_depth, fifo_threshold;
1244 fifo_val = mshci_readl(host, MSHCI_FIFOTH);
1245 fifo_depth = ((fifo_val & RX_WMARK) >> 16) + 1;
1246 fifo_threshold = fifo_depth / 2;
1247 host->fifo_threshold = fifo_threshold;
1248 host->fifo_depth = fifo_threshold * 2;
1250 printk(KERN_INFO "%s: FIFO WMARK FOR RX 0x%x WX 0x%x.\n",
1251 mmc_hostname(host->mmc), fifo_depth, ((fifo_val & TX_WMARK)>>16)+1);
1253 fifo_val &= ~(RX_WMARK | TX_WMARK | MSIZE_MASK);
1255 fifo_val |= (fifo_threshold | (fifo_threshold<<16));
1256 fifo_val |= MSIZE_8;
1258 mshci_writel(host, fifo_val, MSHCI_FIFOTH);
/*
 * Register the host with the MMC core: reset hardware, allocate the
 * IDMAC descriptor table, set host capabilities/limits, install
 * tasklets, timer and IRQ, enable the vmmc regulator, configure the
 * FIFO and IDMAC bus mode, and finally enable card detection.
 */
1261 int mshci_add_host(struct mshci_host *host)
1263 struct mmc_host *mmc;
1266 WARN_ON(host == NULL);
1273 host->quirks = debug_quirks;
1275 mshci_reset(host, RESET_ALL);
1277 host->version = mshci_readl(host, MSHCI_VERID);
1279 /* there are no reasons not to use DMA */
1280 host->flags |= MSHCI_USE_IDMA;
1282 if (host->flags & MSHCI_USE_IDMA) {
1283 /* We need to allocate descriptors for all sg entries
1284 * 128 transfer for each of those entries. */
/* NOTE(review): (128 * 2 + 1) * 4 bytes here vs. 128 *
 * sizeof(struct mshci_idmac) at dma_map_single() time in
 * mshci_mdma_table_pre() — the sizes should agree; verify. */
1285 host->idma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
1286 if (!host->idma_desc) {
/* NOTE(review): idma_desc is NULL here, so this kfree is a no-op. */
1287 kfree(host->idma_desc);
1288 printk(KERN_WARNING "%s: Unable to allocate IDMA "
1289 "buffers. Falling back to standard DMA.\n",
1291 host->flags &= ~MSHCI_USE_IDMA;
1296 * If we use DMA, then it's up to the caller to set the DMA
1297 * mask, but PIO does not need the hw shim so we set a new
1298 * mask here in that case.
1300 if (!(host->flags & (MSHCI_USE_IDMA))) {
1301 host->dma_mask = DMA_BIT_MASK(64);
1302 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1305 printk("%s: Version ID 0x%x.\n",
1306 mmc_hostname(host->mmc), host->version);
1310 if (host->max_clk == 0) {
1311 if (!host->ops->get_max_clock) {
1313 "%s: Hardware doesn't specify base clock "
1314 "frequency.\n", mmc_hostname(mmc));
1317 host->max_clk = host->ops->get_max_clock(host);
1321 * Set host parameters.
1323 if(host->ops->get_ro)
1324 mshci_ops.get_ro = host->ops->get_ro;
1326 mmc->ops = &mshci_ops;
1327 mmc->f_min = host->max_clk / 512;
1328 mmc->f_max = host->max_clk;
1330 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_4_BIT_DATA;
1332 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34 |
1333 MMC_VDD_29_30 | MMC_VDD_30_31;
1335 spin_lock_init(&host->lock);
1337 mmc->max_hw_segs = 128;
1338 mmc->max_phys_segs = 128;
1340 /* Maximum number of sectors in one transfer */
1341 mmc->max_req_size = 524288;
1344 * Maximum segment size. Could be one segment with the maximum number
1345 * of bytes. When doing hardware scatter/gather, each entry cannot
1346 * be larger than 4 KiB though.
1348 if (host->flags & MSHCI_USE_IDMA)
1349 mmc->max_seg_size = 0x1000;
1351 mmc->max_seg_size = mmc->max_req_size;
1354 * from SD spec 2.0 and MMC spec 4.2, block size has been
1357 mmc->max_blk_size = 512;
1360 * Maximum block count.
1362 mmc->max_blk_count = 65535;
1367 tasklet_init(&host->card_tasklet,
1368 mshci_tasklet_card, (unsigned long)host);
1369 tasklet_init(&host->finish_tasklet,
1370 mshci_tasklet_finish, (unsigned long)host);
1372 setup_timer(&host->timer, mshci_timeout_timer, (unsigned long)host);
1374 ret = request_irq(host->irq, mshci_irq, IRQF_SHARED,
1375 mmc_hostname(mmc), host);
1379 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
1380 if (IS_ERR(host->vmmc)) {
1381 printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
1384 regulator_enable(host->vmmc);
1388 mshci_fifo_init(host);
1390 mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
1393 /* set debounce filter value*/
1394 mshci_writel(host, 0xffffff, MSHCI_DEBNCE);
1396 /* clear card type. set 1bit mode */
1397 mshci_writel(host, 0x0, MSHCI_CTYPE);
1399 /* set bus mode register for IDMAC */
1400 if (host->flags & MSHCI_USE_IDMA) {
1401 mshci_writel(host, BMOD_IDMAC_RESET, MSHCI_BMOD);
1403 while( (mshci_readl(host, MSHCI_BMOD) & BMOD_IDMAC_RESET )
1404 && --count ) ; /* nothing to do */
1406 mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
1407 (BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)), MSHCI_BMOD);
1409 #ifdef CONFIG_MMC_DEBUG
1410 mshci_dumpregs(host);
1417 printk(KERN_INFO "%s: MSHCI controller on %s [%s] using %s\n",
1418 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
1419 (host->flags & MSHCI_USE_IDMA) ? "IDMA" : "PIO");
1421 mshci_enable_card_detection(host);
/* Error unwind: kill tasklets installed above. */
1426 tasklet_kill(&host->card_tasklet);
1427 tasklet_kill(&host->finish_tasklet);
1432 EXPORT_SYMBOL_GPL(mshci_add_host);
/*
 * Unregister the host.  'dead' marks the controller as unusable (e.g.
 * hardware already gone): any in-flight request is failed with
 * -ENOMEDIUM and no final reset is attempted on real hardware access
 * paths.  Tears down IRQ, timer, tasklets and the descriptor buffer.
 */
1434 void mshci_remove_host(struct mshci_host *host, int dead)
1436 unsigned long flags;
1439 spin_lock_irqsave(&host->lock, flags);
1441 host->flags |= MSHCI_DEVICE_DEAD;
1444 printk(KERN_ERR "%s: Controller removed during "
1445 " transfer!\n", mmc_hostname(host->mmc));
1447 host->mrq->cmd->error = -ENOMEDIUM;
1448 tasklet_schedule(&host->finish_tasklet);
1451 spin_unlock_irqrestore(&host->lock, flags);
1454 mshci_disable_card_detection(host);
1456 mmc_remove_host(host->mmc);
1459 mshci_reset(host, RESET_ALL);
1461 free_irq(host->irq, host);
1463 del_timer_sync(&host->timer);
1465 tasklet_kill(&host->card_tasklet);
1466 tasklet_kill(&host->finish_tasklet);
1468 kfree(host->idma_desc);
1470 host->idma_desc = NULL;
1471 host->align_buffer = NULL;
1474 EXPORT_SYMBOL_GPL(mshci_remove_host);
/* Release the mmc_host (and the embedded mshci_host) allocated by mshci_alloc_host(). */
1476 void mshci_free_host(struct mshci_host *host)
1478 mmc_free_host(host->mmc);
1481 EXPORT_SYMBOL_GPL(mshci_free_host);
1483 /*****************************************************************************\
1485 * Driver init/exit *
1487 \*****************************************************************************/
/* Module init: announce the driver; device binding happens in the platform glue. */
1489 static int __init mshci_drv_init(void)
1491 printk(KERN_INFO DRIVER_NAME
1492 ": Mobile Storage Host Controller Interface driver\n");
1497 static void __exit mshci_drv_exit(void)
1501 module_init(mshci_drv_init);
1502 module_exit(mshci_drv_exit);
1504 module_param(debug_quirks, uint, 0444);
1506 MODULE_AUTHOR("Jaehoon Chung <jh80.chung@samsung.com>");
1507 MODULE_DESCRIPTION("Mobile Storage Host Controller Interface core driver");
1508 MODULE_LICENSE("GPL");
1510 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");