// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSDHC controller driver.
 *
 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
 * Copyright (c) 2009 MontaVista Software, Inc.
 *
 * Authors: Xiaobo Xie <X.Xie@freescale.com>
 *	    Anton Vorontsov <avorontsov@ru.mvista.com>
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sys_soc.h>
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
27 #define VENDOR_V_22 0x12
28 #define VENDOR_V_23 0x13
30 #define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
32 struct esdhc_clk_fixup {
33 const unsigned int sd_dflt_max_clk;
34 const unsigned int max_clk[MMC_TIMING_NUM];
37 static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
38 .sd_dflt_max_clk = 25000000,
39 .max_clk[MMC_TIMING_MMC_HS] = 46500000,
40 .max_clk[MMC_TIMING_SD_HS] = 46500000,
43 static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
44 .sd_dflt_max_clk = 25000000,
45 .max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
46 .max_clk[MMC_TIMING_MMC_HS200] = 167000000,
49 static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
50 .sd_dflt_max_clk = 25000000,
51 .max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
52 .max_clk[MMC_TIMING_MMC_HS200] = 125000000,
55 static const struct esdhc_clk_fixup p1010_esdhc_clk = {
56 .sd_dflt_max_clk = 20000000,
57 .max_clk[MMC_TIMING_LEGACY] = 20000000,
58 .max_clk[MMC_TIMING_MMC_HS] = 42000000,
59 .max_clk[MMC_TIMING_SD_HS] = 40000000,
62 static const struct of_device_id sdhci_esdhc_of_match[] = {
63 { .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
64 { .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
65 { .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
66 { .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
67 { .compatible = "fsl,mpc8379-esdhc" },
68 { .compatible = "fsl,mpc8536-esdhc" },
69 { .compatible = "fsl,esdhc" },
72 MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
77 bool quirk_incorrect_hostver;
78 bool quirk_limited_clk_division;
79 bool quirk_unreliable_pulse_detection;
80 bool quirk_tuning_erratum_type1;
81 bool quirk_tuning_erratum_type2;
82 bool quirk_ignore_data_inhibit;
83 bool quirk_delay_before_data_reset;
84 bool quirk_trans_complete_erratum;
86 unsigned int peripheral_clock;
87 const struct esdhc_clk_fixup *clk_fixup;
/**
 * esdhc_readl_fixup - Fixup the value read from incompatible eSDHC register
 *		       to make it compatible with SD spec.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 32bit eSDHC register value on spec_reg address
 *
 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 * registers are 32 bits. There are differences in register size, register
 * address, register function, bit position and function between eSDHC spec
 * and SD spec.
 *
 * Return a fixed up register value
 */
106 static u32 esdhc_readl_fixup(struct sdhci_host *host,
107 int spec_reg, u32 value)
109 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
110 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
114 * The bit of ADMA flag in eSDHC is not compatible with standard
115 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
116 * supported by eSDHC.
117 * And for many FSL eSDHC controller, the reset value of field
118 * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
119 * only these vendor version is greater than 2.2/0x12 support ADMA.
121 if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
122 if (esdhc->vendor_ver > VENDOR_V_22) {
123 ret = value | SDHCI_CAN_DO_ADMA2;
128 * The DAT[3:0] line signal levels and the CMD line signal level are
129 * not compatible with standard SDHC register. The line signal levels
130 * DAT[7:0] are at bits 31:24 and the command line signal level is at
131 * bit 23. All other bits are the same as in the standard SDHC
134 if (spec_reg == SDHCI_PRESENT_STATE) {
135 ret = value & 0x000fffff;
136 ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
137 ret |= (value << 1) & SDHCI_CMD_LVL;
142 * DTS properties of mmc host are used to enable each speed mode
143 * according to soc and board capability. So clean up
144 * SDR50/SDR104/DDR50 support bits here.
146 if (spec_reg == SDHCI_CAPABILITIES_1) {
147 ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
148 SDHCI_SUPPORT_DDR50);
153 * Some controllers have unreliable Data Line Active
154 * bit for commands with busy signal. This affects
155 * Command Inhibit (data) bit. Just ignore it since
156 * MMC core driver has already polled card status
157 * with CMD13 after any command with busy siganl.
159 if ((spec_reg == SDHCI_PRESENT_STATE) &&
160 (esdhc->quirk_ignore_data_inhibit == true)) {
161 ret = value & ~SDHCI_DATA_INHIBIT;
169 static u16 esdhc_readw_fixup(struct sdhci_host *host,
170 int spec_reg, u32 value)
172 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
173 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
175 int shift = (spec_reg & 0x2) * 8;
177 if (spec_reg == SDHCI_TRANSFER_MODE)
178 return pltfm_host->xfer_mode_shadow;
180 if (spec_reg == SDHCI_HOST_VERSION)
181 ret = value & 0xffff;
183 ret = (value >> shift) & 0xffff;
184 /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
185 * vendor version and spec version information.
187 if ((spec_reg == SDHCI_HOST_VERSION) &&
188 (esdhc->quirk_incorrect_hostver))
189 ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
193 static u8 esdhc_readb_fixup(struct sdhci_host *host,
194 int spec_reg, u32 value)
198 int shift = (spec_reg & 0x3) * 8;
200 ret = (value >> shift) & 0xff;
203 * "DMA select" locates at offset 0x28 in SD specification, but on
204 * P5020 or P3041, it locates at 0x29.
206 if (spec_reg == SDHCI_HOST_CONTROL) {
207 /* DMA select is 22,23 bits in Protocol Control Register */
208 dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
209 /* fixup the result */
210 ret &= ~SDHCI_CTRL_DMA_MASK;
/**
 * esdhc_writel_fixup - Fixup the SD spec register value so that it could be
 *			written into eSDHC register.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 8/16/32bit SD spec register value that would be written
 * @old_value: 32bit eSDHC register value on spec_reg address
 *
 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 * registers are 32 bits. There are differences in register size, register
 * address, register function, bit position and function between eSDHC spec
 * and SD spec.
 *
 * Return a fixed up register value
 */
232 static u32 esdhc_writel_fixup(struct sdhci_host *host,
233 int spec_reg, u32 value, u32 old_value)
238 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
239 * when SYSCTL[RSTD] is set for some special operations.
240 * No any impact on other operation.
242 if (spec_reg == SDHCI_INT_ENABLE)
243 ret = value | SDHCI_INT_BLK_GAP;
250 static u32 esdhc_writew_fixup(struct sdhci_host *host,
251 int spec_reg, u16 value, u32 old_value)
253 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
254 int shift = (spec_reg & 0x2) * 8;
258 case SDHCI_TRANSFER_MODE:
260 * Postpone this write, we must do it together with a
261 * command write that is down below. Return old value.
263 pltfm_host->xfer_mode_shadow = value;
266 ret = (value << 16) | pltfm_host->xfer_mode_shadow;
270 ret = old_value & (~(0xffff << shift));
271 ret |= (value << shift);
273 if (spec_reg == SDHCI_BLOCK_SIZE) {
275 * Two last DMA bits are reserved, and first one is used for
276 * non-standard blksz of 4096 bytes that we don't support
277 * yet. So clear the DMA boundary bits.
279 ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
284 static u32 esdhc_writeb_fixup(struct sdhci_host *host,
285 int spec_reg, u8 value, u32 old_value)
290 int shift = (spec_reg & 0x3) * 8;
293 * eSDHC doesn't have a standard power control register, so we do
294 * nothing here to avoid incorrect operation.
296 if (spec_reg == SDHCI_POWER_CONTROL)
299 * "DMA select" location is offset 0x28 in SD specification, but on
300 * P5020 or P3041, it's located at 0x29.
302 if (spec_reg == SDHCI_HOST_CONTROL) {
304 * If host control register is not standard, exit
307 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
310 /* DMA select is 22,23 bits in Protocol Control Register */
311 dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
312 ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
313 tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
314 (old_value & SDHCI_CTRL_DMA_MASK);
315 ret = (ret & (~0xff)) | tmp;
317 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
318 ret &= ~ESDHC_HOST_CONTROL_RES;
322 ret = (old_value & (~(0xff << shift))) | (value << shift);
326 static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
331 if (reg == SDHCI_CAPABILITIES_1)
332 value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
334 value = ioread32be(host->ioaddr + reg);
336 ret = esdhc_readl_fixup(host, reg, value);
341 static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
346 if (reg == SDHCI_CAPABILITIES_1)
347 value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
349 value = ioread32(host->ioaddr + reg);
351 ret = esdhc_readl_fixup(host, reg, value);
356 static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
360 int base = reg & ~0x3;
362 value = ioread32be(host->ioaddr + base);
363 ret = esdhc_readw_fixup(host, reg, value);
367 static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
371 int base = reg & ~0x3;
373 value = ioread32(host->ioaddr + base);
374 ret = esdhc_readw_fixup(host, reg, value);
378 static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
382 int base = reg & ~0x3;
384 value = ioread32be(host->ioaddr + base);
385 ret = esdhc_readb_fixup(host, reg, value);
389 static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
393 int base = reg & ~0x3;
395 value = ioread32(host->ioaddr + base);
396 ret = esdhc_readb_fixup(host, reg, value);
400 static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
404 value = esdhc_writel_fixup(host, reg, val, 0);
405 iowrite32be(value, host->ioaddr + reg);
408 static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
412 value = esdhc_writel_fixup(host, reg, val, 0);
413 iowrite32(value, host->ioaddr + reg);
416 static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
418 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
419 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
420 int base = reg & ~0x3;
424 value = ioread32be(host->ioaddr + base);
425 ret = esdhc_writew_fixup(host, reg, val, value);
426 if (reg != SDHCI_TRANSFER_MODE)
427 iowrite32be(ret, host->ioaddr + base);
429 /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
430 * 1us later after ESDHC_EXTN is set.
432 if (base == ESDHC_SYSTEM_CONTROL_2) {
433 if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
434 esdhc->in_sw_tuning) {
436 ret |= ESDHC_SMPCLKSEL;
437 iowrite32be(ret, host->ioaddr + base);
442 static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
444 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
445 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
446 int base = reg & ~0x3;
450 value = ioread32(host->ioaddr + base);
451 ret = esdhc_writew_fixup(host, reg, val, value);
452 if (reg != SDHCI_TRANSFER_MODE)
453 iowrite32(ret, host->ioaddr + base);
455 /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
456 * 1us later after ESDHC_EXTN is set.
458 if (base == ESDHC_SYSTEM_CONTROL_2) {
459 if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
460 esdhc->in_sw_tuning) {
462 ret |= ESDHC_SMPCLKSEL;
463 iowrite32(ret, host->ioaddr + base);
468 static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
470 int base = reg & ~0x3;
474 value = ioread32be(host->ioaddr + base);
475 ret = esdhc_writeb_fixup(host, reg, val, value);
476 iowrite32be(ret, host->ioaddr + base);
479 static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
481 int base = reg & ~0x3;
485 value = ioread32(host->ioaddr + base);
486 ret = esdhc_writeb_fixup(host, reg, val, value);
487 iowrite32(ret, host->ioaddr + base);
/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
 * and Block Gap Event(IRQSTAT[BGE]) are also set.
 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
 * and re-issue the entire read transaction from beginning.
 */
497 static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
499 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
500 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
505 applicable = (intmask & SDHCI_INT_DATA_END) &&
506 (intmask & SDHCI_INT_BLK_GAP) &&
507 (esdhc->vendor_ver == VENDOR_V_23);
511 host->data->error = 0;
512 dmastart = sg_dma_address(host->data->sg);
513 dmanow = dmastart + host->data->bytes_xfered;
515 * Force update to the next DMA block boundary.
517 dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
518 SDHCI_DEFAULT_BOUNDARY_SIZE;
519 host->data->bytes_xfered = dmanow - dmastart;
520 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
523 static int esdhc_of_enable_dma(struct sdhci_host *host)
526 struct device *dev = mmc_dev(host->mmc);
528 if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
529 of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
530 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
532 value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
534 if (of_dma_is_coherent(dev->of_node))
535 value |= ESDHC_DMA_SNOOP;
537 value &= ~ESDHC_DMA_SNOOP;
539 sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
543 static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
545 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
546 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
548 if (esdhc->peripheral_clock)
549 return esdhc->peripheral_clock;
551 return pltfm_host->clock;
554 static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
556 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
557 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
560 if (esdhc->peripheral_clock)
561 clock = esdhc->peripheral_clock;
563 clock = pltfm_host->clock;
564 return clock / 256 / 16;
567 static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
569 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
570 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
574 clk_en = ESDHC_CLOCK_SDCLKEN;
577 * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
580 if (esdhc->vendor_ver <= VENDOR_V_22)
581 clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
584 val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
591 sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
594 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
595 * wait clock stable bit which does not exist.
597 timeout = ktime_add_ms(ktime_get(), 20);
598 while (esdhc->vendor_ver > VENDOR_V_22) {
599 bool timedout = ktime_after(ktime_get(), timeout);
601 if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
604 pr_err("%s: Internal clock never stabilised.\n",
605 mmc_hostname(host->mmc));
608 usleep_range(10, 20);
612 static void esdhc_flush_async_fifo(struct sdhci_host *host)
617 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
618 val |= ESDHC_FLUSH_ASYNC_FIFO;
619 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
622 timeout = ktime_add_ms(ktime_get(), 20);
624 bool timedout = ktime_after(ktime_get(), timeout);
626 if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
627 ESDHC_FLUSH_ASYNC_FIFO))
630 pr_err("%s: flushing asynchronous FIFO timeout.\n",
631 mmc_hostname(host->mmc));
634 usleep_range(10, 20);
638 static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
640 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
641 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
642 unsigned int pre_div = 1, div = 1;
643 unsigned int clock_fixup = 0;
648 host->mmc->actual_clock = 0;
649 esdhc_clock_enable(host, false);
653 /* Start pre_div at 2 for vendor version < 2.3. */
654 if (esdhc->vendor_ver < VENDOR_V_23)
657 /* Fix clock value. */
658 if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
659 esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
660 clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
661 else if (esdhc->clk_fixup)
662 clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
664 if (clock_fixup == 0 || clock < clock_fixup)
667 /* Calculate pre_div and div. */
668 while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
671 while (host->max_clk / pre_div / div > clock_fixup && div < 16)
674 esdhc->div_ratio = pre_div * div;
676 /* Limit clock division for HS400 200MHz clock for quirk. */
677 if (esdhc->quirk_limited_clk_division &&
678 clock == MMC_HS200_MAX_DTR &&
679 (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
680 host->flags & SDHCI_HS400_TUNING)) {
681 if (esdhc->div_ratio <= 4) {
684 } else if (esdhc->div_ratio <= 8) {
687 } else if (esdhc->div_ratio <= 12) {
691 pr_warn("%s: using unsupported clock division.\n",
692 mmc_hostname(host->mmc));
694 esdhc->div_ratio = pre_div * div;
697 host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
699 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
700 clock, host->mmc->actual_clock);
702 /* Set clock division into register. */
706 esdhc_clock_enable(host, false);
708 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
709 temp &= ~ESDHC_CLOCK_MASK;
710 temp |= ((div << ESDHC_DIVIDER_SHIFT) |
711 (pre_div << ESDHC_PREDIV_SHIFT));
712 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
715 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
716 * wait clock stable bit which does not exist.
718 timeout = ktime_add_ms(ktime_get(), 20);
719 while (esdhc->vendor_ver > VENDOR_V_22) {
720 bool timedout = ktime_after(ktime_get(), timeout);
722 if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
725 pr_err("%s: Internal clock never stabilised.\n",
726 mmc_hostname(host->mmc));
729 usleep_range(10, 20);
732 /* Additional setting for HS400. */
733 if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
734 clock == MMC_HS200_MAX_DTR) {
735 temp = sdhci_readl(host, ESDHC_TBCTL);
736 sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
737 temp = sdhci_readl(host, ESDHC_SDCLKCTL);
738 sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
739 esdhc_clock_enable(host, true);
741 temp = sdhci_readl(host, ESDHC_DLLCFG0);
742 temp |= ESDHC_DLL_ENABLE;
743 if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
744 temp |= ESDHC_DLL_FREQ_SEL;
745 sdhci_writel(host, temp, ESDHC_DLLCFG0);
746 temp = sdhci_readl(host, ESDHC_TBCTL);
747 sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
749 esdhc_clock_enable(host, false);
750 esdhc_flush_async_fifo(host);
752 esdhc_clock_enable(host, true);
755 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
759 ctrl = sdhci_readl(host, ESDHC_PROCTL);
760 ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
762 case MMC_BUS_WIDTH_8:
763 ctrl |= ESDHC_CTRL_8BITBUS;
766 case MMC_BUS_WIDTH_4:
767 ctrl |= ESDHC_CTRL_4BITBUS;
774 sdhci_writel(host, ctrl, ESDHC_PROCTL);
777 static void esdhc_reset(struct sdhci_host *host, u8 mask)
779 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
780 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
781 u32 val, bus_width = 0;
784 * Add delay to make sure all the DMA transfers are finished
787 if (esdhc->quirk_delay_before_data_reset &&
788 (mask & SDHCI_RESET_DATA) &&
789 (host->flags & SDHCI_REQ_USE_DMA))
793 * Save bus-width for eSDHC whose vendor version is 2.2
794 * or lower for data reset.
796 if ((mask & SDHCI_RESET_DATA) &&
797 (esdhc->vendor_ver <= VENDOR_V_22)) {
798 val = sdhci_readl(host, ESDHC_PROCTL);
799 bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
802 sdhci_reset(host, mask);
805 * Restore bus-width setting and interrupt registers for eSDHC
806 * whose vendor version is 2.2 or lower for data reset.
808 if ((mask & SDHCI_RESET_DATA) &&
809 (esdhc->vendor_ver <= VENDOR_V_22)) {
810 val = sdhci_readl(host, ESDHC_PROCTL);
811 val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
813 sdhci_writel(host, val, ESDHC_PROCTL);
815 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
816 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
820 * Some bits have to be cleaned manually for eSDHC whose spec
821 * version is higher than 3.0 for all reset.
823 if ((mask & SDHCI_RESET_ALL) &&
824 (esdhc->spec_ver >= SDHCI_SPEC_300)) {
825 val = sdhci_readl(host, ESDHC_TBCTL);
827 sdhci_writel(host, val, ESDHC_TBCTL);
830 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
833 if (esdhc->quirk_unreliable_pulse_detection) {
834 val = sdhci_readl(host, ESDHC_DLLCFG1);
835 val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
836 sdhci_writel(host, val, ESDHC_DLLCFG1);
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is a
 * SDHC IO VSEL control register on SCFG for some platforms. It's
 * used to support SDHC IO voltage switching.
 */
846 static const struct of_device_id scfg_device_ids[] = {
847 { .compatible = "fsl,t1040-scfg", },
848 { .compatible = "fsl,ls1012a-scfg", },
849 { .compatible = "fsl,ls1046a-scfg", },
853 /* SDHC IO VSEL control register definition */
854 #define SCFG_SDHCIOVSELCR 0x408
855 #define SDHCIOVSELCR_TGLEN 0x80000000
856 #define SDHCIOVSELCR_VSELVAL 0x60000000
857 #define SDHCIOVSELCR_SDHC_VS 0x00000001
859 static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
862 struct sdhci_host *host = mmc_priv(mmc);
863 struct device_node *scfg_node;
864 void __iomem *scfg_base = NULL;
869 * Signal Voltage Switching is only applicable for Host Controllers
872 if (host->version < SDHCI_SPEC_300)
875 val = sdhci_readl(host, ESDHC_PROCTL);
877 switch (ios->signal_voltage) {
878 case MMC_SIGNAL_VOLTAGE_330:
879 val &= ~ESDHC_VOLT_SEL;
880 sdhci_writel(host, val, ESDHC_PROCTL);
882 case MMC_SIGNAL_VOLTAGE_180:
883 scfg_node = of_find_matching_node(NULL, scfg_device_ids);
885 scfg_base = of_iomap(scfg_node, 0);
887 sdhciovselcr = SDHCIOVSELCR_TGLEN |
888 SDHCIOVSELCR_VSELVAL;
889 iowrite32be(sdhciovselcr,
890 scfg_base + SCFG_SDHCIOVSELCR);
892 val |= ESDHC_VOLT_SEL;
893 sdhci_writel(host, val, ESDHC_PROCTL);
896 sdhciovselcr = SDHCIOVSELCR_TGLEN |
897 SDHCIOVSELCR_SDHC_VS;
898 iowrite32be(sdhciovselcr,
899 scfg_base + SCFG_SDHCIOVSELCR);
902 val |= ESDHC_VOLT_SEL;
903 sdhci_writel(host, val, ESDHC_PROCTL);
911 static struct soc_device_attribute soc_tuning_erratum_type1[] = {
912 { .family = "QorIQ T1023", },
913 { .family = "QorIQ T1040", },
914 { .family = "QorIQ T2080", },
915 { .family = "QorIQ LS1021A", },
919 static struct soc_device_attribute soc_tuning_erratum_type2[] = {
920 { .family = "QorIQ LS1012A", },
921 { .family = "QorIQ LS1043A", },
922 { .family = "QorIQ LS1046A", },
923 { .family = "QorIQ LS1080A", },
924 { .family = "QorIQ LS2080A", },
925 { .family = "QorIQ LA1575A", },
929 static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
933 esdhc_clock_enable(host, false);
934 esdhc_flush_async_fifo(host);
936 val = sdhci_readl(host, ESDHC_TBCTL);
941 sdhci_writel(host, val, ESDHC_TBCTL);
943 esdhc_clock_enable(host, true);
946 static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
951 /* Write TBCTL[11:8]=4'h8 */
952 val = sdhci_readl(host, ESDHC_TBCTL);
955 sdhci_writel(host, val, ESDHC_TBCTL);
959 /* Read TBCTL[31:0] register and rewrite again */
960 val = sdhci_readl(host, ESDHC_TBCTL);
961 sdhci_writel(host, val, ESDHC_TBCTL);
965 /* Read the TBSTAT[31:0] register twice */
966 val = sdhci_readl(host, ESDHC_TBSTAT);
967 val = sdhci_readl(host, ESDHC_TBSTAT);
969 *window_end = val & 0xff;
970 *window_start = (val >> 8) & 0xff;
973 static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
976 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
977 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
978 u8 start_ptr, end_ptr;
980 if (esdhc->quirk_tuning_erratum_type1) {
981 *window_start = 5 * esdhc->div_ratio;
982 *window_end = 3 * esdhc->div_ratio;
986 esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
988 /* Reset data lines by setting ESDHCCTL[RSTD] */
989 sdhci_reset(host, SDHCI_RESET_DATA);
990 /* Write 32'hFFFF_FFFF to IRQSTAT register */
991 sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
993 /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
994 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
995 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
996 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
999 if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
1000 *window_start = 8 * esdhc->div_ratio;
1001 *window_end = 4 * esdhc->div_ratio;
1003 *window_start = 5 * esdhc->div_ratio;
1004 *window_end = 3 * esdhc->div_ratio;
1008 static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
1009 u8 window_start, u8 window_end)
1011 struct sdhci_host *host = mmc_priv(mmc);
1012 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1013 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1017 /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
1018 val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
1019 ESDHC_WNDW_STRT_PTR_MASK;
1020 val |= window_end & ESDHC_WNDW_END_PTR_MASK;
1021 sdhci_writel(host, val, ESDHC_TBPTR);
1023 /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
1024 val = sdhci_readl(host, ESDHC_TBCTL);
1025 val &= ~ESDHC_TB_MODE_MASK;
1026 val |= ESDHC_TB_MODE_SW;
1027 sdhci_writel(host, val, ESDHC_TBCTL);
1029 esdhc->in_sw_tuning = true;
1030 ret = sdhci_execute_tuning(mmc, opcode);
1031 esdhc->in_sw_tuning = false;
1035 static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1037 struct sdhci_host *host = mmc_priv(mmc);
1038 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1039 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1040 u8 window_start, window_end;
1041 int ret, retries = 1;
1046 /* For tuning mode, the sd clock divisor value
1047 * must be larger than 3 according to reference manual.
1049 clk = esdhc->peripheral_clock / 3;
1050 if (host->clock > clk)
1051 esdhc_of_set_clock(host, clk);
1053 esdhc_tuning_block_enable(host, true);
1055 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1058 if (esdhc->quirk_limited_clk_division &&
1060 esdhc_of_set_clock(host, host->clock);
1063 val = sdhci_readl(host, ESDHC_TBCTL);
1064 val &= ~ESDHC_TB_MODE_MASK;
1065 val |= ESDHC_TB_MODE_3;
1066 sdhci_writel(host, val, ESDHC_TBCTL);
1068 ret = sdhci_execute_tuning(mmc, opcode);
1072 /* For type2 affected platforms of the tuning erratum,
1073 * tuning may succeed although eSDHC might not have
1074 * tuned properly. Need to check tuning window.
1076 if (esdhc->quirk_tuning_erratum_type2 &&
1077 !host->tuning_err) {
1078 esdhc_tuning_window_ptr(host, &window_start,
1080 if (abs(window_start - window_end) >
1081 (4 * esdhc->div_ratio + 2))
1082 host->tuning_err = -EAGAIN;
1085 /* If HW tuning fails and triggers erratum,
1088 ret = host->tuning_err;
1089 if (ret == -EAGAIN &&
1090 (esdhc->quirk_tuning_erratum_type1 ||
1091 esdhc->quirk_tuning_erratum_type2)) {
1092 /* Recover HS400 tuning flag */
1094 host->flags |= SDHCI_HS400_TUNING;
1095 pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
1098 esdhc_prepare_sw_tuning(host, &window_start,
1100 ret = esdhc_execute_sw_tuning(mmc, opcode,
1106 /* Retry both HW/SW tuning with reduced clock. */
1107 ret = host->tuning_err;
1108 if (ret == -EAGAIN && retries) {
1109 /* Recover HS400 tuning flag */
1111 host->flags |= SDHCI_HS400_TUNING;
1113 clk = host->max_clk / (esdhc->div_ratio + 1);
1114 esdhc_of_set_clock(host, clk);
1115 pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
1123 } while (retries--);
1126 esdhc_tuning_block_enable(host, false);
1127 } else if (hs400_tuning) {
1128 val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1129 val |= ESDHC_FLW_CTL_BG;
1130 sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
1136 static void esdhc_set_uhs_signaling(struct sdhci_host *host,
1137 unsigned int timing)
1142 * There are specific registers setting for HS400 mode.
1143 * Clean all of them if controller is in HS400 mode to
1144 * exit HS400 mode before re-setting any speed mode.
1146 val = sdhci_readl(host, ESDHC_TBCTL);
1147 if (val & ESDHC_HS400_MODE) {
1148 val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1149 val &= ~ESDHC_FLW_CTL_BG;
1150 sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
1152 val = sdhci_readl(host, ESDHC_SDCLKCTL);
1153 val &= ~ESDHC_CMD_CLK_CTL;
1154 sdhci_writel(host, val, ESDHC_SDCLKCTL);
1156 esdhc_clock_enable(host, false);
1157 val = sdhci_readl(host, ESDHC_TBCTL);
1158 val &= ~ESDHC_HS400_MODE;
1159 sdhci_writel(host, val, ESDHC_TBCTL);
1160 esdhc_clock_enable(host, true);
1162 val = sdhci_readl(host, ESDHC_DLLCFG0);
1163 val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
1164 sdhci_writel(host, val, ESDHC_DLLCFG0);
1166 val = sdhci_readl(host, ESDHC_TBCTL);
1167 val &= ~ESDHC_HS400_WNDW_ADJUST;
1168 sdhci_writel(host, val, ESDHC_TBCTL);
1170 esdhc_tuning_block_enable(host, false);
1173 if (timing == MMC_TIMING_MMC_HS400)
1174 esdhc_tuning_block_enable(host, true);
1176 sdhci_set_uhs_signaling(host, timing);
1179 static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1181 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1182 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1185 if (esdhc->quirk_trans_complete_erratum) {
1186 command = SDHCI_GET_CMD(sdhci_readw(host,
1188 if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1189 sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1190 intmask & SDHCI_INT_DATA_END) {
1191 intmask &= ~SDHCI_INT_DATA_END;
1192 sdhci_writel(host, SDHCI_INT_DATA_END,
1199 #ifdef CONFIG_PM_SLEEP
1200 static u32 esdhc_proctl;
1201 static int esdhc_of_suspend(struct device *dev)
1203 struct sdhci_host *host = dev_get_drvdata(dev);
1205 esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
1207 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1208 mmc_retune_needed(host->mmc);
1210 return sdhci_suspend_host(host);
1213 static int esdhc_of_resume(struct device *dev)
1215 struct sdhci_host *host = dev_get_drvdata(dev);
1216 int ret = sdhci_resume_host(host);
1219 /* Isn't this already done by sdhci_resume_host() ? --rmk */
1220 esdhc_of_enable_dma(host);
1221 sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
1227 static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
1231 static const struct sdhci_ops sdhci_esdhc_be_ops = {
1232 .read_l = esdhc_be_readl,
1233 .read_w = esdhc_be_readw,
1234 .read_b = esdhc_be_readb,
1235 .write_l = esdhc_be_writel,
1236 .write_w = esdhc_be_writew,
1237 .write_b = esdhc_be_writeb,
1238 .set_clock = esdhc_of_set_clock,
1239 .enable_dma = esdhc_of_enable_dma,
1240 .get_max_clock = esdhc_of_get_max_clock,
1241 .get_min_clock = esdhc_of_get_min_clock,
1242 .adma_workaround = esdhc_of_adma_workaround,
1243 .set_bus_width = esdhc_pltfm_set_bus_width,
1244 .reset = esdhc_reset,
1245 .set_uhs_signaling = esdhc_set_uhs_signaling,
1249 static const struct sdhci_ops sdhci_esdhc_le_ops = {
1250 .read_l = esdhc_le_readl,
1251 .read_w = esdhc_le_readw,
1252 .read_b = esdhc_le_readb,
1253 .write_l = esdhc_le_writel,
1254 .write_w = esdhc_le_writew,
1255 .write_b = esdhc_le_writeb,
1256 .set_clock = esdhc_of_set_clock,
1257 .enable_dma = esdhc_of_enable_dma,
1258 .get_max_clock = esdhc_of_get_max_clock,
1259 .get_min_clock = esdhc_of_get_min_clock,
1260 .adma_workaround = esdhc_of_adma_workaround,
1261 .set_bus_width = esdhc_pltfm_set_bus_width,
1262 .reset = esdhc_reset,
1263 .set_uhs_signaling = esdhc_set_uhs_signaling,
/* Platform data for big-endian controllers. */
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  /* card detection is broken on the PowerPC variants only */
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};
/* Platform data for little-endian controllers. */
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_ops,
};
/*
 * SoCs whose controller reports a wrong SDHCI host version; matched in
 * esdhc_init() to set quirk_incorrect_hostver.
 */
static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ /* sentinel */ }
};
/*
 * SoCs that only support a limited set of SD clock divisors; matched in
 * esdhc_init() to set quirk_limited_clk_division.
 */
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ /* sentinel */ }
};
/*
 * SoCs with unreliable pulse detection; matched in esdhc_init() to set
 * quirk_unreliable_pulse_detection.
 */
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ /* sentinel */ }
};
/*
 * One-time controller setup at probe: read the controller version, apply
 * SoC- and board-specific quirks, resolve the eSDHC base clock and select
 * the peripheral clock source when one is provided.
 */
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	/* Split the version register into vendor and SDHCI spec fields. */
	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			     SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	/* Per-compatible clock caps table (may be absent for older entries). */
	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;

	/* P2020 erratum workarounds (see probe for the related quirks). */
	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		esdhc->quirk_delay_before_data_reset = true;
		esdhc->quirk_trans_complete_erratum = true;
	}

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock would be assigned with a value
		 * which is eSDHC base clock when use periperal clock.
		 * For some platforms, the clock value got by common clk
		 * API is peripheral clock while the eSDHC base clock is
		 * 1/2 peripheral clock.
		 */
		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
		else
			esdhc->peripheral_clock = clk_get_rate(clk);

		clk_put(clk);
	}

	/* Gate clocks while flipping the clock-select bit. */
	esdhc_clock_enable(host, false);
	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	/*
	 * This bit is not able to be reset by SDHCI_RESET_ALL. Need to
	 * initialize it as 1 or 0 once, to override the different value
	 * which may be configured in bootloader.
	 */
	if (esdhc->peripheral_clock)
		val |= ESDHC_PERIPHERAL_CLK_SEL;
	else
		val &= ~ESDHC_PERIPHERAL_CLK_SEL;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
	esdhc_clock_enable(host, true);
}
/*
 * mmc_host_ops.hs400_prepare_ddr hook: disable the tuning block before the
 * core switches the bus to DDR on the way to HS400.
 */
static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
{
	esdhc_tuning_block_enable(mmc_priv(mmc), false);
	return 0;
}
1384 static int sdhci_esdhc_probe(struct platform_device *pdev)
1386 struct sdhci_host *host;
1387 struct device_node *np;
1388 struct sdhci_pltfm_host *pltfm_host;
1389 struct sdhci_esdhc *esdhc;
1392 np = pdev->dev.of_node;
1394 if (of_property_read_bool(np, "little-endian"))
1395 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
1396 sizeof(struct sdhci_esdhc));
1398 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
1399 sizeof(struct sdhci_esdhc));
1402 return PTR_ERR(host);
1404 host->mmc_host_ops.start_signal_voltage_switch =
1405 esdhc_signal_voltage_switch;
1406 host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
1407 host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
1408 host->tuning_delay = 1;
1410 esdhc_init(pdev, host);
1412 sdhci_get_of_property(pdev);
1414 pltfm_host = sdhci_priv(host);
1415 esdhc = sdhci_pltfm_priv(pltfm_host);
1416 if (soc_device_match(soc_tuning_erratum_type1))
1417 esdhc->quirk_tuning_erratum_type1 = true;
1419 esdhc->quirk_tuning_erratum_type1 = false;
1421 if (soc_device_match(soc_tuning_erratum_type2))
1422 esdhc->quirk_tuning_erratum_type2 = true;
1424 esdhc->quirk_tuning_erratum_type2 = false;
1426 if (esdhc->vendor_ver == VENDOR_V_22)
1427 host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
1429 if (esdhc->vendor_ver > VENDOR_V_22)
1430 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1432 if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
1433 host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
1434 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1437 if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
1438 of_device_is_compatible(np, "fsl,p5020-esdhc") ||
1439 of_device_is_compatible(np, "fsl,p4080-esdhc") ||
1440 of_device_is_compatible(np, "fsl,p1020-esdhc") ||
1441 of_device_is_compatible(np, "fsl,t1040-esdhc"))
1442 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1444 if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
1445 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1447 esdhc->quirk_ignore_data_inhibit = false;
1448 if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1450 * Freescale messed up with P2020 as it has a non-standard
1451 * host control register
1453 host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
1454 esdhc->quirk_ignore_data_inhibit = true;
1457 /* call to generic mmc_of_parse to support additional capabilities */
1458 ret = mmc_of_parse(host->mmc);
1462 mmc_of_parse_voltage(np, &host->ocr_mask);
1464 ret = sdhci_add_host(host);
1470 sdhci_pltfm_free(pdev);
/* Platform driver glue; device matching is via the OF table near the top. */
static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");