// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2007, 2010-2011 Freescale Semiconductor, Inc
 * Copyright 2019-2021 NXP
 *
 * Based vaguely on the pxa mmc code:
 * Kyle Harris, Nexus Technologies, Inc. kharris@nexus-tech.net
 */
#include <common.h>
#include <hwconfig.h>
#include <mmc.h>
#include <malloc.h>
#include <fsl_esdhc.h>
#include <fdt_support.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/dma-mapping.h>
#include <sdhci.h>
#include "../../board/freescale/common/qixis.h"

DECLARE_GLOBAL_DATA_PTR;
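/*
 * Memory-mapped eSDHC register block. The layout mirrors the controller's
 * register offsets; the reservedN padding keeps the later members at their
 * hardware offsets.
 */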
struct fsl_esdhc {
	uint	dsaddr;		/* SDMA system address register */
	uint	blkattr;	/* Block attributes register */
	uint	cmdarg;		/* Command argument register */
	uint	xfertyp;	/* Transfer type register */
	uint	cmdrsp0;	/* Command response 0 register */
	uint	cmdrsp1;	/* Command response 1 register */
	uint	cmdrsp2;	/* Command response 2 register */
	uint	cmdrsp3;	/* Command response 3 register */
	uint	datport;	/* Buffer data port register */
	uint	prsstat;	/* Present state register */
	uint	proctl;		/* Protocol control register */
	uint	sysctl;		/* System Control Register */
	uint	irqstat;	/* Interrupt status register */
	uint	irqstaten;	/* Interrupt status enable register */
	uint	irqsigen;	/* Interrupt signal enable register */
	uint	autoc12err;	/* Auto CMD error status register */
	uint	hostcapblt;	/* Host controller capabilities register */
	uint	wml;		/* Watermark level register */
	char	reserved1[8];	/* reserved */
	uint	fevt;		/* Force event register */
	uint	admaes;		/* ADMA error status register */
	uint	adsaddrl;	/* ADMA system address low register */
	uint	adsaddrh;	/* ADMA system address high register */
	char	reserved2[156];	/* reserved */
	uint	hostver;	/* Host controller version register */
	char	reserved3[4];	/* reserved */
	uint	dmaerraddr;	/* DMA error address register */
	char	reserved4[4];	/* reserved */
	uint	dmaerrattr;	/* DMA error attribute register */
	char	reserved5[4];	/* reserved */
	uint	hostcapblt2;	/* Host controller capabilities register 2 */
	char	reserved6[8];	/* reserved */
	uint	tbctl;		/* Tuning block control register */
	char	reserved7[32];	/* reserved */
	uint	sdclkctl;	/* SD clock control register */
	uint	sdtimingctl;	/* SD timing control register */
	char	reserved8[20];	/* reserved */
	uint	dllcfg0;	/* DLL config 0 register */
	uint	dllcfg1;	/* DLL config 1 register */
	char	reserved9[8];	/* reserved */
	uint	dllstat0;	/* DLL status 0 register */
	char	reserved10[664];/* reserved */
	uint	esdhcctl;	/* eSDHC control register */
};
struct fsl_esdhc_plat {
	struct mmc_config cfg;
	struct mmc mmc;
};
/**
 * struct fsl_esdhc_priv
 *
 * @esdhc_regs: registers of the sdhc controller
 * @sdhc_clk: Current clk of the sdhc controller
 * @bus_width: bus width, 1bit, 4bit or 8bit
 *
 * Following is used when Driver Model is enabled for MMC
 * @dev: pointer for the device
 * @cd_gpio: gpio for card detection
 * @wp_gpio: gpio for write protection
 */
struct fsl_esdhc_priv {
	struct fsl_esdhc *esdhc_regs;
	unsigned int sdhc_clk;
	bool is_sdhc_per_clk;
	unsigned int clock;
#if !CONFIG_IS_ENABLED(DM_MMC)
	struct mmc *mmc;
#endif
	struct udevice *dev;
	struct sdhci_adma_desc *adma_desc_table;
	dma_addr_t dma_addr;
};
/* Return the XFERTYP flags for a given command and data packet */
static uint esdhc_xfertyp(struct mmc_cmd *cmd, struct mmc_data *data)
{
	uint xfertyp = 0;

	if (data) {
		xfertyp |= XFERTYP_DPSEL;
		if (!IS_ENABLED(CONFIG_SYS_FSL_ESDHC_USE_PIO) &&
		    cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK &&
		    cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK_HS200)
			xfertyp |= XFERTYP_DMAEN;
		if (data->blocks > 1) {
			xfertyp |= XFERTYP_MSBSEL;
			xfertyp |= XFERTYP_BCEN;
			if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC111))
				xfertyp |= XFERTYP_AC12EN;
		}

		if (data->flags & MMC_DATA_READ)
			xfertyp |= XFERTYP_DTDSEL;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		xfertyp |= XFERTYP_CCCEN;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		xfertyp |= XFERTYP_CICEN;
	if (cmd->resp_type & MMC_RSP_136)
		xfertyp |= XFERTYP_RSPTYP_136;
	else if (cmd->resp_type & MMC_RSP_BUSY)
		xfertyp |= XFERTYP_RSPTYP_48_BUSY;
	else if (cmd->resp_type & MMC_RSP_PRESENT)
		xfertyp |= XFERTYP_RSPTYP_48;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		xfertyp |= XFERTYP_CMDTYP_ABORT;

	return XFERTYP_CMD(cmd->cmdidx) | xfertyp;
}
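/*
 * Illustrative example of how the flags combine: a multi-block DMA read with
 * an R1 response (e.g. CMD18) yields XFERTYP_CMD(18) | XFERTYP_DPSEL |
 * XFERTYP_DMAEN | XFERTYP_MSBSEL | XFERTYP_BCEN | XFERTYP_DTDSEL |
 * XFERTYP_CCCEN | XFERTYP_CICEN | XFERTYP_RSPTYP_48, plus XFERTYP_AC12EN
 * when the ESDHC111 erratum workaround is enabled.
 */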
/*
 * PIO read/write mode reduces performance since DMA is not used in this mode.
 */
static void esdhc_pio_read_write(struct fsl_esdhc_priv *priv,
				 struct mmc_data *data)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;
	uint blocks, size, irqstat, databuf;
	char *buffer;
	ulong start;

	if (data->flags & MMC_DATA_READ) {
		blocks = data->blocks;
		buffer = data->dest;
		while (blocks) {
			start = get_timer(0);
			size = data->blocksize;
			irqstat = esdhc_read32(&regs->irqstat);
			while (!(esdhc_read32(&regs->prsstat) & PRSSTAT_BREN)) {
				if (get_timer(start) > PIO_TIMEOUT) {
					printf("\nData Read Failed in PIO Mode.");
					return;
				}
			}
			while (size && (!(irqstat & IRQSTAT_TC))) {
				udelay(100); /* Wait before last byte transfer complete */
				irqstat = esdhc_read32(&regs->irqstat);
				databuf = in_le32(&regs->datport);
				*((uint *)buffer) = databuf;
				buffer += 4;
				size -= 4;
			}
			blocks--;
		}
	} else {
		blocks = data->blocks;
		buffer = (char *)data->src;
		while (blocks) {
			start = get_timer(0);
			size = data->blocksize;
			irqstat = esdhc_read32(&regs->irqstat);
			while (!(esdhc_read32(&regs->prsstat) & PRSSTAT_BWEN)) {
				if (get_timer(start) > PIO_TIMEOUT) {
					printf("\nData Write Failed in PIO Mode.");
					return;
				}
			}
			while (size && (!(irqstat & IRQSTAT_TC))) {
				udelay(100); /* Wait before last byte transfer complete */
				databuf = *((uint *)buffer);
				buffer += 4;
				size -= 4;
				irqstat = esdhc_read32(&regs->irqstat);
				out_le32(&regs->datport, databuf);
			}
			blocks--;
		}
	}
}
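/* Program the read or write watermark level, counted in 32-bit words */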
static void esdhc_setup_watermark_level(struct fsl_esdhc_priv *priv,
					struct mmc_data *data)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;
	uint wml_value = data->blocksize / 4;

	if (data->flags & MMC_DATA_READ) {
		if (wml_value > WML_RD_WML_MAX)
			wml_value = WML_RD_WML_MAX_VAL;

		esdhc_clrsetbits32(&regs->wml, WML_RD_WML_MASK, wml_value);
	} else {
		if (wml_value > WML_WR_WML_MAX)
			wml_value = WML_WR_WML_MAX_VAL;

		esdhc_clrsetbits32(&regs->wml, WML_WR_WML_MASK,
				   wml_value << 16);
	}
}
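/*
 * Map the data buffer for DMA and point the controller at it, preferring
 * ADMA2 descriptors when a descriptor table was allocated and falling back
 * to single-buffer SDMA otherwise.
 */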
static void esdhc_setup_dma(struct fsl_esdhc_priv *priv, struct mmc_data *data)
{
	uint trans_bytes = data->blocksize * data->blocks;
	struct fsl_esdhc *regs = priv->esdhc_regs;
	phys_addr_t adma_addr;
	void *buf;

	if (data->flags & MMC_DATA_WRITE)
		buf = (void *)data->src;
	else
		buf = data->dest;

	priv->dma_addr = dma_map_single(buf, trans_bytes,
					mmc_get_dma_dir(data));

	if (IS_ENABLED(CONFIG_FSL_ESDHC_SUPPORT_ADMA2) &&
	    priv->adma_desc_table) {
		debug("Using ADMA2\n");
		/* prefer ADMA2 if it is available */
		sdhci_prepare_adma_table(priv->adma_desc_table, data,
					 priv->dma_addr);
		adma_addr = virt_to_phys(priv->adma_desc_table);
		esdhc_write32(&regs->adsaddrl, lower_32_bits(adma_addr));
		if (IS_ENABLED(CONFIG_DMA_ADDR_T_64BIT))
			esdhc_write32(&regs->adsaddrh, upper_32_bits(adma_addr));
		esdhc_clrsetbits32(&regs->proctl, PROCTL_DMAS_MASK,
				   PROCTL_DMAS_ADMA2);
	} else {
		debug("Using SDMA\n");
		if (upper_32_bits(priv->dma_addr))
			printf("Cannot use 64 bit addresses with SDMA\n");
		esdhc_write32(&regs->dsaddr, lower_32_bits(priv->dma_addr));
		esdhc_clrsetbits32(&regs->proctl, PROCTL_DMAS_MASK,
				   PROCTL_DMAS_SDMA);
	}

	esdhc_write32(&regs->blkattr, data->blocks << 16 | data->blocksize);
}
static int esdhc_setup_data(struct fsl_esdhc_priv *priv, struct mmc *mmc,
			    struct mmc_data *data)
{
	int timeout;
	bool is_write = data->flags & MMC_DATA_WRITE;
	struct fsl_esdhc *regs = priv->esdhc_regs;

	if (is_write && !(esdhc_read32(&regs->prsstat) & PRSSTAT_WPSPL)) {
		printf("Can not write to locked SD card.\n");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_SYS_FSL_ESDHC_USE_PIO))
		esdhc_setup_watermark_level(priv, data);
	else
		esdhc_setup_dma(priv, data);

	/* Calculate the timeout period for data transactions */
	/*
	 * 1) Timeout period = (2^(timeout+13)) SD Clock cycles
	 * 2) The timeout period should be at least 0.250 s per the SD card
	 *    spec, so the number of SD clock cycles for 0.25 s should be at
	 *    least (SD Clock/sec * 0.25 sec) SD Clock cycles
	 *    = (mmc->clock * 1/4) SD Clock cycles
	 *
	 * => (2^(timeout+13)) >= mmc->clock * 1/4
	 * Taking log2 of both sides
	 * => timeout + 13 >= log2(mmc->clock/4)
	 * Rounding up to the next power of 2
	 * => timeout + 13 = log2(mmc->clock/4) + 1
	 * => timeout + 13 = fls(mmc->clock/4)
	 *
	 * However, the MMC spec says "It is strongly recommended for hosts to
	 * implement more than 500ms timeout value even if the card
	 * indicates the 250ms maximum busy length."  Even the previous
	 * value of 300ms is known to be insufficient for some cards.
	 * So use
	 * => timeout + 13 = fls(mmc->clock/2)
	 */
	timeout = fls(mmc->clock / 2);
	timeout -= 13;
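	/*
	 * Worked example (illustrative numbers): for a 50 MHz SD clock,
	 * fls(50000000 / 2) = fls(25000000) = 25, so timeout becomes
	 * 25 - 13 = 12, i.e. 2^(12+13) = ~33.5M clock cycles, roughly
	 * 0.67 s at 50 MHz -- comfortably above the 500 ms recommendation.
	 */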
	if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC_A001) &&
	    (timeout == 4 || timeout == 8 || timeout == 12))
		timeout++;

	if (IS_ENABLED(ESDHCI_QUIRK_BROKEN_TIMEOUT_VALUE))
		timeout = 0xE;

	esdhc_clrsetbits32(&regs->sysctl, SYSCTL_TIMEOUT_MASK, timeout << 16);

	return 0;
}
/*
 * Sends a command out on the bus. Takes the mmc pointer,
 * a command pointer, and an optional data pointer.
 */
static int esdhc_send_cmd_common(struct fsl_esdhc_priv *priv, struct mmc *mmc,
				 struct mmc_cmd *cmd, struct mmc_data *data)
{
	int err = 0;
	uint xfertyp;
	uint irqstat;
	u32 flags = IRQSTAT_CC | IRQSTAT_CTOE;
	struct fsl_esdhc *regs = priv->esdhc_regs;
	unsigned long start;

	if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC111) &&
	    cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		return 0;

	esdhc_write32(&regs->irqstat, -1);

	/* Wait for the bus to be idle */
	while ((esdhc_read32(&regs->prsstat) & PRSSTAT_CICHB) ||
	       (esdhc_read32(&regs->prsstat) & PRSSTAT_CIDHB))
		;

	while (esdhc_read32(&regs->prsstat) & PRSSTAT_DLA)
		;

	/* Set up for a data transfer if we have one */
	if (data) {
		err = esdhc_setup_data(priv, mmc, data);
		if (err)
			return err;
	}

	/* Figure out the transfer arguments */
	xfertyp = esdhc_xfertyp(cmd, data);

	esdhc_write32(&regs->irqsigen, 0);

	/* Send the command */
	esdhc_write32(&regs->cmdarg, cmd->cmdarg);
	esdhc_write32(&regs->xfertyp, xfertyp);

	if (cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
		flags = IRQSTAT_BRR;

	/* Wait for the command to complete */
	start = get_timer(0);
	while (!(esdhc_read32(&regs->irqstat) & flags)) {
		if (get_timer(start) > 1000) {
			err = -ETIMEDOUT;
			goto out;
		}
	}

	irqstat = esdhc_read32(&regs->irqstat);

	if (irqstat & CMD_ERR) {
		err = -ECOMM;
		goto out;
	}

	if (irqstat & IRQSTAT_CTOE) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Workaround for ESDHC errata ENGcm03648 */
	if (!data && (cmd->resp_type & MMC_RSP_BUSY)) {
		int timeout = 6000;

		/* Poll on DATA0 line for cmd with busy signal for 600 ms */
		while (timeout > 0 && !(esdhc_read32(&regs->prsstat) &
					PRSSTAT_DAT0)) {
			udelay(100);
			timeout--;
		}

		if (timeout <= 0) {
			printf("Timeout waiting for DAT0 to go high!\n");
			err = -ETIMEDOUT;
			goto out;
		}
	}

	/* Copy the response to the response buffer */
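	/*
	 * For 136-bit (R2) responses the controller drops the CRC byte, so
	 * each response word is shifted up by 8 bits and stitched to its
	 * neighbour to rebuild the layout the MMC core expects.
	 */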
	if (cmd->resp_type & MMC_RSP_136) {
		u32 cmdrsp3, cmdrsp2, cmdrsp1, cmdrsp0;

		cmdrsp3 = esdhc_read32(&regs->cmdrsp3);
		cmdrsp2 = esdhc_read32(&regs->cmdrsp2);
		cmdrsp1 = esdhc_read32(&regs->cmdrsp1);
		cmdrsp0 = esdhc_read32(&regs->cmdrsp0);
		cmd->response[0] = (cmdrsp3 << 8) | (cmdrsp2 >> 24);
		cmd->response[1] = (cmdrsp2 << 8) | (cmdrsp1 >> 24);
		cmd->response[2] = (cmdrsp1 << 8) | (cmdrsp0 >> 24);
		cmd->response[3] = (cmdrsp0 << 8);
	} else {
		cmd->response[0] = esdhc_read32(&regs->cmdrsp0);
	}

	/* Wait until all of the blocks are transferred */
	if (data) {
		if (IS_ENABLED(CONFIG_SYS_FSL_ESDHC_USE_PIO)) {
			esdhc_pio_read_write(priv, data);
		} else {
			flags = DATA_COMPLETE;
			if (cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
			    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
				flags = IRQSTAT_BRR;

			do {
				irqstat = esdhc_read32(&regs->irqstat);

				if (irqstat & IRQSTAT_DTOE) {
					err = -ETIMEDOUT;
					goto out;
				}

				if (irqstat & DATA_ERR) {
					err = -ECOMM;
					goto out;
				}
			} while ((irqstat & flags) != flags);

			/*
			 * Need to invalidate the dcache here again to avoid
			 * any cache-fill during the DMA operations, such as
			 * speculative pre-fetching etc.
			 */
			dma_unmap_single(priv->dma_addr,
					 data->blocks * data->blocksize,
					 mmc_get_dma_dir(data));
		}
	}

out:
	/* Reset CMD and DATA portions on error */
	if (err) {
		esdhc_write32(&regs->sysctl, esdhc_read32(&regs->sysctl) |
			      SYSCTL_RSTC);
		while (esdhc_read32(&regs->sysctl) & SYSCTL_RSTC)
			;

		if (data) {
			esdhc_write32(&regs->sysctl,
				      esdhc_read32(&regs->sysctl) |
				      SYSCTL_RSTD);
			while ((esdhc_read32(&regs->sysctl) & SYSCTL_RSTD))
				;
		}
	}

	esdhc_write32(&regs->irqstat, -1);

	return err;
}
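/*
 * Derive the SDCLKFS prescaler and DVS divisor for the requested card clock,
 * program SYSCTL, then wait for the internal clock to stabilise before
 * re-enabling the clock outputs (PEREN/CKEN).
 */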
static void set_sysctl(struct fsl_esdhc_priv *priv, struct mmc *mmc, uint clock)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;
	unsigned int sdhc_clk = priv->sdhc_clk;
	int pre_div = 1;
	int div = 1;
	u32 value, hostver, time_out;
	uint clk;

	if (clock < mmc->cfg->f_min)
		clock = mmc->cfg->f_min;

	while (sdhc_clk / (16 * pre_div) > clock && pre_div < 256)
		pre_div *= 2;

	while (sdhc_clk / (div * pre_div) > clock && div < 16)
		div++;
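	/*
	 * Example (illustrative numbers): with a 400 MHz controller clock and
	 * a 400 kHz target, pre_div settles at 64 (400 MHz / (16 * 64) =
	 * 390.6 kHz <= 400 kHz) and div at 16, giving an actual card clock of
	 * 400 MHz / 64 / 16 = ~390.6 kHz -- the closest rate that does not
	 * exceed the request.
	 */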
	if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_A011334) &&
	    clock == 200000000 && mmc->selected_mode == MMC_HS_400) {
		u32 div_ratio = pre_div * div;

		if (div_ratio <= 4) {
			pre_div = 4;
			div = 1;
		} else if (div_ratio <= 8) {
			pre_div = 4;
			div = 2;
		} else if (div_ratio <= 12) {
			pre_div = 4;
			div = 3;
		} else {
			printf("unsupported clock division.\n");
		}
	}
	mmc->clock = sdhc_clk / pre_div / div;
	priv->clock = mmc->clock;

	/* SDCLKFS encodes pre_div/2, DVS encodes div-1 */
	pre_div >>= 1;
	div -= 1;

	clk = (pre_div << 8) | (div << 4);

	esdhc_clrbits32(&regs->sysctl, SYSCTL_CKEN);

	esdhc_clrsetbits32(&regs->sysctl, SYSCTL_CLOCK_MASK, clk);

	/* Only newer eSDHC controllers set PRSSTAT_SDSTB flag */
	hostver = esdhc_read32(&priv->esdhc_regs->hostver);
	if (HOSTVER_VENDOR(hostver) <= VENDOR_V_22) {
		esdhc_setbits32(&regs->sysctl, SYSCTL_PEREN | SYSCTL_CKEN);
		return;
	}

	/* Wait for the internal clock to stabilise */
	time_out = 20000;
	value = PRSSTAT_SDSTB;
	while (!(esdhc_read32(&regs->prsstat) & value)) {
		if (time_out-- == 0) {
			printf("fsl_esdhc: Internal clock never stabilised.\n");
			break;
		}
	}

	esdhc_setbits32(&regs->sysctl, SYSCTL_PEREN | SYSCTL_CKEN);
}
static void esdhc_clock_control(struct fsl_esdhc_priv *priv, bool enable)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;
	u32 value, hostver, time_out;

	value = esdhc_read32(&regs->sysctl);

	if (enable)
		value |= SYSCTL_CKEN;
	else
		value &= ~SYSCTL_CKEN;

	esdhc_write32(&regs->sysctl, value);

	/* Only newer eSDHC controllers set PRSSTAT_SDSTB flag */
	hostver = esdhc_read32(&priv->esdhc_regs->hostver);
	if (HOSTVER_VENDOR(hostver) <= VENDOR_V_22)
		return;

	/* Wait for the internal clock to stabilise */
	time_out = 20000;
	value = PRSSTAT_SDSTB;
	while (!(esdhc_read32(&regs->prsstat) & value)) {
		if (time_out-- == 0) {
			printf("fsl_esdhc: Internal clock never stabilised.\n");
			break;
		}
	}
}
static void esdhc_flush_async_fifo(struct fsl_esdhc_priv *priv)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;
	u32 time_out;

	esdhc_setbits32(&regs->esdhcctl, ESDHCCTL_FAF);

	time_out = 20000;
	while (esdhc_read32(&regs->esdhcctl) & ESDHCCTL_FAF) {
		if (time_out-- == 0) {
			printf("fsl_esdhc: Flush asynchronous FIFO timeout.\n");
			break;
		}
	}
}
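/*
 * Gate the clock, flush the asynchronous FIFO and then set or clear
 * TBCTL[TB_EN] to turn the hardware tuning block on or off.
 */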
static void esdhc_tuning_block_enable(struct fsl_esdhc_priv *priv,
				      bool en)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;

	esdhc_clock_control(priv, false);
	esdhc_flush_async_fifo(priv);
	if (en)
		esdhc_setbits32(&regs->tbctl, TBCTL_TB_EN);
	else
		esdhc_clrbits32(&regs->tbctl, TBCTL_TB_EN);
	esdhc_clock_control(priv, true);
}
static void esdhc_exit_hs400(struct fsl_esdhc_priv *priv)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;

	esdhc_clrbits32(&regs->sdtimingctl, FLW_CTL_BG);
	esdhc_clrbits32(&regs->sdclkctl, CMD_CLK_CTL);

	esdhc_clock_control(priv, false);
	esdhc_clrbits32(&regs->tbctl, HS400_MODE);
	esdhc_clock_control(priv, true);

	esdhc_clrbits32(&regs->dllcfg0, DLL_FREQ_SEL | DLL_ENABLE);
	esdhc_clrbits32(&regs->tbctl, HS400_WNDW_ADJUST);

	esdhc_tuning_block_enable(priv, false);
}
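/*
 * Switch the controller between bus timing modes. HS400 additionally enables
 * the DLL and waits for the slave delay chain to lock before adjusting the
 * tuning window.
 */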
static int esdhc_set_timing(struct fsl_esdhc_priv *priv, enum bus_mode mode)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;
	ulong start;
	u32 val;

	/* Exit HS400 mode before setting any other mode */
	if (esdhc_read32(&regs->tbctl) & HS400_MODE &&
	    mode != MMC_HS_400)
		esdhc_exit_hs400(priv);

	esdhc_clock_control(priv, false);

	if (mode == MMC_HS_200)
		esdhc_clrsetbits32(&regs->autoc12err, UHSM_MASK,
				   UHSM_SDR104_HS200);
	if (mode == MMC_HS_400) {
		esdhc_setbits32(&regs->tbctl, HS400_MODE);
		esdhc_setbits32(&regs->sdclkctl, CMD_CLK_CTL);
		esdhc_clock_control(priv, true);

		if (priv->clock == 200000000)
			esdhc_setbits32(&regs->dllcfg0, DLL_FREQ_SEL);

		esdhc_setbits32(&regs->dllcfg0, DLL_ENABLE);

		esdhc_setbits32(&regs->dllcfg0, DLL_RESET);
		esdhc_clrbits32(&regs->dllcfg0, DLL_RESET);

		start = get_timer(0);
		val = DLL_STS_SLV_LOCK;
		while (!(esdhc_read32(&regs->dllstat0) & val)) {
			if (get_timer(start) > 1000) {
				printf("fsl_esdhc: delay chain lock timeout\n");
				return -ETIMEDOUT;
			}
		}

		esdhc_setbits32(&regs->tbctl, HS400_WNDW_ADJUST);

		esdhc_clock_control(priv, false);
		esdhc_flush_async_fifo(priv);
	}
	esdhc_clock_control(priv, true);

	return 0;
}
static int esdhc_set_ios_common(struct fsl_esdhc_priv *priv, struct mmc *mmc)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;
	int ret;

	if (priv->is_sdhc_per_clk) {
		/* Select to use peripheral clock */
		esdhc_clock_control(priv, false);
		esdhc_setbits32(&regs->esdhcctl, ESDHCCTL_PCS);
		esdhc_clock_control(priv, true);
	}

	if (mmc->selected_mode == MMC_HS_400)
		esdhc_tuning_block_enable(priv, true);

	/* Set the clock speed */
	if (priv->clock != mmc->clock)
		set_sysctl(priv, mmc, mmc->clock);

	/* Set the timing mode */
	ret = esdhc_set_timing(priv, mmc->selected_mode);
	if (ret)
		return ret;

	/* Set the bus width */
	esdhc_clrbits32(&regs->proctl, PROCTL_DTW_4 | PROCTL_DTW_8);

	if (mmc->bus_width == 4)
		esdhc_setbits32(&regs->proctl, PROCTL_DTW_4);
	else if (mmc->bus_width == 8)
		esdhc_setbits32(&regs->proctl, PROCTL_DTW_8);

	return 0;
}
static void esdhc_enable_cache_snooping(struct fsl_esdhc *regs)
{
#ifdef CONFIG_ARCH_MPC830X
	immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
	sysconf83xx_t *sysconf = &immr->sysconf;

	setbits_be32(&sysconf->sdhccr, 0x02000000);
#else
	esdhc_write32(&regs->esdhcctl, ESDHCCTL_SNOOP);
#endif
}
static int esdhc_init_common(struct fsl_esdhc_priv *priv, struct mmc *mmc)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;
	ulong start;

	/* Reset the entire host controller */
	esdhc_setbits32(&regs->sysctl, SYSCTL_RSTA);

	/* Wait until the controller is available */
	start = get_timer(0);
	while ((esdhc_read32(&regs->sysctl) & SYSCTL_RSTA)) {
		if (get_timer(start) > 1000)
			return -ETIMEDOUT;
	}

	/* Clear TBCTL[TB_EN], which is not cleared by the reset above */
	esdhc_clrbits32(&regs->tbctl, TBCTL_TB_EN);

	esdhc_enable_cache_snooping(regs);

	esdhc_setbits32(&regs->sysctl, SYSCTL_HCKEN | SYSCTL_IPGEN);

	/* Set the initial clock speed */
	set_sysctl(priv, mmc, 400000);

	/* Disable the BRR and BWR bits in IRQSTAT */
	esdhc_clrbits32(&regs->irqstaten, IRQSTATEN_BRR | IRQSTATEN_BWR);

	/* Put the PROCTL reg back to the default */
	esdhc_write32(&regs->proctl, PROCTL_INIT);

	/* Set timeout to the maximum value */
	esdhc_clrsetbits32(&regs->sysctl, SYSCTL_TIMEOUT_MASK, 14 << 16);

	if (IS_ENABLED(CONFIG_SYS_FSL_ESDHC_UNRELIABLE_PULSE_DETECTION_WORKAROUND))
		esdhc_clrbits32(&regs->dllcfg1, DLL_PD_PULSE_STRETCH_SEL);

	return 0;
}
static int esdhc_getcd_common(struct fsl_esdhc_priv *priv)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;

#ifdef CONFIG_ESDHC_DETECT_QUIRK
	if (qixis_esdhc_detect_quirk())
		return 1;
#endif
	if (esdhc_read32(&regs->prsstat) & PRSSTAT_CINS)
		return 1;

	return 0;
}
static void fsl_esdhc_get_cfg_common(struct fsl_esdhc_priv *priv,
				     struct mmc_config *cfg)
{
	struct fsl_esdhc *regs = priv->esdhc_regs;
	u32 caps;

	caps = esdhc_read32(&regs->hostcapblt);

	/*
	 * For eSDHC the card power supply comes from a peripheral circuit.
	 * Some eSDHC versions report 0 in this capability bit even though
	 * 3.3V is in fact available. 3.3V is common for SD/MMC and is
	 * supported by all boards with eSDHC in current U-Boot, so treat
	 * 3.3V as supported by default. CONFIG_FSL_ESDHC_VS33_NOT_SUPPORT
	 * can be enabled if a future board does not support 3.3V.
	 */
	caps |= HOSTCAPBLT_VS33;
	if (IS_ENABLED(CONFIG_FSL_ESDHC_VS33_NOT_SUPPORT))
		caps &= ~HOSTCAPBLT_VS33;

	if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC135))
		caps &= ~(HOSTCAPBLT_SRS | HOSTCAPBLT_VS18 | HOSTCAPBLT_VS30);
	if (caps & HOSTCAPBLT_VS18)
		cfg->voltages |= MMC_VDD_165_195;
	if (caps & HOSTCAPBLT_VS30)
		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & HOSTCAPBLT_VS33)
		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;

	cfg->name = "FSL_SDHC";

	if (caps & HOSTCAPBLT_HSS)
		cfg->host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;

	cfg->f_max = min(priv->sdhc_clk, (u32)200000000);
	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}
#ifdef CONFIG_OF_LIBFDT
__weak int esdhc_status_fixup(void *blob, const char *compat)
{
	if (IS_ENABLED(CONFIG_FSL_ESDHC_PIN_MUX) && !hwconfig("esdhc")) {
		do_fixup_by_compat(blob, compat, "status", "disabled",
				   sizeof("disabled"), 1);
		return 1;
	}

	return 0;
}
#if CONFIG_IS_ENABLED(DM_MMC)
static int fsl_esdhc_get_cd(struct udevice *dev);
static void esdhc_disable_for_no_card(void *blob)
{
	struct udevice *dev;

	for (uclass_first_device(UCLASS_MMC, &dev);
	     dev;
	     uclass_next_device(&dev)) {
		char esdhc_path[50];

		if (fsl_esdhc_get_cd(dev))
			continue;

		snprintf(esdhc_path, sizeof(esdhc_path), "/soc/esdhc@%lx",
			 (unsigned long)dev_read_addr(dev));
		do_fixup_by_path(blob, esdhc_path, "status", "disabled",
				 sizeof("disabled"), 1);
	}
}
#else
static void esdhc_disable_for_no_card(void *blob)
{
}
#endif
void fdt_fixup_esdhc(void *blob, struct bd_info *bd)
{
	const char *compat = "fsl,esdhc";

	if (esdhc_status_fixup(blob, compat))
		return;

	if (IS_ENABLED(CONFIG_FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND))
		esdhc_disable_for_no_card(blob);

	do_fixup_by_compat_u32(blob, compat, "clock-frequency",
			       gd->arch.sdhc_clk, 1);
}
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
static int esdhc_getcd(struct mmc *mmc)
{
	struct fsl_esdhc_priv *priv = mmc->priv;

	return esdhc_getcd_common(priv);
}

static int esdhc_init(struct mmc *mmc)
{
	struct fsl_esdhc_priv *priv = mmc->priv;

	return esdhc_init_common(priv, mmc);
}

static int esdhc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct fsl_esdhc_priv *priv = mmc->priv;

	return esdhc_send_cmd_common(priv, mmc, cmd, data);
}

static int esdhc_set_ios(struct mmc *mmc)
{
	struct fsl_esdhc_priv *priv = mmc->priv;

	return esdhc_set_ios_common(priv, mmc);
}

static const struct mmc_ops esdhc_ops = {
	.getcd		= esdhc_getcd,
	.init		= esdhc_init,
	.send_cmd	= esdhc_send_cmd,
	.set_ios	= esdhc_set_ios,
};
int fsl_esdhc_initialize(struct bd_info *bis, struct fsl_esdhc_cfg *cfg)
{
	struct fsl_esdhc_plat *plat;
	struct fsl_esdhc_priv *priv;
	struct mmc_config *mmc_cfg;
	struct mmc *mmc;

	if (!cfg)
		return -EINVAL;

	priv = calloc(sizeof(struct fsl_esdhc_priv), 1);
	if (!priv)
		return -ENOMEM;
	plat = calloc(sizeof(struct fsl_esdhc_plat), 1);
	if (!plat) {
		free(priv);
		return -ENOMEM;
	}

	priv->esdhc_regs = (struct fsl_esdhc *)(unsigned long)(cfg->esdhc_base);
	priv->sdhc_clk = cfg->sdhc_clk;
	if (gd->arch.sdhc_per_clk)
		priv->is_sdhc_per_clk = true;

	mmc_cfg = &plat->cfg;

	if (cfg->max_bus_width == 8) {
		mmc_cfg->host_caps |= MMC_MODE_1BIT | MMC_MODE_4BIT |
				      MMC_MODE_8BIT;
	} else if (cfg->max_bus_width == 4) {
		mmc_cfg->host_caps |= MMC_MODE_1BIT | MMC_MODE_4BIT;
	} else if (cfg->max_bus_width == 1) {
		mmc_cfg->host_caps |= MMC_MODE_1BIT;
	} else {
		mmc_cfg->host_caps |= MMC_MODE_1BIT;
		printf("No max bus width provided. Fallback to 1-bit mode.\n");
	}

	if (IS_ENABLED(CONFIG_ESDHC_DETECT_8_BIT_QUIRK))
		mmc_cfg->host_caps &= ~MMC_MODE_8BIT;

	mmc_cfg->ops = &esdhc_ops;

	fsl_esdhc_get_cfg_common(priv, mmc_cfg);

	mmc = mmc_create(mmc_cfg, priv);
	if (!mmc)
		return -1;

	priv->mmc = mmc;
	return 0;
}
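/*
 * A minimal non-DM usage sketch. board_mmc_init() is the standard U-Boot
 * board hook; the cfg values below are illustrative only:
 *
 * static struct fsl_esdhc_cfg esdhc_cfg = {
 *	.esdhc_base = CONFIG_SYS_FSL_ESDHC_ADDR,
 *	.max_bus_width = 4,
 * };
 *
 * int board_mmc_init(struct bd_info *bis)
 * {
 *	esdhc_cfg.sdhc_clk = gd->arch.sdhc_clk;
 *	return fsl_esdhc_initialize(bis, &esdhc_cfg);
 * }
 */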
int fsl_esdhc_mmc_init(struct bd_info *bis)
{
	struct fsl_esdhc_cfg *cfg;

	cfg = calloc(sizeof(struct fsl_esdhc_cfg), 1);
	cfg->esdhc_base = CONFIG_SYS_FSL_ESDHC_ADDR;
	cfg->max_bus_width = CONFIG_SYS_FSL_ESDHC_DEFAULT_BUS_WIDTH;
	/* Prefer the peripheral clock, which provides a higher frequency. */
	if (gd->arch.sdhc_per_clk)
		cfg->sdhc_clk = gd->arch.sdhc_per_clk;
	else
		cfg->sdhc_clk = gd->arch.sdhc_clk;
	return fsl_esdhc_initialize(bis, cfg);
}
#else
static int fsl_esdhc_probe(struct udevice *dev)
{
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	struct fsl_esdhc_plat *plat = dev_get_plat(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);
	u32 caps, hostver;
	fdt_addr_t addr;
	struct mmc *mmc;
	int ret;

	addr = dev_read_addr(dev);
	if (addr == FDT_ADDR_T_NONE)
		return -EINVAL;
#ifdef CONFIG_PPC
	priv->esdhc_regs = (struct fsl_esdhc *)lower_32_bits(addr);
#else
	priv->esdhc_regs = (struct fsl_esdhc *)addr;
#endif
	priv->dev = dev;

	if (IS_ENABLED(CONFIG_FSL_ESDHC_SUPPORT_ADMA2)) {
		/*
		 * Only newer eSDHC controllers can do ADMA2 if the ADMA flag
		 * is set in the host capabilities register.
		 */
		caps = esdhc_read32(&priv->esdhc_regs->hostcapblt);
		hostver = esdhc_read32(&priv->esdhc_regs->hostver);
		if (caps & HOSTCAPBLT_DMAS &&
		    HOSTVER_VENDOR(hostver) > VENDOR_V_22) {
			priv->adma_desc_table = sdhci_adma_init();
			if (!priv->adma_desc_table)
				debug("Could not allocate ADMA tables, falling back to SDMA\n");
		}
	}

	if (gd->arch.sdhc_per_clk) {
		priv->sdhc_clk = gd->arch.sdhc_per_clk;
		priv->is_sdhc_per_clk = true;
	} else {
		priv->sdhc_clk = gd->arch.sdhc_clk;
	}

	if (priv->sdhc_clk <= 0) {
		dev_err(dev, "Unable to get clk for %s\n", dev->name);
		return -EINVAL;
	}

	fsl_esdhc_get_cfg_common(priv, &plat->cfg);

	mmc_of_parse(dev, &plat->cfg);

	mmc = &plat->mmc;
	mmc->cfg = &plat->cfg;
	upriv->mmc = mmc;

	ret = esdhc_init_common(priv, mmc);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND) &&
	    !fsl_esdhc_get_cd(dev))
		esdhc_setbits32(&priv->esdhc_regs->proctl, PROCTL_VOLT_SEL);

	return 0;
}
static int fsl_esdhc_get_cd(struct udevice *dev)
{
	struct fsl_esdhc_plat *plat = dev_get_plat(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	if (plat->cfg.host_caps & MMC_CAP_NONREMOVABLE)
		return 1;

	return esdhc_getcd_common(priv);
}

static int fsl_esdhc_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct fsl_esdhc_plat *plat = dev_get_plat(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	return esdhc_send_cmd_common(priv, &plat->mmc, cmd, data);
}

static int fsl_esdhc_set_ios(struct udevice *dev)
{
	struct fsl_esdhc_plat *plat = dev_get_plat(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	return esdhc_set_ios_common(priv, &plat->mmc);
}

static int fsl_esdhc_reinit(struct udevice *dev)
{
	struct fsl_esdhc_plat *plat = dev_get_plat(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	return esdhc_init_common(priv, &plat->mmc);
}
#ifdef MMC_SUPPORTS_TUNING
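/*
 * Issue tuning commands in a loop while the controller steps EXECUTE_TUNING;
 * tuning succeeds once the controller clears EXECUTE_TUNING with SMPCLKSEL
 * set, otherwise the sampling clock selection is reverted and the tuning
 * block disabled.
 */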
static int fsl_esdhc_execute_tuning(struct udevice *dev, uint32_t opcode)
{
	struct fsl_esdhc_plat *plat = dev_get_plat(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);
	struct fsl_esdhc *regs = priv->esdhc_regs;
	struct mmc *mmc = &plat->mmc;
	u32 val, irqstaten;
	int i;

	if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_A011334) &&
	    plat->mmc.hs400_tuning)
		set_sysctl(priv, mmc, mmc->clock);

	esdhc_tuning_block_enable(priv, true);
	esdhc_setbits32(&regs->autoc12err, EXECUTE_TUNING);

	irqstaten = esdhc_read32(&regs->irqstaten);
	esdhc_write32(&regs->irqstaten, IRQSTATEN_BRR);

	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		mmc_send_tuning(mmc, opcode, NULL);

		val = esdhc_read32(&regs->autoc12err);
		if (!(val & EXECUTE_TUNING)) {
			if (val & SMPCLKSEL)
				break;
		}
	}

	esdhc_write32(&regs->irqstaten, irqstaten);

	if (i != MAX_TUNING_LOOP) {
		if (plat->mmc.hs400_tuning)
			esdhc_setbits32(&regs->sdtimingctl, FLW_CTL_BG);
		return 0;
	}

	printf("fsl_esdhc: tuning failed!\n");
	esdhc_clrbits32(&regs->autoc12err, SMPCLKSEL);
	esdhc_clrbits32(&regs->autoc12err, EXECUTE_TUNING);
	esdhc_tuning_block_enable(priv, false);

	return -ETIMEDOUT;
}
#endif
int fsl_esdhc_hs400_prepare_ddr(struct udevice *dev)
{
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	esdhc_tuning_block_enable(priv, false);
	return 0;
}

static int fsl_esdhc_wait_dat0(struct udevice *dev, int state,
			       int timeout_us)
{
	int ret;
	u32 tmp;
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);
	struct fsl_esdhc *regs = priv->esdhc_regs;

	ret = readx_poll_timeout(esdhc_read32, &regs->prsstat, tmp,
				 !!(tmp & PRSSTAT_DAT0) == !!state,
				 timeout_us);
	return ret;
}
static const struct dm_mmc_ops fsl_esdhc_ops = {
	.get_cd		= fsl_esdhc_get_cd,
	.send_cmd	= fsl_esdhc_send_cmd,
	.set_ios	= fsl_esdhc_set_ios,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning	= fsl_esdhc_execute_tuning,
#endif
	.reinit		= fsl_esdhc_reinit,
	.hs400_prepare_ddr = fsl_esdhc_hs400_prepare_ddr,
	.wait_dat0	= fsl_esdhc_wait_dat0,
};
static const struct udevice_id fsl_esdhc_ids[] = {
	{ .compatible = "fsl,esdhc", },
	{ /* sentinel */ }
};
static int fsl_esdhc_bind(struct udevice *dev)
{
	struct fsl_esdhc_plat *plat = dev_get_plat(dev);

	return mmc_bind(dev, &plat->mmc, &plat->cfg);
}
U_BOOT_DRIVER(fsl_esdhc) = {
	.name	= "fsl-esdhc-mmc",
	.id	= UCLASS_MMC,
	.of_match = fsl_esdhc_ids,
	.ops	= &fsl_esdhc_ops,
	.bind	= fsl_esdhc_bind,
	.probe	= fsl_esdhc_probe,
	.plat_auto	= sizeof(struct fsl_esdhc_plat),
	.priv_auto	= sizeof(struct fsl_esdhc_priv),
};
#endif