// SPDX-License-Identifier: GPL-2.0+
 * Copyright 2007, 2010-2011 Freescale Semiconductor, Inc
 * Copyright 2019-2020 NXP
 * Based vaguely on the pxa mmc code:
 * Kyle Harris, Nexus Technologies, Inc. kharris@nexus-tech.net

#include <fsl_esdhc.h>
#include <fdt_support.h>
#include <asm/cache.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>

DECLARE_GLOBAL_DATA_PTR;
	uint dsaddr; /* SDMA system address register */
	uint blkattr; /* Block attributes register */
	uint cmdarg; /* Command argument register */
	uint xfertyp; /* Transfer type register */
	uint cmdrsp0; /* Command response 0 register */
	uint cmdrsp1; /* Command response 1 register */
	uint cmdrsp2; /* Command response 2 register */
	uint cmdrsp3; /* Command response 3 register */
	uint datport; /* Buffer data port register */
	uint prsstat; /* Present state register */
	uint proctl; /* Protocol control register */
	uint sysctl; /* System control register */
	uint irqstat; /* Interrupt status register */
	uint irqstaten; /* Interrupt status enable register */
	uint irqsigen; /* Interrupt signal enable register */
	uint autoc12err; /* Auto CMD error status register */
	uint hostcapblt; /* Host controller capabilities register */
	uint wml; /* Watermark level register */
	char reserved1[8]; /* reserved */
	uint fevt; /* Force event register */
	uint admaes; /* ADMA error status register */
	uint adsaddrl; /* ADMA system address low register */
	uint adsaddrh; /* ADMA system address high register */
	uint hostver; /* Host controller version register */
	char reserved3[4]; /* reserved */
	uint dmaerraddr; /* DMA error address register */
	char reserved4[4]; /* reserved */
	uint dmaerrattr; /* DMA error attribute register */
	char reserved5[4]; /* reserved */
	uint hostcapblt2; /* Host controller capabilities register 2 */
	char reserved6[8]; /* reserved */
	uint tbctl; /* Tuning block control register */
	char reserved7[32]; /* reserved */
	uint sdclkctl; /* SD clock control register */
	uint sdtimingctl; /* SD timing control register */
	char reserved8[20]; /* reserved */
	uint dllcfg0; /* DLL config 0 register */
	char reserved9[12]; /* reserved */
	uint dllstat0; /* DLL status 0 register */
	char reserved10[664]; /* reserved */
	uint esdhcctl; /* eSDHC control register */
struct fsl_esdhc_plat {
	struct mmc_config cfg;

 * struct fsl_esdhc_priv
 * @esdhc_regs: registers of the sdhc controller
 * @sdhc_clk: Current clk of the sdhc controller
 * @bus_width: bus width, 1bit, 4bit or 8bit
 * Following is used when Driver Model is enabled for MMC
 * @dev: pointer for the device
 * @cd_gpio: gpio for card detection
 * @wp_gpio: gpio for write protection
struct fsl_esdhc_priv {
	struct fsl_esdhc *esdhc_regs;
	unsigned int sdhc_clk;
	bool is_sdhc_per_clk;
#if !CONFIG_IS_ENABLED(DM_MMC)
	struct sdhci_adma_desc *adma_desc_table;
/* Return the XFERTYP flags for a given command and data packet */
static uint esdhc_xfertyp(struct mmc_cmd *cmd, struct mmc_data *data)
		xfertyp |= XFERTYP_DPSEL;
		if (!IS_ENABLED(CONFIG_SYS_FSL_ESDHC_USE_PIO) &&
		    cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK &&
		    cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK_HS200)
			xfertyp |= XFERTYP_DMAEN;
		if (data->blocks > 1) {
			xfertyp |= XFERTYP_MSBSEL;
			xfertyp |= XFERTYP_BCEN;
			if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC111))
				xfertyp |= XFERTYP_AC12EN;
		if (data->flags & MMC_DATA_READ)
			xfertyp |= XFERTYP_DTDSEL;
	if (cmd->resp_type & MMC_RSP_CRC)
		xfertyp |= XFERTYP_CCCEN;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		xfertyp |= XFERTYP_CICEN;
	if (cmd->resp_type & MMC_RSP_136)
		xfertyp |= XFERTYP_RSPTYP_136;
	else if (cmd->resp_type & MMC_RSP_BUSY)
		xfertyp |= XFERTYP_RSPTYP_48_BUSY;
	else if (cmd->resp_type & MMC_RSP_PRESENT)
		xfertyp |= XFERTYP_RSPTYP_48;
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		xfertyp |= XFERTYP_CMDTYP_ABORT;
	return XFERTYP_CMD(cmd->cmdidx) | xfertyp;
 * PIO Read/Write Mode reduces performance, as DMA is not used in this mode.
static void esdhc_pio_read_write(struct fsl_esdhc_priv *priv,
				 struct mmc_data *data)
	struct fsl_esdhc *regs = priv->esdhc_regs;
	if (data->flags & MMC_DATA_READ) {
		blocks = data->blocks;
			start = get_timer(0);
			size = data->blocksize;
			irqstat = esdhc_read32(&regs->irqstat);
			while (!(esdhc_read32(&regs->prsstat) & PRSSTAT_BREN)) {
				if (get_timer(start) > PIO_TIMEOUT) {
					printf("\nData Read Failed in PIO Mode.");
			while (size && (!(irqstat & IRQSTAT_TC))) {
				udelay(100); /* Wait before last byte transfer complete */
				irqstat = esdhc_read32(&regs->irqstat);
				databuf = in_le32(&regs->datport);
				*((uint *)buffer) = databuf;
		blocks = data->blocks;
		buffer = (char *)data->src;
			start = get_timer(0);
			size = data->blocksize;
			irqstat = esdhc_read32(&regs->irqstat);
			while (!(esdhc_read32(&regs->prsstat) & PRSSTAT_BWEN)) {
				if (get_timer(start) > PIO_TIMEOUT) {
					printf("\nData Write Failed in PIO Mode.");
			while (size && (!(irqstat & IRQSTAT_TC))) {
				udelay(100); /* Wait before last byte transfer complete */
				databuf = *((uint *)buffer);
				irqstat = esdhc_read32(&regs->irqstat);
				out_le32(&regs->datport, databuf);
static void esdhc_setup_watermark_level(struct fsl_esdhc_priv *priv,
					struct mmc_data *data)
	struct fsl_esdhc *regs = priv->esdhc_regs;
	uint wml_value = data->blocksize / 4;
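	/*
	 * The watermark level fields count 32-bit words, so the byte-based
	 * block size is converted by dividing by 4.
	 */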
	if (data->flags & MMC_DATA_READ) {
		if (wml_value > WML_RD_WML_MAX)
			wml_value = WML_RD_WML_MAX_VAL;
		esdhc_clrsetbits32(&regs->wml, WML_RD_WML_MASK, wml_value);
		if (wml_value > WML_WR_WML_MAX)
			wml_value = WML_WR_WML_MAX_VAL;
		esdhc_clrsetbits32(&regs->wml, WML_WR_WML_MASK,

static void esdhc_setup_dma(struct fsl_esdhc_priv *priv, struct mmc_data *data)
	uint trans_bytes = data->blocksize * data->blocks;
	struct fsl_esdhc *regs = priv->esdhc_regs;
	phys_addr_t adma_addr;

	if (data->flags & MMC_DATA_WRITE)
		buf = (void *)data->src;

	priv->dma_addr = dma_map_single(buf, trans_bytes,
					mmc_get_dma_dir(data));

	if (IS_ENABLED(CONFIG_FSL_ESDHC_SUPPORT_ADMA2) &&
	    priv->adma_desc_table) {
		debug("Using ADMA2\n");
		/* prefer ADMA2 if it is available */
		sdhci_prepare_adma_table(priv->adma_desc_table, data,
		adma_addr = virt_to_phys(priv->adma_desc_table);
		esdhc_write32(&regs->adsaddrl, lower_32_bits(adma_addr));
		if (IS_ENABLED(CONFIG_DMA_ADDR_T_64BIT))
			esdhc_write32(&regs->adsaddrh, upper_32_bits(adma_addr));
		esdhc_clrsetbits32(&regs->proctl, PROCTL_DMAS_MASK,
		debug("Using SDMA\n");
		if (upper_32_bits(priv->dma_addr))
			printf("Cannot use 64 bit addresses with SDMA\n");
		esdhc_write32(&regs->dsaddr, lower_32_bits(priv->dma_addr));
		esdhc_clrsetbits32(&regs->proctl, PROCTL_DMAS_MASK,

	esdhc_write32(&regs->blkattr, data->blocks << 16 | data->blocksize);
static int esdhc_setup_data(struct fsl_esdhc_priv *priv, struct mmc *mmc,
			    struct mmc_data *data)
	bool is_write = data->flags & MMC_DATA_WRITE;
	struct fsl_esdhc *regs = priv->esdhc_regs;

	if (is_write && !(esdhc_read32(&regs->prsstat) & PRSSTAT_WPSPL)) {
		printf("Cannot write to locked SD card.\n");

	if (IS_ENABLED(CONFIG_SYS_FSL_ESDHC_USE_PIO))
		esdhc_setup_watermark_level(priv, data);
		esdhc_setup_dma(priv, data);

	/* Calculate the timeout period for data transactions */
	 * 1) Timeout period = (2^(timeout+13)) SD clock cycles
	 * 2) Timeout period should be at least 0.250 sec as per the SD card spec
	 * So, the number of SD clock cycles for 0.25 sec should be at least
	 * (SD clock/sec * 0.25 sec) SD clock cycles
	 * = (mmc->clock * 1/4) SD clock cycles
	 * => (2^(timeout+13)) >= mmc->clock * 1/4
	 * Taking log2 on both sides
	 * => timeout + 13 >= log2(mmc->clock/4)
	 * Rounding up to the next power of 2
	 * => timeout + 13 = log2(mmc->clock/4) + 1
	 * => timeout + 13 = fls(mmc->clock/4)
	 * However, the MMC spec says "It is strongly recommended for hosts to
	 * implement more than 500ms timeout value even if the card
	 * indicates the 250ms maximum busy length." Even the previous
	 * value of 300ms is known to be insufficient for some cards.
	 * => timeout + 13 = fls(mmc->clock/2)
	timeout = fls(mmc->clock/2);
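	/*
	 * Example: with mmc->clock = 52 MHz, fls(26000000) = 25, so by the
	 * derivation above the programmed timeout field is 25 - 13 = 12,
	 * i.e. 2^25 SD clock cycles (roughly 0.65 s at 52 MHz).
	 */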
	if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC_A001) &&
	    (timeout == 4 || timeout == 8 || timeout == 12))
	if (IS_ENABLED(ESDHCI_QUIRK_BROKEN_TIMEOUT_VALUE))
	esdhc_clrsetbits32(&regs->sysctl, SYSCTL_TIMEOUT_MASK, timeout << 16);

 * Sends a command out on the bus. Takes the mmc pointer,
 * a command pointer, and an optional data pointer.
static int esdhc_send_cmd_common(struct fsl_esdhc_priv *priv, struct mmc *mmc,
				 struct mmc_cmd *cmd, struct mmc_data *data)
	u32 flags = IRQSTAT_CC | IRQSTAT_CTOE;
	struct fsl_esdhc *regs = priv->esdhc_regs;

	if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC111) &&
	    cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)

	esdhc_write32(&regs->irqstat, -1);

	/* Wait for the bus to be idle */
	while ((esdhc_read32(&regs->prsstat) & PRSSTAT_CICHB) ||
	       (esdhc_read32(&regs->prsstat) & PRSSTAT_CIDHB))
	while (esdhc_read32(&regs->prsstat) & PRSSTAT_DLA)

	/* Wait at least 8 SD clock cycles before the next command */
	 * Note: This is way more than 8 cycles, but 1ms seems to
	 * resolve timing issues with some cards

	/* Set up for a data transfer if we have one */
		err = esdhc_setup_data(priv, mmc, data);

	/* Figure out the transfer arguments */
	xfertyp = esdhc_xfertyp(cmd, data);

	esdhc_write32(&regs->irqsigen, 0);

	/* Send the command */
	esdhc_write32(&regs->cmdarg, cmd->cmdarg);
	esdhc_write32(&regs->xfertyp, xfertyp);

	if (cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)

	/* Wait for the command to complete */
	start = get_timer(0);
	while (!(esdhc_read32(&regs->irqstat) & flags)) {
		if (get_timer(start) > 1000) {

	irqstat = esdhc_read32(&regs->irqstat);

	if (irqstat & CMD_ERR) {
	if (irqstat & IRQSTAT_CTOE) {

	/* Workaround for ESDHC errata ENGcm03648 */
	if (!data && (cmd->resp_type & MMC_RSP_BUSY)) {
		/* Poll on DATA0 line for cmd with busy signal for 600 ms */
		while (timeout > 0 && !(esdhc_read32(&regs->prsstat) &
			printf("Timeout waiting for DAT0 to go high!\n");

	/* Copy the response to the response buffer */
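	/*
	 * The CMDRSP registers hold the long (136-bit) response with the CRC
	 * byte stripped, so the words below are shifted left by 8 and
	 * stitched together to rebuild the layout the MMC core expects.
	 */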
	if (cmd->resp_type & MMC_RSP_136) {
		u32 cmdrsp3, cmdrsp2, cmdrsp1, cmdrsp0;
		cmdrsp3 = esdhc_read32(&regs->cmdrsp3);
		cmdrsp2 = esdhc_read32(&regs->cmdrsp2);
		cmdrsp1 = esdhc_read32(&regs->cmdrsp1);
		cmdrsp0 = esdhc_read32(&regs->cmdrsp0);
		cmd->response[0] = (cmdrsp3 << 8) | (cmdrsp2 >> 24);
		cmd->response[1] = (cmdrsp2 << 8) | (cmdrsp1 >> 24);
		cmd->response[2] = (cmdrsp1 << 8) | (cmdrsp0 >> 24);
		cmd->response[3] = (cmdrsp0 << 8);
		cmd->response[0] = esdhc_read32(&regs->cmdrsp0);

	/* Wait until all of the blocks are transferred */
		if (IS_ENABLED(CONFIG_SYS_FSL_ESDHC_USE_PIO)) {
			esdhc_pio_read_write(priv, data);
			flags = DATA_COMPLETE;
			if (cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
			    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
				irqstat = esdhc_read32(&regs->irqstat);
				if (irqstat & IRQSTAT_DTOE) {
				if (irqstat & DATA_ERR) {
			} while ((irqstat & flags) != flags);

		 * Need to invalidate the dcache here again to avoid any
		 * cache fill during the DMA operations, such as
		 * speculative pre-fetching etc.
		dma_unmap_single(priv->dma_addr,
				 data->blocks * data->blocksize,
				 mmc_get_dma_dir(data));

	/* Reset CMD and DATA portions on error */
		esdhc_write32(&regs->sysctl, esdhc_read32(&regs->sysctl) |
		while (esdhc_read32(&regs->sysctl) & SYSCTL_RSTC)
			esdhc_write32(&regs->sysctl,
				      esdhc_read32(&regs->sysctl) |
			while ((esdhc_read32(&regs->sysctl) & SYSCTL_RSTD))

	esdhc_write32(&regs->irqstat, -1);
static void set_sysctl(struct fsl_esdhc_priv *priv, struct mmc *mmc, uint clock)
	struct fsl_esdhc *regs = priv->esdhc_regs;
	unsigned int sdhc_clk = priv->sdhc_clk;

	if (clock < mmc->cfg->f_min)
		clock = mmc->cfg->f_min;

	while (sdhc_clk / (16 * pre_div) > clock && pre_div < 256)
	while (sdhc_clk / (div * pre_div) > clock && div < 16)

	mmc->clock = sdhc_clk / pre_div / div;
	priv->clock = mmc->clock;
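	/*
	 * Illustrative example (assuming the elided loop bodies double
	 * pre_div and increment div): with a 400 MHz sdhc_clk and a
	 * requested 400 kHz clock, the loops settle on pre_div = 64 and
	 * div = 16, giving 400 MHz / (64 * 16) = ~390 kHz, the closest rate
	 * that does not exceed the request.
	 */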
	clk = (pre_div << 8) | (div << 4);

	esdhc_clrbits32(&regs->sysctl, SYSCTL_CKEN);
	esdhc_clrsetbits32(&regs->sysctl, SYSCTL_CLOCK_MASK, clk);

	value = PRSSTAT_SDSTB;
	while (!(esdhc_read32(&regs->prsstat) & value)) {
			printf("fsl_esdhc: Internal clock never stabilised.\n");

	esdhc_setbits32(&regs->sysctl, SYSCTL_PEREN | SYSCTL_CKEN);

static void esdhc_clock_control(struct fsl_esdhc_priv *priv, bool enable)
	struct fsl_esdhc *regs = priv->esdhc_regs;

	value = esdhc_read32(&regs->sysctl);
		value |= SYSCTL_CKEN;
		value &= ~SYSCTL_CKEN;
	esdhc_write32(&regs->sysctl, value);

	value = PRSSTAT_SDSTB;
	while (!(esdhc_read32(&regs->prsstat) & value)) {
			printf("fsl_esdhc: Internal clock never stabilised.\n");

static void esdhc_flush_async_fifo(struct fsl_esdhc_priv *priv)
	struct fsl_esdhc *regs = priv->esdhc_regs;

	esdhc_setbits32(&regs->esdhcctl, ESDHCCTL_FAF);

	while (esdhc_read32(&regs->esdhcctl) & ESDHCCTL_FAF) {
			printf("fsl_esdhc: Flush asynchronous FIFO timeout.\n");
static void esdhc_tuning_block_enable(struct fsl_esdhc_priv *priv,
	struct fsl_esdhc *regs = priv->esdhc_regs;

	esdhc_clock_control(priv, false);
	esdhc_flush_async_fifo(priv);
		esdhc_setbits32(&regs->tbctl, TBCTL_TB_EN);
		esdhc_clrbits32(&regs->tbctl, TBCTL_TB_EN);
	esdhc_clock_control(priv, true);

static void esdhc_exit_hs400(struct fsl_esdhc_priv *priv)
	struct fsl_esdhc *regs = priv->esdhc_regs;

	esdhc_clrbits32(&regs->sdtimingctl, FLW_CTL_BG);
	esdhc_clrbits32(&regs->sdclkctl, CMD_CLK_CTL);

	esdhc_clock_control(priv, false);
	esdhc_clrbits32(&regs->tbctl, HS400_MODE);
	esdhc_clock_control(priv, true);

	esdhc_clrbits32(&regs->dllcfg0, DLL_FREQ_SEL | DLL_ENABLE);
	esdhc_clrbits32(&regs->tbctl, HS400_WNDW_ADJUST);

	esdhc_tuning_block_enable(priv, false);

static int esdhc_set_timing(struct fsl_esdhc_priv *priv, enum bus_mode mode)
	struct fsl_esdhc *regs = priv->esdhc_regs;

	/* Exit HS400 mode before setting any other mode */
	if (esdhc_read32(&regs->tbctl) & HS400_MODE &&
		esdhc_exit_hs400(priv);

	esdhc_clock_control(priv, false);

	if (mode == MMC_HS_200)
		esdhc_clrsetbits32(&regs->autoc12err, UHSM_MASK,
	if (mode == MMC_HS_400) {
		esdhc_setbits32(&regs->tbctl, HS400_MODE);
		esdhc_setbits32(&regs->sdclkctl, CMD_CLK_CTL);
		esdhc_clock_control(priv, true);

		if (priv->clock == 200000000)
			esdhc_setbits32(&regs->dllcfg0, DLL_FREQ_SEL);

		esdhc_setbits32(&regs->dllcfg0, DLL_ENABLE);

		esdhc_setbits32(&regs->dllcfg0, DLL_RESET);
		esdhc_clrbits32(&regs->dllcfg0, DLL_RESET);

		start = get_timer(0);
		val = DLL_STS_SLV_LOCK;
		while (!(esdhc_read32(&regs->dllstat0) & val)) {
			if (get_timer(start) > 1000) {
				printf("fsl_esdhc: delay chain lock timeout\n");

		esdhc_setbits32(&regs->tbctl, HS400_WNDW_ADJUST);

	esdhc_clock_control(priv, false);
	esdhc_flush_async_fifo(priv);

	esdhc_clock_control(priv, true);
static int esdhc_set_ios_common(struct fsl_esdhc_priv *priv, struct mmc *mmc)
	struct fsl_esdhc *regs = priv->esdhc_regs;

	if (priv->is_sdhc_per_clk) {
		/* Select to use peripheral clock */
		esdhc_clock_control(priv, false);
		esdhc_setbits32(&regs->esdhcctl, ESDHCCTL_PCS);
		esdhc_clock_control(priv, true);

	if (mmc->selected_mode == MMC_HS_400)
		esdhc_tuning_block_enable(priv, true);

	/* Set the clock speed */
	if (priv->clock != mmc->clock)
		set_sysctl(priv, mmc, mmc->clock);

	ret = esdhc_set_timing(priv, mmc->selected_mode);

	/* Set the bus width */
	esdhc_clrbits32(&regs->proctl, PROCTL_DTW_4 | PROCTL_DTW_8);

	if (mmc->bus_width == 4)
		esdhc_setbits32(&regs->proctl, PROCTL_DTW_4);
	else if (mmc->bus_width == 8)
		esdhc_setbits32(&regs->proctl, PROCTL_DTW_8);

static void esdhc_enable_cache_snooping(struct fsl_esdhc *regs)
#ifdef CONFIG_ARCH_MPC830X
	immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
	sysconf83xx_t *sysconf = &immr->sysconf;

	setbits_be32(&sysconf->sdhccr, 0x02000000);
	esdhc_write32(&regs->esdhcctl, 0x00000040);
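	/*
	 * The 0x00000040 value written above is assumed to be the DMA snoop
	 * enable bit in the eSDHC control register, so DMA accesses snoop
	 * the CPU caches.
	 */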
static int esdhc_init_common(struct fsl_esdhc_priv *priv, struct mmc *mmc)
	struct fsl_esdhc *regs = priv->esdhc_regs;

	/* Reset the entire host controller */
	esdhc_setbits32(&regs->sysctl, SYSCTL_RSTA);

	/* Wait until the controller is available */
	start = get_timer(0);
	while ((esdhc_read32(&regs->sysctl) & SYSCTL_RSTA)) {
		if (get_timer(start) > 1000)

	/* Clear TBCTL[TB_EN], which is not cleared by the reset-all above */
	esdhc_clrbits32(&regs->tbctl, TBCTL_TB_EN);

	esdhc_enable_cache_snooping(regs);

	esdhc_setbits32(&regs->sysctl, SYSCTL_HCKEN | SYSCTL_IPGEN);

	/* Set the initial clock speed */
	set_sysctl(priv, mmc, 400000);

	/* Disable the BRR and BWR bits in IRQSTAT */
	esdhc_clrbits32(&regs->irqstaten, IRQSTATEN_BRR | IRQSTATEN_BWR);

	/* Put the PROCTL reg back to the default */
	esdhc_write32(&regs->proctl, PROCTL_INIT);

	/* Set timeout to the maximum value */
	esdhc_clrsetbits32(&regs->sysctl, SYSCTL_TIMEOUT_MASK, 14 << 16);
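	/*
	 * A field value of 14 is the largest encoding, i.e. 2^(14+13) = 2^27
	 * SD clock cycles, per the timeout formula above.
	 */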
static int esdhc_getcd_common(struct fsl_esdhc_priv *priv)
	struct fsl_esdhc *regs = priv->esdhc_regs;

#ifdef CONFIG_ESDHC_DETECT_QUIRK
	if (CONFIG_ESDHC_DETECT_QUIRK)
	if (esdhc_read32(&regs->prsstat) & PRSSTAT_CINS)

static void fsl_esdhc_get_cfg_common(struct fsl_esdhc_priv *priv,
				     struct mmc_config *cfg)
	struct fsl_esdhc *regs = priv->esdhc_regs;

	caps = esdhc_read32(&regs->hostcapblt);
	if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC135))
		caps &= ~(HOSTCAPBLT_SRS | HOSTCAPBLT_VS18 | HOSTCAPBLT_VS30);
	if (IS_ENABLED(CONFIG_SYS_FSL_MMC_HAS_CAPBLT_VS33))
		caps |= HOSTCAPBLT_VS33;
	if (caps & HOSTCAPBLT_VS18)
		cfg->voltages |= MMC_VDD_165_195;
	if (caps & HOSTCAPBLT_VS30)
		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & HOSTCAPBLT_VS33)
		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;

	cfg->name = "FSL_SDHC";

	if (caps & HOSTCAPBLT_HSS)
		cfg->host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;

	cfg->f_max = min(priv->sdhc_clk, (u32)200000000);
	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

#ifdef CONFIG_OF_LIBFDT
__weak int esdhc_status_fixup(void *blob, const char *compat)
	if (IS_ENABLED(CONFIG_FSL_ESDHC_PIN_MUX) && !hwconfig("esdhc")) {
		do_fixup_by_compat(blob, compat, "status", "disabled",
				   sizeof("disabled"), 1);

#if CONFIG_IS_ENABLED(DM_MMC)
static int fsl_esdhc_get_cd(struct udevice *dev);
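/*
 * With the 3.3V I/O reliability workaround enabled, eSDHC nodes whose slot
 * has no card inserted are marked "disabled" in the device tree passed to
 * the OS (see fdt_fixup_esdhc() below).
 */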
static void esdhc_disable_for_no_card(void *blob)
	for (uclass_first_device(UCLASS_MMC, &dev);
	     uclass_next_device(&dev)) {
		if (fsl_esdhc_get_cd(dev))
		snprintf(esdhc_path, sizeof(esdhc_path), "/soc/esdhc@%lx",
			 (unsigned long)dev_read_addr(dev));
		do_fixup_by_path(blob, esdhc_path, "status", "disabled",
				 sizeof("disabled"), 1);

static void esdhc_disable_for_no_card(void *blob)

void fdt_fixup_esdhc(void *blob, struct bd_info *bd)
	const char *compat = "fsl,esdhc";

	if (esdhc_status_fixup(blob, compat))

	if (IS_ENABLED(CONFIG_FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND))
		esdhc_disable_for_no_card(blob);

	do_fixup_by_compat_u32(blob, compat, "clock-frequency",
			       gd->arch.sdhc_clk, 1);
#if !CONFIG_IS_ENABLED(DM_MMC)
static int esdhc_getcd(struct mmc *mmc)
	struct fsl_esdhc_priv *priv = mmc->priv;

	return esdhc_getcd_common(priv);

static int esdhc_init(struct mmc *mmc)
	struct fsl_esdhc_priv *priv = mmc->priv;

	return esdhc_init_common(priv, mmc);

static int esdhc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
	struct fsl_esdhc_priv *priv = mmc->priv;

	return esdhc_send_cmd_common(priv, mmc, cmd, data);

static int esdhc_set_ios(struct mmc *mmc)
	struct fsl_esdhc_priv *priv = mmc->priv;

	return esdhc_set_ios_common(priv, mmc);

static const struct mmc_ops esdhc_ops = {
	.getcd = esdhc_getcd,
	.send_cmd = esdhc_send_cmd,
	.set_ios = esdhc_set_ios,
int fsl_esdhc_initialize(struct bd_info *bis, struct fsl_esdhc_cfg *cfg)
	struct fsl_esdhc_plat *plat;
	struct fsl_esdhc_priv *priv;
	struct mmc_config *mmc_cfg;

	priv = calloc(sizeof(struct fsl_esdhc_priv), 1);
	plat = calloc(sizeof(struct fsl_esdhc_plat), 1);

	priv->esdhc_regs = (struct fsl_esdhc *)(unsigned long)(cfg->esdhc_base);
	priv->sdhc_clk = cfg->sdhc_clk;
	if (gd->arch.sdhc_per_clk)
		priv->is_sdhc_per_clk = true;

	mmc_cfg = &plat->cfg;

	if (cfg->max_bus_width == 8) {
		mmc_cfg->host_caps |= MMC_MODE_1BIT | MMC_MODE_4BIT |
	} else if (cfg->max_bus_width == 4) {
		mmc_cfg->host_caps |= MMC_MODE_1BIT | MMC_MODE_4BIT;
	} else if (cfg->max_bus_width == 1) {
		mmc_cfg->host_caps |= MMC_MODE_1BIT;
		mmc_cfg->host_caps |= MMC_MODE_1BIT | MMC_MODE_4BIT |
		printf("No max bus width provided. Assume 8-bit supported.\n");

	if (IS_ENABLED(CONFIG_ESDHC_DETECT_8_BIT_QUIRK))
		mmc_cfg->host_caps &= ~MMC_MODE_8BIT;

	mmc_cfg->ops = &esdhc_ops;

	fsl_esdhc_get_cfg_common(priv, mmc_cfg);

	mmc = mmc_create(mmc_cfg, priv);

int fsl_esdhc_mmc_init(struct bd_info *bis)
	struct fsl_esdhc_cfg *cfg;

	cfg = calloc(sizeof(struct fsl_esdhc_cfg), 1);
	cfg->esdhc_base = CONFIG_SYS_FSL_ESDHC_ADDR;
	/* Prefer peripheral clock which provides higher frequency. */
	if (gd->arch.sdhc_per_clk)
		cfg->sdhc_clk = gd->arch.sdhc_per_clk;
		cfg->sdhc_clk = gd->arch.sdhc_clk;
	return fsl_esdhc_initialize(bis, cfg);
static int fsl_esdhc_probe(struct udevice *dev)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	struct fsl_esdhc_plat *plat = dev_get_platdata(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	addr = dev_read_addr(dev);
	if (addr == FDT_ADDR_T_NONE)
	priv->esdhc_regs = (struct fsl_esdhc *)lower_32_bits(addr);
	priv->esdhc_regs = (struct fsl_esdhc *)addr;

	if (IS_ENABLED(CONFIG_FSL_ESDHC_SUPPORT_ADMA2)) {
		 * Only newer eSDHC controllers can do ADMA2 if the ADMA flag
		 * is set in the host capabilities register.
		caps = esdhc_read32(&priv->esdhc_regs->hostcapblt);
		hostver = esdhc_read32(&priv->esdhc_regs->hostver);
		if (caps & HOSTCAPBLT_DMAS &&
		    HOSTVER_VENDOR(hostver) > VENDOR_V_22) {
			priv->adma_desc_table = sdhci_adma_init();
			if (!priv->adma_desc_table)
				debug("Could not allocate ADMA tables, falling back to SDMA\n");

	if (gd->arch.sdhc_per_clk) {
		priv->sdhc_clk = gd->arch.sdhc_per_clk;
		priv->is_sdhc_per_clk = true;
		priv->sdhc_clk = gd->arch.sdhc_clk;

	if (priv->sdhc_clk <= 0) {
		dev_err(dev, "Unable to get clk for %s\n", dev->name);

	fsl_esdhc_get_cfg_common(priv, &plat->cfg);

	mmc_of_parse(dev, &plat->cfg);

	mmc->cfg = &plat->cfg;

	ret = esdhc_init_common(priv, mmc);

	if (IS_ENABLED(CONFIG_FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND) &&
	    !fsl_esdhc_get_cd(dev))
		esdhc_setbits32(&priv->esdhc_regs->proctl, PROCTL_VOLT_SEL);
static int fsl_esdhc_get_cd(struct udevice *dev)
	struct fsl_esdhc_plat *plat = dev_get_platdata(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	if (plat->cfg.host_caps & MMC_CAP_NONREMOVABLE)

	return esdhc_getcd_common(priv);

static int fsl_esdhc_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
	struct fsl_esdhc_plat *plat = dev_get_platdata(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	return esdhc_send_cmd_common(priv, &plat->mmc, cmd, data);

static int fsl_esdhc_set_ios(struct udevice *dev)
	struct fsl_esdhc_plat *plat = dev_get_platdata(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	return esdhc_set_ios_common(priv, &plat->mmc);

static int fsl_esdhc_reinit(struct udevice *dev)
	struct fsl_esdhc_plat *plat = dev_get_platdata(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	return esdhc_init_common(priv, &plat->mmc);

#ifdef MMC_SUPPORTS_TUNING
static int fsl_esdhc_execute_tuning(struct udevice *dev, uint32_t opcode)
	struct fsl_esdhc_plat *plat = dev_get_platdata(dev);
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);
	struct fsl_esdhc *regs = priv->esdhc_regs;

	esdhc_tuning_block_enable(priv, true);
	esdhc_setbits32(&regs->autoc12err, EXECUTE_TUNING);

	irqstaten = esdhc_read32(&regs->irqstaten);
	esdhc_write32(&regs->irqstaten, IRQSTATEN_BRR);
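	/*
	 * Send tuning blocks until the controller clears EXECUTE_TUNING.
	 * If SMPCLKSEL is then set, the controller has locked onto a valid
	 * sample point and tuning has succeeded (standard SDHCI tuning
	 * behaviour; bit names per this driver's register definitions).
	 */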
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		mmc_send_tuning(&plat->mmc, opcode, NULL);

		val = esdhc_read32(&regs->autoc12err);
		if (!(val & EXECUTE_TUNING)) {
			if (val & SMPCLKSEL)

	esdhc_write32(&regs->irqstaten, irqstaten);

	if (i != MAX_TUNING_LOOP) {
		if (plat->mmc.hs400_tuning)
			esdhc_setbits32(&regs->sdtimingctl, FLW_CTL_BG);

	printf("fsl_esdhc: tuning failed!\n");
	esdhc_clrbits32(&regs->autoc12err, SMPCLKSEL);
	esdhc_clrbits32(&regs->autoc12err, EXECUTE_TUNING);
	esdhc_tuning_block_enable(priv, false);

int fsl_esdhc_hs400_prepare_ddr(struct udevice *dev)
	struct fsl_esdhc_priv *priv = dev_get_priv(dev);

	esdhc_tuning_block_enable(priv, false);
static const struct dm_mmc_ops fsl_esdhc_ops = {
	.get_cd = fsl_esdhc_get_cd,
	.send_cmd = fsl_esdhc_send_cmd,
	.set_ios = fsl_esdhc_set_ios,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning = fsl_esdhc_execute_tuning,
	.reinit = fsl_esdhc_reinit,
	.hs400_prepare_ddr = fsl_esdhc_hs400_prepare_ddr,

static const struct udevice_id fsl_esdhc_ids[] = {
	{ .compatible = "fsl,esdhc", },

static int fsl_esdhc_bind(struct udevice *dev)
	struct fsl_esdhc_plat *plat = dev_get_platdata(dev);

	return mmc_bind(dev, &plat->mmc, &plat->cfg);

U_BOOT_DRIVER(fsl_esdhc) = {
	.name = "fsl-esdhc-mmc",
	.of_match = fsl_esdhc_ids,
	.ops = &fsl_esdhc_ops,
	.bind = fsl_esdhc_bind,
	.probe = fsl_esdhc_probe,
	.plat_auto = sizeof(struct fsl_esdhc_plat),
	.priv_auto = sizeof(struct fsl_esdhc_priv),