1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2011, Marvell Semiconductor Inc.
4 * Lei Wen <leiwen@marvell.com>
6 * Back ported to the 8xx platform (from the 8260 platform) by
7 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
/*
 * Bounce buffer for DMA transfers on platforms that require a fixed,
 * suitably aligned DMA address. When CONFIG_FIXED_SDHCI_ALIGNED_BUFFER
 * is set the buffer lives at that fixed address; otherwise (not visible
 * in this excerpt) it is presumably allocated at runtime — see
 * sdhci_init(), which memalign()s it under SDHCI_QUIRK_32BIT_DMA_ADDR.
 */
16 #if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
17 void *aligned_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
/*
 * sdhci_reset() - issue a software reset of the controller
 * @host: SDHCI host state
 * @mask: reset bits to assert (e.g. SDHCI_RESET_ALL / _CMD / _DATA)
 *
 * Writes @mask to SDHCI_SOFTWARE_RESET and polls until the controller
 * clears it, printing an error if the reset never completes.
 *
 * NOTE(review): this excerpt elides several lines (timeout
 * initialisation/decrement, the delay inside the poll loop, and the
 * closing braces) — the visible text is not the complete function.
 */
22 static void sdhci_reset(struct sdhci_host *host, u8 mask)
24 	unsigned long timeout;
28 	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
29 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
31 			printf("%s: Reset 0x%x never completed.\n",
/*
 * sdhci_cmd_done() - read the command response registers into @cmd
 * @host: SDHCI host state
 * @cmd:  command whose response[] array is filled in
 *
 * For a 136-bit (long) response the hardware strips the CRC byte, so
 * each 32-bit response word is rebuilt by shifting the register value
 * left by 8 and OR-ing in the top byte of the next-lower register.
 * Short responses are a single 32-bit register read.
 *
 * NOTE(review): the declaration of loop variable 'i', an OR-in guard
 * (likely 'if (i != 3)'), and closing braces are elided in this excerpt.
 */
40 static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
43 	if (cmd->resp_type & MMC_RSP_136) {
44 		/* CRC is stripped so we need to do some shifting. */
45 		for (i = 0; i < 4; i++) {
46 			cmd->response[i] = sdhci_readl(host,
47 					SDHCI_RESPONSE + (3-i)*4) << 8;
49 			cmd->response[i] |= sdhci_readb(host,
50 					SDHCI_RESPONSE + (3-i)*4-1);
53 		cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
/*
 * sdhci_transfer_pio() - move one block through the PIO data port
 * @host: SDHCI host state
 * @data: transfer descriptor; dest/src buffer and blocksize are used
 *
 * Copies data->blocksize bytes, 32 bits at a time, between the buffer
 * and SDHCI_BUFFER. Direction is chosen by data->flags (read fills the
 * buffer from the register; otherwise the buffer is written out).
 *
 * NOTE(review): declarations of 'i'/'offs', the 'else' keyword before
 * the write path, and closing braces are elided in this excerpt. The
 * read path uses data->dest; the write path presumably computed offs
 * from data->src in the elided lines — confirm against full source.
 */
57 static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
61 	for (i = 0; i < data->blocksize; i += 4) {
62 		offs = data->dest + i;
63 		if (data->flags == MMC_DATA_READ)
64 			*(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
66 			sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
70 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
/*
 * sdhci_adma_desc() - fill the next slot in the ADMA descriptor table
 * @host: SDHCI host state (supplies adma_desc_table and desc_slot)
 * @buf:  data buffer this descriptor points at
 * @len:  transfer length for this descriptor
 * (a fourth parameter marking the final descriptor is elided here)
 *
 * Builds a valid "transfer data" descriptor; the ATTR_END bit is set on
 * the last descriptor of the chain. On 64-bit DMA configurations the
 * high half of the buffer address goes into addr_hi.
 *
 * NOTE(review): the 'end' parameter name, len/attr assignments into the
 * descriptor, desc_slot advance, and #endif are elided in this excerpt.
 */
71 static void sdhci_adma_desc(struct sdhci_host *host, char *buf, u16 len,
74 	struct sdhci_adma_desc *desc;
77 	desc = &host->adma_desc_table[host->desc_slot];
79 	attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
83 		attr |= ADMA_DESC_ATTR_END;
88 	desc->addr_lo = (dma_addr_t)buf;
89 #ifdef CONFIG_DMA_ADDR_T_64BIT
90 	desc->addr_hi = (u64)buf >> 32;
/*
 * sdhci_prepare_adma_table() - build the ADMA2 descriptor chain
 * @host: SDHCI host state
 * @data: transfer descriptor (blocks, blocksize, direction, buffers)
 *
 * Splits the transfer into ADMA_MAX_LEN-sized descriptors, marking the
 * final (short) descriptor as the end of the chain, then flushes the
 * descriptor table from cache so the controller sees it in memory.
 *
 * NOTE(review): this excerpt elides the 'buf' declaration, the
 * read-path buffer assignment (data->dest), the loop around the
 * full-size descriptors, buffer pointer advancement, and the rounding
 * granularity argument to flush_cache() / ROUND().
 */
94 static void sdhci_prepare_adma_table(struct sdhci_host *host,
95 				     struct mmc_data *data)
97 	uint trans_bytes = data->blocksize * data->blocks;
98 	uint desc_count = DIV_ROUND_UP(trans_bytes, ADMA_MAX_LEN);
104 	if (data->flags & MMC_DATA_READ)
107 		buf = (char *)data->src;
110 		sdhci_adma_desc(host, buf, ADMA_MAX_LEN, false);
112 		trans_bytes -= ADMA_MAX_LEN;
115 	sdhci_adma_desc(host, buf, trans_bytes, true);
117 	flush_cache((dma_addr_t)host->adma_desc_table,
118 		    ROUND(desc_count * sizeof(struct sdhci_adma_desc),
121 #elif defined(CONFIG_MMC_SDHCI_SDMA)
/*
 * SDMA-only build: stub so callers can reference the symbol without
 * ADMA support compiled in. Body (presumably empty) is elided here.
 */
122 static void sdhci_prepare_adma_table(struct sdhci_host *host,
123 				     struct mmc_data *data)
126 #if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
/*
 * sdhci_prepare_dma() - program the controller for a DMA transfer
 * @host:        SDHCI host state
 * @data:        transfer descriptor (direction, buffers)
 * @is_aligned:  out-flag, cleared when the bounce buffer is substituted
 *               for a misaligned source/destination (assignment elided
 *               in this excerpt)
 * @trans_bytes: total transfer size in bytes
 *
 * Selects SDMA or ADMA32/ADMA64 mode in SDHCI_HOST_CONTROL, programs
 * the DMA/ADMA address registers, and flushes the data buffer from
 * cache. For SDMA with the 32-bit-address quirk, a misaligned buffer
 * (start_addr & 0x7) is replaced by 'aligned_buffer', copying write
 * data into it first; with CONFIG_FIXED_SDHCI_ALIGNED_BUFFER the bounce
 * buffer is used unconditionally.
 *
 * NOTE(review): 'ctrl' declaration, several 'else'/'#else'/'#endif'
 * lines, the '*is_aligned = 0' assignments, and an ADMA cache flush of
 * the descriptor address are elided in this excerpt.
 */
127 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
128 			      int *is_aligned, int trans_bytes)
132 	if (data->flags == MMC_DATA_READ)
133 		host->start_addr = (dma_addr_t)data->dest;
135 		host->start_addr = (dma_addr_t)data->src;
137 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
138 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
139 	if (host->flags & USE_ADMA64)
140 		ctrl |= SDHCI_CTRL_ADMA64;
141 	else if (host->flags & USE_ADMA)
142 		ctrl |= SDHCI_CTRL_ADMA32;
143 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
145 	if (host->flags & USE_SDMA) {
146 		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
147 		    (host->start_addr & 0x7) != 0x0) {
149 			host->start_addr = (unsigned long)aligned_buffer;
150 			if (data->flags != MMC_DATA_READ)
151 				memcpy(aligned_buffer, data->src, trans_bytes);
154 #if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
156 		 * Always use this bounce-buffer when
157 		 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined
160 		host->start_addr = (unsigned long)aligned_buffer;
161 		if (data->flags != MMC_DATA_READ)
162 			memcpy(aligned_buffer, data->src, trans_bytes);
164 		sdhci_writel(host, host->start_addr, SDHCI_DMA_ADDRESS);
166 	} else if (host->flags & (USE_ADMA | USE_ADMA64)) {
167 		sdhci_prepare_adma_table(host, data);
169 		sdhci_writel(host, (u32)host->adma_addr, SDHCI_ADMA_ADDRESS);
170 		if (host->flags & USE_ADMA64)
171 			sdhci_writel(host, (u64)host->adma_addr >> 32,
172 				     SDHCI_ADMA_ADDRESS_HI);
175 	flush_cache(host->start_addr, ROUND(trans_bytes, ARCH_DMA_MINALIGN));
/* No-DMA build: empty stub keeping the call site in send_command valid. */
178 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
179 			      int *is_aligned, int trans_bytes)
/*
 * sdhci_transfer_data() - pump or supervise the data phase of a command
 * @host: SDHCI host state
 * @data: transfer descriptor
 *
 * Loops on SDHCI_INT_STATUS until SDHCI_INT_DATA_END is seen:
 *  - error bits abort the transfer (return path elided in this excerpt);
 *  - for PIO, each ready interrupt moves one block via
 *    sdhci_transfer_pio() and advances data->dest;
 *  - for SDMA, each DMA-boundary interrupt re-programs
 *    SDHCI_DMA_ADDRESS with the next SDHCI_DEFAULT_BOUNDARY_SIZE-aligned
 *    address to continue the transfer;
 *  - a timeout (countdown elided) prints an error.
 *
 * Returns 0 on success, negative on error (return statements elided in
 * this excerpt — presumed from the visible int return type; confirm
 * against full source).
 */
182 static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
184 	dma_addr_t start_addr = host->start_addr;
185 	unsigned int stat, rdy, mask, timeout, block = 0;
186 	bool transfer_done = false;
189 	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
190 	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
192 		stat = sdhci_readl(host, SDHCI_INT_STATUS);
193 		if (stat & SDHCI_INT_ERROR) {
194 			pr_debug("%s: Error detected in status(0x%X)!\n",
198 		if (!transfer_done && (stat & rdy)) {
199 			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
201 			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
202 			sdhci_transfer_pio(host, data);
203 			data->dest += data->blocksize;
204 			if (++block >= data->blocks) {
205 				/* Keep looping until the SDHCI_INT_DATA_END is
206 				 * cleared, even if we finished sending all the
209 				transfer_done = true;
213 		if ((host->flags & USE_DMA) && !transfer_done &&
214 		    (stat & SDHCI_INT_DMA_END)) {
215 			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
216 			if (host->flags & USE_SDMA) {
218 					~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
219 				start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
220 				sdhci_writel(host, start_addr,
227 			printf("%s: Transfer data timeout\n", __func__);
230 	} while (!(stat & SDHCI_INT_DATA_END));
235 * The driver will not send a command while the card is busy, so it must
236 * wait for the card to reach the ready state first.
237 * Whenever the card is still busy after the timeout expires, the (last)
238 * timeout value is doubled, but only while it stays within the globally
239 * defined maximum. Each subsequent call reuses the last timeout value.
/* Timeouts in ms: busy-wait cap, initial busy-wait, and status-poll limit. */
241 #define SDHCI_CMD_MAX_TIMEOUT			3200
242 #define SDHCI_CMD_DEFAULT_TIMEOUT		100
243 #define SDHCI_READ_STATUS_TIMEOUT		1000
/*
 * sdhci_send_command() - issue one MMC command (optionally with data)
 *
 * Two signatures exist, selected by a driver-model #if (the #if/#else
 * lines themselves are elided in this excerpt): the DM variant takes a
 * struct udevice and derives mmc from it; the legacy variant takes
 * struct mmc directly.
 *
 * Flow: wait for CMD/DATA inhibit to clear (doubling the adaptive
 * 'cmd_timeout' up to SDHCI_CMD_MAX_TIMEOUT while busy), clear stale
 * interrupt status, compute the response-type flags and completion
 * mask, program the data-transfer registers (timeout, block size/count,
 * transfer mode, DMA setup) when @data is present, write argument and
 * command, then poll SDHCI_INT_STATUS until the expected bits are set
 * or SDHCI_READ_STATUS_TIMEOUT elapses. On success the response is
 * read back and any data phase is completed via sdhci_transfer_data();
 * on error the CMD and DATA lines are reset and a timeout/comm error
 * is returned (exact return statements elided in this excerpt).
 *
 * NOTE(review): many lines are elided here — #if/#else/#endif pairs,
 * udelay/timeout bookkeeping in the busy loop, the 'ret' declaration,
 * SDMA block-size masking in SDHCI_MAKE_BLKSZ, the aligned-buffer
 * bounds check, and the final return paths. Do not treat the visible
 * text as the complete function.
 */
246 static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
247 			      struct mmc_data *data)
249 	struct mmc *mmc = mmc_get_mmc_dev(dev);
252 static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
253 			      struct mmc_data *data)
256 	struct sdhci_host *host = mmc->priv;
257 	unsigned int stat = 0;
259 	int trans_bytes = 0, is_aligned = 1;
260 	u32 mask, flags, mode;
261 	unsigned int time = 0;
262 	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
263 	ulong start = get_timer(0);
265 	host->start_addr = 0;
266 	/* Timeout unit - ms */
267 	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;
269 	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
271 	/* We shouldn't wait for data inihibit for stop commands, even
272 	   though they might use busy signaling */
273 	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
274 	    ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
275 	      cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
276 		mask &= ~SDHCI_DATA_INHIBIT;
278 	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
279 		if (time >= cmd_timeout) {
280 			printf("%s: MMC: %d busy ", __func__, mmc_dev);
281 			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
282 				cmd_timeout += cmd_timeout;
283 				printf("timeout increasing to: %u ms.\n",
294 	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
296 	mask = SDHCI_INT_RESPONSE;
297 	if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
298 	     cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
299 		mask = SDHCI_INT_DATA_AVAIL;
301 	if (!(cmd->resp_type & MMC_RSP_PRESENT))
302 		flags = SDHCI_CMD_RESP_NONE;
303 	else if (cmd->resp_type & MMC_RSP_136)
304 		flags = SDHCI_CMD_RESP_LONG;
305 	else if (cmd->resp_type & MMC_RSP_BUSY) {
306 		flags = SDHCI_CMD_RESP_SHORT_BUSY;
308 			mask |= SDHCI_INT_DATA_END;
310 		flags = SDHCI_CMD_RESP_SHORT;
312 	if (cmd->resp_type & MMC_RSP_CRC)
313 		flags |= SDHCI_CMD_CRC;
314 	if (cmd->resp_type & MMC_RSP_OPCODE)
315 		flags |= SDHCI_CMD_INDEX;
316 	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
317 	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
318 		flags |= SDHCI_CMD_DATA;
320 	/* Set Transfer mode regarding to data flag */
322 		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
323 		mode = SDHCI_TRNS_BLK_CNT_EN;
324 		trans_bytes = data->blocks * data->blocksize;
325 		if (data->blocks > 1)
326 			mode |= SDHCI_TRNS_MULTI;
328 		if (data->flags == MMC_DATA_READ)
329 			mode |= SDHCI_TRNS_READ;
331 		if (host->flags & USE_DMA) {
332 			mode |= SDHCI_TRNS_DMA;
333 			sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
336 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
339 		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
340 		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
341 	} else if (cmd->resp_type & MMC_RSP_BUSY) {
342 		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
345 	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
346 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
347 	start = get_timer(0);
349 		stat = sdhci_readl(host, SDHCI_INT_STATUS);
350 		if (stat & SDHCI_INT_ERROR)
353 		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
354 			if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
357 			printf("%s: Timeout for status update!\n",
362 	} while ((stat & mask) != mask);
364 	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
365 		sdhci_cmd_done(host, cmd);
366 		sdhci_writel(host, mask, SDHCI_INT_STATUS);
371 		ret = sdhci_transfer_data(host, data);
373 	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
376 	stat = sdhci_readl(host, SDHCI_INT_STATUS);
377 	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
379 		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
380 		    !is_aligned && (data->flags == MMC_DATA_READ))
381 			memcpy(data->dest, aligned_buffer, trans_bytes);
385 	sdhci_reset(host, SDHCI_RESET_CMD);
386 	sdhci_reset(host, SDHCI_RESET_DATA);
387 	if (stat & SDHCI_INT_TIMEOUT)
393 #if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
/*
 * sdhci_execute_tuning() - dm_mmc_ops hook for UHS/HS200 tuning
 * @dev:    MMC udevice
 * @opcode: tuning command opcode (e.g. CMD19/CMD21)
 *
 * Delegates to the platform driver's platform_execute_tuning() callback
 * when provided. The error-return path for a failing callback and the
 * fallback when no callback exists are elided in this excerpt.
 */
394 static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
397 	struct mmc *mmc = mmc_get_mmc_dev(dev);
398 	struct sdhci_host *host = mmc->priv;
400 	debug("%s\n", __func__);
402 	if (host->ops && host->ops->platform_execute_tuning) {
403 		err = host->ops->platform_execute_tuning(mmc, opcode);
/*
 * sdhci_set_clock() - program the SD clock to at most @clock Hz
 * @mmc:   MMC device
 * @clock: requested clock frequency in Hz (0 presumably leaves the
 *         clock gated after the initial disable — confirm in full source)
 *
 * Waits for CMD/DATA inhibit, gates the clock, then picks a divider:
 *  - spec >= 3.00: programmable clock mode when the controller
 *    advertises a clock multiplier (divisor search over 1..1024),
 *    otherwise a 10-bit divided clock where divisors are multiples of 2;
 *  - spec 2.00: power-of-two divisors up to SDHCI_MAX_DIV_SPEC_200.
 * The chosen divider is split across SDHCI_DIVIDER_SHIFT and the
 * high-bits field, the internal clock is enabled and polled until
 * stable, and finally the card clock is enabled.
 *
 * NOTE(review): timeout initialisation/decrements, several else/brace
 * lines, the clk_mul checks, and the div halving before programming
 * are elided in this excerpt.
 */
411 static int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
413 	struct sdhci_host *host = mmc->priv;
414 	unsigned int div, clk = 0, timeout;
418 	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
419 	       (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
421 			printf("%s: Timeout to wait cmd & data inhibit\n",
430 	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
435 	if (host->ops && host->ops->set_delay)
436 		host->ops->set_delay(host);
438 	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
440 		 * Check if the Host Controller supports Programmable Clock
444 			for (div = 1; div <= 1024; div++) {
445 				if ((host->max_clk / div) <= clock)
450 			 * Set Programmable Clock Mode in the Clock
453 			clk = SDHCI_PROG_CLOCK_MODE;
456 			/* Version 3.00 divisors must be a multiple of 2. */
457 			if (host->max_clk <= clock) {
461 				     div < SDHCI_MAX_DIV_SPEC_300;
463 					if ((host->max_clk / div) <= clock)
470 		/* Version 2.00 divisors must be a power of 2. */
471 		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
472 			if ((host->max_clk / div) <= clock)
478 	if (host->ops && host->ops->set_clock)
479 		host->ops->set_clock(host, div);
481 	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
482 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
483 		<< SDHCI_DIVIDER_HI_SHIFT;
484 	clk |= SDHCI_CLOCK_INT_EN;
485 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
489 	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
490 		& SDHCI_CLOCK_INT_STABLE)) {
492 			printf("%s: Internal clock never stabilised.\n",
500 	clk |= SDHCI_CLOCK_CARD_EN;
501 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/*
 * sdhci_set_power() - set the bus voltage in SDHCI_POWER_CONTROL
 * @host:  SDHCI host state
 * @power: bit index into the MMC_VDD_* voltage mask, or (unsigned
 *         short)-1 to power off
 *
 * Maps the selected voltage bit to the matching SDHCI_POWER_* code
 * (1.8V / 3.0V / 3.3V), writes 0 first when powering off or on an
 * unsupported voltage, then writes the code with SDHCI_POWER_ON set.
 *
 * NOTE(review): the 'pwr' declaration, intermediate case labels
 * (visible only as the VDD_165_195 case), default handling, and a
 * possible no-power quirk path are elided in this excerpt.
 */
505 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
509 	if (power != (unsigned short)-1) {
510 		switch (1 << power) {
511 		case MMC_VDD_165_195:
512 			pwr = SDHCI_POWER_180;
516 			pwr = SDHCI_POWER_300;
520 			pwr = SDHCI_POWER_330;
526 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
530 	pwr |= SDHCI_POWER_ON;
532 	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
/*
 * sdhci_set_ios() - apply clock, bus width and speed-mode settings
 *
 * Two signatures exist, selected by a driver-model #if (the
 * #if/#else/#endif lines are elided in this excerpt): the DM variant
 * takes a struct udevice, the legacy variant takes struct mmc.
 *
 * Calls the optional set_control_reg() hook, reprograms the clock when
 * it changed (or disables it), sets the 4/8-bit bus-width bits in
 * SDHCI_HOST_CONTROL (8-bit only on spec >= 3.00 or with the WIDE8
 * quirk), toggles high-speed mode above 26 MHz unless a no-HISPD quirk
 * is set, and finally invokes the optional set_ios_post() hook.
 *
 * NOTE(review): the 'ctrl' declaration, 'else' keywords between the
 * width branches, and the return statement are elided in this excerpt.
 */
536 static int sdhci_set_ios(struct udevice *dev)
538 	struct mmc *mmc = mmc_get_mmc_dev(dev);
540 static int sdhci_set_ios(struct mmc *mmc)
544 	struct sdhci_host *host = mmc->priv;
546 	if (host->ops && host->ops->set_control_reg)
547 		host->ops->set_control_reg(host);
549 	if (mmc->clock != host->clock)
550 		sdhci_set_clock(mmc, mmc->clock);
552 	if (mmc->clk_disable)
553 		sdhci_set_clock(mmc, 0);
556 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
557 	if (mmc->bus_width == 8) {
558 		ctrl &= ~SDHCI_CTRL_4BITBUS;
559 		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
560 		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
561 			ctrl |= SDHCI_CTRL_8BITBUS;
563 		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
564 		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
565 			ctrl &= ~SDHCI_CTRL_8BITBUS;
566 		if (mmc->bus_width == 4)
567 			ctrl |= SDHCI_CTRL_4BITBUS;
569 			ctrl &= ~SDHCI_CTRL_4BITBUS;
572 	if (mmc->clock > 26000000)
573 		ctrl |= SDHCI_CTRL_HISPD;
575 		ctrl &= ~SDHCI_CTRL_HISPD;
577 	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
578 	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
579 		ctrl &= ~SDHCI_CTRL_HISPD;
581 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
583 	/* If available, call the driver specific "post" set_ios() function */
584 	if (host->ops && host->ops->set_ios_post)
585 		host->ops->set_ios_post(host);
/*
 * sdhci_init() - one-time controller initialisation
 * @mmc: MMC device whose priv is the sdhci_host
 *
 * Optionally claims the card-detect GPIO (DM_MMC + DM_GPIO builds),
 * issues a full controller reset, allocates the 512 KiB bounce buffer
 * needed by the 32-bit-DMA-address quirk, applies the highest supported
 * bus voltage, lets the driver's get_cd() hook run, then enables the
 * command/data interrupt *status* bits while masking all interrupt
 * *signals* (this driver polls SDHCI_INT_STATUS rather than using IRQs).
 *
 * NOTE(review): #endif lines, the allocation-failure return, and the
 * final return statement are elided in this excerpt.
 */
590 static int sdhci_init(struct mmc *mmc)
592 	struct sdhci_host *host = mmc->priv;
593 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
594 	struct udevice *dev = mmc->dev;
596 	gpio_request_by_name(dev, "cd-gpio", 0,
597 			     &host->cd_gpio, GPIOD_IS_IN);
600 	sdhci_reset(host, SDHCI_RESET_ALL);
602 	if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
603 		aligned_buffer = memalign(8, 512*1024);
604 		if (!aligned_buffer) {
605 			printf("%s: Aligned buffer alloc failed!!!\n",
611 	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);
613 	if (host->ops && host->ops->get_cd)
614 		host->ops->get_cd(host);
616 	/* Enable only interrupts served by the SD controller */
617 	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
619 	/* Mask all sdhci interrupt sources */
620 	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);
/*
 * sdhci_probe() - driver-model probe entry point
 * @dev: MMC udevice
 *
 * Thin wrapper: resolves the struct mmc for @dev and runs sdhci_init().
 */
626 int sdhci_probe(struct udevice *dev)
628 	struct mmc *mmc = mmc_get_mmc_dev(dev);
630 	return sdhci_init(mmc);
/*
 * Operation tables: the driver-model dm_mmc_ops variant (with optional
 * tuning hook) and the legacy mmc_ops variant. The two definitions are
 * selected by a CONFIG_DM_MMC #if/#else whose preprocessor lines are
 * elided in this excerpt.
 */
633 const struct dm_mmc_ops sdhci_ops = {
634 	.send_cmd	= sdhci_send_command,
635 	.set_ios	= sdhci_set_ios,
636 #ifdef MMC_SUPPORTS_TUNING
637 	.execute_tuning	= sdhci_execute_tuning,
641 static const struct mmc_ops sdhci_ops = {
642 	.send_cmd	= sdhci_send_command,
643 	.set_ios	= sdhci_set_ios,
/*
 * sdhci_setup_cfg() - derive mmc_config from controller capabilities
 * @cfg:   configuration to fill in
 * @host:  SDHCI host state (registers, quirks, preset max_clk/caps)
 * @f_max: upper clock bound imposed by the board (0 presumably means
 *         "no bound" — confirm in full source)
 * @f_min: lower clock bound imposed by the board
 *
 * Reads SDHCI_CAPABILITIES(_1) and the host version, selects SDMA
 * and/or ADMA (allocating the ADMA descriptor table), computes the
 * base clock (scaled by the version-3 clock multiplier when present),
 * clamps f_max/f_min against the spec's divisor limits, fills in the
 * supported voltages, bus widths and high-speed/UHS/HS200 host caps
 * (dropping UHS modes when 1.8V is unavailable or quirked off), and
 * sets b_max to CONFIG_SYS_MMC_MAX_BLK_COUNT.
 *
 * NOTE(review): this excerpt elides many lines — error returns for
 * missing SDMA/ADMA support and failed allocations, #else/#endif pairs,
 * the f_min clamping branch, and the final return. The "doesn't support
 * SDMA" message at original line 666 is printed on the *ADMA* check; in
 * the full source that wording looks like a copy/paste slip worth
 * confirming upstream.
 */
648 int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
649 		u32 f_max, u32 f_min)
651 	u32 caps, caps_1 = 0;
653 	caps = sdhci_readl(host, SDHCI_CAPABILITIES);
655 #ifdef CONFIG_MMC_SDHCI_SDMA
656 	if (!(caps & SDHCI_CAN_DO_SDMA)) {
657 		printf("%s: Your controller doesn't support SDMA!!\n",
662 	host->flags |= USE_SDMA;
664 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
665 	if (!(caps & SDHCI_CAN_DO_ADMA2)) {
666 		printf("%s: Your controller doesn't support SDMA!!\n",
670 	host->adma_desc_table = (struct sdhci_adma_desc *)
671 		memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ);
673 	host->adma_addr = (dma_addr_t)host->adma_desc_table;
674 #ifdef CONFIG_DMA_ADDR_T_64BIT
675 	host->flags |= USE_ADMA64;
677 	host->flags |= USE_ADMA;
680 	if (host->quirks & SDHCI_QUIRK_REG32_RW)
682 			sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
684 		host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
686 	cfg->name = host->name;
687 #ifndef CONFIG_DM_MMC
688 	cfg->ops = &sdhci_ops;
691 	/* Check whether the clock multiplier is supported or not */
692 	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
693 		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
694 		host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
695 				SDHCI_CLOCK_MUL_SHIFT;
698 	if (host->max_clk == 0) {
699 		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
700 			host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
701 				SDHCI_CLOCK_BASE_SHIFT;
703 			host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
704 				SDHCI_CLOCK_BASE_SHIFT;
705 		host->max_clk *= 1000000;
707 			host->max_clk *= host->clk_mul;
709 	if (host->max_clk == 0) {
710 		printf("%s: Hardware doesn't specify base clock frequency\n",
714 	if (f_max && (f_max < host->max_clk))
717 		cfg->f_max = host->max_clk;
721 		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
722 			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
724 			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
727 	if (caps & SDHCI_CAN_VDD_330)
728 		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
729 	if (caps & SDHCI_CAN_VDD_300)
730 		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
731 	if (caps & SDHCI_CAN_VDD_180)
732 		cfg->voltages |= MMC_VDD_165_195;
734 	if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
735 		cfg->voltages |= host->voltages;
737 	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;
739 	/* Since Host Controller Version3.0 */
740 	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
741 		if (!(caps & SDHCI_CAN_DO_8BIT))
742 			cfg->host_caps &= ~MMC_MODE_8BIT;
745 	if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
746 		cfg->host_caps &= ~MMC_MODE_HS;
747 		cfg->host_caps &= ~MMC_MODE_HS_52MHz;
750 	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
751 		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
753 	if (!(cfg->voltages & MMC_VDD_165_195) ||
754 	    (host->quirks & SDHCI_QUIRK_NO_1_8_V))
755 		caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
756 			    SDHCI_SUPPORT_DDR50);
758 	if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
759 		      SDHCI_SUPPORT_DDR50))
760 		cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);
762 	if (caps_1 & SDHCI_SUPPORT_SDR104) {
763 		cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
765 		 * SD3.0: SDR104 is supported so (for eMMC) the caps2
766 		 * field can be promoted to support HS200.
768 		cfg->host_caps |= MMC_CAP(MMC_HS_200);
769 	} else if (caps_1 & SDHCI_SUPPORT_SDR50) {
770 		cfg->host_caps |= MMC_CAP(UHS_SDR50);
773 	if (caps_1 & SDHCI_SUPPORT_DDR50)
774 		cfg->host_caps |= MMC_CAP(UHS_DDR50);
777 		cfg->host_caps |= host->host_caps;
779 	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
/*
 * sdhci_bind() - bind helper for SDHCI-based DM drivers
 *
 * Thin wrapper around mmc_bind() so platform drivers don't need to
 * call the MMC core directly from their bind() methods.
 */
785 int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
787 	return mmc_bind(dev, mmc, cfg);
790 int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
794 ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
798 host->mmc = mmc_create(&host->cfg, host);
799 if (host->mmc == NULL) {
800 printf("%s: mmc create fail!\n", __func__);