1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2011, Marvell Semiconductor Inc.
4 * Lei Wen <leiwen@marvell.com>
6 * Back ported to the 8xx platform (from the 8260 platform) by
7 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
18 #include <asm/cache.h>
19 #include <linux/bitops.h>
20 #include <linux/delay.h>
21 #include <linux/dma-mapping.h>
/*
 * sdhci_reset() - software-reset the controller lines selected by @mask.
 *
 * Writes @mask to SDHCI_SOFTWARE_RESET, then polls the same register
 * until the controller clears the requested bits.
 * NOTE(review): the timeout initialisation/decrement and the loop exit
 * path are elided in this view; presumably the loop gives up and prints
 * the message below after ~@timeout iterations — confirm in full source.
 */
24 static void sdhci_reset(struct sdhci_host *host, u8 mask)
26 unsigned long timeout;
30 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
31 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
33 printf("%s: Reset 0x%x never completed.\n",
/*
 * sdhci_cmd_done() - copy the command response registers into
 * cmd->response[].
 *
 * For 136-bit (R2) responses the controller strips the CRC byte, so
 * each 32-bit word is read from SDHCI_RESPONSE in reverse order,
 * shifted left by 8, and the low byte is refilled from the top byte of
 * the neighbouring (lower) register.  Short responses are a single
 * 32-bit read.
 */
42 static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
45 if (cmd->resp_type & MMC_RSP_136) {
46 /* CRC is stripped so we need to do some shifting. */
47 for (i = 0; i < 4; i++) {
48 cmd->response[i] = sdhci_readl(host,
49 SDHCI_RESPONSE + (3-i)*4) << 8;
/* Low-byte refill; guarded for i != 3 in the full source — elided here. */
51 cmd->response[i] |= sdhci_readb(host,
52 SDHCI_RESPONSE + (3-i)*4-1);
55 cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
/*
 * sdhci_transfer_pio() - move one block (data->blocksize bytes) through
 * the 32-bit SDHCI_BUFFER data port in PIO mode.
 *
 * NOTE(review): @offs is always derived from data->dest, including on
 * the write path — presumably dest/src alias via a union in struct
 * mmc_data; confirm against the mmc header.
 */
59 static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
63 for (i = 0; i < data->blocksize; i += 4) {
64 offs = data->dest + i;
65 if (data->flags == MMC_DATA_READ)
66 *(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
/* else branch (write direction) — the 'else' line is elided in this view */
68 sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
/*
 * sdhci_prepare_dma() - program the controller for a SDMA or ADMA
 * transfer and map the data buffer for DMA.
 *
 * Selects the DMA mode bits in SDHCI_HOST_CONTROL from host->flags,
 * falls back to the bounce buffer (host->align_buffer) when SDMA is in
 * use and the buffer is force-aligned or fails the 8-byte alignment
 * check under SDHCI_QUIRK_32BIT_DMA_ADDR, then maps the buffer and
 * writes the DMA/ADMA address registers.
 * NOTE(review): *is_aligned is presumably cleared when the bounce
 * buffer is taken — that assignment is elided in this view; confirm.
 */
72 #if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
73 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
74 int *is_aligned, int trans_bytes)
79 if (data->flags == MMC_DATA_READ)
82 buf = (void *)data->src;
/* Pick the DMA mode: ADMA64 > ADMA32 > default (SDMA) */
84 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
85 ctrl &= ~SDHCI_CTRL_DMA_MASK;
86 if (host->flags & USE_ADMA64)
87 ctrl |= SDHCI_CTRL_ADMA64;
88 else if (host->flags & USE_ADMA)
89 ctrl |= SDHCI_CTRL_ADMA32;
90 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/* SDMA with a misaligned/forced buffer: stage writes through the bounce buffer */
92 if (host->flags & USE_SDMA &&
93 (host->force_align_buffer ||
94 (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR &&
95 ((unsigned long)buf & 0x7) != 0x0))) {
97 if (data->flags != MMC_DATA_READ)
98 memcpy(host->align_buffer, buf, trans_bytes);
99 buf = host->align_buffer;
102 host->start_addr = dma_map_single(buf, trans_bytes,
103 mmc_get_dma_dir(data));
105 if (host->flags & USE_SDMA) {
106 sdhci_writel(host, phys_to_bus((ulong)host->start_addr),
109 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
110 else if (host->flags & (USE_ADMA | USE_ADMA64)) {
111 sdhci_prepare_adma_table(host->adma_desc_table, data,
114 sdhci_writel(host, lower_32_bits(host->adma_addr),
/* 64-bit ADMA needs the high half of the descriptor address as well */
116 if (host->flags & USE_ADMA64)
117 sdhci_writel(host, upper_32_bits(host->adma_addr),
118 SDHCI_ADMA_ADDRESS_HI);
/* No-DMA build variant: empty stub so callers need no #ifdefs. */
123 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
124 int *is_aligned, int trans_bytes)
/*
 * sdhci_transfer_data() - drive an in-flight data transfer (PIO or DMA)
 * to completion.
 *
 * Polls SDHCI_INT_STATUS until SDHCI_INT_DATA_END: in PIO mode it moves
 * one block per buffer-ready event; in SDMA mode it re-arms the DMA
 * address register at each 512 KiB boundary interrupt.  Unmaps the DMA
 * buffer when done.
 * NOTE(review): the error-return, timeout-decrement and udelay lines of
 * the poll loop are elided in this view.
 */
127 static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
129 dma_addr_t start_addr = host->start_addr;
130 unsigned int stat, rdy, mask, timeout, block = 0;
131 bool transfer_done = false;
134 rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
135 mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
137 stat = sdhci_readl(host, SDHCI_INT_STATUS);
138 if (stat & SDHCI_INT_ERROR) {
139 pr_debug("%s: Error detected in status(0x%X)!\n",
/* PIO path: buffer-ready interrupt pending and blocks still left to move */
143 if (!transfer_done && (stat & rdy)) {
144 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
146 sdhci_writel(host, rdy, SDHCI_INT_STATUS);
147 sdhci_transfer_pio(host, data);
148 data->dest += data->blocksize;
149 if (++block >= data->blocks) {
150 /* Keep looping until the SDHCI_INT_DATA_END is
151 * cleared, even if we finished sending all the
154 transfer_done = true;
/* SDMA path: ack the boundary interrupt and advance to the next 512K window */
158 if ((host->flags & USE_DMA) && !transfer_done &&
159 (stat & SDHCI_INT_DMA_END)) {
160 sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
161 if (host->flags & USE_SDMA) {
163 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
164 start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
165 sdhci_writel(host, phys_to_bus((ulong)start_addr),
172 printf("%s: Transfer data timeout\n", __func__);
175 } while (!(stat & SDHCI_INT_DATA_END));
177 dma_unmap_single(host->start_addr, data->blocks * data->blocksize,
178 mmc_get_dma_dir(data));
184 * No command will be sent by driver if card is busy, so driver must wait
185 * for card ready state.
186  * Each time the card is still busy when the timeout expires, the (last)
187  * timeout value is doubled, but never beyond the globally defined maximum.
188  * Subsequent calls reuse the last timeout value.
190 #define SDHCI_CMD_MAX_TIMEOUT 3200
191 #define SDHCI_CMD_DEFAULT_TIMEOUT 100
192 #define SDHCI_READ_STATUS_TIMEOUT 1000
/*
 * sdhci_send_command() - issue one MMC command, optionally with a data
 * transfer, and collect its response.
 *
 * Two signatures exist behind elided #ifdefs: the driver-model variant
 * takes a udevice, the legacy variant takes a struct mmc directly.
 *
 * Sequence: wait for CMD/DATA inhibit to clear (with an adaptive,
 * doubling busy timeout — see comment above SDHCI_CMD_MAX_TIMEOUT),
 * clear stale interrupt status, build the response/command flags,
 * program block size/count and transfer mode for data commands, write
 * argument + command, then poll SDHCI_INT_STATUS for completion and run
 * the data phase.  On error paths the CMD and DATA lines are reset.
 */
195 static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
196 struct mmc_data *data)
198 struct mmc *mmc = mmc_get_mmc_dev(dev);
/* Legacy (non-DM) signature — alternate #else branch, #if lines elided */
201 static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
202 struct mmc_data *data)
205 struct sdhci_host *host = mmc->priv;
206 unsigned int stat = 0;
208 int trans_bytes = 0, is_aligned = 1;
209 u32 mask, flags, mode;
210 unsigned int time = 0;
211 int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
212 ulong start = get_timer(0);
214 host->start_addr = 0;
215 /* Timeout unit - ms */
216 static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;
218 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
220 /* We shouldn't wait for data inihibit for stop commands, even
221 though they might use busy signaling */
222 if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
223 ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
224 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
225 mask &= ~SDHCI_DATA_INHIBIT;
/* Busy-wait for the inhibit bits; double cmd_timeout up to the max on expiry */
227 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
228 if (time >= cmd_timeout) {
229 printf("%s: MMC: %d busy ", __func__, mmc_dev);
230 if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
231 cmd_timeout += cmd_timeout;
232 printf("timeout increasing to: %u ms.\n",
/* Clear any stale interrupt status before issuing the new command */
243 sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
245 mask = SDHCI_INT_RESPONSE;
/* Tuning commands complete via the data-available interrupt instead */
246 if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
247 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
248 mask = SDHCI_INT_DATA_AVAIL;
/* Translate mmc response type into SDHCI command flags */
250 if (!(cmd->resp_type & MMC_RSP_PRESENT))
251 flags = SDHCI_CMD_RESP_NONE;
252 else if (cmd->resp_type & MMC_RSP_136)
253 flags = SDHCI_CMD_RESP_LONG;
254 else if (cmd->resp_type & MMC_RSP_BUSY) {
255 flags = SDHCI_CMD_RESP_SHORT_BUSY;
257 mask |= SDHCI_INT_DATA_END;
259 flags = SDHCI_CMD_RESP_SHORT;
261 if (cmd->resp_type & MMC_RSP_CRC)
262 flags |= SDHCI_CMD_CRC;
263 if (cmd->resp_type & MMC_RSP_OPCODE)
264 flags |= SDHCI_CMD_INDEX;
265 if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
266 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
267 flags |= SDHCI_CMD_DATA;
269 /* Set Transfer mode regarding to data flag */
/* 0xe is the maximum data timeout counter value (TMCLK * 2^27) */
271 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
272 mode = SDHCI_TRNS_BLK_CNT_EN;
273 trans_bytes = data->blocks * data->blocksize;
274 if (data->blocks > 1)
275 mode |= SDHCI_TRNS_MULTI;
277 if (data->flags == MMC_DATA_READ)
278 mode |= SDHCI_TRNS_READ;
280 if (host->flags & USE_DMA) {
281 mode |= SDHCI_TRNS_DMA;
282 sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
285 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
288 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
289 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
290 } else if (cmd->resp_type & MMC_RSP_BUSY) {
291 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
/* Fire the command: argument first, then opcode+flags */
294 sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
295 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
296 start = get_timer(0);
/* Poll for command completion (or error) within SDHCI_READ_STATUS_TIMEOUT ms */
298 stat = sdhci_readl(host, SDHCI_INT_STATUS);
299 if (stat & SDHCI_INT_ERROR)
302 if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
303 if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
306 printf("%s: Timeout for status update!\n",
311 } while ((stat & mask) != mask);
313 if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
314 sdhci_cmd_done(host, cmd);
315 sdhci_writel(host, mask, SDHCI_INT_STATUS);
320 ret = sdhci_transfer_data(host, data);
322 if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
325 stat = sdhci_readl(host, SDHCI_INT_STATUS);
326 sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
/* Reads that went through the bounce buffer must be copied back out */
328 if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
329 !is_aligned && (data->flags == MMC_DATA_READ))
330 memcpy(data->dest, host->align_buffer, trans_bytes);
/* Error path: reset CMD and DATA lines before reporting the failure */
334 sdhci_reset(host, SDHCI_RESET_CMD);
335 sdhci_reset(host, SDHCI_RESET_DATA);
336 if (stat & SDHCI_INT_TIMEOUT)
/*
 * sdhci_execute_tuning() - DM tuning entry point; delegates to the
 * platform's platform_execute_tuning hook when one is provided.
 * Only built when both CONFIG_DM_MMC and MMC_SUPPORTS_TUNING are set.
 */
342 #if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
343 static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
346 struct mmc *mmc = mmc_get_mmc_dev(dev);
347 struct sdhci_host *host = mmc->priv;
349 debug("%s\n", __func__);
351 if (host->ops && host->ops->platform_execute_tuning) {
352 err = host->ops->platform_execute_tuning(mmc, opcode);
/*
 * sdhci_set_clock() - program the SD clock to at most @clock Hz.
 *
 * Waits for the bus to go idle, gates the clock, computes the divider
 * (programmable-clock mode or divided mode for v3.00+, power-of-two
 * divider for v2.00), then enables the internal clock, waits for it to
 * stabilise, and finally enables the card clock.
 */
360 int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
362 struct sdhci_host *host = mmc->priv;
363 unsigned int div, clk = 0, timeout;
/* The clock must not be changed while a command or data transfer is active */
367 while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
368 (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
370 printf("%s: Timeout to wait cmd & data inhibit\n",
/* Gate the clock before reprogramming the divider */
379 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
384 if (host->ops && host->ops->set_delay)
385 host->ops->set_delay(host);
387 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
389 * Check if the Host Controller supports Programmable Clock
/* Programmable clock mode: linear divider search, 1..1024 */
393 for (div = 1; div <= 1024; div++) {
394 if ((host->max_clk / div) <= clock)
399 * Set Programmable Clock Mode in the Clock
402 clk = SDHCI_PROG_CLOCK_MODE;
405 /* Version 3.00 divisors must be a multiple of 2. */
406 if (host->max_clk <= clock) {
410 div < SDHCI_MAX_DIV_SPEC_300;
412 if ((host->max_clk / div) <= clock)
419 /* Version 2.00 divisors must be a power of 2. */
420 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
421 if ((host->max_clk / div) <= clock)
427 if (host->ops && host->ops->set_clock)
428 host->ops->set_clock(host, div);
/* Split the 10-bit divider across the low and high fields of the register */
430 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
431 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
432 << SDHCI_DIVIDER_HI_SHIFT;
433 clk |= SDHCI_CLOCK_INT_EN;
434 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Wait for the internal clock to report stable before enabling the card clock */
438 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
439 & SDHCI_CLOCK_INT_STABLE)) {
441 printf("%s: Internal clock never stabilised.\n",
449 clk |= SDHCI_CLOCK_CARD_EN;
450 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/*
 * sdhci_set_power() - select the bus voltage and switch bus power on.
 *
 * @power is a bit index into the MMC_VDD_* voltage mask ((unsigned
 * short)-1 means "power off").  Power is written as 0 first, then the
 * chosen voltage with SDHCI_POWER_ON set.
 */
454 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
458 if (power != (unsigned short)-1) {
459 switch (1 << power) {
460 case MMC_VDD_165_195:
461 pwr = SDHCI_POWER_180;
465 pwr = SDHCI_POWER_300;
469 pwr = SDHCI_POWER_330;
/* Always drop power before selecting the new voltage */
475 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
479 pwr |= SDHCI_POWER_ON;
481 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
/*
 * sdhci_set_uhs_timing() - map mmc->selected_mode onto the UHS mode
 * field of SDHCI_HOST_CONTROL2.  Unlisted modes fall through to SDR12.
 * (The case labels pairing modes to each branch are elided in this view.)
 */
484 void sdhci_set_uhs_timing(struct sdhci_host *host)
486 struct mmc *mmc = host->mmc;
489 reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
490 reg &= ~SDHCI_CTRL_UHS_MASK;
492 switch (mmc->selected_mode) {
495 reg |= SDHCI_CTRL_UHS_SDR50;
499 reg |= SDHCI_CTRL_UHS_DDR50;
503 reg |= SDHCI_CTRL_UHS_SDR104;
506 reg |= SDHCI_CTRL_UHS_SDR12;
509 sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
/*
 * sdhci_set_ios() - apply bus settings (clock, width, high-speed bit)
 * to the controller.  Two signatures exist behind elided #ifdefs: the
 * driver-model variant takes a udevice, the legacy one a struct mmc.
 *
 * Delegates clock changes to sdhci_set_clock(), programs the 4/8-bit
 * bus width and HISPD bits in SDHCI_HOST_CONTROL, and finally invokes
 * the optional set_ios_post() hook.
 */
513 static int sdhci_set_ios(struct udevice *dev)
515 struct mmc *mmc = mmc_get_mmc_dev(dev);
517 static int sdhci_set_ios(struct mmc *mmc)
521 struct sdhci_host *host = mmc->priv;
522 bool no_hispd_bit = false;
524 if (host->ops && host->ops->set_control_reg)
525 host->ops->set_control_reg(host);
527 if (mmc->clock != host->clock)
528 sdhci_set_clock(mmc, mmc->clock);
530 if (mmc->clk_disable)
531 sdhci_set_clock(mmc, 0);
/* Bus width: 8-bit needs SPEC_300 or the WIDE8 quirk; else 4-bit or 1-bit */
534 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
535 if (mmc->bus_width == 8) {
536 ctrl &= ~SDHCI_CTRL_4BITBUS;
537 if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
538 (host->quirks & SDHCI_QUIRK_USE_WIDE8))
539 ctrl |= SDHCI_CTRL_8BITBUS;
541 if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
542 (host->quirks & SDHCI_QUIRK_USE_WIDE8))
543 ctrl &= ~SDHCI_CTRL_8BITBUS;
544 if (mmc->bus_width == 4)
545 ctrl |= SDHCI_CTRL_4BITBUS;
547 ctrl &= ~SDHCI_CTRL_4BITBUS;
/* Quirky controllers must not have the HISPD bit touched */
550 if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
551 (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE)) {
552 ctrl &= ~SDHCI_CTRL_HISPD;
/* Set HISPD for every high-speed/UHS mode, clear it otherwise */
557 if (mmc->selected_mode == MMC_HS ||
558 mmc->selected_mode == SD_HS ||
559 mmc->selected_mode == MMC_DDR_52 ||
560 mmc->selected_mode == MMC_HS_200 ||
561 mmc->selected_mode == MMC_HS_400 ||
562 mmc->selected_mode == UHS_SDR25 ||
563 mmc->selected_mode == UHS_SDR50 ||
564 mmc->selected_mode == UHS_SDR104 ||
565 mmc->selected_mode == UHS_DDR50)
566 ctrl |= SDHCI_CTRL_HISPD;
568 ctrl &= ~SDHCI_CTRL_HISPD;
571 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
573 /* If available, call the driver specific "post" set_ios() function */
574 if (host->ops && host->ops->set_ios_post)
575 return host->ops->set_ios_post(host);
/*
 * sdhci_init() - one-time controller initialisation.
 *
 * Claims the card-detect GPIO (DM+GPIO builds), resets the controller,
 * sets up the DMA bounce buffer (fixed address or a 512 KiB
 * 8-byte-aligned allocation for the 32-bit-DMA quirk), powers the bus
 * at the highest supported voltage, and programs the interrupt
 * enable/signal masks.
 */
580 static int sdhci_init(struct mmc *mmc)
582 struct sdhci_host *host = mmc->priv;
583 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
584 struct udevice *dev = mmc->dev;
586 gpio_request_by_name(dev, "cd-gpios", 0,
587 &host->cd_gpio, GPIOD_IS_IN);
590 sdhci_reset(host, SDHCI_RESET_ALL);
592 #if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
593 host->align_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
595 * Always use this bounce-buffer when CONFIG_FIXED_SDHCI_ALIGNED_BUFFER
598 host->force_align_buffer = true;
600 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) {
601 host->align_buffer = memalign(8, 512 * 1024);
602 if (!host->align_buffer) {
603 printf("%s: Aligned buffer alloc failed!!!\n",
/* fls() - 1 picks the bit index of the highest supported voltage */
610 sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);
612 if (host->ops && host->ops->get_cd)
613 host->ops->get_cd(host);
615 /* Enable only interrupts served by the SD controller */
616 sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
618 /* Mask all sdhci interrupt sources */
619 sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);
/* DM probe entry point: thin wrapper around sdhci_init(). */
625 int sdhci_probe(struct udevice *dev)
627 struct mmc *mmc = mmc_get_mmc_dev(dev);
629 return sdhci_init(mmc);
/* Deferred-probe hook: forwards to the platform's deferred_probe op, if any. */
632 static int sdhci_deferred_probe(struct udevice *dev)
635 struct mmc *mmc = mmc_get_mmc_dev(dev);
636 struct sdhci_host *host = mmc->priv;
638 if (host->ops && host->ops->deferred_probe) {
639 err = host->ops->deferred_probe(host);
/*
 * sdhci_get_cd() - report card presence.
 *
 * Non-removable or polled hosts are always "present".  Otherwise the
 * state comes from the cd-gpios GPIO (DM_GPIO builds) or from the
 * SDHCI_PRESENT_STATE register; MMC_CAP_CD_ACTIVE_HIGH inverts the
 * polarity in both paths.
 */
646 static int sdhci_get_cd(struct udevice *dev)
648 struct mmc *mmc = mmc_get_mmc_dev(dev);
649 struct sdhci_host *host = mmc->priv;
652 /* If nonremovable, assume that the card is always present. */
653 if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
655 /* If polling, assume that the card is always present. */
656 if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
659 #if CONFIG_IS_ENABLED(DM_GPIO)
660 value = dm_gpio_get_value(&host->cd_gpio);
662 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
668 value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
670 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
/*
 * Host operation tables: the DM variant (dm_mmc_ops) and the legacy
 * variant (mmc_ops) — alternate #ifdef branches, #if lines elided.
 */
676 const struct dm_mmc_ops sdhci_ops = {
677 .send_cmd = sdhci_send_command,
678 .set_ios = sdhci_set_ios,
679 .get_cd = sdhci_get_cd,
680 .deferred_probe = sdhci_deferred_probe,
681 #ifdef MMC_SUPPORTS_TUNING
682 .execute_tuning = sdhci_execute_tuning,
686 static const struct mmc_ops sdhci_ops = {
687 .send_cmd = sdhci_send_command,
688 .set_ios = sdhci_set_ios,
/*
 * sdhci_setup_cfg() - derive the mmc_config from the controller's
 * capability registers.
 *
 * Reads SDHCI_CAPABILITIES(_1) — optionally filtered through the DT
 * "sdhci-caps-mask"/"sdhci-caps" properties — then fills in DMA flags,
 * version, clock limits (f_min/f_max), supported voltages, bus-width
 * and speed-mode host caps.
 */
693 int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
694 u32 f_max, u32 f_min)
696 u32 caps, caps_1 = 0;
697 #if CONFIG_IS_ENABLED(DM_MMC)
698 u64 dt_caps, dt_caps_mask;
/* DT may mask off broken capability bits and force additional ones */
700 dt_caps_mask = dev_read_u64_default(host->mmc->dev,
701 "sdhci-caps-mask", 0);
702 dt_caps = dev_read_u64_default(host->mmc->dev,
704 caps = ~lower_32_bits(dt_caps_mask) &
705 sdhci_readl(host, SDHCI_CAPABILITIES);
706 caps |= lower_32_bits(dt_caps);
708 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
710 debug("%s, caps: 0x%x\n", __func__, caps);
712 #ifdef CONFIG_MMC_SDHCI_SDMA
713 if ((caps & SDHCI_CAN_DO_SDMA)) {
714 host->flags |= USE_SDMA;
716 debug("%s: Your controller doesn't support SDMA!!\n",
720 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
/* NOTE(review): message below says "SDMA" but this checks ADMA2 support —
 * looks like a copy/paste slip; fixing it would be a string change. */
721 if (!(caps & SDHCI_CAN_DO_ADMA2)) {
722 printf("%s: Your controller doesn't support SDMA!!\n",
726 host->adma_desc_table = sdhci_adma_init();
727 host->adma_addr = (dma_addr_t)host->adma_desc_table;
729 #ifdef CONFIG_DMA_ADDR_T_64BIT
730 host->flags |= USE_ADMA64;
732 host->flags |= USE_ADMA;
/* REG32_RW controllers cannot do a 16-bit read of the version register */
735 if (host->quirks & SDHCI_QUIRK_REG32_RW)
737 sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
739 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
741 cfg->name = host->name;
742 #ifndef CONFIG_DM_MMC
743 cfg->ops = &sdhci_ops;
746 /* Check whether the clock multiplier is supported or not */
747 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
748 #if CONFIG_IS_ENABLED(DM_MMC)
749 caps_1 = ~upper_32_bits(dt_caps_mask) &
750 sdhci_readl(host, SDHCI_CAPABILITIES_1);
751 caps_1 |= upper_32_bits(dt_caps);
753 caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
755 debug("%s, caps_1: 0x%x\n", __func__, caps_1);
756 host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
757 SDHCI_CLOCK_MUL_SHIFT;
/* Base clock from caps (MHz units) unless the platform pre-set max_clk */
760 if (host->max_clk == 0) {
761 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
762 host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
763 SDHCI_CLOCK_BASE_SHIFT;
765 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
766 SDHCI_CLOCK_BASE_SHIFT;
767 host->max_clk *= 1000000;
769 host->max_clk *= host->clk_mul;
771 if (host->max_clk == 0) {
772 printf("%s: Hardware doesn't specify base clock frequency\n",
776 if (f_max && (f_max < host->max_clk))
779 cfg->f_max = host->max_clk;
/* f_min defaults to f_max divided by the spec's maximum divider */
783 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
784 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
786 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
789 if (caps & SDHCI_CAN_VDD_330)
790 cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
791 if (caps & SDHCI_CAN_VDD_300)
792 cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
793 if (caps & SDHCI_CAN_VDD_180)
794 cfg->voltages |= MMC_VDD_165_195;
796 if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
797 cfg->voltages |= host->voltages;
799 if (caps & SDHCI_CAN_DO_HISPD)
800 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
802 cfg->host_caps |= MMC_MODE_4BIT;
804 /* Since Host Controller Version3.0 */
805 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
806 if (!(caps & SDHCI_CAN_DO_8BIT))
807 cfg->host_caps &= ~MMC_MODE_8BIT;
810 if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
811 cfg->host_caps &= ~MMC_MODE_HS;
812 cfg->host_caps &= ~MMC_MODE_HS_52MHz;
/* UHS modes need 1.8V signalling; drop them if unsupported or quirked off */
815 if (!(cfg->voltages & MMC_VDD_165_195) ||
816 (host->quirks & SDHCI_QUIRK_NO_1_8_V))
817 caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
818 SDHCI_SUPPORT_DDR50);
820 if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
821 SDHCI_SUPPORT_DDR50))
822 cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);
824 if (caps_1 & SDHCI_SUPPORT_SDR104) {
825 cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
827 * SD3.0: SDR104 is supported so (for eMMC) the caps2
828 * field can be promoted to support HS200.
830 cfg->host_caps |= MMC_CAP(MMC_HS_200);
831 } else if (caps_1 & SDHCI_SUPPORT_SDR50) {
832 cfg->host_caps |= MMC_CAP(UHS_SDR50);
835 if (caps_1 & SDHCI_SUPPORT_DDR50)
836 cfg->host_caps |= MMC_CAP(UHS_DDR50);
839 cfg->host_caps |= host->host_caps;
841 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
/* DM bind helper: register the mmc device/config pair with the MMC core. */
847 int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
849 return mmc_bind(dev, mmc, cfg);
852 int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
856 ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
860 host->mmc = mmc_create(&host->cfg, host);
861 if (host->mmc == NULL) {
862 printf("%s: mmc create fail!\n", __func__);