1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2011, Marvell Semiconductor Inc.
4 * Lei Wen <leiwen@marvell.com>
6 * Back ported to the 8xx platform (from the 8260 platform) by
7 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
/*
 * Soft-reset the controller bits selected by @mask (SDHCI_RESET_ALL/CMD/DATA)
 * and busy-poll SDHCI_SOFTWARE_RESET until hardware clears them; logs an
 * error if the reset never completes.
 * NOTE(review): excerpt is elided — the timeout decrement/delay lines of the
 * poll loop are not visible here.
 */
19 static void sdhci_reset(struct sdhci_host *host, u8 mask)
21 unsigned long timeout;
25 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
26 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
28 printf("%s: Reset 0x%x never completed.\n",
/*
 * Copy the command response from the controller's SDHCI_RESPONSE registers
 * into cmd->response[].  For 136-bit (R2) responses the hardware strips the
 * CRC byte, so each 32-bit word is shifted left by 8 and its low byte is
 * refilled from the adjacent register; otherwise a single 32-bit read is
 * enough.
 */
37 static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
40 if (cmd->resp_type & MMC_RSP_136) {
41 /* CRC is stripped so we need to do some shifting. */
42 for (i = 0; i < 4; i++) {
43 cmd->response[i] = sdhci_readl(host,
44 SDHCI_RESPONSE + (3-i)*4) << 8;
/* Refill the byte shifted out above from the neighbouring register. */
46 cmd->response[i] |= sdhci_readb(host,
47 SDHCI_RESPONSE + (3-i)*4-1);
50 cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
/*
 * PIO-transfer one block of @data through the SDHCI_BUFFER data port in
 * 32-bit accesses; direction (read vs. write) is selected by data->flags.
 * NOTE(review): assumes blocksize is a multiple of 4 and the buffer is
 * 32-bit-access safe — not verifiable from this excerpt.
 */
54 static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
58 for (i = 0; i < data->blocksize; i += 4) {
59 offs = data->dest + i;
60 if (data->flags == MMC_DATA_READ)
61 *(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
63 sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
67 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
/*
 * Fill the next slot of host->adma_desc_table with an ADMA2 "transfer data"
 * descriptor for @buf/@len; the final descriptor of a chain additionally
 * carries ADMA_DESC_ATTR_END.  The high address word is only written when
 * 64-bit DMA addresses are configured.
 */
68 static void sdhci_adma_desc(struct sdhci_host *host, char *buf, u16 len,
71 struct sdhci_adma_desc *desc;
74 desc = &host->adma_desc_table[host->desc_slot];
76 attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
/* Last descriptor in the chain terminates the ADMA transfer. */
80 attr |= ADMA_DESC_ATTR_END;
85 desc->addr_lo = (dma_addr_t)buf;
86 #ifdef CONFIG_DMA_ADDR_T_64BIT
87 desc->addr_hi = (u64)buf >> 32;
/*
 * Build the ADMA2 descriptor chain covering blocksize * blocks bytes of
 * @data in ADMA_MAX_LEN chunks (the final, possibly shorter, chunk gets the
 * END attribute), then flush the descriptor table from cache so the
 * controller's DMA engine sees it.
 */
91 static void sdhci_prepare_adma_table(struct sdhci_host *host,
92 struct mmc_data *data)
94 uint trans_bytes = data->blocksize * data->blocks;
95 uint desc_count = DIV_ROUND_UP(trans_bytes, ADMA_MAX_LEN);
/* Pick source or destination buffer depending on transfer direction. */
101 if (data->flags & MMC_DATA_READ)
104 buf = (char *)data->src;
107 sdhci_adma_desc(host, buf, ADMA_MAX_LEN, false);
109 trans_bytes -= ADMA_MAX_LEN;
/* Final descriptor: remaining bytes, marked as end of chain. */
112 sdhci_adma_desc(host, buf, trans_bytes, true);
/* Make the table visible to the DMA engine before the transfer starts. */
114 flush_cache((dma_addr_t)host->adma_desc_table,
115 ROUND(desc_count * sizeof(struct sdhci_adma_desc),
118 #elif defined(CONFIG_MMC_SDHCI_SDMA)
/*
 * SDMA-only configuration: same symbol so sdhci_prepare_dma() links —
 * presumably an empty stub; its body is not visible in this excerpt.
 */
119 static void sdhci_prepare_adma_table(struct sdhci_host *host,
120 struct mmc_data *data)
123 #if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
/*
 * Program the controller for a DMA transfer of @trans_bytes bytes:
 * select the DMA mode bits in HOST_CONTROL, set up either the SDMA system
 * address (falling back to the 8-byte-aligned bounce buffer when the
 * 32-bit-DMA quirk demands it, with *is_aligned cleared — elided here) or
 * the ADMA descriptor table address, then flush the data buffer from cache.
 */
124 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
125 int *is_aligned, int trans_bytes)
129 if (data->flags == MMC_DATA_READ)
130 host->start_addr = (dma_addr_t)data->dest;
132 host->start_addr = (dma_addr_t)data->src;
/* Select ADMA64/ADMA32/SDMA in the host control register. */
134 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
135 ctrl &= ~SDHCI_CTRL_DMA_MASK;
136 if (host->flags & USE_ADMA64)
137 ctrl |= SDHCI_CTRL_ADMA64;
138 else if (host->flags & USE_ADMA)
139 ctrl |= SDHCI_CTRL_ADMA32;
140 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
142 if (host->flags & USE_SDMA) {
/* Unaligned buffer + 32-bit DMA quirk: bounce through align_buffer. */
143 if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
144 (host->start_addr & 0x7) != 0x0) {
146 host->start_addr = (unsigned long)host->align_buffer;
147 if (data->flags != MMC_DATA_READ)
148 memcpy(host->align_buffer, data->src,
152 #if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
154 * Always use this bounce-buffer when
155 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined
158 host->start_addr = (unsigned long)host->align_buffer;
159 if (data->flags != MMC_DATA_READ)
160 memcpy(host->align_buffer, data->src, trans_bytes);
162 sdhci_writel(host, host->start_addr, SDHCI_DMA_ADDRESS);
164 } else if (host->flags & (USE_ADMA | USE_ADMA64)) {
165 sdhci_prepare_adma_table(host, data);
167 sdhci_writel(host, (u32)host->adma_addr, SDHCI_ADMA_ADDRESS);
168 if (host->flags & USE_ADMA64)
169 sdhci_writel(host, (u64)host->adma_addr >> 32,
170 SDHCI_ADMA_ADDRESS_HI);
/* Flush (write) / invalidate (read) the data buffer for DMA coherency. */
173 flush_cache(host->start_addr, ROUND(trans_bytes, ARCH_DMA_MINALIGN));
/*
 * No-DMA configuration: stub with the same signature so callers compile —
 * presumably empty; the body is not visible in this excerpt.
 */
176 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
177 int *is_aligned, int trans_bytes)
/*
 * Drive the data phase of a command: poll SDHCI_INT_STATUS until
 * SDHCI_INT_DATA_END, doing per-block PIO transfers when buffer-ready bits
 * fire, and re-programming the SDMA address register at every DMA boundary
 * interrupt.  Returns 0 on success; error/timeout paths are elided in this
 * excerpt.
 */
180 static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
182 dma_addr_t start_addr = host->start_addr;
183 unsigned int stat, rdy, mask, timeout, block = 0;
184 bool transfer_done = false;
187 rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
188 mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
190 stat = sdhci_readl(host, SDHCI_INT_STATUS);
191 if (stat & SDHCI_INT_ERROR) {
192 pr_debug("%s: Error detected in status(0x%X)!\n",
/* PIO path: a buffer-ready interrupt means one block can be moved. */
196 if (!transfer_done && (stat & rdy)) {
197 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
199 sdhci_writel(host, rdy, SDHCI_INT_STATUS);
200 sdhci_transfer_pio(host, data);
201 data->dest += data->blocksize;
202 if (++block >= data->blocks) {
203 /* Keep looping until the SDHCI_INT_DATA_END is
204 * cleared, even if we finished sending all the
207 transfer_done = true;
/* SDMA boundary hit: advance the system address and restart DMA. */
211 if ((host->flags & USE_DMA) && !transfer_done &&
212 (stat & SDHCI_INT_DMA_END)) {
213 sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
214 if (host->flags & USE_SDMA) {
216 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
217 start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
218 sdhci_writel(host, start_addr,
225 printf("%s: Transfer data timeout\n", __func__);
228 } while (!(stat & SDHCI_INT_DATA_END));
233 * No command will be sent by driver if card is busy, so driver must wait
234 * for card ready state.
235 * Every time when card is busy after timeout then (last) timeout value will be
236 * increased twice but only if it doesn't exceed global defined maximum.
237 * Each function call will use last timeout value.
239 #define SDHCI_CMD_MAX_TIMEOUT 3200
240 #define SDHCI_CMD_DEFAULT_TIMEOUT 100
241 #define SDHCI_READ_STATUS_TIMEOUT 1000
/*
 * Send one MMC command (with optional data phase).  Waits for the CMD/DATA
 * inhibit bits to clear (growing a persistent busy timeout up to
 * SDHCI_CMD_MAX_TIMEOUT), builds the response/interrupt masks from
 * cmd->resp_type, programs the transfer-mode and block registers (preparing
 * DMA when enabled), issues the command, then polls for completion and
 * hands off to sdhci_cmd_done()/sdhci_transfer_data().  On error paths it
 * resets the CMD and DATA lines.  Two entry signatures exist: DM_MMC
 * (udevice) and legacy (struct mmc), selected by preprocessor (elided).
 */
244 static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
245 struct mmc_data *data)
247 struct mmc *mmc = mmc_get_mmc_dev(dev);
250 static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
251 struct mmc_data *data)
254 struct sdhci_host *host = mmc->priv;
255 unsigned int stat = 0;
257 int trans_bytes = 0, is_aligned = 1;
258 u32 mask, flags, mode;
259 unsigned int time = 0;
260 int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
261 ulong start = get_timer(0);
263 host->start_addr = 0;
264 /* Timeout unit - ms.  Static: grown value persists across calls. */
265 static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;
267 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
269 /* We shouldn't wait for data inhibit for stop commands, even
270 though they might use busy signaling */
271 if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
272 ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
273 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
274 mask &= ~SDHCI_DATA_INHIBIT;
/* Wait for the bus to go idle, doubling the timeout (up to the max)
 * each time it expires. */
276 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
277 if (time >= cmd_timeout) {
278 printf("%s: MMC: %d busy ", __func__, mmc_dev);
279 if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
280 cmd_timeout += cmd_timeout;
281 printf("timeout increasing to: %u ms.\n",
/* Clear any stale interrupt status before issuing the command. */
292 sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
294 mask = SDHCI_INT_RESPONSE;
/* Tuning commands complete with a data-available interrupt instead. */
295 if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
296 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
297 mask = SDHCI_INT_DATA_AVAIL;
/* Translate MMC response flags into SDHCI command flags. */
299 if (!(cmd->resp_type & MMC_RSP_PRESENT))
300 flags = SDHCI_CMD_RESP_NONE;
301 else if (cmd->resp_type & MMC_RSP_136)
302 flags = SDHCI_CMD_RESP_LONG;
303 else if (cmd->resp_type & MMC_RSP_BUSY) {
304 flags = SDHCI_CMD_RESP_SHORT_BUSY;
306 mask |= SDHCI_INT_DATA_END;
308 flags = SDHCI_CMD_RESP_SHORT;
310 if (cmd->resp_type & MMC_RSP_CRC)
311 flags |= SDHCI_CMD_CRC;
312 if (cmd->resp_type & MMC_RSP_OPCODE)
313 flags |= SDHCI_CMD_INDEX;
314 if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
315 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
316 flags |= SDHCI_CMD_DATA;
318 /* Set Transfer mode regarding to data flag */
320 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
321 mode = SDHCI_TRNS_BLK_CNT_EN;
322 trans_bytes = data->blocks * data->blocksize;
323 if (data->blocks > 1)
324 mode |= SDHCI_TRNS_MULTI;
326 if (data->flags == MMC_DATA_READ)
327 mode |= SDHCI_TRNS_READ;
329 if (host->flags & USE_DMA) {
330 mode |= SDHCI_TRNS_DMA;
331 sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
334 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
337 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
338 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
339 } else if (cmd->resp_type & MMC_RSP_BUSY) {
/* R1b with no data: still program a data timeout for the busy wait. */
340 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
/* Writing SDHCI_COMMAND actually fires the command. */
343 sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
344 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
345 start = get_timer(0);
347 stat = sdhci_readl(host, SDHCI_INT_STATUS);
348 if (stat & SDHCI_INT_ERROR)
351 if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
352 if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
355 printf("%s: Timeout for status update!\n",
360 } while ((stat & mask) != mask);
362 if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
363 sdhci_cmd_done(host, cmd);
364 sdhci_writel(host, mask, SDHCI_INT_STATUS);
369 ret = sdhci_transfer_data(host, data);
371 if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
374 stat = sdhci_readl(host, SDHCI_INT_STATUS);
375 sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
/* Bounced read: copy the data back out of the aligned buffer. */
377 if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
378 !is_aligned && (data->flags == MMC_DATA_READ))
379 memcpy(data->dest, host->align_buffer, trans_bytes);
/* Error path: reset CMD and DATA lines so the next command can run. */
383 sdhci_reset(host, SDHCI_RESET_CMD);
384 sdhci_reset(host, SDHCI_RESET_DATA);
385 if (stat & SDHCI_INT_TIMEOUT)
391 #if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
/*
 * DM tuning hook: delegate entirely to the platform's
 * platform_execute_tuning() op when one is provided.  The fallback path
 * (no op installed) is elided in this excerpt.
 */
392 static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
395 struct mmc *mmc = mmc_get_mmc_dev(dev);
396 struct sdhci_host *host = mmc->priv;
398 debug("%s\n", __func__);
400 if (host->ops && host->ops->platform_execute_tuning) {
401 err = host->ops->platform_execute_tuning(mmc, opcode);
/*
 * Set the SD bus clock to @clock Hz: wait for the bus to go idle, gate the
 * clock, compute the divisor (programmable-clock mode when supported;
 * v3.00 even divisors up to SDHCI_MAX_DIV_SPEC_300; v2.00 powers of two),
 * then enable the internal clock, wait for it to stabilise and finally
 * enable the card clock.  Platform set_delay/set_clock hooks are honoured.
 */
409 int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
411 struct sdhci_host *host = mmc->priv;
412 unsigned int div, clk = 0, timeout;
/* Don't change the clock while a command or data transfer is active. */
416 while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
417 (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
419 printf("%s: Timeout to wait cmd & data inhibit\n",
/* Gate the clock before reprogramming the divisor. */
428 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
433 if (host->ops && host->ops->set_delay)
434 host->ops->set_delay(host);
436 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
438 * Check if the Host Controller supports Programmable Clock
442 for (div = 1; div <= 1024; div++) {
443 if ((host->max_clk / div) <= clock)
448 * Set Programmable Clock Mode in the Clock
451 clk = SDHCI_PROG_CLOCK_MODE;
454 /* Version 3.00 divisors must be a multiple of 2. */
455 if (host->max_clk <= clock) {
459 div < SDHCI_MAX_DIV_SPEC_300;
461 if ((host->max_clk / div) <= clock)
468 /* Version 2.00 divisors must be a power of 2. */
469 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
470 if ((host->max_clk / div) <= clock)
476 if (host->ops && host->ops->set_clock)
477 host->ops->set_clock(host, div);
/* Split the divisor across the low and high divider fields. */
479 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
480 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
481 << SDHCI_DIVIDER_HI_SHIFT;
482 clk |= SDHCI_CLOCK_INT_EN;
483 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Poll for the internal clock to report stable before enabling it. */
487 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
488 & SDHCI_CLOCK_INT_STABLE)) {
490 printf("%s: Internal clock never stabilised.\n",
498 clk |= SDHCI_CLOCK_CARD_EN;
499 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/*
 * Program SDHCI_POWER_CONTROL for the voltage selected by @power (a bit
 * index into the MMC_VDD_* mask; (unsigned short)-1 means "power off").
 * Power is cleared first, then re-enabled with SDHCI_POWER_ON at the
 * chosen rail.
 */
503 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
507 if (power != (unsigned short)-1) {
508 switch (1 << power) {
509 case MMC_VDD_165_195:
510 pwr = SDHCI_POWER_180;
514 pwr = SDHCI_POWER_300;
518 pwr = SDHCI_POWER_330;
/* No recognised voltage: power the bus off and return (elided). */
524 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
528 pwr |= SDHCI_POWER_ON;
530 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
/*
 * Map the currently selected MMC bus mode onto the UHS mode-select field
 * of SDHCI_HOST_CONTROL2 (SDR12 is the default for unlisted modes).
 */
533 void sdhci_set_uhs_timing(struct sdhci_host *host)
535 struct mmc *mmc = (struct mmc *)host->mmc;
538 reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
539 reg &= ~SDHCI_CTRL_UHS_MASK;
541 switch (mmc->selected_mode) {
544 reg |= SDHCI_CTRL_UHS_SDR50;
548 reg |= SDHCI_CTRL_UHS_DDR50;
552 reg |= SDHCI_CTRL_UHS_SDR104;
555 reg |= SDHCI_CTRL_UHS_SDR12;
558 sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
/*
 * Apply the current mmc->ios settings to the controller: platform control
 * register hook, clock frequency (or gating), bus width (8/4/1 bit, with
 * wide-8 only on v3.00+ or the USE_WIDE8 quirk) and the high-speed enable
 * bit, then call the optional set_ios_post hook.  Two entry signatures
 * exist: DM_MMC (udevice) and legacy (struct mmc), selected by
 * preprocessor (elided).
 */
562 static int sdhci_set_ios(struct udevice *dev)
564 struct mmc *mmc = mmc_get_mmc_dev(dev);
566 static int sdhci_set_ios(struct mmc *mmc)
570 struct sdhci_host *host = mmc->priv;
572 if (host->ops && host->ops->set_control_reg)
573 host->ops->set_control_reg(host);
575 if (mmc->clock != host->clock)
576 sdhci_set_clock(mmc, mmc->clock);
578 if (mmc->clk_disable)
579 sdhci_set_clock(mmc, 0);
/* Bus width: 8-bit needs v3.00+ or the wide-8 quirk. */
582 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
583 if (mmc->bus_width == 8) {
584 ctrl &= ~SDHCI_CTRL_4BITBUS;
585 if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
586 (host->quirks & SDHCI_QUIRK_USE_WIDE8))
587 ctrl |= SDHCI_CTRL_8BITBUS;
589 if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
590 (host->quirks & SDHCI_QUIRK_USE_WIDE8))
591 ctrl &= ~SDHCI_CTRL_8BITBUS;
592 if (mmc->bus_width == 4)
593 ctrl |= SDHCI_CTRL_4BITBUS;
595 ctrl &= ~SDHCI_CTRL_4BITBUS;
/* High-speed bit above 26 MHz, unless a quirk forbids it. */
598 if (mmc->clock > 26000000)
599 ctrl |= SDHCI_CTRL_HISPD;
601 ctrl &= ~SDHCI_CTRL_HISPD;
603 if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
604 (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
605 ctrl &= ~SDHCI_CTRL_HISPD;
607 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
609 /* If available, call the driver specific "post" set_ios() function */
610 if (host->ops && host->ops->set_ios_post)
611 return host->ops->set_ios_post(host);
/*
 * One-time controller init: claim the card-detect GPIO (DM + GPIO builds),
 * reset the whole controller, set up the DMA bounce buffer (fixed address
 * or a 512 KiB memalign'd allocation under the 32-bit DMA quirk), apply
 * bus power at the highest supported voltage, run the optional get_cd
 * hook, and configure interrupt status/signal enables.
 */
616 static int sdhci_init(struct mmc *mmc)
618 struct sdhci_host *host = mmc->priv;
619 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
620 struct udevice *dev = mmc->dev;
622 gpio_request_by_name(dev, "cd-gpios", 0,
623 &host->cd_gpio, GPIOD_IS_IN);
626 sdhci_reset(host, SDHCI_RESET_ALL);
628 #if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
629 host->align_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
631 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) {
632 host->align_buffer = memalign(8, 512 * 1024);
633 if (!host->align_buffer) {
634 printf("%s: Aligned buffer alloc failed!!!\n",
/* Power up at the highest voltage advertised in cfg->voltages. */
641 sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);
643 if (host->ops && host->ops->get_cd)
644 host->ops->get_cd(host);
646 /* Enable only interrupts served by the SD controller */
647 sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
649 /* Mask all sdhci interrupt sources */
650 sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);
/* DM probe entry point: thin wrapper that runs sdhci_init() on the
 * device's mmc instance. */
656 int sdhci_probe(struct udevice *dev)
658 struct mmc *mmc = mmc_get_mmc_dev(dev);
660 return sdhci_init(mmc);
/*
 * Card-detect query: non-removable and polled hosts always report present;
 * otherwise read the CD GPIO (DM_GPIO builds) or the controller's
 * PRESENT_STATE card bit, inverting for active-high CD where flagged.
 */
663 static int sdhci_get_cd(struct udevice *dev)
665 struct mmc *mmc = mmc_get_mmc_dev(dev);
666 struct sdhci_host *host = mmc->priv;
669 /* If nonremovable, assume that the card is always present. */
670 if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
672 /* If polling, assume that the card is always present. */
673 if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
676 #if CONFIG_IS_ENABLED(DM_GPIO)
677 value = dm_gpio_get_value(&host->cd_gpio)
679 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
/* Fallback: read the card-present bit from PRESENT_STATE. */
685 value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
687 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
/* Driver-model ops table (DM_MMC builds); tuning op only when supported. */
693 const struct dm_mmc_ops sdhci_ops = {
694 .send_cmd = sdhci_send_command,
695 .set_ios = sdhci_set_ios,
696 .get_cd = sdhci_get_cd,
697 #ifdef MMC_SUPPORTS_TUNING
698 .execute_tuning = sdhci_execute_tuning,
/* Legacy (non-DM) ops table: same handlers, older interface. */
702 static const struct mmc_ops sdhci_ops = {
703 .send_cmd = sdhci_send_command,
704 .set_ios = sdhci_set_ios,
/*
 * Populate @cfg from the controller's capability registers (optionally
 * masked/overridden by the "sdhci-caps-mask"/"sdhci-caps" DT properties in
 * DM builds): DMA engine selection (SDMA/ADMA, allocating the ADMA
 * descriptor table), host version, min/max clock (clamped to @f_min/@f_max,
 * with the v3.00 clock multiplier applied), supported voltages, bus-width
 * and high-speed caps, and UHS/HS200 capability bits.
 */
709 int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
710 u32 f_max, u32 f_min)
712 u32 caps, caps_1 = 0;
713 #if CONFIG_IS_ENABLED(DM_MMC)
714 u64 dt_caps, dt_caps_mask;
716 dt_caps_mask = dev_read_u64_default(host->mmc->dev,
717 "sdhci-caps-mask", 0);
718 dt_caps = dev_read_u64_default(host->mmc->dev,
/* Apply the DT mask/override on top of the hardware capabilities. */
720 caps = ~(u32)dt_caps_mask &
721 sdhci_readl(host, SDHCI_CAPABILITIES);
722 caps |= (u32)dt_caps;
724 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
726 debug("%s, caps: 0x%x\n", __func__, caps);
728 #ifdef CONFIG_MMC_SDHCI_SDMA
729 if (!(caps & SDHCI_CAN_DO_SDMA)) {
730 printf("%s: Your controller doesn't support SDMA!!\n",
735 host->flags |= USE_SDMA;
737 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
738 if (!(caps & SDHCI_CAN_DO_ADMA2)) {
/* NOTE(review): this checks ADMA2 support but the message says
 * "SDMA" — looks like a copy-paste of the SDMA error above; the
 * string should read "ADMA2". Verify against upstream U-Boot. */
739 printf("%s: Your controller doesn't support SDMA!!\n",
743 host->adma_desc_table = (struct sdhci_adma_desc *)
744 memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ);
746 host->adma_addr = (dma_addr_t)host->adma_desc_table;
747 #ifdef CONFIG_DMA_ADDR_T_64BIT
748 host->flags |= USE_ADMA64;
750 host->flags |= USE_ADMA;
/* Some hosts only support 32-bit register accesses for the version. */
753 if (host->quirks & SDHCI_QUIRK_REG32_RW)
755 sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
757 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
759 cfg->name = host->name;
760 #ifndef CONFIG_DM_MMC
761 cfg->ops = &sdhci_ops;
764 /* Check whether the clock multiplier is supported or not */
765 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
766 #if CONFIG_IS_ENABLED(DM_MMC)
767 caps_1 = ~(u32)(dt_caps_mask >> 32) &
768 sdhci_readl(host, SDHCI_CAPABILITIES_1);
769 caps_1 |= (u32)(dt_caps >> 32);
771 caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
773 debug("%s, caps_1: 0x%x\n", __func__, caps_1);
774 host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
775 SDHCI_CLOCK_MUL_SHIFT;
/* Derive the base clock from the caps register when the platform
 * driver did not supply one. */
778 if (host->max_clk == 0) {
779 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
780 host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
781 SDHCI_CLOCK_BASE_SHIFT;
783 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
784 SDHCI_CLOCK_BASE_SHIFT;
785 host->max_clk *= 1000000;
787 host->max_clk *= host->clk_mul;
789 if (host->max_clk == 0) {
790 printf("%s: Hardware doesn't specify base clock frequency\n",
794 if (f_max && (f_max < host->max_clk))
797 cfg->f_max = host->max_clk;
/* f_min defaults to f_max divided by the spec's maximum divisor. */
801 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
802 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
804 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
807 if (caps & SDHCI_CAN_VDD_330)
808 cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
809 if (caps & SDHCI_CAN_VDD_300)
810 cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
811 if (caps & SDHCI_CAN_VDD_180)
812 cfg->voltages |= MMC_VDD_165_195;
814 if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
815 cfg->voltages |= host->voltages;
817 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;
819 /* Since Host Controller Version3.0 */
820 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
821 if (!(caps & SDHCI_CAN_DO_8BIT))
822 cfg->host_caps &= ~MMC_MODE_8BIT;
825 if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
826 cfg->host_caps &= ~MMC_MODE_HS;
827 cfg->host_caps &= ~MMC_MODE_HS_52MHz;
/* UHS modes require 1.8 V signalling; drop them otherwise. */
830 if (!(cfg->voltages & MMC_VDD_165_195) ||
831 (host->quirks & SDHCI_QUIRK_NO_1_8_V))
832 caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
833 SDHCI_SUPPORT_DDR50);
835 if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
836 SDHCI_SUPPORT_DDR50))
837 cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);
839 if (caps_1 & SDHCI_SUPPORT_SDR104) {
840 cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
842 * SD3.0: SDR104 is supported so (for eMMC) the caps2
843 * field can be promoted to support HS200.
845 cfg->host_caps |= MMC_CAP(MMC_HS_200);
846 } else if (caps_1 & SDHCI_SUPPORT_SDR50) {
847 cfg->host_caps |= MMC_CAP(UHS_SDR50);
850 if (caps_1 & SDHCI_SUPPORT_DDR50)
851 cfg->host_caps |= MMC_CAP(UHS_DDR50);
854 cfg->host_caps |= host->host_caps;
856 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
/* DM bind helper: register the mmc device with the given config. */
862 int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
864 return mmc_bind(dev, mmc, cfg);
867 int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
871 ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
875 host->mmc = mmc_create(&host->cfg, host);
876 if (host->mmc == NULL) {
877 printf("%s: mmc create fail!\n", __func__);