// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */
#include <common.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <mmc.h>
#include <sdhci.h>

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
void *aligned_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
#else
void *aligned_buffer;
#endif
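
/*
 * Request a software reset through SDHCI_SOFTWARE_RESET and poll until the
 * controller clears the requested mask bits (or the ~100 ms budget expires).
 */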
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	/* Wait max 100 ms */
	timeout = 100;
	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printf("%s: Reset 0x%x never completed.\n",
			       __func__, (int)mask);
			return;
		}
		timeout--;
		udelay(1000);
	}
}
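
/*
 * Read back the command response. The controller strips the CRC byte from
 * 136-bit responses, so each 32-bit word is shifted left by 8 and patched
 * with the top byte of the neighbouring response register.
 */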
static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
{
	int i;

	if (cmd->resp_type & MMC_RSP_136) {
		/* CRC is stripped so we need to do some shifting. */
		for (i = 0; i < 4; i++) {
			cmd->response[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
			if (i != 3)
				cmd->response[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
		}
	} else {
		cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
	}
}
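
/*
 * Move one block between the data buffer and the BUFFER data port,
 * 32 bits at a time.
 */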
static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
{
	int i;
	char *offs;

	for (i = 0; i < data->blocksize; i += 4) {
		offs = data->dest + i;
		if (data->flags == MMC_DATA_READ)
			*(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
		else
			sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
	}
}
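
/*
 * Service an in-progress transfer until SDHCI_INT_DATA_END is seen: blocks
 * are moved by PIO whenever the buffer-ready bits assert and, with SDMA
 * enabled, the DMA address is re-armed on every boundary interrupt.
 */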
static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data,
				unsigned int start_addr)
{
	unsigned int stat, rdy, mask, timeout, block = 0;
	bool transfer_done = false;
#ifdef CONFIG_MMC_SDHCI_SDMA
	unsigned char ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
#endif

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			pr_debug("%s: Error detected in status(0x%X)!\n",
				 __func__, stat);
			return -EIO;
		}
		if (!transfer_done && (stat & rdy)) {
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
				continue;
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				/* Keep looping until the SDHCI_INT_DATA_END is
				 * cleared, even if we finished sending all the
				 * blocks.
				 */
				transfer_done = true;
				continue;
			}
		}
#ifdef CONFIG_MMC_SDHCI_SDMA
		if (!transfer_done && (stat & SDHCI_INT_DMA_END)) {
			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
			start_addr &= ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
			start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
			sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
		}
#endif
		if (timeout-- > 0)
			udelay(10);
		else {
			printf("%s: Transfer data timeout\n", __func__);
			return -ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));

	return 0;
}
/*
 * The driver does not send a command while the card is busy, so it must wait
 * for the card to become ready first.
 * Each time the card is still busy when the timeout expires, the (last used)
 * timeout value is doubled, but only while it stays within the global
 * maximum. Subsequent calls reuse the last timeout value.
 */
#define SDHCI_CMD_MAX_TIMEOUT			3200
#define SDHCI_CMD_DEFAULT_TIMEOUT		100
#define SDHCI_READ_STATUS_TIMEOUT		1000
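
/*
 * Example progression of the shared cmd_timeout (in ms) when a card keeps
 * signalling busy: 100 -> 200 -> 400 -> 800 -> 1600 -> 3200, after which a
 * further expiry makes sdhci_send_command() give up with -ECOMM.
 */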
#ifdef CONFIG_DM_MMC
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
#endif
	struct sdhci_host *host = mmc->priv;
	unsigned int stat = 0;
	int ret = 0;
	int trans_bytes = 0, is_aligned = 1;
	u32 mask, flags, mode;
	unsigned int time = 0, start_addr = 0;
	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
	ulong start = get_timer(0);

	/* Timeout unit - ms */
	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
	    ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	      cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (time >= cmd_timeout) {
			printf("%s: MMC: %d busy ", __func__, mmc_dev);
			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
				cmd_timeout += cmd_timeout;
				printf("timeout increasing to: %u ms.\n",
				       cmd_timeout);
			} else {
				puts("timeout.\n");
				return -ECOMM;
			}
		}
		time++;
		udelay(1000);
	}

	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	mask = SDHCI_INT_RESPONSE;
	if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	     cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
		mask = SDHCI_INT_DATA_AVAIL;

	if (!(cmd->resp_type & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->resp_type & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		if (data)
			mask |= SDHCI_INT_DATA_END;
	} else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* Set Transfer mode regarding to data flag */
	if (data) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
		mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1)
			mode |= SDHCI_TRNS_MULTI;

		if (data->flags == MMC_DATA_READ)
			mode |= SDHCI_TRNS_READ;

#ifdef CONFIG_MMC_SDHCI_SDMA
		if (data->flags == MMC_DATA_READ)
			start_addr = (unsigned long)data->dest;
		else
			start_addr = (unsigned long)data->src;
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
				(start_addr & 0x7) != 0x0) {
			is_aligned = 0;
			start_addr = (unsigned long)aligned_buffer;
			if (data->flags != MMC_DATA_READ)
				memcpy(aligned_buffer, data->src, trans_bytes);
		}

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
		/*
		 * Always use this bounce-buffer when
		 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined
		 */
		is_aligned = 0;
		start_addr = (unsigned long)aligned_buffer;
		if (data->flags != MMC_DATA_READ)
			memcpy(aligned_buffer, data->src, trans_bytes);
#endif

		sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
		mode |= SDHCI_TRNS_DMA;
#endif
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
				data->blocksize),
				SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
	}

	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
#ifdef CONFIG_MMC_SDHCI_SDMA
	if (data != 0) {
		trans_bytes = ALIGN(trans_bytes, CONFIG_SYS_CACHELINE_SIZE);
		flush_cache(start_addr, trans_bytes);
	}
#endif
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
	start = get_timer(0);
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR)
			break;

		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
			if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
				return 0;
			} else {
				printf("%s: Timeout for status update!\n",
				       __func__);
				return -ETIMEDOUT;
			}
		}
	} while ((stat & mask) != mask);

	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else
		ret = -1;

	if (!ret && data)
		ret = sdhci_transfer_data(host, data, start_addr);

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
		udelay(1000);

	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
	if (!ret) {
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
				!is_aligned && (data->flags == MMC_DATA_READ))
			memcpy(data->dest, aligned_buffer, trans_bytes);
		return 0;
	}

	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT)
		return -ETIMEDOUT;
	else
		return -ECOMM;
}
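
/*
 * Tuning is only wired up for the driver-model path: the core simply hands
 * the opcode to the platform's platform_execute_tuning() hook, if one is set.
 */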
#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	debug("%s\n", __func__);

	if (host->ops && host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(mmc, opcode);
		if (err)
			return err;
	}

	return 0;
}
#endif
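
/*
 * Program the SD clock. For spec 3.00+ hosts the divisor is a 10-bit value
 * (SDCLK = base clock / (2 * N), or programmable-clock mode when a clock
 * multiplier is advertised); pre-3.00 hosts only accept power-of-two
 * divisors. The computed divisor is written split across the regular and
 * "HI" divider fields of SDHCI_CLOCK_CONTROL, and the card clock is gated
 * on only after the internal clock reports stable.
 */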
static int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
	struct sdhci_host *host = mmc->priv;
	unsigned int div, clk = 0, timeout;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
			   (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			printf("%s: Timeout to wait cmd & data inhibit\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(100);
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return 0;

	if (host->ops && host->ops->set_delay)
		host->ops->set_delay(host);

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++)
				if ((host->max_clk / div) <= clock)
					break;

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2)
					if ((host->max_clk / div) <= clock)
						break;
			}
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2)
			if ((host->max_clk / div) <= clock)
				break;
		div >>= 1;
	}

	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printf("%s: Internal clock never stabilised.\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(1000);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	return 0;
}
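
/*
 * Translate the selected voltage (passed as a bit index, i.e. 1 << power is
 * an MMC_VDD_* mask) into the matching SDHCI_POWER_* setting and switch the
 * bus power on, or clear the power control register when no voltage fits.
 */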
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		}
	}

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return;
	}

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
}
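
/*
 * Apply the current mmc->ios settings: let the platform hook touch its
 * control registers first, reprogram the clock if it changed (or gate it),
 * then set the 4/8-bit bus width and the high-speed bit in HOST_CONTROL.
 */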
#ifdef CONFIG_DM_MMC
static int sdhci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
	u32 ctrl;
	struct sdhci_host *host = mmc->priv;

	if (host->ops && host->ops->set_control_reg)
		host->ops->set_control_reg(host);

	if (mmc->clock != host->clock)
		sdhci_set_clock(mmc, mmc->clock);

	if (mmc->clk_disable)
		sdhci_set_clock(mmc, 0);

	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (mmc->bus_width == 8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}

	if (mmc->clock > 26000000)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/* If available, call the driver specific "post" set_ios() function */
	if (host->ops && host->ops->set_ios_post)
		host->ops->set_ios_post(host);

	return 0;
}
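
/*
 * One-time controller setup: full software reset, allocation of the bounce
 * buffer used for unaligned SDMA addresses, initial bus power, and enabling
 * only the interrupt status bits this polling driver actually consumes.
 */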
static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;

	sdhci_reset(host, SDHCI_RESET_ALL);

	if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
		aligned_buffer = memalign(8, 512*1024);
		if (!aligned_buffer) {
			printf("%s: Aligned buffer alloc failed!!!\n",
			       __func__);
			return -ENOMEM;
		}
	}

	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->ops && host->ops->get_cd)
		host->ops->get_cd(host);

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}
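
/*
 * With CONFIG_DM_MMC the controller is initialised from the uclass probe and
 * exposed through dm_mmc_ops; the legacy (non-DM) build registers the same
 * callbacks through a struct mmc_ops instead.
 */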
#ifdef CONFIG_DM_MMC
int sdhci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return sdhci_init(mmc);
}
const struct dm_mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning	= sdhci_execute_tuning,
#endif
};
#else
static const struct mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.init		= sdhci_init,
};
#endif
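
/*
 * Populate the mmc_config from the host's capability registers: controller
 * version, base clock and clock multiplier, supported VDD voltages, bus
 * widths, and the UHS/HS200 modes advertised in CAPABILITIES_1.
 */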
int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
		u32 f_max, u32 f_min)
{
	u32 caps, caps_1 = 0;

	caps = sdhci_readl(host, SDHCI_CAPABILITIES);

#ifdef CONFIG_MMC_SDHCI_SDMA
	if (!(caps & SDHCI_CAN_DO_SDMA)) {
		printf("%s: Your controller doesn't support SDMA!!\n",
		       __func__);
		return -EINVAL;
	}
#endif
	if (host->quirks & SDHCI_QUIRK_REG32_RW)
		host->version =
			sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
	else
		host->version = sdhci_readw(host, SDHCI_HOST_VERSION);

	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &sdhci_ops;
#endif

	/* Check whether the clock multiplier is supported or not */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
				SDHCI_CLOCK_MUL_SHIFT;
	}

	if (host->max_clk == 0) {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		else
			host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		host->max_clk *= 1000000;
		if (host->clk_mul)
			host->max_clk *= host->clk_mul;
	}
	if (host->max_clk == 0) {
		printf("%s: Hardware doesn't specify base clock frequency\n",
		       __func__);
		return -EINVAL;
	}
	if (f_max && (f_max < host->max_clk))
		cfg->f_max = f_max;
	else
		cfg->f_max = host->max_clk;
	if (f_min)
		cfg->f_min = f_min;
	else {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
		else
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
	}
	cfg->voltages = 0;
	if (caps & SDHCI_CAN_VDD_330)
		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		cfg->voltages |= MMC_VDD_165_195;

	if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
		cfg->voltages |= host->voltages;

	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;

	/* Since Host Controller Version 3.0 */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		if (!(caps & SDHCI_CAN_DO_8BIT))
			cfg->host_caps &= ~MMC_MODE_8BIT;
	}

	if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
		cfg->host_caps &= ~MMC_MODE_HS;
		cfg->host_caps &= ~MMC_MODE_HS_52MHz;
	}

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (!(cfg->voltages & MMC_VDD_165_195) ||
	    (host->quirks & SDHCI_QUIRK_NO_1_8_V))
		caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			    SDHCI_SUPPORT_DDR50);

	if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		      SDHCI_SUPPORT_DDR50))
		cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);

	if (caps_1 & SDHCI_SUPPORT_SDR104) {
		cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
		/*
		 * SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		cfg->host_caps |= MMC_CAP(MMC_HS_200);
	} else if (caps_1 & SDHCI_SUPPORT_SDR50) {
		cfg->host_caps |= MMC_CAP(UHS_SDR50);
	}

	if (caps_1 & SDHCI_SUPPORT_DDR50)
		cfg->host_caps |= MMC_CAP(UHS_DDR50);

	if (host->host_caps)
		cfg->host_caps |= host->host_caps;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	return 0;
}
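
/*
 * Registration helpers: with CONFIG_BLK the board code binds the prepared
 * mmc_config through sdhci_bind(), while the legacy path uses add_sdhci() to
 * run sdhci_setup_cfg() and create the mmc device in one step.
 */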
#ifdef CONFIG_BLK
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
{
	int ret;

	ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
	if (ret)
		return ret;

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL) {
		printf("%s: mmc create fail!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
#endif