1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
14 #include <dm/device-internal.h>
18 #include <power/regulator.h>
21 #include <linux/list.h>
23 #include "mmc_private.h"
25 #define DEFAULT_CMD6_TIMEOUT_MS 500
27 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
29 #if !CONFIG_IS_ENABLED(DM_MMC)
31 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
36 __weak int board_mmc_getwp(struct mmc *mmc)
41 int mmc_getwp(struct mmc *mmc)
45 wp = board_mmc_getwp(mmc);
48 if (mmc->cfg->ops->getwp)
49 wp = mmc->cfg->ops->getwp(mmc);
57 __weak int board_mmc_getcd(struct mmc *mmc)
63 #ifdef CONFIG_MMC_TRACE
64 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
66 printf("CMD_SEND:%d\n", cmd->cmdidx);
67 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
70 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
76 printf("\t\tRET\t\t\t %d\n", ret);
78 switch (cmd->resp_type) {
80 printf("\t\tMMC_RSP_NONE\n");
83 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
87 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
91 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
93 printf("\t\t \t\t 0x%08x \n",
95 printf("\t\t \t\t 0x%08x \n",
97 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t\t\t\tDUMPING DATA\n");
101 for (i = 0; i < 4; i++) {
103 printf("\t\t\t\t\t%03d - ", i*4);
104 ptr = (u8 *)&cmd->response[i];
106 for (j = 0; j < 4; j++)
107 printf("%02x ", *ptr--);
112 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
116 printf("\t\tERROR MMC rsp not supported\n");
122 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
126 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
127 printf("CURR STATE:%d\n", status);
131 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
132 const char *mmc_mode_name(enum bus_mode mode)
134 static const char *const names[] = {
135 [MMC_LEGACY] = "MMC legacy",
136 [MMC_HS] = "MMC High Speed (26MHz)",
137 [SD_HS] = "SD High Speed (50MHz)",
138 [UHS_SDR12] = "UHS SDR12 (25MHz)",
139 [UHS_SDR25] = "UHS SDR25 (50MHz)",
140 [UHS_SDR50] = "UHS SDR50 (100MHz)",
141 [UHS_SDR104] = "UHS SDR104 (208MHz)",
142 [UHS_DDR50] = "UHS DDR50 (50MHz)",
143 [MMC_HS_52] = "MMC High Speed (52MHz)",
144 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
145 [MMC_HS_200] = "HS200 (200MHz)",
146 [MMC_HS_400] = "HS400 (200MHz)",
147 [MMC_HS_400_ES] = "HS400ES (200MHz)",
150 if (mode >= MMC_MODES_END)
151 return "Unknown mode";
157 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
159 static const int freqs[] = {
160 [MMC_LEGACY] = 25000000,
163 [MMC_HS_52] = 52000000,
164 [MMC_DDR_52] = 52000000,
165 [UHS_SDR12] = 25000000,
166 [UHS_SDR25] = 50000000,
167 [UHS_SDR50] = 100000000,
168 [UHS_DDR50] = 50000000,
169 [UHS_SDR104] = 208000000,
170 [MMC_HS_200] = 200000000,
171 [MMC_HS_400] = 200000000,
172 [MMC_HS_400_ES] = 200000000,
175 if (mode == MMC_LEGACY)
176 return mmc->legacy_speed;
177 else if (mode >= MMC_MODES_END)
183 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
185 mmc->selected_mode = mode;
186 mmc->tran_speed = mmc_mode2freq(mmc, mode);
187 mmc->ddr_mode = mmc_is_mode_ddr(mode);
188 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
189 mmc->tran_speed / 1000000);
193 #if !CONFIG_IS_ENABLED(DM_MMC)
194 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
198 mmmc_trace_before_send(mmc, cmd);
199 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
200 mmmc_trace_after_send(mmc, cmd, ret);
206 int mmc_send_status(struct mmc *mmc, unsigned int *status)
209 int err, retries = 5;
211 cmd.cmdidx = MMC_CMD_SEND_STATUS;
212 cmd.resp_type = MMC_RSP_R1;
213 if (!mmc_host_is_spi(mmc))
214 cmd.cmdarg = mmc->rca << 16;
217 err = mmc_send_cmd(mmc, &cmd, NULL);
219 mmc_trace_state(mmc, &cmd);
220 *status = cmd.response[0];
224 mmc_trace_state(mmc, &cmd);
228 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
233 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
238 err = mmc_send_status(mmc, &status);
242 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
243 (status & MMC_STATUS_CURR_STATE) !=
247 if (status & MMC_STATUS_MASK) {
248 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
249 pr_err("Status Error: 0x%08x\n", status);
254 if (timeout_ms-- <= 0)
260 if (timeout_ms <= 0) {
261 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
262 pr_err("Timeout waiting card ready\n");
270 int mmc_set_blocklen(struct mmc *mmc, int len)
278 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
279 cmd.resp_type = MMC_RSP_R1;
282 err = mmc_send_cmd(mmc, &cmd, NULL);
284 #ifdef CONFIG_MMC_QUIRKS
285 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
288 * It has been seen that SET_BLOCKLEN may fail on the first
289 * attempt, let's try a few more times
292 err = mmc_send_cmd(mmc, &cmd, NULL);
302 #ifdef MMC_SUPPORTS_TUNING
303 static const u8 tuning_blk_pattern_4bit[] = {
304 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
305 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
306 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
307 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
308 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
309 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
310 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
311 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
314 static const u8 tuning_blk_pattern_8bit[] = {
315 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
316 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
317 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
318 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
319 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
320 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
321 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
322 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
323 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
324 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
325 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
326 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
327 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
328 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
329 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
330 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
333 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
336 struct mmc_data data;
337 const u8 *tuning_block_pattern;
340 if (mmc->bus_width == 8) {
341 tuning_block_pattern = tuning_blk_pattern_8bit;
342 size = sizeof(tuning_blk_pattern_8bit);
343 } else if (mmc->bus_width == 4) {
344 tuning_block_pattern = tuning_blk_pattern_4bit;
345 size = sizeof(tuning_blk_pattern_4bit);
350 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
354 cmd.resp_type = MMC_RSP_R1;
356 data.dest = (void *)data_buf;
358 data.blocksize = size;
359 data.flags = MMC_DATA_READ;
361 err = mmc_send_cmd(mmc, &cmd, &data);
365 if (memcmp(data_buf, tuning_block_pattern, size))
372 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
376 struct mmc_data data;
379 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
381 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
383 if (mmc->high_capacity)
386 cmd.cmdarg = start * mmc->read_bl_len;
388 cmd.resp_type = MMC_RSP_R1;
391 data.blocks = blkcnt;
392 data.blocksize = mmc->read_bl_len;
393 data.flags = MMC_DATA_READ;
395 if (mmc_send_cmd(mmc, &cmd, &data))
399 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
401 cmd.resp_type = MMC_RSP_R1b;
402 if (mmc_send_cmd(mmc, &cmd, NULL)) {
403 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
404 pr_err("mmc fail to send stop cmd\n");
413 #if !CONFIG_IS_ENABLED(DM_MMC)
414 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
416 if (mmc->cfg->ops->get_b_max)
417 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
419 return mmc->cfg->b_max;
423 #if CONFIG_IS_ENABLED(BLK)
424 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
426 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
430 #if CONFIG_IS_ENABLED(BLK)
431 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
433 int dev_num = block_dev->devnum;
435 lbaint_t cur, blocks_todo = blkcnt;
441 struct mmc *mmc = find_mmc_device(dev_num);
445 if (CONFIG_IS_ENABLED(MMC_TINY))
446 err = mmc_switch_part(mmc, block_dev->hwpart);
448 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
453 if ((start + blkcnt) > block_dev->lba) {
454 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
455 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
456 start + blkcnt, block_dev->lba);
461 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
462 pr_debug("%s: Failed to set blocklen\n", __func__);
466 b_max = mmc_get_b_max(mmc, dst, blkcnt);
469 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
470 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
471 pr_debug("%s: Failed to read blocks\n", __func__);
476 dst += cur * mmc->read_bl_len;
477 } while (blocks_todo > 0);
482 static int mmc_go_idle(struct mmc *mmc)
489 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
491 cmd.resp_type = MMC_RSP_NONE;
493 err = mmc_send_cmd(mmc, &cmd, NULL);
503 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
504 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
510 * Send CMD11 only if the request is to switch the card to
513 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
514 return mmc_set_signal_voltage(mmc, signal_voltage);
516 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
518 cmd.resp_type = MMC_RSP_R1;
520 err = mmc_send_cmd(mmc, &cmd, NULL);
524 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
528 * The card should drive cmd and dat[0:3] low immediately
529 * after the response of cmd11, but wait 100 us to be sure
531 err = mmc_wait_dat0(mmc, 0, 100);
538 * During a signal voltage level switch, the clock must be gated
539 * for 5 ms according to the SD spec
541 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
543 err = mmc_set_signal_voltage(mmc, signal_voltage);
547 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
549 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
552 * Failure to switch is indicated by the card holding
553 * dat[0:3] low. Wait for at least 1 ms according to spec
555 err = mmc_wait_dat0(mmc, 1, 1000);
565 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
572 cmd.cmdidx = MMC_CMD_APP_CMD;
573 cmd.resp_type = MMC_RSP_R1;
576 err = mmc_send_cmd(mmc, &cmd, NULL);
581 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
582 cmd.resp_type = MMC_RSP_R3;
585 * Most cards do not answer if some reserved bits
586 * in the ocr are set. However, some controllers
587 * can set bit 7 (reserved for low voltages), but
588 * how to manage low-voltage SD cards is not yet
591 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
592 (mmc->cfg->voltages & 0xff8000);
594 if (mmc->version == SD_VERSION_2)
595 cmd.cmdarg |= OCR_HCS;
598 cmd.cmdarg |= OCR_S18R;
600 err = mmc_send_cmd(mmc, &cmd, NULL);
605 if (cmd.response[0] & OCR_BUSY)
614 if (mmc->version != SD_VERSION_2)
615 mmc->version = SD_VERSION_1_0;
617 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
618 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
619 cmd.resp_type = MMC_RSP_R3;
622 err = mmc_send_cmd(mmc, &cmd, NULL);
628 mmc->ocr = cmd.response[0];
630 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
631 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
633 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
639 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
645 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
650 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
651 cmd.resp_type = MMC_RSP_R3;
653 if (use_arg && !mmc_host_is_spi(mmc))
654 cmd.cmdarg = OCR_HCS |
655 (mmc->cfg->voltages &
656 (mmc->ocr & OCR_VOLTAGE_MASK)) |
657 (mmc->ocr & OCR_ACCESS_MODE);
659 err = mmc_send_cmd(mmc, &cmd, NULL);
662 mmc->ocr = cmd.response[0];
666 static int mmc_send_op_cond(struct mmc *mmc)
670 /* Some cards seem to need this */
673 /* Asking to the card its capabilities */
674 for (i = 0; i < 2; i++) {
675 err = mmc_send_op_cond_iter(mmc, i != 0);
679 /* exit if not busy (flag seems to be inverted) */
680 if (mmc->ocr & OCR_BUSY)
683 mmc->op_cond_pending = 1;
687 static int mmc_complete_op_cond(struct mmc *mmc)
694 mmc->op_cond_pending = 0;
695 if (!(mmc->ocr & OCR_BUSY)) {
696 /* Some cards seem to need this */
699 start = get_timer(0);
701 err = mmc_send_op_cond_iter(mmc, 1);
704 if (mmc->ocr & OCR_BUSY)
706 if (get_timer(start) > timeout)
712 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
713 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
714 cmd.resp_type = MMC_RSP_R3;
717 err = mmc_send_cmd(mmc, &cmd, NULL);
722 mmc->ocr = cmd.response[0];
725 mmc->version = MMC_VERSION_UNKNOWN;
727 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
734 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
737 struct mmc_data data;
740 /* Get the Card Status Register */
741 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
742 cmd.resp_type = MMC_RSP_R1;
745 data.dest = (char *)ext_csd;
747 data.blocksize = MMC_MAX_BLOCK_LEN;
748 data.flags = MMC_DATA_READ;
750 err = mmc_send_cmd(mmc, &cmd, &data);
755 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
758 unsigned int status, start;
760 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
761 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
762 (index == EXT_CSD_PART_CONF);
766 if (mmc->gen_cmd6_time)
767 timeout_ms = mmc->gen_cmd6_time * 10;
769 if (is_part_switch && mmc->part_switch_time)
770 timeout_ms = mmc->part_switch_time * 10;
772 cmd.cmdidx = MMC_CMD_SWITCH;
773 cmd.resp_type = MMC_RSP_R1b;
774 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
779 ret = mmc_send_cmd(mmc, &cmd, NULL);
780 } while (ret && retries-- > 0);
785 start = get_timer(0);
787 /* poll dat0 for rdy/busy status */
788 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
789 if (ret && ret != -ENOSYS)
793 * In cases when not allowed to poll by using CMD13 or because we aren't
794 * capable of polling by using mmc_wait_dat0, then rely on waiting the
795 * stated timeout to be sufficient.
797 if (ret == -ENOSYS && !send_status)
800 /* Finally wait until the card is ready or indicates a failure
801 * to switch. It doesn't hurt to use CMD13 here even if send_status
802 * is false, because by now (after 'timeout_ms' ms) the bus should be
806 ret = mmc_send_status(mmc, &status);
808 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
809 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
813 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
816 } while (get_timer(start) < timeout_ms);
821 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
823 return __mmc_switch(mmc, set, index, value, true);
826 int mmc_boot_wp(struct mmc *mmc)
828 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
831 #if !CONFIG_IS_ENABLED(MMC_TINY)
832 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
838 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
844 speed_bits = EXT_CSD_TIMING_HS;
846 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
848 speed_bits = EXT_CSD_TIMING_HS200;
851 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
853 speed_bits = EXT_CSD_TIMING_HS400;
856 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
858 speed_bits = EXT_CSD_TIMING_HS400;
862 speed_bits = EXT_CSD_TIMING_LEGACY;
868 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
869 speed_bits, !hsdowngrade);
873 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
874 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
876 * In case the eMMC is in HS200/HS400 mode and we are downgrading
877 * to HS mode, the card clock are still running much faster than
878 * the supported HS mode clock, so we can not reliably read out
879 * Extended CSD. Reconfigure the controller to run at HS mode.
882 mmc_select_mode(mmc, MMC_HS);
883 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
887 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
888 /* Now check to see that it worked */
889 err = mmc_send_ext_csd(mmc, test_csd);
893 /* No high-speed support */
894 if (!test_csd[EXT_CSD_HS_TIMING])
901 static int mmc_get_capabilities(struct mmc *mmc)
903 u8 *ext_csd = mmc->ext_csd;
906 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
908 if (mmc_host_is_spi(mmc))
911 /* Only version 4 supports high-speed */
912 if (mmc->version < MMC_VERSION_4)
916 pr_err("No ext_csd found!\n"); /* this should never happen */
920 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
922 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
923 mmc->cardtype = cardtype;
925 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
926 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
927 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
928 mmc->card_caps |= MMC_MODE_HS200;
931 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
932 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
933 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
934 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
935 mmc->card_caps |= MMC_MODE_HS400;
938 if (cardtype & EXT_CSD_CARD_TYPE_52) {
939 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
940 mmc->card_caps |= MMC_MODE_DDR_52MHz;
941 mmc->card_caps |= MMC_MODE_HS_52MHz;
943 if (cardtype & EXT_CSD_CARD_TYPE_26)
944 mmc->card_caps |= MMC_MODE_HS;
946 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
947 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
948 (mmc->card_caps & MMC_MODE_HS400)) {
949 mmc->card_caps |= MMC_MODE_HS400_ES;
957 static int mmc_set_capacity(struct mmc *mmc, int part_num)
961 mmc->capacity = mmc->capacity_user;
965 mmc->capacity = mmc->capacity_boot;
968 mmc->capacity = mmc->capacity_rpmb;
974 mmc->capacity = mmc->capacity_gp[part_num - 4];
980 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
985 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
991 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
993 (mmc->part_config & ~PART_ACCESS_MASK)
994 | (part_num & PART_ACCESS_MASK));
995 } while (ret && retry--);
998 * Set the capacity if the switch succeeded or was intended
999 * to return to representing the raw device.
1001 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1002 ret = mmc_set_capacity(mmc, part_num);
1003 mmc_get_blk_desc(mmc)->hwpart = part_num;
1009 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1010 int mmc_hwpart_config(struct mmc *mmc,
1011 const struct mmc_hwpart_conf *conf,
1012 enum mmc_hwpart_conf_mode mode)
1017 u32 gp_size_mult[4];
1018 u32 max_enh_size_mult;
1019 u32 tot_enh_size_mult = 0;
1022 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1024 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1027 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1028 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1029 return -EMEDIUMTYPE;
1032 if (!(mmc->part_support & PART_SUPPORT)) {
1033 pr_err("Card does not support partitioning\n");
1034 return -EMEDIUMTYPE;
1037 if (!mmc->hc_wp_grp_size) {
1038 pr_err("Card does not define HC WP group size\n");
1039 return -EMEDIUMTYPE;
1042 /* check partition alignment and total enhanced size */
1043 if (conf->user.enh_size) {
1044 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1045 conf->user.enh_start % mmc->hc_wp_grp_size) {
1046 pr_err("User data enhanced area not HC WP group "
1050 part_attrs |= EXT_CSD_ENH_USR;
1051 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1052 if (mmc->high_capacity) {
1053 enh_start_addr = conf->user.enh_start;
1055 enh_start_addr = (conf->user.enh_start << 9);
1061 tot_enh_size_mult += enh_size_mult;
1063 for (pidx = 0; pidx < 4; pidx++) {
1064 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1065 pr_err("GP%i partition not HC WP group size "
1066 "aligned\n", pidx+1);
1069 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1070 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1071 part_attrs |= EXT_CSD_ENH_GP(pidx);
1072 tot_enh_size_mult += gp_size_mult[pidx];
1076 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1077 pr_err("Card does not support enhanced attribute\n");
1078 return -EMEDIUMTYPE;
1081 err = mmc_send_ext_csd(mmc, ext_csd);
1086 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1087 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1088 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1089 if (tot_enh_size_mult > max_enh_size_mult) {
1090 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1091 tot_enh_size_mult, max_enh_size_mult);
1092 return -EMEDIUMTYPE;
1095 /* The default value of EXT_CSD_WR_REL_SET is device
1096 * dependent, the values can only be changed if the
1097 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1098 * changed only once and before partitioning is completed. */
1099 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1100 if (conf->user.wr_rel_change) {
1101 if (conf->user.wr_rel_set)
1102 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1104 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1106 for (pidx = 0; pidx < 4; pidx++) {
1107 if (conf->gp_part[pidx].wr_rel_change) {
1108 if (conf->gp_part[pidx].wr_rel_set)
1109 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1111 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1115 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1116 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1117 puts("Card does not support host controlled partition write "
1118 "reliability settings\n");
1119 return -EMEDIUMTYPE;
1122 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1123 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1124 pr_err("Card already partitioned\n");
1128 if (mode == MMC_HWPART_CONF_CHECK)
1131 /* Partitioning requires high-capacity size definitions */
1132 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1133 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1134 EXT_CSD_ERASE_GROUP_DEF, 1);
1139 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1141 #if CONFIG_IS_ENABLED(MMC_WRITE)
1142 /* update erase group size to be high-capacity */
1143 mmc->erase_grp_size =
1144 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1149 /* all OK, write the configuration */
1150 for (i = 0; i < 4; i++) {
1151 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1152 EXT_CSD_ENH_START_ADDR+i,
1153 (enh_start_addr >> (i*8)) & 0xFF);
1157 for (i = 0; i < 3; i++) {
1158 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1159 EXT_CSD_ENH_SIZE_MULT+i,
1160 (enh_size_mult >> (i*8)) & 0xFF);
1164 for (pidx = 0; pidx < 4; pidx++) {
1165 for (i = 0; i < 3; i++) {
1166 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1167 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1168 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1173 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1174 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1178 if (mode == MMC_HWPART_CONF_SET)
1181 /* The WR_REL_SET is a write-once register but shall be
1182 * written before setting PART_SETTING_COMPLETED. As it is
1183 * write-once we can only write it when completing the
1185 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1186 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1187 EXT_CSD_WR_REL_SET, wr_rel_set);
1192 /* Setting PART_SETTING_COMPLETED confirms the partition
1193 * configuration but it only becomes effective after power
1194 * cycle, so we do not adjust the partition related settings
1195 * in the mmc struct. */
1197 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1198 EXT_CSD_PARTITION_SETTING,
1199 EXT_CSD_PARTITION_SETTING_COMPLETED);
1207 #if !CONFIG_IS_ENABLED(DM_MMC)
1208 int mmc_getcd(struct mmc *mmc)
1212 cd = board_mmc_getcd(mmc);
1215 if (mmc->cfg->ops->getcd)
1216 cd = mmc->cfg->ops->getcd(mmc);
1225 #if !CONFIG_IS_ENABLED(MMC_TINY)
1226 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1229 struct mmc_data data;
1231 /* Switch the frequency */
1232 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1233 cmd.resp_type = MMC_RSP_R1;
1234 cmd.cmdarg = (mode << 31) | 0xffffff;
1235 cmd.cmdarg &= ~(0xf << (group * 4));
1236 cmd.cmdarg |= value << (group * 4);
1238 data.dest = (char *)resp;
1239 data.blocksize = 64;
1241 data.flags = MMC_DATA_READ;
1243 return mmc_send_cmd(mmc, &cmd, &data);
1246 static int sd_get_capabilities(struct mmc *mmc)
1250 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1251 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1252 struct mmc_data data;
1254 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1258 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1260 if (mmc_host_is_spi(mmc))
1263 /* Read the SCR to find out if this card supports higher speeds */
1264 cmd.cmdidx = MMC_CMD_APP_CMD;
1265 cmd.resp_type = MMC_RSP_R1;
1266 cmd.cmdarg = mmc->rca << 16;
1268 err = mmc_send_cmd(mmc, &cmd, NULL);
1273 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1274 cmd.resp_type = MMC_RSP_R1;
1280 data.dest = (char *)scr;
1283 data.flags = MMC_DATA_READ;
1285 err = mmc_send_cmd(mmc, &cmd, &data);
1294 mmc->scr[0] = __be32_to_cpu(scr[0]);
1295 mmc->scr[1] = __be32_to_cpu(scr[1]);
1297 switch ((mmc->scr[0] >> 24) & 0xf) {
1299 mmc->version = SD_VERSION_1_0;
1302 mmc->version = SD_VERSION_1_10;
1305 mmc->version = SD_VERSION_2;
1306 if ((mmc->scr[0] >> 15) & 0x1)
1307 mmc->version = SD_VERSION_3;
1310 mmc->version = SD_VERSION_1_0;
1314 if (mmc->scr[0] & SD_DATA_4BIT)
1315 mmc->card_caps |= MMC_MODE_4BIT;
1317 /* Version 1.0 doesn't support switching */
1318 if (mmc->version == SD_VERSION_1_0)
1323 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1324 (u8 *)switch_status);
1329 /* The high-speed function is busy. Try again */
1330 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1334 /* If high-speed isn't supported, we return */
1335 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1336 mmc->card_caps |= MMC_CAP(SD_HS);
1338 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1339 /* Versions before 3.0 don't support UHS modes */
1340 if (mmc->version < SD_VERSION_3)
1343 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1344 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1345 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1346 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1347 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1348 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1349 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1350 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1351 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1352 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1353 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1359 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1363 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1366 /* SD versions 1.00 and 1.01 do not support CMD 6 */
1367 if (mmc->version == SD_VERSION_1_0)
1372 speed = UHS_SDR12_BUS_SPEED;
1375 speed = HIGH_SPEED_BUS_SPEED;
1377 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1379 speed = UHS_SDR12_BUS_SPEED;
1382 speed = UHS_SDR25_BUS_SPEED;
1385 speed = UHS_SDR50_BUS_SPEED;
1388 speed = UHS_DDR50_BUS_SPEED;
1391 speed = UHS_SDR104_BUS_SPEED;
1398 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1402 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1408 static int sd_select_bus_width(struct mmc *mmc, int w)
1413 if ((w != 4) && (w != 1))
1416 cmd.cmdidx = MMC_CMD_APP_CMD;
1417 cmd.resp_type = MMC_RSP_R1;
1418 cmd.cmdarg = mmc->rca << 16;
1420 err = mmc_send_cmd(mmc, &cmd, NULL);
1424 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1425 cmd.resp_type = MMC_RSP_R1;
1430 err = mmc_send_cmd(mmc, &cmd, NULL);
1438 #if CONFIG_IS_ENABLED(MMC_WRITE)
1439 static int sd_read_ssr(struct mmc *mmc)
1441 static const unsigned int sd_au_size[] = {
1442 0, SZ_16K / 512, SZ_32K / 512,
1443 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1444 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1445 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1446 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1451 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1452 struct mmc_data data;
1454 unsigned int au, eo, et, es;
1456 cmd.cmdidx = MMC_CMD_APP_CMD;
1457 cmd.resp_type = MMC_RSP_R1;
1458 cmd.cmdarg = mmc->rca << 16;
1460 err = mmc_send_cmd(mmc, &cmd, NULL);
1461 #ifdef CONFIG_MMC_QUIRKS
1462 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1465 * It has been seen that APP_CMD may fail on the first
1466 * attempt, let's try a few more times
1469 err = mmc_send_cmd(mmc, &cmd, NULL);
1472 } while (retries--);
1478 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1479 cmd.resp_type = MMC_RSP_R1;
1483 data.dest = (char *)ssr;
1484 data.blocksize = 64;
1486 data.flags = MMC_DATA_READ;
1488 err = mmc_send_cmd(mmc, &cmd, &data);
1496 for (i = 0; i < 16; i++)
1497 ssr[i] = be32_to_cpu(ssr[i]);
1499 au = (ssr[2] >> 12) & 0xF;
1500 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1501 mmc->ssr.au = sd_au_size[au];
1502 es = (ssr[3] >> 24) & 0xFF;
1503 es |= (ssr[2] & 0xFF) << 8;
1504 et = (ssr[3] >> 18) & 0x3F;
1506 eo = (ssr[3] >> 16) & 0x3;
1507 mmc->ssr.erase_timeout = (et * 1000) / es;
1508 mmc->ssr.erase_offset = eo * 1000;
1511 pr_debug("Invalid Allocation Unit Size.\n");
1517 /* frequency bases */
1518 /* divided by 10 to be nice to platforms without floating point */
1519 static const int fbase[] = {
1526 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1527 * to platforms without floating point.
1529 static const u8 multipliers[] = {
1548 static inline int bus_width(uint cap)
1550 if (cap == MMC_MODE_8BIT)
1552 if (cap == MMC_MODE_4BIT)
1554 if (cap == MMC_MODE_1BIT)
1556 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1560 #if !CONFIG_IS_ENABLED(DM_MMC)
1561 #ifdef MMC_SUPPORTS_TUNING
1562 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1568 static int mmc_set_ios(struct mmc *mmc)
1572 if (mmc->cfg->ops->set_ios)
1573 ret = mmc->cfg->ops->set_ios(mmc);
1578 static int mmc_host_power_cycle(struct mmc *mmc)
1582 if (mmc->cfg->ops->host_power_cycle)
1583 ret = mmc->cfg->ops->host_power_cycle(mmc);
1589 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1592 if (clock > mmc->cfg->f_max)
1593 clock = mmc->cfg->f_max;
1595 if (clock < mmc->cfg->f_min)
1596 clock = mmc->cfg->f_min;
1600 mmc->clk_disable = disable;
1602 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1604 return mmc_set_ios(mmc);
1607 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1609 mmc->bus_width = width;
1611 return mmc_set_ios(mmc);
1614 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1616 * helper function to display the capabilities in a human
1617 * friendly manner. The capabilities include bus width and
1620 void mmc_dump_capabilities(const char *text, uint caps)
1624 pr_debug("%s: widths [", text);
1625 if (caps & MMC_MODE_8BIT)
1627 if (caps & MMC_MODE_4BIT)
1629 if (caps & MMC_MODE_1BIT)
1631 pr_debug("\b\b] modes [");
1632 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1633 if (MMC_CAP(mode) & caps)
1634 pr_debug("%s, ", mmc_mode_name(mode));
1635 pr_debug("\b\b]\n");
1639 struct mode_width_tuning {
1642 #ifdef MMC_SUPPORTS_TUNING
1647 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1648 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1651 case MMC_SIGNAL_VOLTAGE_000: return 0;
1652 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1653 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1654 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1659 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1663 if (mmc->signal_voltage == signal_voltage)
1666 mmc->signal_voltage = signal_voltage;
1667 err = mmc_set_ios(mmc);
1669 pr_debug("unable to set voltage (err %d)\n", err);
1674 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1680 #if !CONFIG_IS_ENABLED(MMC_TINY)
1681 static const struct mode_width_tuning sd_modes_by_pref[] = {
1682 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1683 #ifdef MMC_SUPPORTS_TUNING
1686 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1687 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1692 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1696 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1700 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1705 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1707 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1710 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1715 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1719 #define for_each_sd_mode_by_pref(caps, mwt) \
1720 for (mwt = sd_modes_by_pref;\
1721 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1723 if (caps & MMC_CAP(mwt->mode))
/* Pick the best working (mode, width) pair for an SD card: try each
 * host-supported mode in preference order, widest width first, and fall
 * back to MMC_LEGACY speed on failure.  Returns 0 on success. */
1725 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1728 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1729 const struct mode_width_tuning *mwt;
1730 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS is attempted only if the card advertised 1.8 V switching (S18R) */
1731 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1733 bool uhs_en = false;
1738 mmc_dump_capabilities("sd card", card_caps);
1739 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts only do 1-bit legacy mode; nothing to negotiate */
1742 if (mmc_host_is_spi(mmc)) {
1743 mmc_set_bus_width(mmc, 1);
1744 mmc_select_mode(mmc, MMC_LEGACY);
1745 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1749 /* Restrict card's capabilities by what the host can do */
1750 caps = card_caps & mmc->host_caps;
1755 for_each_sd_mode_by_pref(caps, mwt) {
1758 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1759 if (*w & caps & mwt->widths) {
1760 pr_debug("trying mode %s width %d (at %d MHz)\n",
1761 mmc_mode_name(mwt->mode),
1763 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1765 /* configure the bus width (card + host) */
1766 err = sd_select_bus_width(mmc, bus_width(*w));
1769 mmc_set_bus_width(mmc, bus_width(*w));
1771 /* configure the bus mode (card) */
1772 err = sd_set_card_speed(mmc, mwt->mode);
1776 /* configure the bus mode (host) */
1777 mmc_select_mode(mmc, mwt->mode);
1778 mmc_set_clock(mmc, mmc->tran_speed,
1781 #ifdef MMC_SUPPORTS_TUNING
1782 /* execute tuning if needed (not possible over SPI) */
1783 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1784 err = mmc_execute_tuning(mmc,
1787 pr_debug("tuning failed\n");
1793 #if CONFIG_IS_ENABLED(MMC_WRITE)
1794 err = sd_read_ssr(mmc);
/* SSR failure is non-fatal: only warn */
1796 pr_warn("unable to read ssr\n");
1802 /* revert to a safer bus speed */
1803 mmc_select_mode(mmc, MMC_LEGACY);
1804 mmc_set_clock(mmc, mmc->tran_speed,
/* reached only when every (mode, width) candidate failed */
1810 pr_err("unable to select a mode\n");
1815 * read and compare the part of ext csd that is constant.
1816 * This can be used to check that the transfer is working
/* Re-read EXT_CSD and compare its read-only fields against the cached
 * copy in mmc->ext_csd; used as a data-transfer sanity check after a
 * bus reconfiguration.  Only meaningful for MMC v4+ devices. */
1819 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1822 const u8 *ext_csd = mmc->ext_csd;
1823 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* pre-v4 cards have no EXT_CSD to compare */
1825 if (mmc->version < MMC_VERSION_4)
1828 err = mmc_send_ext_csd(mmc, test_csd);
1832 /* Only compare read only fields */
1833 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1834 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1835 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1836 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1837 ext_csd[EXT_CSD_REV]
1838 == test_csd[EXT_CSD_REV] &&
1839 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1840 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
/* SEC_CNT is a 4-byte little-endian field, compare all of it */
1841 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1842 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1848 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Build the set of signal voltages the card supports for @mode (from
 * the EXT_CSD card type bits), intersect it with @allowed_mask, and try
 * each candidate from lowest to highest until one can be applied. */
1849 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1850 uint32_t allowed_mask)
/* HS200/HS400 parts: 1.8 V and/or 1.2 V signalling */
1858 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1859 EXT_CSD_CARD_TYPE_HS400_1_8V))
1860 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1861 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1862 EXT_CSD_CARD_TYPE_HS400_1_2V))
1863 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* DDR parts may additionally run at 3.3 V */
1866 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1867 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1868 MMC_SIGNAL_VOLTAGE_180;
1869 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1870 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1873 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* ffs() picks the lowest set bit, i.e. the lowest voltage first */
1877 while (card_mask & allowed_mask) {
1878 enum mmc_voltage best_match;
1880 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1881 if (!mmc_set_signal_voltage(mmc, best_match))
/* that voltage failed; drop it and try the next candidate */
1884 allowed_mask &= ~best_match;
/* Stub when MMC_IO_VOLTAGE is disabled; body elided — presumably a
 * plain success return, confirm against the full source. */
1890 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1891 uint32_t allowed_mask)
/* eMMC bus modes ordered most- to least-preferred; entries exist only
 * when the matching support option is compiled in. */
1897 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1898 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1900 .mode = MMC_HS_400_ES,
1901 .widths = MMC_MODE_8BIT,
1904 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1907 .widths = MMC_MODE_8BIT,
/* HS400 tunes in HS200 mode, hence the HS200 tuning command */
1908 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1911 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1914 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1915 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1920 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1924 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1928 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1932 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate over mmc_modes_by_pref, visiting only modes present in @caps */
1936 #define for_each_mmc_mode_by_pref(caps, mwt) \
1937 for (mwt = mmc_modes_by_pref;\
1938 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1940 if (caps & MMC_CAP(mwt->mode))
/* Maps a host width capability (+ DDR flag) to the EXT_CSD BUS_WIDTH
 * value to program into the card; ordered widest/DDR first. */
1942 static const struct ext_csd_bus_width {
1946 } ext_csd_bus_width[] = {
1947 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1948 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1949 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1950 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1951 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1954 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/* Switch an eMMC device into HS400 mode.  Per JEDEC, the sequence is:
 * enter HS200, tune, drop back to HS, set 8-bit DDR bus width, then
 * switch the card and host to HS400 timing. */
1955 static int mmc_select_hs400(struct mmc *mmc)
1959 /* Set timing to HS200 for tuning */
1960 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1964 /* configure the bus mode (host) */
1965 mmc_select_mode(mmc, MMC_HS_200);
1966 mmc_set_clock(mmc, mmc->tran_speed, false);
1968 /* execute tuning if needed */
1969 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1971 debug("tuning failed\n");
1975 /* Set back to HS before enabling the DDR bus width */
1976 mmc_set_card_speed(mmc, MMC_HS, true);
1978 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1979 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1983 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1987 mmc_select_mode(mmc, MMC_HS_400);
1988 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is disabled; body elided — presumably
 * returns an error, confirm against the full source. */
1995 static int mmc_select_hs400(struct mmc *mmc)
2001 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2002 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM fallback; body elided — presumably a no-op/stub, confirm. */
2003 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/* Switch an eMMC device into HS400 Enhanced Strobe mode: HS first,
 * then 8-bit DDR width with the strobe bit, then HS400ES timing, and
 * finally enable enhanced strobe on the host. */
2008 static int mmc_select_hs400es(struct mmc *mmc)
2012 err = mmc_set_card_speed(mmc, MMC_HS, true);
2016 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2017 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2018 EXT_CSD_BUS_WIDTH_STROBE);
2020 printf("switch to bus width for hs400 failed\n");
2023 /* TODO: driver strength */
2024 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2028 mmc_select_mode(mmc, MMC_HS_400_ES);
2029 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2033 return mmc_set_enhanced_strobe(mmc);
/* Stub when HS400ES support is disabled; body elided — presumably
 * returns an error, confirm against the full source. */
2036 static int mmc_select_hs400es(struct mmc *mmc)
/* Iterate over ext_csd_bus_width entries matching the DDR flag @ddr
 * and present in @caps */
2042 #define for_each_supported_width(caps, ddr, ecbv) \
2043 for (ecbv = ext_csd_bus_width;\
2044 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2046 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/* Pick the best working (mode, width) pair for an eMMC device: walk the
 * host&card-supported modes in preference order, program card then host,
 * verify with an EXT_CSD read-back, and revert to 1-bit legacy mode on
 * any failure.  Returns 0 on success. */
2048 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2051 const struct mode_width_tuning *mwt;
2052 const struct ext_csd_bus_width *ecbw;
2055 mmc_dump_capabilities("mmc", card_caps);
2056 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts only do 1-bit legacy mode; nothing to negotiate */
2059 if (mmc_host_is_spi(mmc)) {
2060 mmc_set_bus_width(mmc, 1);
2061 mmc_select_mode(mmc, MMC_LEGACY);
2062 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2066 /* Restrict card's capabilities by what the host can do */
2067 card_caps &= mmc->host_caps;
2069 /* Only version 4 of MMC supports wider bus widths */
2070 if (mmc->version < MMC_VERSION_4)
2073 if (!mmc->ext_csd) {
2074 pr_debug("No ext_csd found!\n"); /* this should never happen */
2078 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2079 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2081 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2082 * before doing anything else, since a transition from either of
2083 * the HS200/HS400 mode directly to legacy mode is not supported.
2085 if (mmc->selected_mode == MMC_HS_200 ||
2086 mmc->selected_mode == MMC_HS_400)
2087 mmc_set_card_speed(mmc, MMC_HS, true);
2090 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2092 for_each_mmc_mode_by_pref(card_caps, mwt) {
2093 for_each_supported_width(card_caps & mwt->widths,
2094 mmc_is_mode_ddr(mwt->mode), ecbw) {
2095 enum mmc_voltage old_voltage;
2096 pr_debug("trying mode %s width %d (at %d MHz)\n",
2097 mmc_mode_name(mwt->mode),
2098 bus_width(ecbw->cap),
2099 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so it can be restored if this mode fails */
2100 old_voltage = mmc->signal_voltage;
2101 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2102 MMC_ALL_SIGNAL_VOLTAGE);
2106 /* configure the bus width (card + host); the DDR flag is
2107 applied separately below, after the speed switch */
2107 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2109 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2112 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400/HS400ES need their own multi-step switch sequences */
2114 if (mwt->mode == MMC_HS_400) {
2115 err = mmc_select_hs400(mmc);
2117 printf("Select HS400 failed %d\n", err);
2120 } else if (mwt->mode == MMC_HS_400_ES) {
2121 err = mmc_select_hs400es(mmc);
2123 printf("Select HS400ES failed %d\n",
2128 /* configure the bus speed (card) */
2129 err = mmc_set_card_speed(mmc, mwt->mode, false);
2134 * configure the bus width AND the ddr mode
2135 * (card). The host side will be taken care
2136 * of in the next step
2138 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2139 err = mmc_switch(mmc,
2140 EXT_CSD_CMD_SET_NORMAL,
2142 ecbw->ext_csd_bits);
2147 /* configure the bus mode (host) */
2148 mmc_select_mode(mmc, mwt->mode);
2149 mmc_set_clock(mmc, mmc->tran_speed,
2151 #ifdef MMC_SUPPORTS_TUNING
2153 /* execute tuning if needed */
2155 err = mmc_execute_tuning(mmc,
2158 pr_debug("tuning failed\n");
2165 /* do a transfer to check the configuration */
2166 err = mmc_read_and_compare_ext_csd(mmc);
/* failure path: restore voltage and drop to 1-bit legacy */
2170 mmc_set_signal_voltage(mmc, old_voltage);
2171 /* if an error occurred, revert to a safer bus mode */
2172 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2173 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2174 mmc_select_mode(mmc, MMC_LEGACY);
2175 mmc_set_bus_width(mmc, 1);
/* reached only when every (mode, width) candidate failed */
2179 pr_err("unable to select a mode\n");
2185 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY builds avoid malloc(): one static EXT_CSD backup buffer */
2186 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/* MMC v4+ part of device bring-up: read EXT_CSD and populate version,
 * capacity, partition layout and erase/WP group sizes on @mmc.
 * No-op for SD cards and pre-v4 MMC. */
2189 static int mmc_startup_v4(struct mmc *mmc)
2193 bool has_parts = false;
2194 bool part_completed;
/* indexed by EXT_CSD_REV; entries are on elided lines */
2195 static const u32 mmc_versions[] = {
2207 #if CONFIG_IS_ENABLED(MMC_TINY)
2208 u8 *ext_csd = ext_csd_bkup;
2210 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2214 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2216 err = mmc_send_ext_csd(mmc, ext_csd);
2220 /* store the ext csd for future reference */
2222 mmc->ext_csd = ext_csd;
/* non-TINY build: heap-allocated copy of EXT_CSD */
2224 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2226 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2229 /* check ext_csd version and capacity */
2230 err = mmc_send_ext_csd(mmc, ext_csd);
2234 /* store the ext csd for future reference */
2236 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2239 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* unknown/newer revision than our table covers */
2241 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2244 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2246 if (mmc->version >= MMC_VERSION_4_2) {
2248 * According to the JEDEC Standard, the value of
2249 * ext_csd's capacity is valid if the value is more
/* SEC_CNT is little-endian over 4 consecutive EXT_CSD bytes */
2252 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2253 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2254 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2255 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2256 capacity *= MMC_MAX_BLOCK_LEN;
/* only trust SEC_CNT-derived capacity above 2 GiB */
2257 if ((capacity >> 20) > 2 * 1024)
2258 mmc->capacity_user = capacity;
2261 if (mmc->version >= MMC_VERSION_4_5)
2262 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2264 /* The partition data may be non-zero but it is only
2265 * effective if PARTITION_SETTING_COMPLETED is set in
2266 * EXT_CSD, so ignore any data if this bit is not set,
2267 * except for enabling the high-capacity group size
2268 * definition (see below).
2270 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2271 EXT_CSD_PARTITION_SETTING_COMPLETED);
2273 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2274 /* Some eMMC set the value too low so set a minimum */
2275 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2276 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2278 /* store the partition info of emmc */
2279 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2280 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2281 ext_csd[EXT_CSD_BOOT_MULT])
2282 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2283 if (part_completed &&
2284 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2285 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot/RPMB sizes are given in 128 KiB units (<< 17 = * 128 KiB) */
2287 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2289 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* four general-purpose partitions, 3 size bytes each */
2291 for (i = 0; i < 4; i++) {
2292 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2293 uint mult = (ext_csd[idx + 2] << 16) +
2294 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2297 if (!part_completed)
2299 mmc->capacity_gp[i] = mult;
2300 mmc->capacity_gp[i] *=
2301 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2302 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* << 19 = * 512 KiB, the HC group size unit */
2303 mmc->capacity_gp[i] <<= 19;
2306 #ifndef CONFIG_SPL_BUILD
2307 if (part_completed) {
2308 mmc->enh_user_size =
2309 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2310 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2311 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2312 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2313 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2314 mmc->enh_user_size <<= 19;
2315 mmc->enh_user_start =
2316 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2317 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2318 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2319 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2320 if (mmc->high_capacity)
2321 mmc->enh_user_start <<= 9;
2326 * Host needs to enable ERASE_GRP_DEF bit if device is
2327 * partitioned. This bit will be lost every time after a reset
2328 * or power off. This will affect erase size.
2332 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2333 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2336 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2337 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy consistent with the card */
2342 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2345 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2346 #if CONFIG_IS_ENABLED(MMC_WRITE)
2347 /* Read out group size from ext_csd */
2348 mmc->erase_grp_size =
2349 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2352 * if high capacity and partition setting completed
2353 * SEC_COUNT is valid even if it is smaller than 2 GiB
2354 * JEDEC Standard JESD84-B45, 6.2.4
2356 if (mmc->high_capacity && part_completed) {
2357 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2358 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2359 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2360 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2361 capacity *= MMC_MAX_BLOCK_LEN;
2362 mmc->capacity_user = capacity;
2365 #if CONFIG_IS_ENABLED(MMC_WRITE)
2367 /* Calculate the group size from the csd value. */
2368 int erase_gsz, erase_gmul;
2370 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2371 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2372 mmc->erase_grp_size = (erase_gsz + 1)
2376 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2377 mmc->hc_wp_grp_size = 1024
2378 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2379 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2382 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2387 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path (non-TINY): the heap copy is released on elided lines;
 * clear the pointer so nothing dereferences freed memory */
2390 mmc->ext_csd = NULL;
/* Main identification/initialisation sequence after the operating
 * conditions have been negotiated: read CID/CSD, assign/fetch the RCA,
 * derive version and capacity, select the card, run the v4 EXT_CSD
 * path, negotiate mode/width and fill in the block-device descriptor. */
2395 static int mmc_startup(struct mmc *mmc)
2401 struct blk_desc *bdesc;
2403 #ifdef CONFIG_MMC_SPI_CRC_ON
2404 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2405 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2406 cmd.resp_type = MMC_RSP_R1;
2408 err = mmc_send_cmd(mmc, &cmd, NULL);
2414 /* Put the Card in Identify Mode */
2415 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2416 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2417 cmd.resp_type = MMC_RSP_R2;
2420 err = mmc_send_cmd(mmc, &cmd, NULL);
2422 #ifdef CONFIG_MMC_QUIRKS
2423 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2426 * It has been seen that SEND_CID may fail on the first
2427 * attempt, let's try a few more time
2430 err = mmc_send_cmd(mmc, &cmd, NULL);
2433 } while (retries--);
2440 memcpy(mmc->cid, cmd.response, 16);
2443 * For MMC cards, set the Relative Address.
2444 * For SD cards, get the Relative Address.
2445 * This also puts the cards into Standby State
2447 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2448 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2449 cmd.cmdarg = mmc->rca << 16;
2450 cmd.resp_type = MMC_RSP_R6;
2452 err = mmc_send_cmd(mmc, &cmd, NULL);
/* for SD the card chose the RCA; read it from the R6 response */
2458 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2461 /* Get the Card-Specific Data */
2462 cmd.cmdidx = MMC_CMD_SEND_CSD;
2463 cmd.resp_type = MMC_RSP_R2;
2464 cmd.cmdarg = mmc->rca << 16;
2466 err = mmc_send_cmd(mmc, &cmd, NULL);
2471 mmc->csd[0] = cmd.response[0];
2472 mmc->csd[1] = cmd.response[1];
2473 mmc->csd[2] = cmd.response[2];
2474 mmc->csd[3] = cmd.response[3];
/* pre-v4 MMC: the spec version lives in CSD bits [125:122] */
2476 if (mmc->version == MMC_VERSION_UNKNOWN) {
2477 int version = (cmd.response[0] >> 26) & 0xf;
2481 mmc->version = MMC_VERSION_1_2;
2484 mmc->version = MMC_VERSION_1_4;
2487 mmc->version = MMC_VERSION_2_2;
2490 mmc->version = MMC_VERSION_3;
2493 mmc->version = MMC_VERSION_4;
2496 mmc->version = MMC_VERSION_1_2;
2501 /* divide frequency by 10, since the mults are 10x bigger */
2502 freq = fbase[(cmd.response[0] & 0x7)];
2503 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2505 mmc->legacy_speed = freq * mult;
2506 mmc_select_mode(mmc, MMC_LEGACY);
2508 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2509 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2510 #if CONFIG_IS_ENABLED(MMC_WRITE)
2513 mmc->write_bl_len = mmc->read_bl_len;
2515 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* capacity from C_SIZE/C_SIZE_MULT; layout differs for high capacity */
2518 if (mmc->high_capacity) {
2519 csize = (mmc->csd[1] & 0x3f) << 16
2520 | (mmc->csd[2] & 0xffff0000) >> 16;
2523 csize = (mmc->csd[1] & 0x3ff) << 2
2524 | (mmc->csd[2] & 0xc0000000) >> 30;
2525 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2528 mmc->capacity_user = (csize + 1) << (cmult + 2);
2529 mmc->capacity_user *= mmc->read_bl_len;
2530 mmc->capacity_boot = 0;
2531 mmc->capacity_rpmb = 0;
2532 for (i = 0; i < 4; i++)
2533 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the stack can transfer */
2535 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2536 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2538 #if CONFIG_IS_ENABLED(MMC_WRITE)
2539 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2540 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only if the card says it implements one and the
 * board supplied a value (0xffffffff means "not set") */
2543 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2544 cmd.cmdidx = MMC_CMD_SET_DSR;
2545 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2546 cmd.resp_type = MMC_RSP_NONE;
2547 if (mmc_send_cmd(mmc, &cmd, NULL))
2548 pr_warn("MMC: SET_DSR failed\n");
2551 /* Select the card, and put it into Transfer Mode */
2552 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2553 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2554 cmd.resp_type = MMC_RSP_R1;
2555 cmd.cmdarg = mmc->rca << 16;
2556 err = mmc_send_cmd(mmc, &cmd, NULL);
2563 * For SD, its erase group is always one sector
2565 #if CONFIG_IS_ENABLED(MMC_WRITE)
2566 mmc->erase_grp_size = 1;
2568 mmc->part_config = MMCPART_NOAVAILABLE;
/* v4+ eMMC specifics (EXT_CSD, partitions, versions) */
2570 err = mmc_startup_v4(mmc);
2574 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2578 #if CONFIG_IS_ENABLED(MMC_TINY)
/* TINY builds skip mode negotiation: 1-bit legacy only */
2579 mmc_set_clock(mmc, mmc->legacy_speed, false);
2580 mmc_select_mode(mmc, MMC_LEGACY);
2581 mmc_set_bus_width(mmc, 1);
2584 err = sd_get_capabilities(mmc);
2587 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2589 err = mmc_get_capabilities(mmc);
2592 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2598 mmc->best_mode = mmc->selected_mode;
2600 /* Fix the block length for DDR mode */
2601 if (mmc->ddr_mode) {
2602 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2603 #if CONFIG_IS_ENABLED(MMC_WRITE)
2604 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2608 /* fill in device description */
2609 bdesc = mmc_get_blk_desc(mmc);
2613 bdesc->blksz = mmc->read_bl_len;
2614 bdesc->log2blksz = LOG2(bdesc->blksz);
2615 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2616 #if !defined(CONFIG_SPL_BUILD) || \
2617 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2618 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* decode manufacturer/serial/product/revision out of the CID words */
2619 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2620 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2621 (mmc->cid[3] >> 16) & 0xffff);
2622 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2623 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2624 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2625 (mmc->cid[2] >> 24) & 0xff);
2626 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2627 (mmc->cid[2] >> 16) & 0xf);
2629 bdesc->vendor[0] = 0;
2630 bdesc->product[0] = 0;
2631 bdesc->revision[0] = 0;
2634 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/* Send SD CMD8 (SEND_IF_COND).  A card that echoes the 0xaa check
 * pattern is SD version 2 or later. */
2641 static int mmc_send_if_cond(struct mmc *mmc)
2646 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2647 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2648 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2649 cmd.resp_type = MMC_RSP_R7;
2651 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the check pattern back */
2656 if ((cmd.response[0] & 0xff) != 0xaa)
2659 mmc->version = SD_VERSION_2;
2664 #if !CONFIG_IS_ENABLED(DM_MMC)
2665 /* board-specific MMC power initializations. */
2666 __weak void board_mmc_power_init(void)
/* Locate the supply regulators (DM + regulator builds) or call the
 * board hook (legacy builds).  Missing supplies are not an error. */
2671 static int mmc_power_init(struct mmc *mmc)
2673 #if CONFIG_IS_ENABLED(DM_MMC)
2674 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2677 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2680 pr_debug("%s: No vmmc supply\n", mmc->dev->name)
2682 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2683 &mmc->vqmmc_supply);
2685 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2687 #else /* !CONFIG_DM_MMC */
2689 * Driver model should use a regulator, as above, rather than calling
2690 * out to board code.
2692 board_mmc_power_init();
2698 * put the host in the initial state:
2699 * - turn on Vdd (card power supply)
2700 * - configure the bus width and clock to minimal values
2702 static void mmc_set_initial_state(struct mmc *mmc)
2706 /* First try to set 3.3V. If it fails set to 1.8V */
2707 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2709 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2711 pr_warn("mmc: failed to set signal voltage\n");
2713 mmc_select_mode(mmc, MMC_LEGACY);
2714 mmc_set_bus_width(mmc, 1);
2715 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card's Vdd supply (no-op without DM regulators). */
2718 static int mmc_power_on(struct mmc *mmc)
2720 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2721 if (mmc->vmmc_supply) {
2722 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2725 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the Vdd supply. */
2733 static int mmc_power_off(struct mmc *mmc)
2735 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2736 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2737 if (mmc->vmmc_supply) {
2738 int ret = regulator_set_enable(mmc->vmmc_supply, false);
/* power-off failure is non-fatal; just log it */
2741 pr_debug("Error disabling VMMC supply\n");
/* Full power cycle: off, host-side cycle, delay, on.  Used to recover
 * cards stuck in UHS mode. */
2749 static int mmc_power_cycle(struct mmc *mmc)
2753 ret = mmc_power_off(mmc);
2757 ret = mmc_host_power_cycle(mmc);
2762 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2763 * to be on the safer side.
2766 return mmc_power_on(mmc);
/* Power up the card and negotiate its operating conditions: reset to
 * idle, probe for SD v2, then SD op-cond, falling back to MMC op-cond
 * on timeout.  Disables UHS when a power cycle is impossible. */
2769 int mmc_get_op_cond(struct mmc *mmc)
2771 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2777 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2778 mmc_adapter_card_type_ident();
2780 err = mmc_power_init(mmc);
2784 #ifdef CONFIG_MMC_QUIRKS
2785 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2786 MMC_QUIRK_RETRY_SEND_CID |
2787 MMC_QUIRK_RETRY_APP_CMD;
2790 err = mmc_power_cycle(mmc);
2793 * if power cycling is not supported, we should not try
2794 * to use the UHS modes, because we wouldn't be able to
2795 * recover from an error during the UHS initialization.
2797 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2799 mmc->host_caps &= ~UHS_CAPS;
2800 err = mmc_power_on(mmc);
2805 #if CONFIG_IS_ENABLED(DM_MMC)
2806 /* The device has already been probed ready for use */
2808 /* made sure it's not NULL earlier */
2809 err = mmc->cfg->ops->init(mmc);
2816 mmc_set_initial_state(mmc);
2818 /* Reset the Card */
2819 err = mmc_go_idle(mmc);
2824 /* The internal partition reset to user partition(0) at every CMD0*/
2825 mmc_get_blk_desc(mmc)->hwpart = 0;
2827 /* Test for SD version 2 */
2828 err = mmc_send_if_cond(mmc);
2830 /* Now try to get the SD card's operating condition */
2831 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS attempt needs a power cycle before retrying */
2832 if (err && uhs_en) {
2834 mmc_power_cycle(mmc);
2838 /* If the command timed out, we check for an MMC card */
2839 if (err == -ETIMEDOUT) {
2840 err = mmc_send_op_cond(mmc);
2843 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2844 pr_err("Card did not respond to voltage select!\n");
/* First phase of initialisation: check card presence, then negotiate
 * operating conditions.  Sets init_in_progress so mmc_init() can later
 * complete the sequence. */
2853 int mmc_start_init(struct mmc *mmc)
2859 * all hosts are capable of 1 bit bus-width and able to use the legacy
2862 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2863 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2864 #if CONFIG_IS_ENABLED(DM_MMC)
2865 mmc_deferred_probe(mmc);
2867 #if !defined(CONFIG_MMC_BROKEN_CD)
2868 no_card = mmc_getcd(mmc) == 0;
2872 #if !CONFIG_IS_ENABLED(DM_MMC)
2873 /* we pretend there's no card when init is NULL */
2874 no_card = no_card || (mmc->cfg->ops->init == NULL);
2878 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2879 pr_err("MMC: no card present\n");
2884 err = mmc_get_op_cond(mmc);
2887 mmc->init_in_progress = 1;
/* Second phase: finish pending op-cond negotiation, then run the full
 * startup (identification, mode selection, block device setup). */
2892 static int mmc_complete_init(struct mmc *mmc)
2896 mmc->init_in_progress = 0;
2897 if (mmc->op_cond_pending)
2898 err = mmc_complete_op_cond(mmc);
2901 err = mmc_startup(mmc);
/* Public entry point: runs both init phases (the first only if not
 * already started) and reports the elapsed time at pr_info level. */
2909 int mmc_init(struct mmc *mmc)
2912 __maybe_unused ulong start;
2913 #if CONFIG_IS_ENABLED(DM_MMC)
2914 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2921 start = get_timer(0);
2923 if (!mmc->init_in_progress)
2924 err = mmc_start_init(mmc);
2927 err = mmc_complete_init(mmc);
2929 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2934 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2935 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2936 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/* Drop the card out of the high-speed (UHS / HS200 / HS400) modes by
 * re-running mode selection with those capabilities masked off, e.g.
 * before handing the controller to an OS. */
2937 int mmc_deinit(struct mmc *mmc)
2945 caps_filtered = mmc->card_caps &
2946 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2947 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2948 MMC_CAP(UHS_SDR104));
2950 return sd_select_mode_and_width(mmc, caps_filtered);
2952 caps_filtered = mmc->card_caps &
2953 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2955 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Record the board-supplied DSR value; body largely elided here. */
2960 int mmc_set_dsr(struct mmc *mmc, u16 val)
2966 /* CPU-specific MMC initializations. */
2967 __weak int cpu_mmc_init(bd_t *bis)
2972 /* board-specific MMC initializations. */
2973 __weak int board_mmc_init(bd_t *bis)
/* Mark a device for initialisation at probe time rather than first use */
2978 void mmc_set_preinit(struct mmc *mmc, int preinit)
2980 mmc->preinit = preinit;
2983 #if CONFIG_IS_ENABLED(DM_MMC)
/* DM build: bind/probe every device in the MMC uclass. */
2984 static int mmc_probe(bd_t *bis)
2988 struct udevice *dev;
2990 ret = uclass_get(UCLASS_MMC, &uc);
2995 * Try to add them in sequence order. Really with driver model we
2996 * should allow holes, but the current MMC list does not allow that.
2997 * So if we request 0, 1, 3 we will get 0, 1, 2.
2999 for (i = 0; ; i++) {
3000 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3004 uclass_foreach_dev(dev, uc) {
3005 ret = device_probe(dev);
/* a failed probe is logged but does not stop the other devices */
3007 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Legacy build: delegate device registration to the board hook. */
3013 static int mmc_probe(bd_t *bis)
3015 if (board_mmc_init(bis) < 0)
/* One-time subsystem initialisation; safe to call repeatedly. */
3022 int mmc_initialize(bd_t *bis)
3024 static int initialized = 0;
3026 if (initialized) /* Avoid initializing mmc multiple times */
3030 #if !CONFIG_IS_ENABLED(BLK)
3031 #if !CONFIG_IS_ENABLED(MMC_TINY)
3035 ret = mmc_probe(bis);
3039 #ifndef CONFIG_SPL_BUILD
3040 print_mmc_devices(',');
3047 #if CONFIG_IS_ENABLED(DM_MMC)
/* Probe MMC uclass device @num and initialise its struct mmc; on the
 * FSL eSDHC adapter-ident config the device is only marked for
 * pre-initialisation.  Tail of the function is elided in this view. */
3048 int mmc_init_device(int num)
3050 struct udevice *dev;
3054 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3058 m = mmc_get_mmc_dev(dev);
3061 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3062 mmc_set_preinit(m, 1);
3071 #ifdef CONFIG_CMD_BKOPS_ENABLE
3072 int mmc_set_bkops_enable(struct mmc *mmc)
3075 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3077 err = mmc_send_ext_csd(mmc, ext_csd);
3079 puts("Could not get ext_csd register values\n");
3083 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3084 puts("Background operations not supported on device\n");
3085 return -EMEDIUMTYPE;
3088 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3089 puts("Background operations already enabled\n");
3093 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3095 puts("Failed to enable manual background operations\n");
3099 puts("Enabled manual background operations\n");