1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
7 * Based vaguely on the Linux code
16 #include <dm/device-internal.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <power/regulator.h>
25 #include <linux/list.h>
27 #include "mmc_private.h"
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
31 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
33 #if !CONFIG_IS_ENABLED(DM_MMC)
35 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
/* Weak default board hook for write-protect state; boards may override. */
40 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * Query card write-protect (non-DM path): ask the board hook first,
 * then fall back to the controller's getwp op if one is provided.
 * NOTE(review): interior lines elided in this listing — the default
 * return path is not visible here.
 */
45 int mmc_getwp(struct mmc *mmc)
49 wp = board_mmc_getwp(mmc);
52 if (mmc->cfg->ops->getwp)
53 wp = mmc->cfg->ops->getwp(mmc);
61 __weak int board_mmc_getcd(struct mmc *mmc)
67 #ifdef CONFIG_MMC_TRACE
/* Trace helper (CONFIG_MMC_TRACE): dump command index and argument before send. */
68 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
70 printf("CMD_SEND:%d\n", cmd->cmdidx);
71 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
/*
 * Trace helper (CONFIG_MMC_TRACE): dump the return code and the response
 * words, formatted per response type. For R2 (CSD/CID) the raw response
 * bytes are also hex-dumped byte-reversed (*ptr--), since the response
 * words are stored in host order.
 */
74 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
80 printf("\t\tRET\t\t\t %d\n", ret);
82 switch (cmd->resp_type) {
84 printf("\t\tMMC_RSP_NONE\n");
87 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
91 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
95 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
97 printf("\t\t \t\t 0x%08x \n",
99 printf("\t\t \t\t 0x%08x \n",
101 printf("\t\t \t\t 0x%08x \n",
104 printf("\t\t\t\t\tDUMPING DATA\n");
105 for (i = 0; i < 4; i++) {
107 printf("\t\t\t\t\t%03d - ", i*4);
108 ptr = (u8 *)&cmd->response[i];
110 for (j = 0; j < 4; j++)
111 printf("%02x ", *ptr--);
116 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
120 printf("\t\tERROR MMC rsp not supported\n");
/*
 * Trace helper: print the card's current state, extracted from bits
 * [12:9] of the CMD13 status response (hence the >> 9 shift).
 */
126 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
130 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
131 printf("CURR STATE:%d\n", status);
135 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Map a bus_mode enum value to a human-readable name for verbose/debug
 * output. Out-of-range values fall through to "Unknown mode".
 */
136 const char *mmc_mode_name(enum bus_mode mode)
138 static const char *const names[] = {
139 [MMC_LEGACY] = "MMC legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
151 [MMC_HS_400_ES] = "HS400ES (200MHz)",
154 if (mode >= MMC_MODES_END)
155 return "Unknown mode";
/*
 * Return the nominal bus clock (Hz) for a given bus mode. MMC_LEGACY is
 * special-cased to the per-card legacy_speed; out-of-range modes are
 * rejected (tail of the range check is elided in this listing).
 */
161 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
163 static const int freqs[] = {
164 [MMC_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
176 [MMC_HS_400_ES] = 200000000,
179 if (mode == MMC_LEGACY)
180 return mmc->legacy_speed;
181 else if (mode >= MMC_MODES_END)
/*
 * Record the selected bus mode on the mmc struct: cache the mode, its
 * nominal frequency, and whether it is a DDR mode. Does not touch the
 * controller; callers follow up with mmc_set_clock()/mmc_set_ios().
 */
187 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
189 mmc->selected_mode = mode;
190 mmc->tran_speed = mmc_mode2freq(mmc, mode);
191 mmc->ddr_mode = mmc_is_mode_ddr(mode);
192 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
193 mmc->tran_speed / 1000000);
197 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Non-DM command dispatch: wrap the controller's send_cmd op with the
 * optional trace hooks (no-ops unless CONFIG_MMC_TRACE is enabled).
 */
198 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
202 mmmc_trace_before_send(mmc, cmd);
203 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
204 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * Issue CMD13 (SEND_STATUS) and return the raw status word via *status.
 * The RCA argument is skipped in SPI mode (no RCA concept there).
 * NOTE(review): the retry loop implied by 'retries = 5' is elided from
 * this listing — confirm against the full source.
 */
210 int mmc_send_status(struct mmc *mmc, unsigned int *status)
213 int err, retries = 5;
215 cmd.cmdidx = MMC_CMD_SEND_STATUS;
216 cmd.resp_type = MMC_RSP_R1;
217 if (!mmc_host_is_spi(mmc))
218 cmd.cmdarg = mmc->rca << 16;
221 err = mmc_send_cmd(mmc, &cmd, NULL);
223 mmc_trace_state(mmc, &cmd);
224 *status = cmd.response[0];
228 mmc_trace_state(mmc, &cmd);
/*
 * Wait until the card leaves busy state. Prefer hardware dat0 polling
 * (mmc_wait_dat0, timeout in us) when available; otherwise poll CMD13
 * once per millisecond, decrementing timeout_ms. Any status error bit
 * set in MMC_STATUS_MASK is reported and treated as failure.
 */
232 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
237 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
242 err = mmc_send_status(mmc, &status);
246 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
247 (status & MMC_STATUS_CURR_STATE) !=
251 if (status & MMC_STATUS_MASK) {
252 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
253 pr_err("Status Error: 0x%08x\n", status);
258 if (timeout_ms-- <= 0)
264 if (timeout_ms <= 0) {
265 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
266 pr_err("Timeout waiting card ready\n")
/*
 * Issue CMD16 (SET_BLOCKLEN). With CONFIG_MMC_QUIRKS, cards flagged
 * MMC_QUIRK_RETRY_SET_BLOCKLEN get the command retried, since the first
 * attempt has been observed to fail on some parts.
 * NOTE(review): early-out conditions (e.g. DDR/SPI cases) are elided
 * from this listing.
 */
274 int mmc_set_blocklen(struct mmc *mmc, int len)
282 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
283 cmd.resp_type = MMC_RSP_R1;
286 err = mmc_send_cmd(mmc, &cmd, NULL);
288 #ifdef CONFIG_MMC_QUIRKS
289 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
292 * It has been seen that SET_BLOCKLEN may fail on the first
293 * attempt, let's try a few more time
296 err = mmc_send_cmd(mmc, &cmd, NULL);
306 #ifdef MMC_SUPPORTS_TUNING
/* Standard 64-byte tuning block pattern for 4-bit bus (CMD19/CMD21). */
307 static const u8 tuning_blk_pattern_4bit[] = {
308 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
309 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
310 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
311 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
312 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
313 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
314 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
315 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard 128-byte tuning block pattern for 8-bit bus (CMD21). */
318 static const u8 tuning_blk_pattern_8bit[] = {
319 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
320 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
321 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
322 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
323 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
324 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
325 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
326 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
327 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
328 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
329 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
330 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
331 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
332 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
333 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
334 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Execute one tuning command (CMD19/CMD21 per opcode): read one block
 * into a cache-aligned buffer and compare it against the reference
 * pattern matching the current bus width. Returns nonzero on mismatch
 * or command failure (exact error paths elided in this listing).
 */
337 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
340 struct mmc_data data;
341 const u8 *tuning_block_pattern;
344 if (mmc->bus_width == 8) {
345 tuning_block_pattern = tuning_blk_pattern_8bit;
346 size = sizeof(tuning_blk_pattern_8bit);
347 } else if (mmc->bus_width == 4) {
348 tuning_block_pattern = tuning_blk_pattern_4bit;
349 size = sizeof(tuning_blk_pattern_4bit);
354 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
358 cmd.resp_type = MMC_RSP_R1;
360 data.dest = (void *)data_buf;
362 data.blocksize = size;
363 data.flags = MMC_DATA_READ;
365 err = mmc_send_cmd(mmc, &cmd, &data);
369 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * Read blkcnt blocks starting at 'start' into dst. Uses CMD18 for
 * multi-block, CMD17 for single-block; the command argument is a block
 * address for high-capacity cards, a byte offset otherwise. Multi-block
 * reads are terminated with CMD12 (STOP_TRANSMISSION).
 */
376 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
380 struct mmc_data data;
383 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
385 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
387 if (mmc->high_capacity)
390 cmd.cmdarg = start * mmc->read_bl_len;
392 cmd.resp_type = MMC_RSP_R1;
395 data.blocks = blkcnt;
396 data.blocksize = mmc->read_bl_len;
397 data.flags = MMC_DATA_READ;
399 if (mmc_send_cmd(mmc, &cmd, &data))
403 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
405 cmd.resp_type = MMC_RSP_R1b;
406 if (mmc_send_cmd(mmc, &cmd, NULL)) {
407 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
408 pr_err("mmc fail to send stop cmd\n")
417 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Maximum blocks per transfer: let the controller decide via its
 * optional get_b_max op, otherwise use the static cfg->b_max.
 */
418 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
420 if (mmc->cfg->ops->get_b_max)
421 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
423 return mmc->cfg->b_max;
427 #if CONFIG_IS_ENABLED(BLK)
/*
 * Block-device read entry point (two signatures: udevice-based under
 * CONFIG_BLK, blk_desc-based otherwise). Selects the hw partition,
 * range-checks against the device LBA count, sets the block length,
 * then reads in chunks of at most b_max blocks.
 */
428 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
430 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
434 #if CONFIG_IS_ENABLED(BLK)
435 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
437 int dev_num = block_dev->devnum;
439 lbaint_t cur, blocks_todo = blkcnt;
445 struct mmc *mmc = find_mmc_device(dev_num);
449 if (CONFIG_IS_ENABLED(MMC_TINY))
450 err = mmc_switch_part(mmc, block_dev->hwpart);
452 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
457 if ((start + blkcnt) > block_dev->lba) {
458 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
459 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
460 start + blkcnt, block_dev->lba);
465 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
466 pr_debug("%s: Failed to set blocklen\n", __func__);
470 b_max = mmc_get_b_max(mmc, dst, blkcnt);
473 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
474 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
475 pr_debug("%s: Failed to read blocks\n", __func__);
480 dst += cur * mmc->read_bl_len;
481 } while (blocks_todo > 0);
/* Send CMD0 (GO_IDLE_STATE) to reset the card to idle; no response expected. */
486 static int mmc_go_idle(struct mmc *mmc)
493 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
495 cmd.resp_type = MMC_RSP_NONE;
497 err = mmc_send_cmd(mmc, &cmd, NULL);
507 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * UHS voltage switch sequence: issue CMD11, verify the card accepted it,
 * wait for the card to drive dat[0:3] low, gate the clock, change the
 * host signal voltage, re-enable the clock, then confirm the card has
 * released dat[0:3]. Switching *to* 3.3V skips CMD11 and just sets the
 * host voltage directly.
 */
508 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
514 * Send CMD11 only if the request is to switch the card to
517 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
518 return mmc_set_signal_voltage(mmc, signal_voltage);
520 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
522 cmd.resp_type = MMC_RSP_R1;
524 err = mmc_send_cmd(mmc, &cmd, NULL);
528 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
532 * The card should drive cmd and dat[0:3] low immediately
533 * after the response of cmd11, but wait 100 us to be sure
535 err = mmc_wait_dat0(mmc, 0, 100);
542 * During a signal voltage level switch, the clock must be gated
543 * for 5 ms according to the SD spec
545 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
547 err = mmc_set_signal_voltage(mmc, signal_voltage);
551 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
553 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
556 * Failure to switch is indicated by the card holding
557 * dat[0:3] low. Wait for at least 1 ms according to spec
559 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * SD init: loop ACMD41 (APP_CMD + SD_APP_SEND_OP_COND) until the card
 * reports not-busy. Advertises host voltages, HCS for SD 2.0+ cards and
 * S18R when UHS is requested. On success records the OCR, derives the
 * SD version, and (with UHS support) starts the 1.8V switch when the
 * card acknowledged S18A. SPI hosts read the OCR separately via CMD58.
 */
569 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
576 cmd.cmdidx = MMC_CMD_APP_CMD;
577 cmd.resp_type = MMC_RSP_R1;
580 err = mmc_send_cmd(mmc, &cmd, NULL);
585 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
586 cmd.resp_type = MMC_RSP_R3;
589 * Most cards do not answer if some reserved bits
590 * in the ocr are set. However, Some controller
591 * can set bit 7 (reserved for low voltages), but
592 * how to manage low voltages SD card is not yet
595 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
596 (mmc->cfg->voltages & 0xff8000);
598 if (mmc->version == SD_VERSION_2)
599 cmd.cmdarg |= OCR_HCS;
602 cmd.cmdarg |= OCR_S18R;
604 err = mmc_send_cmd(mmc, &cmd, NULL);
609 if (cmd.response[0] & OCR_BUSY)
618 if (mmc->version != SD_VERSION_2)
619 mmc->version = SD_VERSION_1_0;
621 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
622 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
623 cmd.resp_type = MMC_RSP_R3;
626 err = mmc_send_cmd(mmc, &cmd, NULL);
632 mmc->ocr = cmd.response[0];
634 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
635 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
637 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
643 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * One CMD1 (SEND_OP_COND) iteration. With use_arg set (and not SPI),
 * advertise HCS plus the intersection of host voltages and the card's
 * previously reported OCR voltage window. Stores the new OCR on success.
 */
649 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
654 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
655 cmd.resp_type = MMC_RSP_R3;
657 if (use_arg && !mmc_host_is_spi(mmc))
658 cmd.cmdarg = OCR_HCS |
659 (mmc->cfg->voltages &
660 (mmc->ocr & OCR_VOLTAGE_MASK)) |
661 (mmc->ocr & OCR_ACCESS_MODE);
663 err = mmc_send_cmd(mmc, &cmd, NULL);
666 mmc->ocr = cmd.response[0];
/*
 * Start eMMC init: poll CMD1 (first iteration with zero argument to
 * discover the card's capabilities) until OCR_BUSY is set (the busy
 * flag is active-low on the wire, hence the inverted sense) or the
 * timer expires. Marks op_cond_pending so mmc_complete_op_cond() can
 * finish the handshake later.
 */
670 static int mmc_send_op_cond(struct mmc *mmc)
676 /* Some cards seem to need this */
679 start = get_timer(0);
680 /* Asking to the card its capabilities */
682 err = mmc_send_op_cond_iter(mmc, i != 0);
686 /* exit if not busy (flag seems to be inverted) */
687 if (mmc->ocr & OCR_BUSY)
690 if (get_timer(start) > timeout)
694 mmc->op_cond_pending = 1;
/*
 * Finish the deferred CMD1 handshake started by mmc_send_op_cond():
 * keep polling until OCR_BUSY is set or the timer expires, read the
 * OCR via CMD58 for SPI hosts, then record version-unknown (refined
 * later by CSD parsing) and the high-capacity flag from OCR_HCS.
 */
698 static int mmc_complete_op_cond(struct mmc *mmc)
705 mmc->op_cond_pending = 0;
706 if (!(mmc->ocr & OCR_BUSY)) {
707 /* Some cards seem to need this */
710 start = get_timer(0);
712 err = mmc_send_op_cond_iter(mmc, 1);
715 if (mmc->ocr & OCR_BUSY)
717 if (get_timer(start) > timeout)
723 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
724 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
725 cmd.resp_type = MMC_RSP_R3;
728 err = mmc_send_cmd(mmc, &cmd, NULL);
733 mmc->ocr = cmd.response[0];
736 mmc->version = MMC_VERSION_UNKNOWN;
738 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * Read the 512-byte EXT_CSD register via CMD8 (SEND_EXT_CSD) into the
 * caller-supplied buffer (must be MMC_MAX_BLOCK_LEN bytes).
 */
745 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
748 struct mmc_data data;
751 /* Get the Card Status Register */
752 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
753 cmd.resp_type = MMC_RSP_R1;
756 data.dest = (char *)ext_csd;
758 data.blocksize = MMC_MAX_BLOCK_LEN;
759 data.flags = MMC_DATA_READ;
761 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * Core CMD6 (SWITCH) implementation: write one EXT_CSD byte, then wait
 * for the card to finish. The timeout comes from GENERIC_CMD6_TIME
 * (or PARTITION_SWITCH_TIME for partition switches), both in 10ms
 * units. Completion is detected via dat0 polling when the host supports
 * it, otherwise by CMD13 status polling (or a plain wait when neither
 * polling method is allowed/available).
 */
766 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
769 unsigned int status, start;
771 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
772 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
773 (index == EXT_CSD_PART_CONF);
777 if (mmc->gen_cmd6_time)
778 timeout_ms = mmc->gen_cmd6_time * 10;
780 if (is_part_switch && mmc->part_switch_time)
781 timeout_ms = mmc->part_switch_time * 10;
783 cmd.cmdidx = MMC_CMD_SWITCH;
784 cmd.resp_type = MMC_RSP_R1b;
785 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
790 ret = mmc_send_cmd(mmc, &cmd, NULL);
791 } while (ret && retries-- > 0);
796 start = get_timer(0);
798 /* poll dat0 for rdy/busy status */
799 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
800 if (ret && ret != -ENOSYS)
804 * In cases when not allowed to poll by using CMD13 or because we aren't
805 * capable of polling by using mmc_wait_dat0, then rely on waiting the
806 * stated timeout to be sufficient.
808 if (ret == -ENOSYS && !send_status) {
813 /* Finally wait until the card is ready or indicates a failure
814 * to switch. It doesn't hurt to use CMD13 here even if send_status
815 * is false, because by now (after 'timeout_ms' ms) the bus should be
819 ret = mmc_send_status(mmc, &status);
821 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
822 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
826 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
829 } while (get_timer(start) < timeout_ms);
/* Public CMD6 wrapper: always waits for completion via status polling. */
834 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
836 return __mmc_switch(mmc, set, index, value, true);
/* Permanently enable power-on write protection of the boot partitions. */
839 int mmc_boot_wp(struct mmc *mmc)
841 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
844 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program HS_TIMING in EXT_CSD for the requested bus mode. When
 * downgrading from HS200/HS400 (hsdowngrade), CMD13 completion polling
 * is skipped (the bus is still clocked too fast for reliable status
 * reads) and the host clock is dropped to HS before re-reading EXT_CSD.
 * For plain HS/HS-52 the switch is verified by reading EXT_CSD back.
 */
845 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
851 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
857 speed_bits = EXT_CSD_TIMING_HS;
859 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
861 speed_bits = EXT_CSD_TIMING_HS200;
864 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
866 speed_bits = EXT_CSD_TIMING_HS400;
869 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
871 speed_bits = EXT_CSD_TIMING_HS400;
875 speed_bits = EXT_CSD_TIMING_LEGACY;
881 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
882 speed_bits, !hsdowngrade);
886 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
887 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
889 * In case the eMMC is in HS200/HS400 mode and we are downgrading
890 * to HS mode, the card clock are still running much faster than
891 * the supported HS mode clock, so we can not reliably read out
892 * Extended CSD. Reconfigure the controller to run at HS mode.
895 mmc_select_mode(mmc, MMC_HS);
896 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
900 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
901 /* Now check to see that it worked */
902 err = mmc_send_ext_csd(mmc, test_csd);
906 /* No high-speed support */
907 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Derive the card's capability mask from the cached EXT_CSD: bus
 * widths, HS/HS-52/DDR-52, and (config-permitting) HS200/HS400/
 * HS400ES from EXT_CSD_CARD_TYPE and EXT_CSD_STROBE_SUPPORT.
 * Pre-version-4 cards and SPI hosts are limited to the defaults.
 */
914 static int mmc_get_capabilities(struct mmc *mmc)
916 u8 *ext_csd = mmc->ext_csd;
919 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
921 if (mmc_host_is_spi(mmc))
924 /* Only version 4 supports high-speed */
925 if (mmc->version < MMC_VERSION_4)
929 pr_err("No ext_csd found!\n"); /* this should never happen */
933 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
935 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
936 mmc->cardtype = cardtype;
938 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
939 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
940 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
941 mmc->card_caps |= MMC_MODE_HS200;
944 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
945 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
946 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
947 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
948 mmc->card_caps |= MMC_MODE_HS400;
951 if (cardtype & EXT_CSD_CARD_TYPE_52) {
952 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
953 mmc->card_caps |= MMC_MODE_DDR_52MHz;
954 mmc->card_caps |= MMC_MODE_HS_52MHz;
956 if (cardtype & EXT_CSD_CARD_TYPE_26)
957 mmc->card_caps |= MMC_MODE_HS;
959 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
960 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
961 (mmc->card_caps & MMC_MODE_HS400)) {
962 mmc->card_caps |= MMC_MODE_HS400_ES;
/*
 * Set mmc->capacity according to the selected hardware partition
 * (0 = user area, boot, RPMB, 4..7 = GP partitions) and refresh the
 * block descriptor's LBA count from it.
 */
970 static int mmc_set_capacity(struct mmc *mmc, int part_num)
974 mmc->capacity = mmc->capacity_user;
978 mmc->capacity = mmc->capacity_boot;
981 mmc->capacity = mmc->capacity_rpmb;
987 mmc->capacity = mmc->capacity_gp[part_num - 4];
993 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * Switch the active hardware partition by rewriting PARTITION_ACCESS in
 * EXT_CSD_PART_CONF (retried on failure). Capacity/hwpart bookkeeping
 * is updated when the switch succeeds, or when it fails with -ENODEV
 * while returning to the raw user area (part_num == 0).
 */
998 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1004 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1006 (mmc->part_config & ~PART_ACCESS_MASK)
1007 | (part_num & PART_ACCESS_MASK));
1008 } while (ret && retry--);
1011 * Set the capacity if the switch succeeded or was intended
1012 * to return to representing the raw device.
1014 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1015 ret = mmc_set_capacity(mmc, part_num);
1016 mmc_get_blk_desc(mmc)->hwpart = part_num;
1022 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP
 * partitions, write-reliability). Three phases selected by 'mode':
 * CHECK validates the request against EXT_CSD limits only; SET writes
 * the partition geometry; COMPLETE additionally writes WR_REL_SET and
 * sets PARTITION_SETTING_COMPLETED (effective after power cycle, which
 * is why the in-memory mmc struct is not updated here).
 * Requires eMMC >= 4.41, partitioning support, and a defined HC WP
 * group size; all sizes must be HC WP group aligned.
 */
1023 int mmc_hwpart_config(struct mmc *mmc,
1024 const struct mmc_hwpart_conf *conf,
1025 enum mmc_hwpart_conf_mode mode)
1030 u32 gp_size_mult[4];
1031 u32 max_enh_size_mult;
1032 u32 tot_enh_size_mult = 0;
1035 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1037 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1040 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1041 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1042 return -EMEDIUMTYPE;
1045 if (!(mmc->part_support & PART_SUPPORT)) {
1046 pr_err("Card does not support partitioning\n");
1047 return -EMEDIUMTYPE;
1050 if (!mmc->hc_wp_grp_size) {
1051 pr_err("Card does not define HC WP group size\n");
1052 return -EMEDIUMTYPE;
1055 /* check partition alignment and total enhanced size */
1056 if (conf->user.enh_size) {
1057 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1058 conf->user.enh_start % mmc->hc_wp_grp_size) {
1059 pr_err("User data enhanced area not HC WP group "
1063 part_attrs |= EXT_CSD_ENH_USR;
1064 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1065 if (mmc->high_capacity) {
1066 enh_start_addr = conf->user.enh_start;
/* byte-addressed cards: convert the 512-byte sector start to bytes */
1068 enh_start_addr = (conf->user.enh_start << 9);
1074 tot_enh_size_mult += enh_size_mult;
1076 for (pidx = 0; pidx < 4; pidx++) {
1077 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1078 pr_err("GP%i partition not HC WP group size "
1079 "aligned\n", pidx+1);
1082 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1083 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1084 part_attrs |= EXT_CSD_ENH_GP(pidx);
1085 tot_enh_size_mult += gp_size_mult[pidx];
1089 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1090 pr_err("Card does not support enhanced attribute\n");
1091 return -EMEDIUMTYPE;
1094 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field in EXT_CSD */
1099 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1100 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1101 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1102 if (tot_enh_size_mult > max_enh_size_mult) {
1103 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1104 tot_enh_size_mult, max_enh_size_mult);
1105 return -EMEDIUMTYPE;
1108 /* The default value of EXT_CSD_WR_REL_SET is device
1109 * dependent, the values can only be changed if the
1110 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1111 * changed only once and before partitioning is completed. */
1112 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1113 if (conf->user.wr_rel_change) {
1114 if (conf->user.wr_rel_set)
1115 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1117 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1119 for (pidx = 0; pidx < 4; pidx++) {
1120 if (conf->gp_part[pidx].wr_rel_change) {
1121 if (conf->gp_part[pidx].wr_rel_set)
1122 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1124 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1128 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1129 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1130 puts("Card does not support host controlled partition write "
1131 "reliability settings\n");
1132 return -EMEDIUMTYPE;
1135 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1136 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1137 pr_err("Card already partitioned\n");
1141 if (mode == MMC_HWPART_CONF_CHECK)
1144 /* Partitioning requires high-capacity size definitions */
1145 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1146 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1147 EXT_CSD_ERASE_GROUP_DEF, 1);
1152 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1154 #if CONFIG_IS_ENABLED(MMC_WRITE)
1155 /* update erase group size to be high-capacity */
1156 mmc->erase_grp_size =
1157 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1162 /* all OK, write the configuration */
1163 for (i = 0; i < 4; i++) {
1164 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1165 EXT_CSD_ENH_START_ADDR+i,
1166 (enh_start_addr >> (i*8)) & 0xFF);
1170 for (i = 0; i < 3; i++) {
1171 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1172 EXT_CSD_ENH_SIZE_MULT+i,
1173 (enh_size_mult >> (i*8)) & 0xFF);
1177 for (pidx = 0; pidx < 4; pidx++) {
1178 for (i = 0; i < 3; i++) {
1179 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1180 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1181 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1186 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1187 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1191 if (mode == MMC_HWPART_CONF_SET)
1194 /* The WR_REL_SET is a write-once register but shall be
1195 * written before setting PART_SETTING_COMPLETED. As it is
1196 * write-once we can only write it when completing the
1198 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1199 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1200 EXT_CSD_WR_REL_SET, wr_rel_set);
1205 /* Setting PART_SETTING_COMPLETED confirms the partition
1206 * configuration but it only becomes effective after power
1207 * cycle, so we do not adjust the partition related settings
1208 * in the mmc struct. */
1210 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1211 EXT_CSD_PARTITION_SETTING,
1212 EXT_CSD_PARTITION_SETTING_COMPLETED);
1220 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Query card-detect (non-DM path): board hook first, then the
 * controller's getcd op. Default return path elided in this listing.
 */
1221 int mmc_getcd(struct mmc *mmc)
1225 cd = board_mmc_getcd(mmc);
1228 if (mmc->cfg->ops->getcd)
1229 cd = mmc->cfg->ops->getcd(mmc);
1238 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Issue SD CMD6 (SWITCH_FUNC). mode selects check (0) vs set (1) in
 * bit 31; the 4-bit field for 'group' is cleared and replaced with
 * 'value', all other groups left at 0xf (no change). The 64-byte
 * switch status is read back into resp.
 */
1239 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1242 struct mmc_data data;
1244 /* Switch the frequency */
1245 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1246 cmd.resp_type = MMC_RSP_R1;
1247 cmd.cmdarg = (mode << 31) | 0xffffff;
1248 cmd.cmdarg &= ~(0xf << (group * 4));
1249 cmd.cmdarg |= value << (group * 4);
1251 data.dest = (char *)resp;
1252 data.blocksize = 64;
1254 data.flags = MMC_DATA_READ;
1256 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * Probe SD card capabilities: read the SCR (ACMD51) to get the SD
 * version and 4-bit support, then (for v1.10+) use CMD6 in check mode
 * to detect high-speed, and for v3.0+ decode the UHS bus-mode bits
 * from the switch status. SPI hosts keep only the defaults.
 */
1259 static int sd_get_capabilities(struct mmc *mmc)
1263 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1264 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1265 struct mmc_data data;
1267 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1271 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1273 if (mmc_host_is_spi(mmc))
1276 /* Read the SCR to find out if this card supports higher speeds */
1277 cmd.cmdidx = MMC_CMD_APP_CMD;
1278 cmd.resp_type = MMC_RSP_R1;
1279 cmd.cmdarg = mmc->rca << 16;
1281 err = mmc_send_cmd(mmc, &cmd, NULL);
1286 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1287 cmd.resp_type = MMC_RSP_R1;
1293 data.dest = (char *)scr;
1296 data.flags = MMC_DATA_READ;
1298 err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is big-endian on the wire; convert to host order */
1307 mmc->scr[0] = __be32_to_cpu(scr[0]);
1308 mmc->scr[1] = __be32_to_cpu(scr[1]);
1310 switch ((mmc->scr[0] >> 24) & 0xf) {
1312 mmc->version = SD_VERSION_1_0;
1315 mmc->version = SD_VERSION_1_10;
1318 mmc->version = SD_VERSION_2;
1319 if ((mmc->scr[0] >> 15) & 0x1)
1320 mmc->version = SD_VERSION_3;
1323 mmc->version = SD_VERSION_1_0;
1327 if (mmc->scr[0] & SD_DATA_4BIT)
1328 mmc->card_caps |= MMC_MODE_4BIT;
1330 /* Version 1.0 doesn't support switching */
1331 if (mmc->version == SD_VERSION_1_0)
1336 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1337 (u8 *)switch_status);
1342 /* The high-speed function is busy. Try again */
1343 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1347 /* If high-speed isn't supported, we return */
1348 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1349 mmc->card_caps |= MMC_CAP(SD_HS);
1351 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1352 /* Versions before 3.0 don't support UHS modes */
1353 if (mmc->version < SD_VERSION_3)
1356 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1357 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1358 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1359 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1360 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1361 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1362 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1363 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1364 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1365 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1366 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * Switch the SD card's bus speed via CMD6 (set mode, function group 1)
 * and verify from switch_status[4] bits [27:24] that the card actually
 * selected the requested function.
 */
1372 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1376 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1379 /* SD versions 1.00 and 1.01 do not support CMD 6 */
1380 if (mmc->version == SD_VERSION_1_0)
1385 speed = UHS_SDR12_BUS_SPEED;
1388 speed = HIGH_SPEED_BUS_SPEED;
1390 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1392 speed = UHS_SDR12_BUS_SPEED;
1395 speed = UHS_SDR25_BUS_SPEED;
1398 speed = UHS_SDR50_BUS_SPEED;
1401 speed = UHS_DDR50_BUS_SPEED;
1404 speed = UHS_SDR104_BUS_SPEED;
1411 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1415 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * Set the SD card's bus width (1 or 4 only) via APP_CMD followed by
 * ACMD6 (SET_BUS_WIDTH). Host-side width is configured separately.
 */
1421 static int sd_select_bus_width(struct mmc *mmc, int w)
1426 if ((w != 4) && (w != 1))
1429 cmd.cmdidx = MMC_CMD_APP_CMD;
1430 cmd.resp_type = MMC_RSP_R1;
1431 cmd.cmdarg = mmc->rca << 16;
1433 err = mmc_send_cmd(mmc, &cmd, NULL);
1437 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1438 cmd.resp_type = MMC_RSP_R1;
1443 err = mmc_send_cmd(mmc, &cmd, NULL);
1451 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the 64-byte SD Status register (ACMD13) and decode the erase
 * parameters: allocation unit size (AU_SIZE lookup table, in 512-byte
 * sectors), erase size (es), erase timeout (et) and erase offset (eo),
 * stored on mmc->ssr for later erase-time estimation. APP_CMD is
 * retried for cards flagged with MMC_QUIRK_RETRY_APP_CMD.
 */
1452 static int sd_read_ssr(struct mmc *mmc)
1454 static const unsigned int sd_au_size[] = {
1455 0, SZ_16K / 512, SZ_32K / 512,
1456 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1457 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1458 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1459 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1464 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1465 struct mmc_data data;
1467 unsigned int au, eo, et, es;
1469 cmd.cmdidx = MMC_CMD_APP_CMD;
1470 cmd.resp_type = MMC_RSP_R1;
1471 cmd.cmdarg = mmc->rca << 16;
1473 err = mmc_send_cmd(mmc, &cmd, NULL);
1474 #ifdef CONFIG_MMC_QUIRKS
1475 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1478 * It has been seen that APP_CMD may fail on the first
1479 * attempt, let's try a few more times
1482 err = mmc_send_cmd(mmc, &cmd, NULL);
1485 } while (retries--);
1491 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1492 cmd.resp_type = MMC_RSP_R1;
1496 data.dest = (char *)ssr;
1497 data.blocksize = 64;
1499 data.flags = MMC_DATA_READ;
1501 err = mmc_send_cmd(mmc, &cmd, &data);
/* SSR is big-endian on the wire; convert each word to host order */
1509 for (i = 0; i < 16; i++)
1510 ssr[i] = be32_to_cpu(ssr[i]);
1512 au = (ssr[2] >> 12) & 0xF;
1513 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1514 mmc->ssr.au = sd_au_size[au];
1515 es = (ssr[3] >> 24) & 0xFF;
1516 es |= (ssr[2] & 0xFF) << 8;
1517 et = (ssr[3] >> 18) & 0x3F;
1519 eo = (ssr[3] >> 16) & 0x3;
1520 mmc->ssr.erase_timeout = (et * 1000) / es;
1521 mmc->ssr.erase_offset = eo * 1000;
1524 pr_debug("Invalid Allocation Unit Size.\n");
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
1532 static const int fbase[] = {
/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 * (Used with fbase[] to decode the CSD TRAN_SPEED field; table
 * contents elided in this listing.)
 */
1542 static const u8 multipliers[] = {
/*
 * Map a single MMC_MODE_*BIT capability flag to its numeric bus width
 * (return statements elided in this listing). Unknown flags are warned
 * about. NOTE(review): the warning string contains a typo ("witdh");
 * it is a runtime string, so it is left untouched here — fix upstream.
 */
1561 static inline int bus_width(uint cap)
1563 if (cap == MMC_MODE_8BIT)
1565 if (cap == MMC_MODE_4BIT)
1567 if (cap == MMC_MODE_1BIT)
1569 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1573 #if !CONFIG_IS_ENABLED(DM_MMC)
1574 #ifdef MMC_SUPPORTS_TUNING
1575 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
/* Push the current mmc settings (clock, width, etc.) to the controller
 * via its optional set_ios op (non-DM path). */
1581 static int mmc_set_ios(struct mmc *mmc)
1585 if (mmc->cfg->ops->set_ios)
1586 ret = mmc->cfg->ops->set_ios(mmc);
/* Power-cycle the card through the controller's optional
 * host_power_cycle op (non-DM path). */
1591 static int mmc_host_power_cycle(struct mmc *mmc)
1595 if (mmc->cfg->ops->host_power_cycle)
1596 ret = mmc->cfg->ops->host_power_cycle(mmc);
/*
 * Set the bus clock, clamped to the controller's [f_min, f_max] range,
 * record the disable state, and apply via mmc_set_ios().
 */
1602 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1605 if (clock > mmc->cfg->f_max)
1606 clock = mmc->cfg->f_max;
1608 if (clock < mmc->cfg->f_min)
1609 clock = mmc->cfg->f_min;
1613 mmc->clk_disable = disable;
1615 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1617 return mmc_set_ios(mmc);
/* Record the host-side bus width and apply it via mmc_set_ios(). */
1620 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1622 mmc->bus_width = width;
1624 return mmc_set_ios(mmc);
1627 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1629 * helper function to display the capabilities in a human
1630 * friendly manner. The capabilities include bus width and
/*
 * Debug helper: print a capability mask as supported bus widths and
 * mode names. The "\b\b" sequences erase the trailing ", " separator.
 */
1633 void mmc_dump_capabilities(const char *text, uint caps)
1637 pr_debug("%s: widths [", text);
1638 if (caps & MMC_MODE_8BIT)
1640 if (caps & MMC_MODE_4BIT)
1642 if (caps & MMC_MODE_1BIT)
1644 pr_debug("\b\b] modes [");
1645 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1646 if (MMC_CAP(mode) & caps)
1647 pr_debug("%s, ", mmc_mode_name(mode));
1648 pr_debug("\b\b]\n");
1652 struct mode_width_tuning {
1655 #ifdef MMC_SUPPORTS_TUNING
1660 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Convert an mmc_voltage enum to millivolts (negative/elided fallback
 * for unknown values). */
1661 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1664 case MMC_SIGNAL_VOLTAGE_000: return 0;
1665 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1666 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1667 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * Change the host I/O signal voltage; no-op when already at the
 * requested level. Applied through mmc_set_ios().
 */
1672 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1676 if (mmc->signal_voltage == signal_voltage)
1679 mmc->signal_voltage = signal_voltage;
1680 err = mmc_set_ios(mmc);
1682 pr_debug("unable to set voltage (err %d)\n", err);
/* Stub used when MMC_IO_VOLTAGE is disabled: always succeeds. */
1687 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1693 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD bus modes in descending order of preference; entries gated by the
 * UHS/tuning config options. Iterated by for_each_sd_mode_by_pref(),
 * which skips modes absent from the caller's capability mask.
 * (Mode identifiers for most entries are elided in this listing.)
 */
1694 static const struct mode_width_tuning sd_modes_by_pref[] = {
1695 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1696 #ifdef MMC_SUPPORTS_TUNING
1699 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1700 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1705 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1709 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1713 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1718 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1720 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1723 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1728 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1732 #define for_each_sd_mode_by_pref(caps, mwt) \
1733 for (mwt = sd_modes_by_pref;\
1734 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1736 if (caps & MMC_CAP(mwt->mode))
/*
 * sd_select_mode_and_width() - choose the best SD bus mode and width both
 * card and host support, program the card first and then the host, run
 * tuning when the mode requires it, and drop back to MMC_LEGACY when a
 * candidate fails.  SPI hosts bypass the search entirely.
 */
1738 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1741 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1742 const struct mode_width_tuning *mwt;
1743 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS is only attempted when the card advertised 1.8V switching (S18R) */
1744 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1746 bool uhs_en = false;
1751 mmc_dump_capabilities("sd card", card_caps);
1752 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts: fixed 1-bit legacy mode, nothing to negotiate */
1755 if (mmc_host_is_spi(mmc)) {
1756 mmc_set_bus_width(mmc, 1);
1757 mmc_select_mode(mmc, MMC_LEGACY);
1758 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1759 #if CONFIG_IS_ENABLED(MMC_WRITE)
1760 err = sd_read_ssr(mmc);
1762 pr_warn("unable to read ssr\n");
1767 /* Restrict card's capabilities by what the host can do */
1768 caps = card_caps & mmc->host_caps;
1773 for_each_sd_mode_by_pref(caps, mwt) {
1776 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1777 if (*w & caps & mwt->widths) {
1778 pr_debug("trying mode %s width %d (at %d MHz)\n",
1779 mmc_mode_name(mwt->mode),
1781 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1783 /* configure the bus width (card + host) */
1784 err = sd_select_bus_width(mmc, bus_width(*w));
1787 mmc_set_bus_width(mmc, bus_width(*w));
1789 /* configure the bus mode (card) */
1790 err = sd_set_card_speed(mmc, mwt->mode);
1794 /* configure the bus mode (host) */
1795 mmc_select_mode(mmc, mwt->mode);
1796 mmc_set_clock(mmc, mmc->tran_speed,
1799 #ifdef MMC_SUPPORTS_TUNING
1800 /* execute tuning if needed */
1801 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1802 err = mmc_execute_tuning(mmc,
1805 pr_debug("tuning failed\n");
1811 #if CONFIG_IS_ENABLED(MMC_WRITE)
1812 err = sd_read_ssr(mmc);
1814 pr_warn("unable to read ssr\n");
1820 /* revert to a safer bus speed */
1821 mmc_select_mode(mmc, MMC_LEGACY);
1822 mmc_set_clock(mmc, mmc->tran_speed,
1828 pr_err("unable to select a mode\n");
1833  * read and compare the part of ext csd that is constant.
1834  * This can be used to check that the transfer is working
/*
 * Re-reads EXT_CSD into a cache-aligned bounce buffer and compares a set
 * of read-only fields against the copy cached in mmc->ext_csd; a match
 * proves the freshly configured bus transfers data correctly.
 */
1837 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1840 const u8 *ext_csd = mmc->ext_csd;
1841 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists from MMC v4 onward */
1843 if (mmc->version < MMC_VERSION_4)
1846 err = mmc_send_ext_csd(mmc, test_csd);
1850 /* Only compare read only fields */
1851 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1852 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1853 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1854 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1855 ext_csd[EXT_CSD_REV]
1856 == test_csd[EXT_CSD_REV] &&
1857 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1858 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1859 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1860 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1866 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - pick the lowest signalling voltage the card
 * (per its EXT_CSD card type bits) and @allowed_mask both permit, and try
 * each candidate from lowest upward until one sticks.
 */
1867 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1868 uint32_t allowed_mask)
1876 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1877 EXT_CSD_CARD_TYPE_HS400_1_8V))
1878 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1879 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1880 EXT_CSD_CARD_TYPE_HS400_1_2V))
1881 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1884 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1885 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1886 MMC_SIGNAL_VOLTAGE_180;
1887 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1888 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1891 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1895 while (card_mask & allowed_mask) {
1896 enum mmc_voltage best_match;
/* ffs() selects the lowest set bit; assumes lower voltage == lower bit
 * in the MMC_SIGNAL_VOLTAGE_* encoding — TODO confirm in mmc.h */
1898 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1899 if (!mmc_set_signal_voltage(mmc, best_match))
/* this candidate failed; drop it and try the next-lowest voltage */
1902 allowed_mask &= ~best_match;
/* Stub used when MMC_IO_VOLTAGE support is compiled out. */
1908 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1909 uint32_t allowed_mask)
/*
 * eMMC bus modes in order of preference (fastest first); each speed-class
 * entry is only compiled in when the matching Kconfig support is enabled.
 */
1915 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1916 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1918 .mode = MMC_HS_400_ES,
1919 .widths = MMC_MODE_8BIT,
1922 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1925 .widths = MMC_MODE_8BIT,
/* HS400 is entered via HS200 tuning, hence the HS200 tuning command */
1926 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1929 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1932 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1933 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1938 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1942 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1946 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1950 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate mmc_modes_by_pref, visiting only modes whose cap bit is in @caps. */
1954 #define for_each_mmc_mode_by_pref(caps, mwt) \
1955 for (mwt = mmc_modes_by_pref;\
1956 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1958 if (caps & MMC_CAP(mwt->mode))
/*
 * Maps a host width capability (and whether the mode is DDR) to the
 * EXT_CSD BUS_WIDTH byte value used in the CMD6 switch; ordered widest
 * first so iteration prefers wider buses.
 */
1960 static const struct ext_csd_bus_width {
1964 } ext_csd_bus_width[] = {
1965 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1966 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1967 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1968 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1969 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1972 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - enter HS400 via the mandated sequence: tune in
 * HS200 first (with mmc->hs400_tuning set so the host driver knows the
 * target is HS400), drop back to HS, switch the card to DDR 8-bit, then
 * raise card and host to HS400 timing/clock.
 */
1973 static int mmc_select_hs400(struct mmc *mmc)
1977 /* Set timing to HS200 for tuning */
1978 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1982 /* configure the bus mode (host) */
1983 mmc_select_mode(mmc, MMC_HS_200);
1984 mmc_set_clock(mmc, mmc->tran_speed, false);
1986 /* execute tuning if needed */
1987 mmc->hs400_tuning = 1;
1988 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1989 mmc->hs400_tuning = 0;
1991 debug("tuning failed\n");
1995 /* Set back to HS */
1996 mmc_set_card_speed(mmc, MMC_HS, true);
/* host-driver hook to prepare DDR operation before the width switch */
1998 err = mmc_hs400_prepare_ddr(mmc);
2002 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2003 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
2007 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
2011 mmc_select_mode(mmc, MMC_HS_400);
2012 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub used when HS400 support is compiled out. */
2019 static int mmc_select_hs400(struct mmc *mmc)
2025 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2026 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM fallback; the DM path routes this through the host driver ops. */
2027 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * mmc_select_hs400es() - enter HS400 Enhanced Strobe: HS first, then a
 * CMD6 switch to DDR 8-bit with the strobe bit set, then HS400ES timing
 * and clock, and finally the host-side enhanced-strobe enable.  No HS200
 * tuning is needed in this mode.
 */
2032 static int mmc_select_hs400es(struct mmc *mmc)
2036 err = mmc_set_card_speed(mmc, MMC_HS, true);
2040 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2041 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2042 EXT_CSD_BUS_WIDTH_STROBE);
2044 printf("switch to bus width for hs400 failed\n");
2047 /* TODO: driver strength */
2048 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2052 mmc_select_mode(mmc, MMC_HS_400_ES);
2053 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2057 return mmc_set_enhanced_strobe(mmc);
/* Stub used when HS400ES support is compiled out. */
2060 static int mmc_select_hs400es(struct mmc *mmc)
/* Iterate ext_csd_bus_width[], visiting entries whose DDR flag matches
 * @ddr and whose width capability bit is present in @caps. */
2066 #define for_each_supported_width(caps, ddr, ecbv) \
2067 for (ecbv = ext_csd_bus_width;\
2068 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2070 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - eMMC counterpart of the SD mode search:
 * walk mmc_modes_by_pref / ext_csd_bus_width from fastest to slowest,
 * program voltage, width and speed on card then host, verify with an
 * EXT_CSD read-back, and revert to 1-bit MMC_LEGACY on failure.
 */
2072 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2075 const struct mode_width_tuning *mwt;
2076 const struct ext_csd_bus_width *ecbw;
2079 mmc_dump_capabilities("mmc", card_caps);
2080 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts: fixed 1-bit legacy mode, nothing to negotiate */
2083 if (mmc_host_is_spi(mmc)) {
2084 mmc_set_bus_width(mmc, 1);
2085 mmc_select_mode(mmc, MMC_LEGACY);
2086 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2090 /* Restrict card's capabilities by what the host can do */
2091 card_caps &= mmc->host_caps;
2093 /* Only version 4 of MMC supports wider bus widths */
2094 if (mmc->version < MMC_VERSION_4)
2097 if (!mmc->ext_csd) {
2098 pr_debug("No ext_csd found!\n"); /* this should never happen */
2102 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2103 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2105 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2106 * before doing anything else, since a transition from either of
2107 * the HS200/HS400 mode directly to legacy mode is not supported.
2109 if (mmc->selected_mode == MMC_HS_200 ||
2110 mmc->selected_mode == MMC_HS_400)
2111 mmc_set_card_speed(mmc, MMC_HS, true);
2114 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2116 for_each_mmc_mode_by_pref(card_caps, mwt) {
2117 for_each_supported_width(card_caps & mwt->widths,
2118 mmc_is_mode_ddr(mwt->mode), ecbw) {
2119 enum mmc_voltage old_voltage;
2120 pr_debug("trying mode %s width %d (at %d MHz)\n",
2121 mmc_mode_name(mwt->mode),
2122 bus_width(ecbw->cap),
2123 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so a failed attempt can restore it */
2124 old_voltage = mmc->signal_voltage;
2125 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2126 MMC_ALL_SIGNAL_VOLTAGE);
2130 /* configure the bus width (card + host) */
2131 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
/* width is set SDR first; the DDR flag is applied after the speed switch */
2133 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2136 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2138 if (mwt->mode == MMC_HS_400) {
2139 err = mmc_select_hs400(mmc);
2141 printf("Select HS400 failed %d\n", err);
2144 } else if (mwt->mode == MMC_HS_400_ES) {
2145 err = mmc_select_hs400es(mmc);
2147 printf("Select HS400ES failed %d\n",
2152 /* configure the bus speed (card) */
2153 err = mmc_set_card_speed(mmc, mwt->mode, false);
2158 * configure the bus width AND the ddr mode
2159 * (card). The host side will be taken care
2160 * of in the next step
2162 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2163 err = mmc_switch(mmc,
2164 EXT_CSD_CMD_SET_NORMAL,
2166 ecbw->ext_csd_bits);
2171 /* configure the bus mode (host) */
2172 mmc_select_mode(mmc, mwt->mode);
2173 mmc_set_clock(mmc, mmc->tran_speed,
2175 #ifdef MMC_SUPPORTS_TUNING
2177 /* execute tuning if needed */
2179 err = mmc_execute_tuning(mmc,
2182 pr_debug("tuning failed\n");
2189 /* do a transfer to check the configuration */
2190 err = mmc_read_and_compare_ext_csd(mmc);
2194 mmc_set_signal_voltage(mmc, old_voltage);
2195 /* if an error occurred, revert to a safer bus mode */
2196 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2197 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2198 mmc_select_mode(mmc, MMC_LEGACY);
2199 mmc_set_bus_width(mmc, 1);
2203 pr_err("unable to select a mode\n");
2209 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: one static EXT_CSD buffer instead of a per-card malloc */
2210 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * mmc_startup_v4() - MMC v4+ specific startup: read EXT_CSD, derive the
 * exact spec version, capacities (user/boot/RPMB/GP partitions), partition
 * configuration, switch timings and erase/WP group sizes.  Returns early
 * for SD cards and pre-v4 MMC.
 */
2213 static int mmc_startup_v4(struct mmc *mmc)
2217 bool has_parts = false;
2218 bool part_completed;
/* maps EXT_CSD_REV to the precise MMC_VERSION_* constant */
2219 static const u32 mmc_versions[] = {
2231 #if CONFIG_IS_ENABLED(MMC_TINY)
2232 u8 *ext_csd = ext_csd_bkup;
2234 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2238 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2240 err = mmc_send_ext_csd(mmc, ext_csd);
2244 /* store the ext csd for future reference */
2246 mmc->ext_csd = ext_csd;
2248 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2250 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2253 /* check ext_csd version and capacity */
2254 err = mmc_send_ext_csd(mmc, ext_csd);
2258 /* store the ext csd for future reference */
2260 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2263 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2265 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2268 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2270 if (mmc->version >= MMC_VERSION_4_2) {
2272 * According to the JEDEC Standard, the value of
2273 * ext_csd's capacity is valid if the value is more
2276 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2277 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2278 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2279 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2280 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT is only authoritative above 2 GiB (high-capacity devices) */
2281 if ((capacity >> 20) > 2 * 1024)
2282 mmc->capacity_user = capacity;
2285 if (mmc->version >= MMC_VERSION_4_5)
2286 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2288 /* The partition data may be non-zero but it is only
2289 * effective if PARTITION_SETTING_COMPLETED is set in
2290 * EXT_CSD, so ignore any data if this bit is not set,
2291 * except for enabling the high-capacity group size
2292 * definition (see below).
2294 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2295 EXT_CSD_PARTITION_SETTING_COMPLETED);
2297 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2298 /* Some eMMC set the value too low so set a minimum */
2299 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2300 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2302 /* store the partition info of emmc */
2303 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2304 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2305 ext_csd[EXT_CSD_BOOT_MULT])
2306 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2307 if (part_completed &&
2308 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2309 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT/RPMB sizes are in 128 KiB units, hence the << 17 */
2311 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2313 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2315 for (i = 0; i < 4; i++) {
/* each GP partition has a 3-byte little-endian size multiplier */
2316 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2317 uint mult = (ext_csd[idx + 2] << 16) +
2318 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2321 if (!part_completed)
2323 mmc->capacity_gp[i] = mult;
2324 mmc->capacity_gp[i] *=
2325 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2326 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* group units are 512 KiB, hence the << 19 */
2327 mmc->capacity_gp[i] <<= 19;
2330 #ifndef CONFIG_SPL_BUILD
2331 if (part_completed) {
2332 mmc->enh_user_size =
2333 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2334 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2335 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2336 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2337 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2338 mmc->enh_user_size <<= 19;
2339 mmc->enh_user_start =
2340 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2341 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2342 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2343 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2344 if (mmc->high_capacity)
2345 mmc->enh_user_start <<= 9;
2350 * Host needs to enable ERASE_GRP_DEF bit if device is
2351 * partitioned. This bit will be lost every time after a reset
2352 * or power off. This will affect erase size.
2356 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2357 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2360 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2361 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy in sync with what was just switched */
2366 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2369 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2370 #if CONFIG_IS_ENABLED(MMC_WRITE)
2371 /* Read out group size from ext_csd */
2372 mmc->erase_grp_size =
2373 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2376 * if high capacity and partition setting completed
2377 * SEC_COUNT is valid even if it is smaller than 2 GiB
2378 * JEDEC Standard JESD84-B45, 6.2.4
2380 if (mmc->high_capacity && part_completed) {
2381 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2382 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2383 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2384 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2385 capacity *= MMC_MAX_BLOCK_LEN;
2386 mmc->capacity_user = capacity;
2389 #if CONFIG_IS_ENABLED(MMC_WRITE)
2391 /* Calculate the group size from the csd value. */
2392 int erase_gsz, erase_gmul;
2394 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2395 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2396 mmc->erase_grp_size = (erase_gsz + 1)
2400 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2401 mmc->hc_wp_grp_size = 1024
2402 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2403 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2406 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2411 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached EXT_CSD so stale data is never reused */
2414 mmc->ext_csd = NULL;
/*
 * mmc_startup() - run the card identification/initialization sequence:
 * CID, RCA, CSD, DSR, card selection, v4 EXT_CSD parsing, capability
 * negotiation (SD or MMC path), and finally fill in the block descriptor.
 */
2419 static int mmc_startup(struct mmc *mmc)
2425 struct blk_desc *bdesc;
2427 #ifdef CONFIG_MMC_SPI_CRC_ON
2428 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2429 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2430 cmd.resp_type = MMC_RSP_R1;
2432 err = mmc_send_cmd(mmc, &cmd, NULL);
2438 /* Put the Card in Identify Mode */
2439 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2440 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2441 cmd.resp_type = MMC_RSP_R2;
2444 err = mmc_send_cmd(mmc, &cmd, NULL);
2446 #ifdef CONFIG_MMC_QUIRKS
2447 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2450 * It has been seen that SEND_CID may fail on the first
2451 * attempt, let's try a few more time
2454 err = mmc_send_cmd(mmc, &cmd, NULL);
2457 } while (retries--);
2464 memcpy(mmc->cid, cmd.response, 16);
2467 * For MMC cards, set the Relative Address.
2468 * For SD cards, get the Relative Address.
2469 * This also puts the cards into Standby State
2471 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2472 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2473 cmd.cmdarg = mmc->rca << 16;
2474 cmd.resp_type = MMC_RSP_R6;
2476 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD assigns its own RCA in the R6 response */
2482 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2485 /* Get the Card-Specific Data */
2486 cmd.cmdidx = MMC_CMD_SEND_CSD;
2487 cmd.resp_type = MMC_RSP_R2;
2488 cmd.cmdarg = mmc->rca << 16;
2490 err = mmc_send_cmd(mmc, &cmd, NULL);
2495 mmc->csd[0] = cmd.response[0];
2496 mmc->csd[1] = cmd.response[1];
2497 mmc->csd[2] = cmd.response[2];
2498 mmc->csd[3] = cmd.response[3];
/* CSD SPEC_VERS gives only a coarse version; refined later by EXT_CSD */
2500 if (mmc->version == MMC_VERSION_UNKNOWN) {
2501 int version = (cmd.response[0] >> 26) & 0xf;
2505 mmc->version = MMC_VERSION_1_2;
2508 mmc->version = MMC_VERSION_1_4;
2511 mmc->version = MMC_VERSION_2_2;
2514 mmc->version = MMC_VERSION_3;
2517 mmc->version = MMC_VERSION_4;
2520 mmc->version = MMC_VERSION_1_2;
2525 /* divide frequency by 10, since the mults are 10x bigger */
2526 freq = fbase[(cmd.response[0] & 0x7)];
2527 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2529 mmc->legacy_speed = freq * mult;
2530 mmc_select_mode(mmc, MMC_LEGACY);
2532 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2533 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2534 #if CONFIG_IS_ENABLED(MMC_WRITE)
2537 mmc->write_bl_len = mmc->read_bl_len;
2539 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2542 if (mmc->high_capacity) {
2543 csize = (mmc->csd[1] & 0x3f) << 16
2544 | (mmc->csd[2] & 0xffff0000) >> 16;
2547 csize = (mmc->csd[1] & 0x3ff) << 2
2548 | (mmc->csd[2] & 0xc0000000) >> 30;
2549 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2552 mmc->capacity_user = (csize + 1) << (cmult + 2);
2553 mmc->capacity_user *= mmc->read_bl_len;
2554 mmc->capacity_boot = 0;
2555 mmc->capacity_rpmb = 0;
2556 for (i = 0; i < 4; i++)
2557 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the core can transfer */
2559 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2560 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2562 #if CONFIG_IS_ENABLED(MMC_WRITE)
2563 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2564 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only if the card implements it and one was configured */
2567 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2568 cmd.cmdidx = MMC_CMD_SET_DSR;
2569 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2570 cmd.resp_type = MMC_RSP_NONE;
2571 if (mmc_send_cmd(mmc, &cmd, NULL))
2572 pr_warn("MMC: SET_DSR failed\n");
2575 /* Select the card, and put it into Transfer Mode */
2576 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2577 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2578 cmd.resp_type = MMC_RSP_R1;
2579 cmd.cmdarg = mmc->rca << 16;
2580 err = mmc_send_cmd(mmc, &cmd, NULL);
2587 * For SD, its erase group is always one sector
2589 #if CONFIG_IS_ENABLED(MMC_WRITE)
2590 mmc->erase_grp_size = 1;
2592 mmc->part_config = MMCPART_NOAVAILABLE;
2594 err = mmc_startup_v4(mmc);
2598 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2602 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, skip capability negotiation */
2603 mmc_set_clock(mmc, mmc->legacy_speed, false);
2604 mmc_select_mode(mmc, MMC_LEGACY);
2605 mmc_set_bus_width(mmc, 1);
2608 err = sd_get_capabilities(mmc);
2611 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2613 err = mmc_get_capabilities(mmc);
2616 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2622 mmc->best_mode = mmc->selected_mode;
2624 /* Fix the block length for DDR mode */
2625 if (mmc->ddr_mode) {
2626 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2627 #if CONFIG_IS_ENABLED(MMC_WRITE)
2628 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2632 /* fill in device description */
2633 bdesc = mmc_get_blk_desc(mmc);
2637 bdesc->blksz = mmc->read_bl_len;
2638 bdesc->log2blksz = LOG2(bdesc->blksz);
2639 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2640 #if !defined(CONFIG_SPL_BUILD) || \
2641 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2642 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* decode manufacturer/serial, product name and revision from the CID */
2643 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2644 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2645 (mmc->cid[3] >> 16) & 0xffff);
2646 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2647 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2648 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2649 (mmc->cid[2] >> 24) & 0xff);
2650 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2651 (mmc->cid[2] >> 16) & 0xf);
2653 bdesc->vendor[0] = 0;
2654 bdesc->product[0] = 0;
2655 bdesc->revision[0] = 0;
2658 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * mmc_send_if_cond() - issue SD CMD8 (SEND_IF_COND).  A valid response
 * echoing the 0xaa check pattern identifies the card as SD version 2+.
 */
2665 static int mmc_send_if_cond(struct mmc *mmc)
2670 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2671 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2672 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2673 cmd.resp_type = MMC_RSP_R7;
2675 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the check pattern back, else it is not SDv2 */
2680 if ((cmd.response[0] & 0xff) != 0xaa)
2683 mmc->version = SD_VERSION_2;
2688 #if !CONFIG_IS_ENABLED(DM_MMC)
2689 /* board-specific MMC power initializations. */
2690 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - look up the vmmc/vqmmc regulators under driver
 * model, or fall back to the board hook on non-DM builds.  A missing
 * regulator is not an error — it is only logged at debug level.
 */
2695 static int mmc_power_init(struct mmc *mmc)
2697 #if CONFIG_IS_ENABLED(DM_MMC)
2698 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2701 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2704 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2706 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2707 &mmc->vqmmc_supply);
2709 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2711 #else /* !CONFIG_DM_MMC */
2713 * Driver model should use a regulator, as above, rather than calling
2714 * out to board code.
2716 board_mmc_power_init();
2722  * put the host in the initial state:
2723  * - turn on Vdd (card power supply)
2724  * - configure the bus width and clock to minimal values
2726 static void mmc_set_initial_state(struct mmc *mmc)
2730 /* First try to set 3.3V. If it fails set to 1.8V */
2731 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2733 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2735 pr_warn("mmc: failed to set signal voltage\n");
/* slowest, narrowest configuration every card must accept */
2737 mmc_select_mode(mmc, MMC_LEGACY);
2738 mmc_set_bus_width(mmc, 1);
2739 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the vmmc regulator (DM builds only); errors are reported. */
2742 static int mmc_power_on(struct mmc *mmc)
2744 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2745 if (mmc->vmmc_supply) {
2746 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2749 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the vmmc regulator (DM builds only). */
2757 static int mmc_power_off(struct mmc *mmc)
2759 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2760 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2761 if (mmc->vmmc_supply) {
2762 int ret = regulator_set_enable(mmc->vmmc_supply, false);
/* power-off failure is non-fatal here, hence only a debug message */
2765 pr_debug("Error disabling VMMC supply\n");
/*
 * mmc_power_cycle() - full off/on cycle, including the host driver's own
 * power-cycle hook, with a settle delay between off and on.
 */
2773 static int mmc_power_cycle(struct mmc *mmc)
2777 ret = mmc_power_off(mmc);
2781 ret = mmc_host_power_cycle(mmc);
2786 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2787 * to be on the safer side.
2790 return mmc_power_on(mmc);
/*
 * mmc_get_op_cond() - power up the card and probe its operating
 * conditions: power-cycle (disabling UHS if cycling is unsupported),
 * host init, CMD0 reset, CMD8, then SD ACMD41 — falling back to MMC
 * CMD1 on timeout.
 */
2793 int mmc_get_op_cond(struct mmc *mmc)
2795 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2801 err = mmc_power_init(mmc);
2805 #ifdef CONFIG_MMC_QUIRKS
2806 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2807 MMC_QUIRK_RETRY_SEND_CID |
2808 MMC_QUIRK_RETRY_APP_CMD;
2811 err = mmc_power_cycle(mmc);
2814 * if power cycling is not supported, we should not try
2815 * to use the UHS modes, because we wouldn't be able to
2816 * recover from an error during the UHS initialization.
2818 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2820 mmc->host_caps &= ~UHS_CAPS;
2821 err = mmc_power_on(mmc);
2826 #if CONFIG_IS_ENABLED(DM_MMC)
2828 * Re-initialization is needed to clear old configuration for
2831 err = mmc_reinit(mmc);
2833 /* made sure it's not NULL earlier */
2834 err = mmc->cfg->ops->init(mmc);
2841 mmc_set_initial_state(mmc);
2843 /* Reset the Card */
2844 err = mmc_go_idle(mmc);
2849 /* The internal partition reset to user partition(0) at every CMD0 */
2850 mmc_get_blk_desc(mmc)->hwpart = 0;
2852 /* Test for SD version 2 */
2853 err = mmc_send_if_cond(mmc);
2855 /* Now try to get the SD card's operating condition */
2856 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS attempt needs a power cycle before retrying without UHS */
2857 if (err && uhs_en) {
2859 mmc_power_cycle(mmc);
2863 /* If the command timed out, we check for an MMC card */
2864 if (err == -ETIMEDOUT) {
2865 err = mmc_send_op_cond(mmc);
2868 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2869 pr_err("Card did not respond to voltage select!\n");
/*
 * mmc_start_init() - begin (possibly asynchronous) card init: seed the
 * baseline host capabilities, run deferred probe / card detect, then
 * obtain the operating conditions and mark init as in progress.
 */
2878 int mmc_start_init(struct mmc *mmc)
2884 * all hosts are capable of 1 bit bus-width and able to use the legacy
/* NOTE(review): MMC_CAP(MMC_LEGACY) is OR'ed in twice below — harmless
 * but redundant; the second occurrence could be dropped. */
2887 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2888 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2889 #if CONFIG_IS_ENABLED(DM_MMC)
2890 mmc_deferred_probe(mmc);
2892 #if !defined(CONFIG_MMC_BROKEN_CD)
2893 no_card = mmc_getcd(mmc) == 0;
2897 #if !CONFIG_IS_ENABLED(DM_MMC)
2898 /* we pretend there's no card when init is NULL */
2899 no_card = no_card || (mmc->cfg->ops->init == NULL);
2903 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2904 pr_err("MMC: no card present\n");
2909 err = mmc_get_op_cond(mmc);
2912 mmc->init_in_progress = 1;
/*
 * mmc_complete_init() - finish the init started by mmc_start_init():
 * complete a pending CMD1 poll if needed, then run the full startup.
 */
2917 static int mmc_complete_init(struct mmc *mmc)
2921 mmc->init_in_progress = 0;
2922 if (mmc->op_cond_pending)
2923 err = mmc_complete_op_cond(mmc);
2926 err = mmc_startup(mmc);
/*
 * mmc_init() - public entry point: run mmc_start_init() unless an async
 * init is already in flight, then complete it, timing the whole sequence.
 */
2934 int mmc_init(struct mmc *mmc)
2937 __maybe_unused ulong start;
2938 #if CONFIG_IS_ENABLED(DM_MMC)
2939 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2946 start = get_timer(0);
2948 if (!mmc->init_in_progress)
2949 err = mmc_start_init(mmc);
2952 err = mmc_complete_init(mmc);
2954 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2959 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2960 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2961 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_deinit() - step the card back down from high-speed modes by
 * re-running mode selection with the UHS (SD) or HS200/HS400 (eMMC)
 * capability bits masked out.
 */
2962 int mmc_deinit(struct mmc *mmc)
2970 caps_filtered = mmc->card_caps &
2971 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2972 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2973 MMC_CAP(UHS_SDR104));
2975 return sd_select_mode_and_width(mmc, caps_filtered);
2977 caps_filtered = mmc->card_caps &
2978 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2980 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Record the DSR value to be programmed during startup. */
2985 int mmc_set_dsr(struct mmc *mmc, u16 val)
2991 /* CPU-specific MMC initializations */
2992 __weak int cpu_mmc_init(struct bd_info *bis)
2997 /* board-specific MMC initializations. */
2998 __weak int board_mmc_init(struct bd_info *bis)
/* Mark the device for (or exclude it from) pre-relocation init. */
3003 void mmc_set_preinit(struct mmc *mmc, int preinit)
3005 mmc->preinit = preinit;
3008 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - DM variant: bind MMC devices in sequence order and probe
 * every device in the MMC uclass, logging (not aborting on) failures.
 */
3009 static int mmc_probe(struct bd_info *bis)
3013 struct udevice *dev;
3015 ret = uclass_get(UCLASS_MMC, &uc);
3020 * Try to add them in sequence order. Really with driver model we
3021 * should allow holes, but the current MMC list does not allow that.
3022 * So if we request 0, 1, 3 we will get 0, 1, 2.
3024 for (i = 0; ; i++) {
3025 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3029 uclass_foreach_dev(dev, uc) {
3030 ret = device_probe(dev);
3032 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM variant: defer entirely to the board hook. */
3038 static int mmc_probe(struct bd_info *bis)
3040 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-shot global MMC subsystem init; guarded so
 * repeated calls are no-ops.
 */
3047 int mmc_initialize(struct bd_info *bis)
3049 static int initialized = 0;
3051 if (initialized) /* Avoid initializing mmc multiple times */
3055 #if !CONFIG_IS_ENABLED(BLK)
3056 #if !CONFIG_IS_ENABLED(MMC_TINY)
3060 ret = mmc_probe(bis);
3064 #ifndef CONFIG_SPL_BUILD
3065 print_mmc_devices(',');
3072 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_init_device() - look up MMC device @num in the uclass and fetch its
 * struct mmc; the remainder of the body is elided in this chunk.
 */
3073 int mmc_init_device(int num)
3075 struct udevice *dev;
3079 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3083 m = mmc_get_mmc_dev(dev);
3093 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * mmc_set_bkops_enable() - enable manual background operations (BKOPS)
 * on an eMMC device.  Reads EXT_CSD to confirm support and current state
 * before issuing the CMD6 switch; the BKOPS_EN switch is one-time
 * programmable on the device.
 */
3094 int mmc_set_bkops_enable(struct mmc *mmc)
3097 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3099 err = mmc_send_ext_csd(mmc, ext_csd);
3101 puts("Could not get ext_csd register values\n");
3105 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3106 puts("Background operations not supported on device\n");
3107 return -EMEDIUMTYPE;
3110 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3111 puts("Background operations already enabled\n");
3115 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3117 puts("Failed to enable manual background operations\n");
3121 puts("Enabled manual background operations\n");
3127 __weak int mmc_get_env_dev(void)
3129 #ifdef CONFIG_SYS_MMC_ENV_DEV
3130 return CONFIG_SYS_MMC_ENV_DEV;