1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 #if !CONFIG_IS_ENABLED(MMC_TINY)
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
30 #if !CONFIG_IS_ENABLED(DM_MMC)
32 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
37 __weak int board_mmc_getwp(struct mmc *mmc)
42 int mmc_getwp(struct mmc *mmc)
46 wp = board_mmc_getwp(mmc);
49 if (mmc->cfg->ops->getwp)
50 wp = mmc->cfg->ops->getwp(mmc);
58 __weak int board_mmc_getcd(struct mmc *mmc)
64 #ifdef CONFIG_MMC_TRACE
65 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
67 printf("CMD_SEND:%d\n", cmd->cmdidx);
68 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
71 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
77 printf("\t\tRET\t\t\t %d\n", ret);
79 switch (cmd->resp_type) {
81 printf("\t\tMMC_RSP_NONE\n");
84 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
88 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
92 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
94 printf("\t\t \t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
101 printf("\t\t\t\t\tDUMPING DATA\n");
102 for (i = 0; i < 4; i++) {
104 printf("\t\t\t\t\t%03d - ", i*4);
105 ptr = (u8 *)&cmd->response[i];
107 for (j = 0; j < 4; j++)
108 printf("%02x ", *ptr--);
113 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
117 printf("\t\tERROR MMC rsp not supported\n");
123 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
127 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
128 printf("CURR STATE:%d\n", status);
132 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
133 const char *mmc_mode_name(enum bus_mode mode)
135 static const char *const names[] = {
136 [MMC_LEGACY] = "MMC legacy",
137 [SD_LEGACY] = "SD Legacy",
138 [MMC_HS] = "MMC High Speed (26MHz)",
139 [SD_HS] = "SD High Speed (50MHz)",
140 [UHS_SDR12] = "UHS SDR12 (25MHz)",
141 [UHS_SDR25] = "UHS SDR25 (50MHz)",
142 [UHS_SDR50] = "UHS SDR50 (100MHz)",
143 [UHS_SDR104] = "UHS SDR104 (208MHz)",
144 [UHS_DDR50] = "UHS DDR50 (50MHz)",
145 [MMC_HS_52] = "MMC High Speed (52MHz)",
146 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
147 [MMC_HS_200] = "HS200 (200MHz)",
148 [MMC_HS_400] = "HS400 (200MHz)",
151 if (mode >= MMC_MODES_END)
152 return "Unknown mode";
158 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
160 static const int freqs[] = {
161 [MMC_LEGACY] = 25000000,
162 [SD_LEGACY] = 25000000,
165 [MMC_HS_52] = 52000000,
166 [MMC_DDR_52] = 52000000,
167 [UHS_SDR12] = 25000000,
168 [UHS_SDR25] = 50000000,
169 [UHS_SDR50] = 100000000,
170 [UHS_DDR50] = 50000000,
171 [UHS_SDR104] = 208000000,
172 [MMC_HS_200] = 200000000,
173 [MMC_HS_400] = 200000000,
176 if (mode == MMC_LEGACY)
177 return mmc->legacy_speed;
178 else if (mode >= MMC_MODES_END)
184 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
186 mmc->selected_mode = mode;
187 mmc->tran_speed = mmc_mode2freq(mmc, mode);
188 mmc->ddr_mode = mmc_is_mode_ddr(mode);
189 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
190 mmc->tran_speed / 1000000);
194 #if !CONFIG_IS_ENABLED(DM_MMC)
195 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
199 mmmc_trace_before_send(mmc, cmd);
200 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
201 mmmc_trace_after_send(mmc, cmd, ret);
207 int mmc_send_status(struct mmc *mmc, unsigned int *status)
210 int err, retries = 5;
212 cmd.cmdidx = MMC_CMD_SEND_STATUS;
213 cmd.resp_type = MMC_RSP_R1;
214 if (!mmc_host_is_spi(mmc))
215 cmd.cmdarg = mmc->rca << 16;
218 err = mmc_send_cmd(mmc, &cmd, NULL);
220 mmc_trace_state(mmc, &cmd);
221 *status = cmd.response[0];
225 mmc_trace_state(mmc, &cmd);
229 int mmc_poll_for_busy(struct mmc *mmc, int timeout)
234 err = mmc_wait_dat0(mmc, 1, timeout);
239 err = mmc_send_status(mmc, &status);
243 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
244 (status & MMC_STATUS_CURR_STATE) !=
248 if (status & MMC_STATUS_MASK) {
249 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
250 pr_err("Status Error: 0x%08x\n", status);
262 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
263 pr_err("Timeout waiting card ready\n");
271 int mmc_set_blocklen(struct mmc *mmc, int len)
279 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
280 cmd.resp_type = MMC_RSP_R1;
283 err = mmc_send_cmd(mmc, &cmd, NULL);
285 #ifdef CONFIG_MMC_QUIRKS
286 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
289 * It has been seen that SET_BLOCKLEN may fail on the first
290 	 * attempt, let's try a few more times
293 err = mmc_send_cmd(mmc, &cmd, NULL);
303 #ifdef MMC_SUPPORTS_TUNING
304 static const u8 tuning_blk_pattern_4bit[] = {
305 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
306 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
307 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
308 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
309 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
310 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
311 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
312 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
315 static const u8 tuning_blk_pattern_8bit[] = {
316 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
317 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
318 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
319 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
320 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
321 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
322 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
323 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
324 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
325 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
326 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
327 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
328 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
329 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
330 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
331 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
334 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
337 struct mmc_data data;
338 const u8 *tuning_block_pattern;
341 if (mmc->bus_width == 8) {
342 tuning_block_pattern = tuning_blk_pattern_8bit;
343 size = sizeof(tuning_blk_pattern_8bit);
344 } else if (mmc->bus_width == 4) {
345 tuning_block_pattern = tuning_blk_pattern_4bit;
346 size = sizeof(tuning_blk_pattern_4bit);
351 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
355 cmd.resp_type = MMC_RSP_R1;
357 data.dest = (void *)data_buf;
359 data.blocksize = size;
360 data.flags = MMC_DATA_READ;
362 err = mmc_send_cmd(mmc, &cmd, &data);
366 if (memcmp(data_buf, tuning_block_pattern, size))
373 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
377 struct mmc_data data;
380 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
382 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
384 if (mmc->high_capacity)
387 cmd.cmdarg = start * mmc->read_bl_len;
389 cmd.resp_type = MMC_RSP_R1;
392 data.blocks = blkcnt;
393 data.blocksize = mmc->read_bl_len;
394 data.flags = MMC_DATA_READ;
396 if (mmc_send_cmd(mmc, &cmd, &data))
400 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
402 cmd.resp_type = MMC_RSP_R1b;
403 if (mmc_send_cmd(mmc, &cmd, NULL)) {
404 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
405 pr_err("mmc fail to send stop cmd\n");
414 #if CONFIG_IS_ENABLED(BLK)
415 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
417 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
421 #if CONFIG_IS_ENABLED(BLK)
422 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
424 int dev_num = block_dev->devnum;
426 lbaint_t cur, blocks_todo = blkcnt;
431 struct mmc *mmc = find_mmc_device(dev_num);
435 if (CONFIG_IS_ENABLED(MMC_TINY))
436 err = mmc_switch_part(mmc, block_dev->hwpart);
438 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
443 if ((start + blkcnt) > block_dev->lba) {
444 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
445 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
446 start + blkcnt, block_dev->lba);
451 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
452 pr_debug("%s: Failed to set blocklen\n", __func__);
457 cur = (blocks_todo > mmc->cfg->b_max) ?
458 mmc->cfg->b_max : blocks_todo;
459 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
460 pr_debug("%s: Failed to read blocks\n", __func__);
465 dst += cur * mmc->read_bl_len;
466 } while (blocks_todo > 0);
471 static int mmc_go_idle(struct mmc *mmc)
478 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
480 cmd.resp_type = MMC_RSP_NONE;
482 err = mmc_send_cmd(mmc, &cmd, NULL);
492 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
493 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
499 * Send CMD11 only if the request is to switch the card to
502 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
503 return mmc_set_signal_voltage(mmc, signal_voltage);
505 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
507 cmd.resp_type = MMC_RSP_R1;
509 err = mmc_send_cmd(mmc, &cmd, NULL);
513 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
517 * The card should drive cmd and dat[0:3] low immediately
518 * after the response of cmd11, but wait 100 us to be sure
520 err = mmc_wait_dat0(mmc, 0, 100);
527 * During a signal voltage level switch, the clock must be gated
528 * for 5 ms according to the SD spec
530 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
532 err = mmc_set_signal_voltage(mmc, signal_voltage);
536 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
538 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
541 * Failure to switch is indicated by the card holding
542 * dat[0:3] low. Wait for at least 1 ms according to spec
544 err = mmc_wait_dat0(mmc, 1, 1000);
554 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
561 cmd.cmdidx = MMC_CMD_APP_CMD;
562 cmd.resp_type = MMC_RSP_R1;
565 err = mmc_send_cmd(mmc, &cmd, NULL);
570 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
571 cmd.resp_type = MMC_RSP_R3;
574 * Most cards do not answer if some reserved bits
575 	 * in the ocr are set. However, some controllers
576 	 * can set bit 7 (reserved for low voltages), but
577 	 * how to manage low-voltage SD cards is not yet
580 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
581 (mmc->cfg->voltages & 0xff8000);
583 if (mmc->version == SD_VERSION_2)
584 cmd.cmdarg |= OCR_HCS;
587 cmd.cmdarg |= OCR_S18R;
589 err = mmc_send_cmd(mmc, &cmd, NULL);
594 if (cmd.response[0] & OCR_BUSY)
603 if (mmc->version != SD_VERSION_2)
604 mmc->version = SD_VERSION_1_0;
606 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
607 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
608 cmd.resp_type = MMC_RSP_R3;
611 err = mmc_send_cmd(mmc, &cmd, NULL);
617 mmc->ocr = cmd.response[0];
619 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
620 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
622 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
628 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
634 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
639 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
640 cmd.resp_type = MMC_RSP_R3;
642 if (use_arg && !mmc_host_is_spi(mmc))
643 cmd.cmdarg = OCR_HCS |
644 (mmc->cfg->voltages &
645 (mmc->ocr & OCR_VOLTAGE_MASK)) |
646 (mmc->ocr & OCR_ACCESS_MODE);
648 err = mmc_send_cmd(mmc, &cmd, NULL);
651 mmc->ocr = cmd.response[0];
655 static int mmc_send_op_cond(struct mmc *mmc)
659 /* Some cards seem to need this */
662 /* Asking to the card its capabilities */
663 for (i = 0; i < 2; i++) {
664 err = mmc_send_op_cond_iter(mmc, i != 0);
668 /* exit if not busy (flag seems to be inverted) */
669 if (mmc->ocr & OCR_BUSY)
672 mmc->op_cond_pending = 1;
676 static int mmc_complete_op_cond(struct mmc *mmc)
683 mmc->op_cond_pending = 0;
684 if (!(mmc->ocr & OCR_BUSY)) {
685 /* Some cards seem to need this */
688 start = get_timer(0);
690 err = mmc_send_op_cond_iter(mmc, 1);
693 if (mmc->ocr & OCR_BUSY)
695 if (get_timer(start) > timeout)
701 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
702 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
703 cmd.resp_type = MMC_RSP_R3;
706 err = mmc_send_cmd(mmc, &cmd, NULL);
711 mmc->ocr = cmd.response[0];
714 mmc->version = MMC_VERSION_UNKNOWN;
716 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
723 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
726 struct mmc_data data;
729 /* Get the Card Status Register */
730 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
731 cmd.resp_type = MMC_RSP_R1;
734 data.dest = (char *)ext_csd;
736 data.blocksize = MMC_MAX_BLOCK_LEN;
737 data.flags = MMC_DATA_READ;
739 err = mmc_send_cmd(mmc, &cmd, &data);
744 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
752 cmd.cmdidx = MMC_CMD_SWITCH;
753 cmd.resp_type = MMC_RSP_R1b;
754 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
758 while (retries > 0) {
759 ret = mmc_send_cmd(mmc, &cmd, NULL);
771 /* Waiting for the ready status */
772 return mmc_poll_for_busy(mmc, timeout);
779 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
781 return __mmc_switch(mmc, set, index, value, true);
784 #if !CONFIG_IS_ENABLED(MMC_TINY)
785 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
791 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
797 speed_bits = EXT_CSD_TIMING_HS;
799 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
801 speed_bits = EXT_CSD_TIMING_HS200;
804 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
806 speed_bits = EXT_CSD_TIMING_HS400;
810 speed_bits = EXT_CSD_TIMING_LEGACY;
816 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
817 speed_bits, !hsdowngrade);
821 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
822 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
824 * In case the eMMC is in HS200/HS400 mode and we are downgrading
825 	 * to HS mode, the card clock is still running much faster than
826 * the supported HS mode clock, so we can not reliably read out
827 * Extended CSD. Reconfigure the controller to run at HS mode.
830 mmc_select_mode(mmc, MMC_HS);
831 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
835 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
836 /* Now check to see that it worked */
837 err = mmc_send_ext_csd(mmc, test_csd);
841 /* No high-speed support */
842 if (!test_csd[EXT_CSD_HS_TIMING])
849 static int mmc_get_capabilities(struct mmc *mmc)
851 u8 *ext_csd = mmc->ext_csd;
854 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
856 if (mmc_host_is_spi(mmc))
859 /* Only version 4 supports high-speed */
860 if (mmc->version < MMC_VERSION_4)
864 		pr_err("No ext_csd found!\n"); /* this should never happen */
868 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
870 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
871 mmc->cardtype = cardtype;
873 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
874 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
875 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
876 mmc->card_caps |= MMC_MODE_HS200;
879 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
880 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
881 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
882 mmc->card_caps |= MMC_MODE_HS400;
885 if (cardtype & EXT_CSD_CARD_TYPE_52) {
886 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
887 mmc->card_caps |= MMC_MODE_DDR_52MHz;
888 mmc->card_caps |= MMC_MODE_HS_52MHz;
890 if (cardtype & EXT_CSD_CARD_TYPE_26)
891 mmc->card_caps |= MMC_MODE_HS;
897 static int mmc_set_capacity(struct mmc *mmc, int part_num)
901 mmc->capacity = mmc->capacity_user;
905 mmc->capacity = mmc->capacity_boot;
908 mmc->capacity = mmc->capacity_rpmb;
914 mmc->capacity = mmc->capacity_gp[part_num - 4];
920 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
925 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
926 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
931 if (part_num & PART_ACCESS_MASK)
932 forbidden = MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400);
934 if (MMC_CAP(mmc->selected_mode) & forbidden) {
935 pr_debug("selected mode (%s) is forbidden for part %d\n",
936 mmc_mode_name(mmc->selected_mode), part_num);
938 } else if (mmc->selected_mode != mmc->best_mode) {
939 pr_debug("selected mode is not optimal\n");
944 return mmc_select_mode_and_width(mmc,
945 mmc->card_caps & ~forbidden);
950 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
951 unsigned int part_num)
957 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
961 ret = mmc_boot_part_access_chk(mmc, part_num);
965 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
966 (mmc->part_config & ~PART_ACCESS_MASK)
967 | (part_num & PART_ACCESS_MASK));
970 * Set the capacity if the switch succeeded or was intended
971 * to return to representing the raw device.
973 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
974 ret = mmc_set_capacity(mmc, part_num);
975 mmc_get_blk_desc(mmc)->hwpart = part_num;
981 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
982 int mmc_hwpart_config(struct mmc *mmc,
983 const struct mmc_hwpart_conf *conf,
984 enum mmc_hwpart_conf_mode mode)
990 u32 max_enh_size_mult;
991 u32 tot_enh_size_mult = 0;
994 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
996 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
999 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1000 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1001 return -EMEDIUMTYPE;
1004 if (!(mmc->part_support & PART_SUPPORT)) {
1005 pr_err("Card does not support partitioning\n");
1006 return -EMEDIUMTYPE;
1009 if (!mmc->hc_wp_grp_size) {
1010 pr_err("Card does not define HC WP group size\n");
1011 return -EMEDIUMTYPE;
1014 /* check partition alignment and total enhanced size */
1015 if (conf->user.enh_size) {
1016 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1017 conf->user.enh_start % mmc->hc_wp_grp_size) {
1018 pr_err("User data enhanced area not HC WP group "
1022 part_attrs |= EXT_CSD_ENH_USR;
1023 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1024 if (mmc->high_capacity) {
1025 enh_start_addr = conf->user.enh_start;
1027 enh_start_addr = (conf->user.enh_start << 9);
1033 tot_enh_size_mult += enh_size_mult;
1035 for (pidx = 0; pidx < 4; pidx++) {
1036 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1037 pr_err("GP%i partition not HC WP group size "
1038 "aligned\n", pidx+1);
1041 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1042 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1043 part_attrs |= EXT_CSD_ENH_GP(pidx);
1044 tot_enh_size_mult += gp_size_mult[pidx];
1048 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1049 pr_err("Card does not support enhanced attribute\n");
1050 return -EMEDIUMTYPE;
1053 err = mmc_send_ext_csd(mmc, ext_csd);
1058 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1059 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1060 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1061 if (tot_enh_size_mult > max_enh_size_mult) {
1062 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1063 tot_enh_size_mult, max_enh_size_mult);
1064 return -EMEDIUMTYPE;
1067 /* The default value of EXT_CSD_WR_REL_SET is device
1068 * dependent, the values can only be changed if the
1069 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1070 * changed only once and before partitioning is completed. */
1071 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1072 if (conf->user.wr_rel_change) {
1073 if (conf->user.wr_rel_set)
1074 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1076 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1078 for (pidx = 0; pidx < 4; pidx++) {
1079 if (conf->gp_part[pidx].wr_rel_change) {
1080 if (conf->gp_part[pidx].wr_rel_set)
1081 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1083 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1087 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1088 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1089 puts("Card does not support host controlled partition write "
1090 "reliability settings\n");
1091 return -EMEDIUMTYPE;
1094 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1095 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1096 pr_err("Card already partitioned\n");
1100 if (mode == MMC_HWPART_CONF_CHECK)
1103 /* Partitioning requires high-capacity size definitions */
1104 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1105 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1106 EXT_CSD_ERASE_GROUP_DEF, 1);
1111 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1113 /* update erase group size to be high-capacity */
1114 mmc->erase_grp_size =
1115 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1119 /* all OK, write the configuration */
1120 for (i = 0; i < 4; i++) {
1121 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1122 EXT_CSD_ENH_START_ADDR+i,
1123 (enh_start_addr >> (i*8)) & 0xFF);
1127 for (i = 0; i < 3; i++) {
1128 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1129 EXT_CSD_ENH_SIZE_MULT+i,
1130 (enh_size_mult >> (i*8)) & 0xFF);
1134 for (pidx = 0; pidx < 4; pidx++) {
1135 for (i = 0; i < 3; i++) {
1136 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1137 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1138 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1143 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1144 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1148 if (mode == MMC_HWPART_CONF_SET)
1151 /* The WR_REL_SET is a write-once register but shall be
1152 * written before setting PART_SETTING_COMPLETED. As it is
1153 * write-once we can only write it when completing the
1155 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1156 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1157 EXT_CSD_WR_REL_SET, wr_rel_set);
1162 /* Setting PART_SETTING_COMPLETED confirms the partition
1163 * configuration but it only becomes effective after power
1164 * cycle, so we do not adjust the partition related settings
1165 * in the mmc struct. */
1167 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1168 EXT_CSD_PARTITION_SETTING,
1169 EXT_CSD_PARTITION_SETTING_COMPLETED);
1177 #if !CONFIG_IS_ENABLED(DM_MMC)
1178 int mmc_getcd(struct mmc *mmc)
1182 cd = board_mmc_getcd(mmc);
1185 if (mmc->cfg->ops->getcd)
1186 cd = mmc->cfg->ops->getcd(mmc);
1195 #if !CONFIG_IS_ENABLED(MMC_TINY)
1196 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1199 struct mmc_data data;
1201 /* Switch the frequency */
1202 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1203 cmd.resp_type = MMC_RSP_R1;
1204 cmd.cmdarg = (mode << 31) | 0xffffff;
1205 cmd.cmdarg &= ~(0xf << (group * 4));
1206 cmd.cmdarg |= value << (group * 4);
1208 data.dest = (char *)resp;
1209 data.blocksize = 64;
1211 data.flags = MMC_DATA_READ;
1213 return mmc_send_cmd(mmc, &cmd, &data);
1216 static int sd_get_capabilities(struct mmc *mmc)
1220 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1221 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1222 struct mmc_data data;
1224 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1228 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1230 if (mmc_host_is_spi(mmc))
1233 /* Read the SCR to find out if this card supports higher speeds */
1234 cmd.cmdidx = MMC_CMD_APP_CMD;
1235 cmd.resp_type = MMC_RSP_R1;
1236 cmd.cmdarg = mmc->rca << 16;
1238 err = mmc_send_cmd(mmc, &cmd, NULL);
1243 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1244 cmd.resp_type = MMC_RSP_R1;
1250 data.dest = (char *)scr;
1253 data.flags = MMC_DATA_READ;
1255 err = mmc_send_cmd(mmc, &cmd, &data);
1264 mmc->scr[0] = __be32_to_cpu(scr[0]);
1265 mmc->scr[1] = __be32_to_cpu(scr[1]);
1267 switch ((mmc->scr[0] >> 24) & 0xf) {
1269 mmc->version = SD_VERSION_1_0;
1272 mmc->version = SD_VERSION_1_10;
1275 mmc->version = SD_VERSION_2;
1276 if ((mmc->scr[0] >> 15) & 0x1)
1277 mmc->version = SD_VERSION_3;
1280 mmc->version = SD_VERSION_1_0;
1284 if (mmc->scr[0] & SD_DATA_4BIT)
1285 mmc->card_caps |= MMC_MODE_4BIT;
1287 /* Version 1.0 doesn't support switching */
1288 if (mmc->version == SD_VERSION_1_0)
1293 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1294 (u8 *)switch_status);
1299 /* The high-speed function is busy. Try again */
1300 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1304 /* If high-speed isn't supported, we return */
1305 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1306 mmc->card_caps |= MMC_CAP(SD_HS);
1308 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1309 	/* Versions before 3.0 don't support UHS modes */
1310 if (mmc->version < SD_VERSION_3)
1313 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1314 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1315 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1316 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1317 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1318 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1319 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1320 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1321 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1322 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1323 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1329 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1333 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1336 	/* SD versions 1.00 and 1.01 do not support CMD 6 */
1337 if (mmc->version == SD_VERSION_1_0)
1342 speed = UHS_SDR12_BUS_SPEED;
1345 speed = HIGH_SPEED_BUS_SPEED;
1347 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1349 speed = UHS_SDR12_BUS_SPEED;
1352 speed = UHS_SDR25_BUS_SPEED;
1355 speed = UHS_SDR50_BUS_SPEED;
1358 speed = UHS_DDR50_BUS_SPEED;
1361 speed = UHS_SDR104_BUS_SPEED;
1368 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1372 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1378 static int sd_select_bus_width(struct mmc *mmc, int w)
1383 if ((w != 4) && (w != 1))
1386 cmd.cmdidx = MMC_CMD_APP_CMD;
1387 cmd.resp_type = MMC_RSP_R1;
1388 cmd.cmdarg = mmc->rca << 16;
1390 err = mmc_send_cmd(mmc, &cmd, NULL);
1394 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1395 cmd.resp_type = MMC_RSP_R1;
1400 err = mmc_send_cmd(mmc, &cmd, NULL);
1408 #if CONFIG_IS_ENABLED(MMC_WRITE)
1409 static int sd_read_ssr(struct mmc *mmc)
1411 static const unsigned int sd_au_size[] = {
1412 0, SZ_16K / 512, SZ_32K / 512,
1413 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1414 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1415 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1416 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1421 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1422 struct mmc_data data;
1424 unsigned int au, eo, et, es;
1426 cmd.cmdidx = MMC_CMD_APP_CMD;
1427 cmd.resp_type = MMC_RSP_R1;
1428 cmd.cmdarg = mmc->rca << 16;
1430 err = mmc_send_cmd(mmc, &cmd, NULL);
1434 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1435 cmd.resp_type = MMC_RSP_R1;
1439 data.dest = (char *)ssr;
1440 data.blocksize = 64;
1442 data.flags = MMC_DATA_READ;
1444 err = mmc_send_cmd(mmc, &cmd, &data);
1452 for (i = 0; i < 16; i++)
1453 ssr[i] = be32_to_cpu(ssr[i]);
1455 au = (ssr[2] >> 12) & 0xF;
1456 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1457 mmc->ssr.au = sd_au_size[au];
1458 es = (ssr[3] >> 24) & 0xFF;
1459 es |= (ssr[2] & 0xFF) << 8;
1460 et = (ssr[3] >> 18) & 0x3F;
1462 eo = (ssr[3] >> 16) & 0x3;
1463 mmc->ssr.erase_timeout = (et * 1000) / es;
1464 mmc->ssr.erase_offset = eo * 1000;
1467 pr_debug("Invalid Allocation Unit Size.\n");
1473 /* frequency bases */
1474 /* divided by 10 to be nice to platforms without floating point */
1475 static const int fbase[] = {
1482 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1483 * to platforms without floating point.
1485 static const u8 multipliers[] = {
1504 static inline int bus_width(uint cap)
1506 if (cap == MMC_MODE_8BIT)
1508 if (cap == MMC_MODE_4BIT)
1510 if (cap == MMC_MODE_1BIT)
1512 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1516 #if !CONFIG_IS_ENABLED(DM_MMC)
1517 #ifdef MMC_SUPPORTS_TUNING
1518 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1524 static int mmc_set_ios(struct mmc *mmc)
1528 if (mmc->cfg->ops->set_ios)
1529 ret = mmc->cfg->ops->set_ios(mmc);
1535 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1538 if (clock > mmc->cfg->f_max)
1539 clock = mmc->cfg->f_max;
1541 if (clock < mmc->cfg->f_min)
1542 clock = mmc->cfg->f_min;
1546 mmc->clk_disable = disable;
1548 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1550 return mmc_set_ios(mmc);
1553 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1555 mmc->bus_width = width;
1557 return mmc_set_ios(mmc);
1560 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1562 * helper function to display the capabilities in a human
1563 * friendly manner. The capabilities include bus width and
1566 void mmc_dump_capabilities(const char *text, uint caps)
1570 pr_debug("%s: widths [", text);
1571 if (caps & MMC_MODE_8BIT)
1573 if (caps & MMC_MODE_4BIT)
1575 if (caps & MMC_MODE_1BIT)
1577 pr_debug("\b\b] modes [");
1578 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1579 if (MMC_CAP(mode) & caps)
1580 pr_debug("%s, ", mmc_mode_name(mode));
1581 pr_debug("\b\b]\n");
1585 struct mode_width_tuning {
1588 #ifdef MMC_SUPPORTS_TUNING
1593 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1594 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1597 case MMC_SIGNAL_VOLTAGE_000: return 0;
1598 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1599 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1600 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1605 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1609 if (mmc->signal_voltage == signal_voltage)
1612 mmc->signal_voltage = signal_voltage;
1613 err = mmc_set_ios(mmc);
1615 pr_debug("unable to set voltage (err %d)\n", err);
1620 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1626 #if !CONFIG_IS_ENABLED(MMC_TINY)
1627 static const struct mode_width_tuning sd_modes_by_pref[] = {
1628 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1629 #ifdef MMC_SUPPORTS_TUNING
1632 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1633 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1638 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1642 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1646 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1651 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1653 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1656 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1661 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1665 #define for_each_sd_mode_by_pref(caps, mwt) \
1666 for (mwt = sd_modes_by_pref;\
1667 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1669 if (caps & MMC_CAP(mwt->mode))
/*
 * sd_select_mode_and_width() - pick the best bus mode and width for an
 * SD card and program both the card and the host accordingly.
 *
 * Candidate modes are walked fastest-first, widths widest-first, and the
 * search is restricted to the intersection of card and host capabilities.
 * For each candidate: set the bus width (card then host), set the card
 * speed, select the mode on the host side, run tuning when the mode
 * requires it, and (with MMC_WRITE) refresh the SSR.  On failure the
 * card is reverted to SD_LEGACY at a safe clock before trying the next
 * candidate.
 */
1671 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1674 	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1675 	const struct mode_width_tuning *mwt;
1676 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS modes are only usable if the card accepted 1.8V signalling */
1677 	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1679 	bool uhs_en = false;
1684 	mmc_dump_capabilities("sd card", card_caps);
1685 	mmc_dump_capabilities("host", mmc->host_caps);
1688 	/* Restrict card's capabilities by what the host can do */
1689 	caps = card_caps & mmc->host_caps;
1694 	for_each_sd_mode_by_pref(caps, mwt) {
1697 		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1698 			if (*w & caps & mwt->widths) {
1699 				pr_debug("trying mode %s width %d (at %d MHz)\n",
1700 				 mmc_mode_name(mwt->mode),
1702 				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1704 				/* configure the bus width (card + host) */
1705 				err = sd_select_bus_width(mmc, bus_width(*w));
1708 				mmc_set_bus_width(mmc, bus_width(*w));
1710 				/* configure the bus mode (card) */
1711 				err = sd_set_card_speed(mmc, mwt->mode);
1715 				/* configure the bus mode (host) */
1716 				mmc_select_mode(mmc, mwt->mode);
1717 				mmc_set_clock(mmc, mmc->tran_speed,
1720 #ifdef MMC_SUPPORTS_TUNING
1721 				/* execute tuning if needed */
1722 				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1723 					err = mmc_execute_tuning(mmc,
1726 						pr_debug("tuning failed\n");
1732 #if CONFIG_IS_ENABLED(MMC_WRITE)
1733 				err = sd_read_ssr(mmc);
1735 					pr_warn("unable to read ssr\n");
				/* candidate failed: fall back before next try */
1741 				/* revert to a safer bus speed */
1742 				mmc_select_mode(mmc, SD_LEGACY);
1743 				mmc_set_clock(mmc, mmc->tran_speed,
1749 	pr_err("unable to select a mode\n");
1754  * read and compare the part of the ext csd that is constant.
1755  * This can be used to check that the transfer is working
/*
 * mmc_read_and_compare_ext_csd() - sanity-check the current bus setup by
 * re-reading EXT_CSD and comparing its read-only fields against the copy
 * cached in mmc->ext_csd.  Only meaningful for MMC v4+ (older cards have
 * no EXT_CSD).
 */
1758 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1761 	const u8 *ext_csd = mmc->ext_csd;
1762 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1764 	if (mmc->version < MMC_VERSION_4)
1767 	err = mmc_send_ext_csd(mmc, test_csd);
1771 	/* Only compare read only fields */
1772 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1773 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1774 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1775 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1776 	    ext_csd[EXT_CSD_REV]
1777 		== test_csd[EXT_CSD_REV] &&
1778 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1779 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1780 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1781 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1787 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - select the lowest I/O signal voltage that
 * both the card (per its EXT_CSD card type bits for the given mode) and
 * the caller's allowed_mask support.
 *
 * Builds a card-side voltage mask from mmc->cardtype, then repeatedly
 * tries the lowest remaining candidate (ffs() finds the lowest set bit)
 * until mmc_set_signal_voltage() succeeds or no candidates remain.
 */
1788 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1789 				  uint32_t allowed_mask)
1796 		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1797 		    EXT_CSD_CARD_TYPE_HS400_1_8V))
1798 			card_mask |= MMC_SIGNAL_VOLTAGE_180;
1799 		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1800 		    EXT_CSD_CARD_TYPE_HS400_1_2V))
1801 			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1804 		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1805 			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1806 				     MMC_SIGNAL_VOLTAGE_180;
1807 		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1808 			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1811 		card_mask |= MMC_SIGNAL_VOLTAGE_330;
	/* try lowest supported voltage first; drop it from the mask on failure */
1815 	while (card_mask & allowed_mask) {
1816 		enum mmc_voltage best_match;
1818 		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1819 		if (!mmc_set_signal_voltage(mmc,  best_match))
1822 		allowed_mask &= ~best_match;
/* No-op stub when I/O voltage switching support is compiled out */
1828 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1829 					 uint32_t allowed_mask)
/*
 * eMMC bus modes in order of preference (fastest first).  Each entry
 * lists the widths usable in that mode and, for HS200/HS400, the tuning
 * command that must be executed.
 */
1835 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1836 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1839 		.widths = MMC_MODE_8BIT,
		/* HS400 is tuned in HS200 mode, hence the HS200 tuning cmd */
1840 		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1843 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1846 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1847 		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1852 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1856 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1860 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1864 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate over mmc_modes_by_pref[] (fastest mode first), visiting only
 * entries whose bus mode is present in the given capability mask.
 */
1868 #define for_each_mmc_mode_by_pref(caps, mwt) \
1869 	for (mwt = mmc_modes_by_pref;\
1870 	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1872 		if (caps & MMC_CAP(mwt->mode))
/*
 * Mapping from host capability bit (+ DDR flag) to the EXT_CSD
 * BUS_WIDTH value to program into the card.  Ordered widest/DDR first
 * so for_each_supported_width() tries the best option first.
 */
1874 static const struct ext_csd_bus_width {
1878 } ext_csd_bus_width[] = {
1879 	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1880 	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1881 	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1882 	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1883 	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1886 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - bring the card up in HS400 mode.
 *
 * HS400 cannot be entered or tuned directly: tuning is performed in
 * HS200, then the card is dropped back to HS so the bus width can be
 * switched to 8-bit DDR, and only then is the HS400 timing selected.
 */
1887 static int mmc_select_hs400(struct mmc *mmc)
1891 	/* Set timing to HS200 for tuning */
1892 	err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1896 	/* configure the bus mode (host) */
1897 	mmc_select_mode(mmc, MMC_HS_200);
1898 	mmc_set_clock(mmc, mmc->tran_speed, false);
1900 	/* execute tuning if needed */
1901 	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1903 		debug("tuning failed\n");
1907 	/* Set back to HS */
1908 	mmc_set_card_speed(mmc, MMC_HS, true);
	/* switch the card to 8-bit DDR before selecting HS400 timing */
1910 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1911 			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1915 	err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1919 	mmc_select_mode(mmc, MMC_HS_400);
1920 	err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is compiled out */
1927 static int mmc_select_hs400(struct mmc *mmc)
/*
 * Iterate over ext_csd_bus_width[] (widest first), visiting only entries
 * matching the requested DDR-ness whose width bit is in the caps mask.
 */
1933 #define for_each_supported_width(caps, ddr, ecbv) \
1934 	for (ecbv = ext_csd_bus_width;\
1935 	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1937 		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - pick the best bus mode and width for an
 * eMMC/MMC card and program both the card and the host accordingly.
 *
 * Candidates are walked fastest-mode-first and widest-width-first,
 * restricted to the intersection of card and host capabilities.  For
 * each candidate: lower the signal voltage if possible, switch the
 * card's bus width, then either run the HS400 bring-up sequence or set
 * card speed / DDR width / host mode in order, run tuning when needed,
 * and verify the result by re-reading EXT_CSD.  On failure the card is
 * reverted to 1-bit legacy mode before trying the next candidate.
 */
1939 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1942 	const struct mode_width_tuning *mwt;
1943 	const struct ext_csd_bus_width *ecbw;
1946 	mmc_dump_capabilities("mmc", card_caps);
1947 	mmc_dump_capabilities("host", mmc->host_caps);
1950 	/* Restrict card's capabilities by what the host can do */
1951 	card_caps &= mmc->host_caps;
1953 	/* Only version 4 of MMC supports wider bus widths */
1954 	if (mmc->version < MMC_VERSION_4)
1957 	if (!mmc->ext_csd) {
1958 		pr_debug("No ext_csd found!\n"); /* this should never happen */
1962 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1963     CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1965 	 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1966 	 * before doing anything else, since a transition from either of
1967 	 * the HS200/HS400 mode directly to legacy mode is not supported.
1969 	if (mmc->selected_mode == MMC_HS_200 ||
1970 	    mmc->selected_mode == MMC_HS_400)
1971 		mmc_set_card_speed(mmc, MMC_HS, true);
1974 	mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1976 	for_each_mmc_mode_by_pref(card_caps, mwt) {
1977 		for_each_supported_width(card_caps & mwt->widths,
1978 					 mmc_is_mode_ddr(mwt->mode), ecbw) {
1979 			enum mmc_voltage old_voltage;
1980 			pr_debug("trying mode %s width %d (at %d MHz)\n",
1981 				 mmc_mode_name(mwt->mode),
1982 				 bus_width(ecbw->cap),
1983 				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so we can restore it on failure */
1984 			old_voltage = mmc->signal_voltage;
1985 			err = mmc_set_lowest_voltage(mmc, mwt->mode,
1986 						     MMC_ALL_SIGNAL_VOLTAGE);
1990 			/* configure the bus width (card + host) */
1991 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1993 				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1996 			mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1998 			if (mwt->mode == MMC_HS_400) {
1999 				err = mmc_select_hs400(mmc);
2001 					printf("Select HS400 failed %d\n", err);
2005 				/* configure the bus speed (card) */
2006 				err = mmc_set_card_speed(mmc, mwt->mode, false);
2011 				 * configure the bus width AND the ddr mode
2012 				 * (card). The host side will be taken care
2013 				 * of in the next step
2015 				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2016 					err = mmc_switch(mmc,
2017 							 EXT_CSD_CMD_SET_NORMAL,
2019 							 ecbw->ext_csd_bits);
2024 				/* configure the bus mode (host) */
2025 				mmc_select_mode(mmc, mwt->mode);
2026 				mmc_set_clock(mmc, mmc->tran_speed,
2028 #ifdef MMC_SUPPORTS_TUNING
2030 				/* execute tuning if needed */
2032 					err = mmc_execute_tuning(mmc,
2035 						pr_debug("tuning failed\n");
2042 			/* do a transfer to check the configuration */
2043 			err = mmc_read_and_compare_ext_csd(mmc);
2047 			mmc_set_signal_voltage(mmc, old_voltage);
2048 			/* if an error occurred, revert to a safer bus mode */
2049 			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2050 				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2051 			mmc_select_mode(mmc, MMC_LEGACY);
2052 			mmc_set_bus_width(mmc, 1);
2056 	pr_err("unable to select a mode\n");
2062 #if CONFIG_IS_ENABLED(MMC_TINY)
/* Static EXT_CSD buffer for MMC_TINY builds (no malloc available/desired) */
2063 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * mmc_startup_v4() - MMC v4+ specific startup: read and cache EXT_CSD,
 * derive the exact device version, user/boot/RPMB/GP partition sizes,
 * erase and write-protect group sizes, and enable ERASE_GRP_DEF when the
 * device is partitioned.  A no-op for SD cards and pre-v4 MMC.
 */
2066 static int mmc_startup_v4(struct mmc *mmc)
2070 	bool has_parts = false;
2071 	bool part_completed;
	/* EXT_CSD_REV -> MMC_VERSION_* lookup table */
2072 	static const u32 mmc_versions[] = {
2084 #if CONFIG_IS_ENABLED(MMC_TINY)
2085 	u8 *ext_csd = ext_csd_bkup;
2087 	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
	/*
	 * NOTE(review): ext_csd_bkup comes from DEFINE_CACHE_ALIGN_BUFFER;
	 * if that macro declares a pointer alias, sizeof() here clears only
	 * pointer-size bytes, not the whole buffer — verify the macro.
	 */
2091 	memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2093 	err = mmc_send_ext_csd(mmc, ext_csd);
2097 	/* store the ext csd for future reference */
2099 	mmc->ext_csd = ext_csd;
2101 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2103 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2106 	/* check ext_csd version and capacity */
2107 	err = mmc_send_ext_csd(mmc, ext_csd);
2111 	/* store the ext csd for future reference */
2113 	mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2116 	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2118 	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2121 	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2123 	if (mmc->version >= MMC_VERSION_4_2) {
2125 		 * According to the JEDEC Standard, the value of
2126 		 * ext_csd's capacity is valid if the value is more
		/* SEC_CNT is a little-endian 32-bit sector count */
2129 		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2130 			   | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2131 			   | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2132 			   | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2133 		capacity *= MMC_MAX_BLOCK_LEN;
2134 		if ((capacity >> 20) > 2 * 1024)
2135 			mmc->capacity_user = capacity;
2138 	/* The partition data may be non-zero but it is only
2139 	 * effective if PARTITION_SETTING_COMPLETED is set in
2140 	 * EXT_CSD, so ignore any data if this bit is not set,
2141 	 * except for enabling the high-capacity group size
2142 	 * definition (see below).
2144 	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2145 			    EXT_CSD_PARTITION_SETTING_COMPLETED);
2147 	/* store the partition info of emmc */
2148 	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2149 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2150 	    ext_csd[EXT_CSD_BOOT_MULT])
2151 		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2152 	if (part_completed &&
2153 	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2154 		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
	/* BOOT_MULT / RPMB_MULT are in units of 128 KiB (<< 17) */
2156 	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2158 	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
	/* up to 4 general-purpose partitions, 3 size-mult bytes each */
2160 	for (i = 0; i < 4; i++) {
2161 		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2162 		uint mult = (ext_csd[idx + 2] << 16) +
2163 			    (ext_csd[idx + 1] << 8) + ext_csd[idx];
2166 		if (!part_completed)
2168 		mmc->capacity_gp[i] = mult;
2169 		mmc->capacity_gp[i] *=
2170 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2171 		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2172 		mmc->capacity_gp[i] <<= 19;
2175 #ifndef CONFIG_SPL_BUILD
2176 	if (part_completed) {
2177 		mmc->enh_user_size =
2178 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2179 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2180 			ext_csd[EXT_CSD_ENH_SIZE_MULT];
2181 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2182 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2183 		mmc->enh_user_size <<= 19;
2184 		mmc->enh_user_start =
2185 			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2186 			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2187 			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2188 			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* high-capacity devices address by 512-byte sector */
2189 		if (mmc->high_capacity)
2190 			mmc->enh_user_start <<= 9;
2195 	 * Host needs to enable ERASE_GRP_DEF bit if device is
2196 	 * partitioned. This bit will be lost every time after a reset
2197 	 * or power off. This will affect erase size.
2201 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2202 	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2205 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2206 				 EXT_CSD_ERASE_GROUP_DEF, 1);
		/* keep the cached copy in sync with what we just wrote */
2211 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2214 	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2215 #if CONFIG_IS_ENABLED(MMC_WRITE)
2216 		/* Read out group size from ext_csd */
2217 		mmc->erase_grp_size =
2218 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2221 		 * if high capacity and partition setting completed
2222 		 * SEC_COUNT is valid even if it is smaller than 2 GiB
2223 		 * JEDEC Standard JESD84-B45, 6.2.4
2225 		if (mmc->high_capacity && part_completed) {
2226 			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2227 				   (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2228 				   (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2229 				   (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2230 			capacity *= MMC_MAX_BLOCK_LEN;
2231 			mmc->capacity_user = capacity;
2234 #if CONFIG_IS_ENABLED(MMC_WRITE)
2236 		/* Calculate the group size from the csd value. */
2237 		int erase_gsz, erase_gmul;
2239 		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2240 		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2241 		mmc->erase_grp_size = (erase_gsz + 1)
2245 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
	/* write-protect group size, in 512-byte blocks (1024 = 512 KiB / 512) */
2246 	mmc->hc_wp_grp_size = 1024
2247 		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2248 		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2251 	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	/* error path: drop the cached EXT_CSD (heap-allocated unless MMC_TINY) */
2256 #if !CONFIG_IS_ENABLED(MMC_TINY)
2259 	mmc->ext_csd = NULL;
/*
 * mmc_startup() - full card enumeration after a successful OCR exchange.
 *
 * Runs the identification sequence (CID, relative address, CSD), decodes
 * version/frequency/block-length/capacity from the CSD, selects the card
 * into Transfer state, performs the v4+ EXT_CSD startup, negotiates the
 * best bus mode/width, and finally fills in the block-device descriptor
 * (blksz, lba, vendor/product/revision strings).
 */
2264 static int mmc_startup(struct mmc *mmc)
2270 	struct blk_desc *bdesc;
2272 #ifdef CONFIG_MMC_SPI_CRC_ON
2273 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2274 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2275 		cmd.resp_type = MMC_RSP_R1;
2277 		err = mmc_send_cmd(mmc, &cmd, NULL);
2283 	/* Put the Card in Identify Mode */
2284 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2285 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2286 	cmd.resp_type = MMC_RSP_R2;
2289 	err = mmc_send_cmd(mmc, &cmd, NULL);
2291 #ifdef CONFIG_MMC_QUIRKS
2292 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2295 		 * It has been seen that SEND_CID may fail on the first
2296 		 * attempt, let's try a few more times
2299 			err = mmc_send_cmd(mmc, &cmd, NULL);
2302 		} while (retries--);
2309 	memcpy(mmc->cid, cmd.response, 16);
2312 	 * For MMC cards, set the Relative Address.
2313 	 * For SD cards, get the Relative Address.
2314 	 * This also puts the cards into Standby State
2316 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2317 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2318 		cmd.cmdarg = mmc->rca << 16;
2319 		cmd.resp_type = MMC_RSP_R6;
2321 		err = mmc_send_cmd(mmc, &cmd, NULL);
		/* SD assigns its own RCA; read it back from the response */
2327 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2330 	/* Get the Card-Specific Data */
2331 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2332 	cmd.resp_type = MMC_RSP_R2;
2333 	cmd.cmdarg = mmc->rca << 16;
2335 	err = mmc_send_cmd(mmc, &cmd, NULL);
2340 	mmc->csd[0] = cmd.response[0];
2341 	mmc->csd[1] = cmd.response[1];
2342 	mmc->csd[2] = cmd.response[2];
2343 	mmc->csd[3] = cmd.response[3];
	/* CSD_STRUCTURE/SPEC_VERS field -> coarse MMC version */
2345 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2346 		int version = (cmd.response[0] >> 26) & 0xf;
2350 			mmc->version = MMC_VERSION_1_2;
2353 			mmc->version = MMC_VERSION_1_4;
2356 			mmc->version = MMC_VERSION_2_2;
2359 			mmc->version = MMC_VERSION_3;
2362 			mmc->version = MMC_VERSION_4;
2365 			mmc->version = MMC_VERSION_1_2;
2370 	/* divide frequency by 10, since the mults are 10x bigger */
2371 	freq = fbase[(cmd.response[0] & 0x7)];
2372 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2374 	mmc->legacy_speed = freq * mult;
2375 	mmc_select_mode(mmc, MMC_LEGACY);
2377 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2378 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2379 #if CONFIG_IS_ENABLED(MMC_WRITE)
2382 		mmc->write_bl_len = mmc->read_bl_len;
2384 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
	/* decode C_SIZE/C_SIZE_MULT: layouts differ for high-capacity cards */
2387 	if (mmc->high_capacity) {
2388 		csize = (mmc->csd[1] & 0x3f) << 16
2389 			| (mmc->csd[2] & 0xffff0000) >> 16;
2392 		csize = (mmc->csd[1] & 0x3ff) << 2
2393 			| (mmc->csd[2] & 0xc0000000) >> 30;
2394 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2397 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2398 	mmc->capacity_user *= mmc->read_bl_len;
2399 	mmc->capacity_boot = 0;
2400 	mmc->capacity_rpmb = 0;
2401 	for (i = 0; i < 4; i++)
2402 		mmc->capacity_gp[i] = 0;
2404 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2405 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2407 #if CONFIG_IS_ENABLED(MMC_WRITE)
2408 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2409 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	/* program the driver stage register if the card implements it */
2412 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2413 		cmd.cmdidx = MMC_CMD_SET_DSR;
2414 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2415 		cmd.resp_type = MMC_RSP_NONE;
2416 		if (mmc_send_cmd(mmc, &cmd, NULL))
2417 			pr_warn("MMC: SET_DSR failed\n");
2420 	/* Select the card, and put it into Transfer Mode */
2421 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2422 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2423 		cmd.resp_type = MMC_RSP_R1;
2424 		cmd.cmdarg = mmc->rca << 16;
2425 		err = mmc_send_cmd(mmc, &cmd, NULL);
2432 	 * For SD, its erase group is always one sector
2434 #if CONFIG_IS_ENABLED(MMC_WRITE)
2435 	mmc->erase_grp_size = 1;
2437 	mmc->part_config = MMCPART_NOAVAILABLE;
2439 	err = mmc_startup_v4(mmc);
2443 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2447 #if CONFIG_IS_ENABLED(MMC_TINY)
	/* MMC_TINY: skip mode negotiation, stay in 1-bit legacy mode */
2448 	mmc_set_clock(mmc, mmc->legacy_speed, false);
2449 	mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2450 	mmc_set_bus_width(mmc, 1);
2453 		err = sd_get_capabilities(mmc);
2456 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2458 		err = mmc_get_capabilities(mmc);
2461 		mmc_select_mode_and_width(mmc, mmc->card_caps);
2467 	mmc->best_mode = mmc->selected_mode;
2469 	/* Fix the block length for DDR mode */
2470 	if (mmc->ddr_mode) {
2471 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2472 #if CONFIG_IS_ENABLED(MMC_WRITE)
2473 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2477 	/* fill in device description */
2478 	bdesc = mmc_get_blk_desc(mmc);
2482 	bdesc->blksz = mmc->read_bl_len;
2483 	bdesc->log2blksz = LOG2(bdesc->blksz);
2484 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2485 #if !defined(CONFIG_SPL_BUILD) || \
2486 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2487 		!defined(CONFIG_USE_TINY_PRINTF))
	/* decode manufacturer/serial/product/revision strings from the CID */
2488 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2489 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2490 		(mmc->cid[3] >> 16) & 0xffff);
2491 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2492 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2493 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2494 		(mmc->cid[2] >> 24) & 0xff);
2495 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2496 		(mmc->cid[2] >> 16) & 0xf);
2498 	bdesc->vendor[0] = 0;
2499 	bdesc->product[0] = 0;
2500 	bdesc->revision[0] = 0;
2503 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * mmc_send_if_cond() - issue CMD8 (SEND_IF_COND) to detect an SD v2 card.
 *
 * The command carries a host-voltage bit plus the 0xaa check pattern;
 * a valid response must echo the pattern back.  On success the card is
 * marked as SD_VERSION_2.
 */
2510 static int mmc_send_if_cond(struct mmc *mmc)
2515 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2516 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2517 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2518 	cmd.resp_type = MMC_RSP_R7;
2520 	err = mmc_send_cmd(mmc, &cmd, NULL);
	/* the card must echo the 0xaa check pattern */
2525 	if ((cmd.response[0] & 0xff) != 0xaa)
2528 		mmc->version = SD_VERSION_2;
2533 #if !CONFIG_IS_ENABLED(DM_MMC)
2534 /* board-specific MMC power initializations; weak default does nothing. */
2535 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - look up the card's power supplies.
 *
 * With DM_MMC + DM_REGULATOR, resolve the vmmc (card power) and vqmmc
 * (I/O signalling) regulators from the device tree; a missing supply is
 * not fatal and is only logged.  Without driver model, fall back to the
 * board hook.
 */
2540 static int mmc_power_init(struct mmc *mmc)
2542 #if CONFIG_IS_ENABLED(DM_MMC)
2543 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2546 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2549 		pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2551 	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2552 					  &mmc->vqmmc_supply);
2554 		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2556 #else /* !CONFIG_DM_MMC */
2558 	 * Driver model should use a regulator, as above, rather than calling
2559 	 * out to board code.
2561 	board_mmc_power_init();
2567  * put the host in the initial state:
2568  * - turn on Vdd (card power supply)
2569  * - configure the bus width and clock to minimal values
2571 static void mmc_set_initial_state(struct mmc *mmc)
2575 	/* First try to set 3.3V. If it fails set to 1.8V */
2576 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2578 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2580 		pr_warn("mmc: failed to set signal voltage\n");
	/* minimal bus config: legacy mode, 1-bit, lowest clock */
2582 	mmc_select_mode(mmc, MMC_LEGACY);
2583 	mmc_set_bus_width(mmc, 1);
2584 	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/*
 * mmc_power_on() - enable the card's vmmc supply (when driver-model
 * regulators are in use and a supply was found).
 */
2587 static int mmc_power_on(struct mmc *mmc)
2589 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2590 	if (mmc->vmmc_supply) {
2591 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2594 			puts("Error enabling VMMC supply\n");
/*
 * mmc_power_off() - gate the clock and disable the card's vmmc supply
 * (when driver-model regulators are in use and a supply was found).
 */
2602 static int mmc_power_off(struct mmc *mmc)
2604 	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2605 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2606 	if (mmc->vmmc_supply) {
2607 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2610 			pr_debug("Error disabling VMMC supply\n");
/*
 * mmc_power_cycle() - power the card off, wait, and power it back on.
 * Used to reset a card before (re)negotiating UHS modes.
 */
2618 static int mmc_power_cycle(struct mmc *mmc)
2622 	ret = mmc_power_off(mmc);
2626 	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2627 	 * to be on the safer side.
2630 	return mmc_power_on(mmc);
/*
 * mmc_get_op_cond() - power up the card and obtain its operating
 * conditions (OCR).
 *
 * Initializes power supplies, power-cycles the card (disabling UHS modes
 * if a full power cycle is not possible, since UHS error recovery needs
 * one), puts the host in its initial state, resets the card with CMD0,
 * then probes for SD v2 (CMD8) and sends the SD or — on timeout — the
 * MMC operating-condition command.
 */
2633 int mmc_get_op_cond(struct mmc *mmc)
2635 	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2641 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2642 	mmc_adapter_card_type_ident();
2644 	err = mmc_power_init(mmc);
2648 #ifdef CONFIG_MMC_QUIRKS
2649 	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2650 		      MMC_QUIRK_RETRY_SEND_CID;
2653 	err = mmc_power_cycle(mmc);
2656 		 * if power cycling is not supported, we should not try
2657 		 * to use the UHS modes, because we wouldn't be able to
2658 		 * recover from an error during the UHS initialization.
2660 		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2662 		mmc->host_caps &= ~UHS_CAPS;
2663 		err = mmc_power_on(mmc);
2668 #if CONFIG_IS_ENABLED(DM_MMC)
2669 	/* The device has already been probed ready for use */
2671 	/* made sure it's not NULL earlier */
2672 	err = mmc->cfg->ops->init(mmc);
2679 	mmc_set_initial_state(mmc);
2681 	/* Reset the Card */
2682 	err = mmc_go_idle(mmc);
2687 	/* The internal partition reset to user partition(0) at every CMD0*/
2688 	mmc_get_blk_desc(mmc)->hwpart = 0;
2690 	/* Test for SD version 2 */
2691 	err = mmc_send_if_cond(mmc);
2693 	/* Now try to get the SD card's operating condition */
2694 	err = sd_send_op_cond(mmc, uhs_en);
	/* UHS negotiation failed: retry once without UHS after a power cycle */
2695 	if (err && uhs_en) {
2697 		mmc_power_cycle(mmc);
2701 	/* If the command timed out, we check for an MMC card */
2702 	if (err == -ETIMEDOUT) {
2703 		err = mmc_send_op_cond(mmc);
2706 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2707 			pr_err("Card did not respond to voltage select!\n");
/*
 * mmc_start_init() - begin (possibly asynchronous) card initialization.
 *
 * Seeds host_caps with the universally supported legacy modes and 1-bit
 * width, checks card presence (unless CONFIG_MMC_BROKEN_CD), runs the
 * OCR exchange, and marks init as in progress so mmc_init() can finish
 * it later.
 */
2716 int mmc_start_init(struct mmc *mmc)
2722 	 * all hosts are capable of 1 bit bus-width and able to use the legacy
2725 	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2726 			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2728 #if !defined(CONFIG_MMC_BROKEN_CD)
2729 	/* we pretend there's no card when init is NULL */
2730 	no_card = mmc_getcd(mmc) == 0;
2734 #if !CONFIG_IS_ENABLED(DM_MMC)
	/* legacy (non-DM) hosts without an init op are treated as absent */
2735 	no_card = no_card || (mmc->cfg->ops->init == NULL);
2739 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2740 		pr_err("MMC: no card present\n");
2745 	err = mmc_get_op_cond(mmc);
2748 		mmc->init_in_progress = 1;
/*
 * mmc_complete_init() - finish an initialization started by
 * mmc_start_init(): complete the pending OCR exchange if any, then run
 * the full startup sequence.
 */
2753 static int mmc_complete_init(struct mmc *mmc)
2757 	mmc->init_in_progress = 0;
2758 	if (mmc->op_cond_pending)
2759 		err = mmc_complete_op_cond(mmc);
2762 		err = mmc_startup(mmc);
/*
 * mmc_init() - public entry point: run the whole init sequence (start +
 * complete) unless it is already in progress, and log the elapsed time.
 */
2770 int mmc_init(struct mmc *mmc)
2773 	__maybe_unused ulong start;
2774 #if CONFIG_IS_ENABLED(DM_MMC)
2775 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2782 	start = get_timer(0);
2784 	if (!mmc->init_in_progress)
2785 		err = mmc_start_init(mmc);
2788 		err = mmc_complete_init(mmc);
2790 		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2795 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2796     CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2797     CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_deinit() - drop the card back out of high-speed/UHS modes by
 * re-running mode selection with the UHS (SD) or HS200/HS400 (eMMC)
 * capabilities masked out.  Used before handing the card off (e.g. to
 * an OS that will re-initialize it).
 */
2798 int mmc_deinit(struct mmc *mmc)
2806 		caps_filtered = mmc->card_caps &
2807 			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2808 			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2809 			  MMC_CAP(UHS_SDR104));
2811 		return sd_select_mode_and_width(mmc, caps_filtered);
2813 		caps_filtered = mmc->card_caps &
2814 			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2816 		return mmc_select_mode_and_width(mmc, caps_filtered);
/* Store a driver stage register value to be programmed at next init. */
2821 int mmc_set_dsr(struct mmc *mmc, u16 val)
2827 /* CPU-specific MMC initializations; weak default does nothing. */
2828 __weak int cpu_mmc_init(bd_t *bis)
2833 /* board-specific MMC initializations; weak default does nothing. */
2834 __weak int board_mmc_init(bd_t *bis)
/* Mark this device for early (pre-use) initialization by mmc_initialize(). */
2839 void mmc_set_preinit(struct mmc *mmc, int preinit)
2841 	mmc->preinit = preinit;
2844 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - DM variant: bind all MMC devices in sequence order, then
 * probe each one; a single probe failure is logged but does not stop the
 * loop.
 */
2845 static int mmc_probe(bd_t *bis)
2849 	struct udevice *dev;
2851 	ret = uclass_get(UCLASS_MMC, &uc);
2856 	 * Try to add them in sequence order. Really with driver model we
2857 	 * should allow holes, but the current MMC list does not allow that.
2858 	 * So if we request 0, 1, 3 we will get 0, 1, 2.
2860 	for (i = 0; ; i++) {
2861 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2865 	uclass_foreach_dev(dev, uc) {
2866 		ret = device_probe(dev);
2868 			pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM variant: defer entirely to the board hook. */
2874 static int mmc_probe(bd_t *bis)
2876 	if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-time global MMC subsystem setup: probe all
 * controllers and (outside SPL) print the device list.  Guarded so
 * repeated calls are no-ops.
 */
2883 int mmc_initialize(bd_t *bis)
2885 	static int initialized = 0;
2887 	if (initialized)	/* Avoid initializing mmc multiple times */
2891 #if !CONFIG_IS_ENABLED(BLK)
2892 #if !CONFIG_IS_ENABLED(MMC_TINY)
2896 	ret = mmc_probe(bis);
2900 #ifndef CONFIG_SPL_BUILD
2901 	print_mmc_devices(',');
2908 #ifdef CONFIG_CMD_BKOPS_ENABLE
2909 int mmc_set_bkops_enable(struct mmc *mmc)
2912 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2914 err = mmc_send_ext_csd(mmc, ext_csd);
2916 puts("Could not get ext_csd register values\n");
2920 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2921 puts("Background operations not supported on device\n");
2922 return -EMEDIUMTYPE;
2925 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2926 puts("Background operations already enabled\n");
2930 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2932 puts("Failed to enable manual background operations\n");
2936 puts("Enabled manual background operations\n");