1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
28 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Non-DM fallback: wait for the DAT0 line to reach @state within
 * @timeout_us. NOTE(review): body not visible in this listing;
 * presumably returns -ENOSYS when the host cannot observe DAT0
 * (callers such as __mmc_switch treat -ENOSYS specially) — confirm.
 */
30 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
/* Weak board hook for write-protect detection; boards may override. */
35 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * mmc_getwp() - report the card's write-protect state.
 * Queries the board hook first, then the host driver's getwp op.
 * NOTE(review): the guard between the two calls is elided in this
 * listing (the driver op is likely only used when the board hook
 * returns "unknown") — confirm against the full source.
 */
40 int mmc_getwp(struct mmc *mmc)
44 wp = board_mmc_getwp(mmc);
47 if (mmc->cfg->ops->getwp)
48 wp = mmc->cfg->ops->getwp(mmc);
/* Weak board hook for card-detect; boards may override. */
56 __weak int board_mmc_getcd(struct mmc *mmc)
62 #ifdef CONFIG_MMC_TRACE
/* Trace helper (CONFIG_MMC_TRACE): dump command index and argument
 * before the command is sent to the host controller. */
63 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
65 printf("CMD_SEND:%d\n", cmd->cmdidx);
66 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
/*
 * Trace helper (CONFIG_MMC_TRACE): print the host driver's return code
 * and decode the response words according to cmd->resp_type.
 */
69 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
75 printf("\t\tRET\t\t\t %d\n", ret);
77 switch (cmd->resp_type) {
79 printf("\t\tMMC_RSP_NONE\n");
82 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
86 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
/* R2 responses carry 128 bits: print all four response words. */
90 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
92 printf("\t\t \t\t 0x%08x \n",
94 printf("\t\t \t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
99 printf("\t\t\t\t\tDUMPING DATA\n");
100 for (i = 0; i < 4; i++) {
102 printf("\t\t\t\t\t%03d - ", i*4);
103 ptr = (u8 *)&cmd->response[i];
/* NOTE(review): ptr is decremented, so bytes print in reverse order;
 * an elided line presumably advances ptr to the word's last byte
 * first — confirm against the full source. */
105 for (j = 0; j < 4; j++)
106 printf("%02x ", *ptr--);
111 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
115 printf("\t\tERROR MMC rsp not supported\n");
/* Trace helper: extract and print the CURRENT_STATE field (bits 12:9)
 * from a CMD13 (SEND_STATUS) R1 response. */
121 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
125 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
126 printf("CURR STATE:%d\n", status);
130 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * mmc_mode_name() - human-readable name for a bus_mode value.
 * Returns "Unknown mode" for out-of-range input.
 */
131 const char *mmc_mode_name(enum bus_mode mode)
133 static const char *const names[] = {
134 [MMC_LEGACY] = "MMC legacy",
135 [MMC_HS] = "MMC High Speed (26MHz)",
136 [SD_HS] = "SD High Speed (50MHz)",
137 [UHS_SDR12] = "UHS SDR12 (25MHz)",
138 [UHS_SDR25] = "UHS SDR25 (50MHz)",
139 [UHS_SDR50] = "UHS SDR50 (100MHz)",
140 [UHS_SDR104] = "UHS SDR104 (208MHz)",
141 [UHS_DDR50] = "UHS DDR50 (50MHz)",
142 [MMC_HS_52] = "MMC High Speed (52MHz)",
143 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
144 [MMC_HS_200] = "HS200 (200MHz)",
145 [MMC_HS_400] = "HS400 (200MHz)",
146 [MMC_HS_400_ES] = "HS400ES (200MHz)",
/* Range-check before indexing the table. */
149 if (mode >= MMC_MODES_END)
150 return "Unknown mode";
156 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
158 static const int freqs[] = {
159 [MMC_LEGACY] = 25000000,
162 [MMC_HS_52] = 52000000,
163 [MMC_DDR_52] = 52000000,
164 [UHS_SDR12] = 25000000,
165 [UHS_SDR25] = 50000000,
166 [UHS_SDR50] = 100000000,
167 [UHS_DDR50] = 50000000,
168 [UHS_SDR104] = 208000000,
169 [MMC_HS_200] = 200000000,
170 [MMC_HS_400] = 200000000,
171 [MMC_HS_400_ES] = 200000000,
174 if (mode == MMC_LEGACY)
175 return mmc->legacy_speed;
176 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the chosen bus mode on the mmc struct and
 * derive tran_speed and ddr_mode from it. Does not touch the hardware.
 */
182 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
184 mmc->selected_mode = mode;
185 mmc->tran_speed = mmc_mode2freq(mmc, mode);
186 mmc->ddr_mode = mmc_is_mode_ddr(mode);
187 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
188 mmc->tran_speed / 1000000);
192 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_send_cmd() - non-DM path: forward a command (and optional data
 * transfer) to the host driver, with trace hooks around the call.
 */
193 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
197 mmmc_trace_before_send(mmc, cmd);
198 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
199 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * mmc_send_status() - issue CMD13 (SEND_STATUS) and return the card
 * status word through @status. RCA addressing is skipped on SPI hosts.
 * NOTE(review): a retry loop using 'retries' is elided in this listing.
 */
205 int mmc_send_status(struct mmc *mmc, unsigned int *status)
208 int err, retries = 5;
210 cmd.cmdidx = MMC_CMD_SEND_STATUS;
211 cmd.resp_type = MMC_RSP_R1;
212 if (!mmc_host_is_spi(mmc))
213 cmd.cmdarg = mmc->rca << 16;
216 err = mmc_send_cmd(mmc, &cmd, NULL);
218 mmc_trace_state(mmc, &cmd);
219 *status = cmd.response[0];
223 mmc_trace_state(mmc, &cmd);
/*
 * mmc_poll_for_busy() - wait until the card leaves the busy state.
 * Prefers hardware DAT0 monitoring (mmc_wait_dat0); otherwise polls
 * CMD13 until READY_FOR_DATA is set and the card is out of the
 * programming state, failing on any status error bit or timeout.
 */
227 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
232 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
237 err = mmc_send_status(mmc, &status);
241 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
242 (status & MMC_STATUS_CURR_STATE) !=
246 if (status & MMC_STATUS_MASK) {
247 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
248 pr_err("Status Error: 0x%08x\n", status);
/* 1 ms per polling iteration (an elided udelay provides the pacing). */
253 if (timeout_ms-- <= 0)
259 if (timeout_ms <= 0) {
260 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
261 pr_err("Timeout waiting card ready\n")&#59;
/*
 * mmc_set_blocklen() - issue CMD16 (SET_BLOCKLEN) to set the card's
 * block length to @len bytes. With CONFIG_MMC_QUIRKS, retries the
 * command for cards known to fail it on the first attempt.
 */
269 int mmc_set_blocklen(struct mmc *mmc, int len)
277 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
278 cmd.resp_type = MMC_RSP_R1;
281 err = mmc_send_cmd(mmc, &cmd, NULL);
283 #ifdef CONFIG_MMC_QUIRKS
284 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
287 * It has been seen that SET_BLOCKLEN may fail on the first
288 * attempt, let's try a few more times
291 err = mmc_send_cmd(mmc, &cmd, NULL);
301 #ifdef MMC_SUPPORTS_TUNING
/* Standard 64-byte tuning block pattern for 4-bit bus tuning (CMD19). */
302 static const u8 tuning_blk_pattern_4bit[] = {
303 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
304 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
305 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
306 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
307 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
308 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
309 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
310 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard 128-byte tuning block pattern for 8-bit bus tuning (CMD21). */
313 static const u8 tuning_blk_pattern_8bit[] = {
314 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
315 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
316 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
317 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
318 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
319 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
320 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
321 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
322 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
323 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
324 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
325 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
326 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
327 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
328 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
329 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * mmc_send_tuning() - send a tuning command (@opcode) and read back one
 * tuning block, selecting the 4- or 8-bit reference pattern from the
 * current bus width. Returns non-zero if the received data does not
 * match the expected pattern.
 */
332 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
335 struct mmc_data data;
336 const u8 *tuning_block_pattern;
339 if (mmc->bus_width == 8) {
340 tuning_block_pattern = tuning_blk_pattern_8bit;
341 size = sizeof(tuning_blk_pattern_8bit);
342 } else if (mmc->bus_width == 4) {
343 tuning_block_pattern = tuning_blk_pattern_4bit;
344 size = sizeof(tuning_blk_pattern_4bit);
/* Cache-aligned receive buffer, required for DMA-capable hosts. */
349 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
353 cmd.resp_type = MMC_RSP_R1;
355 data.dest = (void *)data_buf;
357 data.blocksize = size;
358 data.flags = MMC_DATA_READ;
360 err = mmc_send_cmd(mmc, &cmd, &data);
/* Verify the received block against the reference pattern. */
364 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * mmc_read_blocks() - read @blkcnt blocks starting at @start into @dst.
 * Uses CMD18 for multi-block, CMD17 for single-block reads. The command
 * argument is a block number for high-capacity cards and a byte offset
 * otherwise. Multi-block reads are terminated with CMD12.
 */
371 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
375 struct mmc_data data;
378 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
380 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
382 if (mmc->high_capacity)
385 cmd.cmdarg = start * mmc->read_bl_len;
387 cmd.resp_type = MMC_RSP_R1;
390 data.blocks = blkcnt;
391 data.blocksize = mmc->read_bl_len;
392 data.flags = MMC_DATA_READ;
394 if (mmc_send_cmd(mmc, &cmd, &data))
/* Multi-block transfers must be closed with STOP_TRANSMISSION. */
398 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
400 cmd.resp_type = MMC_RSP_R1b;
401 if (mmc_send_cmd(mmc, &cmd, NULL)) {
402 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
403 pr_err("mmc fail to send stop cmd\n");
/*
 * mmc_bread() - block-device read entry point. Two signatures are
 * compiled depending on CONFIG_BLK (udevice vs. blk_desc based).
 * Selects the hardware partition, bounds-checks the request against
 * the device size, sets the block length, then reads in chunks capped
 * at the host's b_max.
 */
412 #if CONFIG_IS_ENABLED(BLK)
413 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
415 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
419 #if CONFIG_IS_ENABLED(BLK)
420 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
422 int dev_num = block_dev->devnum;
424 lbaint_t cur, blocks_todo = blkcnt;
429 struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY lacks the generic blk hwpart layer; switch directly. */
433 if (CONFIG_IS_ENABLED(MMC_TINY))
434 err = mmc_switch_part(mmc, block_dev->hwpart);
436 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
441 if ((start + blkcnt) > block_dev->lba) {
442 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
443 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
444 start + blkcnt, block_dev->lba);
449 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
450 pr_debug("%s: Failed to set blocklen\n", __func__);
/* Read in chunks no larger than the controller's max burst. */
455 cur = (blocks_todo > mmc->cfg->b_max) ?
456 mmc->cfg->b_max : blocks_todo;
457 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
458 pr_debug("%s: Failed to read blocks\n", __func__);
463 dst += cur * mmc->read_bl_len;
464 } while (blocks_todo > 0);
/* mmc_go_idle() - issue CMD0 (GO_IDLE_STATE) to reset the card. */
469 static int mmc_go_idle(struct mmc *mmc)
476 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
478 cmd.resp_type = MMC_RSP_NONE;
480 err = mmc_send_cmd(mmc, &cmd, NULL);
490 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - perform the SD UHS voltage switch sequence:
 * CMD11, then gate the clock, change the host signal voltage, re-enable
 * the clock, and verify via DAT0 that the card accepted the switch.
 * A request for 3.3V skips CMD11 and only changes the host voltage.
 */
491 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
497 * Send CMD11 only if the request is to switch the card to
500 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
501 return mmc_set_signal_voltage(mmc, signal_voltage);
503 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
505 cmd.resp_type = MMC_RSP_R1;
507 err = mmc_send_cmd(mmc, &cmd, NULL);
/* An error bit in the R1 response means the card rejected CMD11. */
511 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
515 * The card should drive cmd and dat[0:3] low immediately
516 * after the response of cmd11, but wait 100 us to be sure
518 err = mmc_wait_dat0(mmc, 0, 100);
525 * During a signal voltage level switch, the clock must be gated
526 * for 5 ms according to the SD spec
528 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
530 err = mmc_set_signal_voltage(mmc, signal_voltage);
534 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
536 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
539 * Failure to switch is indicated by the card holding
540 * dat[0:3] low. Wait for at least 1 ms according to spec
542 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * sd_send_op_cond() - SD power-up: loop CMD55 + ACMD41 until the card
 * reports power-up complete (OCR_BUSY). Advertises HCS for SDv2 cards
 * and S18R when UHS is requested. On SPI hosts the OCR is then read
 * via CMD58. Sets mmc->version, mmc->ocr and mmc->high_capacity.
 */
552 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
559 cmd.cmdidx = MMC_CMD_APP_CMD;
560 cmd.resp_type = MMC_RSP_R1;
563 err = mmc_send_cmd(mmc, &cmd, NULL);
568 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
569 cmd.resp_type = MMC_RSP_R3;
572 * Most cards do not answer if some reserved bits
573 * in the ocr are set. However, Some controllers
574 * can set bit 7 (reserved for low voltages), but
575 * how to manage low voltages SD card is not yet
578 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
579 (mmc->cfg->voltages & 0xff8000);
581 if (mmc->version == SD_VERSION_2)
582 cmd.cmdarg |= OCR_HCS;
585 cmd.cmdarg |= OCR_S18R;
587 err = mmc_send_cmd(mmc, &cmd, NULL);
/* OCR_BUSY set means card power-up is complete; stop polling. */
592 if (cmd.response[0] & OCR_BUSY)
601 if (mmc->version != SD_VERSION_2)
602 mmc->version = SD_VERSION_1_0;
604 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
605 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
606 cmd.resp_type = MMC_RSP_R3;
609 err = mmc_send_cmd(mmc, &cmd, NULL);
615 mmc->ocr = cmd.response[0];
617 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Card accepted 1.8V signalling (S18A + power-up bits). */
618 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
620 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
626 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - one CMD1 (SEND_OP_COND) iteration. When
 * @use_arg is set (and not on SPI), advertise sector addressing plus
 * the intersection of host voltages and the card's previous OCR.
 * Stores the returned OCR in mmc->ocr.
 */
632 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
637 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
638 cmd.resp_type = MMC_RSP_R3;
640 if (use_arg && !mmc_host_is_spi(mmc))
641 cmd.cmdarg = OCR_HCS |
642 (mmc->cfg->voltages &
643 (mmc->ocr & OCR_VOLTAGE_MASK)) |
644 (mmc->ocr & OCR_ACCESS_MODE);
646 err = mmc_send_cmd(mmc, &cmd, NULL);
649 mmc->ocr = cmd.response[0];
/*
 * mmc_send_op_cond() - start eMMC power-up. Sends a couple of CMD1
 * iterations; if the card is still busy afterwards, records
 * op_cond_pending so mmc_complete_op_cond() can finish the job later.
 */
653 static int mmc_send_op_cond(struct mmc *mmc)
657 /* Some cards seem to need this */
660 /* Asking to the card its capabilities */
661 for (i = 0; i < 2; i++) {
662 err = mmc_send_op_cond_iter(mmc, i != 0);
666 /* exit if not busy (flag seems to be inverted) */
667 if (mmc->ocr & OCR_BUSY)
670 mmc->op_cond_pending = 1;
/*
 * mmc_complete_op_cond() - finish eMMC power-up started by
 * mmc_send_op_cond(): poll CMD1 until OCR_BUSY is set or a timeout
 * expires, read OCR via CMD58 on SPI hosts, then derive version and
 * high_capacity from the final OCR.
 */
674 static int mmc_complete_op_cond(struct mmc *mmc)
681 mmc->op_cond_pending = 0;
682 if (!(mmc->ocr & OCR_BUSY)) {
683 /* Some cards seem to need this */
686 start = get_timer(0);
688 err = mmc_send_op_cond_iter(mmc, 1);
691 if (mmc->ocr & OCR_BUSY)
693 if (get_timer(start) > timeout)
699 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
700 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
701 cmd.resp_type = MMC_RSP_R3;
704 err = mmc_send_cmd(mmc, &cmd, NULL);
709 mmc->ocr = cmd.response[0];
712 mmc->version = MMC_VERSION_UNKNOWN;
714 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_ext_csd() - issue CMD8 (SEND_EXT_CSD) and read the 512-byte
 * Extended CSD register into @ext_csd. The buffer must be at least
 * MMC_MAX_BLOCK_LEN bytes and cache-aligned for DMA-capable hosts.
 */
721 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
724 struct mmc_data data;
727 /* Get the Card Status Register */
728 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
729 cmd.resp_type = MMC_RSP_R1;
732 data.dest = (char *)ext_csd;
734 data.blocksize = MMC_MAX_BLOCK_LEN;
735 data.flags = MMC_DATA_READ;
737 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * __mmc_switch() - write one EXT_CSD byte via CMD6 (SWITCH) and wait
 * for the card to finish. The timeout is derived from GENERIC_CMD6_TIME
 * or PARTITION_SWITCH_TIME (both in 10 ms units). Completion is
 * detected via DAT0 when the host supports it, otherwise (or in
 * addition, when @send_status) by polling CMD13.
 */
742 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
745 unsigned int status, start;
747 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
748 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
749 (index == EXT_CSD_PART_CONF);
753 if (mmc->gen_cmd6_time)
754 timeout_ms = mmc->gen_cmd6_time * 10;
756 if (is_part_switch && mmc->part_switch_time)
757 timeout_ms = mmc->part_switch_time * 10;
759 cmd.cmdidx = MMC_CMD_SWITCH;
760 cmd.resp_type = MMC_RSP_R1b;
761 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
/* Retry the SWITCH command itself a few times on error. */
766 ret = mmc_send_cmd(mmc, &cmd, NULL);
767 } while (ret && retries-- > 0);
772 start = get_timer(0);
774 /* poll dat0 for rdy/busy status */
775 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
776 if (ret && ret != -ENOSYS)
780 * In cases when not allowed to poll by using CMD13 or because we aren't
781 * capable of polling by using mmc_wait_dat0, then rely on waiting the
782 * stated timeout to be sufficient.
784 if (ret == -ENOSYS && !send_status)
787 /* Finally wait until the card is ready or indicates a failure
788 * to switch. It doesn't hurt to use CMD13 here even if send_status
789 * is false, because by now (after 'timeout_ms' ms) the bus should be
793 ret = mmc_send_status(mmc, &status);
795 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
796 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
800 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
803 } while (get_timer(start) < timeout_ms);
/* mmc_switch() - public CMD6 wrapper; always polls for completion. */
808 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
810 return __mmc_switch(mmc, set, index, value, true);
/* mmc_boot_wp() - set EXT_CSD_BOOT_WP to 1 (power-on write protect
 * of the boot area) via CMD6. */
813 int mmc_boot_wp(struct mmc *mmc)
815 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
818 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * mmc_set_card_speed() - program EXT_CSD_HS_TIMING for the requested
 * bus mode via CMD6. When @hsdowngrade (leaving HS200/HS400 for HS)
 * the CMD6 status poll is skipped and the host clock is dropped to HS
 * first, because the EXT_CSD read-back would otherwise run at a clock
 * the card no longer supports. For HS/HS_52 the switch is verified by
 * re-reading EXT_CSD.
 */
819 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
825 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
831 speed_bits = EXT_CSD_TIMING_HS;
833 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
835 speed_bits = EXT_CSD_TIMING_HS200;
838 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
840 speed_bits = EXT_CSD_TIMING_HS400;
843 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* NOTE(review): HS400ES uses EXT_CSD_TIMING_HS400 too; the enhanced-
 * strobe bit is presumably set elsewhere — confirm in full source. */
845 speed_bits = EXT_CSD_TIMING_HS400;
849 speed_bits = EXT_CSD_TIMING_LEGACY;
855 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
856 speed_bits, !hsdowngrade);
860 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
861 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
863 * In case the eMMC is in HS200/HS400 mode and we are downgrading
864 * to HS mode, the card clock are still running much faster than
865 * the supported HS mode clock, so we can not reliably read out
866 * Extended CSD. Reconfigure the controller to run at HS mode.
869 mmc_select_mode(mmc, MMC_HS);
870 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
874 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
875 /* Now check to see that it worked */
876 err = mmc_send_ext_csd(mmc, test_csd);
880 /* No high-speed support */
881 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - populate mmc->card_caps from the cached
 * EXT_CSD: bus widths and the speed modes advertised in the
 * DEVICE_TYPE (CARD_TYPE) byte, gated by the compiled-in mode support.
 * SPI hosts and pre-v4 cards are left at legacy capabilities.
 */
888 static int mmc_get_capabilities(struct mmc *mmc)
890 u8 *ext_csd = mmc->ext_csd;
893 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
895 if (mmc_host_is_spi(mmc))
898 /* Only version 4 supports high-speed */
899 if (mmc->version < MMC_VERSION_4)
903 pr_err("No ext_csd found!\n"); /* this should never happen */
907 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
909 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
910 mmc->cardtype = cardtype;
912 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
913 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
914 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
915 mmc->card_caps |= MMC_MODE_HS200;
918 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
919 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
920 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
921 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
922 mmc->card_caps |= MMC_MODE_HS400;
925 if (cardtype & EXT_CSD_CARD_TYPE_52) {
926 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
927 mmc->card_caps |= MMC_MODE_DDR_52MHz;
928 mmc->card_caps |= MMC_MODE_HS_52MHz;
930 if (cardtype & EXT_CSD_CARD_TYPE_26)
931 mmc->card_caps |= MMC_MODE_HS;
933 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* Enhanced strobe requires both STROBE_SUPPORT and HS400 capability. */
934 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
935 (mmc->card_caps & MMC_MODE_HS400)) {
936 mmc->card_caps |= MMC_MODE_HS400_ES;
/*
 * mmc_set_capacity() - update mmc->capacity and the block descriptor's
 * lba count for the given hardware partition (0 = user area, 1/2 =
 * boot, 3 = RPMB, 4..7 = general purpose).
 */
944 static int mmc_set_capacity(struct mmc *mmc, int part_num)
948 mmc->capacity = mmc->capacity_user;
952 mmc->capacity = mmc->capacity_boot;
955 mmc->capacity = mmc->capacity_rpmb;
961 mmc->capacity = mmc->capacity_gp[part_num - 4];
967 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * mmc_switch_part() - select hardware partition @part_num by rewriting
 * the PART_ACCESS bits of EXT_CSD_PART_CONF (with retries), then update
 * the cached capacity/hwpart on success. A -ENODEV when returning to
 * partition 0 is tolerated so the raw device stays usable.
 */
972 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
978 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
980 (mmc->part_config & ~PART_ACCESS_MASK)
981 | (part_num & PART_ACCESS_MASK));
982 } while (ret && retry--);
985 * Set the capacity if the switch succeeded or was intended
986 * to return to representing the raw device.
988 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
989 ret = mmc_set_capacity(mmc, part_num);
990 mmc_get_blk_desc(mmc)->hwpart = part_num;
996 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * mmc_hwpart_config() - validate and (depending on @mode) program an
 * eMMC hardware partition layout: enhanced user area, GP partitions,
 * and write-reliability settings. CHECK only validates, SET writes the
 * sizes/attributes, COMPLETE additionally writes WR_REL_SET and sets
 * PARTITION_SETTING_COMPLETED (effective after the next power cycle).
 * Returns -EMEDIUMTYPE when the card cannot express the configuration.
 */
997 int mmc_hwpart_config(struct mmc *mmc,
998 const struct mmc_hwpart_conf *conf,
999 enum mmc_hwpart_conf_mode mode)
1004 u32 gp_size_mult[4];
1005 u32 max_enh_size_mult;
1006 u32 tot_enh_size_mult = 0;
1009 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1011 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
/* Enhanced area / partitioning needs eMMC >= 4.41 with HC WP groups. */
1014 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1015 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1016 return -EMEDIUMTYPE;
1019 if (!(mmc->part_support & PART_SUPPORT)) {
1020 pr_err("Card does not support partitioning\n");
1021 return -EMEDIUMTYPE;
1024 if (!mmc->hc_wp_grp_size) {
1025 pr_err("Card does not define HC WP group size\n");
1026 return -EMEDIUMTYPE;
1029 /* check partition alignment and total enhanced size */
1030 if (conf->user.enh_size) {
1031 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1032 conf->user.enh_start % mmc->hc_wp_grp_size) {
1033 pr_err("User data enhanced area not HC WP group "
1037 part_attrs |= EXT_CSD_ENH_USR;
1038 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
/* Byte-addressed cards need the start expressed in bytes (<< 9). */
1039 if (mmc->high_capacity) {
1040 enh_start_addr = conf->user.enh_start;
1042 enh_start_addr = (conf->user.enh_start << 9);
1048 tot_enh_size_mult += enh_size_mult;
1050 for (pidx = 0; pidx < 4; pidx++) {
1051 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1052 pr_err("GP%i partition not HC WP group size "
1053 "aligned\n", pidx+1);
1056 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1057 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1058 part_attrs |= EXT_CSD_ENH_GP(pidx);
1059 tot_enh_size_mult += gp_size_mult[pidx];
1063 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1064 pr_err("Card does not support enhanced attribute\n");
1065 return -EMEDIUMTYPE;
1068 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field in EXT_CSD. */
1073 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1074 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1075 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1076 if (tot_enh_size_mult > max_enh_size_mult) {
1077 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1078 tot_enh_size_mult, max_enh_size_mult);
1079 return -EMEDIUMTYPE;
1082 /* The default value of EXT_CSD_WR_REL_SET is device
1083 * dependent, the values can only be changed if the
1084 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1085 * changed only once and before partitioning is completed. */
1086 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1087 if (conf->user.wr_rel_change) {
1088 if (conf->user.wr_rel_set)
1089 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1091 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1093 for (pidx = 0; pidx < 4; pidx++) {
1094 if (conf->gp_part[pidx].wr_rel_change) {
1095 if (conf->gp_part[pidx].wr_rel_set)
1096 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1098 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1102 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1103 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1104 puts("Card does not support host controlled partition write "
1105 "reliability settings\n");
1106 return -EMEDIUMTYPE;
/* Refuse to reconfigure a card that is already partitioned. */
1109 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1110 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1111 pr_err("Card already partitioned\n");
1115 if (mode == MMC_HWPART_CONF_CHECK)
1118 /* Partitioning requires high-capacity size definitions */
1119 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1120 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1121 EXT_CSD_ERASE_GROUP_DEF, 1);
1126 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1128 #if CONFIG_IS_ENABLED(MMC_WRITE)
1129 /* update erase group size to be high-capacity */
1130 mmc->erase_grp_size =
1131 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1136 /* all OK, write the configuration */
1137 for (i = 0; i < 4; i++) {
1138 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1139 EXT_CSD_ENH_START_ADDR+i,
1140 (enh_start_addr >> (i*8)) & 0xFF);
1144 for (i = 0; i < 3; i++) {
1145 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1146 EXT_CSD_ENH_SIZE_MULT+i,
1147 (enh_size_mult >> (i*8)) & 0xFF);
1151 for (pidx = 0; pidx < 4; pidx++) {
1152 for (i = 0; i < 3; i++) {
1153 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1154 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1155 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1160 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1161 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1165 if (mode == MMC_HWPART_CONF_SET)
1168 /* The WR_REL_SET is a write-once register but shall be
1169 * written before setting PART_SETTING_COMPLETED. As it is
1170 * write-once we can only write it when completing the
1172 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1173 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1174 EXT_CSD_WR_REL_SET, wr_rel_set);
1179 /* Setting PART_SETTING_COMPLETED confirms the partition
1180 * configuration but it only becomes effective after power
1181 * cycle, so we do not adjust the partition related settings
1182 * in the mmc struct. */
1184 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1185 EXT_CSD_PARTITION_SETTING,
1186 EXT_CSD_PARTITION_SETTING_COMPLETED);
1194 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_getcd() - non-DM card-detect. Queries the board hook, then the
 * host driver's getcd op. NOTE(review): the guard between the two
 * calls is elided in this listing — confirm against the full source.
 */
1195 int mmc_getcd(struct mmc *mmc)
1199 cd = board_mmc_getcd(mmc);
1202 if (mmc->cfg->ops->getcd)
1203 cd = mmc->cfg->ops->getcd(mmc);
1212 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * sd_switch() - SD CMD6 (SWITCH_FUNC): set function @value in function
 * @group, leaving all other groups at 0xf ("no change"), and read the
 * 64-byte switch status block into @resp. @mode selects check (0) vs
 * switch (1) via bit 31.
 */
1213 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1216 struct mmc_data data;
1218 /* Switch the frequency */
1219 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1220 cmd.resp_type = MMC_RSP_R1;
1221 cmd.cmdarg = (mode << 31) | 0xffffff;
1222 cmd.cmdarg &= ~(0xf << (group * 4));
1223 cmd.cmdarg |= value << (group * 4);
1225 data.dest = (char *)resp;
1226 data.blocksize = 64;
1228 data.flags = MMC_DATA_READ;
1230 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * sd_get_capabilities() - populate mmc->card_caps for an SD card:
 * read the SCR (ACMD51) to determine SD version and 4-bit support,
 * probe CMD6 for high-speed, and for SDv3 cards decode the UHS bus
 * modes from the switch status. SPI hosts stay at legacy caps.
 */
1233 static int sd_get_capabilities(struct mmc *mmc)
1237 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1238 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1239 struct mmc_data data;
1241 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1245 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1247 if (mmc_host_is_spi(mmc))
1250 /* Read the SCR to find out if this card supports higher speeds */
1251 cmd.cmdidx = MMC_CMD_APP_CMD;
1252 cmd.resp_type = MMC_RSP_R1;
1253 cmd.cmdarg = mmc->rca << 16;
1255 err = mmc_send_cmd(mmc, &cmd, NULL);
1260 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1261 cmd.resp_type = MMC_RSP_R1;
1267 data.dest = (char *)scr;
1270 data.flags = MMC_DATA_READ;
1272 err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is transferred big-endian; convert to host order. */
1281 mmc->scr[0] = __be32_to_cpu(scr[0]);
1282 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC field (SCR bits 59:56) selects the spec version. */
1284 switch ((mmc->scr[0] >> 24) & 0xf) {
1286 mmc->version = SD_VERSION_1_0;
1289 mmc->version = SD_VERSION_1_10;
1292 mmc->version = SD_VERSION_2;
1293 if ((mmc->scr[0] >> 15) & 0x1)
1294 mmc->version = SD_VERSION_3;
1297 mmc->version = SD_VERSION_1_0;
1301 if (mmc->scr[0] & SD_DATA_4BIT)
1302 mmc->card_caps |= MMC_MODE_4BIT;
1304 /* Version 1.0 doesn't support switching */
1305 if (mmc->version == SD_VERSION_1_0)
1310 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1311 (u8 *)switch_status);
1316 /* The high-speed function is busy. Try again */
1317 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1321 /* If high-speed isn't supported, we return */
1322 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1323 mmc->card_caps |= MMC_CAP(SD_HS);
1325 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1326 /* Versions before 3.0 don't support UHS modes */
1327 if (mmc->version < SD_VERSION_3)
1330 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1331 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1332 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1333 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1334 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1335 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1336 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1337 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1338 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1339 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1340 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * sd_set_card_speed() - map the requested bus mode to an SD CMD6
 * function value and perform the switch, then verify the card actually
 * selected it from the returned switch status. SD 1.0x cards (no CMD6)
 * are accepted as-is.
 */
1346 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1350 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1353 /* SD version 1.00 and 1.01 does not support CMD 6 */
1354 if (mmc->version == SD_VERSION_1_0)
1359 speed = UHS_SDR12_BUS_SPEED;
1362 speed = HIGH_SPEED_BUS_SPEED;
1364 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1366 speed = UHS_SDR12_BUS_SPEED;
1369 speed = UHS_SDR25_BUS_SPEED;
1372 speed = UHS_SDR50_BUS_SPEED;
1375 speed = UHS_DDR50_BUS_SPEED;
1378 speed = UHS_SDR104_BUS_SPEED;
1385 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* Bits 379:376 of the switch status echo the selected function. */
1389 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * sd_select_bus_width() - set the SD card's bus width via CMD55 +
 * ACMD6 (SET_BUS_WIDTH). Only 1-bit and 4-bit are valid for SD.
 */
1395 static int sd_select_bus_width(struct mmc *mmc, int w)
1400 if ((w != 4) && (w != 1))
1403 cmd.cmdidx = MMC_CMD_APP_CMD;
1404 cmd.resp_type = MMC_RSP_R1;
1405 cmd.cmdarg = mmc->rca << 16;
1407 err = mmc_send_cmd(mmc, &cmd, NULL);
1411 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1412 cmd.resp_type = MMC_RSP_R1;
1417 err = mmc_send_cmd(mmc, &cmd, NULL);
1425 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * sd_read_ssr() - read the 64-byte SD Status register (ACMD13) and
 * decode the allocation unit size and erase timeout/offset into
 * mmc->ssr. APP_CMD is retried under CONFIG_MMC_QUIRKS for cards that
 * fail it on the first attempt.
 */
1426 static int sd_read_ssr(struct mmc *mmc)
/* AU_SIZE code -> allocation unit size in 512-byte sectors. */
1428 static const unsigned int sd_au_size[] = {
1429 0, SZ_16K / 512, SZ_32K / 512,
1430 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1431 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1432 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1433 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1438 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1439 struct mmc_data data;
1441 unsigned int au, eo, et, es;
1443 cmd.cmdidx = MMC_CMD_APP_CMD;
1444 cmd.resp_type = MMC_RSP_R1;
1445 cmd.cmdarg = mmc->rca << 16;
1447 err = mmc_send_cmd(mmc, &cmd, NULL);
1448 #ifdef CONFIG_MMC_QUIRKS
1449 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1452 * It has been seen that APP_CMD may fail on the first
1453 * attempt, let's try a few more times
1456 err = mmc_send_cmd(mmc, &cmd, NULL);
1459 } while (retries--);
1465 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1466 cmd.resp_type = MMC_RSP_R1;
1470 data.dest = (char *)ssr;
1471 data.blocksize = 64;
1473 data.flags = MMC_DATA_READ;
1475 err = mmc_send_cmd(mmc, &cmd, &data);
/* SSR arrives big-endian; convert each word to host order. */
1483 for (i = 0; i < 16; i++)
1484 ssr[i] = be32_to_cpu(ssr[i]);
1486 au = (ssr[2] >> 12) & 0xF;
1487 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1488 mmc->ssr.au = sd_au_size[au];
1489 es = (ssr[3] >> 24) & 0xFF;
1490 es |= (ssr[2] & 0xFF) << 8;
1491 et = (ssr[3] >> 18) & 0x3F;
1493 eo = (ssr[3] >> 16) & 0x3;
1494 mmc->ssr.erase_timeout = (et * 1000) / es;
1495 mmc->ssr.erase_offset = eo * 1000;
1498 pr_debug("Invalid Allocation Unit Size.\n");
/* CSD TRAN_SPEED decode tables (frequency base x multiplier). */
1504 /* frequency bases */
1505 /* divided by 10 to be nice to platforms without floating point */
1506 static const int fbase[] = {
1513 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1514 * to platforms without floating point.
1516 static const u8 multipliers[] = {
1535 static inline int bus_width(uint cap)
1537 if (cap == MMC_MODE_8BIT)
1539 if (cap == MMC_MODE_4BIT)
1541 if (cap == MMC_MODE_1BIT)
1543 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1547 #if !CONFIG_IS_ENABLED(DM_MMC)
1548 #ifdef MMC_SUPPORTS_TUNING
/*
 * Non-DM tuning entry point. NOTE(review): body not visible in this
 * listing; presumably dispatches to the host driver's execute_tuning
 * op — confirm against the full source.
 */
1549 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
/* mmc_set_ios() - non-DM: push the current clock/width/voltage state
 * to the host driver via its optional set_ios op. */
1555 static int mmc_set_ios(struct mmc *mmc)
1559 if (mmc->cfg->ops->set_ios)
1560 ret = mmc->cfg->ops->set_ios(mmc);
/* mmc_host_power_cycle() - non-DM: invoke the host driver's optional
 * host_power_cycle op to power-cycle the card. */
1565 static int mmc_host_power_cycle(struct mmc *mmc)
1569 if (mmc->cfg->ops->host_power_cycle)
1570 ret = mmc->cfg->ops->host_power_cycle(mmc);
/*
 * mmc_set_clock() - clamp @clock to the host's [f_min, f_max] range,
 * record it together with the enable/disable state, and apply via
 * mmc_set_ios().
 */
1576 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1579 if (clock > mmc->cfg->f_max)
1580 clock = mmc->cfg->f_max;
1582 if (clock < mmc->cfg->f_min)
1583 clock = mmc->cfg->f_min;
1587 mmc->clk_disable = disable;
1589 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1591 return mmc_set_ios(mmc);
/* mmc_set_bus_width() - record the bus width and apply via set_ios. */
1594 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1596 mmc->bus_width = width;
1598 return mmc_set_ios(mmc);
1601 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1603 * helper function to display the capabilities in a human
1604 * friendly manner. The capabilities include bus width and
1607 void mmc_dump_capabilities(const char *text, uint caps)
1611 pr_debug("%s: widths [", text);
1612 if (caps & MMC_MODE_8BIT)
1614 if (caps & MMC_MODE_4BIT)
1616 if (caps & MMC_MODE_1BIT)
/* "\b\b" erases the trailing ", " before closing the bracket. */
1618 pr_debug("\b\b] modes [");
1619 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1620 if (MMC_CAP(mode) & caps)
1621 pr_debug("%s, ", mmc_mode_name(mode));
1622 pr_debug("\b\b]\n");
/* Pairing of a bus mode with the widths it may use and (when tuning is
 * supported) the tuning command to run after selecting it. */
1626 struct mode_width_tuning {
1629 #ifdef MMC_SUPPORTS_TUNING
1634 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* mmc_voltage_to_mv() - convert an mmc_voltage enum to millivolts
 * (negative/elided fallback for unknown values). */
1635 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1638 case MMC_SIGNAL_VOLTAGE_000: return 0;
1639 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1640 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1641 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * mmc_set_signal_voltage() - change the host's I/O signal voltage via
 * set_ios. No-op when the requested voltage is already selected.
 */
1646 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1650 if (mmc->signal_voltage == signal_voltage)
1653 mmc->signal_voltage = signal_voltage;
1654 err = mmc_set_ios(mmc);
1656 pr_debug("unable to set voltage (err %d)\n", err);
1661 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1667 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* SD bus modes ordered fastest-first; sd_select_mode_and_width() walks
 * this table and picks the first mode both card and host support. */
1668 static const struct mode_width_tuning sd_modes_by_pref[] = {
1669 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1670 #ifdef MMC_SUPPORTS_TUNING
1673 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1674 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1679 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1683 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1687 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1692 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1694 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1697 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1702 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate the preference table, skipping modes absent from @caps. */
1706 #define for_each_sd_mode_by_pref(caps, mwt) \
1707 for (mwt = sd_modes_by_pref;\
1708 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1710 if (caps & MMC_CAP(mwt->mode))
/*
 * sd_select_mode_and_width() - pick the best (mode, width) pair the
 * card and host both support, trying modes fastest-first and widths
 * widest-first. Configures card then host for each candidate, runs
 * tuning when the mode needs it, and falls back to a safer speed on
 * failure. SPI hosts are forced to 1-bit legacy mode.
 */
1712 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1715 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1716 const struct mode_width_tuning *mwt;
1717 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1718 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1720 bool uhs_en = false;
1725 mmc_dump_capabilities("sd card", card_caps);
1726 mmc_dump_capabilities("host", mmc->host_caps);
1729 if (mmc_host_is_spi(mmc)) {
1730 mmc_set_bus_width(mmc, 1);
1731 mmc_select_mode(mmc, MMC_LEGACY);
1732 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1736 /* Restrict card's capabilities by what the host can do */
1737 caps = card_caps & mmc->host_caps;
1742 for_each_sd_mode_by_pref(caps, mwt) {
1745 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1746 if (*w & caps & mwt->widths) {
1747 pr_debug("trying mode %s width %d (at %d MHz)\n",
1748 mmc_mode_name(mwt->mode),
1750 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1752 /* configure the bus width (card + host) */
1753 err = sd_select_bus_width(mmc, bus_width(*w));
1756 mmc_set_bus_width(mmc, bus_width(*w));
1758 /* configure the bus mode (card) */
1759 err = sd_set_card_speed(mmc, mwt->mode);
1763 /* configure the bus mode (host) */
1764 mmc_select_mode(mmc, mwt->mode);
1765 mmc_set_clock(mmc, mmc->tran_speed,
1768 #ifdef MMC_SUPPORTS_TUNING
1769 /* execute tuning if needed */
1770 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1771 err = mmc_execute_tuning(mmc,
1774 pr_debug("tuning failed\n");
1780 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* SSR is informational (erase timings); failure is non-fatal. */
1781 err = sd_read_ssr(mmc);
1783 pr_warn("unable to read ssr\n");
1789 /* revert to a safer bus speed */
1790 mmc_select_mode(mmc, MMC_LEGACY);
1791 mmc_set_clock(mmc, mmc->tran_speed,
1797 pr_err("unable to select a mode\n");
1802 * read and compare the part of ext csd that is constant.
1803 * This can be used to check that the transfer is working
/*
 * Re-reads EXT_CSD into a bounce buffer and compares the read-only
 * fields against the cached copy in mmc->ext_csd; a match indicates the
 * current bus configuration transfers data correctly.  Only meaningful
 * for MMC v4+ devices (earlier versions have no EXT_CSD).
 */
1806 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1809 const u8 *ext_csd = mmc->ext_csd;
1810 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1812 if (mmc->version < MMC_VERSION_4)
1815 err = mmc_send_ext_csd(mmc, test_csd);
1819 /* Only compare read only fields */
1820 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1821 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1822 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1823 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1824 ext_csd[EXT_CSD_REV]
1825 == test_csd[EXT_CSD_REV] &&
1826 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1827 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1828 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1829 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1835 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - select the lowest I/O voltage that both the
 * card (per its EXT_CSD card type bits for the target bus mode) and the
 * caller's allowed_mask permit.  Iterates from the lowest candidate
 * upward via ffs(), dropping candidates that fail to switch.
 */
1836 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1837 uint32_t allowed_mask)
/* HS200/HS400 card types define 1.8V and/or 1.2V support. */
1845 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1846 EXT_CSD_CARD_TYPE_HS400_1_8V))
1847 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1848 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1849 EXT_CSD_CARD_TYPE_HS400_1_2V))
1850 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* DDR card types allow 3.3V/1.8V (and optionally 1.2V). */
1853 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1854 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1855 MMC_SIGNAL_VOLTAGE_180;
1856 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1857 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1860 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* Try lowest permitted voltage first; drop it from the mask on failure. */
1864 while (card_mask & allowed_mask) {
1865 enum mmc_voltage best_match;
1867 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1868 if (!mmc_set_signal_voltage(mmc, best_match))
1871 allowed_mask &= ~best_match;
/* Stub used when MMC_IO_VOLTAGE support is compiled out. */
1877 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1878 uint32_t allowed_mask)
/*
 * eMMC bus modes listed in order of decreasing preference; HS400-ES,
 * HS400 and HS200 entries are conditional on their Kconfig options.
 * HS200/HS400 require the HS200 tuning command.
 */
1884 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1885 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1887 .mode = MMC_HS_400_ES,
1888 .widths = MMC_MODE_8BIT,
1891 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1894 .widths = MMC_MODE_8BIT,
1895 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1898 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1901 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1902 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1907 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1911 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1915 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1919 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate over mmc_modes_by_pref[], visiting only the entries whose mode
 * is present in 'caps'.  Note the trailing 'if' guards the loop body.
 */
1923 #define for_each_mmc_mode_by_pref(caps, mwt) \
1924 for (mwt = mmc_modes_by_pref;\
1925 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1927 if (caps & MMC_CAP(mwt->mode))
/*
 * Map from host width capability + DDR flag to the EXT_CSD BUS_WIDTH
 * register value, ordered widest/DDR first so the scan prefers the
 * fastest configuration.
 */
1929 static const struct ext_csd_bus_width {
1933 } ext_csd_bus_width[] = {
1934 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1935 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1936 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1937 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1938 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1941 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - switch an eMMC device into HS400 mode.
 *
 * Follows the JEDEC-mandated sequence: enter HS200 and run tuning there,
 * drop back to HS, switch the card to 8-bit DDR bus width, then switch
 * the card speed to HS400 and reprogram the host clock.
 */
1942 static int mmc_select_hs400(struct mmc *mmc)
1946 /* Set timing to HS200 for tuning */
1947 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1951 /* configure the bus mode (host) */
1952 mmc_select_mode(mmc, MMC_HS_200);
1953 mmc_set_clock(mmc, mmc->tran_speed, false);
1955 /* execute tuning if needed */
1956 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1958 debug("tuning failed\n");
1962 /* Set back to HS */
1963 mmc_set_card_speed(mmc, MMC_HS, true);
/* Card side: 8-bit bus with the DDR flag, required before HS400. */
1965 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1966 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1970 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1974 mmc_select_mode(mmc, MMC_HS_400);
1975 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub used when MMC_HS400_SUPPORT is compiled out. */
1982 static int mmc_select_hs400(struct mmc *mmc)
1988 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1989 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM fallback for enabling HS400 enhanced strobe; body elided here. */
1990 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * mmc_select_hs400es() - switch an eMMC device into HS400 Enhanced
 * Strobe mode: set HS timing, switch the card to 8-bit DDR with the
 * strobe bit, set HS400-ES card speed, program the host clock, and
 * finally enable enhanced strobe on the host.
 */
1995 static int mmc_select_hs400es(struct mmc *mmc)
1999 err = mmc_set_card_speed(mmc, MMC_HS, true);
2003 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2004 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2005 EXT_CSD_BUS_WIDTH_STROBE);
2007 printf("switch to bus width for hs400 failed\n");
2010 /* TODO: driver strength */
2011 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2015 mmc_select_mode(mmc, MMC_HS_400_ES);
2016 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2020 return mmc_set_enhanced_strobe(mmc);
/* Stub used when MMC_HS400_ES_SUPPORT is compiled out. */
2023 static int mmc_select_hs400es(struct mmc *mmc)
/*
 * Iterate over ext_csd_bus_width[], visiting only entries whose DDR flag
 * matches 'ddr' and whose width capability is present in 'caps'.
 */
2029 #define for_each_supported_width(caps, ddr, ecbv) \
2030 for (ecbv = ext_csd_bus_width;\
2031 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2033 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - pick and program the best eMMC bus
 * mode/width.
 *
 * Restricts card capabilities by the host's, downgrades any active
 * HS200/HS400 mode to HS first (direct HS200/HS400 -> legacy is not
 * supported), then walks mmc_modes_by_pref[] x ext_csd_bus_width[]:
 * lower the signal voltage if needed, switch the card bus width, handle
 * HS400/HS400-ES via their dedicated helpers, otherwise set the card
 * speed, apply DDR width, program the host, tune if required, and verify
 * with an EXT_CSD read-back.  On failure the voltage and bus mode are
 * reverted before trying the next combination.
 */
2035 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2038 const struct mode_width_tuning *mwt;
2039 const struct ext_csd_bus_width *ecbw;
2042 mmc_dump_capabilities("mmc", card_caps);
2043 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts: fixed 1-bit legacy mode, nothing to negotiate. */
2046 if (mmc_host_is_spi(mmc)) {
2047 mmc_set_bus_width(mmc, 1);
2048 mmc_select_mode(mmc, MMC_LEGACY);
2049 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2053 /* Restrict card's capabilities by what the host can do */
2054 card_caps &= mmc->host_caps;
2056 /* Only version 4 of MMC supports wider bus widths */
2057 if (mmc->version < MMC_VERSION_4)
2060 if (!mmc->ext_csd) {
2061 pr_debug("No ext_csd found!\n"); /* this should never happen */
2065 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2066 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2068 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2069 * before doing anything else, since a transition from either of
2070 * the HS200/HS400 mode directly to legacy mode is not supported.
2072 if (mmc->selected_mode == MMC_HS_200 ||
2073 mmc->selected_mode == MMC_HS_400)
2074 mmc_set_card_speed(mmc, MMC_HS, true);
2077 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2079 for_each_mmc_mode_by_pref(card_caps, mwt) {
2080 for_each_supported_width(card_caps & mwt->widths,
2081 mmc_is_mode_ddr(mwt->mode), ecbw) {
2082 enum mmc_voltage old_voltage;
2083 pr_debug("trying mode %s width %d (at %d MHz)\n",
2084 mmc_mode_name(mwt->mode),
2085 bus_width(ecbw->cap),
2086 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* Remember the voltage so a failed attempt can restore it. */
2087 old_voltage = mmc->signal_voltage;
2088 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2089 MMC_ALL_SIGNAL_VOLTAGE);
2093 /* configure the bus width (card + host) */
2094 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2096 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2099 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400/HS400-ES need their multi-step switch sequences. */
2101 if (mwt->mode == MMC_HS_400) {
2102 err = mmc_select_hs400(mmc);
2104 printf("Select HS400 failed %d\n", err);
2107 } else if (mwt->mode == MMC_HS_400_ES) {
2108 err = mmc_select_hs400es(mmc);
2110 printf("Select HS400ES failed %d\n",
2115 /* configure the bus speed (card) */
2116 err = mmc_set_card_speed(mmc, mwt->mode, false);
2121 * configure the bus width AND the ddr mode
2122 * (card). The host side will be taken care
2123 * of in the next step
2125 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2126 err = mmc_switch(mmc,
2127 EXT_CSD_CMD_SET_NORMAL,
2129 ecbw->ext_csd_bits);
2134 /* configure the bus mode (host) */
2135 mmc_select_mode(mmc, mwt->mode);
2136 mmc_set_clock(mmc, mmc->tran_speed,
2138 #ifdef MMC_SUPPORTS_TUNING
2140 /* execute tuning if needed */
2142 err = mmc_execute_tuning(mmc,
2145 pr_debug("tuning failed\n");
2152 /* do a transfer to check the configuration */
2153 err = mmc_read_and_compare_ext_csd(mmc);
2157 mmc_set_signal_voltage(mmc, old_voltage);
2158 /* if an error occurred, revert to a safer bus mode */
2159 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2160 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2161 mmc_select_mode(mmc, MMC_LEGACY);
2162 mmc_set_bus_width(mmc, 1);
2166 pr_err("unable to select a mode\n");
2172 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: static cache-aligned EXT_CSD buffer (avoids malloc in SPL). */
2173 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * mmc_startup_v4() - MMC v4+ specific initialization from EXT_CSD.
 *
 * Reads EXT_CSD (into a static buffer under MMC_TINY, otherwise into a
 * malloc'd copy kept in mmc->ext_csd), derives the spec version, user/
 * boot/RPMB/GP partition capacities, partition switch timing, erase
 * group size and write-reliability settings.  No-op for SD cards and
 * pre-v4 MMC.
 */
2176 static int mmc_startup_v4(struct mmc *mmc)
2180 bool has_parts = false;
2181 bool part_completed;
2182 static const u32 mmc_versions[] = {
2194 #if CONFIG_IS_ENABLED(MMC_TINY)
2195 u8 *ext_csd = ext_csd_bkup;
2197 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2201 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2203 err = mmc_send_ext_csd(mmc, ext_csd);
2207 /* store the ext csd for future reference */
2209 mmc->ext_csd = ext_csd;
2211 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2213 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2216 /* check ext_csd version and capacity */
2217 err = mmc_send_ext_csd(mmc, ext_csd);
2221 /* store the ext csd for future reference */
2223 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2226 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD_REV indexes mmc_versions[]; reject unknown revisions. */
2228 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2231 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2233 if (mmc->version >= MMC_VERSION_4_2) {
2235 * According to the JEDEC Standard, the value of
2236 * ext_csd's capacity is valid if the value is more
/* SEC_CNT is a little-endian 32-bit sector count. */
2239 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2240 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2241 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2242 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2243 capacity *= MMC_MAX_BLOCK_LEN;
/* Only trust SEC_CNT-derived capacity above 2 GiB here. */
2244 if ((capacity >> 20) > 2 * 1024)
2245 mmc->capacity_user = capacity;
2248 if (mmc->version >= MMC_VERSION_4_5)
2249 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2251 /* The partition data may be non-zero but it is only
2252 * effective if PARTITION_SETTING_COMPLETED is set in
2253 * EXT_CSD, so ignore any data if this bit is not set,
2254 * except for enabling the high-capacity group size
2255 * definition (see below).
2257 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2258 EXT_CSD_PARTITION_SETTING_COMPLETED);
2260 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2261 /* Some eMMC set the value too low so set a minimum */
2262 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2263 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2265 /* store the partition info of emmc */
2266 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2267 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2268 ext_csd[EXT_CSD_BOOT_MULT])
2269 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2270 if (part_completed &&
2271 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2272 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT_MULT/RPMB_MULT are in 128 KiB units (hence << 17). */
2274 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2276 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* Four general-purpose partitions, 3 size-mult bytes each. */
2278 for (i = 0; i < 4; i++) {
2279 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2280 uint mult = (ext_csd[idx + 2] << 16) +
2281 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2284 if (!part_completed)
2286 mmc->capacity_gp[i] = mult;
2287 mmc->capacity_gp[i] *=
2288 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2289 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2290 mmc->capacity_gp[i] <<= 19;
2293 #ifndef CONFIG_SPL_BUILD
2294 if (part_completed) {
2295 mmc->enh_user_size =
2296 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2297 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2298 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2299 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2300 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2301 mmc->enh_user_size <<= 19;
2302 mmc->enh_user_start =
2303 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2304 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2305 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2306 ext_csd[EXT_CSD_ENH_START_ADDR];
/* High-capacity devices address in 512-byte sectors. */
2307 if (mmc->high_capacity)
2308 mmc->enh_user_start <<= 9;
2313 * Host needs to enable ERASE_GRP_DEF bit if device is
2314 * partitioned. This bit will be lost every time after a reset
2315 * or power off. This will affect erase size.
2319 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2320 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2323 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2324 EXT_CSD_ERASE_GROUP_DEF, 1);
/* Mirror the switch into the cached EXT_CSD copy. */
2329 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2332 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2333 #if CONFIG_IS_ENABLED(MMC_WRITE)
2334 /* Read out group size from ext_csd */
2335 mmc->erase_grp_size =
2336 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2339 * if high capacity and partition setting completed
2340 * SEC_COUNT is valid even if it is smaller than 2 GiB
2341 * JEDEC Standard JESD84-B45, 6.2.4
2343 if (mmc->high_capacity && part_completed) {
2344 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2345 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2346 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2347 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2348 capacity *= MMC_MAX_BLOCK_LEN;
2349 mmc->capacity_user = capacity;
2352 #if CONFIG_IS_ENABLED(MMC_WRITE)
2354 /* Calculate the group size from the csd value. */
2355 int erase_gsz, erase_gmul;
2357 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2358 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2359 mmc->erase_grp_size = (erase_gsz + 1)
2363 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2364 mmc->hc_wp_grp_size = 1024
2365 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2366 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2369 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
/* Error path: drop the cached EXT_CSD (not under MMC_TINY, where the
 * buffer is static).
 */
2374 #if !CONFIG_IS_ENABLED(MMC_TINY)
2377 mmc->ext_csd = NULL;
/*
 * mmc_startup() - bring an identified card from Identify to Transfer
 * state and populate the mmc/blk descriptors.
 *
 * Sequence (visible here): optional SPI CRC enable, ALL_SEND_CID (with
 * retry quirk), relative-address exchange, SEND_CSD decode (version,
 * legacy speed, block lengths, capacity), optional SET_DSR, SELECT_CARD,
 * v4 startup, capability probing and mode/width selection, then fill in
 * the block-device description strings.
 */
2382 static int mmc_startup(struct mmc *mmc)
2388 struct blk_desc *bdesc;
2390 #ifdef CONFIG_MMC_SPI_CRC_ON
2391 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2392 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2393 cmd.resp_type = MMC_RSP_R1;
2395 err = mmc_send_cmd(mmc, &cmd, NULL);
2401 /* Put the Card in Identify Mode */
2402 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2403 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2404 cmd.resp_type = MMC_RSP_R2;
2407 err = mmc_send_cmd(mmc, &cmd, NULL);
2409 #ifdef CONFIG_MMC_QUIRKS
2410 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2413 * It has been seen that SEND_CID may fail on the first
2414 * attempt, let's try a few more time
2417 err = mmc_send_cmd(mmc, &cmd, NULL);
2420 } while (retries--);
2427 memcpy(mmc->cid, cmd.response, 16);
2430 * For MMC cards, set the Relative Address.
2431 * For SD cards, get the Relative Address.
2432 * This also puts the cards into Standby State
2434 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2435 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2436 cmd.cmdarg = mmc->rca << 16;
2437 cmd.resp_type = MMC_RSP_R6;
2439 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD: card assigns the RCA in the R6 response. */
2445 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2448 /* Get the Card-Specific Data */
2449 cmd.cmdidx = MMC_CMD_SEND_CSD;
2450 cmd.resp_type = MMC_RSP_R2;
2451 cmd.cmdarg = mmc->rca << 16;
2453 err = mmc_send_cmd(mmc, &cmd, NULL);
2458 mmc->csd[0] = cmd.response[0];
2459 mmc->csd[1] = cmd.response[1];
2460 mmc->csd[2] = cmd.response[2];
2461 mmc->csd[3] = cmd.response[3];
/* Derive the MMC spec version from the CSD SPEC_VERS field. */
2463 if (mmc->version == MMC_VERSION_UNKNOWN) {
2464 int version = (cmd.response[0] >> 26) & 0xf;
2468 mmc->version = MMC_VERSION_1_2;
2471 mmc->version = MMC_VERSION_1_4;
2474 mmc->version = MMC_VERSION_2_2;
2477 mmc->version = MMC_VERSION_3;
2480 mmc->version = MMC_VERSION_4;
2483 mmc->version = MMC_VERSION_1_2;
2488 /* divide frequency by 10, since the mults are 10x bigger */
2489 freq = fbase[(cmd.response[0] & 0x7)];
2490 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2492 mmc->legacy_speed = freq * mult;
2493 mmc_select_mode(mmc, MMC_LEGACY);
2495 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2496 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2497 #if CONFIG_IS_ENABLED(MMC_WRITE)
2500 mmc->write_bl_len = mmc->read_bl_len;
2502 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* Decode C_SIZE/C_SIZE_MULT differently for (S)DHC vs standard cards. */
2505 if (mmc->high_capacity) {
2506 csize = (mmc->csd[1] & 0x3f) << 16
2507 | (mmc->csd[2] & 0xffff0000) >> 16;
2510 csize = (mmc->csd[1] & 0x3ff) << 2
2511 | (mmc->csd[2] & 0xc0000000) >> 30;
2512 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2515 mmc->capacity_user = (csize + 1) << (cmult + 2);
2516 mmc->capacity_user *= mmc->read_bl_len;
2517 mmc->capacity_boot = 0;
2518 mmc->capacity_rpmb = 0;
2519 for (i = 0; i < 4; i++)
2520 mmc->capacity_gp[i] = 0;
/* Clamp block lengths to the driver's MMC_MAX_BLOCK_LEN. */
2522 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2523 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2525 #if CONFIG_IS_ENABLED(MMC_WRITE)
2526 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2527 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the DSR only if implemented and a value was configured. */
2530 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2531 cmd.cmdidx = MMC_CMD_SET_DSR;
2532 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2533 cmd.resp_type = MMC_RSP_NONE;
2534 if (mmc_send_cmd(mmc, &cmd, NULL))
2535 pr_warn("MMC: SET_DSR failed\n");
2538 /* Select the card, and put it into Transfer Mode */
2539 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2540 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2541 cmd.resp_type = MMC_RSP_R1;
2542 cmd.cmdarg = mmc->rca << 16;
2543 err = mmc_send_cmd(mmc, &cmd, NULL);
2550 * For SD, its erase group is always one sector
2552 #if CONFIG_IS_ENABLED(MMC_WRITE)
2553 mmc->erase_grp_size = 1;
2555 mmc->part_config = MMCPART_NOAVAILABLE;
2557 err = mmc_startup_v4(mmc);
2561 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2565 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: skip mode negotiation, stay at 1-bit legacy speed. */
2566 mmc_set_clock(mmc, mmc->legacy_speed, false);
2567 mmc_select_mode(mmc, MMC_LEGACY);
2568 mmc_set_bus_width(mmc, 1);
2571 err = sd_get_capabilities(mmc);
2574 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2576 err = mmc_get_capabilities(mmc);
2579 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2585 mmc->best_mode = mmc->selected_mode;
2587 /* Fix the block length for DDR mode */
2588 if (mmc->ddr_mode) {
2589 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2590 #if CONFIG_IS_ENABLED(MMC_WRITE)
2591 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2595 /* fill in device description */
2596 bdesc = mmc_get_blk_desc(mmc);
2600 bdesc->blksz = mmc->read_bl_len;
2601 bdesc->log2blksz = LOG2(bdesc->blksz);
2602 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2603 #if !defined(CONFIG_SPL_BUILD) || \
2604 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2605 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* Vendor/product/revision strings decoded from the CID register. */
2606 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2607 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2608 (mmc->cid[3] >> 16) & 0xffff);
2609 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2610 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2611 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2612 (mmc->cid[2] >> 24) & 0xff);
2613 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2614 (mmc->cid[2] >> 16) & 0xf);
2616 bdesc->vendor[0] = 0;
2617 bdesc->product[0] = 0;
2618 bdesc->revision[0] = 0;
2621 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * mmc_send_if_cond() - send SD CMD8 (SEND_IF_COND) to detect an SD v2+
 * card.  The check pattern 0xaa must be echoed back; on success the
 * card version is marked SD_VERSION_2.
 */
2628 static int mmc_send_if_cond(struct mmc *mmc)
2633 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2634 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2635 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2636 cmd.resp_type = MMC_RSP_R7;
2638 err = mmc_send_cmd(mmc, &cmd, NULL);
/* The card must echo the 0xaa check pattern back. */
2643 if ((cmd.response[0] & 0xff) != 0xaa)
2646 mmc->version = SD_VERSION_2;
2651 #if !CONFIG_IS_ENABLED(DM_MMC)
2652 /* board-specific MMC power initializations. */
/* Weak default: boards override this to switch on card power rails. */
2653 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - look up power supplies for the card.
 * Under DM_MMC + DM_REGULATOR this resolves the "vmmc-supply" (card
 * power) and "vqmmc-supply" (I/O voltage) regulators; a missing supply
 * is only a debug message, not an error.  Without DM it falls back to
 * the board_mmc_power_init() hook.
 */
2658 static int mmc_power_init(struct mmc *mmc)
2660 #if CONFIG_IS_ENABLED(DM_MMC)
2661 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2664 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2667 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2669 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2670 &mmc->vqmmc_supply);
2672 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2674 #else /* !CONFIG_DM_MMC */
2676 * Driver model should use a regulator, as above, rather than calling
2677 * out to board code.
2679 board_mmc_power_init();
2685 * put the host in the initial state:
2686 * - turn on Vdd (card power supply)
2687 * - configure the bus width and clock to minimal values
2689 static void mmc_set_initial_state(struct mmc *mmc)
2693 /* First try to set 3.3V. If it fails set to 1.8V */
2694 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2696 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2698 pr_warn("mmc: failed to set signal voltage\n");
/* Start out at 1-bit legacy mode with the minimum clock enabled. */
2700 mmc_select_mode(mmc, MMC_LEGACY);
2701 mmc_set_bus_width(mmc, 1);
2702 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/*
 * mmc_power_on() - enable the card's Vmmc supply (DM regulator builds
 * only; otherwise this is effectively a no-op in the visible code).
 */
2705 static int mmc_power_on(struct mmc *mmc)
2707 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2708 if (mmc->vmmc_supply) {
2709 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2712 puts("Error enabling VMMC supply\n");
/*
 * mmc_power_off() - stop the bus clock and, when a DM regulator is
 * available, disable the card's Vmmc supply.
 */
2720 static int mmc_power_off(struct mmc *mmc)
2722 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2723 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2724 if (mmc->vmmc_supply) {
2725 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2728 pr_debug("Error disabling VMMC supply\n");
/*
 * mmc_power_cycle() - power the card fully off and back on, including
 * any host-controller cycle hook, with a safety delay in between.
 */
2736 static int mmc_power_cycle(struct mmc *mmc)
2740 ret = mmc_power_off(mmc);
2744 ret = mmc_host_power_cycle(mmc);
2749 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2750 * to be on the safer side.
2753 return mmc_power_on(mmc);
/*
 * mmc_get_op_cond() - power up the card and obtain its operating
 * conditions.
 *
 * Initializes power supplies, applies quirks, power-cycles the card
 * (disabling UHS if a full cycle is impossible, since UHS error
 * recovery needs one), resets the card with CMD0, probes for SD v2 via
 * SEND_IF_COND, then issues the SD or (on timeout) MMC operating-
 * condition command.
 */
2756 int mmc_get_op_cond(struct mmc *mmc)
2758 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2764 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2765 mmc_adapter_card_type_ident();
2767 err = mmc_power_init(mmc);
2771 #ifdef CONFIG_MMC_QUIRKS
2772 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2773 MMC_QUIRK_RETRY_SEND_CID |
2774 MMC_QUIRK_RETRY_APP_CMD;
2777 err = mmc_power_cycle(mmc);
2780 * if power cycling is not supported, we should not try
2781 * to use the UHS modes, because we wouldn't be able to
2782 * recover from an error during the UHS initialization.
2784 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2786 mmc->host_caps &= ~UHS_CAPS;
2787 err = mmc_power_on(mmc);
2792 #if CONFIG_IS_ENABLED(DM_MMC)
2793 /* The device has already been probed ready for use */
2795 /* made sure it's not NULL earlier */
2796 err = mmc->cfg->ops->init(mmc);
2803 mmc_set_initial_state(mmc);
2805 /* Reset the Card */
2806 err = mmc_go_idle(mmc);
2811 /* The internal partition reset to user partition(0) at every CMD0*/
2812 mmc_get_blk_desc(mmc)->hwpart = 0;
2814 /* Test for SD version 2 */
2815 err = mmc_send_if_cond(mmc);
2817 /* Now try to get the SD card's operating condition */
2818 err = sd_send_op_cond(mmc, uhs_en);
/* UHS negotiation failed: power-cycle and presumably retry without
 * UHS (retry path elided from this view).
 */
2819 if (err && uhs_en) {
2821 mmc_power_cycle(mmc);
2825 /* If the command timed out, we check for an MMC card */
2826 if (err == -ETIMEDOUT) {
2827 err = mmc_send_op_cond(mmc);
2830 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2831 pr_err("Card did not respond to voltage select!\n");
/*
 * mmc_start_init() - begin (possibly asynchronous) card initialization.
 * Seeds host capabilities, runs deferred probe / card-detect, then
 * performs the operating-condition handshake and marks init as
 * in-progress.
 */
2840 int mmc_start_init(struct mmc *mmc)
2846 * all hosts are capable of 1 bit bus-width and able to use the legacy
/* NOTE(review): MMC_CAP(MMC_LEGACY) is OR'd in twice below — harmless
 * (bitwise OR is idempotent) but redundant.
 */
2849 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2850 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2851 #if CONFIG_IS_ENABLED(DM_MMC)
2852 mmc_deferred_probe(mmc);
2854 #if !defined(CONFIG_MMC_BROKEN_CD)
2855 no_card = mmc_getcd(mmc) == 0;
2859 #if !CONFIG_IS_ENABLED(DM_MMC)
2860 /* we pretend there's no card when init is NULL */
2861 no_card = no_card || (mmc->cfg->ops->init == NULL);
2865 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2866 pr_err("MMC: no card present\n");
2871 err = mmc_get_op_cond(mmc);
2874 mmc->init_in_progress = 1;
/*
 * mmc_complete_init() - finish an initialization begun by
 * mmc_start_init(): complete a pending op-cond handshake if needed,
 * then run the full startup sequence.
 */
2879 static int mmc_complete_init(struct mmc *mmc)
2883 mmc->init_in_progress = 0;
2884 if (mmc->op_cond_pending)
2885 err = mmc_complete_op_cond(mmc);
2888 err = mmc_startup(mmc);
/*
 * mmc_init() - public entry point: run start + complete initialization
 * unless already in progress, timing the whole sequence for the log.
 */
2896 int mmc_init(struct mmc *mmc)
2899 __maybe_unused ulong start;
2900 #if CONFIG_IS_ENABLED(DM_MMC)
2901 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2908 start = get_timer(0);
2910 if (!mmc->init_in_progress)
2911 err = mmc_start_init(mmc);
2914 err = mmc_complete_init(mmc);
2916 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2921 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2922 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2923 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_deinit() - downgrade the card out of high-speed modes by
 * re-running mode selection with UHS (SD) or HS200/HS400 (eMMC)
 * capabilities masked out.
 */
2924 int mmc_deinit(struct mmc *mmc)
2932 caps_filtered = mmc->card_caps &
2933 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2934 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2935 MMC_CAP(UHS_SDR104));
2937 return sd_select_mode_and_width(mmc, caps_filtered);
2939 caps_filtered = mmc->card_caps &
2940 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2942 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Store a Driver Stage Register value to be programmed during startup. */
2947 int mmc_set_dsr(struct mmc *mmc, u16 val)
2953 /* CPU-specific MMC initializations */
/* Weak default; SoC code may override. */
2954 __weak int cpu_mmc_init(bd_t *bis)
2959 /* board-specific MMC initializations. */
/* Weak default; board code may override. */
2960 __weak int board_mmc_init(bd_t *bis)
/* Request that this device be initialized early (before first use). */
2965 void mmc_set_preinit(struct mmc *mmc, int preinit)
2967 mmc->preinit = preinit;
2970 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - driver-model variant: enumerate the MMC uclass in
 * sequence order, then probe every device, logging (not aborting on)
 * individual probe failures.
 */
2971 static int mmc_probe(bd_t *bis)
2975 struct udevice *dev;
2977 ret = uclass_get(UCLASS_MMC, &uc);
2982 * Try to add them in sequence order. Really with driver model we
2983 * should allow holes, but the current MMC list does not allow that.
2984 * So if we request 0, 1, 3 we will get 0, 1, 2.
2986 for (i = 0; ; i++) {
2987 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2991 uclass_foreach_dev(dev, uc) {
2992 ret = device_probe(dev);
2994 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-driver-model variant: delegate to the board hook. */
3000 static int mmc_probe(bd_t *bis)
3002 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-time global MMC subsystem init: probe all
 * controllers and print the device list.  Guarded so repeated calls
 * are no-ops.
 */
3009 int mmc_initialize(bd_t *bis)
3011 static int initialized = 0;
3013 if (initialized) /* Avoid initializing mmc multiple times */
3017 #if !CONFIG_IS_ENABLED(BLK)
3018 #if !CONFIG_IS_ENABLED(MMC_TINY)
3022 ret = mmc_probe(bis);
3026 #ifndef CONFIG_SPL_BUILD
3027 print_mmc_devices(',');
3034 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_init_device() - look up MMC device 'num' via the uclass and
 * prepare it (pre-init is forced for FSL eSDHC adapter identification).
 */
3035 int mmc_init_device(int num)
3037 struct udevice *dev;
3041 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3045 m = mmc_get_mmc_dev(dev);
3048 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3049 mmc_set_preinit(m, 1);
3058 #ifdef CONFIG_CMD_BKOPS_ENABLE
3059 int mmc_set_bkops_enable(struct mmc *mmc)
3062 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3064 err = mmc_send_ext_csd(mmc, ext_csd);
3066 puts("Could not get ext_csd register values\n");
3070 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3071 puts("Background operations not supported on device\n");
3072 return -EMEDIUMTYPE;
3075 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3076 puts("Background operations already enabled\n");
3080 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3082 puts("Failed to enable manual background operations\n");
3086 puts("Enabled manual background operations\n");