1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
27 static int mmc_power_cycle(struct mmc *mmc);
28 #if !CONFIG_IS_ENABLED(MMC_TINY)
29 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
32 #if !CONFIG_IS_ENABLED(DM_MMC)
34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
39 __weak int board_mmc_getwp(struct mmc *mmc)
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
164 [SD_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout)
236 err = mmc_wait_dat0(mmc, 1, timeout);
241 err = mmc_send_status(mmc, &status);
245 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
246 (status & MMC_STATUS_CURR_STATE) !=
250 if (status & MMC_STATUS_MASK) {
251 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
252 pr_err("Status Error: 0x%08x\n", status);
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n");
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292 * attempt, let's try a few more time
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
368 if (memcmp(data_buf, tuning_block_pattern, size))
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
386 if (mmc->high_capacity)
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n");
416 #if CONFIG_IS_ENABLED(BLK)
417 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
419 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
423 #if CONFIG_IS_ENABLED(BLK)
424 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
426 int dev_num = block_dev->devnum;
428 lbaint_t cur, blocks_todo = blkcnt;
433 struct mmc *mmc = find_mmc_device(dev_num);
437 if (CONFIG_IS_ENABLED(MMC_TINY))
438 err = mmc_switch_part(mmc, block_dev->hwpart);
440 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
445 if ((start + blkcnt) > block_dev->lba) {
446 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
447 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
448 start + blkcnt, block_dev->lba);
453 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
454 pr_debug("%s: Failed to set blocklen\n", __func__);
459 cur = (blocks_todo > mmc->cfg->b_max) ?
460 mmc->cfg->b_max : blocks_todo;
461 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
462 pr_debug("%s: Failed to read blocks\n", __func__);
467 dst += cur * mmc->read_bl_len;
468 } while (blocks_todo > 0);
473 static int mmc_go_idle(struct mmc *mmc)
480 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
482 cmd.resp_type = MMC_RSP_NONE;
484 err = mmc_send_cmd(mmc, &cmd, NULL);
494 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
495 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
501 * Send CMD11 only if the request is to switch the card to
504 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
505 return mmc_set_signal_voltage(mmc, signal_voltage);
507 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
509 cmd.resp_type = MMC_RSP_R1;
511 err = mmc_send_cmd(mmc, &cmd, NULL);
515 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
519 * The card should drive cmd and dat[0:3] low immediately
520 * after the response of cmd11, but wait 100 us to be sure
522 err = mmc_wait_dat0(mmc, 0, 100);
529 * During a signal voltage level switch, the clock must be gated
530 * for 5 ms according to the SD spec
532 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
534 err = mmc_set_signal_voltage(mmc, signal_voltage);
538 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
540 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
543 * Failure to switch is indicated by the card holding
544 * dat[0:3] low. Wait for at least 1 ms according to spec
546 err = mmc_wait_dat0(mmc, 1, 1000);
556 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
563 cmd.cmdidx = MMC_CMD_APP_CMD;
564 cmd.resp_type = MMC_RSP_R1;
567 err = mmc_send_cmd(mmc, &cmd, NULL);
572 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
573 cmd.resp_type = MMC_RSP_R3;
576 * Most cards do not answer if some reserved bits
577 * in the ocr are set. However, Some controller
578 * can set bit 7 (reserved for low voltages), but
579 * how to manage low voltages SD card is not yet
582 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
583 (mmc->cfg->voltages & 0xff8000);
585 if (mmc->version == SD_VERSION_2)
586 cmd.cmdarg |= OCR_HCS;
589 cmd.cmdarg |= OCR_S18R;
591 err = mmc_send_cmd(mmc, &cmd, NULL);
596 if (cmd.response[0] & OCR_BUSY)
605 if (mmc->version != SD_VERSION_2)
606 mmc->version = SD_VERSION_1_0;
608 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
609 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
610 cmd.resp_type = MMC_RSP_R3;
613 err = mmc_send_cmd(mmc, &cmd, NULL);
619 mmc->ocr = cmd.response[0];
621 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
622 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
624 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
630 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
636 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
641 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 cmd.resp_type = MMC_RSP_R3;
644 if (use_arg && !mmc_host_is_spi(mmc))
645 cmd.cmdarg = OCR_HCS |
646 (mmc->cfg->voltages &
647 (mmc->ocr & OCR_VOLTAGE_MASK)) |
648 (mmc->ocr & OCR_ACCESS_MODE);
650 err = mmc_send_cmd(mmc, &cmd, NULL);
653 mmc->ocr = cmd.response[0];
657 static int mmc_send_op_cond(struct mmc *mmc)
661 /* Some cards seem to need this */
664 /* Asking to the card its capabilities */
665 for (i = 0; i < 2; i++) {
666 err = mmc_send_op_cond_iter(mmc, i != 0);
670 /* exit if not busy (flag seems to be inverted) */
671 if (mmc->ocr & OCR_BUSY)
674 mmc->op_cond_pending = 1;
678 static int mmc_complete_op_cond(struct mmc *mmc)
685 mmc->op_cond_pending = 0;
686 if (!(mmc->ocr & OCR_BUSY)) {
687 /* Some cards seem to need this */
690 start = get_timer(0);
692 err = mmc_send_op_cond_iter(mmc, 1);
695 if (mmc->ocr & OCR_BUSY)
697 if (get_timer(start) > timeout)
703 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
704 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
705 cmd.resp_type = MMC_RSP_R3;
708 err = mmc_send_cmd(mmc, &cmd, NULL);
713 mmc->ocr = cmd.response[0];
716 mmc->version = MMC_VERSION_UNKNOWN;
718 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
725 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
728 struct mmc_data data;
731 /* Get the Card Status Register */
732 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 cmd.resp_type = MMC_RSP_R1;
736 data.dest = (char *)ext_csd;
738 data.blocksize = MMC_MAX_BLOCK_LEN;
739 data.flags = MMC_DATA_READ;
741 err = mmc_send_cmd(mmc, &cmd, &data);
746 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
749 unsigned int status, start;
751 int timeout = DEFAULT_CMD6_TIMEOUT_MS;
752 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
753 (index == EXT_CSD_PART_CONF);
757 if (mmc->gen_cmd6_time)
758 timeout = mmc->gen_cmd6_time * 10;
760 if (is_part_switch && mmc->part_switch_time)
761 timeout = mmc->part_switch_time * 10;
763 cmd.cmdidx = MMC_CMD_SWITCH;
764 cmd.resp_type = MMC_RSP_R1b;
765 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
770 ret = mmc_send_cmd(mmc, &cmd, NULL);
771 } while (ret && retries-- > 0);
776 start = get_timer(0);
778 /* poll dat0 for rdy/buys status */
779 ret = mmc_wait_dat0(mmc, 1, timeout);
780 if (ret && ret != -ENOSYS)
784 * In cases when not allowed to poll by using CMD13 or because we aren't
785 * capable of polling by using mmc_wait_dat0, then rely on waiting the
786 * stated timeout to be sufficient.
788 if (ret == -ENOSYS && !send_status)
791 /* Finally wait until the card is ready or indicates a failure
792 * to switch. It doesn't hurt to use CMD13 here even if send_status
793 * is false, because by now (after 'timeout' ms) the bus should be
797 ret = mmc_send_status(mmc, &status);
799 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
800 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
804 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
807 } while (get_timer(start) < timeout);
812 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
814 return __mmc_switch(mmc, set, index, value, true);
817 #if !CONFIG_IS_ENABLED(MMC_TINY)
818 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
824 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
830 speed_bits = EXT_CSD_TIMING_HS;
832 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
834 speed_bits = EXT_CSD_TIMING_HS200;
837 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
839 speed_bits = EXT_CSD_TIMING_HS400;
843 speed_bits = EXT_CSD_TIMING_LEGACY;
849 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
850 speed_bits, !hsdowngrade);
854 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
855 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
857 * In case the eMMC is in HS200/HS400 mode and we are downgrading
858 * to HS mode, the card clock are still running much faster than
859 * the supported HS mode clock, so we can not reliably read out
860 * Extended CSD. Reconfigure the controller to run at HS mode.
863 mmc_select_mode(mmc, MMC_HS);
864 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
868 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
869 /* Now check to see that it worked */
870 err = mmc_send_ext_csd(mmc, test_csd);
874 /* No high-speed support */
875 if (!test_csd[EXT_CSD_HS_TIMING])
882 static int mmc_get_capabilities(struct mmc *mmc)
884 u8 *ext_csd = mmc->ext_csd;
887 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
889 if (mmc_host_is_spi(mmc))
892 /* Only version 4 supports high-speed */
893 if (mmc->version < MMC_VERSION_4)
897 pr_err("No ext_csd found!\n"); /* this should enver happen */
901 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
903 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
904 mmc->cardtype = cardtype;
906 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
907 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
908 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
909 mmc->card_caps |= MMC_MODE_HS200;
912 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
913 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
914 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
915 mmc->card_caps |= MMC_MODE_HS400;
918 if (cardtype & EXT_CSD_CARD_TYPE_52) {
919 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
920 mmc->card_caps |= MMC_MODE_DDR_52MHz;
921 mmc->card_caps |= MMC_MODE_HS_52MHz;
923 if (cardtype & EXT_CSD_CARD_TYPE_26)
924 mmc->card_caps |= MMC_MODE_HS;
930 static int mmc_set_capacity(struct mmc *mmc, int part_num)
934 mmc->capacity = mmc->capacity_user;
938 mmc->capacity = mmc->capacity_boot;
941 mmc->capacity = mmc->capacity_rpmb;
947 mmc->capacity = mmc->capacity_gp[part_num - 4];
953 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
958 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
964 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
966 (mmc->part_config & ~PART_ACCESS_MASK)
967 | (part_num & PART_ACCESS_MASK));
968 } while (ret && retry--);
971 * Set the capacity if the switch succeeded or was intended
972 * to return to representing the raw device.
974 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
975 ret = mmc_set_capacity(mmc, part_num);
976 mmc_get_blk_desc(mmc)->hwpart = part_num;
982 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
983 int mmc_hwpart_config(struct mmc *mmc,
984 const struct mmc_hwpart_conf *conf,
985 enum mmc_hwpart_conf_mode mode)
991 u32 max_enh_size_mult;
992 u32 tot_enh_size_mult = 0;
995 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
997 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1000 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1001 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1002 return -EMEDIUMTYPE;
1005 if (!(mmc->part_support & PART_SUPPORT)) {
1006 pr_err("Card does not support partitioning\n");
1007 return -EMEDIUMTYPE;
1010 if (!mmc->hc_wp_grp_size) {
1011 pr_err("Card does not define HC WP group size\n");
1012 return -EMEDIUMTYPE;
1015 /* check partition alignment and total enhanced size */
1016 if (conf->user.enh_size) {
1017 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1018 conf->user.enh_start % mmc->hc_wp_grp_size) {
1019 pr_err("User data enhanced area not HC WP group "
1023 part_attrs |= EXT_CSD_ENH_USR;
1024 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1025 if (mmc->high_capacity) {
1026 enh_start_addr = conf->user.enh_start;
1028 enh_start_addr = (conf->user.enh_start << 9);
1034 tot_enh_size_mult += enh_size_mult;
1036 for (pidx = 0; pidx < 4; pidx++) {
1037 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1038 pr_err("GP%i partition not HC WP group size "
1039 "aligned\n", pidx+1);
1042 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1043 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1044 part_attrs |= EXT_CSD_ENH_GP(pidx);
1045 tot_enh_size_mult += gp_size_mult[pidx];
1049 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1050 pr_err("Card does not support enhanced attribute\n");
1051 return -EMEDIUMTYPE;
1054 err = mmc_send_ext_csd(mmc, ext_csd);
1059 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1060 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1061 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1062 if (tot_enh_size_mult > max_enh_size_mult) {
1063 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1064 tot_enh_size_mult, max_enh_size_mult);
1065 return -EMEDIUMTYPE;
1068 /* The default value of EXT_CSD_WR_REL_SET is device
1069 * dependent, the values can only be changed if the
1070 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1071 * changed only once and before partitioning is completed. */
1072 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1073 if (conf->user.wr_rel_change) {
1074 if (conf->user.wr_rel_set)
1075 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1077 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1079 for (pidx = 0; pidx < 4; pidx++) {
1080 if (conf->gp_part[pidx].wr_rel_change) {
1081 if (conf->gp_part[pidx].wr_rel_set)
1082 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1084 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1088 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1089 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1090 puts("Card does not support host controlled partition write "
1091 "reliability settings\n");
1092 return -EMEDIUMTYPE;
1095 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1096 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1097 pr_err("Card already partitioned\n");
1101 if (mode == MMC_HWPART_CONF_CHECK)
1104 /* Partitioning requires high-capacity size definitions */
1105 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1106 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1107 EXT_CSD_ERASE_GROUP_DEF, 1);
1112 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1114 /* update erase group size to be high-capacity */
1115 mmc->erase_grp_size =
1116 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1120 /* all OK, write the configuration */
1121 for (i = 0; i < 4; i++) {
1122 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1123 EXT_CSD_ENH_START_ADDR+i,
1124 (enh_start_addr >> (i*8)) & 0xFF);
1128 for (i = 0; i < 3; i++) {
1129 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1130 EXT_CSD_ENH_SIZE_MULT+i,
1131 (enh_size_mult >> (i*8)) & 0xFF);
1135 for (pidx = 0; pidx < 4; pidx++) {
1136 for (i = 0; i < 3; i++) {
1137 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1138 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1139 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1144 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1145 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1149 if (mode == MMC_HWPART_CONF_SET)
1152 /* The WR_REL_SET is a write-once register but shall be
1153 * written before setting PART_SETTING_COMPLETED. As it is
1154 * write-once we can only write it when completing the
1156 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1157 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1158 EXT_CSD_WR_REL_SET, wr_rel_set);
1163 /* Setting PART_SETTING_COMPLETED confirms the partition
1164 * configuration but it only becomes effective after power
1165 * cycle, so we do not adjust the partition related settings
1166 * in the mmc struct. */
1168 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1169 EXT_CSD_PARTITION_SETTING,
1170 EXT_CSD_PARTITION_SETTING_COMPLETED);
1178 #if !CONFIG_IS_ENABLED(DM_MMC)
1179 int mmc_getcd(struct mmc *mmc)
1183 cd = board_mmc_getcd(mmc);
1186 if (mmc->cfg->ops->getcd)
1187 cd = mmc->cfg->ops->getcd(mmc);
1196 #if !CONFIG_IS_ENABLED(MMC_TINY)
1197 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1200 struct mmc_data data;
1202 /* Switch the frequency */
1203 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1204 cmd.resp_type = MMC_RSP_R1;
1205 cmd.cmdarg = (mode << 31) | 0xffffff;
1206 cmd.cmdarg &= ~(0xf << (group * 4));
1207 cmd.cmdarg |= value << (group * 4);
1209 data.dest = (char *)resp;
1210 data.blocksize = 64;
1212 data.flags = MMC_DATA_READ;
1214 return mmc_send_cmd(mmc, &cmd, &data);
1217 static int sd_get_capabilities(struct mmc *mmc)
1221 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1222 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1223 struct mmc_data data;
1225 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1229 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1231 if (mmc_host_is_spi(mmc))
1234 /* Read the SCR to find out if this card supports higher speeds */
1235 cmd.cmdidx = MMC_CMD_APP_CMD;
1236 cmd.resp_type = MMC_RSP_R1;
1237 cmd.cmdarg = mmc->rca << 16;
1239 err = mmc_send_cmd(mmc, &cmd, NULL);
1244 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1245 cmd.resp_type = MMC_RSP_R1;
1251 data.dest = (char *)scr;
1254 data.flags = MMC_DATA_READ;
1256 err = mmc_send_cmd(mmc, &cmd, &data);
1265 mmc->scr[0] = __be32_to_cpu(scr[0]);
1266 mmc->scr[1] = __be32_to_cpu(scr[1]);
1268 switch ((mmc->scr[0] >> 24) & 0xf) {
1270 mmc->version = SD_VERSION_1_0;
1273 mmc->version = SD_VERSION_1_10;
1276 mmc->version = SD_VERSION_2;
1277 if ((mmc->scr[0] >> 15) & 0x1)
1278 mmc->version = SD_VERSION_3;
1281 mmc->version = SD_VERSION_1_0;
1285 if (mmc->scr[0] & SD_DATA_4BIT)
1286 mmc->card_caps |= MMC_MODE_4BIT;
1288 /* Version 1.0 doesn't support switching */
1289 if (mmc->version == SD_VERSION_1_0)
1294 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1295 (u8 *)switch_status);
1300 /* The high-speed function is busy. Try again */
1301 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1305 /* If high-speed isn't supported, we return */
1306 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1307 mmc->card_caps |= MMC_CAP(SD_HS);
1309 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1310 /* Version before 3.0 don't support UHS modes */
1311 if (mmc->version < SD_VERSION_3)
1314 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1315 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1316 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1317 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1318 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1319 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1320 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1321 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1322 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1323 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1324 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1330 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1334 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1337 /* SD version 1.00 and 1.01 does not support CMD 6 */
1338 if (mmc->version == SD_VERSION_1_0)
1343 speed = UHS_SDR12_BUS_SPEED;
1346 speed = HIGH_SPEED_BUS_SPEED;
1348 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1350 speed = UHS_SDR12_BUS_SPEED;
1353 speed = UHS_SDR25_BUS_SPEED;
1356 speed = UHS_SDR50_BUS_SPEED;
1359 speed = UHS_DDR50_BUS_SPEED;
1362 speed = UHS_SDR104_BUS_SPEED;
1369 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1373 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1379 static int sd_select_bus_width(struct mmc *mmc, int w)
1384 if ((w != 4) && (w != 1))
1387 cmd.cmdidx = MMC_CMD_APP_CMD;
1388 cmd.resp_type = MMC_RSP_R1;
1389 cmd.cmdarg = mmc->rca << 16;
1391 err = mmc_send_cmd(mmc, &cmd, NULL);
1395 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1396 cmd.resp_type = MMC_RSP_R1;
1401 err = mmc_send_cmd(mmc, &cmd, NULL);
1409 #if CONFIG_IS_ENABLED(MMC_WRITE)
1410 static int sd_read_ssr(struct mmc *mmc)
1412 static const unsigned int sd_au_size[] = {
1413 0, SZ_16K / 512, SZ_32K / 512,
1414 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1415 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1416 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1417 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1422 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1423 struct mmc_data data;
1425 unsigned int au, eo, et, es;
1427 cmd.cmdidx = MMC_CMD_APP_CMD;
1428 cmd.resp_type = MMC_RSP_R1;
1429 cmd.cmdarg = mmc->rca << 16;
1431 err = mmc_send_cmd(mmc, &cmd, NULL);
1435 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1436 cmd.resp_type = MMC_RSP_R1;
1440 data.dest = (char *)ssr;
1441 data.blocksize = 64;
1443 data.flags = MMC_DATA_READ;
1445 err = mmc_send_cmd(mmc, &cmd, &data);
1453 for (i = 0; i < 16; i++)
1454 ssr[i] = be32_to_cpu(ssr[i]);
1456 au = (ssr[2] >> 12) & 0xF;
1457 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1458 mmc->ssr.au = sd_au_size[au];
1459 es = (ssr[3] >> 24) & 0xFF;
1460 es |= (ssr[2] & 0xFF) << 8;
1461 et = (ssr[3] >> 18) & 0x3F;
1463 eo = (ssr[3] >> 16) & 0x3;
1464 mmc->ssr.erase_timeout = (et * 1000) / es;
1465 mmc->ssr.erase_offset = eo * 1000;
1468 pr_debug("Invalid Allocation Unit Size.\n");
1474 /* frequency bases */
1475 /* divided by 10 to be nice to platforms without floating point */
1476 static const int fbase[] = {
1483 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1484 * to platforms without floating point.
1486 static const u8 multipliers[] = {
1505 static inline int bus_width(uint cap)
1507 if (cap == MMC_MODE_8BIT)
1509 if (cap == MMC_MODE_4BIT)
1511 if (cap == MMC_MODE_1BIT)
1513 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1517 #if !CONFIG_IS_ENABLED(DM_MMC)
1518 #ifdef MMC_SUPPORTS_TUNING
1519 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1525 static int mmc_set_ios(struct mmc *mmc)
1529 if (mmc->cfg->ops->set_ios)
1530 ret = mmc->cfg->ops->set_ios(mmc);
1536 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1539 if (clock > mmc->cfg->f_max)
1540 clock = mmc->cfg->f_max;
1542 if (clock < mmc->cfg->f_min)
1543 clock = mmc->cfg->f_min;
1547 mmc->clk_disable = disable;
1549 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1551 return mmc_set_ios(mmc);
1554 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1556 mmc->bus_width = width;
1558 return mmc_set_ios(mmc);
1561 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1563 * helper function to display the capabilities in a human
1564 * friendly manner. The capabilities include bus width and
1567 void mmc_dump_capabilities(const char *text, uint caps)
1571 pr_debug("%s: widths [", text);
1572 if (caps & MMC_MODE_8BIT)
1574 if (caps & MMC_MODE_4BIT)
1576 if (caps & MMC_MODE_1BIT)
1578 pr_debug("\b\b] modes [");
1579 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1580 if (MMC_CAP(mode) & caps)
1581 pr_debug("%s, ", mmc_mode_name(mode));
1582 pr_debug("\b\b]\n");
1586 struct mode_width_tuning {
1589 #ifdef MMC_SUPPORTS_TUNING
1594 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1595 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1598 case MMC_SIGNAL_VOLTAGE_000: return 0;
1599 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1600 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1601 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1606 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1610 if (mmc->signal_voltage == signal_voltage)
1613 mmc->signal_voltage = signal_voltage;
1614 err = mmc_set_ios(mmc);
1616 pr_debug("unable to set voltage (err %d)\n", err);
1621 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1627 #if !CONFIG_IS_ENABLED(MMC_TINY)
1628 static const struct mode_width_tuning sd_modes_by_pref[] = {
1629 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1630 #ifdef MMC_SUPPORTS_TUNING
1633 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1634 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1639 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1643 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1647 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1652 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1654 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1657 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1662 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1666 #define for_each_sd_mode_by_pref(caps, mwt) \
1667 for (mwt = sd_modes_by_pref;\
1668 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1670 if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the best (mode, bus width) combination supported by both the SD card
 * (@card_caps) and the host, configure card and host accordingly, and run
 * tuning where the mode requires it. Falls back to SD_LEGACY on failure.
 * NOTE(review): interior lines are elided in this extract.
 */
1672 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1675 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1676 const struct mode_width_tuning *mwt;
1677 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS is only attempted if the card advertised 1.8V switching (S18R). */
1678 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1680 bool uhs_en = false;
1685 mmc_dump_capabilities("sd card", card_caps);
1686 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts get a fixed 1-bit legacy setup; no mode negotiation. */
1689 if (mmc_host_is_spi(mmc)) {
1690 mmc_set_bus_width(mmc, 1);
1691 mmc_select_mode(mmc, SD_LEGACY);
1692 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1696 /* Restrict card's capabilities by what the host can do */
1697 caps = card_caps & mmc->host_caps;
1702 for_each_sd_mode_by_pref(caps, mwt) {
1705 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1706 if (*w & caps & mwt->widths) {
1707 pr_debug("trying mode %s width %d (at %d MHz)\n",
1708 mmc_mode_name(mwt->mode),
1710 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1712 /* configure the bus width (card + host) */
1713 err = sd_select_bus_width(mmc, bus_width(*w));
1716 mmc_set_bus_width(mmc, bus_width(*w));
1718 /* configure the bus mode (card) */
1719 err = sd_set_card_speed(mmc, mwt->mode);
1723 /* configure the bus mode (host) */
1724 mmc_select_mode(mmc, mwt->mode);
1725 mmc_set_clock(mmc, mmc->tran_speed,
1728 #ifdef MMC_SUPPORTS_TUNING
1729 /* execute tuning if needed */
1730 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1731 err = mmc_execute_tuning(mmc,
1734 pr_debug("tuning failed\n")
1740 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* SSR read failure is only a warning; the selected mode is kept. */
1741 err = sd_read_ssr(mmc);
1743 pr_warn("unable to read ssr\n");
1749 /* revert to a safer bus speed */
1750 mmc_select_mode(mmc, SD_LEGACY);
1751 mmc_set_clock(mmc, mmc->tran_speed,
1757 pr_err("unable to select a mode\n");
1762  * Re-read and compare the part of ext_csd that is constant.
1763  * This can be used to check that the transfer is working
/* Returns 0 when the read-only EXT_CSD fields match the cached copy. */
1766 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1769 const u8 *ext_csd = mmc->ext_csd;
1770 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists on MMC v4+; older cards trivially pass. */
1772 if (mmc->version < MMC_VERSION_4)
1775 err = mmc_send_ext_csd(mmc, test_csd);
1779 /* Only compare read only fields */
1780 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1781 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1782 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1783 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1784 ext_csd[EXT_CSD_REV]
1785 == test_csd[EXT_CSD_REV] &&
1786 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1787 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1788 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1789 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1795 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Build the set of signal voltages the card supports for @mode, intersect
 * with @allowed_mask, and try candidates until one sticks.
 * NOTE(review): ffs() picks the lowest set bit, so with the MMC_SIGNAL_VOLTAGE_*
 * bit ordering this walks candidates from the low-numbered bit up — confirm
 * against the full definitions which voltage that favors.
 */
1796 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1797 uint32_t allowed_mask)
1804 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1805 EXT_CSD_CARD_TYPE_HS400_1_8V))
1806 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1807 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1808 EXT_CSD_CARD_TYPE_HS400_1_2V))
1809 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1812 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1813 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1814 MMC_SIGNAL_VOLTAGE_180;
1815 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1816 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1819 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1823 while (card_mask & allowed_mask) {
1824 enum mmc_voltage best_match;
1826 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1827 if (!mmc_set_signal_voltage(mmc, best_match))
/* That voltage failed; drop it and try the next candidate. */
1830 allowed_mask &= ~best_match;
/* No-op stub when MMC_IO_VOLTAGE support is compiled out. */
1836 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1837 uint32_t allowed_mask)
/*
 * eMMC bus modes, presumably ordered fastest-first ("by_pref") — TODO confirm
 * against the full source; mode identifiers are elided in this extract.
 */
1843 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1844 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1847 .widths = MMC_MODE_8BIT,
/* HS400 bring-up tunes in HS200 mode, hence the HS200 tuning command. */
1848 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1851 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1854 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1855 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1860 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1864 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1868 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1872 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate the table, visiting only modes present in @caps. */
1876 #define for_each_mmc_mode_by_pref(caps, mwt) \
1877 for (mwt = mmc_modes_by_pref;\
1878 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1880 if (caps & MMC_CAP(mwt->mode))
/* Map host width capability + DDR flag to the EXT_CSD BUS_WIDTH value. */
1882 static const struct ext_csd_bus_width {
1886 } ext_csd_bus_width[] = {
1887 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1888 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1889 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1890 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1891 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1894 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch an eMMC device into HS400: tune in HS200, drop back to HS,
 * enable the DDR 8-bit bus, then switch the card and host to HS400.
 * NOTE(review): error-check lines between steps are elided in this extract.
 */
1895 static int mmc_select_hs400(struct mmc *mmc)
1899 /* Set timing to HS200 for tuning */
1900 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1904 /* configure the bus mode (host) */
1905 mmc_select_mode(mmc, MMC_HS_200);
1906 mmc_set_clock(mmc, mmc->tran_speed, false);
1908 /* execute tuning if needed */
1909 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1911 debug("tuning failed\n");
1915 /* Set back to HS */
1916 mmc_set_card_speed(mmc, MMC_HS, true);
1918 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1919 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1923 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1927 mmc_select_mode(mmc, MMC_HS_400);
1928 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is compiled out (body elided in this extract). */
1935 static int mmc_select_hs400(struct mmc *mmc)
/* Iterate bus-width table entries matching the DDR flag and @caps. */
1941 #define for_each_supported_width(caps, ddr, ecbv) \
1942 for (ecbv = ext_csd_bus_width;\
1943 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1945 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the best (mode, bus width) combination supported by both the eMMC
 * card (@card_caps) and the host, configure card then host, verify with an
 * EXT_CSD read-back, and revert to 1-bit legacy on any failure.
 * NOTE(review): interior lines are elided in this extract.
 */
1947 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1950 const struct mode_width_tuning *mwt;
1951 const struct ext_csd_bus_width *ecbw;
1954 mmc_dump_capabilities("mmc", card_caps);
1955 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts get a fixed 1-bit legacy setup; no mode negotiation. */
1958 if (mmc_host_is_spi(mmc)) {
1959 mmc_set_bus_width(mmc, 1);
1960 mmc_select_mode(mmc, MMC_LEGACY);
1961 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1965 /* Restrict card's capabilities by what the host can do */
1966 card_caps &= mmc->host_caps;
1968 /* Only version 4 of MMC supports wider bus widths */
1969 if (mmc->version < MMC_VERSION_4)
1972 if (!mmc->ext_csd) {
1973 pr_debug("No ext_csd found!\n"); /* this should never happen */
1977 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1978 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1980 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1981 * before doing anything else, since a transition from either of
1982 * the HS200/HS400 mode directly to legacy mode is not supported.
1984 if (mmc->selected_mode == MMC_HS_200 ||
1985 mmc->selected_mode == MMC_HS_400)
1986 mmc_set_card_speed(mmc, MMC_HS, true);
1989 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1991 for_each_mmc_mode_by_pref(card_caps, mwt) {
1992 for_each_supported_width(card_caps & mwt->widths,
1993 mmc_is_mode_ddr(mwt->mode), ecbw) {
/* Saved so the voltage can be restored if this attempt fails. */
1994 enum mmc_voltage old_voltage;
1995 pr_debug("trying mode %s width %d (at %d MHz)\n",
1996 mmc_mode_name(mwt->mode),
1997 bus_width(ecbw->cap),
1998 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1999 old_voltage = mmc->signal_voltage;
2000 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2001 MMC_ALL_SIGNAL_VOLTAGE);
2005 /* configure the bus width (card + host) */
2006 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
/* Width is set SDR first; the DDR flag is applied after the speed switch. */
2008 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2011 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400 needs its own multi-step bring-up sequence. */
2013 if (mwt->mode == MMC_HS_400) {
2014 err = mmc_select_hs400(mmc);
2016 printf("Select HS400 failed %d\n", err);
2020 /* configure the bus speed (card) */
2021 err = mmc_set_card_speed(mmc, mwt->mode, false);
2026 * configure the bus width AND the ddr mode
2027 * (card). The host side will be taken care
2028 * of in the next step
2030 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2031 err = mmc_switch(mmc,
2032 EXT_CSD_CMD_SET_NORMAL,
2034 ecbw->ext_csd_bits);
2039 /* configure the bus mode (host) */
2040 mmc_select_mode(mmc, mwt->mode);
2041 mmc_set_clock(mmc, mmc->tran_speed,
2043 #ifdef MMC_SUPPORTS_TUNING
2045 /* execute tuning if needed */
2047 err = mmc_execute_tuning(mmc,
2050 pr_debug("tuning failed\n");
2057 /* do a transfer to check the configuration */
2058 err = mmc_read_and_compare_ext_csd(mmc);
2062 mmc_set_signal_voltage(mmc, old_voltage);
2063 /* if an error occurred, revert to a safer bus mode */
2064 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2065 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2066 mmc_select_mode(mmc, MMC_LEGACY);
2067 mmc_set_bus_width(mmc, 1);
2071 pr_err("unable to select a mode\n");
2077 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: one static EXT_CSD buffer instead of a per-card malloc. */
2078 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ startup: read EXT_CSD, derive the spec version, capacity,
 * partition configuration, group sizes and timing parameters from it.
 * NOTE(review): interior lines are elided in this extract.
 */
2081 static int mmc_startup_v4(struct mmc *mmc)
2085 bool has_parts = false;
2086 bool part_completed;
/* Indexed by EXT_CSD_REV; entries elided in this extract. */
2087 static const u32 mmc_versions[] = {
2099 #if CONFIG_IS_ENABLED(MMC_TINY)
2100 u8 *ext_csd = ext_csd_bkup;
/* EXT_CSD only exists on MMC v4+ (never on SD). */
2102 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2106 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2108 err = mmc_send_ext_csd(mmc, ext_csd);
2112 /* store the ext csd for future reference */
2114 mmc->ext_csd = ext_csd;
2116 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2118 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2121 /* check ext_csd version and capacity */
2122 err = mmc_send_ext_csd(mmc, ext_csd);
2126 /* store the ext csd for future reference */
2128 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2131 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* Unknown (future) EXT_CSD revision: bail rather than misparse. */
2133 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2136 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2138 if (mmc->version >= MMC_VERSION_4_2) {
2140 * According to the JEDEC Standard, the value of
2141 * ext_csd's capacity is valid if the value is more
2144 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2145 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2146 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2147 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2148 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT capacity is only trusted above 2 GiB here. */
2149 if ((capacity >> 20) > 2 * 1024)
2150 mmc->capacity_user = capacity;
2153 if (mmc->version >= MMC_VERSION_4_5)
2154 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2156 /* The partition data may be non-zero but it is only
2157 * effective if PARTITION_SETTING_COMPLETED is set in
2158 * EXT_CSD, so ignore any data if this bit is not set,
2159 * except for enabling the high-capacity group size
2160 * definition (see below).
2162 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2163 EXT_CSD_PARTITION_SETTING_COMPLETED);
2165 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2166 /* Some eMMC set the value too low so set a minimum */
2167 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2168 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2170 /* store the partition info of emmc */
2171 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2172 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2173 ext_csd[EXT_CSD_BOOT_MULT])
2174 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2175 if (part_completed &&
2176 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2177 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT_MULT / RPMB_MULT are in 128 KiB units, hence << 17 to bytes. */
2179 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2181 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* Four general-purpose partitions, 3 size-mult bytes each. */
2183 for (i = 0; i < 4; i++) {
2184 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2185 uint mult = (ext_csd[idx + 2] << 16) +
2186 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2189 if (!part_completed)
2191 mmc->capacity_gp[i] = mult;
2192 mmc->capacity_gp[i] *=
2193 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2194 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2195 mmc->capacity_gp[i] <<= 19;
2198 #ifndef CONFIG_SPL_BUILD
2199 if (part_completed) {
2200 mmc->enh_user_size =
2201 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2202 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2203 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2204 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2205 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2206 mmc->enh_user_size <<= 19;
2207 mmc->enh_user_start =
2208 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2209 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2210 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2211 ext_csd[EXT_CSD_ENH_START_ADDR];
/* High-capacity cards address in 512-byte sectors. */
2212 if (mmc->high_capacity)
2213 mmc->enh_user_start <<= 9;
2218 * Host needs to enable ERASE_GRP_DEF bit if device is
2219 * partitioned. This bit will be lost every time after a reset
2220 * or power off. This will affect erase size.
2224 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2225 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2228 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2229 EXT_CSD_ERASE_GROUP_DEF, 1);
/* Mirror the switch into the cached copy so later reads agree. */
2234 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2237 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2238 #if CONFIG_IS_ENABLED(MMC_WRITE)
2239 /* Read out group size from ext_csd */
2240 mmc->erase_grp_size =
2241 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2244 * if high capacity and partition setting completed
2245 * SEC_COUNT is valid even if it is smaller than 2 GiB
2246 * JEDEC Standard JESD84-B45, 6.2.4
2248 if (mmc->high_capacity && part_completed) {
2249 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2250 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2251 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2252 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2253 capacity *= MMC_MAX_BLOCK_LEN;
2254 mmc->capacity_user = capacity;
2257 #if CONFIG_IS_ENABLED(MMC_WRITE)
2259 /* Calculate the group size from the csd value. */
2260 int erase_gsz, erase_gmul;
2262 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2263 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2264 mmc->erase_grp_size = (erase_gsz + 1)
2268 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2269 mmc->hc_wp_grp_size = 1024
2270 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2271 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2274 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2279 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* Error path: drop the cached EXT_CSD (freed in elided lines above). */
2282 mmc->ext_csd = NULL;
/*
 * Main card bring-up after the operating conditions are settled:
 * CID, RCA, CSD, DSR, select card, v4 extensions, mode/width selection,
 * then fill in the block-device descriptor.
 * NOTE(review): interior lines are elided in this extract.
 */
2287 static int mmc_startup(struct mmc *mmc)
2293 struct blk_desc *bdesc;
2295 #ifdef CONFIG_MMC_SPI_CRC_ON
2296 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2297 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2298 cmd.resp_type = MMC_RSP_R1;
2300 err = mmc_send_cmd(mmc, &cmd, NULL);
2306 /* Put the Card in Identify Mode */
2307 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2308 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2309 cmd.resp_type = MMC_RSP_R2;
2312 err = mmc_send_cmd(mmc, &cmd, NULL);
2314 #ifdef CONFIG_MMC_QUIRKS
2315 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2318 * It has been seen that SEND_CID may fail on the first
2319 * attempt, let's try a few more time
2322 err = mmc_send_cmd(mmc, &cmd, NULL);
2325 } while (retries--);
2332 memcpy(mmc->cid, cmd.response, 16);
2335 * For MMC cards, set the Relative Address.
2336 * For SD cards, get the Relative Address.
2337 * This also puts the cards into Standby State
2339 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2340 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2341 cmd.cmdarg = mmc->rca << 16;
2342 cmd.resp_type = MMC_RSP_R6;
2344 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD assigns the RCA in its R6 response. */
2350 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2353 /* Get the Card-Specific Data */
2354 cmd.cmdidx = MMC_CMD_SEND_CSD;
2355 cmd.resp_type = MMC_RSP_R2;
2356 cmd.cmdarg = mmc->rca << 16;
2358 err = mmc_send_cmd(mmc, &cmd, NULL);
2363 mmc->csd[0] = cmd.response[0];
2364 mmc->csd[1] = cmd.response[1];
2365 mmc->csd[2] = cmd.response[2];
2366 mmc->csd[3] = cmd.response[3];
/* Derive the MMC spec version from the CSD SPEC_VERS field. */
2368 if (mmc->version == MMC_VERSION_UNKNOWN) {
2369 int version = (cmd.response[0] >> 26) & 0xf;
2373 mmc->version = MMC_VERSION_1_2;
2376 mmc->version = MMC_VERSION_1_4;
2379 mmc->version = MMC_VERSION_2_2;
2382 mmc->version = MMC_VERSION_3;
2385 mmc->version = MMC_VERSION_4;
2388 mmc->version = MMC_VERSION_1_2;
2393 /* divide frequency by 10, since the mults are 10x bigger */
2394 freq = fbase[(cmd.response[0] & 0x7)];
2395 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2397 mmc->legacy_speed = freq * mult;
2398 mmc_select_mode(mmc, MMC_LEGACY);
2400 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2401 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2402 #if CONFIG_IS_ENABLED(MMC_WRITE)
2405 mmc->write_bl_len = mmc->read_bl_len;
2407 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2410 if (mmc->high_capacity) {
2411 csize = (mmc->csd[1] & 0x3f) << 16
2412 | (mmc->csd[2] & 0xffff0000) >> 16;
2415 csize = (mmc->csd[1] & 0x3ff) << 2
2416 | (mmc->csd[2] & 0xc0000000) >> 30;
2417 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2420 mmc->capacity_user = (csize + 1) << (cmult + 2);
2421 mmc->capacity_user *= mmc->read_bl_len;
2422 mmc->capacity_boot = 0;
2423 mmc->capacity_rpmb = 0;
2424 for (i = 0; i < 4; i++)
2425 mmc->capacity_gp[i] = 0;
/* Clamp block lengths to what the stack supports. */
2427 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2428 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2430 #if CONFIG_IS_ENABLED(MMC_WRITE)
2431 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2432 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the DSR only if the card implements it and one was configured. */
2435 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2436 cmd.cmdidx = MMC_CMD_SET_DSR;
2437 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2438 cmd.resp_type = MMC_RSP_NONE;
2439 if (mmc_send_cmd(mmc, &cmd, NULL))
2440 pr_warn("MMC: SET_DSR failed\n");
2443 /* Select the card, and put it into Transfer Mode */
2444 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2445 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2446 cmd.resp_type = MMC_RSP_R1;
2447 cmd.cmdarg = mmc->rca << 16;
2448 err = mmc_send_cmd(mmc, &cmd, NULL);
2455 * For SD, its erase group is always one sector
2457 #if CONFIG_IS_ENABLED(MMC_WRITE)
2458 mmc->erase_grp_size = 1;
2460 mmc->part_config = MMCPART_NOAVAILABLE;
2462 err = mmc_startup_v4(mmc);
2466 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2470 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode; skip mode negotiation entirely. */
2471 mmc_set_clock(mmc, mmc->legacy_speed, false);
2472 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2473 mmc_set_bus_width(mmc, 1);
2476 err = sd_get_capabilities(mmc);
2479 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2481 err = mmc_get_capabilities(mmc);
2484 mmc_select_mode_and_width(mmc, mmc->card_caps);
2490 mmc->best_mode = mmc->selected_mode;
2492 /* Fix the block length for DDR mode */
2493 if (mmc->ddr_mode) {
2494 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2495 #if CONFIG_IS_ENABLED(MMC_WRITE)
2496 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2500 /* fill in device description */
2501 bdesc = mmc_get_blk_desc(mmc);
2505 bdesc->blksz = mmc->read_bl_len;
2506 bdesc->log2blksz = LOG2(bdesc->blksz);
2507 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2508 #if !defined(CONFIG_SPL_BUILD) || \
2509 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2510 !defined(CONFIG_USE_TINY_PRINTF))
/* Decode vendor/product/revision strings from the CID for the blk layer. */
2511 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2512 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2513 (mmc->cid[3] >> 16) & 0xffff);
2514 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2515 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2516 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2517 (mmc->cid[2] >> 24) & 0xff);
2518 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2519 (mmc->cid[2] >> 16) & 0xf);
2521 bdesc->vendor[0] = 0;
2522 bdesc->product[0] = 0;
2523 bdesc->revision[0] = 0;
2526 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * CMD8 (SEND_IF_COND): probe for an SD v2 card by sending the 0xaa check
 * pattern; a card that echoes it back is marked SD_VERSION_2.
 */
2533 static int mmc_send_if_cond(struct mmc *mmc)
2538 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2539 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2540 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2541 cmd.resp_type = MMC_RSP_R7;
2543 err = mmc_send_cmd(mmc, &cmd, NULL);
/* The card must echo the check pattern back exactly. */
2548 if ((cmd.response[0] & 0xff) != 0xaa)
2551 mmc->version = SD_VERSION_2;
2556 #if !CONFIG_IS_ENABLED(DM_MMC)
2557 /* board-specific MMC power initializations. */
2558 __weak void board_mmc_power_init(void)
/*
 * Resolve the card's power supplies: under driver model, look up the
 * vmmc/vqmmc regulators from the device tree (missing supplies are only
 * debug-logged); otherwise fall back to the board hook.
 */
2563 static int mmc_power_init(struct mmc *mmc)
2565 #if CONFIG_IS_ENABLED(DM_MMC)
2566 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2569 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2572 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2574 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2575 &mmc->vqmmc_supply);
2577 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2579 #else /* !CONFIG_DM_MMC */
2581 * Driver model should use a regulator, as above, rather than calling
2582 * out to board code.
2584 board_mmc_power_init();
2590  * put the host in the initial state:
2591  * - turn on Vdd (card power supply)
2592  * - configure the bus width and clock to minimal values
2594 static void mmc_set_initial_state(struct mmc *mmc)
2598 /* First try to set 3.3V. If it fails set to 1.8V */
2599 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2601 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2603 pr_warn("mmc: failed to set signal voltage\n");
/* 1-bit legacy mode at the minimum clock, ready for card identification. */
2605 mmc_select_mode(mmc, MMC_LEGACY);
2606 mmc_set_bus_width(mmc, 1);
2607 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the vmmc regulator (when present) to power the card. */
2610 static int mmc_power_on(struct mmc *mmc)
2612 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2613 if (mmc->vmmc_supply) {
2614 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2617 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable vmmc (when present) to remove card power. */
2625 static int mmc_power_off(struct mmc *mmc)
2627 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2628 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2629 if (mmc->vmmc_supply) {
2630 int ret = regulator_set_enable(mmc->vmmc_supply, false);
/* Disable failure is non-fatal; only debug-logged. */
2633 pr_debug("Error disabling VMMC supply\n");
/* Full power cycle: off, settle delay (elided), then back on. */
2641 static int mmc_power_cycle(struct mmc *mmc)
2645 ret = mmc_power_off(mmc);
2649 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2650 * to be on the safer side.
2653 return mmc_power_on(mmc);
/*
 * Power up the card and negotiate operating conditions: power-cycle (UHS
 * is disabled if that fails), host init, CMD0, CMD8, then SD ACMD41 or —
 * on timeout — MMC CMD1. NOTE(review): interior lines elided in this extract.
 */
2656 int mmc_get_op_cond(struct mmc *mmc)
2658 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2664 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2665 mmc_adapter_card_type_ident();
2667 err = mmc_power_init(mmc);
2671 #ifdef CONFIG_MMC_QUIRKS
2672 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2673 MMC_QUIRK_RETRY_SEND_CID;
2676 err = mmc_power_cycle(mmc);
2679 * if power cycling is not supported, we should not try
2680 * to use the UHS modes, because we wouldn't be able to
2681 * recover from an error during the UHS initialization.
2683 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2685 mmc->host_caps &= ~UHS_CAPS;
2686 err = mmc_power_on(mmc);
2691 #if CONFIG_IS_ENABLED(DM_MMC)
2692 /* The device has already been probed ready for use */
2694 /* made sure it's not NULL earlier */
2695 err = mmc->cfg->ops->init(mmc);
2702 mmc_set_initial_state(mmc);
2704 /* Reset the Card */
2705 err = mmc_go_idle(mmc);
2710 /* The internal partition reset to user partition(0) at every CMD0*/
2711 mmc_get_blk_desc(mmc)->hwpart = 0;
2713 /* Test for SD version 2 */
2714 err = mmc_send_if_cond(mmc);
2716 /* Now try to get the SD card's operating condition */
2717 err = sd_send_op_cond(mmc, uhs_en);
/* A failed UHS attempt needs a power cycle before retrying (elided). */
2718 if (err && uhs_en) {
2720 mmc_power_cycle(mmc);
2724 /* If the command timed out, we check for an MMC card */
2725 if (err == -ETIMEDOUT) {
2726 err = mmc_send_op_cond(mmc);
2729 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2730 pr_err("Card did not respond to voltage select!\n");
/*
 * First phase of init: card-detect, capability setup, and operating-
 * condition negotiation. Sets init_in_progress so mmc_complete_init()
 * can finish asynchronously. NOTE(review): interior lines elided here.
 */
2739 int mmc_start_init(struct mmc *mmc)
2745 * all hosts are capable of 1 bit bus-width and able to use the legacy
2748 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2749 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2751 #if !defined(CONFIG_MMC_BROKEN_CD)
2752 /* we pretend there's no card when init is NULL */
2753 no_card = mmc_getcd(mmc) == 0;
2757 #if !CONFIG_IS_ENABLED(DM_MMC)
2758 no_card = no_card || (mmc->cfg->ops->init == NULL);
2762 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2763 pr_err("MMC: no card present\n");
2768 err = mmc_get_op_cond(mmc);
2771 mmc->init_in_progress = 1;
/* Second phase: finish op-cond if pending, then run full card startup. */
2776 static int mmc_complete_init(struct mmc *mmc)
2780 mmc->init_in_progress = 0;
2781 if (mmc->op_cond_pending)
2782 err = mmc_complete_op_cond(mmc);
2785 err = mmc_startup(mmc);
/* Convenience wrapper running both phases back to back, with timing log. */
2793 int mmc_init(struct mmc *mmc)
2796 __maybe_unused ulong start;
2797 #if CONFIG_IS_ENABLED(DM_MMC)
2798 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2805 start = get_timer(0);
2807 if (!mmc->init_in_progress)
2808 err = mmc_start_init(mmc);
2811 err = mmc_complete_init(mmc);
2813 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2818 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2819 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2820 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Step the card back down from high-speed modes by re-selecting a mode
 * with the UHS (SD) or HS200/HS400 (eMMC) capabilities masked out.
 */
2821 int mmc_deinit(struct mmc *mmc)
2829 caps_filtered = mmc->card_caps &
2830 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2831 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2832 MMC_CAP(UHS_SDR104));
2834 return sd_select_mode_and_width(mmc, caps_filtered);
2836 caps_filtered = mmc->card_caps &
2837 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2839 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Store the DSR value to be programmed during startup (body elided). */
2844 int mmc_set_dsr(struct mmc *mmc, u16 val)
2850 /* CPU-specific MMC initializations */
2851 __weak int cpu_mmc_init(bd_t *bis)
2856 /* board-specific MMC initializations. */
2857 __weak int board_mmc_init(bd_t *bis)
/* When set, the card is initialized eagerly rather than on first access. */
2862 void mmc_set_preinit(struct mmc *mmc, int preinit)
2864 mmc->preinit = preinit;
2867 #if CONFIG_IS_ENABLED(DM_MMC)
/* DM variant: bind MMC devices in sequence order, then probe each one. */
2868 static int mmc_probe(bd_t *bis)
2872 struct udevice *dev;
2874 ret = uclass_get(UCLASS_MMC, &uc);
2879 * Try to add them in sequence order. Really with driver model we
2880 * should allow holes, but the current MMC list does not allow that.
2881 * So if we request 0, 1, 3 we will get 0, 1, 2.
2883 for (i = 0; ; i++) {
2884 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2888 uclass_foreach_dev(dev, uc) {
2889 ret = device_probe(dev);
/* A single probe failure is logged but does not abort the loop. */
2891 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM variant: defer entirely to the board hook. */
2897 static int mmc_probe(bd_t *bis)
2899 if (board_mmc_init(bis) < 0)
/* One-shot entry point: probe all controllers and list the devices. */
2906 int mmc_initialize(bd_t *bis)
2908 static int initialized = 0;
2910 if (initialized) /* Avoid initializing mmc multiple times */
2914 #if !CONFIG_IS_ENABLED(BLK)
2915 #if !CONFIG_IS_ENABLED(MMC_TINY)
2919 ret = mmc_probe(bis);
2923 #ifndef CONFIG_SPL_BUILD
2924 print_mmc_devices(',');
2931 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Persistently enable manual background operations (BKOPS_EN) on an eMMC
 * device, after checking the device supports them and they are not
 * already enabled.
 */
2932 int mmc_set_bkops_enable(struct mmc *mmc)
2935 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2937 err = mmc_send_ext_csd(mmc, ext_csd);
2939 puts("Could not get ext_csd register values\n");
2943 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2944 puts("Background operations not supported on device\n");
2945 return -EMEDIUMTYPE;
2948 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2949 puts("Background operations already enabled\n");
/* NOTE: BKOPS_EN is a write-once (one-time programmable) EXT_CSD field. */
2953 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2955 puts("Failed to enable manual background operations\n");
2959 puts("Enabled manual background operations\n");