1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
27 static int mmc_power_cycle(struct mmc *mmc);
28 #if !CONFIG_IS_ENABLED(MMC_TINY)
29 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
32 #if !CONFIG_IS_ENABLED(DM_MMC)
34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
39 __weak int board_mmc_getwp(struct mmc *mmc)
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * mmc_mode_name() - map a bus_mode enum value to a human-readable name.
 *
 * NOTE(review): this chunk is a sparse sampling of the original file; the
 * closing of names[] and the in-range return path are not visible here.
 * The visible guard returns "Unknown mode" for modes >= MMC_MODES_END.
 */
135 const char *mmc_mode_name(enum bus_mode mode)
137 	static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
/*
 * mmc_mode2freq() - nominal clock frequency (Hz) for a bus mode.
 *
 * MMC_LEGACY is special-cased to the per-card legacy speed; all other
 * modes come from the fixed freqs[] table. NOTE(review): the table's
 * closing brace and the in-range return are outside this sampled view.
 */
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 	static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
164 [SD_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the chosen bus mode in the mmc struct.
 *
 * Updates selected_mode, the derived transfer speed (via mmc_mode2freq())
 * and the DDR flag, then logs the selection. Only updates host-side
 * bookkeeping; the card/controller are configured elsewhere.
 */
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout)
236 err = mmc_wait_dat0(mmc, 1, timeout);
241 err = mmc_send_status(mmc, &status);
245 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
246 (status & MMC_STATUS_CURR_STATE) !=
250 if (status & MMC_STATUS_MASK) {
251 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
252 pr_err("Status Error: 0x%08x\n", status);
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n");
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292 * attempt, let's try a few more time
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
/*
 * Reference tuning block pattern sent by the card during CMD19/CMD21
 * tuning on a 4-bit bus (64 bytes). Presumably matches the pattern
 * defined by the SD/eMMC specifications — confirm against the spec.
 */
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/*
 * Reference tuning block pattern for an 8-bit bus (128 bytes); used by
 * mmc_send_tuning() below to validate the data received from the card.
 */
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
368 if (memcmp(data_buf, tuning_block_pattern, size))
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
386 if (mmc->high_capacity)
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n");
416 #if CONFIG_IS_ENABLED(BLK)
417 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
419 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
423 #if CONFIG_IS_ENABLED(BLK)
424 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
426 int dev_num = block_dev->devnum;
428 lbaint_t cur, blocks_todo = blkcnt;
433 struct mmc *mmc = find_mmc_device(dev_num);
437 if (CONFIG_IS_ENABLED(MMC_TINY))
438 err = mmc_switch_part(mmc, block_dev->hwpart);
440 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
445 if ((start + blkcnt) > block_dev->lba) {
446 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
447 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
448 start + blkcnt, block_dev->lba);
453 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
454 pr_debug("%s: Failed to set blocklen\n", __func__);
459 cur = (blocks_todo > mmc->cfg->b_max) ?
460 mmc->cfg->b_max : blocks_todo;
461 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
462 pr_debug("%s: Failed to read blocks\n", __func__);
467 dst += cur * mmc->read_bl_len;
468 } while (blocks_todo > 0);
473 static int mmc_go_idle(struct mmc *mmc)
480 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
482 cmd.resp_type = MMC_RSP_NONE;
484 err = mmc_send_cmd(mmc, &cmd, NULL);
494 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
495 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
501 * Send CMD11 only if the request is to switch the card to
504 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
505 return mmc_set_signal_voltage(mmc, signal_voltage);
507 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
509 cmd.resp_type = MMC_RSP_R1;
511 err = mmc_send_cmd(mmc, &cmd, NULL);
515 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
519 * The card should drive cmd and dat[0:3] low immediately
520 * after the response of cmd11, but wait 100 us to be sure
522 err = mmc_wait_dat0(mmc, 0, 100);
529 * During a signal voltage level switch, the clock must be gated
530 * for 5 ms according to the SD spec
532 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
534 err = mmc_set_signal_voltage(mmc, signal_voltage);
538 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
540 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
543 * Failure to switch is indicated by the card holding
544 * dat[0:3] low. Wait for at least 1 ms according to spec
546 err = mmc_wait_dat0(mmc, 1, 1000);
556 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
563 cmd.cmdidx = MMC_CMD_APP_CMD;
564 cmd.resp_type = MMC_RSP_R1;
567 err = mmc_send_cmd(mmc, &cmd, NULL);
572 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
573 cmd.resp_type = MMC_RSP_R3;
576 * Most cards do not answer if some reserved bits
577 * in the ocr are set. However, Some controller
578 * can set bit 7 (reserved for low voltages), but
579 * how to manage low voltages SD card is not yet
582 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
583 (mmc->cfg->voltages & 0xff8000);
585 if (mmc->version == SD_VERSION_2)
586 cmd.cmdarg |= OCR_HCS;
589 cmd.cmdarg |= OCR_S18R;
591 err = mmc_send_cmd(mmc, &cmd, NULL);
596 if (cmd.response[0] & OCR_BUSY)
605 if (mmc->version != SD_VERSION_2)
606 mmc->version = SD_VERSION_1_0;
608 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
609 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
610 cmd.resp_type = MMC_RSP_R3;
613 err = mmc_send_cmd(mmc, &cmd, NULL);
619 mmc->ocr = cmd.response[0];
621 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
622 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
624 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
630 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - one CMD1 (SEND_OP_COND) iteration.
 *
 * When @use_arg is set (and not in SPI mode) the OCR argument advertises
 * sector addressing (OCR_HCS) plus the voltage window common to host and
 * card, preserving the card's access-mode bits. On success the returned
 * OCR is stored in mmc->ocr for the caller to inspect the busy flag.
 */
636 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
641 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 cmd.resp_type = MMC_RSP_R3;
644 if (use_arg && !mmc_host_is_spi(mmc))
645 cmd.cmdarg = OCR_HCS |
646 (mmc->cfg->voltages &
647 (mmc->ocr & OCR_VOLTAGE_MASK)) |
648 (mmc->ocr & OCR_ACCESS_MODE);
650 err = mmc_send_cmd(mmc, &cmd, NULL);
653 mmc->ocr = cmd.response[0];
/*
 * mmc_send_op_cond() - start eMMC initialization with CMD1.
 *
 * Issues up to two SEND_OP_COND iterations (the first without an OCR
 * argument to query capabilities). If the card is still busy after that,
 * op_cond_pending is set so mmc_complete_op_cond() can finish polling
 * later without blocking here.
 */
657 static int mmc_send_op_cond(struct mmc *mmc)
661 /* Some cards seem to need this */
664 /* Ask the card for its capabilities */
665 for (i = 0; i < 2; i++) {
666 err = mmc_send_op_cond_iter(mmc, i != 0);
670 /* exit if not busy (flag seems to be inverted) */
671 if (mmc->ocr & OCR_BUSY)
674 mmc->op_cond_pending = 1;
678 static int mmc_complete_op_cond(struct mmc *mmc)
685 mmc->op_cond_pending = 0;
686 if (!(mmc->ocr & OCR_BUSY)) {
687 /* Some cards seem to need this */
690 start = get_timer(0);
692 err = mmc_send_op_cond_iter(mmc, 1);
695 if (mmc->ocr & OCR_BUSY)
697 if (get_timer(start) > timeout)
703 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
704 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
705 cmd.resp_type = MMC_RSP_R3;
708 err = mmc_send_cmd(mmc, &cmd, NULL);
713 mmc->ocr = cmd.response[0];
716 mmc->version = MMC_VERSION_UNKNOWN;
718 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
725 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
728 struct mmc_data data;
731 /* Get the Card Status Register */
732 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 cmd.resp_type = MMC_RSP_R1;
736 data.dest = (char *)ext_csd;
738 data.blocksize = MMC_MAX_BLOCK_LEN;
739 data.flags = MMC_DATA_READ;
741 err = mmc_send_cmd(mmc, &cmd, &data);
746 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
750 int timeout = DEFAULT_CMD6_TIMEOUT_MS;
754 if (mmc->gen_cmd6_time)
755 timeout = mmc->gen_cmd6_time * 10;
757 cmd.cmdidx = MMC_CMD_SWITCH;
758 cmd.resp_type = MMC_RSP_R1b;
759 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
763 while (retries > 0) {
764 ret = mmc_send_cmd(mmc, &cmd, NULL);
776 /* Waiting for the ready status */
777 return mmc_poll_for_busy(mmc, timeout);
784 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
786 return __mmc_switch(mmc, set, index, value, true);
789 #if !CONFIG_IS_ENABLED(MMC_TINY)
790 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
796 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
802 speed_bits = EXT_CSD_TIMING_HS;
804 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
806 speed_bits = EXT_CSD_TIMING_HS200;
809 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
811 speed_bits = EXT_CSD_TIMING_HS400;
815 speed_bits = EXT_CSD_TIMING_LEGACY;
821 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
822 speed_bits, !hsdowngrade);
826 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
827 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
829 * In case the eMMC is in HS200/HS400 mode and we are downgrading
830 * to HS mode, the card clock are still running much faster than
831 * the supported HS mode clock, so we can not reliably read out
832 * Extended CSD. Reconfigure the controller to run at HS mode.
835 mmc_select_mode(mmc, MMC_HS);
836 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
840 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
841 /* Now check to see that it worked */
842 err = mmc_send_ext_csd(mmc, test_csd);
846 /* No high-speed support */
847 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive card_caps from the cached EXT_CSD.
 *
 * Starts from the baseline (1-bit, legacy mode), then widens to 4/8-bit
 * and adds HS/HS-52/DDR-52 — and, when compiled in, HS200/HS400 — based
 * on the EXT_CSD_CARD_TYPE bits. SPI hosts and pre-v4 cards keep the
 * baseline only.
 */
854 static int mmc_get_capabilities(struct mmc *mmc)
856 u8 *ext_csd = mmc->ext_csd;
859 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
861 if (mmc_host_is_spi(mmc))
864 /* Only version 4 supports high-speed */
865 if (mmc->version < MMC_VERSION_4)
869 pr_err("No ext_csd found!\n"); /* this should never happen */
873 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
875 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
876 mmc->cardtype = cardtype;
878 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
879 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
880 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
881 mmc->card_caps |= MMC_MODE_HS200;
884 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
885 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
886 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
887 mmc->card_caps |= MMC_MODE_HS400;
890 if (cardtype & EXT_CSD_CARD_TYPE_52) {
891 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
892 mmc->card_caps |= MMC_MODE_DDR_52MHz;
893 mmc->card_caps |= MMC_MODE_HS_52MHz;
895 if (cardtype & EXT_CSD_CARD_TYPE_26)
896 mmc->card_caps |= MMC_MODE_HS;
902 static int mmc_set_capacity(struct mmc *mmc, int part_num)
906 mmc->capacity = mmc->capacity_user;
910 mmc->capacity = mmc->capacity_boot;
913 mmc->capacity = mmc->capacity_rpmb;
919 mmc->capacity = mmc->capacity_gp[part_num - 4];
925 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
930 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
931 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
936 if (part_num & PART_ACCESS_MASK)
937 forbidden = MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400);
939 if (MMC_CAP(mmc->selected_mode) & forbidden) {
940 pr_debug("selected mode (%s) is forbidden for part %d\n",
941 mmc_mode_name(mmc->selected_mode), part_num);
943 } else if (mmc->selected_mode != mmc->best_mode) {
944 pr_debug("selected mode is not optimal\n");
949 return mmc_select_mode_and_width(mmc,
950 mmc->card_caps & ~forbidden);
955 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
956 unsigned int part_num)
962 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
966 ret = mmc_boot_part_access_chk(mmc, part_num);
970 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
971 (mmc->part_config & ~PART_ACCESS_MASK)
972 | (part_num & PART_ACCESS_MASK));
975 * Set the capacity if the switch succeeded or was intended
976 * to return to representing the raw device.
978 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
979 ret = mmc_set_capacity(mmc, part_num);
980 mmc_get_blk_desc(mmc)->hwpart = part_num;
986 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
987 int mmc_hwpart_config(struct mmc *mmc,
988 const struct mmc_hwpart_conf *conf,
989 enum mmc_hwpart_conf_mode mode)
995 u32 max_enh_size_mult;
996 u32 tot_enh_size_mult = 0;
999 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1001 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1004 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1005 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1006 return -EMEDIUMTYPE;
1009 if (!(mmc->part_support & PART_SUPPORT)) {
1010 pr_err("Card does not support partitioning\n");
1011 return -EMEDIUMTYPE;
1014 if (!mmc->hc_wp_grp_size) {
1015 pr_err("Card does not define HC WP group size\n");
1016 return -EMEDIUMTYPE;
1019 /* check partition alignment and total enhanced size */
1020 if (conf->user.enh_size) {
1021 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1022 conf->user.enh_start % mmc->hc_wp_grp_size) {
1023 pr_err("User data enhanced area not HC WP group "
1027 part_attrs |= EXT_CSD_ENH_USR;
1028 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1029 if (mmc->high_capacity) {
1030 enh_start_addr = conf->user.enh_start;
1032 enh_start_addr = (conf->user.enh_start << 9);
1038 tot_enh_size_mult += enh_size_mult;
1040 for (pidx = 0; pidx < 4; pidx++) {
1041 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1042 pr_err("GP%i partition not HC WP group size "
1043 "aligned\n", pidx+1);
1046 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1047 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1048 part_attrs |= EXT_CSD_ENH_GP(pidx);
1049 tot_enh_size_mult += gp_size_mult[pidx];
1053 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1054 pr_err("Card does not support enhanced attribute\n");
1055 return -EMEDIUMTYPE;
1058 err = mmc_send_ext_csd(mmc, ext_csd);
1063 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1064 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1065 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1066 if (tot_enh_size_mult > max_enh_size_mult) {
1067 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1068 tot_enh_size_mult, max_enh_size_mult);
1069 return -EMEDIUMTYPE;
1072 /* The default value of EXT_CSD_WR_REL_SET is device
1073 * dependent, the values can only be changed if the
1074 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1075 * changed only once and before partitioning is completed. */
1076 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1077 if (conf->user.wr_rel_change) {
1078 if (conf->user.wr_rel_set)
1079 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1081 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1083 for (pidx = 0; pidx < 4; pidx++) {
1084 if (conf->gp_part[pidx].wr_rel_change) {
1085 if (conf->gp_part[pidx].wr_rel_set)
1086 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1088 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1092 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1093 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1094 puts("Card does not support host controlled partition write "
1095 "reliability settings\n");
1096 return -EMEDIUMTYPE;
1099 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1100 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1101 pr_err("Card already partitioned\n");
1105 if (mode == MMC_HWPART_CONF_CHECK)
1108 /* Partitioning requires high-capacity size definitions */
1109 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1110 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1111 EXT_CSD_ERASE_GROUP_DEF, 1);
1116 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1118 /* update erase group size to be high-capacity */
1119 mmc->erase_grp_size =
1120 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1124 /* all OK, write the configuration */
1125 for (i = 0; i < 4; i++) {
1126 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1127 EXT_CSD_ENH_START_ADDR+i,
1128 (enh_start_addr >> (i*8)) & 0xFF);
1132 for (i = 0; i < 3; i++) {
1133 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1134 EXT_CSD_ENH_SIZE_MULT+i,
1135 (enh_size_mult >> (i*8)) & 0xFF);
1139 for (pidx = 0; pidx < 4; pidx++) {
1140 for (i = 0; i < 3; i++) {
1141 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1142 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1143 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1148 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1149 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1153 if (mode == MMC_HWPART_CONF_SET)
1156 /* The WR_REL_SET is a write-once register but shall be
1157 * written before setting PART_SETTING_COMPLETED. As it is
1158 * write-once we can only write it when completing the
1160 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1161 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1162 EXT_CSD_WR_REL_SET, wr_rel_set);
1167 /* Setting PART_SETTING_COMPLETED confirms the partition
1168 * configuration but it only becomes effective after power
1169 * cycle, so we do not adjust the partition related settings
1170 * in the mmc struct. */
1172 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1173 EXT_CSD_PARTITION_SETTING,
1174 EXT_CSD_PARTITION_SETTING_COMPLETED);
1182 #if !CONFIG_IS_ENABLED(DM_MMC)
1183 int mmc_getcd(struct mmc *mmc)
1187 cd = board_mmc_getcd(mmc);
1190 if (mmc->cfg->ops->getcd)
1191 cd = mmc->cfg->ops->getcd(mmc);
1200 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * sd_switch() - issue SD CMD6 (SWITCH_FUNC) and read the 64-byte status.
 *
 * The argument encodes the mode bit (check vs. set) in bit 31, leaves
 * all function groups at "no change" (0xf nibbles), then patches in
 * @value for the requested @group. The 64-byte switch status block is
 * read back into @resp.
 */
1201 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1204 struct mmc_data data;
1206 /* Switch the frequency */
1207 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1208 cmd.resp_type = MMC_RSP_R1;
1209 cmd.cmdarg = (mode << 31) | 0xffffff;
1210 cmd.cmdarg &= ~(0xf << (group * 4));
1211 cmd.cmdarg |= value << (group * 4);
1213 data.dest = (char *)resp;
1214 data.blocksize = 64;
1216 data.flags = MMC_DATA_READ;
1218 return mmc_send_cmd(mmc, &cmd, &data);
1221 static int sd_get_capabilities(struct mmc *mmc)
1225 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1226 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1227 struct mmc_data data;
1229 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1233 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1235 if (mmc_host_is_spi(mmc))
1238 /* Read the SCR to find out if this card supports higher speeds */
1239 cmd.cmdidx = MMC_CMD_APP_CMD;
1240 cmd.resp_type = MMC_RSP_R1;
1241 cmd.cmdarg = mmc->rca << 16;
1243 err = mmc_send_cmd(mmc, &cmd, NULL);
1248 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1249 cmd.resp_type = MMC_RSP_R1;
1255 data.dest = (char *)scr;
1258 data.flags = MMC_DATA_READ;
1260 err = mmc_send_cmd(mmc, &cmd, &data);
1269 mmc->scr[0] = __be32_to_cpu(scr[0]);
1270 mmc->scr[1] = __be32_to_cpu(scr[1]);
1272 switch ((mmc->scr[0] >> 24) & 0xf) {
1274 mmc->version = SD_VERSION_1_0;
1277 mmc->version = SD_VERSION_1_10;
1280 mmc->version = SD_VERSION_2;
1281 if ((mmc->scr[0] >> 15) & 0x1)
1282 mmc->version = SD_VERSION_3;
1285 mmc->version = SD_VERSION_1_0;
1289 if (mmc->scr[0] & SD_DATA_4BIT)
1290 mmc->card_caps |= MMC_MODE_4BIT;
1292 /* Version 1.0 doesn't support switching */
1293 if (mmc->version == SD_VERSION_1_0)
1298 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1299 (u8 *)switch_status);
1304 /* The high-speed function is busy. Try again */
1305 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1309 /* If high-speed isn't supported, we return */
1310 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1311 mmc->card_caps |= MMC_CAP(SD_HS);
1313 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1314 /* Version before 3.0 don't support UHS modes */
1315 if (mmc->version < SD_VERSION_3)
1318 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1319 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1320 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1321 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1322 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1323 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1324 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1325 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1326 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1327 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1328 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1334 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1338 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1341 /* SD version 1.00 and 1.01 does not support CMD 6 */
1342 if (mmc->version == SD_VERSION_1_0)
1347 speed = UHS_SDR12_BUS_SPEED;
1350 speed = HIGH_SPEED_BUS_SPEED;
1352 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1354 speed = UHS_SDR12_BUS_SPEED;
1357 speed = UHS_SDR25_BUS_SPEED;
1360 speed = UHS_SDR50_BUS_SPEED;
1363 speed = UHS_DDR50_BUS_SPEED;
1366 speed = UHS_SDR104_BUS_SPEED;
1373 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1377 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1383 static int sd_select_bus_width(struct mmc *mmc, int w)
1388 if ((w != 4) && (w != 1))
1391 cmd.cmdidx = MMC_CMD_APP_CMD;
1392 cmd.resp_type = MMC_RSP_R1;
1393 cmd.cmdarg = mmc->rca << 16;
1395 err = mmc_send_cmd(mmc, &cmd, NULL);
1399 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1400 cmd.resp_type = MMC_RSP_R1;
1405 err = mmc_send_cmd(mmc, &cmd, NULL);
1413 #if CONFIG_IS_ENABLED(MMC_WRITE)
1414 static int sd_read_ssr(struct mmc *mmc)
1416 static const unsigned int sd_au_size[] = {
1417 0, SZ_16K / 512, SZ_32K / 512,
1418 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1419 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1420 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1421 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1426 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1427 struct mmc_data data;
1429 unsigned int au, eo, et, es;
1431 cmd.cmdidx = MMC_CMD_APP_CMD;
1432 cmd.resp_type = MMC_RSP_R1;
1433 cmd.cmdarg = mmc->rca << 16;
1435 err = mmc_send_cmd(mmc, &cmd, NULL);
1439 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1440 cmd.resp_type = MMC_RSP_R1;
1444 data.dest = (char *)ssr;
1445 data.blocksize = 64;
1447 data.flags = MMC_DATA_READ;
1449 err = mmc_send_cmd(mmc, &cmd, &data);
1457 for (i = 0; i < 16; i++)
1458 ssr[i] = be32_to_cpu(ssr[i]);
1460 au = (ssr[2] >> 12) & 0xF;
1461 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1462 mmc->ssr.au = sd_au_size[au];
1463 es = (ssr[3] >> 24) & 0xFF;
1464 es |= (ssr[2] & 0xFF) << 8;
1465 et = (ssr[3] >> 18) & 0x3F;
1467 eo = (ssr[3] >> 16) & 0x3;
1468 mmc->ssr.erase_timeout = (et * 1000) / es;
1469 mmc->ssr.erase_offset = eo * 1000;
1472 pr_debug("Invalid Allocation Unit Size.\n");
1478 /* frequency bases */
1479 /* divided by 10 to be nice to platforms without floating point */
1480 static const int fbase[] = {
1487 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1488 * to platforms without floating point.
1490 static const u8 multipliers[] = {
/*
 * bus_width() - convert a single MMC_MODE_*BIT capability flag to the
 * numeric bus width. Warns (and presumably returns 0 — the return paths
 * are outside this sampled view) on anything else.
 *
 * NOTE(review): "witdh" typo in the warning string below — fix it in a
 * code change, not here (it is runtime output, not a comment).
 */
1509 static inline int bus_width(uint cap)
1511 if (cap == MMC_MODE_8BIT)
1513 if (cap == MMC_MODE_4BIT)
1515 if (cap == MMC_MODE_1BIT)
1517 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1521 #if !CONFIG_IS_ENABLED(DM_MMC)
1522 #ifdef MMC_SUPPORTS_TUNING
1523 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1529 static int mmc_set_ios(struct mmc *mmc)
1533 if (mmc->cfg->ops->set_ios)
1534 ret = mmc->cfg->ops->set_ios(mmc);
1540 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1543 if (clock > mmc->cfg->f_max)
1544 clock = mmc->cfg->f_max;
1546 if (clock < mmc->cfg->f_min)
1547 clock = mmc->cfg->f_min;
1551 mmc->clk_disable = disable;
1553 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1555 return mmc_set_ios(mmc);
1558 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1560 mmc->bus_width = width;
1562 return mmc_set_ios(mmc);
1565 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1567 * helper function to display the capabilities in a human
1568 * friendly manner. The capabilities include bus width and
1571 void mmc_dump_capabilities(const char *text, uint caps)
1575 pr_debug("%s: widths [", text);
1576 if (caps & MMC_MODE_8BIT)
1578 if (caps & MMC_MODE_4BIT)
1580 if (caps & MMC_MODE_1BIT)
1582 pr_debug("\b\b] modes [");
1583 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1584 if (MMC_CAP(mode) & caps)
1585 pr_debug("%s, ", mmc_mode_name(mode));
1586 pr_debug("\b\b]\n");
1590 struct mode_width_tuning {
1593 #ifdef MMC_SUPPORTS_TUNING
1598 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_voltage_to_mv() - translate an mmc_voltage enum to millivolts
 * (0/3300/1800/1200 for the cases visible here).
 */
1599 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1602 case MMC_SIGNAL_VOLTAGE_000: return 0;
1603 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1604 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1605 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * mmc_set_signal_voltage() - change the host I/O signal voltage.
 *
 * No-op if the requested voltage is already set; otherwise records the
 * new voltage and pushes it to the controller via mmc_set_ios(),
 * logging on failure.
 */
1610 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1614 if (mmc->signal_voltage == signal_voltage)
1617 mmc->signal_voltage = signal_voltage;
1618 err = mmc_set_ios(mmc);
1620 pr_debug("unable to set voltage (err %d)\n", err);
1625 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1631 #if !CONFIG_IS_ENABLED(MMC_TINY)
1632 static const struct mode_width_tuning sd_modes_by_pref[] = {
1633 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1634 #ifdef MMC_SUPPORTS_TUNING
1637 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1638 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1643 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1647 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1651 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1656 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1658 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1661 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1666 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1670 #define for_each_sd_mode_by_pref(caps, mwt) \
1671 for (mwt = sd_modes_by_pref;\
1672 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1674 if (caps & MMC_CAP(mwt->mode))
1676 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1679 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1680 const struct mode_width_tuning *mwt;
1681 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1682 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1684 bool uhs_en = false;
1689 mmc_dump_capabilities("sd card", card_caps);
1690 mmc_dump_capabilities("host", mmc->host_caps);
1693 /* Restrict card's capabilities by what the host can do */
1694 caps = card_caps & mmc->host_caps;
1699 for_each_sd_mode_by_pref(caps, mwt) {
1702 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1703 if (*w & caps & mwt->widths) {
1704 pr_debug("trying mode %s width %d (at %d MHz)\n",
1705 mmc_mode_name(mwt->mode),
1707 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1709 /* configure the bus width (card + host) */
1710 err = sd_select_bus_width(mmc, bus_width(*w));
1713 mmc_set_bus_width(mmc, bus_width(*w));
1715 /* configure the bus mode (card) */
1716 err = sd_set_card_speed(mmc, mwt->mode);
1720 /* configure the bus mode (host) */
1721 mmc_select_mode(mmc, mwt->mode);
1722 mmc_set_clock(mmc, mmc->tran_speed,
1725 #ifdef MMC_SUPPORTS_TUNING
1726 /* execute tuning if needed */
1727 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1728 err = mmc_execute_tuning(mmc,
1731 pr_debug("tuning failed\n");
1737 #if CONFIG_IS_ENABLED(MMC_WRITE)
1738 err = sd_read_ssr(mmc);
1740 pr_warn("unable to read ssr\n");
1746 /* revert to a safer bus speed */
1747 mmc_select_mode(mmc, SD_LEGACY);
1748 mmc_set_clock(mmc, mmc->tran_speed,
1754 pr_err("unable to select a mode\n");
1759 * read the compare the part of ext csd that is constant.
1760 * This can be used to check that the transfer is working
1763 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1766 const u8 *ext_csd = mmc->ext_csd;
1767 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1769 if (mmc->version < MMC_VERSION_4)
1772 err = mmc_send_ext_csd(mmc, test_csd);
1776 /* Only compare read only fields */
1777 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1778 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1779 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1780 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1781 ext_csd[EXT_CSD_REV]
1782 == test_csd[EXT_CSD_REV] &&
1783 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1784 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1785 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1786 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1792 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1793 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1794 uint32_t allowed_mask)
1801 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1802 EXT_CSD_CARD_TYPE_HS400_1_8V))
1803 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1804 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1805 EXT_CSD_CARD_TYPE_HS400_1_2V))
1806 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1809 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1810 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1811 MMC_SIGNAL_VOLTAGE_180;
1812 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1813 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1816 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1820 while (card_mask & allowed_mask) {
1821 enum mmc_voltage best_match;
1823 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1824 if (!mmc_set_signal_voltage(mmc, best_match))
1827 allowed_mask &= ~best_match;
1833 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1834 uint32_t allowed_mask)
1840 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1841 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1844 .widths = MMC_MODE_8BIT,
1845 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1848 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1851 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1852 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1857 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1861 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1865 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1869 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate over mmc_modes_by_pref in preference order, visiting only the
 * entries whose mode bit is present in the given capability mask.
 */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1879 static const struct ext_csd_bus_width {
1883 } ext_csd_bus_width[] = {
1884 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1885 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1886 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1887 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1888 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1891 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1892 static int mmc_select_hs400(struct mmc *mmc)
1896 /* Set timing to HS200 for tuning */
1897 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1901 /* configure the bus mode (host) */
1902 mmc_select_mode(mmc, MMC_HS_200);
1903 mmc_set_clock(mmc, mmc->tran_speed, false);
1905 /* execute tuning if needed */
1906 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1908 debug("tuning failed\n");
1912 /* Set back to HS */
1913 mmc_set_card_speed(mmc, MMC_HS, true);
1915 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1916 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1920 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1924 mmc_select_mode(mmc, MMC_HS_400);
1925 err = mmc_set_clock(mmc, mmc->tran_speed, false);
1932 static int mmc_select_hs400(struct mmc *mmc)
/*
 * Iterate over ext_csd_bus_width entries matching the requested DDR
 * setting and present in the given capability mask.
 */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1944 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1947 const struct mode_width_tuning *mwt;
1948 const struct ext_csd_bus_width *ecbw;
1951 mmc_dump_capabilities("mmc", card_caps);
1952 mmc_dump_capabilities("host", mmc->host_caps);
1955 /* Restrict card's capabilities by what the host can do */
1956 card_caps &= mmc->host_caps;
1958 /* Only version 4 of MMC supports wider bus widths */
1959 if (mmc->version < MMC_VERSION_4)
1962 if (!mmc->ext_csd) {
1963 pr_debug("No ext_csd found!\n"); /* this should enver happen */
1967 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1968 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1970 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1971 * before doing anything else, since a transition from either of
1972 * the HS200/HS400 mode directly to legacy mode is not supported.
1974 if (mmc->selected_mode == MMC_HS_200 ||
1975 mmc->selected_mode == MMC_HS_400)
1976 mmc_set_card_speed(mmc, MMC_HS, true);
1979 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1981 for_each_mmc_mode_by_pref(card_caps, mwt) {
1982 for_each_supported_width(card_caps & mwt->widths,
1983 mmc_is_mode_ddr(mwt->mode), ecbw) {
1984 enum mmc_voltage old_voltage;
1985 pr_debug("trying mode %s width %d (at %d MHz)\n",
1986 mmc_mode_name(mwt->mode),
1987 bus_width(ecbw->cap),
1988 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1989 old_voltage = mmc->signal_voltage;
1990 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1991 MMC_ALL_SIGNAL_VOLTAGE);
1995 /* configure the bus width (card + host) */
1996 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1998 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2001 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2003 if (mwt->mode == MMC_HS_400) {
2004 err = mmc_select_hs400(mmc);
2006 printf("Select HS400 failed %d\n", err);
2010 /* configure the bus speed (card) */
2011 err = mmc_set_card_speed(mmc, mwt->mode, false);
2016 * configure the bus width AND the ddr mode
2017 * (card). The host side will be taken care
2018 * of in the next step
2020 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2021 err = mmc_switch(mmc,
2022 EXT_CSD_CMD_SET_NORMAL,
2024 ecbw->ext_csd_bits);
2029 /* configure the bus mode (host) */
2030 mmc_select_mode(mmc, mwt->mode);
2031 mmc_set_clock(mmc, mmc->tran_speed,
2033 #ifdef MMC_SUPPORTS_TUNING
2035 /* execute tuning if needed */
2037 err = mmc_execute_tuning(mmc,
2040 pr_debug("tuning failed\n");
2047 /* do a transfer to check the configuration */
2048 err = mmc_read_and_compare_ext_csd(mmc);
2052 mmc_set_signal_voltage(mmc, old_voltage);
2053 /* if an error occured, revert to a safer bus mode */
2054 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2055 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2056 mmc_select_mode(mmc, MMC_LEGACY);
2057 mmc_set_bus_width(mmc, 1);
2061 pr_err("unable to select a mode\n");
2067 #if CONFIG_IS_ENABLED(MMC_TINY)
2068 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2071 static int mmc_startup_v4(struct mmc *mmc)
2075 bool has_parts = false;
2076 bool part_completed;
2077 static const u32 mmc_versions[] = {
2089 #if CONFIG_IS_ENABLED(MMC_TINY)
2090 u8 *ext_csd = ext_csd_bkup;
2092 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2096 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2098 err = mmc_send_ext_csd(mmc, ext_csd);
2102 /* store the ext csd for future reference */
2104 mmc->ext_csd = ext_csd;
2106 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2108 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2111 /* check ext_csd version and capacity */
2112 err = mmc_send_ext_csd(mmc, ext_csd);
2116 /* store the ext csd for future reference */
2118 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2121 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2123 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2126 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2128 if (mmc->version >= MMC_VERSION_4_2) {
2130 * According to the JEDEC Standard, the value of
2131 * ext_csd's capacity is valid if the value is more
2134 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2135 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2136 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2137 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2138 capacity *= MMC_MAX_BLOCK_LEN;
2139 if ((capacity >> 20) > 2 * 1024)
2140 mmc->capacity_user = capacity;
2143 if (mmc->version >= MMC_VERSION_4_5)
2144 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2146 /* The partition data may be non-zero but it is only
2147 * effective if PARTITION_SETTING_COMPLETED is set in
2148 * EXT_CSD, so ignore any data if this bit is not set,
2149 * except for enabling the high-capacity group size
2150 * definition (see below).
2152 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2153 EXT_CSD_PARTITION_SETTING_COMPLETED);
2155 /* store the partition info of emmc */
2156 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2157 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2158 ext_csd[EXT_CSD_BOOT_MULT])
2159 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2160 if (part_completed &&
2161 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2162 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2164 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2166 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2168 for (i = 0; i < 4; i++) {
2169 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2170 uint mult = (ext_csd[idx + 2] << 16) +
2171 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2174 if (!part_completed)
2176 mmc->capacity_gp[i] = mult;
2177 mmc->capacity_gp[i] *=
2178 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2179 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2180 mmc->capacity_gp[i] <<= 19;
2183 #ifndef CONFIG_SPL_BUILD
2184 if (part_completed) {
2185 mmc->enh_user_size =
2186 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2187 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2188 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2189 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2190 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2191 mmc->enh_user_size <<= 19;
2192 mmc->enh_user_start =
2193 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2194 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2195 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2196 ext_csd[EXT_CSD_ENH_START_ADDR];
2197 if (mmc->high_capacity)
2198 mmc->enh_user_start <<= 9;
2203 * Host needs to enable ERASE_GRP_DEF bit if device is
2204 * partitioned. This bit will be lost every time after a reset
2205 * or power off. This will affect erase size.
2209 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2210 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2213 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2214 EXT_CSD_ERASE_GROUP_DEF, 1);
2219 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2222 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2223 #if CONFIG_IS_ENABLED(MMC_WRITE)
2224 /* Read out group size from ext_csd */
2225 mmc->erase_grp_size =
2226 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2229 * if high capacity and partition setting completed
2230 * SEC_COUNT is valid even if it is smaller than 2 GiB
2231 * JEDEC Standard JESD84-B45, 6.2.4
2233 if (mmc->high_capacity && part_completed) {
2234 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2235 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2236 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2237 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2238 capacity *= MMC_MAX_BLOCK_LEN;
2239 mmc->capacity_user = capacity;
2242 #if CONFIG_IS_ENABLED(MMC_WRITE)
2244 /* Calculate the group size from the csd value. */
2245 int erase_gsz, erase_gmul;
2247 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2248 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2249 mmc->erase_grp_size = (erase_gsz + 1)
2253 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2254 mmc->hc_wp_grp_size = 1024
2255 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2256 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2259 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2264 #if !CONFIG_IS_ENABLED(MMC_TINY)
2267 mmc->ext_csd = NULL;
2272 static int mmc_startup(struct mmc *mmc)
2278 struct blk_desc *bdesc;
2280 #ifdef CONFIG_MMC_SPI_CRC_ON
2281 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2282 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2283 cmd.resp_type = MMC_RSP_R1;
2285 err = mmc_send_cmd(mmc, &cmd, NULL);
2291 /* Put the Card in Identify Mode */
2292 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2293 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2294 cmd.resp_type = MMC_RSP_R2;
2297 err = mmc_send_cmd(mmc, &cmd, NULL);
2299 #ifdef CONFIG_MMC_QUIRKS
2300 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2303 * It has been seen that SEND_CID may fail on the first
2304 * attempt, let's try a few more time
2307 err = mmc_send_cmd(mmc, &cmd, NULL);
2310 } while (retries--);
2317 memcpy(mmc->cid, cmd.response, 16);
2320 * For MMC cards, set the Relative Address.
2321 * For SD cards, get the Relatvie Address.
2322 * This also puts the cards into Standby State
2324 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2325 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2326 cmd.cmdarg = mmc->rca << 16;
2327 cmd.resp_type = MMC_RSP_R6;
2329 err = mmc_send_cmd(mmc, &cmd, NULL);
2335 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2338 /* Get the Card-Specific Data */
2339 cmd.cmdidx = MMC_CMD_SEND_CSD;
2340 cmd.resp_type = MMC_RSP_R2;
2341 cmd.cmdarg = mmc->rca << 16;
2343 err = mmc_send_cmd(mmc, &cmd, NULL);
2348 mmc->csd[0] = cmd.response[0];
2349 mmc->csd[1] = cmd.response[1];
2350 mmc->csd[2] = cmd.response[2];
2351 mmc->csd[3] = cmd.response[3];
2353 if (mmc->version == MMC_VERSION_UNKNOWN) {
2354 int version = (cmd.response[0] >> 26) & 0xf;
2358 mmc->version = MMC_VERSION_1_2;
2361 mmc->version = MMC_VERSION_1_4;
2364 mmc->version = MMC_VERSION_2_2;
2367 mmc->version = MMC_VERSION_3;
2370 mmc->version = MMC_VERSION_4;
2373 mmc->version = MMC_VERSION_1_2;
2378 /* divide frequency by 10, since the mults are 10x bigger */
2379 freq = fbase[(cmd.response[0] & 0x7)];
2380 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2382 mmc->legacy_speed = freq * mult;
2383 mmc_select_mode(mmc, MMC_LEGACY);
2385 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2386 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2387 #if CONFIG_IS_ENABLED(MMC_WRITE)
2390 mmc->write_bl_len = mmc->read_bl_len;
2392 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2395 if (mmc->high_capacity) {
2396 csize = (mmc->csd[1] & 0x3f) << 16
2397 | (mmc->csd[2] & 0xffff0000) >> 16;
2400 csize = (mmc->csd[1] & 0x3ff) << 2
2401 | (mmc->csd[2] & 0xc0000000) >> 30;
2402 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2405 mmc->capacity_user = (csize + 1) << (cmult + 2);
2406 mmc->capacity_user *= mmc->read_bl_len;
2407 mmc->capacity_boot = 0;
2408 mmc->capacity_rpmb = 0;
2409 for (i = 0; i < 4; i++)
2410 mmc->capacity_gp[i] = 0;
2412 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2413 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2415 #if CONFIG_IS_ENABLED(MMC_WRITE)
2416 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2417 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2420 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2421 cmd.cmdidx = MMC_CMD_SET_DSR;
2422 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2423 cmd.resp_type = MMC_RSP_NONE;
2424 if (mmc_send_cmd(mmc, &cmd, NULL))
2425 pr_warn("MMC: SET_DSR failed\n");
2428 /* Select the card, and put it into Transfer Mode */
2429 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2430 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2431 cmd.resp_type = MMC_RSP_R1;
2432 cmd.cmdarg = mmc->rca << 16;
2433 err = mmc_send_cmd(mmc, &cmd, NULL);
2440 * For SD, its erase group is always one sector
2442 #if CONFIG_IS_ENABLED(MMC_WRITE)
2443 mmc->erase_grp_size = 1;
2445 mmc->part_config = MMCPART_NOAVAILABLE;
2447 err = mmc_startup_v4(mmc);
2451 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2455 #if CONFIG_IS_ENABLED(MMC_TINY)
2456 mmc_set_clock(mmc, mmc->legacy_speed, false);
2457 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2458 mmc_set_bus_width(mmc, 1);
2461 err = sd_get_capabilities(mmc);
2464 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2466 err = mmc_get_capabilities(mmc);
2469 mmc_select_mode_and_width(mmc, mmc->card_caps);
2475 mmc->best_mode = mmc->selected_mode;
2477 /* Fix the block length for DDR mode */
2478 if (mmc->ddr_mode) {
2479 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2480 #if CONFIG_IS_ENABLED(MMC_WRITE)
2481 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2485 /* fill in device description */
2486 bdesc = mmc_get_blk_desc(mmc);
2490 bdesc->blksz = mmc->read_bl_len;
2491 bdesc->log2blksz = LOG2(bdesc->blksz);
2492 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2493 #if !defined(CONFIG_SPL_BUILD) || \
2494 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2495 !defined(CONFIG_USE_TINY_PRINTF))
2496 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2497 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2498 (mmc->cid[3] >> 16) & 0xffff);
2499 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2500 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2501 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2502 (mmc->cid[2] >> 24) & 0xff);
2503 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2504 (mmc->cid[2] >> 16) & 0xf);
2506 bdesc->vendor[0] = 0;
2507 bdesc->product[0] = 0;
2508 bdesc->revision[0] = 0;
2511 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2518 static int mmc_send_if_cond(struct mmc *mmc)
2523 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2524 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2525 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2526 cmd.resp_type = MMC_RSP_R7;
2528 err = mmc_send_cmd(mmc, &cmd, NULL);
2533 if ((cmd.response[0] & 0xff) != 0xaa)
2536 mmc->version = SD_VERSION_2;
2541 #if !CONFIG_IS_ENABLED(DM_MMC)
2542 /* board-specific MMC power initializations. */
2543 __weak void board_mmc_power_init(void)
2548 static int mmc_power_init(struct mmc *mmc)
2550 #if CONFIG_IS_ENABLED(DM_MMC)
2551 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2554 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2557 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2559 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2560 &mmc->vqmmc_supply);
2562 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2564 #else /* !CONFIG_DM_MMC */
2566 * Driver model should use a regulator, as above, rather than calling
2567 * out to board code.
2569 board_mmc_power_init();
2575 * put the host in the initial state:
2576 * - turn on Vdd (card power supply)
2577 * - configure the bus width and clock to minimal values
2579 static void mmc_set_initial_state(struct mmc *mmc)
2583 /* First try to set 3.3V. If it fails set to 1.8V */
2584 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2586 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2588 pr_warn("mmc: failed to set signal voltage\n");
2590 mmc_select_mode(mmc, MMC_LEGACY);
2591 mmc_set_bus_width(mmc, 1);
2592 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2595 static int mmc_power_on(struct mmc *mmc)
2597 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2598 if (mmc->vmmc_supply) {
2599 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2602 puts("Error enabling VMMC supply\n");
2610 static int mmc_power_off(struct mmc *mmc)
2612 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2613 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2614 if (mmc->vmmc_supply) {
2615 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2618 pr_debug("Error disabling VMMC supply\n");
/*
 * Power the card fully off and back on, inserting a delay so the
 * supply actually drains.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int ret;

	ret = mmc_power_off(mmc);
	if (ret)
		return ret;
	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);
	return mmc_power_on(mmc);
}
2641 int mmc_get_op_cond(struct mmc *mmc)
2643 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2649 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2650 mmc_adapter_card_type_ident();
2652 err = mmc_power_init(mmc);
2656 #ifdef CONFIG_MMC_QUIRKS
2657 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2658 MMC_QUIRK_RETRY_SEND_CID;
2661 err = mmc_power_cycle(mmc);
2664 * if power cycling is not supported, we should not try
2665 * to use the UHS modes, because we wouldn't be able to
2666 * recover from an error during the UHS initialization.
2668 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2670 mmc->host_caps &= ~UHS_CAPS;
2671 err = mmc_power_on(mmc);
2676 #if CONFIG_IS_ENABLED(DM_MMC)
2677 /* The device has already been probed ready for use */
2679 /* made sure it's not NULL earlier */
2680 err = mmc->cfg->ops->init(mmc);
2687 mmc_set_initial_state(mmc);
2689 /* Reset the Card */
2690 err = mmc_go_idle(mmc);
2695 /* The internal partition reset to user partition(0) at every CMD0*/
2696 mmc_get_blk_desc(mmc)->hwpart = 0;
2698 /* Test for SD version 2 */
2699 err = mmc_send_if_cond(mmc);
2701 /* Now try to get the SD card's operating condition */
2702 err = sd_send_op_cond(mmc, uhs_en);
2703 if (err && uhs_en) {
2705 mmc_power_cycle(mmc);
2709 /* If the command timed out, we check for an MMC card */
2710 if (err == -ETIMEDOUT) {
2711 err = mmc_send_op_cond(mmc);
2714 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2715 pr_err("Card did not respond to voltage select!\n");
2724 int mmc_start_init(struct mmc *mmc)
2730 * all hosts are capable of 1 bit bus-width and able to use the legacy
2733 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2734 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2736 #if !defined(CONFIG_MMC_BROKEN_CD)
2737 /* we pretend there's no card when init is NULL */
2738 no_card = mmc_getcd(mmc) == 0;
2742 #if !CONFIG_IS_ENABLED(DM_MMC)
2743 no_card = no_card || (mmc->cfg->ops->init == NULL);
2747 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2748 pr_err("MMC: no card present\n");
2753 err = mmc_get_op_cond(mmc);
2756 mmc->init_in_progress = 1;
2761 static int mmc_complete_init(struct mmc *mmc)
2765 mmc->init_in_progress = 0;
2766 if (mmc->op_cond_pending)
2767 err = mmc_complete_op_cond(mmc);
2770 err = mmc_startup(mmc);
2778 int mmc_init(struct mmc *mmc)
2781 __maybe_unused ulong start;
2782 #if CONFIG_IS_ENABLED(DM_MMC)
2783 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2790 start = get_timer(0);
2792 if (!mmc->init_in_progress)
2793 err = mmc_start_init(mmc);
2796 err = mmc_complete_init(mmc);
2798 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2803 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2804 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2805 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2806 int mmc_deinit(struct mmc *mmc)
2814 caps_filtered = mmc->card_caps &
2815 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2816 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2817 MMC_CAP(UHS_SDR104));
2819 return sd_select_mode_and_width(mmc, caps_filtered);
2821 caps_filtered = mmc->card_caps &
2822 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2824 return mmc_select_mode_and_width(mmc, caps_filtered);
2829 int mmc_set_dsr(struct mmc *mmc, u16 val)
2835 /* CPU-specific MMC initializations */
2836 __weak int cpu_mmc_init(bd_t *bis)
2841 /* board-specific MMC initializations. */
2842 __weak int board_mmc_init(bd_t *bis)
2847 void mmc_set_preinit(struct mmc *mmc, int preinit)
2849 mmc->preinit = preinit;
2852 #if CONFIG_IS_ENABLED(DM_MMC)
2853 static int mmc_probe(bd_t *bis)
2857 struct udevice *dev;
2859 ret = uclass_get(UCLASS_MMC, &uc);
2864 * Try to add them in sequence order. Really with driver model we
2865 * should allow holes, but the current MMC list does not allow that.
2866 * So if we request 0, 1, 3 we will get 0, 1, 2.
2868 for (i = 0; ; i++) {
2869 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2873 uclass_foreach_dev(dev, uc) {
2874 ret = device_probe(dev);
2876 pr_err("%s - probe failed: %d\n", dev->name, ret);
2882 static int mmc_probe(bd_t *bis)
2884 if (board_mmc_init(bis) < 0)
2891 int mmc_initialize(bd_t *bis)
2893 static int initialized = 0;
2895 if (initialized) /* Avoid initializing mmc multiple times */
2899 #if !CONFIG_IS_ENABLED(BLK)
2900 #if !CONFIG_IS_ENABLED(MMC_TINY)
2904 ret = mmc_probe(bis);
2908 #ifndef CONFIG_SPL_BUILD
2909 print_mmc_devices(',');
2916 #ifdef CONFIG_CMD_BKOPS_ENABLE
2917 int mmc_set_bkops_enable(struct mmc *mmc)
2920 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2922 err = mmc_send_ext_csd(mmc, ext_csd);
2924 puts("Could not get ext_csd register values\n");
2928 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2929 puts("Background operations not supported on device\n");
2930 return -EMEDIUMTYPE;
2933 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2934 puts("Background operations already enabled\n");
2938 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2940 puts("Failed to enable manual background operations\n");
2944 puts("Enabled manual background operations\n");