1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
15 #include <dm/device-internal.h>
19 #include <linux/bitops.h>
20 #include <linux/delay.h>
21 #include <power/regulator.h>
24 #include <linux/list.h>
26 #include "mmc_private.h"
28 #define DEFAULT_CMD6_TIMEOUT_MS 500
30 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
32 #if !CONFIG_IS_ENABLED(DM_MMC)
34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
39 __weak int board_mmc_getwp(struct mmc *mmc)
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [MMC_HS] = "MMC High Speed (26MHz)",
140 [SD_HS] = "SD High Speed (50MHz)",
141 [UHS_SDR12] = "UHS SDR12 (25MHz)",
142 [UHS_SDR25] = "UHS SDR25 (50MHz)",
143 [UHS_SDR50] = "UHS SDR50 (100MHz)",
144 [UHS_SDR104] = "UHS SDR104 (208MHz)",
145 [UHS_DDR50] = "UHS DDR50 (50MHz)",
146 [MMC_HS_52] = "MMC High Speed (52MHz)",
147 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
148 [MMC_HS_200] = "HS200 (200MHz)",
149 [MMC_HS_400] = "HS400 (200MHz)",
150 [MMC_HS_400_ES] = "HS400ES (200MHz)",
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
166 [MMC_HS_52] = 52000000,
167 [MMC_DDR_52] = 52000000,
168 [UHS_SDR12] = 25000000,
169 [UHS_SDR25] = 50000000,
170 [UHS_SDR50] = 100000000,
171 [UHS_DDR50] = 50000000,
172 [UHS_SDR104] = 208000000,
173 [MMC_HS_200] = 200000000,
174 [MMC_HS_400] = 200000000,
175 [MMC_HS_400_ES] = 200000000,
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the selected bus mode on the host side
 *
 * Caches the mode, its nominal frequency (via mmc_mode2freq()) and whether
 * the mode is DDR in the mmc struct, then logs the choice. Does not touch
 * the controller clock itself — callers pair this with mmc_set_clock().
 */
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
/*
 * mmc_poll_for_busy() - wait until the card leaves the busy state
 * @mmc:        card to poll
 * @timeout_ms: maximum time to wait, in milliseconds
 *
 * First tries hardware dat0 polling (mmc_wait_dat0(); note the ms->us
 * conversion), then falls back to CMD13 SEND_STATUS polling: ready means
 * RDY_FOR_DATA set and the current state is not programming. Any error bit
 * in MMC_STATUS_MASK or an expired timeout is reported with pr_err() (the
 * pr_err() calls are compiled out in SPL builds without libcommon).
 */
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
236 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
241 err = mmc_send_status(mmc, &status);
245 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
246 (status & MMC_STATUS_CURR_STATE) !=
250 if (status & MMC_STATUS_MASK) {
251 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
252 pr_err("Status Error: 0x%08x\n", status);
257 if (timeout_ms-- <= 0)
263 if (timeout_ms <= 0) {
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n")
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292 * attempt, let's try a few more time
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
368 if (memcmp(data_buf, tuning_block_pattern, size))
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
386 if (mmc->high_capacity)
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n");
416 #if !CONFIG_IS_ENABLED(DM_MMC)
417 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
419 if (mmc->cfg->ops->get_b_max)
420 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
422 return mmc->cfg->b_max;
426 #if CONFIG_IS_ENABLED(BLK)
427 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
429 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
433 #if CONFIG_IS_ENABLED(BLK)
434 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
436 int dev_num = block_dev->devnum;
438 lbaint_t cur, blocks_todo = blkcnt;
444 struct mmc *mmc = find_mmc_device(dev_num);
448 if (CONFIG_IS_ENABLED(MMC_TINY))
449 err = mmc_switch_part(mmc, block_dev->hwpart);
451 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
456 if ((start + blkcnt) > block_dev->lba) {
457 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
458 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
459 start + blkcnt, block_dev->lba);
464 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
465 pr_debug("%s: Failed to set blocklen\n", __func__);
469 b_max = mmc_get_b_max(mmc, dst, blkcnt);
472 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
473 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
474 pr_debug("%s: Failed to read blocks\n", __func__);
479 dst += cur * mmc->read_bl_len;
480 } while (blocks_todo > 0);
485 static int mmc_go_idle(struct mmc *mmc)
492 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
494 cmd.resp_type = MMC_RSP_NONE;
496 err = mmc_send_cmd(mmc, &cmd, NULL);
506 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
507 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
513 * Send CMD11 only if the request is to switch the card to
516 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
517 return mmc_set_signal_voltage(mmc, signal_voltage);
519 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
521 cmd.resp_type = MMC_RSP_R1;
523 err = mmc_send_cmd(mmc, &cmd, NULL);
527 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
531 * The card should drive cmd and dat[0:3] low immediately
532 * after the response of cmd11, but wait 100 us to be sure
534 err = mmc_wait_dat0(mmc, 0, 100);
541 * During a signal voltage level switch, the clock must be gated
542 * for 5 ms according to the SD spec
544 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
546 err = mmc_set_signal_voltage(mmc, signal_voltage);
550 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
552 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
555 * Failure to switch is indicated by the card holding
556 * dat[0:3] low. Wait for at least 1 ms according to spec
558 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * sd_send_op_cond() - negotiate operating conditions with an SD card
 * @mmc:    card under initialization
 * @uhs_en: request 1.8V signalling (OCR_S18R) when true
 *
 * Sends ACMD41 (CMD55 + SD_CMD_APP_SEND_OP_COND) with the host's supported
 * voltage window until the card reports OCR_BUSY clear. Sets the detected
 * SD version, reads the OCR separately for SPI hosts, optionally switches
 * to 1.8V signalling when both host and card agree, and derives
 * high_capacity from OCR_HCS.
 */
568 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
575 cmd.cmdidx = MMC_CMD_APP_CMD;
576 cmd.resp_type = MMC_RSP_R1;
579 err = mmc_send_cmd(mmc, &cmd, NULL);
584 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
585 cmd.resp_type = MMC_RSP_R3;
588 * Most cards do not answer if some reserved bits
589 * in the ocr are set. However, some controllers
590 * can set bit 7 (reserved for low voltages), but
591 * how to manage low-voltage SD cards is not yet
594 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
595 (mmc->cfg->voltages & 0xff8000);
597 if (mmc->version == SD_VERSION_2)
598 cmd.cmdarg |= OCR_HCS;
601 cmd.cmdarg |= OCR_S18R;
603 err = mmc_send_cmd(mmc, &cmd, NULL);
608 if (cmd.response[0] & OCR_BUSY)
617 if (mmc->version != SD_VERSION_2)
618 mmc->version = SD_VERSION_1_0;
620 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
621 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
622 cmd.resp_type = MMC_RSP_R3;
625 err = mmc_send_cmd(mmc, &cmd, NULL);
631 mmc->ocr = cmd.response[0];
633 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
634 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
636 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
642 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
648 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
653 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
654 cmd.resp_type = MMC_RSP_R3;
656 if (use_arg && !mmc_host_is_spi(mmc))
657 cmd.cmdarg = OCR_HCS |
658 (mmc->cfg->voltages &
659 (mmc->ocr & OCR_VOLTAGE_MASK)) |
660 (mmc->ocr & OCR_ACCESS_MODE);
662 err = mmc_send_cmd(mmc, &cmd, NULL);
665 mmc->ocr = cmd.response[0];
669 static int mmc_send_op_cond(struct mmc *mmc)
673 /* Some cards seem to need this */
676 /* Asking to the card its capabilities */
677 for (i = 0; i < 2; i++) {
678 err = mmc_send_op_cond_iter(mmc, i != 0);
682 /* exit if not busy (flag seems to be inverted) */
683 if (mmc->ocr & OCR_BUSY)
686 mmc->op_cond_pending = 1;
690 static int mmc_complete_op_cond(struct mmc *mmc)
697 mmc->op_cond_pending = 0;
698 if (!(mmc->ocr & OCR_BUSY)) {
699 /* Some cards seem to need this */
702 start = get_timer(0);
704 err = mmc_send_op_cond_iter(mmc, 1);
707 if (mmc->ocr & OCR_BUSY)
709 if (get_timer(start) > timeout)
715 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
716 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
717 cmd.resp_type = MMC_RSP_R3;
720 err = mmc_send_cmd(mmc, &cmd, NULL);
725 mmc->ocr = cmd.response[0];
728 mmc->version = MMC_VERSION_UNKNOWN;
730 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
737 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
740 struct mmc_data data;
743 /* Get the Card Status Register */
744 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
745 cmd.resp_type = MMC_RSP_R1;
748 data.dest = (char *)ext_csd;
750 data.blocksize = MMC_MAX_BLOCK_LEN;
751 data.flags = MMC_DATA_READ;
753 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * __mmc_switch() - issue CMD6 (SWITCH) to write one EXT_CSD byte
 * @mmc:   card to modify
 * @set:   command set (EXT_CSD_CMD_SET_NORMAL)
 * @index: EXT_CSD byte index to write
 * @value: value to write
 *
 * The CMD6 timeout defaults to DEFAULT_CMD6_TIMEOUT_MS, then is widened
 * using the card-advertised GENERIC_CMD6_TIME, or PARTITION_SWITCH_TIME
 * when switching EXT_CSD_PART_CONF (both are in 10ms units, hence * 10).
 * After the command the card is polled — first via dat0, then via CMD13 —
 * until ready or until SWITCH_ERROR / timeout.
 */
758 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
761 unsigned int status, start;
763 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
764 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
765 (index == EXT_CSD_PART_CONF);
769 if (mmc->gen_cmd6_time)
770 timeout_ms = mmc->gen_cmd6_time * 10;
772 if (is_part_switch && mmc->part_switch_time)
773 timeout_ms = mmc->part_switch_time * 10;
775 cmd.cmdidx = MMC_CMD_SWITCH;
776 cmd.resp_type = MMC_RSP_R1b;
777 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
782 ret = mmc_send_cmd(mmc, &cmd, NULL);
783 } while (ret && retries-- > 0);
788 start = get_timer(0);
790 /* poll dat0 for rdy/busy status */
791 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
792 if (ret && ret != -ENOSYS)
796 * In cases when not allowed to poll by using CMD13 or because we aren't
797 * capable of polling by using mmc_wait_dat0, then rely on waiting the
798 * stated timeout to be sufficient.
800 if (ret == -ENOSYS && !send_status)
803 /* Finally wait until the card is ready or indicates a failure
804 * to switch. It doesn't hurt to use CMD13 here even if send_status
805 * is false, because by now (after 'timeout_ms' ms) the bus should be
809 ret = mmc_send_status(mmc, &status);
811 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
812 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
816 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
819 } while (get_timer(start) < timeout_ms);
/* mmc_switch() - public CMD6 wrapper that always polls for completion */
824 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
826 return __mmc_switch(mmc, set, index, value, true);
/* mmc_boot_wp() - power-on write-protect the boot area via EXT_CSD_BOOT_WP */
829 int mmc_boot_wp(struct mmc *mmc)
831 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
834 #if !CONFIG_IS_ENABLED(MMC_TINY)
835 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
841 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
847 speed_bits = EXT_CSD_TIMING_HS;
849 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
851 speed_bits = EXT_CSD_TIMING_HS200;
854 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
856 speed_bits = EXT_CSD_TIMING_HS400;
859 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
861 speed_bits = EXT_CSD_TIMING_HS400;
865 speed_bits = EXT_CSD_TIMING_LEGACY;
871 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
872 speed_bits, !hsdowngrade);
876 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
877 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
879 * In case the eMMC is in HS200/HS400 mode and we are downgrading
880 * to HS mode, the card clock are still running much faster than
881 * the supported HS mode clock, so we can not reliably read out
882 * Extended CSD. Reconfigure the controller to run at HS mode.
885 mmc_select_mode(mmc, MMC_HS);
886 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
890 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
891 /* Now check to see that it worked */
892 err = mmc_send_ext_csd(mmc, test_csd);
896 /* No high-speed support */
897 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive the card's bus-mode capabilities
 *
 * Parses the cached EXT_CSD (ext_csd must already have been read — the
 * pr_err() below guards the case where it is missing). Starts from the
 * 1-bit legacy baseline, then adds 4/8-bit widths and HS/DDR/HS200/HS400/
 * HS400ES modes from EXT_CSD_CARD_TYPE, each gated by the matching Kconfig
 * option. SPI hosts and pre-v4 cards keep the legacy baseline only.
 */
904 static int mmc_get_capabilities(struct mmc *mmc)
906 u8 *ext_csd = mmc->ext_csd;
909 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
911 if (mmc_host_is_spi(mmc))
914 /* Only version 4 supports high-speed */
915 if (mmc->version < MMC_VERSION_4)
919 pr_err("No ext_csd found!\n"); /* this should never happen */
923 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
925 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
926 mmc->cardtype = cardtype;
928 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
929 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
930 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
931 mmc->card_caps |= MMC_MODE_HS200;
934 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
935 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
936 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
937 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
938 mmc->card_caps |= MMC_MODE_HS400;
941 if (cardtype & EXT_CSD_CARD_TYPE_52) {
942 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
943 mmc->card_caps |= MMC_MODE_DDR_52MHz;
944 mmc->card_caps |= MMC_MODE_HS_52MHz;
946 if (cardtype & EXT_CSD_CARD_TYPE_26)
947 mmc->card_caps |= MMC_MODE_HS;
949 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* HS400ES additionally requires enhanced-strobe support in EXT_CSD */
950 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
951 (mmc->card_caps & MMC_MODE_HS400)) {
952 mmc->card_caps |= MMC_MODE_HS400_ES;
960 static int mmc_set_capacity(struct mmc *mmc, int part_num)
964 mmc->capacity = mmc->capacity_user;
968 mmc->capacity = mmc->capacity_boot;
971 mmc->capacity = mmc->capacity_rpmb;
977 mmc->capacity = mmc->capacity_gp[part_num - 4];
983 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
988 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
994 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
996 (mmc->part_config & ~PART_ACCESS_MASK)
997 | (part_num & PART_ACCESS_MASK));
998 } while (ret && retry--);
1001 * Set the capacity if the switch succeeded or was intended
1002 * to return to representing the raw device.
1004 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1005 ret = mmc_set_capacity(mmc, part_num);
1006 mmc_get_blk_desc(mmc)->hwpart = part_num;
1012 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1013 int mmc_hwpart_config(struct mmc *mmc,
1014 const struct mmc_hwpart_conf *conf,
1015 enum mmc_hwpart_conf_mode mode)
1020 u32 gp_size_mult[4];
1021 u32 max_enh_size_mult;
1022 u32 tot_enh_size_mult = 0;
1025 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1027 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1030 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1031 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1032 return -EMEDIUMTYPE;
1035 if (!(mmc->part_support & PART_SUPPORT)) {
1036 pr_err("Card does not support partitioning\n");
1037 return -EMEDIUMTYPE;
1040 if (!mmc->hc_wp_grp_size) {
1041 pr_err("Card does not define HC WP group size\n");
1042 return -EMEDIUMTYPE;
1045 /* check partition alignment and total enhanced size */
1046 if (conf->user.enh_size) {
1047 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1048 conf->user.enh_start % mmc->hc_wp_grp_size) {
1049 pr_err("User data enhanced area not HC WP group "
1053 part_attrs |= EXT_CSD_ENH_USR;
1054 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1055 if (mmc->high_capacity) {
1056 enh_start_addr = conf->user.enh_start;
1058 enh_start_addr = (conf->user.enh_start << 9);
1064 tot_enh_size_mult += enh_size_mult;
1066 for (pidx = 0; pidx < 4; pidx++) {
1067 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1068 pr_err("GP%i partition not HC WP group size "
1069 "aligned\n", pidx+1);
1072 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1073 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1074 part_attrs |= EXT_CSD_ENH_GP(pidx);
1075 tot_enh_size_mult += gp_size_mult[pidx];
1079 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1080 pr_err("Card does not support enhanced attribute\n");
1081 return -EMEDIUMTYPE;
1084 err = mmc_send_ext_csd(mmc, ext_csd);
1089 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1090 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1091 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1092 if (tot_enh_size_mult > max_enh_size_mult) {
1093 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1094 tot_enh_size_mult, max_enh_size_mult);
1095 return -EMEDIUMTYPE;
1098 /* The default value of EXT_CSD_WR_REL_SET is device
1099 * dependent, the values can only be changed if the
1100 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1101 * changed only once and before partitioning is completed. */
1102 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1103 if (conf->user.wr_rel_change) {
1104 if (conf->user.wr_rel_set)
1105 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1107 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1109 for (pidx = 0; pidx < 4; pidx++) {
1110 if (conf->gp_part[pidx].wr_rel_change) {
1111 if (conf->gp_part[pidx].wr_rel_set)
1112 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1114 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1118 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1119 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1120 puts("Card does not support host controlled partition write "
1121 "reliability settings\n");
1122 return -EMEDIUMTYPE;
1125 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1126 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1127 pr_err("Card already partitioned\n");
1131 if (mode == MMC_HWPART_CONF_CHECK)
1134 /* Partitioning requires high-capacity size definitions */
1135 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1136 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1137 EXT_CSD_ERASE_GROUP_DEF, 1);
1142 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1144 #if CONFIG_IS_ENABLED(MMC_WRITE)
1145 /* update erase group size to be high-capacity */
1146 mmc->erase_grp_size =
1147 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1152 /* all OK, write the configuration */
1153 for (i = 0; i < 4; i++) {
1154 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1155 EXT_CSD_ENH_START_ADDR+i,
1156 (enh_start_addr >> (i*8)) & 0xFF);
1160 for (i = 0; i < 3; i++) {
1161 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1162 EXT_CSD_ENH_SIZE_MULT+i,
1163 (enh_size_mult >> (i*8)) & 0xFF);
1167 for (pidx = 0; pidx < 4; pidx++) {
1168 for (i = 0; i < 3; i++) {
1169 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1170 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1171 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1176 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1177 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1181 if (mode == MMC_HWPART_CONF_SET)
1184 /* The WR_REL_SET is a write-once register but shall be
1185 * written before setting PART_SETTING_COMPLETED. As it is
1186 * write-once we can only write it when completing the
1188 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1189 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1190 EXT_CSD_WR_REL_SET, wr_rel_set);
1195 /* Setting PART_SETTING_COMPLETED confirms the partition
1196 * configuration but it only becomes effective after power
1197 * cycle, so we do not adjust the partition related settings
1198 * in the mmc struct. */
1200 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1201 EXT_CSD_PARTITION_SETTING,
1202 EXT_CSD_PARTITION_SETTING_COMPLETED);
1210 #if !CONFIG_IS_ENABLED(DM_MMC)
1211 int mmc_getcd(struct mmc *mmc)
1215 cd = board_mmc_getcd(mmc);
1218 if (mmc->cfg->ops->getcd)
1219 cd = mmc->cfg->ops->getcd(mmc);
1228 #if !CONFIG_IS_ENABLED(MMC_TINY)
1229 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1232 struct mmc_data data;
1234 /* Switch the frequency */
1235 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1236 cmd.resp_type = MMC_RSP_R1;
1237 cmd.cmdarg = (mode << 31) | 0xffffff;
1238 cmd.cmdarg &= ~(0xf << (group * 4));
1239 cmd.cmdarg |= value << (group * 4);
1241 data.dest = (char *)resp;
1242 data.blocksize = 64;
1244 data.flags = MMC_DATA_READ;
1246 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * sd_get_capabilities() - probe an SD card's version, width and speed modes
 *
 * Reads the SCR (ACMD51) to determine the SD spec version and 4-bit width
 * support, then uses CMD6 in check mode (sd_switch()) to detect high-speed
 * support. With MMC_UHS_SUPPORT enabled and an SD 3.0+ card, the switch
 * status group-3 bits are decoded into the individual UHS mode caps.
 * SPI hosts keep only the 1-bit legacy baseline.
 */
1249 static int sd_get_capabilities(struct mmc *mmc)
1253 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1254 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1255 struct mmc_data data;
1257 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1261 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1263 if (mmc_host_is_spi(mmc))
1266 /* Read the SCR to find out if this card supports higher speeds */
1267 cmd.cmdidx = MMC_CMD_APP_CMD;
1268 cmd.resp_type = MMC_RSP_R1;
1269 cmd.cmdarg = mmc->rca << 16;
1271 err = mmc_send_cmd(mmc, &cmd, NULL);
1276 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1277 cmd.resp_type = MMC_RSP_R1;
1283 data.dest = (char *)scr;
1286 data.flags = MMC_DATA_READ;
1288 err = mmc_send_cmd(mmc, &cmd, &data);
1297 mmc->scr[0] = __be32_to_cpu(scr[0]);
1298 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SCR bits 27:24 hold SD_SPEC; bit 15 distinguishes 2.0 from 3.0 */
1300 switch ((mmc->scr[0] >> 24) & 0xf) {
1302 mmc->version = SD_VERSION_1_0;
1305 mmc->version = SD_VERSION_1_10;
1308 mmc->version = SD_VERSION_2;
1309 if ((mmc->scr[0] >> 15) & 0x1)
1310 mmc->version = SD_VERSION_3;
1313 mmc->version = SD_VERSION_1_0;
1317 if (mmc->scr[0] & SD_DATA_4BIT)
1318 mmc->card_caps |= MMC_MODE_4BIT;
1320 /* Version 1.0 doesn't support switching */
1321 if (mmc->version == SD_VERSION_1_0)
1326 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1327 (u8 *)switch_status);
1332 /* The high-speed function is busy. Try again */
1333 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1337 /* If high-speed isn't supported, we return */
1338 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1339 mmc->card_caps |= MMC_CAP(SD_HS)
1341 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1342 /* Versions before 3.0 don't support UHS modes */
1343 if (mmc->version < SD_VERSION_3)
1346 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1347 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1348 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1349 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1350 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1351 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1352 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1353 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1354 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1355 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1356 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1362 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1366 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1369 /* SD version 1.00 and 1.01 does not support CMD 6 */
1370 if (mmc->version == SD_VERSION_1_0)
1375 speed = UHS_SDR12_BUS_SPEED;
1378 speed = HIGH_SPEED_BUS_SPEED;
1380 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1382 speed = UHS_SDR12_BUS_SPEED;
1385 speed = UHS_SDR25_BUS_SPEED;
1388 speed = UHS_SDR50_BUS_SPEED;
1391 speed = UHS_DDR50_BUS_SPEED;
1394 speed = UHS_SDR104_BUS_SPEED;
1401 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1405 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1411 static int sd_select_bus_width(struct mmc *mmc, int w)
1416 if ((w != 4) && (w != 1))
1419 cmd.cmdidx = MMC_CMD_APP_CMD;
1420 cmd.resp_type = MMC_RSP_R1;
1421 cmd.cmdarg = mmc->rca << 16;
1423 err = mmc_send_cmd(mmc, &cmd, NULL);
1427 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1428 cmd.resp_type = MMC_RSP_R1;
1433 err = mmc_send_cmd(mmc, &cmd, NULL);
1441 #if CONFIG_IS_ENABLED(MMC_WRITE)
1442 static int sd_read_ssr(struct mmc *mmc)
1444 static const unsigned int sd_au_size[] = {
1445 0, SZ_16K / 512, SZ_32K / 512,
1446 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1447 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1448 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1449 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1454 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1455 struct mmc_data data;
1457 unsigned int au, eo, et, es;
1459 cmd.cmdidx = MMC_CMD_APP_CMD;
1460 cmd.resp_type = MMC_RSP_R1;
1461 cmd.cmdarg = mmc->rca << 16;
1463 err = mmc_send_cmd(mmc, &cmd, NULL);
1464 #ifdef CONFIG_MMC_QUIRKS
1465 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1468 * It has been seen that APP_CMD may fail on the first
1469 * attempt, let's try a few more times
1472 err = mmc_send_cmd(mmc, &cmd, NULL);
1475 } while (retries--);
1481 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1482 cmd.resp_type = MMC_RSP_R1;
1486 data.dest = (char *)ssr;
1487 data.blocksize = 64;
1489 data.flags = MMC_DATA_READ;
1491 err = mmc_send_cmd(mmc, &cmd, &data);
1499 for (i = 0; i < 16; i++)
1500 ssr[i] = be32_to_cpu(ssr[i]);
1502 au = (ssr[2] >> 12) & 0xF;
1503 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1504 mmc->ssr.au = sd_au_size[au];
1505 es = (ssr[3] >> 24) & 0xFF;
1506 es |= (ssr[2] & 0xFF) << 8;
1507 et = (ssr[3] >> 18) & 0x3F;
1509 eo = (ssr[3] >> 16) & 0x3;
1510 mmc->ssr.erase_timeout = (et * 1000) / es;
1511 mmc->ssr.erase_offset = eo * 1000;
1514 pr_debug("Invalid Allocation Unit Size.\n");
1520 /* frequency bases */
1521 /* divided by 10 to be nice to platforms without floating point */
1522 static const int fbase[] = {
1529 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1530 * to platforms without floating point.
1532 static const u8 multipliers[] = {
/*
 * bus_width() - translate an MMC_MODE_xBIT capability flag to a lane count
 * @cap: exactly one of MMC_MODE_8BIT / MMC_MODE_4BIT / MMC_MODE_1BIT
 *
 * Returns 8, 4 or 1 for the matching flag; any other value is reported
 * with pr_warn(). Fix: the warning text read "bus witdh" — corrected to
 * "bus width" so the diagnostic is greppable and readable.
 */
1551 static inline int bus_width(uint cap)
1553 if (cap == MMC_MODE_8BIT)
1555 if (cap == MMC_MODE_4BIT)
1557 if (cap == MMC_MODE_1BIT)
1559 pr_warn("invalid bus width capability 0x%x\n", cap);
1563 #if !CONFIG_IS_ENABLED(DM_MMC)
1564 #ifdef MMC_SUPPORTS_TUNING
1565 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1571 static int mmc_set_ios(struct mmc *mmc)
1575 if (mmc->cfg->ops->set_ios)
1576 ret = mmc->cfg->ops->set_ios(mmc);
1581 static int mmc_host_power_cycle(struct mmc *mmc)
1585 if (mmc->cfg->ops->host_power_cycle)
1586 ret = mmc->cfg->ops->host_power_cycle(mmc);
1592 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1595 if (clock > mmc->cfg->f_max)
1596 clock = mmc->cfg->f_max;
1598 if (clock < mmc->cfg->f_min)
1599 clock = mmc->cfg->f_min;
1603 mmc->clk_disable = disable;
1605 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1607 return mmc_set_ios(mmc);
1610 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1612 mmc->bus_width = width;
1614 return mmc_set_ios(mmc);
1617 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1619 * helper function to display the capabilities in a human
1620 * friendly manner. The capabilities include bus width and
1623 void mmc_dump_capabilities(const char *text, uint caps)
1627 pr_debug("%s: widths [", text);
1628 if (caps & MMC_MODE_8BIT)
1630 if (caps & MMC_MODE_4BIT)
1632 if (caps & MMC_MODE_1BIT)
1634 pr_debug("\b\b] modes [");
1635 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1636 if (MMC_CAP(mode) & caps)
1637 pr_debug("%s, ", mmc_mode_name(mode));
1638 pr_debug("\b\b]\n");
1642 struct mode_width_tuning {
1645 #ifdef MMC_SUPPORTS_TUNING
#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_voltage_to_mv() - convert an mmc_voltage enum value to millivolts
 *
 * Maps each MMC_SIGNAL_VOLTAGE_* constant to its mV value (0 / 3300 /
 * 1800 / 1200).
 */
1651 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1654 case MMC_SIGNAL_VOLTAGE_000: return 0;
1655 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1656 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1657 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * mmc_set_signal_voltage() - apply a new I/O signalling voltage
 *
 * No-op when the requested voltage is already set; otherwise stores it
 * and pushes the change to the controller via mmc_set_ios(), logging a
 * pr_debug() on failure.
 */
1662 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1666 if (mmc->signal_voltage == signal_voltage)
1669 mmc->signal_voltage = signal_voltage;
1670 err = mmc_set_ios(mmc);
1672 pr_debug("unable to set voltage (err %d)\n", err);
1677 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1683 #if !CONFIG_IS_ENABLED(MMC_TINY)
1684 static const struct mode_width_tuning sd_modes_by_pref[] = {
1685 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1686 #ifdef MMC_SUPPORTS_TUNING
1689 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1690 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1695 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1699 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1703 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1708 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1710 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1713 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1718 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1722 #define for_each_sd_mode_by_pref(caps, mwt) \
1723 for (mwt = sd_modes_by_pref;\
1724 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1726 if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the fastest SD bus mode/width combination supported by both card
 * and host, then program the card and the host to use it.  Walks the
 * preference-ordered mode table, and for each mode tries 4-bit before
 * 1-bit width.  On any per-combination failure it falls back to the next
 * candidate; if nothing works, it reverts to MMC_LEGACY and errors out.
 */
1728 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1731 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1732 const struct mode_width_tuning *mwt;
1733 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* OCR_S18R set means the card accepted the 1.8V switch request */
1734 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1736 bool uhs_en = false;
1741 mmc_dump_capabilities("sd card", card_caps);
1742 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts only do legacy mode on a 1-bit bus */
1745 if (mmc_host_is_spi(mmc)) {
1746 mmc_set_bus_width(mmc, 1);
1747 mmc_select_mode(mmc, MMC_LEGACY);
1748 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1752 /* Restrict card's capabilities by what the host can do */
1753 caps = card_caps & mmc->host_caps;
1758 for_each_sd_mode_by_pref(caps, mwt) {
1761 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1762 if (*w & caps & mwt->widths) {
1763 pr_debug("trying mode %s width %d (at %d MHz)\n",
1764 mmc_mode_name(mwt->mode),
1766 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1768 /* configure the bus width (card + host) */
1769 err = sd_select_bus_width(mmc, bus_width(*w));
1772 mmc_set_bus_width(mmc, bus_width(*w));
1774 /* configure the bus mode (card) */
1775 err = sd_set_card_speed(mmc, mwt->mode);
1779 /* configure the bus mode (host) */
1780 mmc_select_mode(mmc, mwt->mode);
1781 mmc_set_clock(mmc, mmc->tran_speed,
1784 #ifdef MMC_SUPPORTS_TUNING
1785 /* execute tuning if needed */
1786 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1787 err = mmc_execute_tuning(mmc,
1790 pr_debug("tuning failed\n");
1796 #if CONFIG_IS_ENABLED(MMC_WRITE)
1797 err = sd_read_ssr(mmc);
1799 pr_warn("unable to read ssr\n");
1805 /* revert to a safer bus speed */
1806 mmc_select_mode(mmc, MMC_LEGACY);
1807 mmc_set_clock(mmc, mmc->tran_speed,
1813 pr_err("unable to select a mode\n");
1818 * read and compare the part of ext csd that is constant.
1819 * This can be used to check that the transfer is working
/*
 * Re-read EXT_CSD into a scratch buffer and compare a handful of
 * read-only fields against the cached copy.  A mismatch indicates the
 * data transfer at the newly selected mode/width is not reliable.
 */
1822 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1825 const u8 *ext_csd = mmc->ext_csd;
1826 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists on MMC v4 and later */
1828 if (mmc->version < MMC_VERSION_4)
1831 err = mmc_send_ext_csd(mmc, test_csd);
1835 /* Only compare read only fields */
1836 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1837 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1838 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1839 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1840 ext_csd[EXT_CSD_REV]
1841 == test_csd[EXT_CSD_REV] &&
1842 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1843 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1844 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1845 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1851 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select the lowest signalling voltage that both the card (per its
 * EXT_CSD card type bits for the given bus @mode) and @allowed_mask
 * permit.  Builds a card-side voltage mask, then repeatedly tries the
 * lowest set bit of (card_mask & allowed_mask) until mmc_set_signal_voltage()
 * succeeds, dropping each failed candidate from the allowed mask.
 */
1852 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1853 uint32_t allowed_mask)
1861 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1862 EXT_CSD_CARD_TYPE_HS400_1_8V))
1863 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1864 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1865 EXT_CSD_CARD_TYPE_HS400_1_2V))
1866 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1869 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1870 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1871 MMC_SIGNAL_VOLTAGE_180;
1872 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1873 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1876 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1880 while (card_mask & allowed_mask) {
1881 enum mmc_voltage best_match;
/* ffs() picks the lowest set bit, i.e. the lowest candidate voltage */
1883 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1884 if (!mmc_set_signal_voltage(mmc, best_match))
1887 allowed_mask &= ~best_match;
1893 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1894 uint32_t allowed_mask)
/*
 * eMMC bus modes in order of preference (fastest first): HS400-ES,
 * HS400, HS200, then the DDR/HS/legacy modes.  Each high-speed entry is
 * guarded by its corresponding Kconfig option; HS400(-ES) requires an
 * 8-bit bus, HS200 allows 8- or 4-bit.
 */
1900 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1901 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1903 .mode = MMC_HS_400_ES,
1904 .widths = MMC_MODE_8BIT,
1907 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1910 .widths = MMC_MODE_8BIT,
1911 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1914 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1917 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1918 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1923 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1927 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1931 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1935 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate @mwt over mmc_modes_by_pref[] (fastest mode first), visiting
 * only entries whose mode is present in the @caps capability mask.
 */
1939 #define for_each_mmc_mode_by_pref(caps, mwt) \
1940 for (mwt = mmc_modes_by_pref;\
1941 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1943 if (caps & MMC_CAP(mwt->mode))
/*
 * Map a (capability bit, DDR flag) pair to the EXT_CSD BUS_WIDTH value
 * that must be written to the card.  Ordered widest-first so iteration
 * prefers the widest available bus.
 */
1945 static const struct ext_csd_bus_width {
1949 } ext_csd_bus_width[] = {
1950 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1951 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1952 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1953 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1954 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1957 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch an eMMC device into HS400 mode.  Per the eMMC spec, HS400
 * cannot be entered directly: tune in HS200 first, drop back to HS,
 * enable the 8-bit DDR bus, then switch the timing to HS400.
 */
1958 static int mmc_select_hs400(struct mmc *mmc)
1962 /* Set timing to HS200 for tuning */
1963 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1967 /* configure the bus mode (host) */
1968 mmc_select_mode(mmc, MMC_HS_200);
1969 mmc_set_clock(mmc, mmc->tran_speed, false);
1971 /* execute tuning if needed */
1972 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1974 debug("tuning failed\n");
1978 /* Set back to HS */
1979 mmc_set_card_speed(mmc, MMC_HS, true);
/* enable the 8-bit DDR bus on the card before raising the timing */
1981 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1982 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1986 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1990 mmc_select_mode(mmc, MMC_HS_400);
1991 err = mmc_set_clock(mmc, mmc->tran_speed, false);
1998 static int mmc_select_hs400(struct mmc *mmc)
2004 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2005 #if !CONFIG_IS_ENABLED(DM_MMC)
2006 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * Switch an eMMC device into HS400 Enhanced Strobe mode: go to HS,
 * enable the 8-bit DDR bus with the strobe bit set, switch the timing
 * to HS400-ES, then ask the host to enable enhanced strobe sampling.
 * No tuning is needed in this mode (that is the point of the strobe).
 */
2011 static int mmc_select_hs400es(struct mmc *mmc)
2015 err = mmc_set_card_speed(mmc, MMC_HS, true);
2019 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2020 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2021 EXT_CSD_BUS_WIDTH_STROBE);
2023 printf("switch to bus width for hs400 failed\n");
2026 /* TODO: driver strength */
2027 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2031 mmc_select_mode(mmc, MMC_HS_400_ES);
2032 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2036 return mmc_set_enhanced_strobe(mmc);
2039 static int mmc_select_hs400es(struct mmc *mmc)
/*
 * Iterate @ecbv over ext_csd_bus_width[] (widest first), visiting only
 * entries matching the requested @ddr flag whose width bit is in @caps.
 */
2045 #define for_each_supported_width(caps, ddr, ecbv) \
2046 for (ecbv = ext_csd_bus_width;\
2047 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2049 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the fastest eMMC bus mode/width combination supported by both
 * card and host and program both sides to use it.  For each candidate:
 * lower the signal voltage if required, set the card bus width, handle
 * the HS400/HS400-ES special entry sequences, set the card speed, run
 * tuning when needed, and verify the link with an EXT_CSD read-back.
 * On failure, restore the previous voltage and revert to a 1-bit
 * legacy bus before trying the next candidate.
 */
2051 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2054 const struct mode_width_tuning *mwt;
2055 const struct ext_csd_bus_width *ecbw;
2058 mmc_dump_capabilities("mmc", card_caps);
2059 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts only do legacy mode on a 1-bit bus */
2062 if (mmc_host_is_spi(mmc)) {
2063 mmc_set_bus_width(mmc, 1);
2064 mmc_select_mode(mmc, MMC_LEGACY);
2065 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2069 /* Restrict card's capabilities by what the host can do */
2070 card_caps &= mmc->host_caps;
2072 /* Only version 4 of MMC supports wider bus widths */
2073 if (mmc->version < MMC_VERSION_4)
2076 if (!mmc->ext_csd) {
2077 pr_debug("No ext_csd found!\n"); /* this should never happen */
2081 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2082 	CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2084 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2085 * before doing anything else, since a transition from either of
2086 * the HS200/HS400 mode directly to legacy mode is not supported.
2088 if (mmc->selected_mode == MMC_HS_200 ||
2089 mmc->selected_mode == MMC_HS_400)
2090 mmc_set_card_speed(mmc, MMC_HS, true);
2093 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2095 for_each_mmc_mode_by_pref(card_caps, mwt) {
2096 for_each_supported_width(card_caps & mwt->widths,
2097 mmc_is_mode_ddr(mwt->mode), ecbw) {
2098 enum mmc_voltage old_voltage;
2099 pr_debug("trying mode %s width %d (at %d MHz)\n",
2100 mmc_mode_name(mwt->mode),
2101 bus_width(ecbw->cap),
2102 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so it can be restored on failure */
2103 old_voltage = mmc->signal_voltage;
2104 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2105 MMC_ALL_SIGNAL_VOLTAGE);
2109 /* configure the bus width (card + host) */
2110 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2112 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2115 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400(-ES) need dedicated multi-step entry sequences */
2117 if (mwt->mode == MMC_HS_400) {
2118 err = mmc_select_hs400(mmc);
2120 printf("Select HS400 failed %d\n", err);
2123 } else if (mwt->mode == MMC_HS_400_ES) {
2124 err = mmc_select_hs400es(mmc);
2126 printf("Select HS400ES failed %d\n",
2131 /* configure the bus speed (card) */
2132 err = mmc_set_card_speed(mmc, mwt->mode, false);
2137 * configure the bus width AND the ddr mode
2138 * (card). The host side will be taken care
2139 * of in the next step
2141 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2142 err = mmc_switch(mmc,
2143 EXT_CSD_CMD_SET_NORMAL,
2145 ecbw->ext_csd_bits);
2150 /* configure the bus mode (host) */
2151 mmc_select_mode(mmc, mwt->mode);
2152 mmc_set_clock(mmc, mmc->tran_speed,
2154 #ifdef MMC_SUPPORTS_TUNING
2156 /* execute tuning if needed */
2158 err = mmc_execute_tuning(mmc,
2161 pr_debug("tuning failed\n");
2168 /* do a transfer to check the configuration */
2169 err = mmc_read_and_compare_ext_csd(mmc);
2173 mmc_set_signal_voltage(mmc, old_voltage);
2174 /* if an error occurred, revert to a safer bus mode */
2175 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2176 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2177 mmc_select_mode(mmc, MMC_LEGACY);
2178 mmc_set_bus_width(mmc, 1);
2182 pr_err("unable to select a mode\n");
2188 #if CONFIG_IS_ENABLED(MMC_TINY)
2189 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ specific startup: read EXT_CSD and derive version, capacity,
 * partition layout, erase group size and related parameters from it.
 * Returns early (success) for SD cards and pre-v4 MMC.  With MMC_TINY
 * the EXT_CSD is kept in a static buffer; otherwise it is read into a
 * stack buffer and cached in a malloc'd copy on mmc->ext_csd.
 */
2192 static int mmc_startup_v4(struct mmc *mmc)
2196 bool has_parts = false;
2197 bool part_completed;
2198 static const u32 mmc_versions[] = {
2210 #if CONFIG_IS_ENABLED(MMC_TINY)
2211 u8 *ext_csd = ext_csd_bkup;
2213 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2217 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2219 err = mmc_send_ext_csd(mmc, ext_csd);
2223 /* store the ext csd for future reference */
2225 mmc->ext_csd = ext_csd;
2227 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2229 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2232 /* check ext_csd version and capacity */
2233 err = mmc_send_ext_csd(mmc, ext_csd);
2237 /* store the ext csd for future reference */
2239 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2242 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* unknown EXT_CSD_REV values cannot be mapped to a version */
2244 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2247 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2249 if (mmc->version >= MMC_VERSION_4_2) {
2251 * According to the JEDEC Standard, the value of
2252 * ext_csd's capacity is valid if the value is more
/* SEC_CNT is a 4-byte little-endian sector count */
2255 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2256 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2257 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2258 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2259 capacity *= MMC_MAX_BLOCK_LEN;
/* only trust SEC_CNT for capacities above 2 GiB */
2260 if ((capacity >> 20) > 2 * 1024)
2261 mmc->capacity_user = capacity;
2264 if (mmc->version >= MMC_VERSION_4_5)
2265 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2267 /* The partition data may be non-zero but it is only
2268 * effective if PARTITION_SETTING_COMPLETED is set in
2269 * EXT_CSD, so ignore any data if this bit is not set,
2270 * except for enabling the high-capacity group size
2271 * definition (see below).
2273 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2274 EXT_CSD_PARTITION_SETTING_COMPLETED);
2276 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2277 /* Some eMMC set the value too low so set a minimum */
2278 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2279 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2281 /* store the partition info of emmc */
2282 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2283 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2284 ext_csd[EXT_CSD_BOOT_MULT])
2285 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2286 if (part_completed &&
2287 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2288 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot/RPMB sizes are given in 128 KiB units (<< 17) */
2290 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2292 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2294 for (i = 0; i < 4; i++) {
2295 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2296 uint mult = (ext_csd[idx + 2] << 16) +
2297 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2300 if (!part_completed)
2302 mmc->capacity_gp[i] = mult;
2303 mmc->capacity_gp[i] *=
2304 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2305 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* << 19 converts 512 KiB units to bytes */
2306 mmc->capacity_gp[i] <<= 19;
2309 #ifndef CONFIG_SPL_BUILD
2310 if (part_completed) {
2311 mmc->enh_user_size =
2312 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2313 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2314 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2315 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2316 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2317 mmc->enh_user_size <<= 19;
2318 mmc->enh_user_start =
2319 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2320 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2321 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2322 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2323 if (mmc->high_capacity)
2324 mmc->enh_user_start <<= 9;
2329 * Host needs to enable ERASE_GRP_DEF bit if device is
2330 * partitioned. This bit will be lost every time after a reset
2331 * or power off. This will affect erase size.
2335 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2336 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2339 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2340 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy in sync with the card */
2345 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2348 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2349 #if CONFIG_IS_ENABLED(MMC_WRITE)
2350 /* Read out group size from ext_csd */
2351 mmc->erase_grp_size =
2352 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2355 * if high capacity and partition setting completed
2356 * SEC_COUNT is valid even if it is smaller than 2 GiB
2357 * JEDEC Standard JESD84-B45, 6.2.4
2359 if (mmc->high_capacity && part_completed) {
2360 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2361 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2362 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2363 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2364 capacity *= MMC_MAX_BLOCK_LEN;
2365 mmc->capacity_user = capacity;
2368 #if CONFIG_IS_ENABLED(MMC_WRITE)
2370 /* Calculate the group size from the csd value. */
2371 int erase_gsz, erase_gmul;
2373 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2374 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2375 mmc->erase_grp_size = (erase_gsz + 1)
2379 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2380 mmc->hc_wp_grp_size = 1024
2381 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2382 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2385 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2390 #if !CONFIG_IS_ENABLED(MMC_TINY)
2393 mmc->ext_csd = NULL;
/*
 * Full card initialization after the operating conditions have been
 * negotiated: read CID and CSD, assign/fetch the RCA, parse the CSD
 * (version, legacy speed, block lengths, capacity), select the card,
 * run the v4+ EXT_CSD startup, negotiate the best bus mode/width, and
 * finally fill in the block device descriptor.
 */
2398 static int mmc_startup(struct mmc *mmc)
2404 struct blk_desc *bdesc;
2406 #ifdef CONFIG_MMC_SPI_CRC_ON
2407 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2408 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2409 cmd.resp_type = MMC_RSP_R1;
2411 err = mmc_send_cmd(mmc, &cmd, NULL);
2417 /* Put the Card in Identify Mode */
2418 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2419 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2420 cmd.resp_type = MMC_RSP_R2;
2423 err = mmc_send_cmd(mmc, &cmd, NULL);
2425 #ifdef CONFIG_MMC_QUIRKS
2426 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2429 * It has been seen that SEND_CID may fail on the first
2430 * attempt, let's try a few more time
2433 err = mmc_send_cmd(mmc, &cmd, NULL);
2436 } while (retries--);
2443 memcpy(mmc->cid, cmd.response, 16);
2446 * For MMC cards, set the Relative Address.
2447 * For SD cards, get the Relative Address.
2448 * This also puts the cards into Standby State
2450 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2451 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2452 cmd.cmdarg = mmc->rca << 16;
2453 cmd.resp_type = MMC_RSP_R6;
2455 err = mmc_send_cmd(mmc, &cmd, NULL);
2461 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2464 /* Get the Card-Specific Data */
2465 cmd.cmdidx = MMC_CMD_SEND_CSD;
2466 cmd.resp_type = MMC_RSP_R2;
2467 cmd.cmdarg = mmc->rca << 16;
2469 err = mmc_send_cmd(mmc, &cmd, NULL);
2474 mmc->csd[0] = cmd.response[0];
2475 mmc->csd[1] = cmd.response[1];
2476 mmc->csd[2] = cmd.response[2];
2477 mmc->csd[3] = cmd.response[3];
/* derive the MMC spec version from the CSD_STRUCTURE field */
2479 if (mmc->version == MMC_VERSION_UNKNOWN) {
2480 int version = (cmd.response[0] >> 26) & 0xf;
2484 mmc->version = MMC_VERSION_1_2;
2487 mmc->version = MMC_VERSION_1_4;
2490 mmc->version = MMC_VERSION_2_2;
2493 mmc->version = MMC_VERSION_3;
2496 mmc->version = MMC_VERSION_4;
2499 mmc->version = MMC_VERSION_1_2;
2504 /* divide frequency by 10, since the mults are 10x bigger */
2505 freq = fbase[(cmd.response[0] & 0x7)];
2506 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2508 mmc->legacy_speed = freq * mult;
2509 mmc_select_mode(mmc, MMC_LEGACY);
2511 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2512 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2513 #if CONFIG_IS_ENABLED(MMC_WRITE)
2516 mmc->write_bl_len = mmc->read_bl_len;
2518 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* CSD capacity fields differ between standard and high capacity */
2521 if (mmc->high_capacity) {
2522 csize = (mmc->csd[1] & 0x3f) << 16
2523 | (mmc->csd[2] & 0xffff0000) >> 16;
2526 csize = (mmc->csd[1] & 0x3ff) << 2
2527 | (mmc->csd[2] & 0xc0000000) >> 30;
2528 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2531 mmc->capacity_user = (csize + 1) << (cmult + 2);
2532 mmc->capacity_user *= mmc->read_bl_len;
2533 mmc->capacity_boot = 0;
2534 mmc->capacity_rpmb = 0;
2535 for (i = 0; i < 4; i++)
2536 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the stack supports */
2538 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2539 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2541 #if CONFIG_IS_ENABLED(MMC_WRITE)
2542 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2543 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only if implemented and a value was configured */
2546 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2547 cmd.cmdidx = MMC_CMD_SET_DSR;
2548 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2549 cmd.resp_type = MMC_RSP_NONE;
2550 if (mmc_send_cmd(mmc, &cmd, NULL))
2551 pr_warn("MMC: SET_DSR failed\n");
2554 /* Select the card, and put it into Transfer Mode */
2555 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2556 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2557 cmd.resp_type = MMC_RSP_R1;
2558 cmd.cmdarg = mmc->rca << 16;
2559 err = mmc_send_cmd(mmc, &cmd, NULL);
2566 * For SD, its erase group is always one sector
2568 #if CONFIG_IS_ENABLED(MMC_WRITE)
2569 mmc->erase_grp_size = 1;
2571 mmc->part_config = MMCPART_NOAVAILABLE;
2573 err = mmc_startup_v4(mmc);
2577 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2581 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY builds stay in 1-bit legacy mode to save code size */
2582 mmc_set_clock(mmc, mmc->legacy_speed, false);
2583 mmc_select_mode(mmc, MMC_LEGACY);
2584 mmc_set_bus_width(mmc, 1);
2587 err = sd_get_capabilities(mmc);
2590 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2592 err = mmc_get_capabilities(mmc);
2595 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2601 mmc->best_mode = mmc->selected_mode;
2603 /* Fix the block length for DDR mode */
2604 if (mmc->ddr_mode) {
2605 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2606 #if CONFIG_IS_ENABLED(MMC_WRITE)
2607 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2611 /* fill in device description */
2612 bdesc = mmc_get_blk_desc(mmc);
2616 bdesc->blksz = mmc->read_bl_len;
2617 bdesc->log2blksz = LOG2(bdesc->blksz);
2618 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2619 #if !defined(CONFIG_SPL_BUILD) || \
2620 	(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2621 	!CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* decode manufacturer/serial, product name and revision from the CID */
2622 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2623 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2624 (mmc->cid[3] >> 16) & 0xffff);
2625 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2626 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2627 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2628 (mmc->cid[2] >> 24) & 0xff);
2629 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2630 (mmc->cid[2] >> 16) & 0xf);
2632 bdesc->vendor[0] = 0;
2633 bdesc->product[0] = 0;
2634 bdesc->revision[0] = 0;
2637 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * Send SD CMD8 (SEND_IF_COND) to detect an SD v2.00+ card.  The card
 * must echo back the check pattern 0xaa; if it does, the card is marked
 * as SD version 2.  Older cards simply do not respond to this command.
 */
2644 static int mmc_send_if_cond(struct mmc *mmc)
2649 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2650 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2651 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2652 cmd.resp_type = MMC_RSP_R7;
2654 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the 0xaa check pattern back */
2659 if ((cmd.response[0] & 0xff) != 0xaa)
2662 mmc->version = SD_VERSION_2;
2667 #if !CONFIG_IS_ENABLED(DM_MMC)
2668 /* board-specific MMC power initializations. */
2669 __weak void board_mmc_power_init(void)
/*
 * Look up the card's power supplies.  With DM_MMC + DM_REGULATOR this
 * resolves the "vmmc-supply" and "vqmmc-supply" regulators from the
 * device tree (missing supplies are only logged, not fatal).  Without
 * driver model it falls back to the board_mmc_power_init() hook.
 */
2674 static int mmc_power_init(struct mmc *mmc)
2676 #if CONFIG_IS_ENABLED(DM_MMC)
2677 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2680 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2683 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2685 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2686 &mmc->vqmmc_supply);
2688 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2690 #else /* !CONFIG_DM_MMC */
2692 * Driver model should use a regulator, as above, rather than calling
2693 * out to board code.
2695 board_mmc_power_init();
2701 * put the host in the initial state:
2702 * - turn on Vdd (card power supply)
2703 * - configure the bus width and clock to minimal values
2701 * put the host in the initial state:
2702 * - turn on Vdd (card power supply)
2703 * - configure the bus width and clock to minimal values
2705 static void mmc_set_initial_state(struct mmc *mmc)
2709 /* First try to set 3.3V. If it fails set to 1.8V */
2710 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2712 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2714 pr_warn("mmc: failed to set signal voltage\n");
/* start in legacy mode on a 1-bit bus at the minimum clock */
2716 mmc_select_mode(mmc, MMC_LEGACY);
2717 mmc_set_bus_width(mmc, 1);
2718 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/*
 * Enable card power.  With DM_MMC + DM_REGULATOR this turns on the
 * vmmc regulator (an enable failure is reported); the remainder of the
 * function (delay/return path) is outside the visible lines.
 */
2721 static int mmc_power_on(struct mmc *mmc)
2723 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2724 if (mmc->vmmc_supply) {
2725 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2728 puts("Error enabling VMMC supply\n");
/*
 * Disable card power: gate the clock, then (with DM regulators) turn
 * off the vmmc supply.  A regulator failure is only logged at debug
 * level since some supplies cannot be switched off.
 */
2736 static int mmc_power_off(struct mmc *mmc)
2738 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2739 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2740 if (mmc->vmmc_supply) {
2741 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2744 pr_debug("Error disabling VMMC supply\n");
/*
 * Power-cycle the card: power off, let the host perform any extra
 * cycle handling, wait for the supply to drain, then power back on.
 * Needed to recover a card after a failed UHS voltage switch.
 */
2752 static int mmc_power_cycle(struct mmc *mmc)
2756 ret = mmc_power_off(mmc);
2760 ret = mmc_host_power_cycle(mmc);
2765 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2766 * to be on the safer side.
2769 return mmc_power_on(mmc);
/*
 * Query the card's operating conditions: set up power, reset the card
 * (CMD0), probe for SD v2 (CMD8), then negotiate the operating voltage
 * via SD_SEND_OP_COND, falling back to MMC_SEND_OP_COND on timeout.
 * UHS modes are disabled up-front if the board cannot power-cycle the
 * card, since UHS error recovery requires a power cycle.
 */
2772 int mmc_get_op_cond(struct mmc *mmc)
2774 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2780 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2781 mmc_adapter_card_type_ident();
2783 err = mmc_power_init(mmc);
2787 #ifdef CONFIG_MMC_QUIRKS
2788 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2789 MMC_QUIRK_RETRY_SEND_CID |
2790 MMC_QUIRK_RETRY_APP_CMD;
2793 err = mmc_power_cycle(mmc);
2796 * if power cycling is not supported, we should not try
2797 * to use the UHS modes, because we wouldn't be able to
2798 * recover from an error during the UHS initialization.
2800 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2802 mmc->host_caps &= ~UHS_CAPS;
2803 err = mmc_power_on(mmc);
2808 #if CONFIG_IS_ENABLED(DM_MMC)
2809 /* The device has already been probed ready for use */
2811 /* made sure it's not NULL earlier */
2812 err = mmc->cfg->ops->init(mmc);
2819 mmc_set_initial_state(mmc);
2821 /* Reset the Card */
2822 err = mmc_go_idle(mmc);
2827 /* The internal partition reset to user partition(0) at every CMD0 */
2828 mmc_get_blk_desc(mmc)->hwpart = 0;
2830 /* Test for SD version 2 */
2831 err = mmc_send_if_cond(mmc);
2833 /* Now try to get the SD card's operating condition */
2834 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS negotiation needs a power cycle before retrying */
2835 if (err && uhs_en) {
2837 mmc_power_cycle(mmc);
2841 /* If the command timed out, we check for an MMC card */
2842 if (err == -ETIMEDOUT) {
2843 err = mmc_send_op_cond(mmc);
2846 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2847 pr_err("Card did not respond to voltage select!\n");
/*
 * Begin card initialization: build the host capability mask, check for
 * card presence, then query the card's operating conditions.  The
 * remaining init work is finished later by mmc_complete_init(), which
 * allows the (slow) op-cond polling to overlap with other boot work.
 */
2856 int mmc_start_init(struct mmc *mmc)
2862 	 * all hosts are capable of 1 bit bus-width and able to use the legacy
/* Fix: MMC_CAP(MMC_LEGACY) was previously OR-ed in twice; once suffices */
2865 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2866 MMC_MODE_1BIT;
2867 #if CONFIG_IS_ENABLED(DM_MMC)
2868 mmc_deferred_probe(mmc);
2870 #if !defined(CONFIG_MMC_BROKEN_CD)
/* card-detect: 0 from mmc_getcd() means no card inserted */
2871 no_card = mmc_getcd(mmc) == 0;
2875 #if !CONFIG_IS_ENABLED(DM_MMC)
2876 /* we pretend there's no card when init is NULL */
2877 no_card = no_card || (mmc->cfg->ops->init == NULL);
2881 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2882 pr_err("MMC: no card present\n");
2887 err = mmc_get_op_cond(mmc);
/* flag that mmc_complete_init() still has to run */
2890 mmc->init_in_progress = 1;
/*
 * Finish the initialization started by mmc_start_init(): complete the
 * pending op-cond negotiation if one is outstanding, then run the full
 * mmc_startup() sequence.  Clears init_in_progress either way.
 */
2895 static int mmc_complete_init(struct mmc *mmc)
2899 mmc->init_in_progress = 0;
2900 if (mmc->op_cond_pending)
2901 err = mmc_complete_op_cond(mmc);
2904 err = mmc_startup(mmc);
/*
 * Public entry point: run the whole init sequence (start + complete)
 * unless an init is already in progress, and log the elapsed time.
 */
2912 int mmc_init(struct mmc *mmc)
2915 __maybe_unused ulong start;
2916 #if CONFIG_IS_ENABLED(DM_MMC)
2917 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2924 start = get_timer(0);
2926 if (!mmc->init_in_progress)
2927 err = mmc_start_init(mmc);
2930 err = mmc_complete_init(mmc);
2932 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2937 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2938 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2939 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Downgrade the card out of the high-speed modes (UHS for SD, HS200/
 * HS400 for eMMC) by re-running mode selection with those capability
 * bits masked off.  Used before handing the card to an OS that must
 * re-initialize it from a safe state.
 */
2940 int mmc_deinit(struct mmc *mmc)
2948 caps_filtered = mmc->card_caps &
2949 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2950 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2951 MMC_CAP(UHS_SDR104));
2953 return sd_select_mode_and_width(mmc, caps_filtered);
2955 caps_filtered = mmc->card_caps &
2956 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2958 return mmc_select_mode_and_width(mmc, caps_filtered);
2963 int mmc_set_dsr(struct mmc *mmc, u16 val)
2969 /* CPU-specific MMC initializations */
2970 __weak int cpu_mmc_init(bd_t *bis)
2975 /* board-specific MMC initializations. */
2976 __weak int board_mmc_init(bd_t *bis)
2981 void mmc_set_preinit(struct mmc *mmc, int preinit)
2983 mmc->preinit = preinit;
2986 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Driver-model MMC enumeration: request each device by sequence number
 * so they are bound in order, then probe every device in the uclass.
 * Probe failures are logged but do not abort the loop.
 */
2987 static int mmc_probe(bd_t *bis)
2991 struct udevice *dev;
2993 ret = uclass_get(UCLASS_MMC, &uc);
2998 * Try to add them in sequence order. Really with driver model we
2999 * should allow holes, but the current MMC list does not allow that.
3000 * So if we request 0, 1, 3 we will get 0, 1, 2.
3002 for (i = 0; ; i++) {
3003 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3007 uclass_foreach_dev(dev, uc) {
3008 ret = device_probe(dev);
3010 pr_err("%s - probe failed: %d\n", dev->name, ret);
3016 static int mmc_probe(bd_t *bis)
3018 if (board_mmc_init(bis) < 0)
/*
 * One-time MMC subsystem initialization: probe all MMC devices and
 * print the device list (outside SPL).  Guarded so repeated calls are
 * no-ops.
 */
3025 int mmc_initialize(bd_t *bis)
3027 static int initialized = 0;
3029 if (initialized) /* Avoid initializing mmc multiple times */
3033 #if !CONFIG_IS_ENABLED(BLK)
3034 #if !CONFIG_IS_ENABLED(MMC_TINY)
3038 ret = mmc_probe(bis);
3042 #ifndef CONFIG_SPL_BUILD
3043 print_mmc_devices(',');
3050 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Initialize MMC device number @num: get (and thereby probe) the
 * uclass device, fetch its struct mmc, and apply board-specific
 * pre-init handling where configured.
 */
3051 int mmc_init_device(int num)
3053 struct udevice *dev;
3057 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3061 m = mmc_get_mmc_dev(dev);
3064 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3065 mmc_set_preinit(m, 1);
3074 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device.
 * Reads EXT_CSD to verify the card supports BKOPS and that it is not
 * already enabled, then sets EXT_CSD_BKOPS_EN via CMD6.  Note this is
 * a one-time-programmable bit on the card.
 */
3075 int mmc_set_bkops_enable(struct mmc *mmc)
3078 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3080 err = mmc_send_ext_csd(mmc, ext_csd);
3082 puts("Could not get ext_csd register values\n");
3086 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3087 puts("Background operations not supported on device\n");
3088 return -EMEDIUMTYPE;
3091 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3092 puts("Background operations already enabled\n");
3096 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3098 puts("Failed to enable manual background operations\n");
3102 puts("Enabled manual background operations\n");