1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
15 #include <dm/device-internal.h>
19 #include <linux/bitops.h>
20 #include <linux/delay.h>
21 #include <power/regulator.h>
24 #include <linux/list.h>
26 #include "mmc_private.h"
28 #define DEFAULT_CMD6_TIMEOUT_MS 500
30 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
32 #if !CONFIG_IS_ENABLED(DM_MMC)
34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
39 __weak int board_mmc_getwp(struct mmc *mmc)
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * mmc_mode_name() - map a bus_mode enum value to a human-readable name.
 *
 * Used by the verbose/debug capability dump code; compiled only when
 * MMC_VERBOSE or DEBUG is enabled (see the surrounding #if).
 *
 * NOTE(review): this listing is a sampled fragment — interior lines
 * (braces, the final return of names[mode]) are not visible here.
 */
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [MMC_HS] = "MMC High Speed (26MHz)",
140 [SD_HS] = "SD High Speed (50MHz)",
141 [UHS_SDR12] = "UHS SDR12 (25MHz)",
142 [UHS_SDR25] = "UHS SDR25 (50MHz)",
143 [UHS_SDR50] = "UHS SDR50 (100MHz)",
144 [UHS_SDR104] = "UHS SDR104 (208MHz)",
145 [UHS_DDR50] = "UHS DDR50 (50MHz)",
146 [MMC_HS_52] = "MMC High Speed (52MHz)",
147 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
148 [MMC_HS_200] = "HS200 (200MHz)",
149 [MMC_HS_400] = "HS400 (200MHz)",
150 [MMC_HS_400_ES] = "HS400ES (200MHz)",
/* Out-of-range modes get a safe fallback string rather than reading past the table */
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
/*
 * mmc_mode2freq() - nominal bus clock (Hz) for a given bus mode.
 *
 * MMC_LEGACY is special-cased to the probed legacy speed of this card
 * rather than the table value; all other valid modes come from the
 * static lookup table.
 *
 * NOTE(review): sampled fragment — the out-of-range return and the
 * final table lookup/return are not visible in this listing.
 */
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
166 [MMC_HS_52] = 52000000,
167 [MMC_DDR_52] = 52000000,
168 [UHS_SDR12] = 25000000,
169 [UHS_SDR25] = 50000000,
170 [UHS_SDR50] = 100000000,
171 [UHS_DDR50] = 50000000,
172 [UHS_SDR104] = 208000000,
173 [MMC_HS_200] = 200000000,
174 [MMC_HS_400] = 200000000,
175 [MMC_HS_400_ES] = 200000000,
/* Legacy speed is per-card, not a fixed constant */
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the chosen bus mode on the mmc struct.
 *
 * Updates selected_mode, the nominal transfer speed for that mode, and
 * the DDR flag. This only updates host-side bookkeeping; the actual
 * clock/ios change is done by the callers via mmc_set_clock()/set_ios.
 */
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_send_cmd() - non-DM path: forward a command to the host driver's
 * send_cmd op, wrapped with the optional CONFIG_MMC_TRACE hooks.
 * @data may be NULL for commands without a data phase.
 */
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * mmc_send_status() - read the card status register with CMD13.
 *
 * On success *status holds response[0] (the R1 card status word).
 *
 * NOTE(review): sampled fragment — the retry loop implied by
 * 'retries = 5', the braces and the return paths are not visible here.
 */
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
/* SPI hosts have no RCA; only native-mode cards are addressed */
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
/*
 * mmc_poll_for_busy() - wait until the card leaves the busy/programming
 * state or timeout_ms expires.
 *
 * First tries the hardware dat0 wait (mmc_wait_dat0); falls back to
 * polling CMD13 status: ready means RDY_FOR_DATA set and current state
 * no longer PRG. Any error bit in MMC_STATUS_MASK is reported.
 *
 * NOTE(review): sampled fragment — loop construct, early returns and
 * the udelay between polls are not visible in this listing.
 */
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
236 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
241 err = mmc_send_status(mmc, &status);
245 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
246 (status & MMC_STATUS_CURR_STATE) !=
/* Card reported an error bit in its status word */
250 if (status & MMC_STATUS_MASK) {
251 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
252 pr_err("Status Error: 0x%08x\n", status);
257 if (timeout_ms-- <= 0)
263 if (timeout_ms <= 0) {
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n");
/*
 * mmc_set_blocklen() - issue CMD16 SET_BLOCKLEN.
 *
 * With CONFIG_MMC_QUIRKS, retries the command for cards known to fail
 * the first attempt (MMC_QUIRK_RETRY_SET_BLOCKLEN).
 *
 * NOTE(review): sampled fragment — the DDR-mode early-out and the retry
 * loop body are not visible in this listing.
 */
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292 * attempt, let's try a few more times
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * mmc_send_tuning() - send a tuning command and compare the returned
 * block against the expected JEDEC tuning pattern.
 *
 * The pattern (and its size) is chosen by the current bus width:
 * 128-byte pattern for 8-bit, 64-byte pattern for 4-bit.
 * Returns non-zero (memcmp mismatch path visible below) when the read
 * data does not match the reference pattern.
 *
 * NOTE(review): sampled fragment — the unsupported-width error path,
 * cmd.cmdidx assignment from @opcode, and returns are not visible.
 */
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
368 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * mmc_read_blocks() - read blkcnt blocks starting at @start into @dst.
 *
 * Picks CMD18 (multiple) or CMD17 (single) depending on count; the
 * command argument is a block address for high-capacity cards and a
 * byte address (start * read_bl_len) otherwise. Multi-block reads are
 * terminated with CMD12 STOP_TRANSMISSION.
 *
 * NOTE(review): sampled fragment — the blkcnt>1 condition, the
 * high-capacity argument assignment, and the return of the block
 * count are not visible in this listing.
 */
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
386 if (mmc->high_capacity)
/* Byte addressing for standard-capacity cards */
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
/* Multi-block transfers must be closed with CMD12 */
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n");
416 #if !CONFIG_IS_ENABLED(DM_MMC)
417 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
419 if (mmc->cfg->ops->get_b_max)
420 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
422 return mmc->cfg->b_max;
/*
 * mmc_bread() - block-layer read entry point.
 *
 * Two prototypes exist depending on CONFIG_BLK: the driver-model
 * variant takes a udevice, the legacy variant a blk_desc. Both resolve
 * to the same body: select the hardware partition, range-check the
 * request against the device size, set the block length, then read in
 * chunks of at most b_max blocks until blocks_todo reaches zero.
 *
 * NOTE(review): sampled fragment — the do/while loop opening, the
 * blocks_todo/start updates, and the final return of blkcnt are not
 * visible in this listing.
 */
426 #if CONFIG_IS_ENABLED(BLK)
427 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
429 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
433 #if CONFIG_IS_ENABLED(BLK)
434 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
436 int dev_num = block_dev->devnum;
438 lbaint_t cur, blocks_todo = blkcnt;
444 struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY bypasses the block uclass and switches the hw partition directly */
448 if (CONFIG_IS_ENABLED(MMC_TINY))
449 err = mmc_switch_part(mmc, block_dev->hwpart);
451 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Refuse reads past the end of the device */
456 if ((start + blkcnt) > block_dev->lba) {
457 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
458 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
459 start + blkcnt, block_dev->lba);
464 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
465 pr_debug("%s: Failed to set blocklen\n", __func__);
/* Host may cap the per-transfer block count below cfg->b_max */
469 b_max = mmc_get_b_max(mmc, dst, blkcnt);
472 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
473 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
474 pr_debug("%s: Failed to read blocks\n", __func__);
479 dst += cur * mmc->read_bl_len;
480 } while (blocks_todo > 0);
485 static int mmc_go_idle(struct mmc *mmc)
492 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
494 cmd.resp_type = MMC_RSP_NONE;
496 err = mmc_send_cmd(mmc, &cmd, NULL);
506 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - perform the SD CMD11 voltage switch sequence.
 *
 * For a 3.3V request no CMD11 is needed, only the host-side voltage is
 * set. Otherwise: send CMD11, wait for the card to pull dat0 low,
 * gate the clock, change the host signal voltage, re-enable the clock,
 * then wait for the card to release dat0 (failure leaves dat[0:3] low).
 *
 * NOTE(review): sampled fragment — braces, error-bail paths after each
 * step, and the final return are not visible in this listing.
 */
507 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
513 * Send CMD11 only if the request is to switch the card to
516 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
517 return mmc_set_signal_voltage(mmc, signal_voltage);
519 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
521 cmd.resp_type = MMC_RSP_R1;
523 err = mmc_send_cmd(mmc, &cmd, NULL);
/* Card-reported error in the CMD11 response aborts the switch */
527 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
531 * The card should drive cmd and dat[0:3] low immediately
532 * after the response of cmd11, but wait 100 us to be sure
534 err = mmc_wait_dat0(mmc, 0, 100);
541 * During a signal voltage level switch, the clock must be gated
542 * for 5 ms according to the SD spec
544 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
546 err = mmc_set_signal_voltage(mmc, signal_voltage);
550 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
552 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
555 * Failure to switch is indicated by the card holding
556 * dat[0:3] low. Wait for at least 1 ms according to spec
558 err = mmc_wait_dat0(mmc, 1, 1000);
568 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
575 cmd.cmdidx = MMC_CMD_APP_CMD;
576 cmd.resp_type = MMC_RSP_R1;
579 err = mmc_send_cmd(mmc, &cmd, NULL);
584 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
585 cmd.resp_type = MMC_RSP_R3;
588 * Most cards do not answer if some reserved bits
589 * in the ocr are set. However, Some controller
590 * can set bit 7 (reserved for low voltages), but
591 * how to manage low voltages SD card is not yet
594 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
595 (mmc->cfg->voltages & 0xff8000);
597 if (mmc->version == SD_VERSION_2)
598 cmd.cmdarg |= OCR_HCS;
601 cmd.cmdarg |= OCR_S18R;
603 err = mmc_send_cmd(mmc, &cmd, NULL);
608 if (cmd.response[0] & OCR_BUSY)
617 if (mmc->version != SD_VERSION_2)
618 mmc->version = SD_VERSION_1_0;
620 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
621 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
622 cmd.resp_type = MMC_RSP_R3;
625 err = mmc_send_cmd(mmc, &cmd, NULL);
631 mmc->ocr = cmd.response[0];
633 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
634 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
636 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
642 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - one CMD1 SEND_OP_COND iteration.
 *
 * When @use_arg is set (and not in SPI mode) the argument advertises
 * high-capacity support plus the intersection of host voltages with
 * the OCR the card reported on a previous iteration. The card's OCR
 * reply is stored back into mmc->ocr for the caller's busy-bit check.
 */
648 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
653 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
654 cmd.resp_type = MMC_RSP_R3;
656 if (use_arg && !mmc_host_is_spi(mmc))
657 cmd.cmdarg = OCR_HCS |
658 (mmc->cfg->voltages &
659 (mmc->ocr & OCR_VOLTAGE_MASK)) |
660 (mmc->ocr & OCR_ACCESS_MODE);
662 err = mmc_send_cmd(mmc, &cmd, NULL);
665 mmc->ocr = cmd.response[0];
669 static int mmc_send_op_cond(struct mmc *mmc)
675 /* Some cards seem to need this */
678 start = get_timer(0);
679 /* Asking to the card its capabilities */
681 err = mmc_send_op_cond_iter(mmc, i != 0);
685 /* exit if not busy (flag seems to be inverted) */
686 if (mmc->ocr & OCR_BUSY)
689 if (get_timer(start) > timeout)
693 mmc->op_cond_pending = 1;
697 static int mmc_complete_op_cond(struct mmc *mmc)
704 mmc->op_cond_pending = 0;
705 if (!(mmc->ocr & OCR_BUSY)) {
706 /* Some cards seem to need this */
709 start = get_timer(0);
711 err = mmc_send_op_cond_iter(mmc, 1);
714 if (mmc->ocr & OCR_BUSY)
716 if (get_timer(start) > timeout)
722 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
723 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
724 cmd.resp_type = MMC_RSP_R3;
727 err = mmc_send_cmd(mmc, &cmd, NULL);
732 mmc->ocr = cmd.response[0];
735 mmc->version = MMC_VERSION_UNKNOWN;
737 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
744 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
747 struct mmc_data data;
750 /* Get the Card Status Register */
751 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
752 cmd.resp_type = MMC_RSP_R1;
755 data.dest = (char *)ext_csd;
757 data.blocksize = MMC_MAX_BLOCK_LEN;
758 data.flags = MMC_DATA_READ;
760 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * __mmc_switch() - write one EXT_CSD byte via CMD6 and wait for the
 * card to finish the switch.
 *
 * The timeout defaults to DEFAULT_CMD6_TIMEOUT_MS but is widened from
 * EXT_CSD GENERIC_CMD6_TIME (and PARTITION_SWITCH_TIME for a partition
 * switch, both in 10ms units). Completion is detected preferably via
 * dat0 (mmc_wait_dat0); if the host can't do that (-ENOSYS) and
 * @send_status is false we just trust the stated timeout, otherwise we
 * poll CMD13 until RDY_FOR_DATA or SWITCH_ERROR.
 *
 * NOTE(review): sampled fragment — the CMD6 retry loop opening, several
 * returns, and the inter-poll delay are not visible in this listing.
 */
765 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
768 unsigned int status, start;
770 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
771 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
772 (index == EXT_CSD_PART_CONF);
776 if (mmc->gen_cmd6_time)
777 timeout_ms = mmc->gen_cmd6_time * 10;
779 if (is_part_switch && mmc->part_switch_time)
780 timeout_ms = mmc->part_switch_time * 10;
782 cmd.cmdidx = MMC_CMD_SWITCH;
783 cmd.resp_type = MMC_RSP_R1b;
784 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
789 ret = mmc_send_cmd(mmc, &cmd, NULL);
790 } while (ret && retries-- > 0);
795 start = get_timer(0);
797 /* poll dat0 for rdy/busy status */
798 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
799 if (ret && ret != -ENOSYS)
803 * In cases when not allowed to poll by using CMD13 or because we aren't
804 * capable of polling by using mmc_wait_dat0, then rely on waiting the
805 * stated timeout to be sufficient.
807 if (ret == -ENOSYS && !send_status)
810 /* Finally wait until the card is ready or indicates a failure
811 * to switch. It doesn't hurt to use CMD13 here even if send_status
812 * is false, because by now (after 'timeout_ms' ms) the bus should be
816 ret = mmc_send_status(mmc, &status);
818 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
819 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
823 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
826 } while (get_timer(start) < timeout_ms);
831 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
833 return __mmc_switch(mmc, set, index, value, true);
836 int mmc_boot_wp(struct mmc *mmc)
838 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
841 #if !CONFIG_IS_ENABLED(MMC_TINY)
842 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
848 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
854 speed_bits = EXT_CSD_TIMING_HS;
856 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
858 speed_bits = EXT_CSD_TIMING_HS200;
861 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
863 speed_bits = EXT_CSD_TIMING_HS400;
866 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
868 speed_bits = EXT_CSD_TIMING_HS400;
872 speed_bits = EXT_CSD_TIMING_LEGACY;
878 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
879 speed_bits, !hsdowngrade);
883 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
884 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
886 * In case the eMMC is in HS200/HS400 mode and we are downgrading
887 * to HS mode, the card clock are still running much faster than
888 * the supported HS mode clock, so we can not reliably read out
889 * Extended CSD. Reconfigure the controller to run at HS mode.
892 mmc_select_mode(mmc, MMC_HS);
893 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
897 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
898 /* Now check to see that it worked */
899 err = mmc_send_ext_csd(mmc, test_csd);
903 /* No high-speed support */
904 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive mmc->card_caps from the cached
 * EXT_CSD CARD_TYPE byte.
 *
 * Starts from the 1-bit/legacy baseline, bails out for SPI hosts and
 * pre-4.0 cards (no high-speed), then ORs in bus-width and speed-mode
 * capability flags according to the CARD_TYPE bits and the enabled
 * HS200/HS400/HS400ES configs. HS400ES additionally requires the
 * enhanced-strobe support byte.
 *
 * NOTE(review): sampled fragment — the returns between sections and
 * some closing braces are not visible in this listing.
 */
911 static int mmc_get_capabilities(struct mmc *mmc)
913 u8 *ext_csd = mmc->ext_csd;
916 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
918 if (mmc_host_is_spi(mmc))
921 /* Only version 4 supports high-speed */
922 if (mmc->version < MMC_VERSION_4)
926 pr_err("No ext_csd found!\n"); /* this should never happen */
930 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
932 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
933 mmc->cardtype = cardtype;
935 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
936 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
937 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
938 mmc->card_caps |= MMC_MODE_HS200;
941 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
942 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
943 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
944 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
945 mmc->card_caps |= MMC_MODE_HS400;
948 if (cardtype & EXT_CSD_CARD_TYPE_52) {
949 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
950 mmc->card_caps |= MMC_MODE_DDR_52MHz;
951 mmc->card_caps |= MMC_MODE_HS_52MHz;
953 if (cardtype & EXT_CSD_CARD_TYPE_26)
954 mmc->card_caps |= MMC_MODE_HS;
956 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* HS400ES needs both HS400 capability and enhanced-strobe support */
957 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
958 (mmc->card_caps & MMC_MODE_HS400)) {
959 mmc->card_caps |= MMC_MODE_HS400_ES;
967 static int mmc_set_capacity(struct mmc *mmc, int part_num)
971 mmc->capacity = mmc->capacity_user;
975 mmc->capacity = mmc->capacity_boot;
978 mmc->capacity = mmc->capacity_rpmb;
984 mmc->capacity = mmc->capacity_gp[part_num - 4];
990 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * mmc_switch_part() - select a hardware partition via EXT_CSD
 * PART_CONF, preserving the non-access bits of part_config.
 *
 * Retries the CMD6 switch on failure. On success — or on -ENODEV when
 * returning to the raw device (part_num == 0) — the block descriptor's
 * capacity and hwpart are updated to match the new partition.
 *
 * NOTE(review): sampled fragment — the retry-count initialization, the
 * do-loop opening and the final return are not visible in this listing.
 */
995 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1001 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1003 (mmc->part_config & ~PART_ACCESS_MASK)
1004 | (part_num & PART_ACCESS_MASK));
1005 } while (ret && retry--);
1008 * Set the capacity if the switch succeeded or was intended
1009 * to return to representing the raw device.
1011 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1012 ret = mmc_set_capacity(mmc, part_num);
1013 mmc_get_blk_desc(mmc)->hwpart = part_num;
1019 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1020 int mmc_hwpart_config(struct mmc *mmc,
1021 const struct mmc_hwpart_conf *conf,
1022 enum mmc_hwpart_conf_mode mode)
1027 u32 gp_size_mult[4];
1028 u32 max_enh_size_mult;
1029 u32 tot_enh_size_mult = 0;
1032 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1034 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1037 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1038 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1039 return -EMEDIUMTYPE;
1042 if (!(mmc->part_support & PART_SUPPORT)) {
1043 pr_err("Card does not support partitioning\n");
1044 return -EMEDIUMTYPE;
1047 if (!mmc->hc_wp_grp_size) {
1048 pr_err("Card does not define HC WP group size\n");
1049 return -EMEDIUMTYPE;
1052 /* check partition alignment and total enhanced size */
1053 if (conf->user.enh_size) {
1054 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1055 conf->user.enh_start % mmc->hc_wp_grp_size) {
1056 pr_err("User data enhanced area not HC WP group "
1060 part_attrs |= EXT_CSD_ENH_USR;
1061 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1062 if (mmc->high_capacity) {
1063 enh_start_addr = conf->user.enh_start;
1065 enh_start_addr = (conf->user.enh_start << 9);
1071 tot_enh_size_mult += enh_size_mult;
1073 for (pidx = 0; pidx < 4; pidx++) {
1074 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1075 pr_err("GP%i partition not HC WP group size "
1076 "aligned\n", pidx+1);
1079 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1080 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1081 part_attrs |= EXT_CSD_ENH_GP(pidx);
1082 tot_enh_size_mult += gp_size_mult[pidx];
1086 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1087 pr_err("Card does not support enhanced attribute\n");
1088 return -EMEDIUMTYPE;
1091 err = mmc_send_ext_csd(mmc, ext_csd);
1096 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1097 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1098 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1099 if (tot_enh_size_mult > max_enh_size_mult) {
1100 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1101 tot_enh_size_mult, max_enh_size_mult);
1102 return -EMEDIUMTYPE;
1105 /* The default value of EXT_CSD_WR_REL_SET is device
1106 * dependent, the values can only be changed if the
1107 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1108 * changed only once and before partitioning is completed. */
1109 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1110 if (conf->user.wr_rel_change) {
1111 if (conf->user.wr_rel_set)
1112 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1114 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1116 for (pidx = 0; pidx < 4; pidx++) {
1117 if (conf->gp_part[pidx].wr_rel_change) {
1118 if (conf->gp_part[pidx].wr_rel_set)
1119 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1121 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1125 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1126 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1127 puts("Card does not support host controlled partition write "
1128 "reliability settings\n");
1129 return -EMEDIUMTYPE;
1132 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1133 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1134 pr_err("Card already partitioned\n");
1138 if (mode == MMC_HWPART_CONF_CHECK)
1141 /* Partitioning requires high-capacity size definitions */
1142 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1143 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1144 EXT_CSD_ERASE_GROUP_DEF, 1);
1149 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1151 #if CONFIG_IS_ENABLED(MMC_WRITE)
1152 /* update erase group size to be high-capacity */
1153 mmc->erase_grp_size =
1154 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1159 /* all OK, write the configuration */
1160 for (i = 0; i < 4; i++) {
1161 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1162 EXT_CSD_ENH_START_ADDR+i,
1163 (enh_start_addr >> (i*8)) & 0xFF);
1167 for (i = 0; i < 3; i++) {
1168 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1169 EXT_CSD_ENH_SIZE_MULT+i,
1170 (enh_size_mult >> (i*8)) & 0xFF);
1174 for (pidx = 0; pidx < 4; pidx++) {
1175 for (i = 0; i < 3; i++) {
1176 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1177 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1178 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1183 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1184 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1188 if (mode == MMC_HWPART_CONF_SET)
1191 /* The WR_REL_SET is a write-once register but shall be
1192 * written before setting PART_SETTING_COMPLETED. As it is
1193 * write-once we can only write it when completing the
1195 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1196 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1197 EXT_CSD_WR_REL_SET, wr_rel_set);
1202 /* Setting PART_SETTING_COMPLETED confirms the partition
1203 * configuration but it only becomes effective after power
1204 * cycle, so we do not adjust the partition related settings
1205 * in the mmc struct. */
1207 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1208 EXT_CSD_PARTITION_SETTING,
1209 EXT_CSD_PARTITION_SETTING_COMPLETED);
1217 #if !CONFIG_IS_ENABLED(DM_MMC)
1218 int mmc_getcd(struct mmc *mmc)
1222 cd = board_mmc_getcd(mmc);
1225 if (mmc->cfg->ops->getcd)
1226 cd = mmc->cfg->ops->getcd(mmc);
1235 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * sd_switch() - issue SD CMD6 SWITCH_FUNC for one function group.
 *
 * Builds the argument with all groups set to "no change" (0xf nibble
 * pattern via 0xffffff), then overwrites the nibble of @group with
 * @value. The 64-byte switch status block is read into @resp.
 *
 * NOTE(review): 'mode << 31' on a signed int is undefined behavior
 * when mode == 1 (SD_SWITCH_SWITCH); consider (u32)mode << 31. Left
 * unchanged here since this listing is an incomplete fragment.
 */
1236 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1239 struct mmc_data data;
1241 /* Switch the frequency */
1242 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1243 cmd.resp_type = MMC_RSP_R1;
1244 cmd.cmdarg = (mode << 31) | 0xffffff;
1245 cmd.cmdarg &= ~(0xf << (group * 4));
1246 cmd.cmdarg |= value << (group * 4);
1248 data.dest = (char *)resp;
1249 data.blocksize = 64;
1251 data.flags = MMC_DATA_READ;
1253 return mmc_send_cmd(mmc, &cmd, &data);
1256 static int sd_get_capabilities(struct mmc *mmc)
1260 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1261 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1262 struct mmc_data data;
1264 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1268 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1270 if (mmc_host_is_spi(mmc))
1273 /* Read the SCR to find out if this card supports higher speeds */
1274 cmd.cmdidx = MMC_CMD_APP_CMD;
1275 cmd.resp_type = MMC_RSP_R1;
1276 cmd.cmdarg = mmc->rca << 16;
1278 err = mmc_send_cmd(mmc, &cmd, NULL);
1283 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1284 cmd.resp_type = MMC_RSP_R1;
1290 data.dest = (char *)scr;
1293 data.flags = MMC_DATA_READ;
1295 err = mmc_send_cmd(mmc, &cmd, &data);
1304 mmc->scr[0] = __be32_to_cpu(scr[0]);
1305 mmc->scr[1] = __be32_to_cpu(scr[1]);
1307 switch ((mmc->scr[0] >> 24) & 0xf) {
1309 mmc->version = SD_VERSION_1_0;
1312 mmc->version = SD_VERSION_1_10;
1315 mmc->version = SD_VERSION_2;
1316 if ((mmc->scr[0] >> 15) & 0x1)
1317 mmc->version = SD_VERSION_3;
1320 mmc->version = SD_VERSION_1_0;
1324 if (mmc->scr[0] & SD_DATA_4BIT)
1325 mmc->card_caps |= MMC_MODE_4BIT;
1327 /* Version 1.0 doesn't support switching */
1328 if (mmc->version == SD_VERSION_1_0)
1333 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1334 (u8 *)switch_status);
1339 /* The high-speed function is busy. Try again */
1340 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1344 /* If high-speed isn't supported, we return */
1345 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1346 mmc->card_caps |= MMC_CAP(SD_HS);
1348 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1349 /* Version before 3.0 don't support UHS modes */
1350 if (mmc->version < SD_VERSION_3)
1353 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1354 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1355 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1356 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1357 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1358 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1359 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1360 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1361 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1362 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1363 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1369 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1373 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1376 /* SD version 1.00 and 1.01 does not support CMD 6 */
1377 if (mmc->version == SD_VERSION_1_0)
1382 speed = UHS_SDR12_BUS_SPEED;
1385 speed = HIGH_SPEED_BUS_SPEED;
1387 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1389 speed = UHS_SDR12_BUS_SPEED;
1392 speed = UHS_SDR25_BUS_SPEED;
1395 speed = UHS_SDR50_BUS_SPEED;
1398 speed = UHS_DDR50_BUS_SPEED;
1401 speed = UHS_SDR104_BUS_SPEED;
1408 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1412 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1418 static int sd_select_bus_width(struct mmc *mmc, int w)
1423 if ((w != 4) && (w != 1))
1426 cmd.cmdidx = MMC_CMD_APP_CMD;
1427 cmd.resp_type = MMC_RSP_R1;
1428 cmd.cmdarg = mmc->rca << 16;
1430 err = mmc_send_cmd(mmc, &cmd, NULL);
1434 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1435 cmd.resp_type = MMC_RSP_R1;
1440 err = mmc_send_cmd(mmc, &cmd, NULL);
1448 #if CONFIG_IS_ENABLED(MMC_WRITE)
1449 static int sd_read_ssr(struct mmc *mmc)
1451 static const unsigned int sd_au_size[] = {
1452 0, SZ_16K / 512, SZ_32K / 512,
1453 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1454 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1455 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1456 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1461 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1462 struct mmc_data data;
1464 unsigned int au, eo, et, es;
1466 cmd.cmdidx = MMC_CMD_APP_CMD;
1467 cmd.resp_type = MMC_RSP_R1;
1468 cmd.cmdarg = mmc->rca << 16;
1470 err = mmc_send_cmd(mmc, &cmd, NULL);
1471 #ifdef CONFIG_MMC_QUIRKS
1472 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1475 * It has been seen that APP_CMD may fail on the first
1476 * attempt, let's try a few more times
1479 err = mmc_send_cmd(mmc, &cmd, NULL);
1482 } while (retries--);
1488 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1489 cmd.resp_type = MMC_RSP_R1;
1493 data.dest = (char *)ssr;
1494 data.blocksize = 64;
1496 data.flags = MMC_DATA_READ;
1498 err = mmc_send_cmd(mmc, &cmd, &data);
1506 for (i = 0; i < 16; i++)
1507 ssr[i] = be32_to_cpu(ssr[i]);
1509 au = (ssr[2] >> 12) & 0xF;
1510 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1511 mmc->ssr.au = sd_au_size[au];
1512 es = (ssr[3] >> 24) & 0xFF;
1513 es |= (ssr[2] & 0xFF) << 8;
1514 et = (ssr[3] >> 18) & 0x3F;
1516 eo = (ssr[3] >> 16) & 0x3;
1517 mmc->ssr.erase_timeout = (et * 1000) / es;
1518 mmc->ssr.erase_offset = eo * 1000;
1521 pr_debug("Invalid Allocation Unit Size.\n");
1527 /* frequency bases */
1528 /* divided by 10 to be nice to platforms without floating point */
1529 static const int fbase[] = {
1536 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1537 * to platforms without floating point.
1539 static const u8 multipliers[] = {
/*
 * bus_width() - translate a single MMC_MODE_*BIT capability flag into
 * the numeric bus width (8/4/1).
 *
 * NOTE(review): "witdh" in the warning string is a typo ("width");
 * it is runtime text, so it is only flagged here, not changed.
 * NOTE(review): sampled fragment — the return statements for each
 * branch are not visible in this listing.
 */
1558 static inline int bus_width(uint cap)
1560 if (cap == MMC_MODE_8BIT)
1562 if (cap == MMC_MODE_4BIT)
1564 if (cap == MMC_MODE_1BIT)
1566 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1570 #if !CONFIG_IS_ENABLED(DM_MMC)
1571 #ifdef MMC_SUPPORTS_TUNING
1572 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1578 static int mmc_set_ios(struct mmc *mmc)
1582 if (mmc->cfg->ops->set_ios)
1583 ret = mmc->cfg->ops->set_ios(mmc);
1588 static int mmc_host_power_cycle(struct mmc *mmc)
1592 if (mmc->cfg->ops->host_power_cycle)
1593 ret = mmc->cfg->ops->host_power_cycle(mmc);
/*
 * mmc_set_clock() - set the bus clock, clamped to the host's
 * [f_min, f_max] range, and record the enable/disable state before
 * pushing the new ios settings to the host driver.
 */
1599 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1602 if (clock > mmc->cfg->f_max)
1603 clock = mmc->cfg->f_max;
1605 if (clock < mmc->cfg->f_min)
1606 clock = mmc->cfg->f_min;
1610 mmc->clk_disable = disable;
1612 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1614 return mmc_set_ios(mmc);
1617 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1619 mmc->bus_width = width;
1621 return mmc_set_ios(mmc);
1624 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1626 * helper function to display the capabilities in a human
1627 * friendly manner. The capabilities include bus width and
1630 void mmc_dump_capabilities(const char *text, uint caps)
1634 pr_debug("%s: widths [", text);
1635 if (caps & MMC_MODE_8BIT)
1637 if (caps & MMC_MODE_4BIT)
1639 if (caps & MMC_MODE_1BIT)
1641 pr_debug("\b\b] modes [");
1642 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1643 if (MMC_CAP(mode) & caps)
1644 pr_debug("%s, ", mmc_mode_name(mode));
1645 pr_debug("\b\b]\n");
1649 struct mode_width_tuning {
1652 #ifdef MMC_SUPPORTS_TUNING
1657 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_voltage_to_mv() - convert an mmc_voltage enum to millivolts.
 *
 * NOTE(review): sampled fragment — the switch statement opening and
 * the fall-through error return are not visible in this listing.
 */
1658 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1661 case MMC_SIGNAL_VOLTAGE_000: return 0;
1662 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1663 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1664 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1669 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1673 if (mmc->signal_voltage == signal_voltage)
1676 mmc->signal_voltage = signal_voltage;
1677 err = mmc_set_ios(mmc);
1679 pr_debug("unable to set voltage (err %d)\n", err);
1684 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1690 #if !CONFIG_IS_ENABLED(MMC_TINY)
1691 static const struct mode_width_tuning sd_modes_by_pref[] = {
1692 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1693 #ifdef MMC_SUPPORTS_TUNING
1696 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1697 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1702 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1706 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1710 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1715 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1717 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1720 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1725 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate @mwt over sd_modes_by_pref[], visiting only entries whose
 * mode bit is present in @caps.
 */
1729 #define for_each_sd_mode_by_pref(caps, mwt) \
1730 for (mwt = sd_modes_by_pref;\
1731 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1733 if (caps & MMC_CAP(mwt->mode))
/*
 * Negotiate the best bus mode and width for an SD card.
 *
 * Walks sd_modes_by_pref (fastest first) and the width list (4-bit
 * before 1-bit), configuring card then host for each candidate until
 * one succeeds.  SPI hosts are forced to 1-bit legacy mode.  On
 * failure of a candidate, the bus is dropped back to MMC_LEGACY speed
 * before trying the next one.
 */
1735 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1738 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1739 const struct mode_width_tuning *mwt;
1740 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* OCR_S18R set means the card accepted the 1.8V switch request */
1741 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1743 bool uhs_en = false;
1748 mmc_dump_capabilities("sd card", card_caps);
1749 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts: fixed 1-bit legacy bus, nothing to negotiate */
1752 if (mmc_host_is_spi(mmc)) {
1753 mmc_set_bus_width(mmc, 1);
1754 mmc_select_mode(mmc, MMC_LEGACY);
1755 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1756 #if CONFIG_IS_ENABLED(MMC_WRITE)
1757 err = sd_read_ssr(mmc);
1759 pr_warn("unable to read ssr\n");
1764 /* Restrict card's capabilities by what the host can do */
1765 caps = card_caps & mmc->host_caps;
1770 for_each_sd_mode_by_pref(caps, mwt) {
1773 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1774 if (*w & caps & mwt->widths) {
1775 pr_debug("trying mode %s width %d (at %d MHz)\n",
1776 mmc_mode_name(mwt->mode),
1778 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1780 /* configure the bus width (card + host) */
1781 err = sd_select_bus_width(mmc, bus_width(*w));
1784 mmc_set_bus_width(mmc, bus_width(*w));
1786 /* configure the bus mode (card) */
1787 err = sd_set_card_speed(mmc, mwt->mode);
1791 /* configure the bus mode (host) */
1792 mmc_select_mode(mmc, mwt->mode);
1793 mmc_set_clock(mmc, mmc->tran_speed,
1796 #ifdef MMC_SUPPORTS_TUNING
1797 /* execute tuning if needed */
1798 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1799 err = mmc_execute_tuning(mmc,
1802 pr_debug("tuning failed\n");
1808 #if CONFIG_IS_ENABLED(MMC_WRITE)
1809 err = sd_read_ssr(mmc);
1811 pr_warn("unable to read ssr\n");
/* candidate failed: fall back before trying the next one */
1817 /* revert to a safer bus speed */
1818 mmc_select_mode(mmc, MMC_LEGACY);
1819 mmc_set_clock(mmc, mmc->tran_speed,
1825 pr_err("unable to select a mode\n");
1830 * read and compare the part of ext csd that is constant.
1831 * This can be used to check that the transfer is working
/*
 * Sanity-check the current bus configuration by re-reading EXT_CSD and
 * comparing a handful of read-only fields against the copy cached in
 * mmc->ext_csd.  A mismatch indicates the transfer path is corrupting
 * data at the newly selected mode/width.
 */
1834 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1837 const u8 *ext_csd = mmc->ext_csd;
1838 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists on MMC v4 and later */
1840 if (mmc->version < MMC_VERSION_4)
1843 err = mmc_send_ext_csd(mmc, test_csd);
1847 /* Only compare read only fields */
1848 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1849 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1850 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1851 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1852 ext_csd[EXT_CSD_REV]
1853 == test_csd[EXT_CSD_REV] &&
1854 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1855 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1856 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1857 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1863 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select a signalling voltage supported by the card for @mode, limited
 * to @allowed_mask.  Builds a mask of card-supported voltages from the
 * EXT_CSD card type bits, then tries candidates one at a time,
 * dropping each failed candidate from the mask.
 * NOTE(review): ffs() picks the lowest set bit first — presumably the
 * voltage enum orders bits so that maps to the lowest voltage; confirm
 * against the MMC_SIGNAL_VOLTAGE_* definitions.
 */
1864 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1865 uint32_t allowed_mask)
1873 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1874 EXT_CSD_CARD_TYPE_HS400_1_8V))
1875 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1876 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1877 EXT_CSD_CARD_TYPE_HS400_1_2V))
1878 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1881 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1882 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1883 MMC_SIGNAL_VOLTAGE_180;
1884 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1885 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1888 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* try each remaining candidate until one sticks */
1892 while (card_mask & allowed_mask) {
1893 enum mmc_voltage best_match;
1895 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1896 if (!mmc_set_signal_voltage(mmc, best_match))
1899 allowed_mask &= ~best_match;
1905 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1906 uint32_t allowed_mask)
/*
 * eMMC bus modes in order of preference (fastest first).  HS400-ES,
 * HS400 and HS200 entries are gated by their respective Kconfig
 * options; HS200/HS400 require tuning with the HS200 tuning command.
 */
1912 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1913 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1915 .mode = MMC_HS_400_ES,
1916 .widths = MMC_MODE_8BIT,
1919 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1922 .widths = MMC_MODE_8BIT,
1923 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1926 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1929 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1930 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1935 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1939 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1943 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1947 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate @mwt over mmc_modes_by_pref[], visiting only entries whose
 * mode bit is present in @caps.
 */
1951 #define for_each_mmc_mode_by_pref(caps, mwt) \
1952 for (mwt = mmc_modes_by_pref;\
1953 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1955 if (caps & MMC_CAP(mwt->mode))
/*
 * Map (host width capability, DDR flag) to the EXT_CSD BUS_WIDTH
 * register value, widest entries first so the selection loop prefers
 * the widest bus available.
 */
1957 static const struct ext_csd_bus_width {
1961 } ext_csd_bus_width[] = {
1962 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1963 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1964 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1965 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1966 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1969 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch an eMMC device into HS400 mode.  The sequence follows the
 * JEDEC requirement: tune at HS200, drop back to HS, switch the card
 * to 8-bit DDR bus width, then raise the timing to HS400.
 */
1970 static int mmc_select_hs400(struct mmc *mmc)
1974 /* Set timing to HS200 for tuning */
1975 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1979 /* configure the bus mode (host) */
1980 mmc_select_mode(mmc, MMC_HS_200);
1981 mmc_set_clock(mmc, mmc->tran_speed, false);
1983 /* execute tuning if needed */
1984 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1986 debug("tuning failed\n");
1990 /* Set back to HS */
1991 mmc_set_card_speed(mmc, MMC_HS, true);
/* switch card to 8-bit DDR before enabling HS400 timing */
1993 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1994 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1998 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
2002 mmc_select_mode(mmc, MMC_HS_400);
2003 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2010 static int mmc_select_hs400(struct mmc *mmc)
2016 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2017 #if !CONFIG_IS_ENABLED(DM_MMC)
2018 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * Switch an eMMC device into HS400 Enhanced Strobe mode: go to HS,
 * switch the card to 8-bit DDR with the strobe bit set, raise timing
 * to HS400-ES, then enable enhanced strobe on the host.  No tuning is
 * required in ES mode.
 */
2023 static int mmc_select_hs400es(struct mmc *mmc)
2027 err = mmc_set_card_speed(mmc, MMC_HS, true);
2031 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2032 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2033 EXT_CSD_BUS_WIDTH_STROBE);
2035 printf("switch to bus width for hs400 failed\n");
2038 /* TODO: driver strength */
2039 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2043 mmc_select_mode(mmc, MMC_HS_400_ES);
2044 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2048 return mmc_set_enhanced_strobe(mmc);
2051 static int mmc_select_hs400es(struct mmc *mmc)
/*
 * Iterate @ecbv over ext_csd_bus_width[], visiting only entries that
 * match the requested DDR-ness and whose width bit is in @caps.
 */
2057 #define for_each_supported_width(caps, ddr, ecbv) \
2058 for (ecbv = ext_csd_bus_width;\
2059 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2061 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Negotiate the best bus mode and width for an eMMC device.
 *
 * Walks mmc_modes_by_pref (fastest first) and, for each mode, the
 * matching bus-width table entries (widest first).  For each candidate
 * it sets the lowest usable signal voltage, switches the card width,
 * selects HS400/HS400-ES via their dedicated helpers or the generic
 * speed switch otherwise, tunes if required, and validates the link
 * with an EXT_CSD read-back.  On failure the voltage is restored and
 * the bus reverts to 1-bit legacy before trying the next candidate.
 */
2063 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2066 const struct mode_width_tuning *mwt;
2067 const struct ext_csd_bus_width *ecbw;
2070 mmc_dump_capabilities("mmc", card_caps);
2071 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts: fixed 1-bit legacy bus, nothing to negotiate */
2074 if (mmc_host_is_spi(mmc)) {
2075 mmc_set_bus_width(mmc, 1);
2076 mmc_select_mode(mmc, MMC_LEGACY);
2077 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2081 /* Restrict card's capabilities by what the host can do */
2082 card_caps &= mmc->host_caps;
2084 /* Only version 4 of MMC supports wider bus widths */
2085 if (mmc->version < MMC_VERSION_4)
2088 if (!mmc->ext_csd) {
2089 pr_debug("No ext_csd found!\n"); /* this should never happen */
2093 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2094 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2096 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2097 * before doing anything else, since a transition from either of
2098 * the HS200/HS400 mode directly to legacy mode is not supported.
2100 if (mmc->selected_mode == MMC_HS_200 ||
2101 mmc->selected_mode == MMC_HS_400)
2102 mmc_set_card_speed(mmc, MMC_HS, true);
2105 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2107 for_each_mmc_mode_by_pref(card_caps, mwt) {
2108 for_each_supported_width(card_caps & mwt->widths,
2109 mmc_is_mode_ddr(mwt->mode), ecbw) {
2110 enum mmc_voltage old_voltage;
2111 pr_debug("trying mode %s width %d (at %d MHz)\n",
2112 mmc_mode_name(mwt->mode),
2113 bus_width(ecbw->cap),
2114 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember voltage so it can be restored on failure */
2115 old_voltage = mmc->signal_voltage;
2116 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2117 MMC_ALL_SIGNAL_VOLTAGE);
2121 /* configure the bus width (card + host) */
2122 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2124 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2127 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2129 if (mwt->mode == MMC_HS_400) {
2130 err = mmc_select_hs400(mmc);
2132 printf("Select HS400 failed %d\n", err);
2135 } else if (mwt->mode == MMC_HS_400_ES) {
2136 err = mmc_select_hs400es(mmc);
2138 printf("Select HS400ES failed %d\n",
2143 /* configure the bus speed (card) */
2144 err = mmc_set_card_speed(mmc, mwt->mode, false);
2149 * configure the bus width AND the ddr mode
2150 * (card). The host side will be taken care
2151 * of in the next step
2153 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2154 err = mmc_switch(mmc,
2155 EXT_CSD_CMD_SET_NORMAL,
2157 ecbw->ext_csd_bits);
2162 /* configure the bus mode (host) */
2163 mmc_select_mode(mmc, mwt->mode);
2164 mmc_set_clock(mmc, mmc->tran_speed,
2166 #ifdef MMC_SUPPORTS_TUNING
2168 /* execute tuning if needed */
2170 err = mmc_execute_tuning(mmc,
2173 pr_debug("tuning failed\n");
2180 /* do a transfer to check the configuration */
2181 err = mmc_read_and_compare_ext_csd(mmc);
2185 mmc_set_signal_voltage(mmc, old_voltage);
2186 /* if an error occurred, revert to a safer bus mode */
2187 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2188 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2189 mmc_select_mode(mmc, MMC_LEGACY);
2190 mmc_set_bus_width(mmc, 1);
2194 pr_err("unable to select a mode\n");
2200 #if CONFIG_IS_ENABLED(MMC_TINY)
2201 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * Parse the EXT_CSD register of a version-4+ (eMMC) device and fill in
 * the mmc structure: spec version, user/boot/RPMB/GP capacities,
 * partition configuration, switch timeouts and erase group sizes.
 * Returns 0 on success (or for SD / pre-v4 devices, where EXT_CSD does
 * not exist) or a negative error code.
 */
2204 static int mmc_startup_v4(struct mmc *mmc)
2208 bool has_parts = false;
2209 bool part_completed;
2210 static const u32 mmc_versions[] = {
2222 #if CONFIG_IS_ENABLED(MMC_TINY)
2223 u8 *ext_csd = ext_csd_bkup;
2225 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
/*
 * Fix: ext_csd_bkup is declared by DEFINE_CACHE_ALIGN_BUFFER(), which
 * makes it a *pointer* to the aligned backing array, so
 * sizeof(ext_csd_bkup) was only the pointer size (4/8 bytes).  Clear
 * the full block-sized buffer instead.
 */
2229 memset(ext_csd_bkup, 0, MMC_MAX_BLOCK_LEN);
2231 err = mmc_send_ext_csd(mmc, ext_csd);
2235 /* store the ext csd for future reference */
2237 mmc->ext_csd = ext_csd;
2239 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2241 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2244 /* check ext_csd version and capacity */
2245 err = mmc_send_ext_csd(mmc, ext_csd);
2249 /* store the ext csd for future reference */
2251 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2254 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2256 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2259 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2261 if (mmc->version >= MMC_VERSION_4_2) {
2263 * According to the JEDEC Standard, the value of
2264 * ext_csd's capacity is valid if the value is more
2267 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2268 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2269 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2270 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2271 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT is only authoritative above 2 GiB */
2272 if ((capacity >> 20) > 2 * 1024)
2273 mmc->capacity_user = capacity;
2276 if (mmc->version >= MMC_VERSION_4_5)
2277 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2279 /* The partition data may be non-zero but it is only
2280 * effective if PARTITION_SETTING_COMPLETED is set in
2281 * EXT_CSD, so ignore any data if this bit is not set,
2282 * except for enabling the high-capacity group size
2283 * definition (see below).
2285 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2286 EXT_CSD_PARTITION_SETTING_COMPLETED);
2288 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2289 /* Some eMMC set the value too low so set a minimum */
2290 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2291 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2293 /* store the partition info of emmc */
2294 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2295 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2296 ext_csd[EXT_CSD_BOOT_MULT])
2297 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2298 if (part_completed &&
2299 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2300 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT/RPMB sizes are in 128 KiB units (multiplier << 17) */
2302 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2304 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2306 for (i = 0; i < 4; i++) {
2307 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2308 uint mult = (ext_csd[idx + 2] << 16) +
2309 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2312 if (!part_completed)
2314 mmc->capacity_gp[i] = mult;
2315 mmc->capacity_gp[i] *=
2316 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2317 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2318 mmc->capacity_gp[i] <<= 19;
2321 #ifndef CONFIG_SPL_BUILD
2322 if (part_completed) {
2323 mmc->enh_user_size =
2324 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2325 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2326 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2327 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2328 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2329 mmc->enh_user_size <<= 19;
2330 mmc->enh_user_start =
2331 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2332 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2333 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2334 ext_csd[EXT_CSD_ENH_START_ADDR];
2335 if (mmc->high_capacity)
2336 mmc->enh_user_start <<= 9;
2341 * Host needs to enable ERASE_GRP_DEF bit if device is
2342 * partitioned. This bit will be lost every time after a reset
2343 * or power off. This will affect erase size.
2347 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2348 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2351 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2352 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy in sync with the switch above */
2357 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2360 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2361 #if CONFIG_IS_ENABLED(MMC_WRITE)
2362 /* Read out group size from ext_csd */
2363 mmc->erase_grp_size =
2364 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2367 * if high capacity and partition setting completed
2368 * SEC_COUNT is valid even if it is smaller than 2 GiB
2369 * JEDEC Standard JESD84-B45, 6.2.4
2371 if (mmc->high_capacity && part_completed) {
2372 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2373 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2374 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2375 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2376 capacity *= MMC_MAX_BLOCK_LEN;
2377 mmc->capacity_user = capacity;
2380 #if CONFIG_IS_ENABLED(MMC_WRITE)
2382 /* Calculate the group size from the csd value. */
2383 int erase_gsz, erase_gmul;
2385 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2386 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2387 mmc->erase_grp_size = (erase_gsz + 1)
2391 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2392 mmc->hc_wp_grp_size = 1024
2393 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2394 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2397 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
/* error path: drop the cached EXT_CSD copy */
2402 #if !CONFIG_IS_ENABLED(MMC_TINY)
2405 mmc->ext_csd = NULL;
/*
 * Bring an identified card up to the transfer state and populate the
 * mmc structure and its block descriptor: read CID/CSD, assign/read
 * the RCA, decode legacy timings and capacities, select the card,
 * parse EXT_CSD (v4+), then negotiate bus mode/width and fill in the
 * blk_desc vendor/product/revision strings.
 */
2410 static int mmc_startup(struct mmc *mmc)
2416 struct blk_desc *bdesc;
2418 #ifdef CONFIG_MMC_SPI_CRC_ON
2419 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2420 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2421 cmd.resp_type = MMC_RSP_R1;
2423 err = mmc_send_cmd(mmc, &cmd, NULL);
2429 /* Put the Card in Identify Mode */
2430 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2431 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2432 cmd.resp_type = MMC_RSP_R2;
2435 err = mmc_send_cmd(mmc, &cmd, NULL);
2437 #ifdef CONFIG_MMC_QUIRKS
2438 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2441 * It has been seen that SEND_CID may fail on the first
2442 * attempt, let's try a few more times
2445 err = mmc_send_cmd(mmc, &cmd, NULL);
2448 } while (retries--);
2455 memcpy(mmc->cid, cmd.response, 16);
2458 * For MMC cards, set the Relative Address.
2459 * For SD cards, get the Relative Address.
2460 * This also puts the cards into Standby State
2462 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2463 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2464 cmd.cmdarg = mmc->rca << 16;
2465 cmd.resp_type = MMC_RSP_R6;
2467 err = mmc_send_cmd(mmc, &cmd, NULL);
2473 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2476 /* Get the Card-Specific Data */
2477 cmd.cmdidx = MMC_CMD_SEND_CSD;
2478 cmd.resp_type = MMC_RSP_R2;
2479 cmd.cmdarg = mmc->rca << 16;
2481 err = mmc_send_cmd(mmc, &cmd, NULL);
2486 mmc->csd[0] = cmd.response[0];
2487 mmc->csd[1] = cmd.response[1];
2488 mmc->csd[2] = cmd.response[2];
2489 mmc->csd[3] = cmd.response[3];
/* derive the MMC spec version from the CSD SPEC_VERS field */
2491 if (mmc->version == MMC_VERSION_UNKNOWN) {
2492 int version = (cmd.response[0] >> 26) & 0xf;
2496 mmc->version = MMC_VERSION_1_2;
2499 mmc->version = MMC_VERSION_1_4;
2502 mmc->version = MMC_VERSION_2_2;
2505 mmc->version = MMC_VERSION_3;
2508 mmc->version = MMC_VERSION_4;
2511 mmc->version = MMC_VERSION_1_2;
2516 /* divide frequency by 10, since the mults are 10x bigger */
2517 freq = fbase[(cmd.response[0] & 0x7)];
2518 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2520 mmc->legacy_speed = freq * mult;
2521 mmc_select_mode(mmc, MMC_LEGACY);
2523 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2524 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2525 #if CONFIG_IS_ENABLED(MMC_WRITE)
2528 mmc->write_bl_len = mmc->read_bl_len;
2530 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* decode C_SIZE: different layouts for high-capacity vs standard */
2533 if (mmc->high_capacity) {
2534 csize = (mmc->csd[1] & 0x3f) << 16
2535 | (mmc->csd[2] & 0xffff0000) >> 16;
2538 csize = (mmc->csd[1] & 0x3ff) << 2
2539 | (mmc->csd[2] & 0xc0000000) >> 30;
2540 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2543 mmc->capacity_user = (csize + 1) << (cmult + 2);
2544 mmc->capacity_user *= mmc->read_bl_len;
2545 mmc->capacity_boot = 0;
2546 mmc->capacity_rpmb = 0;
2547 for (i = 0; i < 4; i++)
2548 mmc->capacity_gp[i] = 0;
2550 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2551 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2553 #if CONFIG_IS_ENABLED(MMC_WRITE)
2554 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2555 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only if implemented and a value was configured */
2558 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2559 cmd.cmdidx = MMC_CMD_SET_DSR;
2560 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2561 cmd.resp_type = MMC_RSP_NONE;
2562 if (mmc_send_cmd(mmc, &cmd, NULL))
2563 pr_warn("MMC: SET_DSR failed\n");
2566 /* Select the card, and put it into Transfer Mode */
2567 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2568 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2569 cmd.resp_type = MMC_RSP_R1;
2570 cmd.cmdarg = mmc->rca << 16;
2571 err = mmc_send_cmd(mmc, &cmd, NULL);
2578 * For SD, its erase group is always one sector
2580 #if CONFIG_IS_ENABLED(MMC_WRITE)
2581 mmc->erase_grp_size = 1;
2583 mmc->part_config = MMCPART_NOAVAILABLE;
2585 err = mmc_startup_v4(mmc);
2589 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2593 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: skip mode negotiation, stay in 1-bit legacy */
2594 mmc_set_clock(mmc, mmc->legacy_speed, false);
2595 mmc_select_mode(mmc, MMC_LEGACY);
2596 mmc_set_bus_width(mmc, 1);
2599 err = sd_get_capabilities(mmc);
2602 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2604 err = mmc_get_capabilities(mmc);
2607 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2613 mmc->best_mode = mmc->selected_mode;
2615 /* Fix the block length for DDR mode */
2616 if (mmc->ddr_mode) {
2617 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2618 #if CONFIG_IS_ENABLED(MMC_WRITE)
2619 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2623 /* fill in device description */
2624 bdesc = mmc_get_blk_desc(mmc);
2628 bdesc->blksz = mmc->read_bl_len;
2629 bdesc->log2blksz = LOG2(bdesc->blksz);
2630 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2631 #if !defined(CONFIG_SPL_BUILD) || \
2632 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2633 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
2634 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2635 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2636 (mmc->cid[3] >> 16) & 0xffff);
2637 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2638 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2639 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2640 (mmc->cid[2] >> 24) & 0xff);
2641 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2642 (mmc->cid[2] >> 16) & 0xf);
2644 bdesc->vendor[0] = 0;
2645 bdesc->product[0] = 0;
2646 bdesc->revision[0] = 0;
2649 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * Send CMD8 (SEND_IF_COND) to probe for an SD version 2.00+ card.
 * The check pattern 0xaa must be echoed back; on success the card's
 * version is recorded as SD_VERSION_2.
 */
2656 static int mmc_send_if_cond(struct mmc *mmc)
2661 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2662 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2663 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2664 cmd.resp_type = MMC_RSP_R7;
2666 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the 0xaa check pattern back */
2671 if ((cmd.response[0] & 0xff) != 0xaa)
2674 mmc->version = SD_VERSION_2;
2679 #if !CONFIG_IS_ENABLED(DM_MMC)
2680 /* board-specific MMC power initializations. */
2681 __weak void board_mmc_power_init(void)
/*
 * Look up the card's power supplies.  With DM_MMC + DM_REGULATOR the
 * vmmc/vqmmc regulators are resolved from the device tree (missing
 * supplies are only logged, not fatal); without driver model, board
 * code is given a chance to set up power instead.
 */
2686 static int mmc_power_init(struct mmc *mmc)
2688 #if CONFIG_IS_ENABLED(DM_MMC)
2689 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2692 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2695 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2697 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2698 &mmc->vqmmc_supply);
2700 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2702 #else /* !CONFIG_DM_MMC */
2704 * Driver model should use a regulator, as above, rather than calling
2705 * out to board code.
2707 board_mmc_power_init();
2713 * put the host in the initial state:
2714 * - turn on Vdd (card power supply)
2715 * - configure the bus width and clock to minimal values
/*
 * Put the host into the state required for card identification:
 * 3.3 V signalling (falling back to 1.8 V), 1-bit legacy bus, and the
 * minimal identification clock.
 */
2717 static void mmc_set_initial_state(struct mmc *mmc)
2721 /* First try to set 3.3V. If it fails set to 1.8V */
2722 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2724 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2726 pr_warn("mmc: failed to set signal voltage\n");
2728 mmc_select_mode(mmc, MMC_LEGACY);
2729 mmc_set_bus_width(mmc, 1);
/* clock rate 0 with MMC_CLK_ENABLE lets the driver pick its minimum */
2730 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/*
 * Enable the card's Vdd supply (vmmc regulator) when driver model
 * regulators are in use; a failure is reported but visible code here
 * only prints the error.
 */
2733 static int mmc_power_on(struct mmc *mmc)
2735 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2736 if (mmc->vmmc_supply) {
2737 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2740 puts("Error enabling VMMC supply\n");
/*
 * Remove power from the card: gate the bus clock, then disable the
 * vmmc regulator when driver model regulators are in use.
 */
2748 static int mmc_power_off(struct mmc *mmc)
2750 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2751 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2752 if (mmc->vmmc_supply) {
2753 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2756 pr_debug("Error disabling VMMC supply\n");
/*
 * Fully power-cycle the card: power off, let the host driver do any
 * additional cycling it needs, wait for the supply to drain, then
 * power back on.  Needed to recover a card from UHS mode.
 */
2764 static int mmc_power_cycle(struct mmc *mmc)
2768 ret = mmc_power_off(mmc);
2772 ret = mmc_host_power_cycle(mmc);
2777 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2778 * to be on the safer side.
2781 return mmc_power_on(mmc);
/*
 * Power up the card and negotiate its operating conditions: set up
 * power, power-cycle (disabling UHS if cycling is unsupported), reset
 * the card with CMD0, probe for SDv2 with CMD8, then try the SD
 * ACMD41 path and fall back to the MMC CMD1 path on timeout.
 */
2784 int mmc_get_op_cond(struct mmc *mmc)
2786 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2792 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2793 mmc_adapter_card_type_ident();
2795 err = mmc_power_init(mmc);
2799 #ifdef CONFIG_MMC_QUIRKS
/* enable the conservative retry quirks by default */
2800 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2801 MMC_QUIRK_RETRY_SEND_CID |
2802 MMC_QUIRK_RETRY_APP_CMD;
2805 err = mmc_power_cycle(mmc);
2808 * if power cycling is not supported, we should not try
2809 * to use the UHS modes, because we wouldn't be able to
2810 * recover from an error during the UHS initialization.
2812 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2814 mmc->host_caps &= ~UHS_CAPS;
2815 err = mmc_power_on(mmc);
2820 #if CONFIG_IS_ENABLED(DM_MMC)
2821 /* The device has already been probed ready for use */
2823 /* made sure it's not NULL earlier */
2824 err = mmc->cfg->ops->init(mmc);
2831 mmc_set_initial_state(mmc);
2833 /* Reset the Card */
2834 err = mmc_go_idle(mmc);
2839 /* The internal partition reset to user partition(0) at every CMD0 */
2840 mmc_get_blk_desc(mmc)->hwpart = 0;
2842 /* Test for SD version 2 */
2843 err = mmc_send_if_cond(mmc);
2845 /* Now try to get the SD card's operating condition */
2846 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS negotiation needs a power cycle before retrying */
2847 if (err && uhs_en) {
2849 mmc_power_cycle(mmc);
2853 /* If the command timed out, we check for an MMC card */
2854 if (err == -ETIMEDOUT) {
2855 err = mmc_send_op_cond(mmc);
2858 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2859 pr_err("Card did not respond to voltage select!\n");
/*
 * Start card initialization: build the host capability mask, run any
 * deferred probing, check card presence, and negotiate the operating
 * conditions.  On success mmc->init_in_progress is set so that
 * mmc_complete_init() can finish the job later.
 */
2868 int mmc_start_init(struct mmc *mmc)
2874 * all hosts are capable of 1 bit bus-width and able to use the legacy
/*
 * Fix: MMC_CAP(MMC_LEGACY) was previously OR'ed in twice; once is
 * enough (behavior unchanged, redundancy removed).
 */
2877 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2878 MMC_MODE_1BIT;
2879 #if CONFIG_IS_ENABLED(DM_MMC)
2880 mmc_deferred_probe(mmc);
2882 #if !defined(CONFIG_MMC_BROKEN_CD)
/* card-detect: 0 from mmc_getcd() means no card inserted */
2883 no_card = mmc_getcd(mmc) == 0;
2887 #if !CONFIG_IS_ENABLED(DM_MMC)
2888 /* we pretend there's no card when init is NULL */
2889 no_card = no_card || (mmc->cfg->ops->init == NULL);
2893 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2894 pr_err("MMC: no card present\n");
2899 err = mmc_get_op_cond(mmc);
2902 mmc->init_in_progress = 1;
/*
 * Finish an initialization started by mmc_start_init(): complete any
 * pending operating-condition negotiation, then run the full startup
 * sequence.  Clears init_in_progress.
 */
2907 static int mmc_complete_init(struct mmc *mmc)
2911 mmc->init_in_progress = 0;
2912 if (mmc->op_cond_pending)
2913 err = mmc_complete_op_cond(mmc);
2916 err = mmc_startup(mmc);
/*
 * Public entry point: fully initialize a card, starting the sequence
 * if it is not already in progress and then completing it.  Logs the
 * result and elapsed time.
 */
2924 int mmc_init(struct mmc *mmc)
2927 __maybe_unused ulong start;
2928 #if CONFIG_IS_ENABLED(DM_MMC)
2929 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2936 start = get_timer(0);
2938 if (!mmc->init_in_progress)
2939 err = mmc_start_init(mmc);
2942 err = mmc_complete_init(mmc);
2944 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2949 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2950 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2951 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Downgrade the card out of high-speed modes before handing it off
 * (e.g. to an OS): re-run mode selection with the UHS capabilities
 * masked out for SD, or with HS200/HS400 masked out for eMMC.
 */
2952 int mmc_deinit(struct mmc *mmc)
2960 caps_filtered = mmc->card_caps &
2961 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2962 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2963 MMC_CAP(UHS_SDR104));
2965 return sd_select_mode_and_width(mmc, caps_filtered);
2967 caps_filtered = mmc->card_caps &
2968 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2970 return mmc_select_mode_and_width(mmc, caps_filtered);
2975 int mmc_set_dsr(struct mmc *mmc, u16 val)
2981 /* CPU-specific MMC initializations */
2982 __weak int cpu_mmc_init(bd_t *bis)
2987 /* board-specific MMC initializations. */
2988 __weak int board_mmc_init(bd_t *bis)
/* Mark this device for initialization during the pre-init phase. */
2993 void mmc_set_preinit(struct mmc *mmc, int preinit)
2995 mmc->preinit = preinit;
2998 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Driver-model probe: enumerate all devices in the MMC uclass in
 * sequence order, then probe each one.  Probe failures are logged but
 * do not stop the loop over remaining devices.
 */
2999 static int mmc_probe(bd_t *bis)
3003 struct udevice *dev;
3005 ret = uclass_get(UCLASS_MMC, &uc);
3010 * Try to add them in sequence order. Really with driver model we
3011 * should allow holes, but the current MMC list does not allow that.
3012 * So if we request 0, 1, 3 we will get 0, 1, 2.
3014 for (i = 0; ; i++) {
3015 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3019 uclass_foreach_dev(dev, uc) {
3020 ret = device_probe(dev);
3022 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-driver-model probe: delegate device registration to board code. */
3028 static int mmc_probe(bd_t *bis)
3030 if (board_mmc_init(bis) < 0)
/*
 * One-time global MMC subsystem initialization: probe all controllers
 * and (outside SPL) print the device list.  Guarded so repeated calls
 * are no-ops.
 */
3037 int mmc_initialize(bd_t *bis)
3039 static int initialized = 0;
3041 if (initialized) /* Avoid initializing mmc multiple times */
3045 #if !CONFIG_IS_ENABLED(BLK)
3046 #if !CONFIG_IS_ENABLED(MMC_TINY)
3050 ret = mmc_probe(bis);
3054 #ifndef CONFIG_SPL_BUILD
3055 print_mmc_devices(',');
3062 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Initialize the MMC device with uclass sequence number @num: look up
 * and probe the udevice, then fetch its struct mmc.
 */
3063 int mmc_init_device(int num)
3065 struct udevice *dev;
3069 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3073 m = mmc_get_mmc_dev(dev);
3076 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3077 mmc_set_preinit(m, 1);
3086 #ifdef CONFIG_CMD_BKOPS_ENABLE
3087 int mmc_set_bkops_enable(struct mmc *mmc)
3090 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3092 err = mmc_send_ext_csd(mmc, ext_csd);
3094 puts("Could not get ext_csd register values\n");
3098 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3099 puts("Background operations not supported on device\n");
3100 return -EMEDIUMTYPE;
3103 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3104 puts("Background operations already enabled\n");
3108 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3110 puts("Failed to enable manual background operations\n");
3114 puts("Enabled manual background operations\n");