1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
28 #if !CONFIG_IS_ENABLED(DM_MMC)
30 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
35 __weak int board_mmc_getwp(struct mmc *mmc)
40 int mmc_getwp(struct mmc *mmc)
44 wp = board_mmc_getwp(mmc);
47 if (mmc->cfg->ops->getwp)
48 wp = mmc->cfg->ops->getwp(mmc);
56 __weak int board_mmc_getcd(struct mmc *mmc)
62 #ifdef CONFIG_MMC_TRACE
63 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
65 printf("CMD_SEND:%d\n", cmd->cmdidx);
66 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
69 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
75 printf("\t\tRET\t\t\t %d\n", ret);
77 switch (cmd->resp_type) {
79 printf("\t\tMMC_RSP_NONE\n");
82 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
86 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
90 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
92 printf("\t\t \t\t 0x%08x \n",
94 printf("\t\t \t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
99 printf("\t\t\t\t\tDUMPING DATA\n");
100 for (i = 0; i < 4; i++) {
102 printf("\t\t\t\t\t%03d - ", i*4);
103 ptr = (u8 *)&cmd->response[i];
105 for (j = 0; j < 4; j++)
106 printf("%02x ", *ptr--);
111 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
115 printf("\t\tERROR MMC rsp not supported\n");
121 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
125 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
126 printf("CURR STATE:%d\n", status);
130 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
131 const char *mmc_mode_name(enum bus_mode mode)
133 static const char *const names[] = {
134 [MMC_LEGACY] = "MMC legacy",
135 [MMC_HS] = "MMC High Speed (26MHz)",
136 [SD_HS] = "SD High Speed (50MHz)",
137 [UHS_SDR12] = "UHS SDR12 (25MHz)",
138 [UHS_SDR25] = "UHS SDR25 (50MHz)",
139 [UHS_SDR50] = "UHS SDR50 (100MHz)",
140 [UHS_SDR104] = "UHS SDR104 (208MHz)",
141 [UHS_DDR50] = "UHS DDR50 (50MHz)",
142 [MMC_HS_52] = "MMC High Speed (52MHz)",
143 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
144 [MMC_HS_200] = "HS200 (200MHz)",
145 [MMC_HS_400] = "HS400 (200MHz)",
146 [MMC_HS_400_ES] = "HS400ES (200MHz)",
149 if (mode >= MMC_MODES_END)
150 return "Unknown mode";
156 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
158 static const int freqs[] = {
159 [MMC_LEGACY] = 25000000,
162 [MMC_HS_52] = 52000000,
163 [MMC_DDR_52] = 52000000,
164 [UHS_SDR12] = 25000000,
165 [UHS_SDR25] = 50000000,
166 [UHS_SDR50] = 100000000,
167 [UHS_DDR50] = 50000000,
168 [UHS_SDR104] = 208000000,
169 [MMC_HS_200] = 200000000,
170 [MMC_HS_400] = 200000000,
171 [MMC_HS_400_ES] = 200000000,
174 if (mode == MMC_LEGACY)
175 return mmc->legacy_speed;
176 else if (mode >= MMC_MODES_END)
182 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
184 mmc->selected_mode = mode;
185 mmc->tran_speed = mmc_mode2freq(mmc, mode);
186 mmc->ddr_mode = mmc_is_mode_ddr(mode);
187 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
188 mmc->tran_speed / 1000000);
192 #if !CONFIG_IS_ENABLED(DM_MMC)
193 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
197 mmmc_trace_before_send(mmc, cmd);
198 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
199 mmmc_trace_after_send(mmc, cmd, ret);
205 int mmc_send_status(struct mmc *mmc, unsigned int *status)
208 int err, retries = 5;
210 cmd.cmdidx = MMC_CMD_SEND_STATUS;
211 cmd.resp_type = MMC_RSP_R1;
212 if (!mmc_host_is_spi(mmc))
213 cmd.cmdarg = mmc->rca << 16;
216 err = mmc_send_cmd(mmc, &cmd, NULL);
218 mmc_trace_state(mmc, &cmd);
219 *status = cmd.response[0];
223 mmc_trace_state(mmc, &cmd);
227 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
232 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
237 err = mmc_send_status(mmc, &status);
241 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
242 (status & MMC_STATUS_CURR_STATE) !=
246 if (status & MMC_STATUS_MASK) {
247 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
248 pr_err("Status Error: 0x%08x\n", status);
253 if (timeout_ms-- <= 0)
259 if (timeout_ms <= 0) {
260 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
261 pr_err("Timeout waiting card ready\n");
269 int mmc_set_blocklen(struct mmc *mmc, int len)
277 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
278 cmd.resp_type = MMC_RSP_R1;
281 err = mmc_send_cmd(mmc, &cmd, NULL);
283 #ifdef CONFIG_MMC_QUIRKS
284 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
287 * It has been seen that SET_BLOCKLEN may fail on the first
288 * attempt, let's try a few more times
291 err = mmc_send_cmd(mmc, &cmd, NULL);
301 #ifdef MMC_SUPPORTS_TUNING
302 static const u8 tuning_blk_pattern_4bit[] = {
303 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
304 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
305 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
306 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
307 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
308 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
309 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
310 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
313 static const u8 tuning_blk_pattern_8bit[] = {
314 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
315 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
316 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
317 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
318 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
319 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
320 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
321 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
322 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
323 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
324 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
325 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
326 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
327 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
328 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
329 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
332 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
335 struct mmc_data data;
336 const u8 *tuning_block_pattern;
339 if (mmc->bus_width == 8) {
340 tuning_block_pattern = tuning_blk_pattern_8bit;
341 size = sizeof(tuning_blk_pattern_8bit);
342 } else if (mmc->bus_width == 4) {
343 tuning_block_pattern = tuning_blk_pattern_4bit;
344 size = sizeof(tuning_blk_pattern_4bit);
349 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
353 cmd.resp_type = MMC_RSP_R1;
355 data.dest = (void *)data_buf;
357 data.blocksize = size;
358 data.flags = MMC_DATA_READ;
360 err = mmc_send_cmd(mmc, &cmd, &data);
364 if (memcmp(data_buf, tuning_block_pattern, size))
371 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
375 struct mmc_data data;
378 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
380 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
382 if (mmc->high_capacity)
385 cmd.cmdarg = start * mmc->read_bl_len;
387 cmd.resp_type = MMC_RSP_R1;
390 data.blocks = blkcnt;
391 data.blocksize = mmc->read_bl_len;
392 data.flags = MMC_DATA_READ;
394 if (mmc_send_cmd(mmc, &cmd, &data))
398 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
400 cmd.resp_type = MMC_RSP_R1b;
401 if (mmc_send_cmd(mmc, &cmd, NULL)) {
402 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
403 pr_err("mmc fail to send stop cmd\n");
412 #if !CONFIG_IS_ENABLED(DM_MMC)
413 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
415 if (mmc->cfg->ops->get_b_max)
416 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
418 return mmc->cfg->b_max;
422 #if CONFIG_IS_ENABLED(BLK)
423 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
425 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
429 #if CONFIG_IS_ENABLED(BLK)
430 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
432 int dev_num = block_dev->devnum;
434 lbaint_t cur, blocks_todo = blkcnt;
440 struct mmc *mmc = find_mmc_device(dev_num);
444 if (CONFIG_IS_ENABLED(MMC_TINY))
445 err = mmc_switch_part(mmc, block_dev->hwpart);
447 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
452 if ((start + blkcnt) > block_dev->lba) {
453 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
454 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
455 start + blkcnt, block_dev->lba);
460 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
461 pr_debug("%s: Failed to set blocklen\n", __func__);
465 b_max = mmc_get_b_max(mmc, dst, blkcnt);
468 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
469 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
470 pr_debug("%s: Failed to read blocks\n", __func__);
475 dst += cur * mmc->read_bl_len;
476 } while (blocks_todo > 0);
481 static int mmc_go_idle(struct mmc *mmc)
488 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
490 cmd.resp_type = MMC_RSP_NONE;
492 err = mmc_send_cmd(mmc, &cmd, NULL);
502 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
503 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
509 * Send CMD11 only if the request is to switch the card to
512 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
513 return mmc_set_signal_voltage(mmc, signal_voltage);
515 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
517 cmd.resp_type = MMC_RSP_R1;
519 err = mmc_send_cmd(mmc, &cmd, NULL);
523 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
527 * The card should drive cmd and dat[0:3] low immediately
528 * after the response of cmd11, but wait 100 us to be sure
530 err = mmc_wait_dat0(mmc, 0, 100);
537 * During a signal voltage level switch, the clock must be gated
538 * for 5 ms according to the SD spec
540 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
542 err = mmc_set_signal_voltage(mmc, signal_voltage);
546 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
548 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
551 * Failure to switch is indicated by the card holding
552 * dat[0:3] low. Wait for at least 1 ms according to spec
554 err = mmc_wait_dat0(mmc, 1, 1000);
564 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
571 cmd.cmdidx = MMC_CMD_APP_CMD;
572 cmd.resp_type = MMC_RSP_R1;
575 err = mmc_send_cmd(mmc, &cmd, NULL);
580 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
581 cmd.resp_type = MMC_RSP_R3;
584 * Most cards do not answer if some reserved bits
585 * in the ocr are set. However, some controllers
586 * can set bit 7 (reserved for low voltages), but
587 * how to manage low voltages SD card is not yet
590 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
591 (mmc->cfg->voltages & 0xff8000);
593 if (mmc->version == SD_VERSION_2)
594 cmd.cmdarg |= OCR_HCS;
597 cmd.cmdarg |= OCR_S18R;
599 err = mmc_send_cmd(mmc, &cmd, NULL);
604 if (cmd.response[0] & OCR_BUSY)
613 if (mmc->version != SD_VERSION_2)
614 mmc->version = SD_VERSION_1_0;
616 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
617 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
618 cmd.resp_type = MMC_RSP_R3;
621 err = mmc_send_cmd(mmc, &cmd, NULL);
627 mmc->ocr = cmd.response[0];
629 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
630 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
632 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
638 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
644 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
649 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
650 cmd.resp_type = MMC_RSP_R3;
652 if (use_arg && !mmc_host_is_spi(mmc))
653 cmd.cmdarg = OCR_HCS |
654 (mmc->cfg->voltages &
655 (mmc->ocr & OCR_VOLTAGE_MASK)) |
656 (mmc->ocr & OCR_ACCESS_MODE);
658 err = mmc_send_cmd(mmc, &cmd, NULL);
661 mmc->ocr = cmd.response[0];
665 static int mmc_send_op_cond(struct mmc *mmc)
669 /* Some cards seem to need this */
672 /* Asking to the card its capabilities */
673 for (i = 0; i < 2; i++) {
674 err = mmc_send_op_cond_iter(mmc, i != 0);
678 /* exit if not busy (flag seems to be inverted) */
679 if (mmc->ocr & OCR_BUSY)
682 mmc->op_cond_pending = 1;
686 static int mmc_complete_op_cond(struct mmc *mmc)
693 mmc->op_cond_pending = 0;
694 if (!(mmc->ocr & OCR_BUSY)) {
695 /* Some cards seem to need this */
698 start = get_timer(0);
700 err = mmc_send_op_cond_iter(mmc, 1);
703 if (mmc->ocr & OCR_BUSY)
705 if (get_timer(start) > timeout)
711 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
712 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
713 cmd.resp_type = MMC_RSP_R3;
716 err = mmc_send_cmd(mmc, &cmd, NULL);
721 mmc->ocr = cmd.response[0];
724 mmc->version = MMC_VERSION_UNKNOWN;
726 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
733 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
736 struct mmc_data data;
739 /* Get the Card Status Register */
740 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
741 cmd.resp_type = MMC_RSP_R1;
744 data.dest = (char *)ext_csd;
746 data.blocksize = MMC_MAX_BLOCK_LEN;
747 data.flags = MMC_DATA_READ;
749 err = mmc_send_cmd(mmc, &cmd, &data);
754 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
757 unsigned int status, start;
759 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
760 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
761 (index == EXT_CSD_PART_CONF);
765 if (mmc->gen_cmd6_time)
766 timeout_ms = mmc->gen_cmd6_time * 10;
768 if (is_part_switch && mmc->part_switch_time)
769 timeout_ms = mmc->part_switch_time * 10;
771 cmd.cmdidx = MMC_CMD_SWITCH;
772 cmd.resp_type = MMC_RSP_R1b;
773 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
778 ret = mmc_send_cmd(mmc, &cmd, NULL);
779 } while (ret && retries-- > 0);
784 start = get_timer(0);
786 /* poll dat0 for rdy/busy status */
787 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
788 if (ret && ret != -ENOSYS)
792 * In cases when not allowed to poll by using CMD13 or because we aren't
793 * capable of polling by using mmc_wait_dat0, then rely on waiting the
794 * stated timeout to be sufficient.
796 if (ret == -ENOSYS && !send_status)
799 /* Finally wait until the card is ready or indicates a failure
800 * to switch. It doesn't hurt to use CMD13 here even if send_status
801 * is false, because by now (after 'timeout_ms' ms) the bus should be
805 ret = mmc_send_status(mmc, &status);
807 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
808 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
812 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
815 } while (get_timer(start) < timeout_ms);
820 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
822 return __mmc_switch(mmc, set, index, value, true);
825 int mmc_boot_wp(struct mmc *mmc)
827 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
830 #if !CONFIG_IS_ENABLED(MMC_TINY)
831 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
837 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
843 speed_bits = EXT_CSD_TIMING_HS;
845 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
847 speed_bits = EXT_CSD_TIMING_HS200;
850 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
852 speed_bits = EXT_CSD_TIMING_HS400;
855 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
857 speed_bits = EXT_CSD_TIMING_HS400;
861 speed_bits = EXT_CSD_TIMING_LEGACY;
867 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
868 speed_bits, !hsdowngrade);
872 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
873 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
875 * In case the eMMC is in HS200/HS400 mode and we are downgrading
876 * to HS mode, the card clock are still running much faster than
877 * the supported HS mode clock, so we can not reliably read out
878 * Extended CSD. Reconfigure the controller to run at HS mode.
881 mmc_select_mode(mmc, MMC_HS);
882 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
886 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
887 /* Now check to see that it worked */
888 err = mmc_send_ext_csd(mmc, test_csd);
892 /* No high-speed support */
893 if (!test_csd[EXT_CSD_HS_TIMING])
900 static int mmc_get_capabilities(struct mmc *mmc)
902 u8 *ext_csd = mmc->ext_csd;
905 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
907 if (mmc_host_is_spi(mmc))
910 /* Only version 4 supports high-speed */
911 if (mmc->version < MMC_VERSION_4)
915 pr_err("No ext_csd found!\n"); /* this should never happen */
919 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
921 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
922 mmc->cardtype = cardtype;
924 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
925 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
926 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
927 mmc->card_caps |= MMC_MODE_HS200;
930 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
931 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
932 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
933 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
934 mmc->card_caps |= MMC_MODE_HS400;
937 if (cardtype & EXT_CSD_CARD_TYPE_52) {
938 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
939 mmc->card_caps |= MMC_MODE_DDR_52MHz;
940 mmc->card_caps |= MMC_MODE_HS_52MHz;
942 if (cardtype & EXT_CSD_CARD_TYPE_26)
943 mmc->card_caps |= MMC_MODE_HS;
945 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
946 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
947 (mmc->card_caps & MMC_MODE_HS400)) {
948 mmc->card_caps |= MMC_MODE_HS400_ES;
956 static int mmc_set_capacity(struct mmc *mmc, int part_num)
960 mmc->capacity = mmc->capacity_user;
964 mmc->capacity = mmc->capacity_boot;
967 mmc->capacity = mmc->capacity_rpmb;
973 mmc->capacity = mmc->capacity_gp[part_num - 4];
979 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
984 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
990 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
992 (mmc->part_config & ~PART_ACCESS_MASK)
993 | (part_num & PART_ACCESS_MASK));
994 } while (ret && retry--);
997 * Set the capacity if the switch succeeded or was intended
998 * to return to representing the raw device.
1000 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1001 ret = mmc_set_capacity(mmc, part_num);
1002 mmc_get_blk_desc(mmc)->hwpart = part_num;
1008 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1009 int mmc_hwpart_config(struct mmc *mmc,
1010 const struct mmc_hwpart_conf *conf,
1011 enum mmc_hwpart_conf_mode mode)
1016 u32 gp_size_mult[4];
1017 u32 max_enh_size_mult;
1018 u32 tot_enh_size_mult = 0;
1021 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1023 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1026 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1027 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1028 return -EMEDIUMTYPE;
1031 if (!(mmc->part_support & PART_SUPPORT)) {
1032 pr_err("Card does not support partitioning\n");
1033 return -EMEDIUMTYPE;
1036 if (!mmc->hc_wp_grp_size) {
1037 pr_err("Card does not define HC WP group size\n");
1038 return -EMEDIUMTYPE;
1041 /* check partition alignment and total enhanced size */
1042 if (conf->user.enh_size) {
1043 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1044 conf->user.enh_start % mmc->hc_wp_grp_size) {
1045 pr_err("User data enhanced area not HC WP group "
1049 part_attrs |= EXT_CSD_ENH_USR;
1050 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1051 if (mmc->high_capacity) {
1052 enh_start_addr = conf->user.enh_start;
1054 enh_start_addr = (conf->user.enh_start << 9);
1060 tot_enh_size_mult += enh_size_mult;
1062 for (pidx = 0; pidx < 4; pidx++) {
1063 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1064 pr_err("GP%i partition not HC WP group size "
1065 "aligned\n", pidx+1);
1068 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1069 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1070 part_attrs |= EXT_CSD_ENH_GP(pidx);
1071 tot_enh_size_mult += gp_size_mult[pidx];
1075 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1076 pr_err("Card does not support enhanced attribute\n");
1077 return -EMEDIUMTYPE;
1080 err = mmc_send_ext_csd(mmc, ext_csd);
1085 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1086 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1087 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1088 if (tot_enh_size_mult > max_enh_size_mult) {
1089 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1090 tot_enh_size_mult, max_enh_size_mult);
1091 return -EMEDIUMTYPE;
1094 /* The default value of EXT_CSD_WR_REL_SET is device
1095 * dependent, the values can only be changed if the
1096 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1097 * changed only once and before partitioning is completed. */
1098 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1099 if (conf->user.wr_rel_change) {
1100 if (conf->user.wr_rel_set)
1101 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1103 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1105 for (pidx = 0; pidx < 4; pidx++) {
1106 if (conf->gp_part[pidx].wr_rel_change) {
1107 if (conf->gp_part[pidx].wr_rel_set)
1108 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1110 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1114 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1115 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1116 puts("Card does not support host controlled partition write "
1117 "reliability settings\n");
1118 return -EMEDIUMTYPE;
1121 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1122 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1123 pr_err("Card already partitioned\n");
1127 if (mode == MMC_HWPART_CONF_CHECK)
1130 /* Partitioning requires high-capacity size definitions */
1131 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1132 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1133 EXT_CSD_ERASE_GROUP_DEF, 1);
1138 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1140 #if CONFIG_IS_ENABLED(MMC_WRITE)
1141 /* update erase group size to be high-capacity */
1142 mmc->erase_grp_size =
1143 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1148 /* all OK, write the configuration */
1149 for (i = 0; i < 4; i++) {
1150 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1151 EXT_CSD_ENH_START_ADDR+i,
1152 (enh_start_addr >> (i*8)) & 0xFF);
1156 for (i = 0; i < 3; i++) {
1157 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1158 EXT_CSD_ENH_SIZE_MULT+i,
1159 (enh_size_mult >> (i*8)) & 0xFF);
1163 for (pidx = 0; pidx < 4; pidx++) {
1164 for (i = 0; i < 3; i++) {
1165 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1166 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1167 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1172 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1173 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1177 if (mode == MMC_HWPART_CONF_SET)
1180 /* The WR_REL_SET is a write-once register but shall be
1181 * written before setting PART_SETTING_COMPLETED. As it is
1182 * write-once we can only write it when completing the
1184 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1185 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1186 EXT_CSD_WR_REL_SET, wr_rel_set);
1191 /* Setting PART_SETTING_COMPLETED confirms the partition
1192 * configuration but it only becomes effective after power
1193 * cycle, so we do not adjust the partition related settings
1194 * in the mmc struct. */
1196 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1197 EXT_CSD_PARTITION_SETTING,
1198 EXT_CSD_PARTITION_SETTING_COMPLETED);
1206 #if !CONFIG_IS_ENABLED(DM_MMC)
1207 int mmc_getcd(struct mmc *mmc)
1211 cd = board_mmc_getcd(mmc);
1214 if (mmc->cfg->ops->getcd)
1215 cd = mmc->cfg->ops->getcd(mmc);
1224 #if !CONFIG_IS_ENABLED(MMC_TINY)
1225 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1228 struct mmc_data data;
1230 /* Switch the frequency */
1231 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1232 cmd.resp_type = MMC_RSP_R1;
1233 cmd.cmdarg = (mode << 31) | 0xffffff;
1234 cmd.cmdarg &= ~(0xf << (group * 4));
1235 cmd.cmdarg |= value << (group * 4);
1237 data.dest = (char *)resp;
1238 data.blocksize = 64;
1240 data.flags = MMC_DATA_READ;
1242 return mmc_send_cmd(mmc, &cmd, &data);
1245 static int sd_get_capabilities(struct mmc *mmc)
1249 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1250 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1251 struct mmc_data data;
1253 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1257 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1259 if (mmc_host_is_spi(mmc))
1262 /* Read the SCR to find out if this card supports higher speeds */
1263 cmd.cmdidx = MMC_CMD_APP_CMD;
1264 cmd.resp_type = MMC_RSP_R1;
1265 cmd.cmdarg = mmc->rca << 16;
1267 err = mmc_send_cmd(mmc, &cmd, NULL);
1272 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1273 cmd.resp_type = MMC_RSP_R1;
1279 data.dest = (char *)scr;
1282 data.flags = MMC_DATA_READ;
1284 err = mmc_send_cmd(mmc, &cmd, &data);
1293 mmc->scr[0] = __be32_to_cpu(scr[0]);
1294 mmc->scr[1] = __be32_to_cpu(scr[1]);
1296 switch ((mmc->scr[0] >> 24) & 0xf) {
1298 mmc->version = SD_VERSION_1_0;
1301 mmc->version = SD_VERSION_1_10;
1304 mmc->version = SD_VERSION_2;
1305 if ((mmc->scr[0] >> 15) & 0x1)
1306 mmc->version = SD_VERSION_3;
1309 mmc->version = SD_VERSION_1_0;
1313 if (mmc->scr[0] & SD_DATA_4BIT)
1314 mmc->card_caps |= MMC_MODE_4BIT;
1316 /* Version 1.0 doesn't support switching */
1317 if (mmc->version == SD_VERSION_1_0)
1322 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1323 (u8 *)switch_status);
1328 /* The high-speed function is busy. Try again */
1329 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1333 /* If high-speed isn't supported, we return */
1334 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1335 mmc->card_caps |= MMC_CAP(SD_HS);
1337 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1338 /* Versions before 3.0 don't support UHS modes */
1339 if (mmc->version < SD_VERSION_3)
1342 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1343 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1344 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1345 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1346 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1347 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1348 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1349 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1350 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1351 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1352 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1358 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1362 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1366 /* SD versions 1.00 and 1.01 do not support CMD 6 */
1366 if (mmc->version == SD_VERSION_1_0)
1371 speed = UHS_SDR12_BUS_SPEED;
1374 speed = HIGH_SPEED_BUS_SPEED;
1376 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1378 speed = UHS_SDR12_BUS_SPEED;
1381 speed = UHS_SDR25_BUS_SPEED;
1384 speed = UHS_SDR50_BUS_SPEED;
1387 speed = UHS_DDR50_BUS_SPEED;
1390 speed = UHS_SDR104_BUS_SPEED;
1397 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1401 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1407 static int sd_select_bus_width(struct mmc *mmc, int w)
1412 if ((w != 4) && (w != 1))
1415 cmd.cmdidx = MMC_CMD_APP_CMD;
1416 cmd.resp_type = MMC_RSP_R1;
1417 cmd.cmdarg = mmc->rca << 16;
1419 err = mmc_send_cmd(mmc, &cmd, NULL);
1423 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1424 cmd.resp_type = MMC_RSP_R1;
1429 err = mmc_send_cmd(mmc, &cmd, NULL);
1437 #if CONFIG_IS_ENABLED(MMC_WRITE)
1438 static int sd_read_ssr(struct mmc *mmc)
1440 static const unsigned int sd_au_size[] = {
1441 0, SZ_16K / 512, SZ_32K / 512,
1442 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1443 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1444 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1445 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1450 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1451 struct mmc_data data;
1453 unsigned int au, eo, et, es;
1455 cmd.cmdidx = MMC_CMD_APP_CMD;
1456 cmd.resp_type = MMC_RSP_R1;
1457 cmd.cmdarg = mmc->rca << 16;
1459 err = mmc_send_cmd(mmc, &cmd, NULL);
1460 #ifdef CONFIG_MMC_QUIRKS
1461 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1464 * It has been seen that APP_CMD may fail on the first
1465 * attempt, let's try a few more times
1468 err = mmc_send_cmd(mmc, &cmd, NULL);
1471 } while (retries--);
1477 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1478 cmd.resp_type = MMC_RSP_R1;
1482 data.dest = (char *)ssr;
1483 data.blocksize = 64;
1485 data.flags = MMC_DATA_READ;
1487 err = mmc_send_cmd(mmc, &cmd, &data);
1495 for (i = 0; i < 16; i++)
1496 ssr[i] = be32_to_cpu(ssr[i]);
1498 au = (ssr[2] >> 12) & 0xF;
1499 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1500 mmc->ssr.au = sd_au_size[au];
1501 es = (ssr[3] >> 24) & 0xFF;
1502 es |= (ssr[2] & 0xFF) << 8;
1503 et = (ssr[3] >> 18) & 0x3F;
1505 eo = (ssr[3] >> 16) & 0x3;
1506 mmc->ssr.erase_timeout = (et * 1000) / es;
1507 mmc->ssr.erase_offset = eo * 1000;
1510 pr_debug("Invalid Allocation Unit Size.\n");
1516 /* frequency bases */
1517 /* divided by 10 to be nice to platforms without floating point */
1518 static const int fbase[] = {
1525 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1526 * to platforms without floating point.
1528 static const u8 multipliers[] = {
1547 static inline int bus_width(uint cap)
1549 if (cap == MMC_MODE_8BIT)
1551 if (cap == MMC_MODE_4BIT)
1553 if (cap == MMC_MODE_1BIT)
1555 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1559 #if !CONFIG_IS_ENABLED(DM_MMC)
1560 #ifdef MMC_SUPPORTS_TUNING
1561 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1567 static int mmc_set_ios(struct mmc *mmc)
1571 if (mmc->cfg->ops->set_ios)
1572 ret = mmc->cfg->ops->set_ios(mmc);
1577 static int mmc_host_power_cycle(struct mmc *mmc)
1581 if (mmc->cfg->ops->host_power_cycle)
1582 ret = mmc->cfg->ops->host_power_cycle(mmc);
1588 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1591 if (clock > mmc->cfg->f_max)
1592 clock = mmc->cfg->f_max;
1594 if (clock < mmc->cfg->f_min)
1595 clock = mmc->cfg->f_min;
1599 mmc->clk_disable = disable;
1601 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1603 return mmc_set_ios(mmc);
1606 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1608 mmc->bus_width = width;
1610 return mmc_set_ios(mmc);
1613 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1615 * helper function to display the capabilities in a human
1616 * friendly manner. The capabilities include bus width and
1619 void mmc_dump_capabilities(const char *text, uint caps)
1623 pr_debug("%s: widths [", text);
1624 if (caps & MMC_MODE_8BIT)
1626 if (caps & MMC_MODE_4BIT)
1628 if (caps & MMC_MODE_1BIT)
1630 pr_debug("\b\b] modes [");
1631 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1632 if (MMC_CAP(mode) & caps)
1633 pr_debug("%s, ", mmc_mode_name(mode));
1634 pr_debug("\b\b]\n");
1638 struct mode_width_tuning {
1641 #ifdef MMC_SUPPORTS_TUNING
1646 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1647 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1650 case MMC_SIGNAL_VOLTAGE_000: return 0;
1651 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1652 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1653 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1658 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1662 if (mmc->signal_voltage == signal_voltage)
1665 mmc->signal_voltage = signal_voltage;
1666 err = mmc_set_ios(mmc);
1668 pr_debug("unable to set voltage (err %d)\n", err);
1673 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1679 #if !CONFIG_IS_ENABLED(MMC_TINY)
1680 static const struct mode_width_tuning sd_modes_by_pref[] = {
1681 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1682 #ifdef MMC_SUPPORTS_TUNING
1685 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1686 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1691 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1695 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1699 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1704 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1706 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1709 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1714 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1718 #define for_each_sd_mode_by_pref(caps, mwt) \
1719 for (mwt = sd_modes_by_pref;\
1720 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1722 if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the best (mode, width) pair for an SD card: intersect card and
 * host capabilities, then try each preferred mode with 4-bit before
 * 1-bit width, tuning where required. On failure a safer legacy speed
 * is restored before retrying the next candidate.
 */
1724 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1727 	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1728 	const struct mode_width_tuning *mwt;
1729 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* OCR S18R bit set means the card accepted the 1.8V switch request */
1730 	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1732 	bool uhs_en = false;
1737 	mmc_dump_capabilities("sd card", card_caps);
1738 	mmc_dump_capabilities("host", mmc->host_caps);
	/* SPI hosts only do 1-bit legacy mode */
1741 	if (mmc_host_is_spi(mmc)) {
1742 		mmc_set_bus_width(mmc, 1);
1743 		mmc_select_mode(mmc, MMC_LEGACY);
1744 		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1748 	/* Restrict card's capabilities by what the host can do */
1749 	caps = card_caps & mmc->host_caps;
1754 	for_each_sd_mode_by_pref(caps, mwt) {
1757 		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1758 			if (*w & caps & mwt->widths) {
1759 				pr_debug("trying mode %s width %d (at %d MHz)\n",
1760 					 mmc_mode_name(mwt->mode),
1762 					 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1764 				/* configure the bus width (card + host) */
1765 				err = sd_select_bus_width(mmc, bus_width(*w));
1768 				mmc_set_bus_width(mmc, bus_width(*w));
1770 				/* configure the bus mode (card) */
1771 				err = sd_set_card_speed(mmc, mwt->mode);
1775 				/* configure the bus mode (host) */
1776 				mmc_select_mode(mmc, mwt->mode);
1777 				mmc_set_clock(mmc, mmc->tran_speed,
1780 #ifdef MMC_SUPPORTS_TUNING
1781 				/* execute tuning if needed */
1782 				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1783 					err = mmc_execute_tuning(mmc,
1786 						pr_debug("tuning failed\n");
1792 #if CONFIG_IS_ENABLED(MMC_WRITE)
				/* SSR gives erase timeout info; failure is non-fatal */
1793 				err = sd_read_ssr(mmc);
1795 					pr_warn("unable to read ssr\n");
1801 				/* revert to a safer bus speed */
1802 				mmc_select_mode(mmc, MMC_LEGACY);
1803 				mmc_set_clock(mmc, mmc->tran_speed,
	/* reached only when every candidate failed */
1809 	pr_err("unable to select a mode\n");
1814  * read the compare the part of ext csd that is constant.
1815  * This can be used to check that the transfer is working
/* Sanity-check the bus configuration: re-read EXT_CSD and compare a
 * handful of read-only fields against the cached copy. */
1818 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1821 	const u8 *ext_csd = mmc->ext_csd;
1822 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	/* EXT_CSD only exists from MMC v4 onwards */
1824 	if (mmc->version < MMC_VERSION_4)
1827 	err = mmc_send_ext_csd(mmc, test_csd);
1831 	/* Only compare read only fields */
1832 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1833 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1834 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1835 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1836 	    ext_csd[EXT_CSD_REV]
1837 		== test_csd[EXT_CSD_REV] &&
1838 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1839 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1840 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1841 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1847 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select the lowest signal voltage that both the card (per its
 * EXT_CSD card type bits, depending on @mode) and @allowed_mask
 * accept, trying lowest-first via ffs(); clears each failed candidate
 * from the mask and retries.
 */
1848 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1849 				  uint32_t allowed_mask)
1857 		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1858 		    EXT_CSD_CARD_TYPE_HS400_1_8V))
1859 			card_mask |= MMC_SIGNAL_VOLTAGE_180;
1860 		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1861 		    EXT_CSD_CARD_TYPE_HS400_1_2V))
1862 			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1865 		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1866 			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1867 				     MMC_SIGNAL_VOLTAGE_180;
1868 		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1869 			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1872 		card_mask |= MMC_SIGNAL_VOLTAGE_330;
1876 	while (card_mask & allowed_mask) {
1877 		enum mmc_voltage best_match;
		/* ffs() - 1 picks the lowest set (= lowest-voltage) bit */
1879 		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1880 		if (!mmc_set_signal_voltage(mmc,  best_match))
1883 		allowed_mask &= ~best_match;
/* stub used when MMC_IO_VOLTAGE is disabled */
1889 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1890 					 uint32_t allowed_mask)
/* eMMC bus modes in descending order of preference (HS400ES first). */
1896 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1897 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1899 		.mode = MMC_HS_400_ES,
1900 		.widths = MMC_MODE_8BIT,
1903 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1906 		.widths = MMC_MODE_8BIT,
		/* HS400 is entered via HS200 tuning, hence the HS200 opcode */
1907 		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1910 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1913 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1914 		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1919 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1923 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1927 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1931 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate mmc_modes_by_pref, visiting only modes present in @caps. */
1935 #define for_each_mmc_mode_by_pref(caps, mwt) \
1936 	for (mwt = mmc_modes_by_pref;\
1937 	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1939 		if (caps & MMC_CAP(mwt->mode))
/* Map host width capability + DDR flag to the EXT_CSD BUS_WIDTH value,
 * ordered widest/DDR first so the selection loop prefers them. */
1941 static const struct ext_csd_bus_width {
1945 } ext_csd_bus_width[] = {
1946 	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1947 	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1948 	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1949 	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1950 	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1953 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Enter HS400 per the JEDEC sequence: tune in HS200, drop to HS,
 * switch to 8-bit DDR bus width, then switch the card and host clock
 * to HS400.
 */
1954 static int mmc_select_hs400(struct mmc *mmc)
1958 	/* Set timing to HS200 for tuning */
1959 	err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1963 	/* configure the bus mode (host) */
1964 	mmc_select_mode(mmc, MMC_HS_200);
1965 	mmc_set_clock(mmc, mmc->tran_speed, false);
1967 	/* execute tuning if needed */
1968 	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1970 		debug("tuning failed\n");
1974 	/* Set back to HS */
1975 	mmc_set_card_speed(mmc, MMC_HS, true);
1977 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1978 			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1982 	err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1986 	mmc_select_mode(mmc, MMC_HS_400);
1987 	err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* stub used when MMC_HS400_SUPPORT is disabled */
1994 static int mmc_select_hs400(struct mmc *mmc)
2000 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2001 #if !CONFIG_IS_ENABLED(DM_MMC)
/* non-DM fallback; DM hosts provide this op through dm_mmc ops */
2002 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * Enter HS400 Enhanced Strobe: no tuning required — switch to HS,
 * set 8-bit DDR width with the strobe flag, switch card and clock to
 * HS400ES, then enable enhanced strobe on the host.
 */
2007 static int mmc_select_hs400es(struct mmc *mmc)
2011 	err = mmc_set_card_speed(mmc, MMC_HS, true);
2015 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2016 			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2017 			 EXT_CSD_BUS_WIDTH_STROBE);
2019 		printf("switch to bus width for hs400 failed\n");
2022 	/* TODO: driver strength */
2023 	err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2027 	mmc_select_mode(mmc, MMC_HS_400_ES);
2028 	err = mmc_set_clock(mmc, mmc->tran_speed, false);
2032 	return mmc_set_enhanced_strobe(mmc);
/* stub used when MMC_HS400_ES_SUPPORT is disabled */
2035 static int mmc_select_hs400es(struct mmc *mmc)
/* Iterate ext_csd_bus_width entries matching the requested DDR flag
 * and present in @caps; trailing if() guards the caller's loop body. */
2041 #define for_each_supported_width(caps, ddr, ecbv) \
2042 	for (ecbv = ext_csd_bus_width;\
2043 	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2045 		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the best (mode, width) pair for an eMMC device. Candidates are
 * walked in preference order; for each one the lowest workable signal
 * voltage is chosen, the bus width and speed are programmed on card
 * and host, tuning is run when needed, and the result is validated by
 * re-reading EXT_CSD. Any failure reverts to a safe 1-bit legacy
 * configuration before trying the next candidate.
 */
2047 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2050 	const struct mode_width_tuning *mwt;
2051 	const struct ext_csd_bus_width *ecbw;
2054 	mmc_dump_capabilities("mmc", card_caps);
2055 	mmc_dump_capabilities("host", mmc->host_caps);
	/* SPI hosts only do 1-bit legacy mode */
2058 	if (mmc_host_is_spi(mmc)) {
2059 		mmc_set_bus_width(mmc, 1);
2060 		mmc_select_mode(mmc, MMC_LEGACY);
2061 		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2065 	/* Restrict card's capabilities by what the host can do */
2066 	card_caps &= mmc->host_caps;
2068 	/* Only version 4 of MMC supports wider bus widths */
2069 	if (mmc->version < MMC_VERSION_4)
2072 	if (!mmc->ext_csd) {
2073 		pr_debug("No ext_csd found!\n"); /* this should never happen */
2077 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2078     CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2080 	 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2081 	 * before doing anything else, since a transition from either of
2082 	 * the HS200/HS400 mode directly to legacy mode is not supported.
2084 	if (mmc->selected_mode == MMC_HS_200 ||
2085 	    mmc->selected_mode == MMC_HS_400)
2086 		mmc_set_card_speed(mmc, MMC_HS, true);
2089 	mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2091 	for_each_mmc_mode_by_pref(card_caps, mwt) {
2092 		for_each_supported_width(card_caps & mwt->widths,
2093 					 mmc_is_mode_ddr(mwt->mode), ecbw) {
2094 			enum mmc_voltage old_voltage;
2095 			pr_debug("trying mode %s width %d (at %d MHz)\n",
2096 				 mmc_mode_name(mwt->mode),
2097 				 bus_width(ecbw->cap),
2098 				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so we can restore it on failure */
2099 			old_voltage = mmc->signal_voltage;
2100 			err = mmc_set_lowest_voltage(mmc, mwt->mode,
2101 						     MMC_ALL_SIGNAL_VOLTAGE);
2105 			/* configure the bus width (card + host) */
2106 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2108 				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2111 			mmc_set_bus_width(mmc, bus_width(ecbw->cap));
			/* HS400/HS400ES have dedicated entry sequences */
2113 			if (mwt->mode == MMC_HS_400) {
2114 				err = mmc_select_hs400(mmc);
2116 					printf("Select HS400 failed %d\n", err);
2119 			} else if (mwt->mode == MMC_HS_400_ES) {
2120 				err = mmc_select_hs400es(mmc);
2122 					printf("Select HS400ES failed %d\n",
2127 				/* configure the bus speed (card) */
2128 				err = mmc_set_card_speed(mmc, mwt->mode, false);
2133 				 * configure the bus width AND the ddr mode
2134 				 * (card). The host side will be taken care
2135 				 * of in the next step
2137 				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2138 					err = mmc_switch(mmc,
2139 							 EXT_CSD_CMD_SET_NORMAL,
2141 							 ecbw->ext_csd_bits);
2146 				/* configure the bus mode (host) */
2147 				mmc_select_mode(mmc, mwt->mode);
2148 				mmc_set_clock(mmc, mmc->tran_speed,
2150 #ifdef MMC_SUPPORTS_TUNING
2152 				/* execute tuning if needed */
2154 					err = mmc_execute_tuning(mmc,
2157 						pr_debug("tuning failed\n");
2164 			/* do a transfer to check the configuration */
2165 			err = mmc_read_and_compare_ext_csd(mmc);
2169 			mmc_set_signal_voltage(mmc, old_voltage);
2170 			/* if an error occured, revert to a safer bus mode */
2171 			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2172 				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2173 			mmc_select_mode(mmc, MMC_LEGACY);
2174 			mmc_set_bus_width(mmc, 1);
	/* reached only when every candidate failed */
2178 	pr_err("unable to select a mode\n");
2184 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: one static EXT_CSD buffer instead of per-device malloc */
2185 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ specific startup: read and cache EXT_CSD, derive the spec
 * version, capacities (user/boot/RPMB/GP), partition configuration,
 * erase/WP group sizes and CMD6 timing from it. SD cards and pre-v4
 * MMC return early.
 */
2188 static int mmc_startup_v4(struct mmc *mmc)
2192 	bool has_parts = false;
2193 	bool part_completed;
	/* index of this table is the EXT_CSD_REV field */
2194 	static const u32 mmc_versions[] = {
2206 #if CONFIG_IS_ENABLED(MMC_TINY)
2207 	u8 *ext_csd = ext_csd_bkup;
2209 	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2213 	memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2215 	err = mmc_send_ext_csd(mmc, ext_csd);
2219 	/* store the ext csd for future reference */
2221 		mmc->ext_csd = ext_csd;
2223 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2225 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2228 	/* check ext_csd version and capacity */
2229 	err = mmc_send_ext_csd(mmc, ext_csd);
2233 	/* store the ext csd for future reference */
2235 		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2238 	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
	/* unknown (future) revision: bail out rather than mis-decode */
2240 	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2243 	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2245 	if (mmc->version >= MMC_VERSION_4_2) {
2247 		 * According to the JEDEC Standard, the value of
2248 		 * ext_csd's capacity is valid if the value is more
2251 		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2252 				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2253 				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2254 				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2255 		capacity *= MMC_MAX_BLOCK_LEN;
		/* SEC_CNT only trusted above 2 GiB (see JEDEC note above) */
2256 		if ((capacity >> 20) > 2 * 1024)
2257 			mmc->capacity_user = capacity;
2260 	if (mmc->version >= MMC_VERSION_4_5)
2261 		mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2263 	/* The partition data may be non-zero but it is only
2264 	 * effective if PARTITION_SETTING_COMPLETED is set in
2265 	 * EXT_CSD, so ignore any data if this bit is not set,
2266 	 * except for enabling the high-capacity group size
2267 	 * definition (see below).
2269 	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2270 			    EXT_CSD_PARTITION_SETTING_COMPLETED);
2272 	mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2273 	/* Some eMMC set the value too low so set a minimum */
2274 	if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2275 		mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2277 	/* store the partition info of emmc */
2278 	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2279 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2280 	    ext_csd[EXT_CSD_BOOT_MULT])
2281 		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2282 	if (part_completed &&
2283 	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2284 		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
	/* BOOT/RPMB multipliers are in 128 KiB units, hence << 17 */
2286 	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2288 	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2290 	for (i = 0; i < 4; i++) {
2291 		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2292 		uint mult = (ext_csd[idx + 2] << 16) +
2293 			(ext_csd[idx + 1] << 8) + ext_csd[idx];
2296 		if (!part_completed)
2298 		mmc->capacity_gp[i] = mult;
2299 		mmc->capacity_gp[i] *=
2300 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2301 		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		/* << 19: group size multipliers are in 512 KiB units */
2302 		mmc->capacity_gp[i] <<= 19;
2305 #ifndef CONFIG_SPL_BUILD
2306 	if (part_completed) {
2307 		mmc->enh_user_size =
2308 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2309 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2310 			ext_csd[EXT_CSD_ENH_SIZE_MULT];
2311 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2312 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2313 		mmc->enh_user_size <<= 19;
2314 		mmc->enh_user_start =
2315 			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2316 			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2317 			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2318 			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* high-capacity devices address in 512-byte sectors */
2319 		if (mmc->high_capacity)
2320 			mmc->enh_user_start <<= 9;
2325 	 * Host needs to enable ERASE_GRP_DEF bit if device is
2326 	 * partitioned. This bit will be lost every time after a reset
2327 	 * or power off. This will affect erase size.
2331 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2332 	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2335 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2336 				 EXT_CSD_ERASE_GROUP_DEF, 1);
		/* keep the cached copy in sync with the switch above */
2341 			ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2344 	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2345 #if CONFIG_IS_ENABLED(MMC_WRITE)
2346 		/* Read out group size from ext_csd */
2347 		mmc->erase_grp_size =
2348 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2351 		 * if high capacity and partition setting completed
2352 		 * SEC_COUNT is valid even if it is smaller than 2 GiB
2353 		 * JEDEC Standard JESD84-B45, 6.2.4
2355 		if (mmc->high_capacity && part_completed) {
2356 			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2357 				   (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2358 				   (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2359 				   (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2360 			capacity *= MMC_MAX_BLOCK_LEN;
2361 			mmc->capacity_user = capacity;
2364 #if CONFIG_IS_ENABLED(MMC_WRITE)
2366 		/* Calculate the group size from the csd value. */
2367 		int erase_gsz, erase_gmul;
2369 		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2370 		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2371 		mmc->erase_grp_size = (erase_gsz + 1)
2375 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2376 	mmc->hc_wp_grp_size = 1024
2377 		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2378 		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2381 	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2386 #if !CONFIG_IS_ENABLED(MMC_TINY)
	/* error path: drop the cached EXT_CSD so nobody reads stale data */
2389 	mmc->ext_csd = NULL;
/*
 * Bring an identified card through the standard init sequence:
 * CID -> RCA -> CSD decode (version, legacy speed, block lengths,
 * capacity) -> optional DSR -> SELECT_CARD, then v4 EXT_CSD handling,
 * capability probing, mode/width selection and finally the blk_desc
 * description strings.
 */
2394 static int mmc_startup(struct mmc *mmc)
2400 	struct blk_desc *bdesc;
2402 #ifdef CONFIG_MMC_SPI_CRC_ON
2403 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2404 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2405 		cmd.resp_type = MMC_RSP_R1;
2407 		err = mmc_send_cmd(mmc, &cmd, NULL);
2413 	/* Put the Card in Identify Mode */
2414 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2415 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2416 	cmd.resp_type = MMC_RSP_R2;
2419 	err = mmc_send_cmd(mmc, &cmd, NULL);
2421 #ifdef CONFIG_MMC_QUIRKS
2422 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2425 		 * It has been seen that SEND_CID may fail on the first
2426 		 * attempt, let's try a few more times
2429 			err = mmc_send_cmd(mmc, &cmd, NULL);
2432 		} while (retries--);
2439 	memcpy(mmc->cid, cmd.response, 16);
2442 	 * For MMC cards, set the Relative Address.
2443 	 * For SD cards, get the Relative Address.
2444 	 * This also puts the cards into Standby State
2446 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2447 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2448 		cmd.cmdarg = mmc->rca << 16;
2449 		cmd.resp_type = MMC_RSP_R6;
2451 		err = mmc_send_cmd(mmc, &cmd, NULL);
2457 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2460 	/* Get the Card-Specific Data */
2461 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2462 	cmd.resp_type = MMC_RSP_R2;
2463 	cmd.cmdarg = mmc->rca << 16;
2465 	err = mmc_send_cmd(mmc, &cmd, NULL);
2470 	mmc->csd[0] = cmd.response[0];
2471 	mmc->csd[1] = cmd.response[1];
2472 	mmc->csd[2] = cmd.response[2];
2473 	mmc->csd[3] = cmd.response[3];
	/* derive the spec version from the CSD SPEC_VERS field */
2475 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2476 		int version = (cmd.response[0] >> 26) & 0xf;
2480 			mmc->version = MMC_VERSION_1_2;
2483 			mmc->version = MMC_VERSION_1_4;
2486 			mmc->version = MMC_VERSION_2_2;
2489 			mmc->version = MMC_VERSION_3;
2492 			mmc->version = MMC_VERSION_4;
2495 			mmc->version = MMC_VERSION_1_2;
2500 	/* divide frequency by 10, since the mults are 10x bigger */
2501 	freq = fbase[(cmd.response[0] & 0x7)];
2502 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2504 	mmc->legacy_speed = freq * mult;
2505 	mmc_select_mode(mmc, MMC_LEGACY);
2507 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2508 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2509 #if CONFIG_IS_ENABLED(MMC_WRITE)
2512 		mmc->write_bl_len = mmc->read_bl_len;
2514 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
	/* CSD v2 (high capacity) vs v1 capacity encodings */
2517 	if (mmc->high_capacity) {
2518 		csize = (mmc->csd[1] & 0x3f) << 16
2519 			| (mmc->csd[2] & 0xffff0000) >> 16;
2522 		csize = (mmc->csd[1] & 0x3ff) << 2
2523 			| (mmc->csd[2] & 0xc0000000) >> 30;
2524 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2527 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2528 	mmc->capacity_user *= mmc->read_bl_len;
2529 	mmc->capacity_boot = 0;
2530 	mmc->capacity_rpmb = 0;
2531 	for (i = 0; i < 4; i++)
2532 		mmc->capacity_gp[i] = 0;
2534 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2535 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2537 #if CONFIG_IS_ENABLED(MMC_WRITE)
2538 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2539 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	/* only program a DSR if the card implements it and one was set */
2542 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2543 		cmd.cmdidx = MMC_CMD_SET_DSR;
2544 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2545 		cmd.resp_type = MMC_RSP_NONE;
2546 		if (mmc_send_cmd(mmc, &cmd, NULL))
2547 			pr_warn("MMC: SET_DSR failed\n");
2550 	/* Select the card, and put it into Transfer Mode */
2551 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2552 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2553 		cmd.resp_type = MMC_RSP_R1;
2554 		cmd.cmdarg = mmc->rca << 16;
2555 		err = mmc_send_cmd(mmc, &cmd, NULL);
2562 	 * For SD, its erase group is always one sector
2564 #if CONFIG_IS_ENABLED(MMC_WRITE)
2565 	mmc->erase_grp_size = 1;
2567 	mmc->part_config = MMCPART_NOAVAILABLE;
2569 	err = mmc_startup_v4(mmc);
2573 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2577 #if CONFIG_IS_ENABLED(MMC_TINY)
	/* MMC_TINY: stay in 1-bit legacy mode, skip capability probing */
2578 	mmc_set_clock(mmc, mmc->legacy_speed, false);
2579 	mmc_select_mode(mmc, MMC_LEGACY);
2580 	mmc_set_bus_width(mmc, 1);
2583 		err = sd_get_capabilities(mmc);
2586 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2588 		err = mmc_get_capabilities(mmc);
2591 		err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2597 	mmc->best_mode = mmc->selected_mode;
2599 	/* Fix the block length for DDR mode */
2600 	if (mmc->ddr_mode) {
2601 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2602 #if CONFIG_IS_ENABLED(MMC_WRITE)
2603 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2607 	/* fill in device description */
2608 	bdesc = mmc_get_blk_desc(mmc);
2612 	bdesc->blksz = mmc->read_bl_len;
2613 	bdesc->log2blksz = LOG2(bdesc->blksz);
2614 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2615 #if !defined(CONFIG_SPL_BUILD) || \
2616 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2617 		!CONFIG_IS_ENABLED(USE_TINY_PRINTF))
	/* decode manufacturer/serial/name/revision out of the raw CID */
2618 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2619 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2620 		(mmc->cid[3] >> 16) & 0xffff);
2621 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2622 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2623 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2624 		(mmc->cid[2] >> 24) & 0xff);
2625 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2626 		(mmc->cid[2] >> 16) & 0xf);
2628 	bdesc->vendor[0] = 0;
2629 	bdesc->product[0] = 0;
2630 	bdesc->revision[0] = 0;
2633 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/* CMD8: probe for SD v2 with a 0xaa check pattern; a matching echo
 * back marks the card as SD_VERSION_2. */
2640 static int mmc_send_if_cond(struct mmc *mmc)
2645 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2646 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2647 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2648 	cmd.resp_type = MMC_RSP_R7;
2650 	err = mmc_send_cmd(mmc, &cmd, NULL);
	/* the card must echo the check pattern back */
2655 	if ((cmd.response[0] & 0xff) != 0xaa)
2658 		mmc->version = SD_VERSION_2;
2663 #if !CONFIG_IS_ENABLED(DM_MMC)
2664 /* board-specific MMC power initializations. */
2665 __weak void board_mmc_power_init(void)
/* DM builds: look up vmmc/vqmmc regulators from the device tree
 * (missing supplies are only logged); non-DM builds call board code. */
2670 static int mmc_power_init(struct mmc *mmc)
2672 #if CONFIG_IS_ENABLED(DM_MMC)
2673 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2676 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2679 		pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2681 	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2682 					  &mmc->vqmmc_supply);
2684 		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2686 #else /* !CONFIG_DM_MMC */
2688 	 * Driver model should use a regulator, as above, rather than calling
2689 	 * out to board code.
2691 	board_mmc_power_init();
2697  * put the host in the initial state:
2698  * - turn on Vdd (card power supply)
2699  * - configure the bus width and clock to minimal values
2701 static void mmc_set_initial_state(struct mmc *mmc)
2705 	/* First try to set 3.3V. If it fails set to 1.8V */
2706 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2708 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2710 		pr_warn("mmc: failed to set signal voltage\n");
	/* minimal bus: legacy mode, 1 bit wide, clock enabled at 0 */
2712 	mmc_select_mode(mmc, MMC_LEGACY);
2713 	mmc_set_bus_width(mmc, 1);
2714 	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the vmmc regulator (DM+regulator builds only). */
2717 static int mmc_power_on(struct mmc *mmc)
2719 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2720 	if (mmc->vmmc_supply) {
2721 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2724 			puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the vmmc regulator. */
2732 static int mmc_power_off(struct mmc *mmc)
2734 	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2735 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2736 	if (mmc->vmmc_supply) {
2737 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2740 			pr_debug("Error disabling VMMC supply\n");
/* Full power cycle: off, host-side cycle, delay, then on again. */
2748 static int mmc_power_cycle(struct mmc *mmc)
2752 	ret = mmc_power_off(mmc);
2756 	ret = mmc_host_power_cycle(mmc);
2761 	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2762 	 * to be on the safer side.
2765 	return mmc_power_on(mmc);
/*
 * Power up the card and query its operating conditions: power init,
 * optional power cycle (UHS disabled if cycling is unavailable),
 * CMD0 reset, CMD8 (SD v2 probe), then SD ACMD41 or, on timeout,
 * MMC CMD1.
 */
2768 int mmc_get_op_cond(struct mmc *mmc)
2770 	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2776 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2777 	mmc_adapter_card_type_ident();
2779 	err = mmc_power_init(mmc);
2783 #ifdef CONFIG_MMC_QUIRKS
2784 	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2785 		      MMC_QUIRK_RETRY_SEND_CID |
2786 		      MMC_QUIRK_RETRY_APP_CMD;
2789 	err = mmc_power_cycle(mmc);
2792 		 * if power cycling is not supported, we should not try
2793 		 * to use the UHS modes, because we wouldn't be able to
2794 		 * recover from an error during the UHS initialization.
2796 		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2798 		mmc->host_caps &= ~UHS_CAPS;
2799 		err = mmc_power_on(mmc);
2804 #if CONFIG_IS_ENABLED(DM_MMC)
2805 	/* The device has already been probed ready for use */
2807 	/* made sure it's not NULL earlier */
2808 	err = mmc->cfg->ops->init(mmc);
2815 	mmc_set_initial_state(mmc);
2817 	/* Reset the Card */
2818 	err = mmc_go_idle(mmc);
2823 	/* The internal partition reset to user partition(0) at every CMD0 */
2824 	mmc_get_blk_desc(mmc)->hwpart = 0;
2826 	/* Test for SD version 2 */
2827 	err = mmc_send_if_cond(mmc);
2829 	/* Now try to get the SD card's operating condition */
2830 	err = sd_send_op_cond(mmc, uhs_en);
	/* UHS negotiation failed: power cycle and retry without UHS */
2831 	if (err && uhs_en) {
2833 		mmc_power_cycle(mmc);
2837 	/* If the command timed out, we check for an MMC card */
2838 	if (err == -ETIMEDOUT) {
2839 		err = mmc_send_op_cond(mmc);
2842 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2843 			pr_err("Card did not respond to voltage select!\n");
/*
 * Begin (possibly asynchronous) card init: seed host caps with the
 * always-available legacy 1-bit mode, run card detection, then query
 * the operating conditions. Completion happens in mmc_complete_init().
 */
2852 int mmc_start_init(struct mmc *mmc)
2858 	 * all hosts are capable of 1 bit bus-width and able to use the legacy
2861 	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2862 			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2863 #if CONFIG_IS_ENABLED(DM_MMC)
2864 	mmc_deferred_probe(mmc);
2866 #if !defined(CONFIG_MMC_BROKEN_CD)
2867 	no_card = mmc_getcd(mmc) == 0;
2871 #if !CONFIG_IS_ENABLED(DM_MMC)
2872 	/* we pretend there's no card when init is NULL */
2873 	no_card = no_card || (mmc->cfg->ops->init == NULL);
2877 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2878 		pr_err("MMC: no card present\n");
2883 	err = mmc_get_op_cond(mmc);
2886 		mmc->init_in_progress = 1;
/* Second half of init: finish OCR polling if pending, then run the
 * full startup sequence. */
2891 static int mmc_complete_init(struct mmc *mmc)
2895 	mmc->init_in_progress = 0;
2896 	if (mmc->op_cond_pending)
2897 		err = mmc_complete_op_cond(mmc);
2900 		err = mmc_startup(mmc);
/* Public entry: start init if not already in flight, complete it,
 * and log the elapsed time. */
2908 int mmc_init(struct mmc *mmc)
2911 	__maybe_unused ulong start;
2912 #if CONFIG_IS_ENABLED(DM_MMC)
2913 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2920 	start = get_timer(0);
2922 	if (!mmc->init_in_progress)
2923 		err = mmc_start_init(mmc);
2926 		err = mmc_complete_init(mmc);
2928 		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2933 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2934     CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2935     CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/* Downgrade the card out of high-speed modes (e.g. before OS handoff)
 * by re-running mode selection with the fast modes masked out. */
2936 int mmc_deinit(struct mmc *mmc)
2944 		caps_filtered = mmc->card_caps &
2945 			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2946 			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2947 			  MMC_CAP(UHS_SDR104));
2949 		return sd_select_mode_and_width(mmc, caps_filtered);
2951 		caps_filtered = mmc->card_caps &
2952 			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2954 		return mmc_select_mode_and_width(mmc, caps_filtered);
2959 int mmc_set_dsr(struct mmc *mmc, u16 val)
2965 /* CPU-specific MMC initializations. */
2966 __weak int cpu_mmc_init(bd_t *bis)
2971 /* board-specific MMC initializations. */
2972 __weak int board_mmc_init(bd_t *bis)
/* Mark a device for early init during mmc_initialize(). */
2977 void mmc_set_preinit(struct mmc *mmc, int preinit)
2979 	mmc->preinit = preinit;
2982 #if CONFIG_IS_ENABLED(DM_MMC)
/* DM probe: bind MMC devices in sequence order, then probe each one;
 * individual probe failures are logged but do not abort the loop. */
2983 static int mmc_probe(bd_t *bis)
2987 	struct udevice *dev;
2989 	ret = uclass_get(UCLASS_MMC, &uc);
2994 	 * Try to add them in sequence order. Really with driver model we
2995 	 * should allow holes, but the current MMC list does not allow that.
2996 	 * So if we request 0, 1, 3 we will get 0, 1, 2.
2998 	for (i = 0; ; i++) {
2999 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3003 	uclass_foreach_dev(dev, uc) {
3004 		ret = device_probe(dev);
3006 			pr_err("%s - probe failed: %d\n", dev->name, ret);
3012 static int mmc_probe(bd_t *bis)
3014 if (board_mmc_init(bis) < 0)
/* One-time subsystem init: guard against re-entry, probe all devices,
 * and print the device list outside SPL. */
3021 int mmc_initialize(bd_t *bis)
3023 	static int initialized = 0;
3025 	if (initialized)	/* Avoid initializing mmc multiple times */
3029 #if !CONFIG_IS_ENABLED(BLK)
3030 #if !CONFIG_IS_ENABLED(MMC_TINY)
3034 	ret = mmc_probe(bis);
3038 #ifndef CONFIG_SPL_BUILD
3039 	print_mmc_devices(',');
3046 #if CONFIG_IS_ENABLED(DM_MMC)
/* Look up MMC device @num via DM and run its preinit path. */
3047 int mmc_init_device(int num)
3049 	struct udevice *dev;
3053 	ret = uclass_get_device(UCLASS_MMC, num, &dev);
3057 	m = mmc_get_mmc_dev(dev);
3060 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3061 	mmc_set_preinit(m, 1);
3070 #ifdef CONFIG_CMD_BKOPS_ENABLE
3071 int mmc_set_bkops_enable(struct mmc *mmc)
3074 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3076 err = mmc_send_ext_csd(mmc, ext_csd);
3078 puts("Could not get ext_csd register values\n");
3082 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3083 puts("Background operations not supported on device\n");
3084 return -EMEDIUMTYPE;
3087 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3088 puts("Background operations already enabled\n");
3092 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3094 puts("Failed to enable manual background operations\n");
3098 puts("Enabled manual background operations\n");