1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
7 * Based vaguely on the Linux code
16 #include <dm/device-internal.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <power/regulator.h>
25 #include <linux/list.h>
27 #include "mmc_private.h"
/*
 * NOTE(review): this chunk appears to be a line-sampled/corrupted extract of
 * U-Boot's drivers/mmc/mmc.c — most function bodies are missing lines, and
 * every line is prefixed with its original line number. Code is left
 * byte-identical; comments only are added. Confirm against upstream source.
 */
/* Default CMD6 (SWITCH) timeout used when EXT_CSD does not provide one. */
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
31 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
33 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM stub/impl for waiting on DAT0 level; fragment — body not visible. */
35 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
/* Weak board hook: boards may override to report write-protect state. */
40 __weak int board_mmc_getwp(struct mmc *mmc)
/* Query write-protect: board hook first, then controller ops (fragment). */
45 int mmc_getwp(struct mmc *mmc)
49 wp = board_mmc_getwp(mmc);
52 if (mmc->cfg->ops->getwp)
53 wp = mmc->cfg->ops->getwp(mmc);
/* Weak board hook for card-detect; presumably overridden per board. */
61 __weak int board_mmc_getcd(struct mmc *mmc)
67 #ifdef CONFIG_MMC_TRACE
/* Trace helper: log command index and argument before it is sent. */
68 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
70 printf("CMD_SEND:%d\n", cmd->cmdidx);
71 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
/*
 * Trace helper: log the return code and decode the response according to
 * the expected response type (fragment — case labels not visible here).
 */
74 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
80 printf("\t\tRET\t\t\t %d\n", ret);
82 switch (cmd->resp_type) {
84 printf("\t\tMMC_RSP_NONE\n");
87 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
91 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
/* R2 (CSD/CID): 128-bit response spread across response[0..3]. */
95 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
97 printf("\t\t \t\t 0x%08x \n",
99 printf("\t\t \t\t 0x%08x \n",
101 printf("\t\t \t\t 0x%08x \n",
/* Also hex-dump the raw response bytes, 4 per row, MSB first (*ptr--). */
104 printf("\t\t\t\t\tDUMPING DATA\n");
105 for (i = 0; i < 4; i++) {
107 printf("\t\t\t\t\t%03d - ", i*4);
108 ptr = (u8 *)&cmd->response[i];
110 for (j = 0; j < 4; j++)
111 printf("%02x ", *ptr--);
116 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
120 printf("\t\tERROR MMC rsp not supported\n");
/* Decode CURRENT_STATE field (bits 12:9) from a CMD13 status response. */
126 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
130 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
131 printf("CURR STATE:%d\n", status);
135 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/* Map a bus_mode enum value to a human-readable name for debug output. */
136 const char *mmc_mode_name(enum bus_mode mode)
138 static const char *const names[] = {
139 [MMC_LEGACY] = "MMC legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
151 [MMC_HS_400_ES] = "HS400ES (200MHz)",
/* Guard against out-of-range modes before indexing the table. */
154 if (mode >= MMC_MODES_END)
155 return "Unknown mode";
/*
 * Map a bus mode to its nominal maximum clock in Hz; MMC_LEGACY defers
 * to the per-card legacy_speed instead of the table.
 */
161 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
163 static const int freqs[] = {
164 [MMC_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
176 [MMC_HS_400_ES] = 200000000,
179 if (mode == MMC_LEGACY)
180 return mmc->legacy_speed;
181 else if (mode >= MMC_MODES_END)
/* Record the selected mode and derive tran_speed / ddr_mode from it. */
187 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
189 mmc->selected_mode = mode;
190 mmc->tran_speed = mmc_mode2freq(mmc, mode);
191 mmc->ddr_mode = mmc_is_mode_ddr(mode);
192 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
193 mmc->tran_speed / 1000000);
197 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM path: dispatch a command via the controller ops, with tracing. */
198 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
202 mmmc_trace_before_send(mmc, cmd);
203 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
204 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * Issue CMD13 (SEND_STATUS) and return the card status register through
 * *status. RCA is only meaningful in native (non-SPI) mode.
 */
210 int mmc_send_status(struct mmc *mmc, unsigned int *status)
213 int err, retries = 5;
215 cmd.cmdidx = MMC_CMD_SEND_STATUS;
216 cmd.resp_type = MMC_RSP_R1;
217 if (!mmc_host_is_spi(mmc))
218 cmd.cmdarg = mmc->rca << 16;
221 err = mmc_send_cmd(mmc, &cmd, NULL);
223 mmc_trace_state(mmc, &cmd);
224 *status = cmd.response[0];
228 mmc_trace_state(mmc, &cmd);
/*
 * Poll until the card reports ready-for-data and has left the programming
 * state, first trying DAT0 busy-wait, then CMD13 (fragment — loop
 * structure not fully visible).
 */
232 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
237 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
242 err = mmc_send_status(mmc, &status);
246 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
247 (status & MMC_STATUS_CURR_STATE) !=
/* Any error bit set in the status word is fatal for the operation. */
251 if (status & MMC_STATUS_MASK) {
252 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
253 pr_err("Status Error: 0x%08x\n", status)
258 if (timeout_ms-- <= 0)
264 if (timeout_ms <= 0) {
265 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
266 pr_err("Timeout waiting card ready\n");
/* Issue CMD16 (SET_BLOCKLEN); retried under MMC_QUIRK_RETRY_SET_BLOCKLEN. */
274 int mmc_set_blocklen(struct mmc *mmc, int len)
282 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
283 cmd.resp_type = MMC_RSP_R1;
286 err = mmc_send_cmd(mmc, &cmd, NULL);
288 #ifdef CONFIG_MMC_QUIRKS
289 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
292 * It has been seen that SET_BLOCKLEN may fail on the first
293 * attempt, let's try a few more time
296 err = mmc_send_cmd(mmc, &cmd, NULL);
306 #ifdef MMC_SUPPORTS_TUNING
/* Standard 64-byte tuning block pattern for 4-bit bus (SD/eMMC spec). */
307 static const u8 tuning_blk_pattern_4bit[] = {
308 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
309 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
310 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
311 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
312 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
313 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
314 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
315 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard 128-byte tuning block pattern for 8-bit bus (eMMC HS200). */
318 static const u8 tuning_blk_pattern_8bit[] = {
319 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
320 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
321 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
322 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
323 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
324 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
325 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
326 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
327 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
328 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
329 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
330 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
331 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
332 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
333 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
334 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Read one tuning block with the given opcode and compare it to the
 * expected pattern for the current bus width. Returns non-zero when the
 * received data does not match (tuning point is bad).
 */
337 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
340 struct mmc_data data;
341 const u8 *tuning_block_pattern;
344 if (mmc->bus_width == 8) {
345 tuning_block_pattern = tuning_blk_pattern_8bit;
346 size = sizeof(tuning_blk_pattern_8bit);
347 } else if (mmc->bus_width == 4) {
348 tuning_block_pattern = tuning_blk_pattern_4bit;
349 size = sizeof(tuning_blk_pattern_4bit);
/* Receive buffer must be cache-line aligned for DMA-capable hosts. */
354 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
358 cmd.resp_type = MMC_RSP_R1;
360 data.dest = (void *)data_buf;
362 data.blocksize = size;
363 data.flags = MMC_DATA_READ;
365 err = mmc_send_cmd(mmc, &cmd, &data);
369 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * Read blkcnt blocks starting at 'start' into dst. Uses single- or
 * multiple-block read depending on count; high-capacity cards address by
 * block number, older cards by byte offset (start * read_bl_len).
 */
376 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
380 struct mmc_data data;
383 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
385 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
387 if (mmc->high_capacity)
390 cmd.cmdarg = start * mmc->read_bl_len;
392 cmd.resp_type = MMC_RSP_R1;
395 data.blocks = blkcnt;
396 data.blocksize = mmc->read_bl_len;
397 data.flags = MMC_DATA_READ;
399 if (mmc_send_cmd(mmc, &cmd, &data))
/* Multi-block reads must be terminated with CMD12 (STOP_TRANSMISSION). */
403 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
405 cmd.resp_type = MMC_RSP_R1b;
406 if (mmc_send_cmd(mmc, &cmd, NULL)) {
407 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
408 pr_err("mmc fail to send stop cmd\n");
417 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Max blocks per transfer: controller override if present, else cfg->b_max. */
418 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
420 if (mmc->cfg->ops->get_b_max)
421 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
423 return mmc->cfg->b_max;
427 #if CONFIG_IS_ENABLED(BLK)
/* Block-device read entry point; signature differs between DM/BLK and legacy. */
428 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
430 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
434 #if CONFIG_IS_ENABLED(BLK)
435 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
437 int dev_num = block_dev->devnum;
439 lbaint_t cur, blocks_todo = blkcnt;
445 struct mmc *mmc = find_mmc_device(dev_num);
/* Make sure the requested hardware partition is selected first. */
449 if (CONFIG_IS_ENABLED(MMC_TINY))
450 err = mmc_switch_part(mmc, block_dev->hwpart);
452 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads past the end of the device. */
457 if ((start + blkcnt) > block_dev->lba) {
458 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
459 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
460 start + blkcnt, block_dev->lba);
465 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
466 pr_debug("%s: Failed to set blocklen\n", __func__);
/* Chunk the transfer by the host's per-command block limit. */
470 b_max = mmc_get_b_max(mmc, dst, blkcnt);
473 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
474 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
475 pr_debug("%s: Failed to read blocks\n", __func__);
480 dst += cur * mmc->read_bl_len;
481 } while (blocks_todo > 0);
/* CMD0: reset the card to idle state; no response expected. */
486 static int mmc_go_idle(struct mmc *mmc)
493 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
495 cmd.resp_type = MMC_RSP_NONE;
497 err = mmc_send_cmd(mmc, &cmd, NULL);
507 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the UHS voltage switch sequence (CMD11 + clock gating) to move
 * the card's signaling to 1.8V; a plain 3.3V request just sets the host
 * IO voltage without CMD11.
 */
508 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
514 * Send CMD11 only if the request is to switch the card to
517 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
518 return mmc_set_signal_voltage(mmc, signal_voltage);
520 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
522 cmd.resp_type = MMC_RSP_R1;
524 err = mmc_send_cmd(mmc, &cmd, NULL);
528 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
532 * The card should drive cmd and dat[0:3] low immediately
533 * after the response of cmd11, but wait 100 us to be sure
535 err = mmc_wait_dat0(mmc, 0, 100);
542 * During a signal voltage level switch, the clock must be gated
543 * for 5 ms according to the SD spec
545 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
547 err = mmc_set_signal_voltage(mmc, signal_voltage);
551 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
553 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
556 * Failure to switch is indicated by the card holding
557 * dat[0:3] low. Wait for at least 1 ms according to spec
559 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * ACMD41 init loop for SD cards: negotiate operating voltage, request
 * high-capacity (HCS) and optionally 1.8V switching (S18R), then read
 * the OCR and latch card version / high_capacity.
 */
569 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
576 cmd.cmdidx = MMC_CMD_APP_CMD;
577 cmd.resp_type = MMC_RSP_R1;
580 err = mmc_send_cmd(mmc, &cmd, NULL);
585 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
586 cmd.resp_type = MMC_RSP_R3;
589 * Most cards do not answer if some reserved bits
590 * in the ocr are set. However, Some controller
591 * can set bit 7 (reserved for low voltages), but
592 * how to manage low voltages SD card is not yet
595 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
596 (mmc->cfg->voltages & 0xff8000);
598 if (mmc->version == SD_VERSION_2)
599 cmd.cmdarg |= OCR_HCS;
602 cmd.cmdarg |= OCR_S18R;
604 err = mmc_send_cmd(mmc, &cmd, NULL);
/* OCR_BUSY set means initialization is complete. */
609 if (cmd.response[0] & OCR_BUSY)
618 if (mmc->version != SD_VERSION_2)
619 mmc->version = SD_VERSION_1_0;
621 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
622 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
623 cmd.resp_type = MMC_RSP_R3;
626 err = mmc_send_cmd(mmc, &cmd, NULL);
632 mmc->ocr = cmd.response[0];
634 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Card accepted S18R (and reports ready+HCS mask 0x41000000): switch to 1.8V. */
635 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
637 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
643 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * One CMD1 (SEND_OP_COND) iteration for eMMC. When use_arg is set (and
 * not SPI), advertise HCS plus the voltage window common to host and card.
 * Updates mmc->ocr from the R3 response.
 */
649 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
654 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
655 cmd.resp_type = MMC_RSP_R3;
657 if (use_arg && !mmc_host_is_spi(mmc))
658 cmd.cmdarg = OCR_HCS |
659 (mmc->cfg->voltages &
660 (mmc->ocr & OCR_VOLTAGE_MASK)) |
661 (mmc->ocr & OCR_ACCESS_MODE);
663 err = mmc_send_cmd(mmc, &cmd, NULL);
666 mmc->ocr = cmd.response[0];
/*
 * Start eMMC initialization: loop CMD1 until the card reports not-busy
 * or the timeout expires; completion is deferred (op_cond_pending).
 */
670 static int mmc_send_op_cond(struct mmc *mmc)
676 /* Some cards seem to need this */
679 start = get_timer(0);
680 /* Asking to the card its capabilities */
682 err = mmc_send_op_cond_iter(mmc, i != 0);
686 /* exit if not busy (flag seems to be inverted) */
687 if (mmc->ocr & OCR_BUSY)
690 if (get_timer(start) > timeout)
694 mmc->op_cond_pending = 1;
/*
 * Finish the deferred CMD1 loop; for SPI hosts also read the OCR via
 * CMD58, then latch version and high_capacity from the final OCR.
 */
698 static int mmc_complete_op_cond(struct mmc *mmc)
705 mmc->op_cond_pending = 0;
706 if (!(mmc->ocr & OCR_BUSY)) {
707 /* Some cards seem to need this */
710 start = get_timer(0);
712 err = mmc_send_op_cond_iter(mmc, 1);
715 if (mmc->ocr & OCR_BUSY)
717 if (get_timer(start) > timeout)
723 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
724 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
725 cmd.resp_type = MMC_RSP_R3;
728 err = mmc_send_cmd(mmc, &cmd, NULL);
733 mmc->ocr = cmd.response[0];
736 mmc->version = MMC_VERSION_UNKNOWN;
738 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/* CMD8: read the 512-byte EXT_CSD register into ext_csd. */
745 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
748 struct mmc_data data;
751 /* Get the Card Status Register */
752 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
753 cmd.resp_type = MMC_RSP_R1;
756 data.dest = (char *)ext_csd;
758 data.blocksize = MMC_MAX_BLOCK_LEN;
759 data.flags = MMC_DATA_READ;
761 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * Core CMD6 (SWITCH) implementation: write one EXT_CSD byte, then wait for
 * the card to leave busy via DAT0 polling or CMD13, honoring the
 * generic/partition switch timeouts from EXT_CSD when available.
 */
766 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
769 unsigned int status, start;
771 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
772 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
773 (index == EXT_CSD_PART_CONF);
/* EXT_CSD timeouts are in 10 ms units, hence the * 10. */
777 if (mmc->gen_cmd6_time)
778 timeout_ms = mmc->gen_cmd6_time * 10;
780 if (is_part_switch && mmc->part_switch_time)
781 timeout_ms = mmc->part_switch_time * 10;
783 cmd.cmdidx = MMC_CMD_SWITCH;
784 cmd.resp_type = MMC_RSP_R1b;
785 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
790 ret = mmc_send_cmd(mmc, &cmd, NULL);
791 } while (ret && retries-- > 0);
796 start = get_timer(0);
798 /* poll dat0 for rdy/buys status */
799 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
/* -ENOSYS means the host cannot poll DAT0; fall back to CMD13 below. */
800 if (ret && ret != -ENOSYS)
804 * In cases when not allowed to poll by using CMD13 or because we aren't
805 * capable of polling by using mmc_wait_dat0, then rely on waiting the
806 * stated timeout to be sufficient.
808 if (ret == -ENOSYS && !send_status)
811 /* Finally wait until the card is ready or indicates a failure
812 * to switch. It doesn't hurt to use CMD13 here even if send_status
813 * is false, because by now (after 'timeout_ms' ms) the bus should be
817 ret = mmc_send_status(mmc, &status);
819 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
820 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
824 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
827 } while (get_timer(start) < timeout_ms);
/* Public wrapper: CMD6 with status polling enabled. */
832 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
834 return __mmc_switch(mmc, set, index, value, true);
/* Power-on write-protect the boot partitions (EXT_CSD BOOT_WP = 1). */
837 int mmc_boot_wp(struct mmc *mmc)
839 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
842 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the eMMC HS_TIMING byte for the requested bus mode. When
 * downgrading from HS200/HS400 the host clock is dropped to HS first so
 * the follow-up EXT_CSD read is reliable; for HS/HS_52 the switch is
 * verified by re-reading EXT_CSD.
 */
843 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
849 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
855 speed_bits = EXT_CSD_TIMING_HS;
857 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
859 speed_bits = EXT_CSD_TIMING_HS200;
862 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
864 speed_bits = EXT_CSD_TIMING_HS400;
867 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
869 speed_bits = EXT_CSD_TIMING_HS400;
873 speed_bits = EXT_CSD_TIMING_LEGACY;
/* Skip CMD13 polling during a downgrade — the bus is not reliable yet. */
879 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
880 speed_bits, !hsdowngrade);
884 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
885 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
887 * In case the eMMC is in HS200/HS400 mode and we are downgrading
888 * to HS mode, the card clock are still running much faster than
889 * the supported HS mode clock, so we can not reliably read out
890 * Extended CSD. Reconfigure the controller to run at HS mode.
893 mmc_select_mode(mmc, MMC_HS);
894 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
898 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
899 /* Now check to see that it worked */
900 err = mmc_send_ext_csd(mmc, test_csd);
904 /* No high-speed support */
905 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Derive mmc->card_caps from the EXT_CSD CARD_TYPE byte: bus widths,
 * HS/DDR52, and (when enabled) HS200/HS400/HS400ES support.
 */
912 static int mmc_get_capabilities(struct mmc *mmc)
914 u8 *ext_csd = mmc->ext_csd;
917 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
919 if (mmc_host_is_spi(mmc))
922 /* Only version 4 supports high-speed */
923 if (mmc->version < MMC_VERSION_4)
927 pr_err("No ext_csd found!\n"); /* this should never happen */
931 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
933 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
934 mmc->cardtype = cardtype;
936 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
937 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
938 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
939 mmc->card_caps |= MMC_MODE_HS200;
942 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
943 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
944 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
945 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
946 mmc->card_caps |= MMC_MODE_HS400;
949 if (cardtype & EXT_CSD_CARD_TYPE_52) {
950 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
951 mmc->card_caps |= MMC_MODE_DDR_52MHz;
952 mmc->card_caps |= MMC_MODE_HS_52MHz;
954 if (cardtype & EXT_CSD_CARD_TYPE_26)
955 mmc->card_caps |= MMC_MODE_HS;
957 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* HS400ES additionally requires enhanced-strobe support in EXT_CSD. */
958 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
959 (mmc->card_caps & MMC_MODE_HS400)) {
960 mmc->card_caps |= MMC_MODE_HS400_ES;
/*
 * Set mmc->capacity for the selected hardware partition (user area, boot
 * partitions, RPMB, or GP1..GP4) and update the block descriptor's lba.
 */
968 static int mmc_set_capacity(struct mmc *mmc, int part_num)
972 mmc->capacity = mmc->capacity_user;
976 mmc->capacity = mmc->capacity_boot;
979 mmc->capacity = mmc->capacity_rpmb;
/* GP partitions are numbered 4..7 here; index into capacity_gp[0..3]. */
985 mmc->capacity = mmc->capacity_gp[part_num - 4];
991 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * Switch the active hardware partition via EXT_CSD PART_CONF (retried),
 * then refresh capacity/hwpart bookkeeping on success — or when the
 * switch failed but the target was the raw user area (part 0).
 */
996 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1002 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1004 (mmc->part_config & ~PART_ACCESS_MASK)
1005 | (part_num & PART_ACCESS_MASK));
1006 } while (ret && retry--);
1009 * Set the capacity if the switch succeeded or was intended
1010 * to return to representing the raw device.
1012 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1013 ret = mmc_set_capacity(mmc, part_num);
1014 mmc_get_blk_desc(mmc)->hwpart = part_num;
1020 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Apply an eMMC hardware-partitioning configuration (enhanced user area,
 * GP partitions, write-reliability). Validates alignment and size limits
 * against EXT_CSD, then writes the one-time partition setup registers.
 * 'mode' selects check-only, set, or set+complete (irreversible).
 */
1021 int mmc_hwpart_config(struct mmc *mmc,
1022 const struct mmc_hwpart_conf *conf,
1023 enum mmc_hwpart_conf_mode mode)
1028 u32 gp_size_mult[4];
1029 u32 max_enh_size_mult;
1030 u32 tot_enh_size_mult = 0;
1033 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1035 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
/* Hardware partitioning needs eMMC >= 4.41; SD cards are rejected. */
1038 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1039 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1040 return -EMEDIUMTYPE;
1043 if (!(mmc->part_support & PART_SUPPORT)) {
1044 pr_err("Card does not support partitioning\n");
1045 return -EMEDIUMTYPE;
1048 if (!mmc->hc_wp_grp_size) {
1049 pr_err("Card does not define HC WP group size\n");
1050 return -EMEDIUMTYPE;
1053 /* check partition alignment and total enhanced size */
1054 if (conf->user.enh_size) {
1055 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1056 conf->user.enh_start % mmc->hc_wp_grp_size) {
1057 pr_err("User data enhanced area not HC WP group "
1061 part_attrs |= EXT_CSD_ENH_USR;
1062 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1063 if (mmc->high_capacity) {
1064 enh_start_addr = conf->user.enh_start;
/* Byte-addressed cards: convert the sector start to a byte address. */
1066 enh_start_addr = (conf->user.enh_start << 9);
1072 tot_enh_size_mult += enh_size_mult;
1074 for (pidx = 0; pidx < 4; pidx++) {
1075 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1076 pr_err("GP%i partition not HC WP group size "
1077 "aligned\n", pidx+1);
1080 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1081 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1082 part_attrs |= EXT_CSD_ENH_GP(pidx);
1083 tot_enh_size_mult += gp_size_mult[pidx];
1087 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1088 pr_err("Card does not support enhanced attribute\n");
1089 return -EMEDIUMTYPE;
1092 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field in EXT_CSD. */
1097 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1098 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1099 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1100 if (tot_enh_size_mult > max_enh_size_mult) {
1101 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1102 tot_enh_size_mult, max_enh_size_mult);
1103 return -EMEDIUMTYPE;
1106 /* The default value of EXT_CSD_WR_REL_SET is device
1107 * dependent, the values can only be changed if the
1108 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1109 * changed only once and before partitioning is completed. */
1110 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1111 if (conf->user.wr_rel_change) {
1112 if (conf->user.wr_rel_set)
1113 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1115 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1117 for (pidx = 0; pidx < 4; pidx++) {
1118 if (conf->gp_part[pidx].wr_rel_change) {
1119 if (conf->gp_part[pidx].wr_rel_set)
1120 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1122 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1126 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1127 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1128 puts("Card does not support host controlled partition write "
1129 "reliability settings\n");
1130 return -EMEDIUMTYPE;
/* Partition setup registers are one-time programmable: refuse if done. */
1133 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1134 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1135 pr_err("Card already partitioned\n");
1139 if (mode == MMC_HWPART_CONF_CHECK)
1142 /* Partitioning requires high-capacity size definitions */
1143 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1144 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1145 EXT_CSD_ERASE_GROUP_DEF, 1);
1150 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1152 #if CONFIG_IS_ENABLED(MMC_WRITE)
1153 /* update erase group size to be high-capacity */
1154 mmc->erase_grp_size =
1155 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1160 /* all OK, write the configuration */
1161 for (i = 0; i < 4; i++) {
1162 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1163 EXT_CSD_ENH_START_ADDR+i,
1164 (enh_start_addr >> (i*8)) & 0xFF);
1168 for (i = 0; i < 3; i++) {
1169 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1170 EXT_CSD_ENH_SIZE_MULT+i,
1171 (enh_size_mult >> (i*8)) & 0xFF);
1175 for (pidx = 0; pidx < 4; pidx++) {
1176 for (i = 0; i < 3; i++) {
1177 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1178 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1179 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1184 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1185 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1189 if (mode == MMC_HWPART_CONF_SET)
1192 /* The WR_REL_SET is a write-once register but shall be
1193 * written before setting PART_SETTING_COMPLETED. As it is
1194 * write-once we can only write it when completing the
1196 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1197 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1198 EXT_CSD_WR_REL_SET, wr_rel_set);
1203 /* Setting PART_SETTING_COMPLETED confirms the partition
1204 * configuration but it only becomes effective after power
1205 * cycle, so we do not adjust the partition related settings
1206 * in the mmc struct. */
1208 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1209 EXT_CSD_PARTITION_SETTING,
1210 EXT_CSD_PARTITION_SETTING_COMPLETED);
1218 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM card-detect: board hook first, then controller getcd op. */
1219 int mmc_getcd(struct mmc *mmc)
1223 cd = board_mmc_getcd(mmc);
1226 if (mmc->cfg->ops->getcd)
1227 cd = mmc->cfg->ops->getcd(mmc);
1236 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * CMD6 for SD: check (mode=0) or switch (mode=1) one function group,
 * leaving the other groups at "no change" (0xf). The 64-byte switch
 * status block is returned in resp.
 */
1237 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1240 struct mmc_data data;
1242 /* Switch the frequency */
1243 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1244 cmd.resp_type = MMC_RSP_R1;
1245 cmd.cmdarg = (mode << 31) | 0xffffff;
1246 cmd.cmdarg &= ~(0xf << (group * 4));
1247 cmd.cmdarg |= value << (group * 4);
1249 data.dest = (char *)resp;
1250 data.blocksize = 64;
1252 data.flags = MMC_DATA_READ;
1254 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * Discover SD card capabilities: read the SCR (version, 4-bit support),
 * then probe CMD6 for high-speed and, for SD 3.0 cards, the UHS bus modes.
 */
1257 static int sd_get_capabilities(struct mmc *mmc)
1261 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1262 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1263 struct mmc_data data;
1265 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1269 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1271 if (mmc_host_is_spi(mmc))
1274 /* Read the SCR to find out if this card supports higher speeds */
1275 cmd.cmdidx = MMC_CMD_APP_CMD;
1276 cmd.resp_type = MMC_RSP_R1;
1277 cmd.cmdarg = mmc->rca << 16;
1279 err = mmc_send_cmd(mmc, &cmd, NULL);
1284 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1285 cmd.resp_type = MMC_RSP_R1;
1291 data.dest = (char *)scr;
1294 data.flags = MMC_DATA_READ;
1296 err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is big-endian on the wire; convert to host order. */
1305 mmc->scr[0] = __be32_to_cpu(scr[0]);
1306 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC field (SCR bits 59:56) selects the physical-layer version. */
1308 switch ((mmc->scr[0] >> 24) & 0xf) {
1310 mmc->version = SD_VERSION_1_0;
1313 mmc->version = SD_VERSION_1_10;
1316 mmc->version = SD_VERSION_2;
1317 if ((mmc->scr[0] >> 15) & 0x1)
1318 mmc->version = SD_VERSION_3;
1321 mmc->version = SD_VERSION_1_0;
1325 if (mmc->scr[0] & SD_DATA_4BIT)
1326 mmc->card_caps |= MMC_MODE_4BIT;
1328 /* Version 1.0 doesn't support switching */
1329 if (mmc->version == SD_VERSION_1_0)
1334 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1335 (u8 *)switch_status);
1340 /* The high-speed function is busy. Try again */
1341 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1345 /* If high-speed isn't supported, we return */
1346 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1347 mmc->card_caps |= MMC_CAP(SD_HS)
1349 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1350 /* Version before 3.0 don't support UHS modes */
1351 if (mmc->version < SD_VERSION_3)
/* Function group 1 support bits (UHS bus speeds) live in bits 20:16. */
1354 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1355 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1356 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1357 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1358 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1359 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1360 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1361 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1362 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1363 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1364 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * Switch an SD card to the access mode (function group 1) matching the
 * requested bus mode, then verify the switch result in the status block.
 */
1370 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1374 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1377 /* SD version 1.00 and 1.01 does not support CMD 6 */
1378 if (mmc->version == SD_VERSION_1_0)
1383 speed = UHS_SDR12_BUS_SPEED;
1386 speed = HIGH_SPEED_BUS_SPEED;
1388 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1390 speed = UHS_SDR12_BUS_SPEED;
1393 speed = UHS_SDR25_BUS_SPEED;
1396 speed = UHS_SDR50_BUS_SPEED;
1399 speed = UHS_DDR50_BUS_SPEED;
1402 speed = UHS_SDR104_BUS_SPEED;
1409 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* The selected function is echoed in status bits 379:376; must match. */
1413 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/* ACMD6: set the card's bus width to 1 or 4 bits. */
1419 static int sd_select_bus_width(struct mmc *mmc, int w)
1424 if ((w != 4) && (w != 1))
1427 cmd.cmdidx = MMC_CMD_APP_CMD;
1428 cmd.resp_type = MMC_RSP_R1;
1429 cmd.cmdarg = mmc->rca << 16;
1431 err = mmc_send_cmd(mmc, &cmd, NULL);
1435 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1436 cmd.resp_type = MMC_RSP_R1;
1441 err = mmc_send_cmd(mmc, &cmd, NULL);
1449 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the SD Status (ACMD13) and extract allocation-unit size and erase
 * timing, used to compute per-AU erase timeout/offset for writes.
 */
1450 static int sd_read_ssr(struct mmc *mmc)
/* AU_SIZE code -> AU size in 512-byte sectors (SD spec table). */
1452 static const unsigned int sd_au_size[] = {
1453 0, SZ_16K / 512, SZ_32K / 512,
1454 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1455 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1456 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1457 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1462 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1463 struct mmc_data data;
1465 unsigned int au, eo, et, es;
1467 cmd.cmdidx = MMC_CMD_APP_CMD;
1468 cmd.resp_type = MMC_RSP_R1;
1469 cmd.cmdarg = mmc->rca << 16;
1471 err = mmc_send_cmd(mmc, &cmd, NULL);
1472 #ifdef CONFIG_MMC_QUIRKS
1473 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1476 * It has been seen that APP_CMD may fail on the first
1477 * attempt, let's try a few more times
1480 err = mmc_send_cmd(mmc, &cmd, NULL);
1483 } while (retries--);
1489 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1490 cmd.resp_type = MMC_RSP_R1;
1494 data.dest = (char *)ssr;
1495 data.blocksize = 64;
1497 data.flags = MMC_DATA_READ;
1499 err = mmc_send_cmd(mmc, &cmd, &data);
/* SSR is big-endian; convert the whole 64-byte block to host order. */
1507 for (i = 0; i < 16; i++)
1508 ssr[i] = be32_to_cpu(ssr[i]);
1510 au = (ssr[2] >> 12) & 0xF;
1511 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1512 mmc->ssr.au = sd_au_size[au];
1513 es = (ssr[3] >> 24) & 0xFF;
1514 es |= (ssr[2] & 0xFF) << 8;
1515 et = (ssr[3] >> 18) & 0x3F;
1517 eo = (ssr[3] >> 16) & 0x3;
1518 mmc->ssr.erase_timeout = (et * 1000) / es;
1519 mmc->ssr.erase_offset = eo * 1000;
1522 pr_debug("Invalid Allocation Unit Size.\n");
1528 /* frequency bases */
1529 /* divided by 10 to be nice to platforms without floating point */
1530 static const int fbase[] = {
1537 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1538 * to platforms without floating point.
1540 static const u8 multipliers[] = {
/*
 * Translate a MMC_MODE_*BIT capability flag to its width in bits.
 * NOTE(review): the warning string below misspells "width" as "witdh" —
 * a runtime-string fix, left untouched here.
 */
1559 static inline int bus_width(uint cap)
1561 if (cap == MMC_MODE_8BIT)
1563 if (cap == MMC_MODE_4BIT)
1565 if (cap == MMC_MODE_1BIT)
1567 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1571 #if !CONFIG_IS_ENABLED(DM_MMC)
1572 #ifdef MMC_SUPPORTS_TUNING
/* Non-DM tuning entry point; fragment — body not visible here. */
1573 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
/* Push the current clock/width/voltage settings to the controller. */
1579 static int mmc_set_ios(struct mmc *mmc)
1583 if (mmc->cfg->ops->set_ios)
1584 ret = mmc->cfg->ops->set_ios(mmc);
/* Power-cycle the card via the controller op, if the host provides one. */
1589 static int mmc_host_power_cycle(struct mmc *mmc)
1593 if (mmc->cfg->ops->host_power_cycle)
1594 ret = mmc->cfg->ops->host_power_cycle(mmc);
/*
 * Set the bus clock, clamped to the host's [f_min, f_max] range, and
 * record whether the clock is gated; applied via mmc_set_ios().
 */
1600 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1603 if (clock > mmc->cfg->f_max)
1604 clock = mmc->cfg->f_max;
1606 if (clock < mmc->cfg->f_min)
1607 clock = mmc->cfg->f_min;
1611 mmc->clk_disable = disable;
1613 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1615 return mmc_set_ios(mmc);
/* Record the bus width and apply it via mmc_set_ios(). */
1618 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1620 mmc->bus_width = width;
1622 return mmc_set_ios(mmc);
1625 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1627 * helper function to display the capabilities in a human
1628 * friendly manner. The capabilities include bus width and
/* Debug dump of a capability mask: supported widths and bus modes. */
1631 void mmc_dump_capabilities(const char *text, uint caps)
1635 pr_debug("%s: widths [", text);
1636 if (caps & MMC_MODE_8BIT)
1638 if (caps & MMC_MODE_4BIT)
1640 if (caps & MMC_MODE_1BIT)
1642 pr_debug("\b\b] modes [");
1643 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1644 if (MMC_CAP(mode) & caps)
1645 pr_debug("%s, ", mmc_mode_name(mode));
1646 pr_debug("\b\b]\n");
1650 struct mode_width_tuning {
1653 #ifdef MMC_SUPPORTS_TUNING
1658 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1659 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1662 case MMC_SIGNAL_VOLTAGE_000: return 0;
1663 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1664 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1665 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1670 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1674 if (mmc->signal_voltage == signal_voltage)
1677 mmc->signal_voltage = signal_voltage;
1678 err = mmc_set_ios(mmc);
1680 pr_debug("unable to set voltage (err %d)\n", err);
1685 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1691 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* SD bus modes ordered fastest-first; sd_select_mode_and_width() walks
 * this table and takes the first mode both card and host support.
 */
1692 static const struct mode_width_tuning sd_modes_by_pref[] = {
1693 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1694 #ifdef MMC_SUPPORTS_TUNING
1697 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1698 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1703 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1707 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1711 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1716 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1718 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1721 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1726 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate over sd_modes_by_pref, visiting only the entries whose
 * MMC_CAP(mode) bit is present in @caps.
 */
1730 #define for_each_sd_mode_by_pref(caps, mwt) \
1731 for (mwt = sd_modes_by_pref;\
1732 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1734 if (caps & MMC_CAP(mwt->mode))
/* Pick the fastest SD bus mode/width supported by both @card_caps and the
 * host, program card and host accordingly, and run tuning when required.
 * SPI hosts are forced to 1-bit legacy mode.  On total failure the bus is
 * reverted to MMC_LEGACY and an error is returned.
 */
1736 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
/* widths tried widest-first for each candidate mode */
1739 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1740 const struct mode_width_tuning *mwt;
1741 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS only possible if the card acknowledged 1.8V switching (S18R) */
1742 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1744 bool uhs_en = false;
1749 mmc_dump_capabilities("sd card", card_caps);
1750 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI transport: fixed 1-bit legacy bus, nothing to negotiate */
1753 if (mmc_host_is_spi(mmc)) {
1754 mmc_set_bus_width(mmc, 1);
1755 mmc_select_mode(mmc, MMC_LEGACY);
1756 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1757 #if CONFIG_IS_ENABLED(MMC_WRITE)
1758 err = sd_read_ssr(mmc);
1760 pr_warn("unable to read ssr\n");
1765 /* Restrict card's capabilities by what the host can do */
1766 caps = card_caps & mmc->host_caps;
1771 for_each_sd_mode_by_pref(caps, mwt) {
1774 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1775 if (*w & caps & mwt->widths) {
1776 pr_debug("trying mode %s width %d (at %d MHz)\n",
1777 mmc_mode_name(mwt->mode),
1779 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1781 /* configure the bus width (card + host) */
1782 err = sd_select_bus_width(mmc, bus_width(*w));
1785 mmc_set_bus_width(mmc, bus_width(*w));
1787 /* configure the bus mode (card) */
1788 err = sd_set_card_speed(mmc, mwt->mode);
1792 /* configure the bus mode (host) */
1793 mmc_select_mode(mmc, mwt->mode);
1794 mmc_set_clock(mmc, mmc->tran_speed,
1797 #ifdef MMC_SUPPORTS_TUNING
1798 /* execute tuning if needed */
1799 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1800 err = mmc_execute_tuning(mmc,
1803 pr_debug("tuning failed\n");
1809 #if CONFIG_IS_ENABLED(MMC_WRITE)
1810 err = sd_read_ssr(mmc);
1812 pr_warn("unable to read ssr\n");
1818 /* revert to a safer bus speed */
1819 mmc_select_mode(mmc, MMC_LEGACY);
1820 mmc_set_clock(mmc, mmc->tran_speed,
1826 pr_err("unable to select a mode\n");
1831  * read the compare the part of ext csd that is constant.
1832  * This can be used to check that the transfer is working
/* Re-read EXT_CSD and compare a handful of read-only fields against the
 * cached copy in mmc->ext_csd — a cheap data-transfer sanity check after
 * a bus mode/width change.  Pre-v4 cards have no EXT_CSD and pass trivially.
 */
1835 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1838 const u8 *ext_csd = mmc->ext_csd;
1839 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1841 if (mmc->version < MMC_VERSION_4)
1844 err = mmc_send_ext_csd(mmc, test_csd);
1848 /* Only compare read only fields */
1849 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1850 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1851 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1852 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1853 ext_csd[EXT_CSD_REV]
1854 == test_csd[EXT_CSD_REV] &&
1855 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1856 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1857 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1858 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1864 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Build the mask of signal voltages the card's type byte advertises for
 * @mode, intersect with @allowed_mask, and try each candidate from the
 * lowest upward (ffs picks the lowest set bit) until one sticks.
 */
1865 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1866 uint32_t allowed_mask)
1874 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1875 EXT_CSD_CARD_TYPE_HS400_1_8V))
1876 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1877 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1878 EXT_CSD_CARD_TYPE_HS400_1_2V))
1879 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1882 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1883 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1884 MMC_SIGNAL_VOLTAGE_180;
1885 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1886 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1889 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1893 while (card_mask & allowed_mask) {
1894 enum mmc_voltage best_match;
1896 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1897 if (!mmc_set_signal_voltage(mmc, best_match))
/* this voltage failed — drop it and try the next one */
1900 allowed_mask &= ~best_match;
/* Stub used when MMC_IO_VOLTAGE is disabled. */
1906 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1907 uint32_t allowed_mask)
/* eMMC bus modes ordered fastest-first; mmc_select_mode_and_width()
 * walks this table and takes the first mode both card and host support.
 */
1913 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1914 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1916 .mode = MMC_HS_400_ES,
1917 .widths = MMC_MODE_8BIT,
1920 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1923 .widths = MMC_MODE_8BIT,
1924 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1927 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1930 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1931 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1936 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1940 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1944 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1948 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate over mmc_modes_by_pref, visiting only entries present in @caps */
1952 #define for_each_mmc_mode_by_pref(caps, mwt) \
1953 for (mwt = mmc_modes_by_pref;\
1954 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1956 if (caps & MMC_CAP(mwt->mode))
/* Map a host width capability (+ DDR flag) to the EXT_CSD BUS_WIDTH
 * byte value to program into the card; ordered widest/DDR first.
 */
1958 static const struct ext_csd_bus_width {
1962 } ext_csd_bus_width[] = {
1963 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1964 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1965 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1966 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1967 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1970 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/* HS400 bring-up sequence: tune in HS200 first (HS400 itself cannot be
 * tuned), drop back to HS, switch the card to 8-bit DDR, then switch
 * card and host to HS400.
 */
1971 static int mmc_select_hs400(struct mmc *mmc)
1975 /* Set timing to HS200 for tuning */
1976 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1980 /* configure the bus mode (host) */
1981 mmc_select_mode(mmc, MMC_HS_200);
1982 mmc_set_clock(mmc, mmc->tran_speed, false);
1984 /* execute tuning if needed */
1985 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1987 debug("tuning failed\n");
1991 /* Set back to HS */
1992 mmc_set_card_speed(mmc, MMC_HS, true);
1994 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1995 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1999 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
2003 mmc_select_mode(mmc, MMC_HS_400);
2004 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub used when MMC_HS400_SUPPORT is disabled. */
2011 static int mmc_select_hs400(struct mmc *mmc)
2017 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2018 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM fallback for the enhanced-strobe host hook. */
2019 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/* HS400 Enhanced Strobe bring-up: no tuning needed — switch to HS,
 * program 8-bit DDR + strobe in EXT_CSD, then switch to HS400ES and
 * enable enhanced strobe on the host side.
 */
2024 static int mmc_select_hs400es(struct mmc *mmc)
2028 err = mmc_set_card_speed(mmc, MMC_HS, true);
2032 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2033 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2034 EXT_CSD_BUS_WIDTH_STROBE);
2036 printf("switch to bus width for hs400 failed\n");
2039 /* TODO: driver strength */
2040 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2044 mmc_select_mode(mmc, MMC_HS_400_ES);
2045 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2049 return mmc_set_enhanced_strobe(mmc);
/* Stub used when MMC_HS400_ES_SUPPORT is disabled. */
2052 static int mmc_select_hs400es(struct mmc *mmc)
/* Iterate over ext_csd_bus_width[], visiting entries matching the
 * requested DDR flag whose width capability bit is present in @caps.
 */
2058 #define for_each_supported_width(caps, ddr, ecbv) \
2059 for (ecbv = ext_csd_bus_width;\
2060 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2062 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/* Pick the fastest eMMC bus mode/width supported by both @card_caps and
 * the host, program voltage, card and host, tune when required, and
 * verify the link by re-reading EXT_CSD.  On any per-candidate failure
 * the previous voltage and a 1-bit legacy bus are restored before the
 * next candidate is tried; SPI hosts are forced to 1-bit legacy.
 */
2064 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2067 const struct mode_width_tuning *mwt;
2068 const struct ext_csd_bus_width *ecbw;
2071 mmc_dump_capabilities("mmc", card_caps);
2072 mmc_dump_capabilities("host", mmc->host_caps);
2075 if (mmc_host_is_spi(mmc)) {
2076 mmc_set_bus_width(mmc, 1);
2077 mmc_select_mode(mmc, MMC_LEGACY);
2078 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2082 /* Restrict card's capabilities by what the host can do */
2083 card_caps &= mmc->host_caps;
2085 /* Only version 4 of MMC supports wider bus widths */
2086 if (mmc->version < MMC_VERSION_4)
2089 if (!mmc->ext_csd) {
2090 pr_debug("No ext_csd found!\n"); /* this should never happen */
2094 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2095 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2097  * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2098  * before doing anything else, since a transition from either of
2099  * the HS200/HS400 mode directly to legacy mode is not supported.
2101 if (mmc->selected_mode == MMC_HS_200 ||
2102 mmc->selected_mode == MMC_HS_400)
2103 mmc_set_card_speed(mmc, MMC_HS, true);
2106 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2108 for_each_mmc_mode_by_pref(card_caps, mwt) {
2109 for_each_supported_width(card_caps & mwt->widths,
2110 mmc_is_mode_ddr(mwt->mode), ecbw) {
2111 enum mmc_voltage old_voltage;
2112 pr_debug("trying mode %s width %d (at %d MHz)\n",
2113 mmc_mode_name(mwt->mode),
2114 bus_width(ecbw->cap),
2115 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so it can be restored on failure */
2116 old_voltage = mmc->signal_voltage;
2117 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2118 MMC_ALL_SIGNAL_VOLTAGE);
2122 /* configure the bus width (card + host) */
2123 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
/* SDR width first; the DDR flag is applied after the speed switch */
2125 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2128 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400/HS400ES need their own multi-step bring-up */
2130 if (mwt->mode == MMC_HS_400) {
2131 err = mmc_select_hs400(mmc);
2133 printf("Select HS400 failed %d\n", err);
2136 } else if (mwt->mode == MMC_HS_400_ES) {
2137 err = mmc_select_hs400es(mmc);
2139 printf("Select HS400ES failed %d\n",
2144 /* configure the bus speed (card) */
2145 err = mmc_set_card_speed(mmc, mwt->mode, false);
2150  * configure the bus width AND the ddr mode
2151  * (card). The host side will be taken care
2152  * of in the next step
2154 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2155 err = mmc_switch(mmc,
2156 EXT_CSD_CMD_SET_NORMAL,
2158 ecbw->ext_csd_bits);
2163 /* configure the bus mode (host) */
2164 mmc_select_mode(mmc, mwt->mode);
2165 mmc_set_clock(mmc, mmc->tran_speed,
2167 #ifdef MMC_SUPPORTS_TUNING
2169 /* execute tuning if needed */
2171 err = mmc_execute_tuning(mmc,
2174 pr_debug("tuning failed\n");
2181 /* do a transfer to check the configuration */
2182 err = mmc_read_and_compare_ext_csd(mmc);
2186 mmc_set_signal_voltage(mmc, old_voltage);
2187 /* if an error occurred, revert to a safer bus mode */
2188 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2189 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2190 mmc_select_mode(mmc, MMC_LEGACY);
2191 mmc_set_bus_width(mmc, 1);
2195 pr_err("unable to select a mode\n");
2201 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY avoids malloc: one static, cache-aligned EXT_CSD buffer */
2202 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/* Version-4+ eMMC startup: read EXT_CSD, cache it in mmc->ext_csd,
 * and derive version, capacities, partition layout, erase/WP group
 * sizes and timing parameters from it.  No-op for SD or pre-v4 MMC.
 */
2205 static int mmc_startup_v4(struct mmc *mmc)
2209 bool has_parts = false;
2210 bool part_completed;
/* EXT_CSD_REV byte indexes into this table */
2211 static const u32 mmc_versions[] = {
2223 #if CONFIG_IS_ENABLED(MMC_TINY)
2224 u8 *ext_csd = ext_csd_bkup;
2226 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2230 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2232 err = mmc_send_ext_csd(mmc, ext_csd);
2236 /* store the ext csd for future reference */
2238 mmc->ext_csd = ext_csd;
2240 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2242 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2245 /* check ext_csd version and capacity */
2246 err = mmc_send_ext_csd(mmc, ext_csd);
2250 /* store the ext csd for future reference */
2252 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2255 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2257 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2260 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2262 if (mmc->version >= MMC_VERSION_4_2) {
2264  * According to the JEDEC Standard, the value of
2265  * ext_csd's capacity is valid if the value is more
/* SEC_CNT is a little-endian 32-bit sector count */
2268 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2269 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2270 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2271 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2272 capacity *= MMC_MAX_BLOCK_LEN;
/* only trust SEC_CNT for devices larger than 2 GiB */
2273 if ((capacity >> 20) > 2 * 1024)
2274 mmc->capacity_user = capacity;
2277 if (mmc->version >= MMC_VERSION_4_5)
2278 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2280 /* The partition data may be non-zero but it is only
2281  * effective if PARTITION_SETTING_COMPLETED is set in
2282  * EXT_CSD, so ignore any data if this bit is not set,
2283  * except for enabling the high-capacity group size
2284  * definition (see below).
2286 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2287 EXT_CSD_PARTITION_SETTING_COMPLETED);
2289 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2290 /* Some eMMC set the value too low so set a minimum */
2291 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2292 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2294 /* store the partition info of emmc */
2295 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2296 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2297 ext_csd[EXT_CSD_BOOT_MULT])
2298 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2299 if (part_completed &&
2300 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2301 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot/RPMB sizes are given in 128 KiB units (<< 17) */
2303 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2305 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* the four general-purpose partitions, 3 size bytes each */
2307 for (i = 0; i < 4; i++) {
2308 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2309 uint mult = (ext_csd[idx + 2] << 16) +
2310 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2313 if (!part_completed)
2315 mmc->capacity_gp[i] = mult;
2316 mmc->capacity_gp[i] *=
2317 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2318 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* group unit is 512 KiB (<< 19) */
2319 mmc->capacity_gp[i] <<= 19;
2322 #ifndef CONFIG_SPL_BUILD
2323 if (part_completed) {
2324 mmc->enh_user_size =
2325 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2326 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2327 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2328 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2329 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2330 mmc->enh_user_size <<= 19;
2331 mmc->enh_user_start =
2332 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2333 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2334 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2335 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2336 if (mmc->high_capacity)
2337 mmc->enh_user_start <<= 9;
2342  * Host needs to enable ERASE_GRP_DEF bit if device is
2343  * partitioned. This bit will be lost every time after a reset
2344  * or power off. This will affect erase size.
2348 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2349 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2352 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2353 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy in sync with the switch we just did */
2358 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2361 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2362 #if CONFIG_IS_ENABLED(MMC_WRITE)
2363 /* Read out group size from ext_csd */
2364 mmc->erase_grp_size =
2365 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2368  * if high capacity and partition setting completed
2369  * SEC_COUNT is valid even if it is smaller than 2 GiB
2370  * JEDEC Standard JESD84-B45, 6.2.4
2372 if (mmc->high_capacity && part_completed) {
2373 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2374 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2375 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2376 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2377 capacity *= MMC_MAX_BLOCK_LEN;
2378 mmc->capacity_user = capacity;
2381 #if CONFIG_IS_ENABLED(MMC_WRITE)
2383 /* Calculate the group size from the csd value. */
2384 int erase_gsz, erase_gmul;
2386 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2387 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2388 mmc->erase_grp_size = (erase_gsz + 1)
2392 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2393 mmc->hc_wp_grp_size = 1024
2394 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2395 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2398 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2403 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached EXT_CSD pointer */
2406 mmc->ext_csd = NULL;
/* Card identification and startup: walk the card through CID/RCA/CSD,
 * derive version, block length and capacity from the CSD, select the
 * card into Transfer state, run version-4 setup (mmc_startup_v4), then
 * negotiate the best bus mode/width and fill in the block descriptor.
 */
2411 static int mmc_startup(struct mmc *mmc)
2417 struct blk_desc *bdesc;
2419 #ifdef CONFIG_MMC_SPI_CRC_ON
2420 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2421 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2422 cmd.resp_type = MMC_RSP_R1;
2424 err = mmc_send_cmd(mmc, &cmd, NULL);
2430 /* Put the Card in Identify Mode */
2431 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2432 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2433 cmd.resp_type = MMC_RSP_R2;
2436 err = mmc_send_cmd(mmc, &cmd, NULL);
2438 #ifdef CONFIG_MMC_QUIRKS
2439 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2442  * It has been seen that SEND_CID may fail on the first
2443  * attempt, let's try a few more time
2446 err = mmc_send_cmd(mmc, &cmd, NULL);
2449 } while (retries--);
2456 memcpy(mmc->cid, cmd.response, 16);
2459  * For MMC cards, set the Relative Address.
2460  * For SD cards, get the Relative Address.
2461  * This also puts the cards into Standby State
2463 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2464 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2465 cmd.cmdarg = mmc->rca << 16;
2466 cmd.resp_type = MMC_RSP_R6;
2468 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD assigns us an RCA in the response */
2474 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2477 /* Get the Card-Specific Data */
2478 cmd.cmdidx = MMC_CMD_SEND_CSD;
2479 cmd.resp_type = MMC_RSP_R2;
2480 cmd.cmdarg = mmc->rca << 16;
2482 err = mmc_send_cmd(mmc, &cmd, NULL);
2487 mmc->csd[0] = cmd.response[0];
2488 mmc->csd[1] = cmd.response[1];
2489 mmc->csd[2] = cmd.response[2];
2490 mmc->csd[3] = cmd.response[3];
/* derive MMC spec version from the CSD SPEC_VERS field */
2492 if (mmc->version == MMC_VERSION_UNKNOWN) {
2493 int version = (cmd.response[0] >> 26) & 0xf;
2497 mmc->version = MMC_VERSION_1_2;
2500 mmc->version = MMC_VERSION_1_4;
2503 mmc->version = MMC_VERSION_2_2;
2506 mmc->version = MMC_VERSION_3;
2509 mmc->version = MMC_VERSION_4;
2512 mmc->version = MMC_VERSION_1_2;
2517 /* divide frequency by 10, since the mults are 10x bigger */
2518 freq = fbase[(cmd.response[0] & 0x7)];
2519 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2521 mmc->legacy_speed = freq * mult;
2522 mmc_select_mode(mmc, MMC_LEGACY);
2524 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2525 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2526 #if CONFIG_IS_ENABLED(MMC_WRITE)
2529 mmc->write_bl_len = mmc->read_bl_len;
2531 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* capacity from CSD: C_SIZE/C_SIZE_MULT layouts differ by capacity class */
2534 if (mmc->high_capacity) {
2535 csize = (mmc->csd[1] & 0x3f) << 16
2536 | (mmc->csd[2] & 0xffff0000) >> 16;
2539 csize = (mmc->csd[1] & 0x3ff) << 2
2540 | (mmc->csd[2] & 0xc0000000) >> 30;
2541 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2544 mmc->capacity_user = (csize + 1) << (cmult + 2);
2545 mmc->capacity_user *= mmc->read_bl_len;
2546 mmc->capacity_boot = 0;
2547 mmc->capacity_rpmb = 0;
2548 for (i = 0; i < 4; i++)
2549 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the core supports */
2551 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2552 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2554 #if CONFIG_IS_ENABLED(MMC_WRITE)
2555 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2556 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only if the card implements it and one was provided */
2559 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2560 cmd.cmdidx = MMC_CMD_SET_DSR;
2561 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2562 cmd.resp_type = MMC_RSP_NONE;
2563 if (mmc_send_cmd(mmc, &cmd, NULL))
2564 pr_warn("MMC: SET_DSR failed\n");
2567 /* Select the card, and put it into Transfer Mode */
2568 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2569 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2570 cmd.resp_type = MMC_RSP_R1;
2571 cmd.cmdarg = mmc->rca << 16;
2572 err = mmc_send_cmd(mmc, &cmd, NULL);
2579  * For SD, its erase group is always one sector
2581 #if CONFIG_IS_ENABLED(MMC_WRITE)
2582 mmc->erase_grp_size = 1;
2584 mmc->part_config = MMCPART_NOAVAILABLE;
2586 err = mmc_startup_v4(mmc);
2590 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2594 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, skip mode negotiation */
2595 mmc_set_clock(mmc, mmc->legacy_speed, false);
2596 mmc_select_mode(mmc, MMC_LEGACY);
2597 mmc_set_bus_width(mmc, 1);
2600 err = sd_get_capabilities(mmc);
2603 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2605 err = mmc_get_capabilities(mmc);
2608 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2614 mmc->best_mode = mmc->selected_mode;
2616 /* Fix the block length for DDR mode */
2617 if (mmc->ddr_mode) {
2618 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2619 #if CONFIG_IS_ENABLED(MMC_WRITE)
2620 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2624 /* fill in device description */
2625 bdesc = mmc_get_blk_desc(mmc);
2629 bdesc->blksz = mmc->read_bl_len;
2630 bdesc->log2blksz = LOG2(bdesc->blksz);
2631 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2632 #if !defined(CONFIG_SPL_BUILD) || \
2633 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2634 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* human-readable vendor/product/revision decoded from the CID */
2635 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2636 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2637 (mmc->cid[3] >> 16) & 0xffff);
2638 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2639 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2640 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2641 (mmc->cid[2] >> 24) & 0xff);
2642 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2643 (mmc->cid[2] >> 16) & 0xf);
2645 bdesc->vendor[0] = 0;
2646 bdesc->product[0] = 0;
2647 bdesc->revision[0] = 0;
2650 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/* Send CMD8 (SEND_IF_COND) with check pattern 0xaa; a matching echo in
 * the response identifies an SD version 2.00+ card.
 */
2657 static int mmc_send_if_cond(struct mmc *mmc)
2662 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2663 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2664 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2665 cmd.resp_type = MMC_RSP_R7;
2667 err = mmc_send_cmd(mmc, &cmd, NULL);
/* card must echo the 0xaa check pattern back */
2672 if ((cmd.response[0] & 0xff) != 0xaa)
2675 mmc->version = SD_VERSION_2;
2680 #if !CONFIG_IS_ENABLED(DM_MMC)
2681 /* board-specific MMC power initializations. */
2682 __weak void board_mmc_power_init(void)
/* Look up the card's supply regulators (DM) or fall back to the weak
 * board hook (non-DM).  Missing regulators are only logged, not fatal.
 */
2687 static int mmc_power_init(struct mmc *mmc)
2689 #if CONFIG_IS_ENABLED(DM_MMC)
2690 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2693 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2696 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2698 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2699 &mmc->vqmmc_supply);
2701 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2703 #else /* !CONFIG_DM_MMC */
2705  * Driver model should use a regulator, as above, rather than calling
2706  * out to board code.
2708 board_mmc_power_init();
2714  * put the host in the initial state:
2715  * - turn on Vdd (card power supply)
2716  * - configure the bus width and clock to minimal values
2718 static void mmc_set_initial_state(struct mmc *mmc)
2722 /* First try to set 3.3V. If it fails set to 1.8V */
2723 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2725 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2727 pr_warn("mmc: failed to set signal voltage\n");
/* slowest/narrowest bus: legacy mode, 1 bit, minimum clock */
2729 mmc_select_mode(mmc, MMC_LEGACY);
2730 mmc_set_bus_width(mmc, 1);
2731 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the VMMC regulator (when driver model provides one). */
2734 static int mmc_power_on(struct mmc *mmc)
2736 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2737 if (mmc->vmmc_supply) {
2738 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2741 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the VMMC regulator. */
2749 static int mmc_power_off(struct mmc *mmc)
2751 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2752 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2753 if (mmc->vmmc_supply) {
2754 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2757 pr_debug("Error disabling VMMC supply\n");
/* Full power cycle: off, host-side cycle hook, delay, then on again.
 * Needed to recover a card from a failed UHS voltage switch.
 */
2765 static int mmc_power_cycle(struct mmc *mmc)
2769 ret = mmc_power_off(mmc);
2773 ret = mmc_host_power_cycle(mmc);
2778  * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2779  * to be on the safer side.
2782 return mmc_power_on(mmc);
/* Power up the card and run the operating-conditions handshake:
 * CMD0 reset, CMD8 (SD v2 detection), then ACMD41 for SD — retrying
 * without UHS after a power cycle if the UHS attempt fails — or CMD1
 * for eMMC when the SD path times out.
 */
2785 int mmc_get_op_cond(struct mmc *mmc)
2787 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2793 err = mmc_power_init(mmc);
2797 #ifdef CONFIG_MMC_QUIRKS
/* enable all retry quirks by default when quirks are compiled in */
2798 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2799 MMC_QUIRK_RETRY_SEND_CID |
2800 MMC_QUIRK_RETRY_APP_CMD;
2803 err = mmc_power_cycle(mmc);
2806  * if power cycling is not supported, we should not try
2807  * to use the UHS modes, because we wouldn't be able to
2808  * recover from an error during the UHS initialization.
2810 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2812 mmc->host_caps &= ~UHS_CAPS;
2813 err = mmc_power_on(mmc);
2818 #if CONFIG_IS_ENABLED(DM_MMC)
2819 /* The device has already been probed ready for use */
2821 /* made sure it's not NULL earlier */
2822 err = mmc->cfg->ops->init(mmc);
2829 mmc_set_initial_state(mmc);
2831 /* Reset the Card */
2832 err = mmc_go_idle(mmc);
2837 /* The internal partition reset to user partition(0) at every CMD0 */
2838 mmc_get_blk_desc(mmc)->hwpart = 0;
2840 /* Test for SD version 2 */
2841 err = mmc_send_if_cond(mmc);
2843 /* Now try to get the SD card's operating condition */
2844 err = sd_send_op_cond(mmc, uhs_en);
/* UHS attempt failed: power cycle and retry without UHS */
2845 if (err && uhs_en) {
2847 mmc_power_cycle(mmc);
2851 /* If the command timed out, we check for an MMC card */
2852 if (err == -ETIMEDOUT) {
2853 err = mmc_send_op_cond(mmc);
2856 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2857 pr_err("Card did not respond to voltage select!\n");
/* Begin (possibly asynchronous) card initialization: seed the host
 * capability mask, check card presence, and run the op-cond handshake.
 * Completion continues in mmc_complete_init().
 */
2866 int mmc_start_init(struct mmc *mmc)
2872  * all hosts are capable of 1 bit bus-width and able to use the legacy
2875 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
/* NOTE(review): MMC_CAP(MMC_LEGACY) is OR'd in twice here — redundant
 * but harmless; the second term was presumably meant to be dropped.
 */
2876 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2877 #if CONFIG_IS_ENABLED(DM_MMC)
2878 mmc_deferred_probe(mmc);
2880 #if !defined(CONFIG_MMC_BROKEN_CD)
2881 no_card = mmc_getcd(mmc) == 0;
2885 #if !CONFIG_IS_ENABLED(DM_MMC)
2886 /* we pretend there's no card when init is NULL */
2887 no_card = no_card || (mmc->cfg->ops->init == NULL);
2891 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2892 pr_err("MMC: no card present\n");
2897 err = mmc_get_op_cond(mmc);
2900 mmc->init_in_progress = 1;
/* Finish an initialization started by mmc_start_init(): complete the
 * pending op-cond if any, then run the full startup sequence.
 */
2905 static int mmc_complete_init(struct mmc *mmc)
2909 mmc->init_in_progress = 0;
2910 if (mmc->op_cond_pending)
2911 err = mmc_complete_op_cond(mmc);
2914 err = mmc_startup(mmc);
/* Public entry point: start init if not already in progress, then
 * complete it; logs elapsed time at info level.
 */
2922 int mmc_init(struct mmc *mmc)
2925 __maybe_unused ulong start;
2926 #if CONFIG_IS_ENABLED(DM_MMC)
2927 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2934 start = get_timer(0);
2936 if (!mmc->init_in_progress)
2937 err = mmc_start_init(mmc);
2940 err = mmc_complete_init(mmc);
2942 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2947 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2948 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2949 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/* Downgrade the card to a non-high-speed mode before handing it off
 * (e.g. to an OS): re-run mode selection with the UHS modes masked out
 * for SD, or with HS200/HS400 masked out for eMMC.
 */
2950 int mmc_deinit(struct mmc *mmc)
2958 caps_filtered = mmc->card_caps &
2959 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2960 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2961 MMC_CAP(UHS_SDR104));
2963 return sd_select_mode_and_width(mmc, caps_filtered);
2965 caps_filtered = mmc->card_caps &
2966 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2968 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Record the DSR value to be programmed during startup. */
2973 int mmc_set_dsr(struct mmc *mmc, u16 val)
2979 /* CPU-specific MMC initializations */
2980 __weak int cpu_mmc_init(struct bd_info *bis)
2985 /* board-specific MMC initializations. */
2986 __weak int board_mmc_init(struct bd_info *bis)
/* Mark the device for pre-initialization at scan time. */
2991 void mmc_set_preinit(struct mmc *mmc, int preinit)
2993 mmc->preinit = preinit;
2996 #if CONFIG_IS_ENABLED(DM_MMC)
/* DM variant: bind MMC devices in sequence order, then probe each one. */
2997 static int mmc_probe(struct bd_info *bis)
3001 struct udevice *dev;
3003 ret = uclass_get(UCLASS_MMC, &uc);
3008  * Try to add them in sequence order. Really with driver model we
3009  * should allow holes, but the current MMC list does not allow that.
3010  * So if we request 0, 1, 3 we will get 0, 1, 2.
3012 for (i = 0; ; i++) {
3013 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3017 uclass_foreach_dev(dev, uc) {
3018 ret = device_probe(dev);
3020 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM variant: defer to the board hook. */
3026 static int mmc_probe(struct bd_info *bis)
3028 if (board_mmc_init(bis) < 0)
/* One-time MMC subsystem initialization; safe to call repeatedly. */
3035 int mmc_initialize(struct bd_info *bis)
3037 static int initialized = 0;
3039 if (initialized) /* Avoid initializing mmc multiple times */
3043 #if !CONFIG_IS_ENABLED(BLK)
3044 #if !CONFIG_IS_ENABLED(MMC_TINY)
3048 ret = mmc_probe(bis);
3052 #ifndef CONFIG_SPL_BUILD
3053 print_mmc_devices(',');
3060 #if CONFIG_IS_ENABLED(DM_MMC)
/* Look up MMC device @num via driver model and fetch its struct mmc. */
3061 int mmc_init_device(int num)
3063 struct udevice *dev;
3067 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3071 m = mmc_get_mmc_dev(dev);
3081 #ifdef CONFIG_CMD_BKOPS_ENABLE
/* Enable manual background operations on an eMMC device: check
 * BKOPS_SUPPORT in EXT_CSD, skip if already enabled, otherwise set
 * BKOPS_EN via CMD6.  Note: BKOPS_EN is a one-time-programmable field.
 */
3082 int mmc_set_bkops_enable(struct mmc *mmc)
3085 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3087 err = mmc_send_ext_csd(mmc, ext_csd);
3089 puts("Could not get ext_csd register values\n");
3093 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3094 puts("Background operations not supported on device\n");
3095 return -EMEDIUMTYPE;
3098 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3099 puts("Background operations already enabled\n");
3103 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3105 puts("Failed to enable manual background operations\n");
3109 puts("Enabled manual background operations\n");
3115 __weak int mmc_get_env_dev(void)
3117 #ifdef CONFIG_SYS_MMC_ENV_DEV
3118 return CONFIG_SYS_MMC_ENV_DEV;