1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
7 * Based vaguely on the Linux code
16 #include <dm/device-internal.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <power/regulator.h>
25 #include <linux/list.h>
27 #include "mmc_private.h"
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
31 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
33 #if !CONFIG_IS_ENABLED(DM_MMC)
35 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
40 __weak int board_mmc_getwp(struct mmc *mmc)
45 int mmc_getwp(struct mmc *mmc)
49 wp = board_mmc_getwp(mmc);
52 if (mmc->cfg->ops->getwp)
53 wp = mmc->cfg->ops->getwp(mmc);
61 __weak int board_mmc_getcd(struct mmc *mmc)
67 #ifdef CONFIG_MMC_TRACE
68 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
70 printf("CMD_SEND:%d\n", cmd->cmdidx);
71 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
74 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
80 printf("\t\tRET\t\t\t %d\n", ret);
82 switch (cmd->resp_type) {
84 printf("\t\tMMC_RSP_NONE\n");
87 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
91 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
95 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
97 printf("\t\t \t\t 0x%08x \n",
99 printf("\t\t \t\t 0x%08x \n",
101 printf("\t\t \t\t 0x%08x \n",
104 printf("\t\t\t\t\tDUMPING DATA\n");
105 for (i = 0; i < 4; i++) {
107 printf("\t\t\t\t\t%03d - ", i*4);
108 ptr = (u8 *)&cmd->response[i];
110 for (j = 0; j < 4; j++)
111 printf("%02x ", *ptr--);
116 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
120 printf("\t\tERROR MMC rsp not supported\n");
126 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
130 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
131 printf("CURR STATE:%d\n", status);
135 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
136 const char *mmc_mode_name(enum bus_mode mode)
138 static const char *const names[] = {
139 [MMC_LEGACY] = "MMC legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
151 [MMC_HS_400_ES] = "HS400ES (200MHz)",
154 if (mode >= MMC_MODES_END)
155 return "Unknown mode";
161 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
163 static const int freqs[] = {
164 [MMC_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
176 [MMC_HS_400_ES] = 200000000,
179 if (mode == MMC_LEGACY)
180 return mmc->legacy_speed;
181 else if (mode >= MMC_MODES_END)
187 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
189 mmc->selected_mode = mode;
190 mmc->tran_speed = mmc_mode2freq(mmc, mode);
191 mmc->ddr_mode = mmc_is_mode_ddr(mode);
192 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
193 mmc->tran_speed / 1000000);
197 #if !CONFIG_IS_ENABLED(DM_MMC)
198 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
202 mmmc_trace_before_send(mmc, cmd);
203 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
204 mmmc_trace_after_send(mmc, cmd, ret);
210 int mmc_send_status(struct mmc *mmc, unsigned int *status)
213 int err, retries = 5;
215 cmd.cmdidx = MMC_CMD_SEND_STATUS;
216 cmd.resp_type = MMC_RSP_R1;
217 if (!mmc_host_is_spi(mmc))
218 cmd.cmdarg = mmc->rca << 16;
221 err = mmc_send_cmd(mmc, &cmd, NULL);
223 mmc_trace_state(mmc, &cmd);
224 *status = cmd.response[0];
228 mmc_trace_state(mmc, &cmd);
232 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
237 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
242 err = mmc_send_status(mmc, &status);
246 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
247 (status & MMC_STATUS_CURR_STATE) !=
251 if (status & MMC_STATUS_MASK) {
252 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
253 pr_err("Status Error: 0x%08x\n", status);
258 if (timeout_ms-- <= 0)
264 if (timeout_ms <= 0) {
265 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
266 pr_err("Timeout waiting card ready\n");
274 int mmc_set_blocklen(struct mmc *mmc, int len)
282 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
283 cmd.resp_type = MMC_RSP_R1;
286 err = mmc_send_cmd(mmc, &cmd, NULL);
288 #ifdef CONFIG_MMC_QUIRKS
289 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
292 * It has been seen that SET_BLOCKLEN may fail on the first
293 * attempt, let's try a few more times
296 err = mmc_send_cmd(mmc, &cmd, NULL);
306 #ifdef MMC_SUPPORTS_TUNING
307 static const u8 tuning_blk_pattern_4bit[] = {
308 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
309 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
310 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
311 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
312 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
313 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
314 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
315 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
318 static const u8 tuning_blk_pattern_8bit[] = {
319 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
320 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
321 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
322 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
323 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
324 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
325 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
326 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
327 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
328 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
329 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
330 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
331 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
332 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
333 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
334 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
337 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
340 struct mmc_data data;
341 const u8 *tuning_block_pattern;
344 if (mmc->bus_width == 8) {
345 tuning_block_pattern = tuning_blk_pattern_8bit;
346 size = sizeof(tuning_blk_pattern_8bit);
347 } else if (mmc->bus_width == 4) {
348 tuning_block_pattern = tuning_blk_pattern_4bit;
349 size = sizeof(tuning_blk_pattern_4bit);
354 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
358 cmd.resp_type = MMC_RSP_R1;
360 data.dest = (void *)data_buf;
362 data.blocksize = size;
363 data.flags = MMC_DATA_READ;
365 err = mmc_send_cmd(mmc, &cmd, &data);
369 if (memcmp(data_buf, tuning_block_pattern, size))
376 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
380 struct mmc_data data;
383 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
385 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
387 if (mmc->high_capacity)
390 cmd.cmdarg = start * mmc->read_bl_len;
392 cmd.resp_type = MMC_RSP_R1;
395 data.blocks = blkcnt;
396 data.blocksize = mmc->read_bl_len;
397 data.flags = MMC_DATA_READ;
399 if (mmc_send_cmd(mmc, &cmd, &data))
403 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
405 cmd.resp_type = MMC_RSP_R1b;
406 if (mmc_send_cmd(mmc, &cmd, NULL)) {
407 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
408 pr_err("mmc fail to send stop cmd\n");
417 #if !CONFIG_IS_ENABLED(DM_MMC)
418 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
420 if (mmc->cfg->ops->get_b_max)
421 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
423 return mmc->cfg->b_max;
427 #if CONFIG_IS_ENABLED(BLK)
428 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
430 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
434 #if CONFIG_IS_ENABLED(BLK)
435 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
437 int dev_num = block_dev->devnum;
439 lbaint_t cur, blocks_todo = blkcnt;
445 struct mmc *mmc = find_mmc_device(dev_num);
449 if (CONFIG_IS_ENABLED(MMC_TINY))
450 err = mmc_switch_part(mmc, block_dev->hwpart);
452 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
457 if ((start + blkcnt) > block_dev->lba) {
458 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
459 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
460 start + blkcnt, block_dev->lba);
465 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
466 pr_debug("%s: Failed to set blocklen\n", __func__);
470 b_max = mmc_get_b_max(mmc, dst, blkcnt);
473 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
474 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
475 pr_debug("%s: Failed to read blocks\n", __func__);
480 dst += cur * mmc->read_bl_len;
481 } while (blocks_todo > 0);
486 static int mmc_go_idle(struct mmc *mmc)
493 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
495 cmd.resp_type = MMC_RSP_NONE;
497 err = mmc_send_cmd(mmc, &cmd, NULL);
507 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
508 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
514 * Send CMD11 only if the request is to switch the card to
517 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
518 return mmc_set_signal_voltage(mmc, signal_voltage);
520 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
522 cmd.resp_type = MMC_RSP_R1;
524 err = mmc_send_cmd(mmc, &cmd, NULL);
528 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
532 * The card should drive cmd and dat[0:3] low immediately
533 * after the response of cmd11, but wait 100 us to be sure
535 err = mmc_wait_dat0(mmc, 0, 100);
542 * During a signal voltage level switch, the clock must be gated
543 * for 5 ms according to the SD spec
545 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
547 err = mmc_set_signal_voltage(mmc, signal_voltage);
551 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
553 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
556 * Failure to switch is indicated by the card holding
557 * dat[0:3] low. Wait for at least 1 ms according to spec
559 err = mmc_wait_dat0(mmc, 1, 1000);
569 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
576 cmd.cmdidx = MMC_CMD_APP_CMD;
577 cmd.resp_type = MMC_RSP_R1;
580 err = mmc_send_cmd(mmc, &cmd, NULL);
585 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
586 cmd.resp_type = MMC_RSP_R3;
589 * Most cards do not answer if some reserved bits
590 * in the ocr are set. However, some controllers
591 * can set bit 7 (reserved for low voltages), but
592 * how to manage low voltages SD card is not yet
595 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
596 (mmc->cfg->voltages & 0xff8000);
598 if (mmc->version == SD_VERSION_2)
599 cmd.cmdarg |= OCR_HCS;
602 cmd.cmdarg |= OCR_S18R;
604 err = mmc_send_cmd(mmc, &cmd, NULL);
609 if (cmd.response[0] & OCR_BUSY)
618 if (mmc->version != SD_VERSION_2)
619 mmc->version = SD_VERSION_1_0;
621 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
622 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
623 cmd.resp_type = MMC_RSP_R3;
626 err = mmc_send_cmd(mmc, &cmd, NULL);
632 mmc->ocr = cmd.response[0];
634 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
635 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
637 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
643 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
649 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
654 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
655 cmd.resp_type = MMC_RSP_R3;
657 if (use_arg && !mmc_host_is_spi(mmc))
658 cmd.cmdarg = OCR_HCS |
659 (mmc->cfg->voltages &
660 (mmc->ocr & OCR_VOLTAGE_MASK)) |
661 (mmc->ocr & OCR_ACCESS_MODE);
663 err = mmc_send_cmd(mmc, &cmd, NULL);
666 mmc->ocr = cmd.response[0];
670 static int mmc_send_op_cond(struct mmc *mmc)
676 /* Some cards seem to need this */
679 start = get_timer(0);
680 /* Ask the card about its capabilities */
682 err = mmc_send_op_cond_iter(mmc, i != 0);
686 /* exit if not busy (flag seems to be inverted) */
687 if (mmc->ocr & OCR_BUSY)
690 if (get_timer(start) > timeout)
694 mmc->op_cond_pending = 1;
698 static int mmc_complete_op_cond(struct mmc *mmc)
705 mmc->op_cond_pending = 0;
706 if (!(mmc->ocr & OCR_BUSY)) {
707 /* Some cards seem to need this */
710 start = get_timer(0);
712 err = mmc_send_op_cond_iter(mmc, 1);
715 if (mmc->ocr & OCR_BUSY)
717 if (get_timer(start) > timeout)
723 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
724 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
725 cmd.resp_type = MMC_RSP_R3;
728 err = mmc_send_cmd(mmc, &cmd, NULL);
733 mmc->ocr = cmd.response[0];
736 mmc->version = MMC_VERSION_UNKNOWN;
738 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
745 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
748 struct mmc_data data;
751 /* Get the Card Status Register */
752 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
753 cmd.resp_type = MMC_RSP_R1;
756 data.dest = (char *)ext_csd;
758 data.blocksize = MMC_MAX_BLOCK_LEN;
759 data.flags = MMC_DATA_READ;
761 err = mmc_send_cmd(mmc, &cmd, &data);
766 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
769 unsigned int status, start;
771 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
772 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
773 (index == EXT_CSD_PART_CONF);
777 if (mmc->gen_cmd6_time)
778 timeout_ms = mmc->gen_cmd6_time * 10;
780 if (is_part_switch && mmc->part_switch_time)
781 timeout_ms = mmc->part_switch_time * 10;
783 cmd.cmdidx = MMC_CMD_SWITCH;
784 cmd.resp_type = MMC_RSP_R1b;
785 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
790 ret = mmc_send_cmd(mmc, &cmd, NULL);
791 } while (ret && retries-- > 0);
796 start = get_timer(0);
798 /* poll dat0 for ready/busy status */
799 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
800 if (ret && ret != -ENOSYS)
804 * In cases when not allowed to poll by using CMD13 or because we aren't
805 * capable of polling by using mmc_wait_dat0, then rely on waiting the
806 * stated timeout to be sufficient.
808 if (ret == -ENOSYS && !send_status)
811 /* Finally wait until the card is ready or indicates a failure
812 * to switch. It doesn't hurt to use CMD13 here even if send_status
813 * is false, because by now (after 'timeout_ms' ms) the bus should be
817 ret = mmc_send_status(mmc, &status);
819 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
820 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
824 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
827 } while (get_timer(start) < timeout_ms);
832 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
834 return __mmc_switch(mmc, set, index, value, true);
837 int mmc_boot_wp(struct mmc *mmc)
839 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
842 #if !CONFIG_IS_ENABLED(MMC_TINY)
843 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
849 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
855 speed_bits = EXT_CSD_TIMING_HS;
857 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
859 speed_bits = EXT_CSD_TIMING_HS200;
862 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
864 speed_bits = EXT_CSD_TIMING_HS400;
867 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
869 speed_bits = EXT_CSD_TIMING_HS400;
873 speed_bits = EXT_CSD_TIMING_LEGACY;
879 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
880 speed_bits, !hsdowngrade);
884 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
885 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
887 * In case the eMMC is in HS200/HS400 mode and we are downgrading
888 * to HS mode, the card clock are still running much faster than
889 * the supported HS mode clock, so we can not reliably read out
890 * Extended CSD. Reconfigure the controller to run at HS mode.
893 mmc_select_mode(mmc, MMC_HS);
894 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
898 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
899 /* Now check to see that it worked */
900 err = mmc_send_ext_csd(mmc, test_csd);
904 /* No high-speed support */
905 if (!test_csd[EXT_CSD_HS_TIMING])
912 static int mmc_get_capabilities(struct mmc *mmc)
914 u8 *ext_csd = mmc->ext_csd;
917 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
919 if (mmc_host_is_spi(mmc))
922 /* Only version 4 supports high-speed */
923 if (mmc->version < MMC_VERSION_4)
927 pr_err("No ext_csd found!\n"); /* this should enver happen */
931 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
933 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
934 mmc->cardtype = cardtype;
936 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
937 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
938 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
939 mmc->card_caps |= MMC_MODE_HS200;
942 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
943 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
944 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
945 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
946 mmc->card_caps |= MMC_MODE_HS400;
949 if (cardtype & EXT_CSD_CARD_TYPE_52) {
950 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
951 mmc->card_caps |= MMC_MODE_DDR_52MHz;
952 mmc->card_caps |= MMC_MODE_HS_52MHz;
954 if (cardtype & EXT_CSD_CARD_TYPE_26)
955 mmc->card_caps |= MMC_MODE_HS;
957 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
958 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
959 (mmc->card_caps & MMC_MODE_HS400)) {
960 mmc->card_caps |= MMC_MODE_HS400_ES;
968 static int mmc_set_capacity(struct mmc *mmc, int part_num)
972 mmc->capacity = mmc->capacity_user;
976 mmc->capacity = mmc->capacity_boot;
979 mmc->capacity = mmc->capacity_rpmb;
985 mmc->capacity = mmc->capacity_gp[part_num - 4];
991 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
996 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1002 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1004 (mmc->part_config & ~PART_ACCESS_MASK)
1005 | (part_num & PART_ACCESS_MASK));
1006 } while (ret && retry--);
1009 * Set the capacity if the switch succeeded or was intended
1010 * to return to representing the raw device.
1012 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1013 ret = mmc_set_capacity(mmc, part_num);
1014 mmc_get_blk_desc(mmc)->hwpart = part_num;
1020 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1021 int mmc_hwpart_config(struct mmc *mmc,
1022 const struct mmc_hwpart_conf *conf,
1023 enum mmc_hwpart_conf_mode mode)
1028 u32 gp_size_mult[4];
1029 u32 max_enh_size_mult;
1030 u32 tot_enh_size_mult = 0;
1033 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1035 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1038 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1039 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1040 return -EMEDIUMTYPE;
1043 if (!(mmc->part_support & PART_SUPPORT)) {
1044 pr_err("Card does not support partitioning\n");
1045 return -EMEDIUMTYPE;
1048 if (!mmc->hc_wp_grp_size) {
1049 pr_err("Card does not define HC WP group size\n");
1050 return -EMEDIUMTYPE;
1053 /* check partition alignment and total enhanced size */
1054 if (conf->user.enh_size) {
1055 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1056 conf->user.enh_start % mmc->hc_wp_grp_size) {
1057 pr_err("User data enhanced area not HC WP group "
1061 part_attrs |= EXT_CSD_ENH_USR;
1062 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1063 if (mmc->high_capacity) {
1064 enh_start_addr = conf->user.enh_start;
1066 enh_start_addr = (conf->user.enh_start << 9);
1072 tot_enh_size_mult += enh_size_mult;
1074 for (pidx = 0; pidx < 4; pidx++) {
1075 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1076 pr_err("GP%i partition not HC WP group size "
1077 "aligned\n", pidx+1);
1080 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1081 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1082 part_attrs |= EXT_CSD_ENH_GP(pidx);
1083 tot_enh_size_mult += gp_size_mult[pidx];
1087 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1088 pr_err("Card does not support enhanced attribute\n");
1089 return -EMEDIUMTYPE;
1092 err = mmc_send_ext_csd(mmc, ext_csd);
1097 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1098 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1099 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1100 if (tot_enh_size_mult > max_enh_size_mult) {
1101 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1102 tot_enh_size_mult, max_enh_size_mult);
1103 return -EMEDIUMTYPE;
1106 /* The default value of EXT_CSD_WR_REL_SET is device
1107 * dependent, the values can only be changed if the
1108 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1109 * changed only once and before partitioning is completed. */
1110 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1111 if (conf->user.wr_rel_change) {
1112 if (conf->user.wr_rel_set)
1113 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1115 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1117 for (pidx = 0; pidx < 4; pidx++) {
1118 if (conf->gp_part[pidx].wr_rel_change) {
1119 if (conf->gp_part[pidx].wr_rel_set)
1120 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1122 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1126 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1127 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1128 puts("Card does not support host controlled partition write "
1129 "reliability settings\n");
1130 return -EMEDIUMTYPE;
1133 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1134 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1135 pr_err("Card already partitioned\n");
1139 if (mode == MMC_HWPART_CONF_CHECK)
1142 /* Partitioning requires high-capacity size definitions */
1143 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1144 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1145 EXT_CSD_ERASE_GROUP_DEF, 1);
1150 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1152 #if CONFIG_IS_ENABLED(MMC_WRITE)
1153 /* update erase group size to be high-capacity */
1154 mmc->erase_grp_size =
1155 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1160 /* all OK, write the configuration */
1161 for (i = 0; i < 4; i++) {
1162 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1163 EXT_CSD_ENH_START_ADDR+i,
1164 (enh_start_addr >> (i*8)) & 0xFF);
1168 for (i = 0; i < 3; i++) {
1169 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1170 EXT_CSD_ENH_SIZE_MULT+i,
1171 (enh_size_mult >> (i*8)) & 0xFF);
1175 for (pidx = 0; pidx < 4; pidx++) {
1176 for (i = 0; i < 3; i++) {
1177 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1178 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1179 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1184 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1185 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1189 if (mode == MMC_HWPART_CONF_SET)
1192 /* The WR_REL_SET is a write-once register but shall be
1193 * written before setting PART_SETTING_COMPLETED. As it is
1194 * write-once we can only write it when completing the
1196 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1197 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1198 EXT_CSD_WR_REL_SET, wr_rel_set);
1203 /* Setting PART_SETTING_COMPLETED confirms the partition
1204 * configuration but it only becomes effective after power
1205 * cycle, so we do not adjust the partition related settings
1206 * in the mmc struct. */
1208 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1209 EXT_CSD_PARTITION_SETTING,
1210 EXT_CSD_PARTITION_SETTING_COMPLETED);
1218 #if !CONFIG_IS_ENABLED(DM_MMC)
1219 int mmc_getcd(struct mmc *mmc)
1223 cd = board_mmc_getcd(mmc);
1226 if (mmc->cfg->ops->getcd)
1227 cd = mmc->cfg->ops->getcd(mmc);
1236 #if !CONFIG_IS_ENABLED(MMC_TINY)
1237 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1240 struct mmc_data data;
1242 /* Switch the frequency */
1243 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1244 cmd.resp_type = MMC_RSP_R1;
1245 cmd.cmdarg = (mode << 31) | 0xffffff;
1246 cmd.cmdarg &= ~(0xf << (group * 4));
1247 cmd.cmdarg |= value << (group * 4);
1249 data.dest = (char *)resp;
1250 data.blocksize = 64;
1252 data.flags = MMC_DATA_READ;
1254 return mmc_send_cmd(mmc, &cmd, &data);
1257 static int sd_get_capabilities(struct mmc *mmc)
1261 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1262 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1263 struct mmc_data data;
1265 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1269 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1271 if (mmc_host_is_spi(mmc))
1274 /* Read the SCR to find out if this card supports higher speeds */
1275 cmd.cmdidx = MMC_CMD_APP_CMD;
1276 cmd.resp_type = MMC_RSP_R1;
1277 cmd.cmdarg = mmc->rca << 16;
1279 err = mmc_send_cmd(mmc, &cmd, NULL);
1284 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1285 cmd.resp_type = MMC_RSP_R1;
1291 data.dest = (char *)scr;
1294 data.flags = MMC_DATA_READ;
1296 err = mmc_send_cmd(mmc, &cmd, &data);
1305 mmc->scr[0] = __be32_to_cpu(scr[0]);
1306 mmc->scr[1] = __be32_to_cpu(scr[1]);
1308 switch ((mmc->scr[0] >> 24) & 0xf) {
1310 mmc->version = SD_VERSION_1_0;
1313 mmc->version = SD_VERSION_1_10;
1316 mmc->version = SD_VERSION_2;
1317 if ((mmc->scr[0] >> 15) & 0x1)
1318 mmc->version = SD_VERSION_3;
1321 mmc->version = SD_VERSION_1_0;
1325 if (mmc->scr[0] & SD_DATA_4BIT)
1326 mmc->card_caps |= MMC_MODE_4BIT;
1328 /* Version 1.0 doesn't support switching */
1329 if (mmc->version == SD_VERSION_1_0)
1334 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1335 (u8 *)switch_status);
1340 /* The high-speed function is busy. Try again */
1341 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1345 /* If high-speed isn't supported, we return */
1346 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1347 mmc->card_caps |= MMC_CAP(SD_HS);
1349 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1350 /* Version before 3.0 don't support UHS modes */
1351 if (mmc->version < SD_VERSION_3)
1354 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1355 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1356 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1357 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1358 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1359 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1360 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1361 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1362 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1363 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1364 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1370 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1374 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1377 /* SD version 1.00 and 1.01 does not support CMD 6 */
1378 if (mmc->version == SD_VERSION_1_0)
1383 speed = UHS_SDR12_BUS_SPEED;
1386 speed = HIGH_SPEED_BUS_SPEED;
1388 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1390 speed = UHS_SDR12_BUS_SPEED;
1393 speed = UHS_SDR25_BUS_SPEED;
1396 speed = UHS_SDR50_BUS_SPEED;
1399 speed = UHS_DDR50_BUS_SPEED;
1402 speed = UHS_SDR104_BUS_SPEED;
1409 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1413 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1419 static int sd_select_bus_width(struct mmc *mmc, int w)
1424 if ((w != 4) && (w != 1))
1427 cmd.cmdidx = MMC_CMD_APP_CMD;
1428 cmd.resp_type = MMC_RSP_R1;
1429 cmd.cmdarg = mmc->rca << 16;
1431 err = mmc_send_cmd(mmc, &cmd, NULL);
1435 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1436 cmd.resp_type = MMC_RSP_R1;
1441 err = mmc_send_cmd(mmc, &cmd, NULL);
1449 #if CONFIG_IS_ENABLED(MMC_WRITE)
1450 static int sd_read_ssr(struct mmc *mmc)
1452 static const unsigned int sd_au_size[] = {
1453 0, SZ_16K / 512, SZ_32K / 512,
1454 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1455 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1456 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1457 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1462 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1463 struct mmc_data data;
1465 unsigned int au, eo, et, es;
1467 cmd.cmdidx = MMC_CMD_APP_CMD;
1468 cmd.resp_type = MMC_RSP_R1;
1469 cmd.cmdarg = mmc->rca << 16;
1471 err = mmc_send_cmd(mmc, &cmd, NULL);
1472 #ifdef CONFIG_MMC_QUIRKS
1473 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1476 * It has been seen that APP_CMD may fail on the first
1477 * attempt, let's try a few more times
1480 err = mmc_send_cmd(mmc, &cmd, NULL);
1483 } while (retries--);
1489 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1490 cmd.resp_type = MMC_RSP_R1;
1494 data.dest = (char *)ssr;
1495 data.blocksize = 64;
1497 data.flags = MMC_DATA_READ;
1499 err = mmc_send_cmd(mmc, &cmd, &data);
1507 for (i = 0; i < 16; i++)
1508 ssr[i] = be32_to_cpu(ssr[i]);
1510 au = (ssr[2] >> 12) & 0xF;
1511 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1512 mmc->ssr.au = sd_au_size[au];
1513 es = (ssr[3] >> 24) & 0xFF;
1514 es |= (ssr[2] & 0xFF) << 8;
1515 et = (ssr[3] >> 18) & 0x3F;
1517 eo = (ssr[3] >> 16) & 0x3;
1518 mmc->ssr.erase_timeout = (et * 1000) / es;
1519 mmc->ssr.erase_offset = eo * 1000;
1522 pr_debug("Invalid Allocation Unit Size.\n");
1528 /* frequency bases */
1529 /* divided by 10 to be nice to platforms without floating point */
1530 static const int fbase[] = {
1537 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1538 * to platforms without floating point.
1540 static const u8 multipliers[] = {
1559 static inline int bus_width(uint cap)
1561 if (cap == MMC_MODE_8BIT)
1563 if (cap == MMC_MODE_4BIT)
1565 if (cap == MMC_MODE_1BIT)
1567 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1571 #if !CONFIG_IS_ENABLED(DM_MMC)
1572 #ifdef MMC_SUPPORTS_TUNING
1573 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1579 static int mmc_set_ios(struct mmc *mmc)
1583 if (mmc->cfg->ops->set_ios)
1584 ret = mmc->cfg->ops->set_ios(mmc);
1589 static int mmc_host_power_cycle(struct mmc *mmc)
1593 if (mmc->cfg->ops->host_power_cycle)
1594 ret = mmc->cfg->ops->host_power_cycle(mmc);
1600 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1603 if (clock > mmc->cfg->f_max)
1604 clock = mmc->cfg->f_max;
1606 if (clock < mmc->cfg->f_min)
1607 clock = mmc->cfg->f_min;
1611 mmc->clk_disable = disable;
1613 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1615 return mmc_set_ios(mmc);
1618 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1620 mmc->bus_width = width;
1622 return mmc_set_ios(mmc);
1625 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1627 * helper function to display the capabilities in a human
1628 * friendly manner. The capabilities include bus width and
1631 void mmc_dump_capabilities(const char *text, uint caps)
1635 pr_debug("%s: widths [", text);
1636 if (caps & MMC_MODE_8BIT)
1638 if (caps & MMC_MODE_4BIT)
1640 if (caps & MMC_MODE_1BIT)
1642 pr_debug("\b\b] modes [");
1643 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1644 if (MMC_CAP(mode) & caps)
1645 pr_debug("%s, ", mmc_mode_name(mode));
1646 pr_debug("\b\b]\n");
1650 struct mode_width_tuning {
1653 #ifdef MMC_SUPPORTS_TUNING
1658 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1659 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1662 case MMC_SIGNAL_VOLTAGE_000: return 0;
1663 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1664 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1665 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1670 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1674 if (mmc->signal_voltage == signal_voltage)
1677 mmc->signal_voltage = signal_voltage;
1678 err = mmc_set_ios(mmc);
1680 pr_debug("unable to set voltage (err %d)\n", err);
1685 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1691 #if !CONFIG_IS_ENABLED(MMC_TINY)
1692 static const struct mode_width_tuning sd_modes_by_pref[] = {
1693 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1694 #ifdef MMC_SUPPORTS_TUNING
1697 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1698 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1703 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1707 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1711 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1716 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1718 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1721 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1726 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1730 #define for_each_sd_mode_by_pref(caps, mwt) \
1731 for (mwt = sd_modes_by_pref;\
1732 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1734 if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the fastest SD bus mode/width combination supported by both the
 * card (@card_caps) and the host, configure card and host accordingly,
 * and fall back to slower modes on failure.  SPI hosts are forced to
 * 1-bit legacy mode.  Returns 0 on success, negative on error.
 * NOTE(review): sampled fragment — error-check branches and closing
 * braces between the visible lines are not shown in this chunk.
 */
1736 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
/* widths to try for each mode, widest (fastest) first */
1739 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1740 const struct mode_width_tuning *mwt;
1741 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS is only attempted when the card advertised 1.8V switching (S18R) */
1742 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1744 bool uhs_en = false;
1749 mmc_dump_capabilities("sd card", card_caps);
1750 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts have a fixed configuration: 1-bit legacy */
1753 if (mmc_host_is_spi(mmc)) {
1754 mmc_set_bus_width(mmc, 1);
1755 mmc_select_mode(mmc, MMC_LEGACY);
1756 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1757 #if CONFIG_IS_ENABLED(MMC_WRITE)
1758 err = sd_read_ssr(mmc);
1760 pr_warn("unable to read ssr\n");
1765 /* Restrict card's capabilities by what the host can do */
1766 caps = card_caps & mmc->host_caps;
/* iterate modes in preference order, then widths for each mode */
1771 for_each_sd_mode_by_pref(caps, mwt) {
1774 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1775 if (*w & caps & mwt->widths) {
1776 pr_debug("trying mode %s width %d (at %d MHz)\n",
1777 mmc_mode_name(mwt->mode),
1779 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1781 /* configure the bus width (card + host) */
1782 err = sd_select_bus_width(mmc, bus_width(*w));
1785 mmc_set_bus_width(mmc, bus_width(*w));
1787 /* configure the bus mode (card) */
1788 err = sd_set_card_speed(mmc, mwt->mode);
1792 /* configure the bus mode (host) */
1793 mmc_select_mode(mmc, mwt->mode);
1794 mmc_set_clock(mmc, mmc->tran_speed,
1797 #ifdef MMC_SUPPORTS_TUNING
1798 /* execute tuning if needed */
1799 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1800 err = mmc_execute_tuning(mmc,
1803 pr_debug("tuning failed\n");
1809 #if CONFIG_IS_ENABLED(MMC_WRITE)
1810 err = sd_read_ssr(mmc);
1812 pr_warn("unable to read ssr\n");
1818 /* revert to a safer bus speed */
1819 mmc_select_mode(mmc, MMC_LEGACY);
1820 mmc_set_clock(mmc, mmc->tran_speed,
/* no mode/width combination worked at all */
1826 pr_err("unable to select a mode\n");
1831 * read the compare the part of ext csd that is constant.
1832 * This can be used to check that the transfer is working
/*
 * Verify the current bus configuration by re-reading EXT_CSD and
 * comparing a handful of read-only fields against the copy cached in
 * mmc->ext_csd.  A mismatch indicates the transfer is corrupting data.
 * Only meaningful for MMC v4+ (earlier versions have no EXT_CSD).
 */
1835 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1838 const u8 *ext_csd = mmc->ext_csd;
1839 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1841 if (mmc->version < MMC_VERSION_4)
1844 err = mmc_send_ext_csd(mmc, test_csd);
1848 /* Only compare read only fields */
1849 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1850 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1851 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1852 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1853 ext_csd[EXT_CSD_REV]
1854 == test_csd[EXT_CSD_REV] &&
1855 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1856 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
/* SEC_CNT is a 4-byte little-endian field; compare it wholesale */
1857 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1858 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1864 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select a signal voltage acceptable to the card (derived from its
 * EXT_CSD card type bits for @mode) and permitted by @allowed_mask.
 * Candidates are tried one bit at a time via ffs(); each failed
 * attempt removes that bit from allowed_mask.
 * NOTE(review): ffs() picks the lowest-numbered set bit — whether that
 * corresponds to the lowest voltage depends on the MMC_SIGNAL_VOLTAGE_*
 * bit layout, which is not visible here; confirm against mmc.h.
 */
1865 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1866 uint32_t allowed_mask)
/* HS200/HS400 card types imply 1.8V (and possibly 1.2V) support */
1874 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1875 EXT_CSD_CARD_TYPE_HS400_1_8V))
1876 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1877 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1878 EXT_CSD_CARD_TYPE_HS400_1_2V))
1879 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1882 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1883 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1884 MMC_SIGNAL_VOLTAGE_180;
1885 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1886 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* default (non-DDR, non-HS200/400 modes): 3.3V */
1889 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* try each voltage both sides allow until one sticks */
1893 while (card_mask & allowed_mask) {
1894 enum mmc_voltage best_match;
1896 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1897 if (!mmc_set_signal_voltage(mmc, best_match))
1900 allowed_mask &= ~best_match;
1906 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1907 uint32_t allowed_mask)
1913 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1914 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1916 .mode = MMC_HS_400_ES,
1917 .widths = MMC_MODE_8BIT,
1920 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1923 .widths = MMC_MODE_8BIT,
1924 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1927 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1930 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1931 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1936 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1940 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1944 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1948 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1952 #define for_each_mmc_mode_by_pref(caps, mwt) \
1953 for (mwt = mmc_modes_by_pref;\
1954 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1956 if (caps & MMC_CAP(mwt->mode))
/*
 * Mapping from a host/card width capability (+ DDR flag) to the value
 * written into the EXT_CSD BUS_WIDTH byte.  Ordered widest-first so
 * iteration prefers the fastest supported configuration.
 * NOTE(review): the struct's field declarations (lines between 1958 and
 * 1962) are not visible in this chunk.
 */
1958 static const struct ext_csd_bus_width {
1962 } ext_csd_bus_width[] = {
1963 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1964 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1965 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1966 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1967 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1970 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch an eMMC into HS400 mode.  Per the eMMC spec the card must be
 * tuned in HS200 first, dropped back to HS to change the bus width to
 * 8-bit DDR, and only then switched to HS400 timing.
 */
1971 static int mmc_select_hs400(struct mmc *mmc)
1975 /* Set timing to HS200 for tuning */
1976 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1980 /* configure the bus mode (host) */
1981 mmc_select_mode(mmc, MMC_HS_200);
1982 mmc_set_clock(mmc, mmc->tran_speed, false);
1984 /* execute tuning if needed */
/* hs400_tuning tells the host driver this tuning targets HS400 */
1985 mmc->hs400_tuning = 1;
1986 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1987 mmc->hs400_tuning = 0;
1989 debug("tuning failed\n");
1993 /* Set back to HS */
1994 mmc_set_card_speed(mmc, MMC_HS, true);
/* switch the card to 8-bit DDR bus width (required for HS400) */
1996 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1997 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
2001 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
2005 mmc_select_mode(mmc, MMC_HS_400);
2006 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2013 static int mmc_select_hs400(struct mmc *mmc)
2019 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2020 #if !CONFIG_IS_ENABLED(DM_MMC)
2021 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * Switch an eMMC into HS400 Enhanced Strobe mode: drop to HS, switch
 * the card to 8-bit DDR with the strobe bit set, select HS400ES timing,
 * then enable enhanced strobe on the host.  No tuning is required in
 * this mode (that is the point of enhanced strobe).
 */
2026 static int mmc_select_hs400es(struct mmc *mmc)
2030 err = mmc_set_card_speed(mmc, MMC_HS, true);
2034 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2035 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2036 EXT_CSD_BUS_WIDTH_STROBE);
2038 printf("switch to bus width for hs400 failed\n");
2041 /* TODO: driver strength */
2042 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2046 mmc_select_mode(mmc, MMC_HS_400_ES);
2047 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2051 return mmc_set_enhanced_strobe(mmc);
2054 static int mmc_select_hs400es(struct mmc *mmc)
2060 #define for_each_supported_width(caps, ddr, ecbv) \
2061 for (ecbv = ext_csd_bus_width;\
2062 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2064 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the fastest eMMC bus mode/width combination supported by both
 * card and host, configure both sides, verify the link with an EXT_CSD
 * read-back, and revert to 1-bit legacy on failure before trying the
 * next candidate.  SPI hosts are forced to 1-bit legacy.
 * Returns 0 on success, negative on error.
 * NOTE(review): sampled fragment — error-check branches and closing
 * braces between the visible lines are not shown in this chunk.
 */
2066 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2069 const struct mode_width_tuning *mwt;
2070 const struct ext_csd_bus_width *ecbw;
2073 mmc_dump_capabilities("mmc", card_caps);
2074 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts have a fixed configuration: 1-bit legacy */
2077 if (mmc_host_is_spi(mmc)) {
2078 mmc_set_bus_width(mmc, 1);
2079 mmc_select_mode(mmc, MMC_LEGACY);
2080 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2084 /* Restrict card's capabilities by what the host can do */
2085 card_caps &= mmc->host_caps;
2087 /* Only version 4 of MMC supports wider bus widths */
2088 if (mmc->version < MMC_VERSION_4)
2091 if (!mmc->ext_csd) {
2092 pr_debug("No ext_csd found!\n"); /* this should never happen */
2096 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2097 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2099 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2100 * before doing anything else, since a transition from either of
2101 * the HS200/HS400 mode directly to legacy mode is not supported.
2103 if (mmc->selected_mode == MMC_HS_200 ||
2104 mmc->selected_mode == MMC_HS_400)
2105 mmc_set_card_speed(mmc, MMC_HS, true);
2108 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
/* iterate modes in preference order, then widths for each mode */
2110 for_each_mmc_mode_by_pref(card_caps, mwt) {
2111 for_each_supported_width(card_caps & mwt->widths,
2112 mmc_is_mode_ddr(mwt->mode), ecbw) {
2113 enum mmc_voltage old_voltage;
2114 pr_debug("trying mode %s width %d (at %d MHz)\n",
2115 mmc_mode_name(mwt->mode),
2116 bus_width(ecbw->cap),
2117 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember voltage so it can be restored on failure */
2118 old_voltage = mmc->signal_voltage;
2119 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2120 MMC_ALL_SIGNAL_VOLTAGE);
2124 /* configure the bus width (card + host) */
/* DDR flag is applied later, after the speed switch */
2125 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2127 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2130 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400/HS400ES need dedicated multi-step switch sequences */
2132 if (mwt->mode == MMC_HS_400) {
2133 err = mmc_select_hs400(mmc);
2135 printf("Select HS400 failed %d\n", err);
2138 } else if (mwt->mode == MMC_HS_400_ES) {
2139 err = mmc_select_hs400es(mmc);
2141 printf("Select HS400ES failed %d\n",
2146 /* configure the bus speed (card) */
2147 err = mmc_set_card_speed(mmc, mwt->mode, false);
2152 * configure the bus width AND the ddr mode
2153 * (card). The host side will be taken care
2154 * of in the next step
2156 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2157 err = mmc_switch(mmc,
2158 EXT_CSD_CMD_SET_NORMAL,
2160 ecbw->ext_csd_bits);
2165 /* configure the bus mode (host) */
2166 mmc_select_mode(mmc, mwt->mode);
2167 mmc_set_clock(mmc, mmc->tran_speed,
2169 #ifdef MMC_SUPPORTS_TUNING
2171 /* execute tuning if needed */
2173 err = mmc_execute_tuning(mmc,
2176 pr_debug("tuning failed\n");
2183 /* do a transfer to check the configuration */
2184 err = mmc_read_and_compare_ext_csd(mmc);
2188 mmc_set_signal_voltage(mmc, old_voltage);
2189 /* if an error occured, revert to a safer bus mode */
2190 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2191 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2192 mmc_select_mode(mmc, MMC_LEGACY);
2193 mmc_set_bus_width(mmc, 1);
/* no mode/width combination worked at all */
2197 pr_err("unable to select a mode\n");
2203 #if CONFIG_IS_ENABLED(MMC_TINY)
2204 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ specific part of card startup: read EXT_CSD and derive from
 * it the spec version, user/boot/RPMB/GP partition capacities, erase
 * and write-protect group sizes, CMD6 timing, and partition config.
 * No-op for SD cards and pre-v4 MMC.  Returns 0 on success.
 * NOTE(review): sampled fragment — error-check branches and closing
 * braces between the visible lines are not shown in this chunk.
 */
2207 static int mmc_startup_v4(struct mmc *mmc)
2211 bool has_parts = false;
2212 bool part_completed;
/* index into this table is the EXT_CSD_REV byte */
2213 static const u32 mmc_versions[] = {
2225 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: use a single static EXT_CSD buffer instead of malloc */
2226 u8 *ext_csd = ext_csd_bkup;
2228 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2232 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2234 err = mmc_send_ext_csd(mmc, ext_csd);
2238 /* store the ext csd for future reference */
2240 mmc->ext_csd = ext_csd;
2242 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2244 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2247 /* check ext_csd version and capacity */
2248 err = mmc_send_ext_csd(mmc, ext_csd);
2252 /* store the ext csd for future reference */
2254 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2257 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* reject EXT_CSD revisions newer than this code knows about */
2259 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2262 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2264 if (mmc->version >= MMC_VERSION_4_2) {
2266 * According to the JEDEC Standard, the value of
2267 * ext_csd's capacity is valid if the value is more
2270 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2271 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2272 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2273 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2274 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT capacity is only trusted above 2 GiB (see JEDEC note above) */
2275 if ((capacity >> 20) > 2 * 1024)
2276 mmc->capacity_user = capacity;
2279 if (mmc->version >= MMC_VERSION_4_5)
2280 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2282 /* The partition data may be non-zero but it is only
2283 * effective if PARTITION_SETTING_COMPLETED is set in
2284 * EXT_CSD, so ignore any data if this bit is not set,
2285 * except for enabling the high-capacity group size
2286 * definition (see below).
2288 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2289 EXT_CSD_PARTITION_SETTING_COMPLETED);
2291 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2292 /* Some eMMC set the value too low so set a minimum */
2293 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2294 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2296 /* store the partition info of emmc */
2297 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2298 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2299 ext_csd[EXT_CSD_BOOT_MULT])
2300 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2301 if (part_completed &&
2302 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2303 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot/RPMB partition sizes are given in 128 KiB units (<< 17) */
2305 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2307 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* compute the four general-purpose partition capacities */
2309 for (i = 0; i < 4; i++) {
2310 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2311 uint mult = (ext_csd[idx + 2] << 16) +
2312 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2315 if (!part_completed)
2317 mmc->capacity_gp[i] = mult;
2318 mmc->capacity_gp[i] *=
2319 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2320 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* group units are 512 KiB (<< 19) */
2321 mmc->capacity_gp[i] <<= 19;
2324 #ifndef CONFIG_SPL_BUILD
/* enhanced user-area size/start, only valid once partitioning done */
2325 if (part_completed) {
2326 mmc->enh_user_size =
2327 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2328 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2329 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2330 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2331 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2332 mmc->enh_user_size <<= 19;
2333 mmc->enh_user_start =
2334 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2335 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2336 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2337 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity cards address in 512-byte sectors (<< 9) */
2338 if (mmc->high_capacity)
2339 mmc->enh_user_start <<= 9;
2344 * Host needs to enable ERASE_GRP_DEF bit if device is
2345 * partitioned. This bit will be lost every time after a reset
2346 * or power off. This will affect erase size.
2350 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2351 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2354 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2355 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy consistent with the switch above */
2360 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2363 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2364 #if CONFIG_IS_ENABLED(MMC_WRITE)
2365 /* Read out group size from ext_csd */
2366 mmc->erase_grp_size =
2367 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2370 * if high capacity and partition setting completed
2371 * SEC_COUNT is valid even if it is smaller than 2 GiB
2372 * JEDEC Standard JESD84-B45, 6.2.4
2374 if (mmc->high_capacity && part_completed) {
2375 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2376 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2377 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2378 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2379 capacity *= MMC_MAX_BLOCK_LEN;
2380 mmc->capacity_user = capacity;
2383 #if CONFIG_IS_ENABLED(MMC_WRITE)
2385 /* Calculate the group size from the csd value. */
2386 int erase_gsz, erase_gmul;
2388 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2389 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2390 mmc->erase_grp_size = (erase_gsz + 1)
2394 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2395 mmc->hc_wp_grp_size = 1024
2396 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2397 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2400 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2405 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached EXT_CSD (presumably after free —
 * the freeing line is not visible in this chunk) */
2408 mmc->ext_csd = NULL;
/*
 * Bring an identified card from Identification state to Transfer state
 * and fully characterize it: read CID, set/get RCA, parse CSD (version,
 * legacy speed, block lengths, capacity), select the card, run the
 * v4+ EXT_CSD parsing, negotiate the best bus mode/width, and fill in
 * the block-device descriptor.  Returns 0 on success.
 * NOTE(review): sampled fragment — error checks, variable declarations
 * and closing braces between the visible lines are not shown here.
 */
2413 static int mmc_startup(struct mmc *mmc)
2419 struct blk_desc *bdesc;
2421 #ifdef CONFIG_MMC_SPI_CRC_ON
2422 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2423 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2424 cmd.resp_type = MMC_RSP_R1;
2426 err = mmc_send_cmd(mmc, &cmd, NULL);
2432 /* Put the Card in Identify Mode */
2433 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2434 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2435 cmd.resp_type = MMC_RSP_R2;
2438 err = mmc_send_cmd(mmc, &cmd, NULL);
2440 #ifdef CONFIG_MMC_QUIRKS
2441 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2444 * It has been seen that SEND_CID may fail on the first
2445 * attempt, let's try a few more time
2448 err = mmc_send_cmd(mmc, &cmd, NULL);
2451 } while (retries--);
2458 memcpy(mmc->cid, cmd.response, 16);
2461 * For MMC cards, set the Relative Address.
2462 * For SD cards, get the Relative Address.
2463 * This also puts the cards into Standby State
2465 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2466 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2467 cmd.cmdarg = mmc->rca << 16;
2468 cmd.resp_type = MMC_RSP_R6;
2470 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD assigns the RCA; read it back from the R6 response */
2476 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2479 /* Get the Card-Specific Data */
2480 cmd.cmdidx = MMC_CMD_SEND_CSD;
2481 cmd.resp_type = MMC_RSP_R2;
2482 cmd.cmdarg = mmc->rca << 16;
2484 err = mmc_send_cmd(mmc, &cmd, NULL);
2489 mmc->csd[0] = cmd.response[0];
2490 mmc->csd[1] = cmd.response[1];
2491 mmc->csd[2] = cmd.response[2];
2492 mmc->csd[3] = cmd.response[3];
/* MMC: derive spec version from the CSD SPEC_VERS field */
2494 if (mmc->version == MMC_VERSION_UNKNOWN) {
2495 int version = (cmd.response[0] >> 26) & 0xf;
2499 mmc->version = MMC_VERSION_1_2;
2502 mmc->version = MMC_VERSION_1_4;
2505 mmc->version = MMC_VERSION_2_2;
2508 mmc->version = MMC_VERSION_3;
2511 mmc->version = MMC_VERSION_4;
2514 mmc->version = MMC_VERSION_1_2;
2519 /* divide frequency by 10, since the mults are 10x bigger */
2520 freq = fbase[(cmd.response[0] & 0x7)];
2521 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2523 mmc->legacy_speed = freq * mult;
2524 mmc_select_mode(mmc, MMC_LEGACY);
2526 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2527 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2528 #if CONFIG_IS_ENABLED(MMC_WRITE)
2531 mmc->write_bl_len = mmc->read_bl_len;
2533 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* compute capacity from C_SIZE/C_SIZE_MULT (CSD layout differs
 * between standard- and high-capacity cards) */
2536 if (mmc->high_capacity) {
2537 csize = (mmc->csd[1] & 0x3f) << 16
2538 | (mmc->csd[2] & 0xffff0000) >> 16;
2541 csize = (mmc->csd[1] & 0x3ff) << 2
2542 | (mmc->csd[2] & 0xc0000000) >> 30;
2543 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2546 mmc->capacity_user = (csize + 1) << (cmult + 2);
2547 mmc->capacity_user *= mmc->read_bl_len;
2548 mmc->capacity_boot = 0;
2549 mmc->capacity_rpmb = 0;
2550 for (i = 0; i < 4; i++)
2551 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what this driver can transfer */
2553 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2554 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2556 #if CONFIG_IS_ENABLED(MMC_WRITE)
2557 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2558 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only if the card implements it and a value is set */
2561 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2562 cmd.cmdidx = MMC_CMD_SET_DSR;
2563 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2564 cmd.resp_type = MMC_RSP_NONE;
2565 if (mmc_send_cmd(mmc, &cmd, NULL))
2566 pr_warn("MMC: SET_DSR failed\n");
2569 /* Select the card, and put it into Transfer Mode */
2570 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2571 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2572 cmd.resp_type = MMC_RSP_R1;
2573 cmd.cmdarg = mmc->rca << 16;
2574 err = mmc_send_cmd(mmc, &cmd, NULL);
2581 * For SD, its erase group is always one sector
2583 #if CONFIG_IS_ENABLED(MMC_WRITE)
2584 mmc->erase_grp_size = 1;
2586 mmc->part_config = MMCPART_NOAVAILABLE;
/* MMC v4+: parse EXT_CSD (versions, partitions, capacities) */
2588 err = mmc_startup_v4(mmc);
2592 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2596 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY skips mode negotiation: 1-bit legacy only */
2597 mmc_set_clock(mmc, mmc->legacy_speed, false);
2598 mmc_select_mode(mmc, MMC_LEGACY);
2599 mmc_set_bus_width(mmc, 1);
2602 err = sd_get_capabilities(mmc);
2605 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2607 err = mmc_get_capabilities(mmc);
2610 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2616 mmc->best_mode = mmc->selected_mode;
2618 /* Fix the block length for DDR mode */
2619 if (mmc->ddr_mode) {
2620 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2621 #if CONFIG_IS_ENABLED(MMC_WRITE)
2622 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2626 /* fill in device description */
2627 bdesc = mmc_get_blk_desc(mmc);
2631 bdesc->blksz = mmc->read_bl_len;
2632 bdesc->log2blksz = LOG2(bdesc->blksz);
2633 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2634 #if !defined(CONFIG_SPL_BUILD) || \
2635 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2636 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* decode vendor/product/revision strings from the CID register */
2637 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2638 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2639 (mmc->cid[3] >> 16) & 0xffff);
2640 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2641 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2642 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2643 (mmc->cid[2] >> 24) & 0xff);
2644 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2645 (mmc->cid[2] >> 16) & 0xf);
2647 bdesc->vendor[0] = 0;
2648 bdesc->product[0] = 0;
2649 bdesc->revision[0] = 0;
2652 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * Send CMD8 (SEND_IF_COND) to probe for an SD v2.0+ card.  The 0xaa
 * check pattern must be echoed back in the response; if it is, the
 * card is marked SD_VERSION_2.
 */
2659 static int mmc_send_if_cond(struct mmc *mmc)
2664 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2665 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2666 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2667 cmd.resp_type = MMC_RSP_R7;
2669 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the 0xaa check pattern back */
2674 if ((cmd.response[0] & 0xff) != 0xaa)
2677 mmc->version = SD_VERSION_2;
2682 #if !CONFIG_IS_ENABLED(DM_MMC)
2683 /* board-specific MMC power initializations. */
2684 __weak void board_mmc_power_init(void)
/*
 * Look up the card's power supplies.  With driver model + regulators,
 * resolve the optional "vmmc-supply" and "vqmmc-supply" phandles
 * (absence is logged but not fatal); without DM, fall back to the
 * board_mmc_power_init() hook.
 */
2689 static int mmc_power_init(struct mmc *mmc)
2691 #if CONFIG_IS_ENABLED(DM_MMC)
2692 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2695 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2698 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2700 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2701 &mmc->vqmmc_supply);
2703 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2705 #else /* !CONFIG_DM_MMC */
2707 * Driver model should use a regulator, as above, rather than calling
2708 * out to board code.
2710 board_mmc_power_init();
2716 * put the host in the initial state:
2717 * - turn on Vdd (card power supply)
2718 * - configure the bus width and clock to minimal values
2716 * put the host in the initial state:
2717 * - turn on Vdd (card power supply)
2718 * - configure the bus width and clock to minimal values
2720 static void mmc_set_initial_state(struct mmc *mmc)
2724 /* First try to set 3.3V. If it fails set to 1.8V */
2725 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2727 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2729 pr_warn("mmc: failed to set signal voltage\n");
/* safest possible bus configuration: legacy mode, 1-bit, minimal clock */
2731 mmc_select_mode(mmc, MMC_LEGACY);
2732 mmc_set_bus_width(mmc, 1);
2733 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/*
 * Enable the card's Vdd supply (vmmc regulator) when one was resolved
 * by mmc_power_init(); otherwise nothing to do here.
 */
2736 static int mmc_power_on(struct mmc *mmc)
2738 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2739 if (mmc->vmmc_supply) {
2740 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2743 puts("Error enabling VMMC supply\n");
/*
 * Gate the bus clock, then disable the card's Vdd supply (vmmc
 * regulator) if one was resolved by mmc_power_init().
 */
2751 static int mmc_power_off(struct mmc *mmc)
2753 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2754 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2755 if (mmc->vmmc_supply) {
2756 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2759 pr_debug("Error disabling VMMC supply\n");
/*
 * Full power cycle of the card: power off, let the host do its own
 * power-cycle handling, wait (see comment below), then power back on.
 * Returns the first error encountered, or mmc_power_on()'s result.
 */
2767 static int mmc_power_cycle(struct mmc *mmc)
2771 ret = mmc_power_off(mmc);
2775 ret = mmc_host_power_cycle(mmc);
2780 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2781 * to be on the safer side.
2784 return mmc_power_on(mmc);
/*
 * Power up the card and run the operating-conditions negotiation:
 * power-cycle (disabling UHS modes when a full cycle isn't possible),
 * (re)initialize the host, reset the card (CMD0), probe SD v2 with
 * CMD8, then try SD ACMD41; on timeout fall back to MMC CMD1.
 * NOTE(review): sampled fragment — error checks and closing braces
 * between the visible lines are not shown in this chunk.
 */
2787 int mmc_get_op_cond(struct mmc *mmc)
2789 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2795 err = mmc_power_init(mmc);
2799 #ifdef CONFIG_MMC_QUIRKS
/* enable all known-safe retry quirks by default */
2800 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2801 MMC_QUIRK_RETRY_SEND_CID |
2802 MMC_QUIRK_RETRY_APP_CMD;
2805 err = mmc_power_cycle(mmc);
2808 * if power cycling is not supported, we should not try
2809 * to use the UHS modes, because we wouldn't be able to
2810 * recover from an error during the UHS initialization.
2812 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2814 mmc->host_caps &= ~UHS_CAPS;
2815 err = mmc_power_on(mmc);
2820 #if CONFIG_IS_ENABLED(DM_MMC)
2822 * Re-initialization is needed to clear old configuration for
2825 err = mmc_reinit(mmc);
2827 /* made sure it's not NULL earlier */
2828 err = mmc->cfg->ops->init(mmc);
2835 mmc_set_initial_state(mmc);
2837 /* Reset the Card */
2838 err = mmc_go_idle(mmc);
2843 /* The internal partition reset to user partition(0) at every CMD0 */
2844 mmc_get_blk_desc(mmc)->hwpart = 0;
2846 /* Test for SD version 2 */
2847 err = mmc_send_if_cond(mmc);
2849 /* Now try to get the SD card's operating condition */
2850 err = sd_send_op_cond(mmc, uhs_en);
/* UHS negotiation failure: power-cycle and presumably retry without
 * UHS — the retry lines are not visible in this chunk */
2851 if (err && uhs_en) {
2853 mmc_power_cycle(mmc);
2857 /* If the command timed out, we check for an MMC card */
2858 if (err == -ETIMEDOUT) {
2859 err = mmc_send_op_cond(mmc);
2862 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2863 pr_err("Card did not respond to voltage select!\n");
/*
 * First half of the (possibly split) init sequence: seed host caps,
 * run deferred probe, check card detect, then negotiate operating
 * conditions.  Sets init_in_progress so mmc_init() knows to call
 * mmc_complete_init() later.  Returns -ENOMEDIUM when no card present.
 */
2872 int mmc_start_init(struct mmc *mmc)
2878 * all hosts are capable of 1 bit bus-width and able to use the legacy
/* NOTE(review): MMC_CAP(MMC_LEGACY) appears twice in this OR —
 * harmless but redundant; cannot be touched in a doc-only change */
2881 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2882 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2883 #if CONFIG_IS_ENABLED(DM_MMC)
2884 mmc_deferred_probe(mmc);
2886 #if !defined(CONFIG_MMC_BROKEN_CD)
2887 no_card = mmc_getcd(mmc) == 0;
2891 #if !CONFIG_IS_ENABLED(DM_MMC)
2892 /* we pretend there's no card when init is NULL */
2893 no_card = no_card || (mmc->cfg->ops->init == NULL);
2897 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2898 pr_err("MMC: no card present\n");
2903 err = mmc_get_op_cond(mmc);
2906 mmc->init_in_progress = 1;
/*
 * Second half of the split init: finish the pending operating-
 * conditions negotiation (if any), then run the full card startup.
 * Clears init_in_progress unconditionally.
 */
2911 static int mmc_complete_init(struct mmc *mmc)
2915 mmc->init_in_progress = 0;
2916 if (mmc->op_cond_pending)
2917 err = mmc_complete_op_cond(mmc);
2920 err = mmc_startup(mmc);
/*
 * Public entry point: initialize the card end-to-end.  Runs
 * mmc_start_init() unless an init is already in progress, then
 * mmc_complete_init(), and logs the elapsed time.
 */
2928 int mmc_init(struct mmc *mmc)
2931 __maybe_unused ulong start;
2932 #if CONFIG_IS_ENABLED(DM_MMC)
2933 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2940 start = get_timer(0);
2942 if (!mmc->init_in_progress)
2943 err = mmc_start_init(mmc);
2946 err = mmc_complete_init(mmc);
2948 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2953 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2954 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2955 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Downgrade the card out of any high-speed mode by re-running mode
 * selection with the UHS (SD) or HS200/HS400 (MMC) capabilities
 * masked off.  Used before handing the card to an OS that must not
 * inherit a tuned high-speed state.
 */
2956 int mmc_deinit(struct mmc *mmc)
2964 caps_filtered = mmc->card_caps &
2965 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2966 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2967 MMC_CAP(UHS_SDR104));
2969 return sd_select_mode_and_width(mmc, caps_filtered);
2971 caps_filtered = mmc->card_caps &
2972 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2974 return mmc_select_mode_and_width(mmc, caps_filtered);
2979 int mmc_set_dsr(struct mmc *mmc, u16 val)
2985 /* CPU-specific MMC initializations */
2986 __weak int cpu_mmc_init(struct bd_info *bis)
2991 /* board-specific MMC initializations. */
2992 __weak int board_mmc_init(struct bd_info *bis)
2997 void mmc_set_preinit(struct mmc *mmc, int preinit)
2999 mmc->preinit = preinit;
3002 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Driver-model probe of all MMC controllers: request devices by
 * sequence number to assign contiguous numbering, then probe every
 * device in the uclass, logging (but not aborting on) failures.
 */
3003 static int mmc_probe(struct bd_info *bis)
3007 struct udevice *dev;
3009 ret = uclass_get(UCLASS_MMC, &uc);
3014 * Try to add them in sequence order. Really with driver model we
3015 * should allow holes, but the current MMC list does not allow that.
3016 * So if we request 0, 1, 3 we will get 0, 1, 2.
3018 for (i = 0; ; i++) {
3019 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3023 uclass_foreach_dev(dev, uc) {
3024 ret = device_probe(dev);
3026 pr_err("%s - probe failed: %d\n", dev->name, ret);
3032 static int mmc_probe(struct bd_info *bis)
3034 if (board_mmc_init(bis) < 0)
/*
 * One-time global MMC subsystem init: guards against repeat calls,
 * probes the controllers, and (outside SPL) prints the device list.
 */
3041 int mmc_initialize(struct bd_info *bis)
/* idempotence guard — subsequent calls return immediately */
3043 static int initialized = 0;
3045 if (initialized) /* Avoid initializing mmc multiple times */
3049 #if !CONFIG_IS_ENABLED(BLK)
3050 #if !CONFIG_IS_ENABLED(MMC_TINY)
3054 ret = mmc_probe(bis);
3058 #ifndef CONFIG_SPL_BUILD
3059 print_mmc_devices(',');
3066 #if CONFIG_IS_ENABLED(DM_MMC)
3067 int mmc_init_device(int num)
3069 struct udevice *dev;
3073 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3077 m = mmc_get_mmc_dev(dev);
3087 #ifdef CONFIG_CMD_BKOPS_ENABLE
3088 int mmc_set_bkops_enable(struct mmc *mmc)
3091 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3093 err = mmc_send_ext_csd(mmc, ext_csd);
3095 puts("Could not get ext_csd register values\n");
3099 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3100 puts("Background operations not supported on device\n");
3101 return -EMEDIUMTYPE;
3104 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3105 puts("Background operations already enabled\n");
3109 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3111 puts("Failed to enable manual background operations\n");
3115 puts("Enabled manual background operations\n");