1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 #if !CONFIG_IS_ENABLED(MMC_TINY)
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
30 #if !CONFIG_IS_ENABLED(DM_MMC)
32 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
33 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
39 __weak int board_mmc_getwp(struct mmc *mmc)
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
96 printf("\t\t \t\t 0x%08X \n",
98 printf("\t\t \t\t 0x%08X \n",
100 printf("\t\t \t\t 0x%08X \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02X ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
152 if (mode >= MMC_MODES_END)
153 return "Unknown mode";
/*
 * mmc_mode2freq() - nominal bus clock (Hz) for a given bus mode.
 * MMC_LEGACY returns the card-reported legacy speed instead of the table.
 * NOTE(review): this listing is elided — the out-of-range return path and
 * the final table lookup are not visible here.
 */
159 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
161 static const int freqs[] = {
162 [MMC_LEGACY] = 25000000,
163 [SD_LEGACY] = 25000000,
166 [MMC_HS_52] = 52000000,
167 [MMC_DDR_52] = 52000000,
168 [UHS_SDR12] = 25000000,
169 [UHS_SDR25] = 50000000,
170 [UHS_SDR50] = 100000000,
171 [UHS_DDR50] = 50000000,
172 [UHS_SDR104] = 208000000,
173 [MMC_HS_200] = 200000000,
/* legacy speed is per-card, not a fixed table entry */
176 if (mode == MMC_LEGACY)
177 return mmc->legacy_speed;
178 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the chosen bus mode on the host side:
 * caches the mode, its nominal frequency and whether it is a DDR mode.
 * Does not itself reprogram the controller clock (callers do that).
 */
184 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
186 mmc->selected_mode = mode;
187 mmc->tran_speed = mmc_mode2freq(mmc, mode);
188 mmc->ddr_mode = mmc_is_mode_ddr(mode);
189 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
190 mmc->tran_speed / 1000000);
194 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_send_cmd() - non-DM command dispatch: forwards cmd (+ optional data)
 * to the host driver's send_cmd op, bracketed by trace hooks.
 * (Trace calls compile to no-ops unless CONFIG_MMC_TRACE is set.)
 */
195 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
199 mmmc_trace_before_send(mmc, cmd);
200 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
201 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * mmc_send_status() - poll CMD13 until the card reports ready-for-data and
 * has left the programming state, with a bounded retry/timeout.
 * In SPI mode no RCA argument is needed, so cmdarg is left as-is.
 * NOTE(review): elided listing — loop construct and timeout bookkeeping
 * are not visible in this dump.
 */
207 int mmc_send_status(struct mmc *mmc, int timeout)
210 int err, retries = 5;
212 cmd.cmdidx = MMC_CMD_SEND_STATUS;
213 cmd.resp_type = MMC_RSP_R1;
214 if (!mmc_host_is_spi(mmc))
215 cmd.cmdarg = mmc->rca << 16;
218 err = mmc_send_cmd(mmc, &cmd, NULL);
/* ready: RDY_FOR_DATA set and current state is not PRG */
220 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
221 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
/* any error bit in the status word aborts the wait */
225 if (cmd.response[0] & MMC_STATUS_MASK) {
226 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
227 pr_err("Status Error: 0x%08X\n",
232 } else if (--retries < 0)
241 mmc_trace_state(mmc, &cmd);
243 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
244 pr_err("Timeout waiting card ready\n");
/*
 * mmc_set_blocklen() - issue CMD16 (SET_BLOCKLEN) for the given length.
 * With CONFIG_MMC_QUIRKS, cards flagged MMC_QUIRK_RETRY_SET_BLOCKLEN get
 * the command retried, since the first attempt is known to fail on some
 * parts (see comment below).
 */
252 int mmc_set_blocklen(struct mmc *mmc, int len)
260 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
261 cmd.resp_type = MMC_RSP_R1;
264 err = mmc_send_cmd(mmc, &cmd, NULL);
266 #ifdef CONFIG_MMC_QUIRKS
267 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
270 * It has been seen that SET_BLOCKLEN may fail on the first
271 * attempt, let's try a few more time
274 err = mmc_send_cmd(mmc, &cmd, NULL);
284 #ifdef MMC_SUPPORTS_TUNING
285 static const u8 tuning_blk_pattern_4bit[] = {
286 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
287 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
288 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
289 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
290 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
291 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
292 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
293 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
296 static const u8 tuning_blk_pattern_8bit[] = {
297 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
298 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
299 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
300 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
301 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
302 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
303 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
304 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
305 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
306 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
307 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
308 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
309 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
310 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
311 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
312 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * mmc_send_tuning() - send one tuning block read and compare the received
 * data against the JEDEC/SD reference pattern for the active bus width
 * (8-bit vs 4-bit pattern). Non-matching data means this tap/phase failed.
 * NOTE(review): elided listing — the fallthrough for other bus widths and
 * the error returns are not visible here.
 */
315 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
318 struct mmc_data data;
319 const u8 *tuning_block_pattern;
322 if (mmc->bus_width == 8) {
323 tuning_block_pattern = tuning_blk_pattern_8bit;
324 size = sizeof(tuning_blk_pattern_8bit);
325 } else if (mmc->bus_width == 4) {
326 tuning_block_pattern = tuning_blk_pattern_4bit;
327 size = sizeof(tuning_blk_pattern_4bit);
/* DMA-capable, cache-aligned receive buffer */
332 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
336 cmd.resp_type = MMC_RSP_R1;
338 data.dest = (void *)data_buf;
340 data.blocksize = size;
341 data.flags = MMC_DATA_READ;
343 err = mmc_send_cmd(mmc, &cmd, &data);
347 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * mmc_read_blocks() - read blkcnt blocks starting at 'start' into dst.
 * Uses CMD18 (multi-block) vs CMD17 (single) and byte- vs block-addressing
 * depending on blkcnt and high_capacity. Multi-block reads are terminated
 * with CMD12 (STOP_TRANSMISSION).
 * NOTE(review): elided listing — return values and the multi-block guard
 * around the stop command are not visible here.
 */
354 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
358 struct mmc_data data;
361 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
363 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
/* high-capacity cards take a block address, others a byte address */
365 if (mmc->high_capacity)
368 cmd.cmdarg = start * mmc->read_bl_len;
370 cmd.resp_type = MMC_RSP_R1;
373 data.blocks = blkcnt;
374 data.blocksize = mmc->read_bl_len;
375 data.flags = MMC_DATA_READ;
377 if (mmc_send_cmd(mmc, &cmd, &data))
381 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
383 cmd.resp_type = MMC_RSP_R1b;
384 if (mmc_send_cmd(mmc, &cmd, NULL)) {
385 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
386 pr_err("mmc fail to send stop cmd\n");
395 #if CONFIG_IS_ENABLED(BLK)
396 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
398 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
402 #if CONFIG_IS_ENABLED(BLK)
403 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
405 int dev_num = block_dev->devnum;
407 lbaint_t cur, blocks_todo = blkcnt;
412 struct mmc *mmc = find_mmc_device(dev_num);
416 if (CONFIG_IS_ENABLED(MMC_TINY))
417 err = mmc_switch_part(mmc, block_dev->hwpart);
419 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
424 if ((start + blkcnt) > block_dev->lba) {
425 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
426 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
427 start + blkcnt, block_dev->lba);
432 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
433 pr_debug("%s: Failed to set blocklen\n", __func__);
438 cur = (blocks_todo > mmc->cfg->b_max) ?
439 mmc->cfg->b_max : blocks_todo;
440 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
441 pr_debug("%s: Failed to read blocks\n", __func__);
446 dst += cur * mmc->read_bl_len;
447 } while (blocks_todo > 0);
/*
 * mmc_go_idle() - send CMD0 (GO_IDLE_STATE) to reset the card to idle.
 * CMD0 expects no response (MMC_RSP_NONE).
 */
452 static int mmc_go_idle(struct mmc *mmc)
459 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
461 cmd.resp_type = MMC_RSP_NONE;
463 err = mmc_send_cmd(mmc, &cmd, NULL);
473 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - UHS signal-voltage switch sequence (SD spec):
 * CMD11, wait for the card to pull dat[0:3] low, gate the clock while the
 * host changes signalling voltage, re-enable the clock, then wait for
 * dat0 to go high again to confirm the card accepted the switch.
 * A request for 3.3V skips CMD11 and just sets the host voltage.
 */
474 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
480 * Send CMD11 only if the request is to switch the card to
483 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
484 return mmc_set_signal_voltage(mmc, signal_voltage);
486 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
488 cmd.resp_type = MMC_RSP_R1;
490 err = mmc_send_cmd(mmc, &cmd, NULL);
/* a status-error bit in the R1 response means the card refused CMD11 */
494 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
498 * The card should drive cmd and dat[0:3] low immediately
499 * after the response of cmd11, but wait 100 us to be sure
501 err = mmc_wait_dat0(mmc, 0, 100);
508 * During a signal voltage level switch, the clock must be gated
509 * for 5 ms according to the SD spec
511 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
513 err = mmc_set_signal_voltage(mmc, signal_voltage);
517 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
519 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
522 * Failure to switch is indicated by the card holding
523 * dat[0:3] low. Wait for at least 1 ms according to spec
525 err = mmc_wait_dat0(mmc, 1, 1000);
535 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
542 cmd.cmdidx = MMC_CMD_APP_CMD;
543 cmd.resp_type = MMC_RSP_R1;
546 err = mmc_send_cmd(mmc, &cmd, NULL);
551 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
552 cmd.resp_type = MMC_RSP_R3;
555 * Most cards do not answer if some reserved bits
556 * in the ocr are set. However, Some controller
557 * can set bit 7 (reserved for low voltages), but
558 * how to manage low voltages SD card is not yet
561 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
562 (mmc->cfg->voltages & 0xff8000);
564 if (mmc->version == SD_VERSION_2)
565 cmd.cmdarg |= OCR_HCS;
568 cmd.cmdarg |= OCR_S18R;
570 err = mmc_send_cmd(mmc, &cmd, NULL);
575 if (cmd.response[0] & OCR_BUSY)
584 if (mmc->version != SD_VERSION_2)
585 mmc->version = SD_VERSION_1_0;
587 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
588 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
589 cmd.resp_type = MMC_RSP_R3;
592 err = mmc_send_cmd(mmc, &cmd, NULL);
598 mmc->ocr = cmd.response[0];
600 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
601 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
603 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
609 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - one CMD1 (SEND_OP_COND) iteration.
 * With use_arg set (and not in SPI mode) the argument advertises the
 * host's supported voltage window intersected with the card's OCR,
 * plus the sector-addressing (HCS) and access-mode bits. The returned
 * OCR is cached in mmc->ocr for the caller's busy-bit polling.
 */
615 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
620 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
621 cmd.resp_type = MMC_RSP_R3;
623 if (use_arg && !mmc_host_is_spi(mmc))
624 cmd.cmdarg = OCR_HCS |
625 (mmc->cfg->voltages &
626 (mmc->ocr & OCR_VOLTAGE_MASK)) |
627 (mmc->ocr & OCR_ACCESS_MODE);
629 err = mmc_send_cmd(mmc, &cmd, NULL);
632 mmc->ocr = cmd.response[0];
636 static int mmc_send_op_cond(struct mmc *mmc)
640 /* Some cards seem to need this */
643 /* Asking to the card its capabilities */
644 for (i = 0; i < 2; i++) {
645 err = mmc_send_op_cond_iter(mmc, i != 0);
649 /* exit if not busy (flag seems to be inverted) */
650 if (mmc->ocr & OCR_BUSY)
653 mmc->op_cond_pending = 1;
657 static int mmc_complete_op_cond(struct mmc *mmc)
664 mmc->op_cond_pending = 0;
665 if (!(mmc->ocr & OCR_BUSY)) {
666 /* Some cards seem to need this */
669 start = get_timer(0);
671 err = mmc_send_op_cond_iter(mmc, 1);
674 if (mmc->ocr & OCR_BUSY)
676 if (get_timer(start) > timeout)
682 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
683 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
684 cmd.resp_type = MMC_RSP_R3;
687 err = mmc_send_cmd(mmc, &cmd, NULL);
692 mmc->ocr = cmd.response[0];
695 mmc->version = MMC_VERSION_UNKNOWN;
697 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_ext_csd() - read the 512-byte EXT_CSD register (CMD8 in eMMC
 * mode) into the caller-provided buffer via a single block read.
 * Caller must supply a buffer of at least MMC_MAX_BLOCK_LEN bytes,
 * suitably aligned for the host DMA.
 */
704 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
707 struct mmc_data data;
710 /* Get the Card Status Register */
711 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
712 cmd.resp_type = MMC_RSP_R1;
715 data.dest = (char *)ext_csd;
717 data.blocksize = MMC_MAX_BLOCK_LEN;
718 data.flags = MMC_DATA_READ;
720 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * mmc_switch() - CMD6 (SWITCH) write-byte access to an EXT_CSD field,
 * retried a bounded number of times, then polls card status until the
 * busy phase completes.
 * NOTE(review): elided listing — retry-decrement and success/exit paths
 * are not visible in this dump.
 */
725 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
732 cmd.cmdidx = MMC_CMD_SWITCH;
733 cmd.resp_type = MMC_RSP_R1b;
734 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
738 while (retries > 0) {
739 ret = mmc_send_cmd(mmc, &cmd, NULL);
741 /* Waiting for the ready status */
743 ret = mmc_send_status(mmc, timeout);
754 #if !CONFIG_IS_ENABLED(MMC_TINY)
755 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
760 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
766 speed_bits = EXT_CSD_TIMING_HS;
768 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
770 speed_bits = EXT_CSD_TIMING_HS200;
774 speed_bits = EXT_CSD_TIMING_LEGACY;
779 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
784 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
785 /* Now check to see that it worked */
786 err = mmc_send_ext_csd(mmc, test_csd);
790 /* No high-speed support */
791 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive mmc->card_caps from the cached EXT_CSD:
 * bus widths plus HS26/HS52/DDR52 (and HS200 when enabled) according to
 * the EXT_CSD CARD_TYPE field. SPI hosts and pre-v4 cards keep only the
 * legacy capability set.
 */
798 static int mmc_get_capabilities(struct mmc *mmc)
800 u8 *ext_csd = mmc->ext_csd;
803 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
805 if (mmc_host_is_spi(mmc))
808 /* Only version 4 supports high-speed */
809 if (mmc->version < MMC_VERSION_4)
813 pr_err("No ext_csd found!\n"); /* this should never happen */
817 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
/* low 6 bits of CARD_TYPE encode the supported device types */
819 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
820 mmc->cardtype = cardtype;
822 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
823 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
824 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
825 mmc->card_caps |= MMC_MODE_HS200;
/* DDR52 capability implies HS52 is also set */
828 if (cardtype & EXT_CSD_CARD_TYPE_52) {
829 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
830 mmc->card_caps |= MMC_MODE_DDR_52MHz;
831 mmc->card_caps |= MMC_MODE_HS_52MHz;
833 if (cardtype & EXT_CSD_CARD_TYPE_26)
834 mmc->card_caps |= MMC_MODE_HS;
840 static int mmc_set_capacity(struct mmc *mmc, int part_num)
844 mmc->capacity = mmc->capacity_user;
848 mmc->capacity = mmc->capacity_boot;
851 mmc->capacity = mmc->capacity_rpmb;
857 mmc->capacity = mmc->capacity_gp[part_num - 4];
863 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
868 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
869 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
874 if (part_num & PART_ACCESS_MASK)
875 forbidden = MMC_CAP(MMC_HS_200);
877 if (MMC_CAP(mmc->selected_mode) & forbidden) {
878 pr_debug("selected mode (%s) is forbidden for part %d\n",
879 mmc_mode_name(mmc->selected_mode), part_num);
881 } else if (mmc->selected_mode != mmc->best_mode) {
882 pr_debug("selected mode is not optimal\n");
887 return mmc_select_mode_and_width(mmc,
888 mmc->card_caps & ~forbidden);
893 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
894 unsigned int part_num)
/*
 * mmc_switch_part() - select a hardware partition via the PART_CONF
 * EXT_CSD field, after checking the current bus mode is allowed for
 * that partition. On success (or on -ENODEV when returning to the raw
 * device, part 0) the block descriptor capacity/hwpart are updated.
 */
900 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
904 ret = mmc_boot_part_access_chk(mmc, part_num);
908 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
909 (mmc->part_config & ~PART_ACCESS_MASK)
910 | (part_num & PART_ACCESS_MASK));
913 * Set the capacity if the switch succeeded or was intended
914 * to return to representing the raw device.
916 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
917 ret = mmc_set_capacity(mmc, part_num);
918 mmc_get_blk_desc(mmc)->hwpart = part_num;
924 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
925 int mmc_hwpart_config(struct mmc *mmc,
926 const struct mmc_hwpart_conf *conf,
927 enum mmc_hwpart_conf_mode mode)
933 u32 max_enh_size_mult;
934 u32 tot_enh_size_mult = 0;
937 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
939 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
942 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
943 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
947 if (!(mmc->part_support & PART_SUPPORT)) {
948 pr_err("Card does not support partitioning\n");
952 if (!mmc->hc_wp_grp_size) {
953 pr_err("Card does not define HC WP group size\n");
957 /* check partition alignment and total enhanced size */
958 if (conf->user.enh_size) {
959 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
960 conf->user.enh_start % mmc->hc_wp_grp_size) {
961 pr_err("User data enhanced area not HC WP group "
965 part_attrs |= EXT_CSD_ENH_USR;
966 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
967 if (mmc->high_capacity) {
968 enh_start_addr = conf->user.enh_start;
970 enh_start_addr = (conf->user.enh_start << 9);
976 tot_enh_size_mult += enh_size_mult;
978 for (pidx = 0; pidx < 4; pidx++) {
979 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
980 pr_err("GP%i partition not HC WP group size "
981 "aligned\n", pidx+1);
984 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
985 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
986 part_attrs |= EXT_CSD_ENH_GP(pidx);
987 tot_enh_size_mult += gp_size_mult[pidx];
991 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
992 pr_err("Card does not support enhanced attribute\n");
996 err = mmc_send_ext_csd(mmc, ext_csd);
1001 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1002 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1003 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1004 if (tot_enh_size_mult > max_enh_size_mult) {
1005 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1006 tot_enh_size_mult, max_enh_size_mult);
1007 return -EMEDIUMTYPE;
1010 /* The default value of EXT_CSD_WR_REL_SET is device
1011 * dependent, the values can only be changed if the
1012 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1013 * changed only once and before partitioning is completed. */
1014 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1015 if (conf->user.wr_rel_change) {
1016 if (conf->user.wr_rel_set)
1017 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1019 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1021 for (pidx = 0; pidx < 4; pidx++) {
1022 if (conf->gp_part[pidx].wr_rel_change) {
1023 if (conf->gp_part[pidx].wr_rel_set)
1024 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1026 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1030 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1031 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1032 puts("Card does not support host controlled partition write "
1033 "reliability settings\n");
1034 return -EMEDIUMTYPE;
1037 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1038 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1039 pr_err("Card already partitioned\n");
1043 if (mode == MMC_HWPART_CONF_CHECK)
1046 /* Partitioning requires high-capacity size definitions */
1047 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1048 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1049 EXT_CSD_ERASE_GROUP_DEF, 1);
1054 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1056 /* update erase group size to be high-capacity */
1057 mmc->erase_grp_size =
1058 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1062 /* all OK, write the configuration */
1063 for (i = 0; i < 4; i++) {
1064 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1065 EXT_CSD_ENH_START_ADDR+i,
1066 (enh_start_addr >> (i*8)) & 0xFF);
1070 for (i = 0; i < 3; i++) {
1071 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1072 EXT_CSD_ENH_SIZE_MULT+i,
1073 (enh_size_mult >> (i*8)) & 0xFF);
1077 for (pidx = 0; pidx < 4; pidx++) {
1078 for (i = 0; i < 3; i++) {
1079 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1080 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1081 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1086 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1087 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1091 if (mode == MMC_HWPART_CONF_SET)
1094 /* The WR_REL_SET is a write-once register but shall be
1095 * written before setting PART_SETTING_COMPLETED. As it is
1096 * write-once we can only write it when completing the
1098 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1099 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1100 EXT_CSD_WR_REL_SET, wr_rel_set);
1105 /* Setting PART_SETTING_COMPLETED confirms the partition
1106 * configuration but it only becomes effective after power
1107 * cycle, so we do not adjust the partition related settings
1108 * in the mmc struct. */
1110 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1111 EXT_CSD_PARTITION_SETTING,
1112 EXT_CSD_PARTITION_SETTING_COMPLETED);
1120 #if !CONFIG_IS_ENABLED(DM_MMC)
1121 int mmc_getcd(struct mmc *mmc)
1125 cd = board_mmc_getcd(mmc);
1128 if (mmc->cfg->ops->getcd)
1129 cd = mmc->cfg->ops->getcd(mmc);
1138 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * sd_switch() - CMD6 (SWITCH_FUNC) for SD cards: set 'value' in function
 * 'group' (mode = check vs switch in bit 31), reading the 64-byte switch
 * status block into resp. All other groups are left at 0xf (no change).
 */
1139 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1142 struct mmc_data data;
1144 /* Switch the frequency */
1145 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1146 cmd.resp_type = MMC_RSP_R1;
1147 cmd.cmdarg = (mode << 31) | 0xffffff;
1148 cmd.cmdarg &= ~(0xf << (group * 4));
1149 cmd.cmdarg |= value << (group * 4);
1151 data.dest = (char *)resp;
1152 data.blocksize = 64;
1154 data.flags = MMC_DATA_READ;
1156 return mmc_send_cmd(mmc, &cmd, &data);
1159 static int sd_get_capabilities(struct mmc *mmc)
1163 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1164 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1165 struct mmc_data data;
1167 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1171 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1173 if (mmc_host_is_spi(mmc))
1176 /* Read the SCR to find out if this card supports higher speeds */
1177 cmd.cmdidx = MMC_CMD_APP_CMD;
1178 cmd.resp_type = MMC_RSP_R1;
1179 cmd.cmdarg = mmc->rca << 16;
1181 err = mmc_send_cmd(mmc, &cmd, NULL);
1186 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1187 cmd.resp_type = MMC_RSP_R1;
1193 data.dest = (char *)scr;
1196 data.flags = MMC_DATA_READ;
1198 err = mmc_send_cmd(mmc, &cmd, &data);
1207 mmc->scr[0] = __be32_to_cpu(scr[0]);
1208 mmc->scr[1] = __be32_to_cpu(scr[1]);
1210 switch ((mmc->scr[0] >> 24) & 0xf) {
1212 mmc->version = SD_VERSION_1_0;
1215 mmc->version = SD_VERSION_1_10;
1218 mmc->version = SD_VERSION_2;
1219 if ((mmc->scr[0] >> 15) & 0x1)
1220 mmc->version = SD_VERSION_3;
1223 mmc->version = SD_VERSION_1_0;
1227 if (mmc->scr[0] & SD_DATA_4BIT)
1228 mmc->card_caps |= MMC_MODE_4BIT;
1230 /* Version 1.0 doesn't support switching */
1231 if (mmc->version == SD_VERSION_1_0)
1236 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1237 (u8 *)switch_status);
1242 /* The high-speed function is busy. Try again */
1243 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1247 /* If high-speed isn't supported, we return */
1248 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1249 mmc->card_caps |= MMC_CAP(SD_HS);
1251 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1252 /* Version before 3.0 don't support UHS modes */
1253 if (mmc->version < SD_VERSION_3)
1256 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1257 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1258 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1259 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1260 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1261 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1262 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1263 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1264 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1265 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1266 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1272 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1276 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1281 speed = UHS_SDR12_BUS_SPEED;
1284 speed = HIGH_SPEED_BUS_SPEED;
1286 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1288 speed = UHS_SDR12_BUS_SPEED;
1291 speed = UHS_SDR25_BUS_SPEED;
1294 speed = UHS_SDR50_BUS_SPEED;
1297 speed = UHS_DDR50_BUS_SPEED;
1300 speed = UHS_SDR104_BUS_SPEED;
1307 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1311 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * sd_select_bus_width() - set the SD card bus width via ACMD6
 * (APP_CMD followed by SET_BUS_WIDTH). Only 1-bit and 4-bit are valid
 * for SD; other widths are rejected.
 * NOTE(review): elided listing — the cmdarg encoding of the width and
 * the return path are not visible here.
 */
1317 static int sd_select_bus_width(struct mmc *mmc, int w)
1322 if ((w != 4) && (w != 1))
1325 cmd.cmdidx = MMC_CMD_APP_CMD;
1326 cmd.resp_type = MMC_RSP_R1;
1327 cmd.cmdarg = mmc->rca << 16;
1329 err = mmc_send_cmd(mmc, &cmd, NULL);
1333 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1334 cmd.resp_type = MMC_RSP_R1;
1339 err = mmc_send_cmd(mmc, &cmd, NULL);
1347 #if CONFIG_IS_ENABLED(MMC_WRITE)
1348 static int sd_read_ssr(struct mmc *mmc)
1350 static const unsigned int sd_au_size[] = {
1351 0, SZ_16K / 512, SZ_32K / 512,
1352 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1353 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1354 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1355 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1360 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1361 struct mmc_data data;
1363 unsigned int au, eo, et, es;
1365 cmd.cmdidx = MMC_CMD_APP_CMD;
1366 cmd.resp_type = MMC_RSP_R1;
1367 cmd.cmdarg = mmc->rca << 16;
1369 err = mmc_send_cmd(mmc, &cmd, NULL);
1373 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1374 cmd.resp_type = MMC_RSP_R1;
1378 data.dest = (char *)ssr;
1379 data.blocksize = 64;
1381 data.flags = MMC_DATA_READ;
1383 err = mmc_send_cmd(mmc, &cmd, &data);
1391 for (i = 0; i < 16; i++)
1392 ssr[i] = be32_to_cpu(ssr[i]);
1394 au = (ssr[2] >> 12) & 0xF;
1395 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1396 mmc->ssr.au = sd_au_size[au];
1397 es = (ssr[3] >> 24) & 0xFF;
1398 es |= (ssr[2] & 0xFF) << 8;
1399 et = (ssr[3] >> 18) & 0x3F;
1401 eo = (ssr[3] >> 16) & 0x3;
1402 mmc->ssr.erase_timeout = (et * 1000) / es;
1403 mmc->ssr.erase_offset = eo * 1000;
1406 pr_debug("Invalid Allocation Unit Size.\n");
1412 /* frequency bases */
1413 /* divided by 10 to be nice to platforms without floating point */
1414 static const int fbase[] = {
1421 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1422 * to platforms without floating point.
1424 static const u8 multipliers[] = {
/*
 * bus_width() - map a single MMC_MODE_*BIT capability flag to its numeric
 * bus width; warns on anything that is not exactly one of the three flags.
 * NOTE(review): "witdh" in the warning string is a typo for "width" —
 * runtime string, left untouched here; fix in a code change.
 */
1443 static inline int bus_width(uint cap)
1445 if (cap == MMC_MODE_8BIT)
1447 if (cap == MMC_MODE_4BIT)
1449 if (cap == MMC_MODE_1BIT)
1451 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1455 #if !CONFIG_IS_ENABLED(DM_MMC)
1456 #ifdef MMC_SUPPORTS_TUNING
1457 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1463 static void mmc_send_init_stream(struct mmc *mmc)
1467 static int mmc_set_ios(struct mmc *mmc)
1471 if (mmc->cfg->ops->set_ios)
1472 ret = mmc->cfg->ops->set_ios(mmc);
/*
 * mmc_set_clock() - clamp the requested clock to the host's [f_min, f_max]
 * window, record the requested enable/disable state, and push the new
 * settings to the controller via set_ios.
 */
1478 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1481 if (clock > mmc->cfg->f_max)
1482 clock = mmc->cfg->f_max;
1484 if (clock < mmc->cfg->f_min)
1485 clock = mmc->cfg->f_min;
1489 mmc->clk_disable = disable;
1491 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1493 return mmc_set_ios(mmc);
/* mmc_set_bus_width() - record the bus width and apply it via set_ios */
1496 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1498 mmc->bus_width = width;
1500 return mmc_set_ios(mmc);
1503 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1505 * helper function to display the capabilities in a human
1506 * friendly manner. The capabilities include bus width and
1509 void mmc_dump_capabilities(const char *text, uint caps)
1513 pr_debug("%s: widths [", text);
1514 if (caps & MMC_MODE_8BIT)
1516 if (caps & MMC_MODE_4BIT)
1518 if (caps & MMC_MODE_1BIT)
1520 pr_debug("\b\b] modes [");
1521 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1522 if (MMC_CAP(mode) & caps)
1523 pr_debug("%s, ", mmc_mode_name(mode));
1524 pr_debug("\b\b]\n");
1528 struct mode_width_tuning {
1531 #ifdef MMC_SUPPORTS_TUNING
1536 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1537 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1540 case MMC_SIGNAL_VOLTAGE_000: return 0;
1541 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1542 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1543 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1548 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1552 if (mmc->signal_voltage == signal_voltage)
1555 mmc->signal_voltage = signal_voltage;
1556 err = mmc_set_ios(mmc);
1558 pr_debug("unable to set voltage (err %d)\n", err);
1563 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1569 #if !CONFIG_IS_ENABLED(MMC_TINY)
1570 static const struct mode_width_tuning sd_modes_by_pref[] = {
1571 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1572 #ifdef MMC_SUPPORTS_TUNING
1575 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1576 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1581 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1585 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1589 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1594 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1596 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1599 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1604 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1608 #define for_each_sd_mode_by_pref(caps, mwt) \
1609 for (mwt = sd_modes_by_pref;\
1610 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1612 if (caps & MMC_CAP(mwt->mode))
1614 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1617 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1618 const struct mode_width_tuning *mwt;
1619 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1620 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1622 bool uhs_en = false;
1627 mmc_dump_capabilities("sd card", card_caps);
1628 mmc_dump_capabilities("host", mmc->host_caps);
1631 /* Restrict card's capabilities by what the host can do */
1632 caps = card_caps & mmc->host_caps;
1637 for_each_sd_mode_by_pref(caps, mwt) {
1640 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1641 if (*w & caps & mwt->widths) {
1642 pr_debug("trying mode %s width %d (at %d MHz)\n",
1643 mmc_mode_name(mwt->mode),
1645 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1647 /* configure the bus width (card + host) */
1648 err = sd_select_bus_width(mmc, bus_width(*w));
1651 mmc_set_bus_width(mmc, bus_width(*w));
1653 /* configure the bus mode (card) */
1654 err = sd_set_card_speed(mmc, mwt->mode);
1658 /* configure the bus mode (host) */
1659 mmc_select_mode(mmc, mwt->mode);
1660 mmc_set_clock(mmc, mmc->tran_speed,
1663 #ifdef MMC_SUPPORTS_TUNING
1664 /* execute tuning if needed */
1665 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1666 err = mmc_execute_tuning(mmc,
1669 pr_debug("tuning failed\n");
1675 #if CONFIG_IS_ENABLED(MMC_WRITE)
1676 err = sd_read_ssr(mmc);
1678 pr_warn("unable to read ssr\n");
1684 /* revert to a safer bus speed */
1685 mmc_select_mode(mmc, SD_LEGACY);
1686 mmc_set_clock(mmc, mmc->tran_speed,
1692 pr_err("unable to select a mode\n");
1697 * read the compare the part of ext csd that is constant.
1698 * This can be used to check that the transfer is working
/*
 * mmc_read_and_compare_ext_csd() - sanity-check a new bus configuration
 * by re-reading EXT_CSD and comparing read-only fields against the cached
 * copy; a mismatch indicates the transfer is corrupting data. Pre-v4
 * cards have no EXT_CSD and are skipped.
 */
1701 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1704 const u8 *ext_csd = mmc->ext_csd;
1705 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1707 if (mmc->version < MMC_VERSION_4)
1710 err = mmc_send_ext_csd(mmc, test_csd);
1714 /* Only compare read only fields */
1715 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1716 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1717 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1718 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1719 ext_csd[EXT_CSD_REV]
1720 == test_csd[EXT_CSD_REV] &&
1721 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1722 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1723 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1724 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1730 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Try to switch the signal voltage to the lowest value supported both by
 * the card (derived from its EXT_CSD card type bits for the given bus
 * mode) and by the host (allowed_mask). Candidates are tried one bit at
 * a time via ffs(); assumes the lowest MMC_SIGNAL_VOLTAGE_* bit is the
 * lowest voltage — TODO(review) confirm bit ordering in the header.
 */
1731 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1732 uint32_t allowed_mask)
/* HS200 is specified at 1.8V and/or 1.2V only */
1738 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
1739 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1740 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
1741 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* DDR operation may run at 3.3V/1.8V, or at 1.2V */
1744 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1745 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1746 MMC_SIGNAL_VOLTAGE_180;
1747 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1748 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1751 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* try each common voltage until one sticks */
1755 while (card_mask & allowed_mask) {
1756 enum mmc_voltage best_match;
/* pick the lowest set bit of the intersection */
1758 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1759 if (!mmc_set_signal_voltage(mmc, best_match))
/* this voltage failed: drop it and retry with the next candidate */
1762 allowed_mask &= ~best_match;
/* stub when MMC_IO_VOLTAGE is disabled: voltage switching is a no-op */
1768 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1769 uint32_t allowed_mask)
/*
 * Table of eMMC bus modes in order of preference (presumably fastest
 * first, per the table name). Mode/name fields of each entry are elided
 * in this excerpt; only widths and tuning commands are visible.
 */
1775 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1776 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1779 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
/* HS200 requires a tuning sequence before use */
1780 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1785 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1789 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1793 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1797 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* walk the preference table, visiting only modes present in caps */
1801 #define for_each_mmc_mode_by_pref(caps, mwt) \
1802 for (mwt = mmc_modes_by_pref;\
1803 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1805 if (caps & MMC_CAP(mwt->mode))
/*
 * Mapping from host bus-width capability bits (and DDR flag) to the
 * value written into the EXT_CSD BUS_WIDTH byte. Ordered widest first
 * so the iterator below tries the best width available.
 */
1807 static const struct ext_csd_bus_width {
1811 } ext_csd_bus_width[] = {
1812 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1813 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1814 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1815 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1816 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
/* visit table entries matching the DDR requirement and the caps mask */
1819 #define for_each_supported_width(caps, ddr, ecbv) \
1820 for (ecbv = ext_csd_bus_width;\
1821 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1823 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Negotiate the best eMMC bus mode and width supported by both card and
 * host: iterate modes by preference and widths widest-first, switch the
 * card and host, then verify with a test EXT_CSD transfer. On failure,
 * fall back to 1-bit legacy mode before trying the next combination.
 * NOTE(review): several error-check/return lines are elided here.
 */
1825 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1828 const struct mode_width_tuning *mwt;
1829 const struct ext_csd_bus_width *ecbw;
1832 mmc_dump_capabilities("mmc", card_caps);
1833 mmc_dump_capabilities("host", mmc->host_caps);
1836 /* Restrict card's capabilities by what the host can do */
1837 card_caps &= mmc->host_caps;
1839 /* Only version 4 of MMC supports wider bus widths */
1840 if (mmc->version < MMC_VERSION_4)
1843 if (!mmc->ext_csd) {
1844 pr_debug("No ext_csd found!\n"); /* this should never happen */
/* start negotiation from the safe legacy clock */
1848 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1850 for_each_mmc_mode_by_pref(card_caps, mwt) {
1851 for_each_supported_width(card_caps & mwt->widths,
1852 mmc_is_mode_ddr(mwt->mode), ecbw) {
/* remembered so we can restore it if this combination fails */
1853 enum mmc_voltage old_voltage;
1854 pr_debug("trying mode %s width %d (at %d MHz)\n",
1855 mmc_mode_name(mwt->mode),
1856 bus_width(ecbw->cap),
1857 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1858 old_voltage = mmc->signal_voltage;
1859 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1860 MMC_ALL_SIGNAL_VOLTAGE);
1864 /* configure the bus width (card + host) */
/* clear the DDR flag first: width is switched in SDR before DDR */
1865 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1867 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1870 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1872 /* configure the bus speed (card) */
1873 err = mmc_set_card_speed(mmc, mwt->mode);
1878 * configure the bus width AND the ddr mode (card)
1879 * The host side will be taken care of in the next step
1881 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1882 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1884 ecbw->ext_csd_bits);
1889 /* configure the bus mode (host) */
1890 mmc_select_mode(mmc, mwt->mode);
1891 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1892 #ifdef MMC_SUPPORTS_TUNING
1894 /* execute tuning if needed */
1896 err = mmc_execute_tuning(mmc, mwt->tuning);
1898 pr_debug("tuning failed\n");
1904 /* do a transfer to check the configuration */
1905 err = mmc_read_and_compare_ext_csd(mmc);
1909 mmc_set_signal_voltage(mmc, old_voltage);
1910 /* if an error occurred, revert to a safer bus mode */
1911 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1912 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1913 mmc_select_mode(mmc, MMC_LEGACY);
1914 mmc_set_bus_width(mmc, 1);
/* reached only when every mode/width combination failed */
1918 pr_err("unable to select a mode\n");
1924 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: single static EXT_CSD buffer instead of per-card malloc */
1925 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ specific startup: read EXT_CSD, derive the spec version,
 * user/boot/RPMB/GP partition capacities, erase/WP group sizes and
 * write-reliability settings from it.
 * NOTE(review): many error-check/return lines are elided here.
 */
1928 static int mmc_startup_v4(struct mmc *mmc)
1932 bool has_parts = false;
1933 bool part_completed;
/* indexed by EXT_CSD_REV; entries elided in this excerpt */
1934 static const u32 mmc_versions[] = {
1946 #if CONFIG_IS_ENABLED(MMC_TINY)
1947 u8 *ext_csd = ext_csd_bkup;
/* EXT_CSD does not exist on SD cards or pre-v4 MMC */
1949 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
1953 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
1955 err = mmc_send_ext_csd(mmc, ext_csd);
1959 /* store the ext csd for future reference */
1961 mmc->ext_csd = ext_csd;
1963 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1965 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1968 /* check ext_csd version and capacity */
1969 err = mmc_send_ext_csd(mmc, ext_csd);
1973 /* store the ext csd for future reference */
/* owned by this mmc instance; released in mmc_startup error path */
1975 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1978 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* unknown (future) EXT_CSD revision: cannot map to a version */
1980 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
1983 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
1985 if (mmc->version >= MMC_VERSION_4_2) {
1987 * According to the JEDEC Standard, the value of
1988 * ext_csd's capacity is valid if the value is more
/* assemble 32-bit SEC_CNT (little-endian byte order) */
1991 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1992 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1993 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1994 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1995 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT is only authoritative above 2 GiB */
1996 if ((capacity >> 20) > 2 * 1024)
1997 mmc->capacity_user = capacity;
2000 /* The partition data may be non-zero but it is only
2001 * effective if PARTITION_SETTING_COMPLETED is set in
2002 * EXT_CSD, so ignore any data if this bit is not set,
2003 * except for enabling the high-capacity group size
2004 * definition (see below).
2006 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2007 EXT_CSD_PARTITION_SETTING_COMPLETED);
2009 /* store the partition info of emmc */
2010 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2011 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2012 ext_csd[EXT_CSD_BOOT_MULT])
2013 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2014 if (part_completed &&
2015 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2016 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT/RPMB sizes are given in 128 KiB units (<< 17) */
2018 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2020 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* compute the four general-purpose partition capacities */
2022 for (i = 0; i < 4; i++) {
/* each GP size multiplier is a 3-byte field */
2023 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2024 uint mult = (ext_csd[idx + 2] << 16) +
2025 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2028 if (!part_completed)
2030 mmc->capacity_gp[i] = mult;
2031 mmc->capacity_gp[i] *=
2032 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2033 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* groups are in 512 KiB units (<< 19) */
2034 mmc->capacity_gp[i] <<= 19;
2037 #ifndef CONFIG_SPL_BUILD
2038 if (part_completed) {
/* enhanced user area size: 3-byte multiplier x group sizes */
2039 mmc->enh_user_size =
2040 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2041 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2042 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2043 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2044 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2045 mmc->enh_user_size <<= 19;
2046 mmc->enh_user_start =
2047 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2048 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2049 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2050 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2051 if (mmc->high_capacity)
2052 mmc->enh_user_start <<= 9;
2057 * Host needs to enable ERASE_GRP_DEF bit if device is
2058 * partitioned. This bit will be lost every time after a reset
2059 * or power off. This will affect erase size.
2063 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2064 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2067 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2068 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy in sync with the switch we just did */
2073 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2076 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2077 #if CONFIG_IS_ENABLED(MMC_WRITE)
2078 /* Read out group size from ext_csd */
2079 mmc->erase_grp_size =
2080 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2083 * if high capacity and partition setting completed
2084 * SEC_COUNT is valid even if it is smaller than 2 GiB
2085 * JEDEC Standard JESD84-B45, 6.2.4
2087 if (mmc->high_capacity && part_completed) {
2088 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2089 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2090 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2091 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2092 capacity *= MMC_MAX_BLOCK_LEN;
2093 mmc->capacity_user = capacity;
2096 #if CONFIG_IS_ENABLED(MMC_WRITE)
2098 /* Calculate the group size from the csd value. */
2099 int erase_gsz, erase_gmul;
2101 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2102 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2103 mmc->erase_grp_size = (erase_gsz + 1)
2107 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2108 mmc->hc_wp_grp_size = 1024
2109 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2110 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2113 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2118 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached EXT_CSD pointer */
2121 mmc->ext_csd = NULL;
/*
 * Bring an identified card from Identification to Transfer state and
 * populate the struct mmc / blk_desc: read CID and CSD, assign/read the
 * RCA, derive version, block lengths and capacity, run the v4 startup,
 * then negotiate the best bus mode/width.
 * NOTE(review): many error-check/return lines are elided here.
 */
2126 static int mmc_startup(struct mmc *mmc)
2132 struct blk_desc *bdesc;
2134 #ifdef CONFIG_MMC_SPI_CRC_ON
2135 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2136 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2137 cmd.resp_type = MMC_RSP_R1;
2139 err = mmc_send_cmd(mmc, &cmd, NULL);
2145 /* Put the Card in Identify Mode */
2146 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2147 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2148 cmd.resp_type = MMC_RSP_R2;
2151 err = mmc_send_cmd(mmc, &cmd, NULL);
2153 #ifdef CONFIG_MMC_QUIRKS
2154 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2157 * It has been seen that SEND_CID may fail on the first
2158 * attempt, let's try a few more time
2161 err = mmc_send_cmd(mmc, &cmd, NULL);
2164 } while (retries--);
/* CID response is 128 bits = 16 bytes */
2171 memcpy(mmc->cid, cmd.response, 16);
2174 * For MMC cards, set the Relative Address.
2175 * For SD cards, get the Relative Address.
2176 * This also puts the cards into Standby State
2178 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2179 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2180 cmd.cmdarg = mmc->rca << 16;
2181 cmd.resp_type = MMC_RSP_R6;
2183 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD: the card assigns its own RCA in the R6 response */
2189 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2192 /* Get the Card-Specific Data */
2193 cmd.cmdidx = MMC_CMD_SEND_CSD;
2194 cmd.resp_type = MMC_RSP_R2;
2195 cmd.cmdarg = mmc->rca << 16;
2197 err = mmc_send_cmd(mmc, &cmd, NULL);
2202 mmc->csd[0] = cmd.response[0];
2203 mmc->csd[1] = cmd.response[1];
2204 mmc->csd[2] = cmd.response[2];
2205 mmc->csd[3] = cmd.response[3];
/* derive MMC spec version from the CSD SPEC_VERS field */
2207 if (mmc->version == MMC_VERSION_UNKNOWN) {
2208 int version = (cmd.response[0] >> 26) & 0xf;
2212 mmc->version = MMC_VERSION_1_2;
2215 mmc->version = MMC_VERSION_1_4;
2218 mmc->version = MMC_VERSION_2_2;
2221 mmc->version = MMC_VERSION_3;
2224 mmc->version = MMC_VERSION_4;
2227 mmc->version = MMC_VERSION_1_2;
2232 /* divide frequency by 10, since the mults are 10x bigger */
2233 freq = fbase[(cmd.response[0] & 0x7)];
2234 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2236 mmc->legacy_speed = freq * mult;
2237 mmc_select_mode(mmc, MMC_LEGACY);
2239 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2240 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2241 #if CONFIG_IS_ENABLED(MMC_WRITE)
2244 mmc->write_bl_len = mmc->read_bl_len;
2246 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* high-capacity (CSD v2) vs standard-capacity size encoding */
2249 if (mmc->high_capacity) {
2250 csize = (mmc->csd[1] & 0x3f) << 16
2251 | (mmc->csd[2] & 0xffff0000) >> 16;
2254 csize = (mmc->csd[1] & 0x3ff) << 2
2255 | (mmc->csd[2] & 0xc0000000) >> 30;
2256 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2259 mmc->capacity_user = (csize + 1) << (cmult + 2);
2260 mmc->capacity_user *= mmc->read_bl_len;
2261 mmc->capacity_boot = 0;
2262 mmc->capacity_rpmb = 0;
2263 for (i = 0; i < 4; i++)
2264 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the stack can transfer */
2266 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2267 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2269 #if CONFIG_IS_ENABLED(MMC_WRITE)
2270 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2271 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the driver stage register only if the card implements it */
2274 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2275 cmd.cmdidx = MMC_CMD_SET_DSR;
2276 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2277 cmd.resp_type = MMC_RSP_NONE;
2278 if (mmc_send_cmd(mmc, &cmd, NULL))
2279 pr_warn("MMC: SET_DSR failed\n");
2282 /* Select the card, and put it into Transfer Mode */
2283 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2284 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2285 cmd.resp_type = MMC_RSP_R1;
2286 cmd.cmdarg = mmc->rca << 16;
2287 err = mmc_send_cmd(mmc, &cmd, NULL);
2294 * For SD, its erase group is always one sector
2296 #if CONFIG_IS_ENABLED(MMC_WRITE)
2297 mmc->erase_grp_size = 1;
2299 mmc->part_config = MMCPART_NOAVAILABLE;
2301 err = mmc_startup_v4(mmc);
2305 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2309 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, skip mode negotiation */
2310 mmc_set_clock(mmc, mmc->legacy_speed, false);
2311 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2312 mmc_set_bus_width(mmc, 1);
2315 err = sd_get_capabilities(mmc);
2318 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2320 err = mmc_get_capabilities(mmc);
2323 mmc_select_mode_and_width(mmc, mmc->card_caps);
2329 mmc->best_mode = mmc->selected_mode;
2331 /* Fix the block length for DDR mode */
2332 if (mmc->ddr_mode) {
2333 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2334 #if CONFIG_IS_ENABLED(MMC_WRITE)
2335 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2339 /* fill in device description */
2340 bdesc = mmc_get_blk_desc(mmc);
2344 bdesc->blksz = mmc->read_bl_len;
2345 bdesc->log2blksz = LOG2(bdesc->blksz);
2346 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2347 #if !defined(CONFIG_SPL_BUILD) || \
2348 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2349 !defined(CONFIG_USE_TINY_PRINTF))
/* decode manufacturer/serial/product/revision from CID fields */
2350 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2351 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2352 (mmc->cid[3] >> 16) & 0xffff);
2353 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2354 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2355 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2356 (mmc->cid[2] >> 24) & 0xff);
2357 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2358 (mmc->cid[2] >> 16) & 0xf);
2360 bdesc->vendor[0] = 0;
2361 bdesc->product[0] = 0;
2362 bdesc->revision[0] = 0;
2364 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
/*
 * Send SD CMD8 (SEND_IF_COND) with the 0xaa check pattern. A card that
 * echoes the pattern back is SD v2.00 or later.
 */
2371 static int mmc_send_if_cond(struct mmc *mmc)
2376 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2377 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2378 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2379 cmd.resp_type = MMC_RSP_R7;
2381 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the check pattern back in the R7 response */
2386 if ((cmd.response[0] & 0xff) != 0xaa)
2389 mmc->version = SD_VERSION_2;
2394 #if !CONFIG_IS_ENABLED(DM_MMC)
2395 /* board-specific MMC power initializations. */
/* weak default: no-op; boards may override (non-DM path only) */
2396 __weak void board_mmc_power_init(void)
/*
 * Resolve the card's power supplies. With driver model, look up the
 * vmmc (card power) and vqmmc (I/O voltage) regulators from the device
 * tree; both are optional and a missing one is only logged at debug
 * level. Without DM, defer to board-specific code.
 */
2401 static int mmc_power_init(struct mmc *mmc)
2403 #if CONFIG_IS_ENABLED(DM_MMC)
2404 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2407 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2410 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2412 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2413 &mmc->vqmmc_supply);
2415 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2417 #else /* !CONFIG_DM_MMC */
2419 * Driver model should use a regulator, as above, rather than calling
2420 * out to board code.
2422 board_mmc_power_init();
2428 * put the host in the initial state:
2429 * - turn on Vdd (card power supply)
2430 * - configure the bus width and clock to minimal values
2432 static void mmc_set_initial_state(struct mmc *mmc)
2436 /* First try to set 3.3V. If it fails set to 1.8V */
2437 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2439 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2441 pr_warn("mmc: failed to set signal voltage\n");
/* safest bus configuration: legacy mode, 1-bit, minimum clock */
2443 mmc_select_mode(mmc, MMC_LEGACY);
2444 mmc_set_bus_width(mmc, 1);
2445 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card's Vdd supply (via the vmmc regulator when DM is used). */
2448 static int mmc_power_on(struct mmc *mmc)
2450 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2451 if (mmc->vmmc_supply) {
2452 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2455 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the card's Vdd supply (best effort). */
2463 static int mmc_power_off(struct mmc *mmc)
2465 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2466 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2467 if (mmc->vmmc_supply) {
2468 int ret = regulator_set_enable(mmc->vmmc_supply, false);
/* only debug-level: some regulators cannot be disabled */
2471 pr_debug("Error disabling VMMC supply\n");
/*
 * Full power cycle: power off, wait, power back on. Needed to recover a
 * card after a failed UHS voltage switch.
 */
2479 static int mmc_power_cycle(struct mmc *mmc)
2483 ret = mmc_power_off(mmc);
2487 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2488 * to be on the safer side.
2491 return mmc_power_on(mmc);
/*
 * Power up the card and obtain its operating conditions: init supplies,
 * power-cycle (disabling UHS if cycling is unsupported), reset the card
 * with CMD0, probe SD v2 via CMD8, then issue SD ACMD41 or, on timeout,
 * MMC CMD1. NOTE(review): error-check/return lines are elided here.
 */
2494 int mmc_get_op_cond(struct mmc *mmc)
2496 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2502 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2503 mmc_adapter_card_type_ident();
2505 err = mmc_power_init(mmc);
2509 #ifdef CONFIG_MMC_QUIRKS
2510 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2511 MMC_QUIRK_RETRY_SEND_CID;
2514 err = mmc_power_cycle(mmc);
2517 * if power cycling is not supported, we should not try
2518 * to use the UHS modes, because we wouldn't be able to
2519 * recover from an error during the UHS initialization.
2521 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2523 mmc->host_caps &= ~UHS_CAPS;
2524 err = mmc_power_on(mmc);
2529 #if CONFIG_IS_ENABLED(DM_MMC)
2530 /* The device has already been probed ready for use */
2532 /* made sure it's not NULL earlier */
2533 err = mmc->cfg->ops->init(mmc);
2540 mmc_set_initial_state(mmc);
2541 mmc_send_init_stream(mmc);
2543 /* Reset the Card */
2544 err = mmc_go_idle(mmc);
2549 /* The internal partition reset to user partition(0) at every CMD0*/
2550 mmc_get_blk_desc(mmc)->hwpart = 0;
2552 /* Test for SD version 2 */
2553 err = mmc_send_if_cond(mmc);
2555 /* Now try to get the SD card's operating condition */
2556 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS negotiation needs a power cycle before retrying */
2557 if (err && uhs_en) {
2559 mmc_power_cycle(mmc);
2563 /* If the command timed out, we check for an MMC card */
2564 if (err == -ETIMEDOUT) {
2565 err = mmc_send_op_cond(mmc);
2568 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2569 pr_err("Card did not respond to voltage select!\n");
/*
 * First phase of (possibly asynchronous) card init: set up baseline host
 * capabilities, check card presence, and obtain operating conditions.
 * Completion (mmc_complete_init) may be deferred by the caller.
 */
2578 int mmc_start_init(struct mmc *mmc)
2584 * all hosts are capable of 1 bit bus-width and able to use the legacy
/* legacy timings and 1-bit width are always advertised */
2587 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2588 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2590 #if !defined(CONFIG_MMC_BROKEN_CD)
2591 /* we pretend there's no card when init is NULL */
2592 no_card = mmc_getcd(mmc) == 0;
2596 #if !CONFIG_IS_ENABLED(DM_MMC)
2597 no_card = no_card || (mmc->cfg->ops->init == NULL);
2601 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2602 pr_err("MMC: no card present\n")
2607 err = mmc_get_op_cond(mmc);
/* flag that mmc_init() must still run mmc_complete_init() */
2610 mmc->init_in_progress = 1;
/*
 * Second phase of card init: finish a pending OCR negotiation if one was
 * started, then run the full startup sequence.
 */
2615 static int mmc_complete_init(struct mmc *mmc)
2619 mmc->init_in_progress = 0;
2620 if (mmc->op_cond_pending)
2621 err = mmc_complete_op_cond(mmc);
2624 err = mmc_startup(mmc);
/*
 * Public entry point: run both init phases (skipping phase one if it was
 * already started asynchronously) and report the elapsed time.
 */
2632 int mmc_init(struct mmc *mmc)
2635 __maybe_unused ulong start;
2636 #if CONFIG_IS_ENABLED(DM_MMC)
2637 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2644 start = get_timer(0);
2646 if (!mmc->init_in_progress)
2647 err = mmc_start_init(mmc);
2650 err = mmc_complete_init(mmc);
2652 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
/* Store the driver stage register value to be programmed at startup. */
2657 int mmc_set_dsr(struct mmc *mmc, u16 val)
2663 /* CPU-specific MMC initializations */
/* weak default: no-op; SoC code may override */
2664 __weak int cpu_mmc_init(bd_t *bis)
2669 /* board-specific MMC initializations. */
/* weak default: no-op; board code may override */
2670 __weak int board_mmc_init(bd_t *bis)
/* Request (or cancel) card init during the pre-init phase. */
2675 void mmc_set_preinit(struct mmc *mmc, int preinit)
2677 mmc->preinit = preinit;
2680 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Driver-model probe: bind MMC devices in sequence order, then probe
 * every device in the uclass. Probe failures are logged but do not
 * abort the loop, so remaining controllers still come up.
 */
2681 static int mmc_probe(bd_t *bis)
2685 struct udevice *dev;
2687 ret = uclass_get(UCLASS_MMC, &uc);
2692 * Try to add them in sequence order. Really with driver model we
2693 * should allow holes, but the current MMC list does not allow that.
2694 * So if we request 0, 1, 3 we will get 0, 1, 2.
2696 for (i = 0; ; i++) {
2697 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2701 uclass_foreach_dev(dev, uc) {
2702 ret = device_probe(dev);
2704 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* non-DM probe: delegate controller registration to board code */
2710 static int mmc_probe(bd_t *bis)
2712 if (board_mmc_init(bis) < 0)
/*
 * Global one-shot MMC subsystem init: probe all controllers and list the
 * detected devices. Subsequent calls return immediately.
 */
2719 int mmc_initialize(bd_t *bis)
/* guard: the subsystem must only be brought up once */
2721 static int initialized = 0;
2723 if (initialized) /* Avoid initializing mmc multiple times */
2727 #if !CONFIG_IS_ENABLED(BLK)
2728 #if !CONFIG_IS_ENABLED(MMC_TINY)
2732 ret = mmc_probe(bis);
2736 #ifndef CONFIG_SPL_BUILD
2737 print_mmc_devices(',');
2744 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Permanently enable manual background operations (BKOPS) on an eMMC
 * device, if the device supports them and they are not already on.
 * Note: EXT_CSD_BKOPS_EN is a write-once field on the device.
 */
2745 int mmc_set_bkops_enable(struct mmc *mmc)
2748 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2750 err = mmc_send_ext_csd(mmc, ext_csd);
2752 puts("Could not get ext_csd register values\n");
2756 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2757 puts("Background operations not supported on device\n");
2758 return -EMEDIUMTYPE;
2761 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2762 puts("Background operations already enabled\n");
2766 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2768 puts("Failed to enable manual background operations\n");
2772 puts("Enabled manual background operations\n");