1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
/* Forward declarations for helpers defined later in this file. */
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
/*
 * MMC_TINY builds use a single statically allocated controller/card
 * instance; find_mmc_device() presumably ignores dev_num and returns
 * &mmc_static — body not fully visible here, confirm in full source.
 */
28 #if CONFIG_IS_ENABLED(MMC_TINY)
29 static struct mmc mmc_static;
30 struct mmc *find_mmc_device(int dev_num)
35 void mmc_do_preinit(void)
37 struct mmc *m = &mmc_static;
38 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
39 mmc_set_preinit(m, 1);
/* Accessor for the block device descriptor embedded in struct mmc. */
45 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
47 return &mmc->block_dev;
51 #if !CONFIG_IS_ENABLED(DM_MMC)
53 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Non-DM hook: wait for DAT0 to reach @state (used by UHS voltage switch). */
54 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
/* Weak board hook; boards override to report write-protect state. */
60 __weak int board_mmc_getwp(struct mmc *mmc)
65 int mmc_getwp(struct mmc *mmc)
69 wp = board_mmc_getwp(mmc);
/* Driver-provided getwp is also consulted — the guard between the two
 * calls is not visible here; confirm precedence in full source. */
72 if (mmc->cfg->ops->getwp)
73 wp = mmc->cfg->ops->getwp(mmc);
/* Weak board hook for card-detect; default body not visible here. */
81 __weak int board_mmc_getcd(struct mmc *mmc)
87 #ifdef CONFIG_MMC_TRACE
/* Trace helpers (CONFIG_MMC_TRACE): dump each command before and after
 * it is sent. The historical "mmmc_" spelling is the real symbol name. */
88 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
90 printf("CMD_SEND:%d\n", cmd->cmdidx);
91 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
94 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
100 printf("\t\tRET\t\t\t %d\n", ret);
102 switch (cmd->resp_type) {
104 printf("\t\tMMC_RSP_NONE\n");
107 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
111 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
115 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
117 printf("\t\t \t\t 0x%08X \n",
119 printf("\t\t \t\t 0x%08X \n",
121 printf("\t\t \t\t 0x%08X \n",
/* R2 (CID/CSD): additionally dump the 16 response bytes. */
124 printf("\t\t\t\t\tDUMPING DATA\n");
125 for (i = 0; i < 4; i++) {
127 printf("\t\t\t\t\t%03d - ", i*4);
/* Bytes are printed high-to-low via *ptr-- ; the initial offset
 * adjustment of ptr is not visible here — confirm in full source. */
128 ptr = (u8 *)&cmd->response[i];
130 for (j = 0; j < 4; j++)
131 printf("%02X ", *ptr--);
136 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
140 printf("\t\tERROR MMC rsp not supported\n");
146 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
/* CURRENT_STATE is bits [12:9] of the R1 card status word. */
150 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
151 printf("CURR STATE:%d\n", status);
155 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/* Map a bus_mode enum value to a human-readable name (debug output). */
156 const char *mmc_mode_name(enum bus_mode mode)
158 static const char *const names[] = {
159 [MMC_LEGACY] = "MMC legacy",
160 [SD_LEGACY] = "SD Legacy",
161 [MMC_HS] = "MMC High Speed (26MHz)",
162 [SD_HS] = "SD High Speed (50MHz)",
163 [UHS_SDR12] = "UHS SDR12 (25MHz)",
164 [UHS_SDR25] = "UHS SDR25 (50MHz)",
165 [UHS_SDR50] = "UHS SDR50 (100MHz)",
166 [UHS_SDR104] = "UHS SDR104 (208MHz)",
167 [UHS_DDR50] = "UHS DDR50 (50MHz)",
168 [MMC_HS_52] = "MMC High Speed (52MHz)",
169 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
170 [MMC_HS_200] = "HS200 (200MHz)",
/* Guard against out-of-range enum values before indexing the table. */
173 if (mode >= MMC_MODES_END)
174 return "Unknown mode";
/* Nominal bus clock (Hz) per mode; MMC_LEGACY uses the card's advertised
 * legacy speed rather than the table. */
180 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
182 static const int freqs[] = {
183 [MMC_LEGACY] = 25000000,
184 [SD_LEGACY] = 25000000,
187 [MMC_HS_52] = 52000000,
188 [MMC_DDR_52] = 52000000,
189 [UHS_SDR12] = 25000000,
190 [UHS_SDR25] = 50000000,
191 [UHS_SDR50] = 100000000,
192 [UHS_DDR50] = 50000000,
193 [UHS_SDR104] = 208000000,
194 [MMC_HS_200] = 200000000,
197 if (mode == MMC_LEGACY)
198 return mmc->legacy_speed;
199 else if (mode >= MMC_MODES_END)
/* Record the chosen mode and derive clock rate and DDR flag from it. */
205 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
207 mmc->selected_mode = mode;
208 mmc->tran_speed = mmc_mode2freq(mmc, mode);
209 mmc->ddr_mode = mmc_is_mode_ddr(mode);
210 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
211 mmc->tran_speed / 1000000);
215 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM command dispatch: wrap the driver's send_cmd with trace hooks. */
216 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
220 mmmc_trace_before_send(mmc, cmd);
221 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
222 mmmc_trace_after_send(mmc, cmd, ret);
/* Poll CMD13 (SEND_STATUS) until the card is ready-for-data and no
 * longer in the programming state, or @timeout/retries are exhausted. */
228 int mmc_send_status(struct mmc *mmc, int timeout)
231 int err, retries = 5;
233 cmd.cmdidx = MMC_CMD_SEND_STATUS;
234 cmd.resp_type = MMC_RSP_R1;
/* The RCA argument is only meaningful on the native bus, not in SPI mode. */
235 if (!mmc_host_is_spi(mmc))
236 cmd.cmdarg = mmc->rca << 16;
239 err = mmc_send_cmd(mmc, &cmd, NULL);
241 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
242 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
/* Any error bit in the status word aborts the poll loop. */
246 if (cmd.response[0] & MMC_STATUS_MASK) {
247 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
248 pr_err("Status Error: 0x%08X\n",
253 } else if (--retries < 0)
262 mmc_trace_state(mmc, &cmd);
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n");
/* CMD16: tell the card the transfer block length. */
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292 * attempt, let's try a few more time
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
/* Standard tuning block patterns defined by the SD/eMMC specs; the card
 * returns these during CMD19/CMD21 so the host can calibrate sampling. */
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Issue the tuning command (@opcode) and verify the returned block
 * matches the expected pattern for the current bus width. */
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
/* Non-zero memcmp means sampled data was corrupted -> tuning failed. */
368 if (memcmp(data_buf, tuning_block_pattern, size))
/* Read @blkcnt blocks starting at @start into @dst using CMD17/CMD18. */
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
/* High-capacity cards address by block number, others by byte offset. */
386 if (mmc->high_capacity)
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
/* Multi-block reads must be terminated with CMD12 (STOP_TRANSMISSION). */
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n");
/* Block-device read entry point; the signature differs between the BLK
 * (driver model) and legacy builds. */
416 #if CONFIG_IS_ENABLED(BLK)
417 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
419 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
423 #if CONFIG_IS_ENABLED(BLK)
424 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
426 int dev_num = block_dev->devnum;
428 lbaint_t cur, blocks_todo = blkcnt;
433 struct mmc *mmc = find_mmc_device(dev_num);
/* Make sure the requested hardware partition is selected first. */
437 if (CONFIG_IS_ENABLED(MMC_TINY))
438 err = mmc_switch_part(mmc, block_dev->hwpart);
440 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads that would run past the end of the device. */
445 if ((start + blkcnt) > block_dev->lba) {
446 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
447 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
448 start + blkcnt, block_dev->lba);
453 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
454 pr_debug("%s: Failed to set blocklen\n", __func__);
/* Split the transfer into chunks no larger than the host's b_max. */
459 cur = (blocks_todo > mmc->cfg->b_max) ?
460 mmc->cfg->b_max : blocks_todo;
461 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
462 pr_debug("%s: Failed to read blocks\n", __func__);
467 dst += cur * mmc->read_bl_len;
468 } while (blocks_todo > 0);
/* CMD0: reset the card to the idle state. */
473 static int mmc_go_idle(struct mmc *mmc)
480 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
482 cmd.resp_type = MMC_RSP_NONE;
484 err = mmc_send_cmd(mmc, &cmd, NULL);
494 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Perform the CMD11 voltage-switch sequence to move an SD card to 1.8V
 * signalling, with the clock gating the SD spec requires. */
495 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
501 * Send CMD11 only if the request is to switch the card to
504 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
505 return mmc_set_signal_voltage(mmc, signal_voltage);
507 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
509 cmd.resp_type = MMC_RSP_R1;
511 err = mmc_send_cmd(mmc, &cmd, NULL);
/* On the native bus an error bit in the R1 response aborts the switch. */
515 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
519 * The card should drive cmd and dat[0:3] low immediately
520 * after the response of cmd11, but wait 100 us to be sure
522 err = mmc_wait_dat0(mmc, 0, 100);
529 * During a signal voltage level switch, the clock must be gated
530 * for 5 ms according to the SD spec
532 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
534 err = mmc_set_signal_voltage(mmc, signal_voltage);
538 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
540 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
543 * Failure to switch is indicated by the card holding
544 * dat[0:3] low. Wait for at least 1 ms according to spec
546 err = mmc_wait_dat0(mmc, 1, 1000);
/* ACMD41 loop: negotiate the operating voltage with an SD card and detect
 * high-capacity (HCS) and, when @uhs_en, 1.8V signalling (S18R) support. */
556 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
563 cmd.cmdidx = MMC_CMD_APP_CMD;
564 cmd.resp_type = MMC_RSP_R1;
567 err = mmc_send_cmd(mmc, &cmd, NULL);
572 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
573 cmd.resp_type = MMC_RSP_R3;
576 * Most cards do not answer if some reserved bits
577 * in the ocr are set. However, Some controller
578 * can set bit 7 (reserved for low voltages), but
579 * how to manage low voltages SD card is not yet
582 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
583 (mmc->cfg->voltages & 0xff8000);
585 if (mmc->version == SD_VERSION_2)
586 cmd.cmdarg |= OCR_HCS;
589 cmd.cmdarg |= OCR_S18R;
591 err = mmc_send_cmd(mmc, &cmd, NULL);
/* OCR_BUSY set means power-up has completed and the loop can stop. */
596 if (cmd.response[0] & OCR_BUSY)
605 if (mmc->version != SD_VERSION_2)
606 mmc->version = SD_VERSION_1_0;
608 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
609 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
610 cmd.resp_type = MMC_RSP_R3;
613 err = mmc_send_cmd(mmc, &cmd, NULL);
619 mmc->ocr = cmd.response[0];
621 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* 0x41000000 = power-up done + S18A accepted -> perform the 1.8V switch. */
622 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
624 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
630 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/* One CMD1 (SEND_OP_COND) iteration for (e)MMC; @use_arg selects whether
 * to request HCS plus the voltage window learned from a prior pass. */
636 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
641 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 cmd.resp_type = MMC_RSP_R3;
644 if (use_arg && !mmc_host_is_spi(mmc))
645 cmd.cmdarg = OCR_HCS |
646 (mmc->cfg->voltages &
647 (mmc->ocr & OCR_VOLTAGE_MASK)) |
648 (mmc->ocr & OCR_ACCESS_MODE);
650 err = mmc_send_cmd(mmc, &cmd, NULL);
653 mmc->ocr = cmd.response[0];
/* Start CMD1 negotiation; the busy-wait is deferred (op_cond_pending). */
657 static int mmc_send_op_cond(struct mmc *mmc)
661 /* Some cards seem to need this */
664 /* Asking to the card its capabilities */
665 for (i = 0; i < 2; i++) {
666 err = mmc_send_op_cond_iter(mmc, i != 0);
670 /* exit if not busy (flag seems to be inverted) */
671 if (mmc->ocr & OCR_BUSY)
674 mmc->op_cond_pending = 1;
/* Finish CMD1 negotiation: poll until OCR_BUSY, then read OCR (SPI)
 * and derive high-capacity support. */
678 static int mmc_complete_op_cond(struct mmc *mmc)
685 mmc->op_cond_pending = 0;
686 if (!(mmc->ocr & OCR_BUSY)) {
687 /* Some cards seem to need this */
690 start = get_timer(0);
692 err = mmc_send_op_cond_iter(mmc, 1);
695 if (mmc->ocr & OCR_BUSY)
697 if (get_timer(start) > timeout)
703 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
704 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
705 cmd.resp_type = MMC_RSP_R3;
708 err = mmc_send_cmd(mmc, &cmd, NULL);
713 mmc->ocr = cmd.response[0];
716 mmc->version = MMC_VERSION_UNKNOWN;
718 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/* CMD8: read the 512-byte EXT_CSD register into @ext_csd. */
725 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
728 struct mmc_data data;
731 /* Get the Card Status Register */
732 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 cmd.resp_type = MMC_RSP_R1;
736 data.dest = (char *)ext_csd;
738 data.blocksize = MMC_MAX_BLOCK_LEN;
739 data.flags = MMC_DATA_READ;
741 err = mmc_send_cmd(mmc, &cmd, &data);
/* CMD6: write one EXT_CSD byte (@index <- @value), then poll for ready. */
746 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
753 cmd.cmdidx = MMC_CMD_SWITCH;
754 cmd.resp_type = MMC_RSP_R1b;
755 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
759 while (retries > 0) {
760 ret = mmc_send_cmd(mmc, &cmd, NULL);
762 /* Waiting for the ready status */
764 ret = mmc_send_status(mmc, timeout);
/* Program HS_TIMING in EXT_CSD for @mode and, for the plain HS modes,
 * read EXT_CSD back to confirm the switch took effect. */
775 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
780 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
786 speed_bits = EXT_CSD_TIMING_HS;
788 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
790 speed_bits = EXT_CSD_TIMING_HS200;
794 speed_bits = EXT_CSD_TIMING_LEGACY;
799 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
804 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
805 /* Now check to see that it worked */
806 err = mmc_send_ext_csd(mmc, test_csd);
810 /* No high-speed support */
811 if (!test_csd[EXT_CSD_HS_TIMING])
/* Derive mmc->card_caps (modes and widths) from EXT_CSD for (e)MMC. */
818 static int mmc_get_capabilities(struct mmc *mmc)
820 u8 *ext_csd = mmc->ext_csd;
823 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
825 if (mmc_host_is_spi(mmc))
828 /* Only version 4 supports high-speed */
829 if (mmc->version < MMC_VERSION_4)
833 pr_err("No ext_csd found!\n"); /* this should never happen */
837 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
/* CARD_TYPE: device-supported speed grades (low 6 bits used here). */
839 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
840 mmc->cardtype = cardtype;
842 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
843 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
844 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
845 mmc->card_caps |= MMC_MODE_HS200;
848 if (cardtype & EXT_CSD_CARD_TYPE_52) {
849 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
850 mmc->card_caps |= MMC_MODE_DDR_52MHz;
851 mmc->card_caps |= MMC_MODE_HS_52MHz;
853 if (cardtype & EXT_CSD_CARD_TYPE_26)
854 mmc->card_caps |= MMC_MODE_HS;
/* Set mmc->capacity for the selected hardware partition and refresh the
 * block device's lba count accordingly. */
859 static int mmc_set_capacity(struct mmc *mmc, int part_num)
863 mmc->capacity = mmc->capacity_user;
867 mmc->capacity = mmc->capacity_boot;
870 mmc->capacity = mmc->capacity_rpmb;
/* Partitions 4..7 are the four general-purpose partitions. */
876 mmc->capacity = mmc->capacity_gp[part_num - 4];
882 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
887 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/* HS200 is not allowed while a boot/RPMB partition is selected; drop to
 * a permitted mode before switching partitions if necessary. */
888 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
893 if (part_num & PART_ACCESS_MASK)
894 forbidden = MMC_CAP(MMC_HS_200);
896 if (MMC_CAP(mmc->selected_mode) & forbidden) {
897 pr_debug("selected mode (%s) is forbidden for part %d\n",
898 mmc_mode_name(mmc->selected_mode), part_num);
900 } else if (mmc->selected_mode != mmc->best_mode) {
901 pr_debug("selected mode is not optimal\n");
/* Re-run mode selection with the forbidden capability masked out. */
906 return mmc_select_mode_and_width(mmc,
907 mmc->card_caps & ~forbidden);
/* No-op stand-in when HS200 support is compiled out. */
912 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
913 unsigned int part_num)
/* Select hardware partition @part_num via EXT_CSD PARTITION_CONFIG. */
919 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
923 ret = mmc_boot_part_access_chk(mmc, part_num);
927 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
928 (mmc->part_config & ~PART_ACCESS_MASK)
929 | (part_num & PART_ACCESS_MASK));
932 * Set the capacity if the switch succeeded or was intended
933 * to return to representing the raw device.
935 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
936 ret = mmc_set_capacity(mmc, part_num);
937 mmc_get_blk_desc(mmc)->hwpart = part_num;
943 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning: enhanced user data area, the four
 * general-purpose partitions and the write-reliability bits. @mode selects
 * check-only, set, or set-and-complete. Once PARTITION_SETTING_COMPLETED
 * is written the layout is permanent and takes effect after a power cycle.
 */
944 int mmc_hwpart_config(struct mmc *mmc,
945 const struct mmc_hwpart_conf *conf,
946 enum mmc_hwpart_conf_mode mode)
952 u32 max_enh_size_mult;
953 u32 tot_enh_size_mult = 0;
956 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
958 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
961 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
962 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
966 if (!(mmc->part_support & PART_SUPPORT)) {
967 pr_err("Card does not support partitioning\n");
971 if (!mmc->hc_wp_grp_size) {
972 pr_err("Card does not define HC WP group size\n");
976 /* check partition alignment and total enhanced size */
977 if (conf->user.enh_size) {
978 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
979 conf->user.enh_start % mmc->hc_wp_grp_size) {
980 pr_err("User data enhanced area not HC WP group "
984 part_attrs |= EXT_CSD_ENH_USR;
985 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
986 if (mmc->high_capacity) {
987 enh_start_addr = conf->user.enh_start;
/* Byte-addressed cards: convert the 512-byte sector count to bytes. */
989 enh_start_addr = (conf->user.enh_start << 9);
995 tot_enh_size_mult += enh_size_mult;
997 for (pidx = 0; pidx < 4; pidx++) {
998 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
999 pr_err("GP%i partition not HC WP group size "
1000 "aligned\n", pidx+1);
1003 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1004 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1005 part_attrs |= EXT_CSD_ENH_GP(pidx);
1006 tot_enh_size_mult += gp_size_mult[pidx];
1010 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1011 pr_err("Card does not support enhanced attribute\n");
1012 return -EMEDIUMTYPE;
1015 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field in EXT_CSD. */
1020 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1021 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1022 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1023 if (tot_enh_size_mult > max_enh_size_mult) {
1024 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1025 tot_enh_size_mult, max_enh_size_mult);
1026 return -EMEDIUMTYPE;
1029 /* The default value of EXT_CSD_WR_REL_SET is device
1030 * dependent, the values can only be changed if the
1031 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1032 * changed only once and before partitioning is completed. */
1033 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1034 if (conf->user.wr_rel_change) {
1035 if (conf->user.wr_rel_set)
1036 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1038 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1040 for (pidx = 0; pidx < 4; pidx++) {
1041 if (conf->gp_part[pidx].wr_rel_change) {
1042 if (conf->gp_part[pidx].wr_rel_set)
1043 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1045 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1049 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1050 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1051 puts("Card does not support host controlled partition write "
1052 "reliability settings\n");
1053 return -EMEDIUMTYPE;
1056 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1057 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1058 pr_err("Card already partitioned\n");
1062 if (mode == MMC_HWPART_CONF_CHECK)
1065 /* Partitioning requires high-capacity size definitions */
1066 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1067 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1068 EXT_CSD_ERASE_GROUP_DEF, 1);
1073 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1075 /* update erase group size to be high-capacity */
1076 mmc->erase_grp_size =
1077 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1081 /* all OK, write the configuration */
1082 for (i = 0; i < 4; i++) {
1083 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1084 EXT_CSD_ENH_START_ADDR+i,
1085 (enh_start_addr >> (i*8)) & 0xFF);
1089 for (i = 0; i < 3; i++) {
1090 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1091 EXT_CSD_ENH_SIZE_MULT+i,
1092 (enh_size_mult >> (i*8)) & 0xFF);
1096 for (pidx = 0; pidx < 4; pidx++) {
1097 for (i = 0; i < 3; i++) {
1098 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1099 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1100 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1105 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1106 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1110 if (mode == MMC_HWPART_CONF_SET)
1113 /* The WR_REL_SET is a write-once register but shall be
1114 * written before setting PART_SETTING_COMPLETED. As it is
1115 * write-once we can only write it when completing the
1117 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1118 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1119 EXT_CSD_WR_REL_SET, wr_rel_set);
1124 /* Setting PART_SETTING_COMPLETED confirms the partition
1125 * configuration but it only becomes effective after power
1126 * cycle, so we do not adjust the partition related settings
1127 * in the mmc struct. */
1129 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1130 EXT_CSD_PARTITION_SETTING,
1131 EXT_CSD_PARTITION_SETTING_COMPLETED);
1139 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Card-detect: board hook first, then the driver's getcd op — the guard
 * between the two calls is not visible here; confirm in full source. */
1140 int mmc_getcd(struct mmc *mmc)
1144 cd = board_mmc_getcd(mmc);
1147 if (mmc->cfg->ops->getcd)
1148 cd = mmc->cfg->ops->getcd(mmc);
/* CMD6 (SWITCH_FUNC) for SD: check or set function @value in function
 * group @group; the 64-byte switch status block is returned in @resp. */
1157 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1160 struct mmc_data data;
1162 /* Switch the frequency */
1163 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1164 cmd.resp_type = MMC_RSP_R1;
/* Bit 31 = check(0)/switch(1); all groups default to 0xf (no change),
 * then the nibble for @group is replaced with @value. */
1165 cmd.cmdarg = (mode << 31) | 0xffffff;
1166 cmd.cmdarg &= ~(0xf << (group * 4));
1167 cmd.cmdarg |= value << (group * 4);
1169 data.dest = (char *)resp;
1170 data.blocksize = 64;
1172 data.flags = MMC_DATA_READ;
1174 return mmc_send_cmd(mmc, &cmd, &data);
/* Probe an SD card's capabilities: SCR (spec version, 4-bit support),
 * CMD6 status (high-speed) and, for SD 3.0 cards, the UHS bus modes. */
1178 static int sd_get_capabilities(struct mmc *mmc)
1182 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1183 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1184 struct mmc_data data;
1186 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1190 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1192 if (mmc_host_is_spi(mmc))
1195 /* Read the SCR to find out if this card supports higher speeds */
1196 cmd.cmdidx = MMC_CMD_APP_CMD;
1197 cmd.resp_type = MMC_RSP_R1;
1198 cmd.cmdarg = mmc->rca << 16;
1200 err = mmc_send_cmd(mmc, &cmd, NULL);
1205 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1206 cmd.resp_type = MMC_RSP_R1;
1212 data.dest = (char *)scr;
1215 data.flags = MMC_DATA_READ;
1217 err = mmc_send_cmd(mmc, &cmd, &data);
/* The SCR is big-endian on the wire. */
1226 mmc->scr[0] = __be32_to_cpu(scr[0]);
1227 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC field of the SCR -> SD physical-layer version. */
1229 switch ((mmc->scr[0] >> 24) & 0xf) {
1231 mmc->version = SD_VERSION_1_0;
1234 mmc->version = SD_VERSION_1_10;
1237 mmc->version = SD_VERSION_2;
1238 if ((mmc->scr[0] >> 15) & 0x1)
1239 mmc->version = SD_VERSION_3;
1242 mmc->version = SD_VERSION_1_0;
1246 if (mmc->scr[0] & SD_DATA_4BIT)
1247 mmc->card_caps |= MMC_MODE_4BIT;
1249 /* Version 1.0 doesn't support switching */
1250 if (mmc->version == SD_VERSION_1_0)
1255 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1256 (u8 *)switch_status);
1261 /* The high-speed function is busy. Try again */
1262 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1266 /* If high-speed isn't supported, we return */
1267 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1268 mmc->card_caps |= MMC_CAP(SD_HS);
1270 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1271 /* Version before 3.0 don't support UHS modes */
1272 if (mmc->version < SD_VERSION_3)
1275 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1276 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1277 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1278 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1279 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1280 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1281 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1282 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1283 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1284 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1285 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/* Switch the card's function group 1 to the access mode matching @mode,
 * then verify the response echoes the requested speed. */
1291 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1295 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1300 speed = UHS_SDR12_BUS_SPEED;
1303 speed = HIGH_SPEED_BUS_SPEED;
1305 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1307 speed = UHS_SDR12_BUS_SPEED;
1310 speed = UHS_SDR25_BUS_SPEED;
1313 speed = UHS_SDR50_BUS_SPEED;
1316 speed = UHS_DDR50_BUS_SPEED;
1319 speed = UHS_SDR104_BUS_SPEED;
1326 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* The selected function is echoed in the status block; mismatch = fail. */
1330 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/* ACMD6: set the SD card's bus width (1 or 4 data lines only). */
1336 static int sd_select_bus_width(struct mmc *mmc, int w)
1341 if ((w != 4) && (w != 1))
1344 cmd.cmdidx = MMC_CMD_APP_CMD;
1345 cmd.resp_type = MMC_RSP_R1;
1346 cmd.cmdarg = mmc->rca << 16;
1348 err = mmc_send_cmd(mmc, &cmd, NULL);
1352 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1353 cmd.resp_type = MMC_RSP_R1;
1358 err = mmc_send_cmd(mmc, &cmd, NULL);
1365 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* Read the SD Status (ACMD13) and extract the allocation-unit size and
 * erase timeout/offset used to estimate erase durations. */
1366 static int sd_read_ssr(struct mmc *mmc)
/* AU_SIZE code -> allocation unit size in 512-byte sectors. */
1368 static const unsigned int sd_au_size[] = {
1369 0, SZ_16K / 512, SZ_32K / 512,
1370 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1371 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1372 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1373 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1378 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1379 struct mmc_data data;
1381 unsigned int au, eo, et, es;
1383 cmd.cmdidx = MMC_CMD_APP_CMD;
1384 cmd.resp_type = MMC_RSP_R1;
1385 cmd.cmdarg = mmc->rca << 16;
1387 err = mmc_send_cmd(mmc, &cmd, NULL);
1391 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1392 cmd.resp_type = MMC_RSP_R1;
1396 data.dest = (char *)ssr;
1397 data.blocksize = 64;
1399 data.flags = MMC_DATA_READ;
1401 err = mmc_send_cmd(mmc, &cmd, &data);
/* The SSR arrives big-endian; convert in place before decoding fields. */
1409 for (i = 0; i < 16; i++)
1410 ssr[i] = be32_to_cpu(ssr[i]);
1412 au = (ssr[2] >> 12) & 0xF;
1413 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1414 mmc->ssr.au = sd_au_size[au];
1415 es = (ssr[3] >> 24) & 0xFF;
1416 es |= (ssr[2] & 0xFF) << 8;
1417 et = (ssr[3] >> 18) & 0x3F;
1419 eo = (ssr[3] >> 16) & 0x3;
1420 mmc->ssr.erase_timeout = (et * 1000) / es;
1421 mmc->ssr.erase_offset = eo * 1000;
1424 pr_debug("Invalid Allocation Unit Size.\n");
1430 /* frequency bases */
1431 /* divided by 10 to be nice to platforms without floating point */
1432 static const int fbase[] = {
1439 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1440 * to platforms without floating point.
1442 static const u8 multipliers[] = {
/* Map a single-bit width capability flag to the number of data lines.
 * NOTE(review): "witdh" typo in the warning string below is runtime
 * output and is left unchanged by this documentation-only edit. */
1461 static inline int bus_width(uint cap)
1463 if (cap == MMC_MODE_8BIT)
1465 if (cap == MMC_MODE_4BIT)
1467 if (cap == MMC_MODE_1BIT)
1469 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1473 #if !CONFIG_IS_ENABLED(DM_MMC)
1474 #ifdef MMC_SUPPORTS_TUNING
/* Non-DM dispatch to the host driver's tuning op; body not fully
 * visible here — confirm in full source. */
1475 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1481 static void mmc_send_init_stream(struct mmc *mmc)
/* Push the clock/width/voltage state cached in struct mmc to the driver. */
1485 static int mmc_set_ios(struct mmc *mmc)
1489 if (mmc->cfg->ops->set_ios)
1490 ret = mmc->cfg->ops->set_ios(mmc);
/* Clamp @clock into the host's [f_min, f_max] window, remember the
 * enable/disable state, and apply the change via mmc_set_ios(). */
1496 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1499 if (clock > mmc->cfg->f_max)
1500 clock = mmc->cfg->f_max;
1502 if (clock < mmc->cfg->f_min)
1503 clock = mmc->cfg->f_min;
1507 mmc->clk_disable = disable;
1509 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1511 return mmc_set_ios(mmc);
/* Record the host-side bus width and apply it via mmc_set_ios(). */
1514 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1516 mmc->bus_width = width;
1518 return mmc_set_ios(mmc);
1521 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1523 * helper function to display the capabilities in a human
1524 * friendly manner. The capabilities include bus width and
1527 void mmc_dump_capabilities(const char *text, uint caps)
1531 pr_debug("%s: widths [", text);
1532 if (caps & MMC_MODE_8BIT)
1534 if (caps & MMC_MODE_4BIT)
1536 if (caps & MMC_MODE_1BIT)
1538 pr_debug("\b\b] modes [");
1539 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1540 if (MMC_CAP(mode) & caps)
1541 pr_debug("%s, ", mmc_mode_name(mode));
1542 pr_debug("\b\b]\n");
/* A bus mode, its allowed widths and (optionally) its tuning command;
 * arrays of these drive the mode-selection loops below. */
1546 struct mode_width_tuning {
1549 #ifdef MMC_SUPPORTS_TUNING
1554 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Translate the signal-voltage enum to millivolts. */
1555 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1558 case MMC_SIGNAL_VOLTAGE_000: return 0;
1559 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1560 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1561 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/* Apply a new signalling voltage through mmc_set_ios(); no-op when the
 * requested voltage is already selected. */
1566 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1570 if (mmc->signal_voltage == signal_voltage)
1573 mmc->signal_voltage = signal_voltage;
1574 err = mmc_set_ios(mmc);
1576 pr_debug("unable to set voltage (err %d)\n", err);
/* Stub when MMC_IO_VOLTAGE is disabled. */
1581 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
/* SD bus modes in decreasing order of preference; UHS and tuning entries
 * are compiled in only when the corresponding support is enabled. */
1587 static const struct mode_width_tuning sd_modes_by_pref[] = {
1588 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1589 #ifdef MMC_SUPPORTS_TUNING
1592 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1593 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1598 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1602 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1606 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1611 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1613 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1616 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1621 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate sd_modes_by_pref, skipping modes absent from @caps. */
1625 #define for_each_sd_mode_by_pref(caps, mwt) \
1626 for (mwt = sd_modes_by_pref;\
1627 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1629 if (caps & MMC_CAP(mwt->mode))
/* Pick the fastest (mode, width) pair supported by both card and host:
 * program the card then the host, run tuning if the mode requires it,
 * and fall back to SD_LEGACY when a candidate fails. */
1631 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1634 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1635 const struct mode_width_tuning *mwt;
1636 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1637 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1639 bool uhs_en = false;
1644 mmc_dump_capabilities("sd card", card_caps);
1645 mmc_dump_capabilities("host", mmc->host_caps);
1648 /* Restrict card's capabilities by what the host can do */
1649 caps = card_caps & mmc->host_caps;
1654 for_each_sd_mode_by_pref(caps, mwt) {
1657 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1658 if (*w & caps & mwt->widths) {
1659 pr_debug("trying mode %s width %d (at %d MHz)\n",
1660 mmc_mode_name(mwt->mode),
1662 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1664 /* configure the bus width (card + host) */
1665 err = sd_select_bus_width(mmc, bus_width(*w));
1668 mmc_set_bus_width(mmc, bus_width(*w));
1670 /* configure the bus mode (card) */
1671 err = sd_set_card_speed(mmc, mwt->mode);
1675 /* configure the bus mode (host) */
1676 mmc_select_mode(mmc, mwt->mode);
1677 mmc_set_clock(mmc, mmc->tran_speed,
1680 #ifdef MMC_SUPPORTS_TUNING
1681 /* execute tuning if needed */
1682 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1683 err = mmc_execute_tuning(mmc,
1686 pr_debug("tuning failed\n");
1692 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* SSR is only needed for erase support; failure is not fatal. */
1693 err = sd_read_ssr(mmc);
1695 pr_warn("unable to read ssr\n");
1701 /* revert to a safer bus speed */
1702 mmc_select_mode(mmc, SD_LEGACY);
1703 mmc_set_clock(mmc, mmc->tran_speed,
1709 pr_err("unable to select a mode\n");
1714 * read the compare the part of ext csd that is constant.
1715 * This can be used to check that the transfer is working
1718 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1721 const u8 *ext_csd = mmc->ext_csd;
1722 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1724 if (mmc->version < MMC_VERSION_4)
1727 err = mmc_send_ext_csd(mmc, test_csd);
1731 /* Only compare read only fields */
1732 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1733 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1734 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1735 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1736 ext_csd[EXT_CSD_REV]
1737 == test_csd[EXT_CSD_REV] &&
1738 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1739 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1740 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1741 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1747 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Pick the lowest I/O signal voltage supported by both the card (per its
 * EXT_CSD card type bits, for the given bus mode) and the caller's
 * allowed_mask, and try to switch to it, walking upward on failure.
 */
1748 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1749 uint32_t allowed_mask)
 /* build the card-side voltage mask from the mode-specific type bits */
1755 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
1756 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1757 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
1758 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1761 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1762 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1763 MMC_SIGNAL_VOLTAGE_180;
1764 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1765 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1768 card_mask |= MMC_SIGNAL_VOLTAGE_330;
 /* try voltages lowest-first until one sticks or the mask is exhausted */
1772 while (card_mask & allowed_mask) {
1773 enum mmc_voltage best_match;
 /*
  * lowest set bit == lowest voltage; relies on the
  * MMC_SIGNAL_VOLTAGE_* flags being ordered by value — NOTE(review):
  * confirm bit ordering in the mmc_voltage definition
  */
1775 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1776 if (!mmc_set_signal_voltage(mmc, best_match))
 /* that voltage failed; drop it and try the next one up */
1779 allowed_mask &= ~best_match;
 /* stub when I/O voltage switching support is compiled out */
1785 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1786 uint32_t allowed_mask)
/*
 * Bus modes in order of preference (fastest first); entries pair a mode
 * with the bus widths it can use and, where needed, a tuning command.
 */
1792 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1793 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1796 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1797 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1802 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1806 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1810 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1814 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* iterate over the preference table, skipping modes the caps don't allow */
1818 #define for_each_mmc_mode_by_pref(caps, mwt) \
1819 for (mwt = mmc_modes_by_pref;\
1820 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1822 if (caps & MMC_CAP(mwt->mode))
/*
 * Maps a host capability bit + SDR/DDR flavour to the EXT_CSD
 * BUS_WIDTH value to program into the card; widest entries first.
 */
1824 static const struct ext_csd_bus_width {
1828 } ext_csd_bus_width[] = {
1829 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1830 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1831 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1832 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1833 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
/* iterate over bus-width entries matching both the caps and the ddr flag */
1836 #define for_each_supported_width(caps, ddr, ecbv) \
1837 for (ecbv = ext_csd_bus_width;\
1838 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1840 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Try bus mode/width combinations in decreasing order of preference
 * (fastest mode, widest bus first) and settle on the first combination
 * that the card, the host, and a verification read all accept.
 * On total failure the bus is reverted to 1-bit legacy mode.
 */
1842 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1845 const struct mode_width_tuning *mwt;
1846 const struct ext_csd_bus_width *ecbw;
1849 mmc_dump_capabilities("mmc", card_caps);
1850 mmc_dump_capabilities("host", mmc->host_caps);
1853 /* Restrict card's capabilities by what the host can do */
1854 card_caps &= mmc->host_caps;
1856 /* Only version 4 of MMC supports wider bus widths */
1857 if (mmc->version < MMC_VERSION_4)
1860 if (!mmc->ext_csd) {
1861 pr_debug("No ext_csd found!\n"); /* this should never happen */
 /* start from a known-safe clock before negotiating faster modes */
1865 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1867 for_each_mmc_mode_by_pref(card_caps, mwt) {
1868 for_each_supported_width(card_caps & mwt->widths,
1869 mmc_is_mode_ddr(mwt->mode), ecbw) {
1870 enum mmc_voltage old_voltage;
1871 pr_debug("trying mode %s width %d (at %d MHz)\n",
1872 mmc_mode_name(mwt->mode),
1873 bus_width(ecbw->cap),
1874 mmc_mode2freq(mmc, mwt->mode) / 1000000);
 /* remember the voltage so it can be restored if this combo fails */
1875 old_voltage = mmc->signal_voltage;
1876 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1877 MMC_ALL_SIGNAL_VOLTAGE);
1881 /* configure the bus width (card + host) */
 /* program the width first in SDR form; DDR flag is applied later */
1882 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1884 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1887 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1889 /* configure the bus speed (card) */
1890 err = mmc_set_card_speed(mmc, mwt->mode);
1895 * configure the bus width AND the ddr mode (card)
1896 * The host side will be taken care of in the next step
1898 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1899 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1901 ecbw->ext_csd_bits);
1906 /* configure the bus mode (host) */
1907 mmc_select_mode(mmc, mwt->mode);
1908 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1909 #ifdef MMC_SUPPORTS_TUNING
1911 /* execute tuning if needed */
1913 err = mmc_execute_tuning(mmc, mwt->tuning);
1915 pr_debug("tuning failed\n");
1921 /* do a transfer to check the configuration */
1922 err = mmc_read_and_compare_ext_csd(mmc);
 /* this combination failed: restore voltage and fall back */
1926 mmc_set_signal_voltage(mmc, old_voltage);
1927 /* if an error occurred, revert to a safer bus mode */
1928 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1929 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1930 mmc_select_mode(mmc, MMC_LEGACY);
1931 mmc_set_bus_width(mmc, 1);
1935 pr_err("unable to select a mode\n");
/*
 * MMC v4+ specific part of card startup: read EXT_CSD, derive the exact
 * spec version, capacity, hardware partition layout and erase/WP group
 * sizes, and enable ERASE_GRP_DEF when the card is partitioned.
 * No-op for SD cards and pre-v4 MMC.
 */
1940 static int mmc_startup_v4(struct mmc *mmc)
1944 bool has_parts = false;
1945 bool part_completed;
 /* EXT_CSD_REV value -> MMC_VERSION_* lookup table */
1946 static const u32 mmc_versions[] = {
1958 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1960 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1963 /* check ext_csd version and capacity */
1964 err = mmc_send_ext_csd(mmc, ext_csd);
1968 /* store the ext csd for future reference */
1970 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1973 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
 /* unknown revision: bail rather than index past the table */
1975 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
1978 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
1980 if (mmc->version >= MMC_VERSION_4_2) {
1982 * According to the JEDEC Standard, the value of
1983 * ext_csd's capacity is valid if the value is more
 /* SEC_CNT is little-endian across four EXT_CSD bytes */
1986 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1987 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1988 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1989 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1990 capacity *= MMC_MAX_BLOCK_LEN;
 /* only trust SEC_CNT when the device is larger than 2 GiB */
1991 if ((capacity >> 20) > 2 * 1024)
1992 mmc->capacity_user = capacity;
1995 /* The partition data may be non-zero but it is only
1996 * effective if PARTITION_SETTING_COMPLETED is set in
1997 * EXT_CSD, so ignore any data if this bit is not set,
1998 * except for enabling the high-capacity group size
1999 * definition (see below).
2001 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2002 EXT_CSD_PARTITION_SETTING_COMPLETED);
2004 /* store the partition info of emmc */
2005 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2006 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2007 ext_csd[EXT_CSD_BOOT_MULT])
2008 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2009 if (part_completed &&
2010 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2011 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
 /* BOOT_MULT/RPMB_MULT are in 128 KiB units, hence << 17 */
2013 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2015 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
 /* compute the size of each of the four general-purpose partitions */
2017 for (i = 0; i < 4; i++) {
2018 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2019 uint mult = (ext_csd[idx + 2] << 16) +
2020 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2023 if (!part_completed)
2025 mmc->capacity_gp[i] = mult;
2026 mmc->capacity_gp[i] *=
2027 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2028 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
 /* units of 512 KiB (HC group size), hence << 19 */
2029 mmc->capacity_gp[i] <<= 19;
2032 #ifndef CONFIG_SPL_BUILD
 /* enhanced user area geometry is only valid once partitioning is done */
2033 if (part_completed) {
2034 mmc->enh_user_size =
2035 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2036 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2037 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2038 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2039 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2040 mmc->enh_user_size <<= 19;
2041 mmc->enh_user_start =
2042 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2043 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2044 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2045 ext_csd[EXT_CSD_ENH_START_ADDR];
 /* high-capacity cards address in 512-byte sectors, not bytes */
2046 if (mmc->high_capacity)
2047 mmc->enh_user_start <<= 9;
2052 * Host needs to enable ERASE_GRP_DEF bit if device is
2053 * partitioned. This bit will be lost every time after a reset
2054 * or power off. This will affect erase size.
2058 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2059 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2062 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2063 EXT_CSD_ERASE_GROUP_DEF, 1);
 /* keep the local copy consistent with what was just written */
2068 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2071 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2072 #if CONFIG_IS_ENABLED(MMC_WRITE)
2073 /* Read out group size from ext_csd */
2074 mmc->erase_grp_size =
2075 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2078 * if high capacity and partition setting completed
2079 * SEC_COUNT is valid even if it is smaller than 2 GiB
2080 * JEDEC Standard JESD84-B45, 6.2.4
2082 if (mmc->high_capacity && part_completed) {
2083 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2084 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2085 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2086 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2087 capacity *= MMC_MAX_BLOCK_LEN;
2088 mmc->capacity_user = capacity;
2091 #if CONFIG_IS_ENABLED(MMC_WRITE)
2093 /* Calculate the group size from the csd value. */
2094 int erase_gsz, erase_gmul;
2096 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2097 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2098 mmc->erase_grp_size = (erase_gsz + 1)
2102 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2103 mmc->hc_wp_grp_size = 1024
2104 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2105 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2108 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
 /* error path: drop the cached EXT_CSD copy */
2114 mmc->ext_csd = NULL;
/*
 * Full card initialization after the operating-condition handshake:
 * identify the card (CID), assign/read the RCA, parse the CSD for
 * version, speed and capacity, select the card, run the v4 EXT_CSD
 * setup, negotiate the best bus mode/width, and fill in the block
 * device descriptor.
 */
2119 static int mmc_startup(struct mmc *mmc)
2125 struct blk_desc *bdesc;
2127 #ifdef CONFIG_MMC_SPI_CRC_ON
2128 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2129 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2130 cmd.resp_type = MMC_RSP_R1;
2132 err = mmc_send_cmd(mmc, &cmd, NULL);
2138 /* Put the Card in Identify Mode */
2139 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2140 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2141 cmd.resp_type = MMC_RSP_R2;
2144 err = mmc_send_cmd(mmc, &cmd, NULL);
2146 #ifdef CONFIG_MMC_QUIRKS
2147 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2150 * It has been seen that SEND_CID may fail on the first
2151 * attempt, let's try a few more times
2154 err = mmc_send_cmd(mmc, &cmd, NULL);
2157 } while (retries--);
2164 memcpy(mmc->cid, cmd.response, 16);
2167 * For MMC cards, set the Relative Address.
2168 * For SD cards, get the Relative Address.
2169 * This also puts the cards into Standby State
2171 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2172 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2173 cmd.cmdarg = mmc->rca << 16;
2174 cmd.resp_type = MMC_RSP_R6;
2176 err = mmc_send_cmd(mmc, &cmd, NULL);
 /* for SD the card publishes its RCA in the response */
2182 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2185 /* Get the Card-Specific Data */
2186 cmd.cmdidx = MMC_CMD_SEND_CSD;
2187 cmd.resp_type = MMC_RSP_R2;
2188 cmd.cmdarg = mmc->rca << 16;
2190 err = mmc_send_cmd(mmc, &cmd, NULL);
2195 mmc->csd[0] = cmd.response[0];
2196 mmc->csd[1] = cmd.response[1];
2197 mmc->csd[2] = cmd.response[2];
2198 mmc->csd[3] = cmd.response[3];
 /* derive the MMC spec version from the CSD SPEC_VERS field */
2200 if (mmc->version == MMC_VERSION_UNKNOWN) {
2201 int version = (cmd.response[0] >> 26) & 0xf;
2205 mmc->version = MMC_VERSION_1_2;
2208 mmc->version = MMC_VERSION_1_4;
2211 mmc->version = MMC_VERSION_2_2;
2214 mmc->version = MMC_VERSION_3;
2217 mmc->version = MMC_VERSION_4;
2220 mmc->version = MMC_VERSION_1_2;
2225 /* divide frequency by 10, since the mults are 10x bigger */
2226 freq = fbase[(cmd.response[0] & 0x7)];
2227 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2229 mmc->legacy_speed = freq * mult;
2230 mmc_select_mode(mmc, MMC_LEGACY);
2232 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2233 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2234 #if CONFIG_IS_ENABLED(MMC_WRITE)
2237 mmc->write_bl_len = mmc->read_bl_len;
2239 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
 /* high-capacity cards encode C_SIZE differently in the CSD */
2242 if (mmc->high_capacity) {
2243 csize = (mmc->csd[1] & 0x3f) << 16
2244 | (mmc->csd[2] & 0xffff0000) >> 16;
2247 csize = (mmc->csd[1] & 0x3ff) << 2
2248 | (mmc->csd[2] & 0xc0000000) >> 30;
2249 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2252 mmc->capacity_user = (csize + 1) << (cmult + 2);
2253 mmc->capacity_user *= mmc->read_bl_len;
2254 mmc->capacity_boot = 0;
2255 mmc->capacity_rpmb = 0;
2256 for (i = 0; i < 4; i++)
2257 mmc->capacity_gp[i] = 0;
 /* clamp block lengths to what the stack can handle */
2259 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2260 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2262 #if CONFIG_IS_ENABLED(MMC_WRITE)
2263 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2264 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
 /* program the driver stage register if implemented and configured */
2267 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2268 cmd.cmdidx = MMC_CMD_SET_DSR;
2269 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2270 cmd.resp_type = MMC_RSP_NONE;
2271 if (mmc_send_cmd(mmc, &cmd, NULL))
2272 pr_warn("MMC: SET_DSR failed\n");
2275 /* Select the card, and put it into Transfer Mode */
2276 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2277 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2278 cmd.resp_type = MMC_RSP_R1;
2279 cmd.cmdarg = mmc->rca << 16;
2280 err = mmc_send_cmd(mmc, &cmd, NULL);
2287 * For SD, its erase group is always one sector
2289 #if CONFIG_IS_ENABLED(MMC_WRITE)
2290 mmc->erase_grp_size = 1;
2292 mmc->part_config = MMCPART_NOAVAILABLE;
2294 err = mmc_startup_v4(mmc);
2298 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
 /* negotiate bus mode/width, SD and MMC paths differ */
2303 err = sd_get_capabilities(mmc);
2306 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2308 err = mmc_get_capabilities(mmc);
2311 mmc_select_mode_and_width(mmc, mmc->card_caps);
2317 mmc->best_mode = mmc->selected_mode;
2319 /* Fix the block length for DDR mode */
2320 if (mmc->ddr_mode) {
2321 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2322 #if CONFIG_IS_ENABLED(MMC_WRITE)
2323 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2327 /* fill in device description */
2328 bdesc = mmc_get_blk_desc(mmc);
2332 bdesc->blksz = mmc->read_bl_len;
2333 bdesc->log2blksz = LOG2(bdesc->blksz);
2334 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2335 #if !defined(CONFIG_SPL_BUILD) || \
2336 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2337 !defined(CONFIG_USE_TINY_PRINTF))
 /* decode manufacturer/serial/product/revision strings from the CID */
2338 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2339 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2340 (mmc->cid[3] >> 16) & 0xffff);
2341 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2342 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2343 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2344 (mmc->cid[2] >> 24) & 0xff);
2345 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2346 (mmc->cid[2] >> 16) & 0xf);
2348 bdesc->vendor[0] = 0;
2349 bdesc->product[0] = 0;
2350 bdesc->revision[0] = 0;
2352 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
/*
 * Send CMD8 (SEND_IF_COND) to probe for an SD v2.00+ card.  A card that
 * echoes the 0xaa check pattern back is marked SD_VERSION_2.
 */
2359 static int mmc_send_if_cond(struct mmc *mmc)
2364 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2365 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2366 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2367 cmd.resp_type = MMC_RSP_R7;
2369 err = mmc_send_cmd(mmc, &cmd, NULL);
 /* the card must echo the check pattern back unchanged */
2374 if ((cmd.response[0] & 0xff) != 0xaa)
2377 mmc->version = SD_VERSION_2;
2382 #if !CONFIG_IS_ENABLED(DM_MMC)
2383 /* board-specific MMC power initializations. */
2384 __weak void board_mmc_power_init(void)
/*
 * Locate the card's power supplies.  With driver model, look up the
 * vmmc/vqmmc regulators from the device tree (absence is non-fatal);
 * without it, fall back to the board_mmc_power_init() hook.
 */
2389 static int mmc_power_init(struct mmc *mmc)
2391 #if CONFIG_IS_ENABLED(DM_MMC)
2392 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2395 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
 /* a missing supply is only logged; many boards have none described */
2398 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2400 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2401 &mmc->vqmmc_supply);
2403 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2405 #else /* !CONFIG_DM_MMC */
2407 * Driver model should use a regulator, as above, rather than calling
2408 * out to board code.
2410 board_mmc_power_init();
2416 * put the host in the initial state:
2417 * - turn on Vdd (card power supply)
2418 * - configure the bus width and clock to minimal values
2420 static void mmc_set_initial_state(struct mmc *mmc)
2424 /* First try to set 3.3V. If it fails set to 1.8V */
2425 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2427 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2429 pr_warn("mmc: failed to set signal voltage\n");
 /* slowest, narrowest configuration: legacy mode, 1-bit bus */
2431 mmc_select_mode(mmc, MMC_LEGACY);
2432 mmc_set_bus_width(mmc, 1);
2433 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card's Vdd supply (vmmc regulator) when one is available. */
2436 static int mmc_power_on(struct mmc *mmc)
2438 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2439 if (mmc->vmmc_supply) {
2440 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2443 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the card's Vdd supply when one is available. */
2451 static int mmc_power_off(struct mmc *mmc)
2453 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2454 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2455 if (mmc->vmmc_supply) {
2456 int ret = regulator_set_enable(mmc->vmmc_supply, false);
 /* failure to disable is non-fatal; only logged at debug level */
2459 pr_debug("Error disabling VMMC supply\n");
/* Power the card fully off, wait, then back on; used to reset UHS state. */
2467 static int mmc_power_cycle(struct mmc *mmc)
2471 ret = mmc_power_off(mmc)
2475 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2476 * to be on the safer side.
2479 return mmc_power_on(mmc);
/*
 * First phase of card init: check card presence, set up power, power
 * cycle (disabling UHS if a full cycle is impossible), reset the card
 * with CMD0 and run the SD/MMC operating-condition handshake.  Leaves
 * init_in_progress set; mmc_complete_init() finishes the job.
 */
2482 int mmc_start_init(struct mmc *mmc)
2485 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2489 * all hosts are capable of 1 bit bus-width and able to use the legacy
2492 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2493 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2495 #if !defined(CONFIG_MMC_BROKEN_CD)
2496 /* we pretend there's no card when init is NULL */
2497 no_card = mmc_getcd(mmc) == 0;
2501 #if !CONFIG_IS_ENABLED(DM_MMC)
2502 no_card = no_card || (mmc->cfg->ops->init == NULL);
2506 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2507 pr_err("MMC: no card present\n");
2515 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2516 mmc_adapter_card_type_ident();
2518 err = mmc_power_init(mmc);
2522 #ifdef CONFIG_MMC_QUIRKS
 /* enable the known-safe retry quirks by default */
2523 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2524 MMC_QUIRK_RETRY_SEND_CID;
2527 err = mmc_power_cycle(mmc);
2530 * if power cycling is not supported, we should not try
2531 * to use the UHS modes, because we wouldn't be able to
2532 * recover from an error during the UHS initialization.
2534 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2536 mmc->host_caps &= ~UHS_CAPS;
2537 err = mmc_power_on(mmc);
2542 #if CONFIG_IS_ENABLED(DM_MMC)
2543 /* The device has already been probed ready for use */
2545 /* made sure it's not NULL earlier */
2546 err = mmc->cfg->ops->init(mmc);
2553 mmc_set_initial_state(mmc);
2554 mmc_send_init_stream(mmc);
2556 /* Reset the Card */
2557 err = mmc_go_idle(mmc);
2562 /* The internal partition reset to user partition(0) at every CMD0*/
2563 mmc_get_blk_desc(mmc)->hwpart = 0;
2565 /* Test for SD version 2 */
2566 err = mmc_send_if_cond(mmc);
2568 /* Now try to get the SD card's operating condition */
2569 err = sd_send_op_cond(mmc, uhs_en);
 /* UHS negotiation failed: power cycle and retry without UHS */
2570 if (err && uhs_en) {
2572 mmc_power_cycle(mmc);
2576 /* If the command timed out, we check for an MMC card */
2577 if (err == -ETIMEDOUT) {
2578 err = mmc_send_op_cond(mmc);
2581 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2582 pr_err("Card did not respond to voltage select!\n");
2589 mmc->init_in_progress = 1;
/*
 * Second phase of init: finish a pending op-cond handshake if one was
 * left outstanding by mmc_start_init(), then run the full startup.
 */
2594 static int mmc_complete_init(struct mmc *mmc)
2598 mmc->init_in_progress = 0;
2599 if (mmc->op_cond_pending)
2600 err = mmc_complete_op_cond(mmc);
2603 err = mmc_startup(mmc);
/*
 * Public entry point: run both init phases back to back (start phase is
 * skipped if already in progress) and report the elapsed time.
 */
2611 int mmc_init(struct mmc *mmc)
2614 __maybe_unused unsigned start;
2615 #if CONFIG_IS_ENABLED(DM_MMC)
2616 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2623 start = get_timer(0);
2625 if (!mmc->init_in_progress)
2626 err = mmc_start_init(mmc);
2629 err = mmc_complete_init(mmc);
2631 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
/* Record the driver stage register value to be programmed at startup. */
2636 int mmc_set_dsr(struct mmc *mmc, u16 val)
2642 /* CPU-specific MMC initializations. */
/* Weak default; SoC code may override to register its controllers. */
2643 __weak int cpu_mmc_init(bd_t *bis)
2648 /* board-specific MMC initializations. */
/* Weak default; board code may override to register its controllers. */
2649 __weak int board_mmc_init(bd_t *bis)
/* Mark a device for early init during mmc_do_preinit(). */
2654 void mmc_set_preinit(struct mmc *mmc, int preinit)
2656 mmc->preinit = preinit;
2659 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Probe every MMC device in the uclass.  With driver model, devices are
 * first requested by sequence number so they get contiguous dev numbers,
 * then each is probed; without it, the board hook does the work.
 */
2660 static int mmc_probe(bd_t *bis)
2664 struct udevice *dev;
2666 ret = uclass_get(UCLASS_MMC, &uc);
2671 * Try to add them in sequence order. Really with driver model we
2672 * should allow holes, but the current MMC list does not allow that.
2673 * So if we request 0, 1, 3 we will get 0, 1, 2.
2675 for (i = 0; ; i++) {
2676 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2680 uclass_foreach_dev(dev, uc) {
2681 ret = device_probe(dev);
 /* a failed probe is reported but does not stop the other devices */
2683 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* non-driver-model variant: defer entirely to the board hook */
2689 static int mmc_probe(bd_t *bis)
2691 if (board_mmc_init(bis) < 0)
/*
 * One-time MMC subsystem bring-up: initialize the device list (legacy,
 * non-BLK builds), probe all controllers and print the device list.
 * Guarded so repeated calls are no-ops.
 */
2698 int mmc_initialize(bd_t *bis)
2700 static int initialized = 0;
2702 if (initialized) /* Avoid initializing mmc multiple times */
2706 #if !CONFIG_IS_ENABLED(BLK)
2707 #if !CONFIG_IS_ENABLED(MMC_TINY)
2711 ret = mmc_probe(bis);
2715 #ifndef CONFIG_SPL_BUILD
2716 print_mmc_devices(',');
2723 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Permanently enable manual background operations (BKOPS_EN) on an eMMC
 * device after verifying the card supports them and they are not already
 * on.  Note: EXT_CSD_BKOPS_EN is a one-time-programmable field on many
 * devices — NOTE(review): confirm irreversibility for the target part.
 */
2724 int mmc_set_bkops_enable(struct mmc *mmc)
2727 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2729 err = mmc_send_ext_csd(mmc, ext_csd);
2731 puts("Could not get ext_csd register values\n");
2735 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2736 puts("Background operations not supported on device\n");
2737 return -EMEDIUMTYPE;
2740 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2741 puts("Background operations already enabled\n");
2745 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2747 puts("Failed to enable manual background operations\n");
2751 puts("Enabled manual background operations\n");