2 * Copyright 2008, Freescale Semiconductor, Inc
5 * Based vaguely on the Linux code
7 * SPDX-License-Identifier: GPL-2.0+
14 #include <dm/device-internal.h>
18 #include <power/regulator.h>
21 #include <linux/list.h>
23 #include "mmc_private.h"
/*
 * SD allocation-unit (AU) size lookup, indexed by the AU_SIZE field of
 * the SD status register; entries are in units of 512-byte sectors.
 * NOTE(review): this extraction is line-sampled — closing braces and some
 * statements are elided throughout the file.
 */
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
/*
 * MMC_TINY: SPL-sized build with exactly one static MMC device; the
 * lookup and pre-init helpers below operate on that single instance.
 */
37 #if CONFIG_IS_ENABLED(MMC_TINY)
38 static struct mmc mmc_static;
39 struct mmc *find_mmc_device(int dev_num)
44 void mmc_do_preinit(void)
46 struct mmc *m = &mmc_static;
47 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
48 mmc_set_preinit(m, 1);
54 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
56 return &mmc->block_dev;
/* Non-driver-model fallbacks for card-detect / write-protect queries. */
60 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Stub when UHS is enabled without DM — presumably returns "not supported". */
62 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
63 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
/* Weak board hook: boards override to report write-protect state. */
69 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * Write-protect query: ask the board hook first, then fall back to the
 * host controller's getwp op when available.
 */
74 int mmc_getwp(struct mmc *mmc)
78 wp = board_mmc_getwp(mmc);
81 if (mmc->cfg->ops->getwp)
82 wp = mmc->cfg->ops->getwp(mmc);
/* Weak board hook: boards override to report card-detect state. */
90 __weak int board_mmc_getcd(struct mmc *mmc)
/*
 * Command tracing (CONFIG_MMC_TRACE): dump each command index/argument
 * before sending, and the return code plus decoded response afterwards.
 */
96 #ifdef CONFIG_MMC_TRACE
97 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
99 printf("CMD_SEND:%d\n", cmd->cmdidx);
100 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
/* Print the response words in a format chosen by the response type. */
103 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
109 printf("\t\tRET\t\t\t %d\n", ret);
111 switch (cmd->resp_type) {
113 printf("\t\tMMC_RSP_NONE\n");
116 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
120 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
/* R2 (CID/CSD): four 32-bit words, then a byte-wise dump. */
124 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
126 printf("\t\t \t\t 0x%08X \n",
128 printf("\t\t \t\t 0x%08X \n",
130 printf("\t\t \t\t 0x%08X \n",
133 printf("\t\t\t\t\tDUMPING DATA\n");
134 for (i = 0; i < 4; i++) {
136 printf("\t\t\t\t\t%03d - ", i*4);
137 ptr = (u8 *)&cmd->response[i];
/* ptr is pre-advanced past the word; print bytes in reverse order. */
139 for (j = 0; j < 4; j++)
140 printf("%02X ", *ptr--);
145 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
149 printf("\t\tERROR MMC rsp not supported\n");
/* Decode and print the card's current state from an R1 status response. */
155 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
159 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
160 printf("CURR STATE:%d\n", status);
/* Human-readable name for a bus mode (verbose/debug builds only). */
164 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
165 const char *mmc_mode_name(enum bus_mode mode)
167 static const char *const names[] = {
168 [MMC_LEGACY] = "MMC legacy",
169 [SD_LEGACY] = "SD Legacy",
170 [MMC_HS] = "MMC High Speed (26MHz)",
171 [SD_HS] = "SD High Speed (50MHz)",
172 [UHS_SDR12] = "UHS SDR12 (25MHz)",
173 [UHS_SDR25] = "UHS SDR25 (50MHz)",
174 [UHS_SDR50] = "UHS SDR50 (100MHz)",
175 [UHS_SDR104] = "UHS SDR104 (208MHz)",
176 [UHS_DDR50] = "UHS DDR50 (50MHz)",
177 [MMC_HS_52] = "MMC High Speed (52MHz)",
178 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
179 [MMC_HS_200] = "HS200 (200MHz)",
182 if (mode >= MMC_MODES_END)
183 return "Unknown mode";
/*
 * Nominal clock frequency (Hz) for a bus mode. MMC_LEGACY uses the
 * per-device legacy_speed instead of the static table; entries for
 * optional modes are compiled in only when the feature is enabled.
 */
189 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
191 static const int freqs[] = {
192 [SD_LEGACY] = 25000000,
195 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
196 [UHS_SDR12] = 25000000,
197 [UHS_SDR25] = 50000000,
198 [UHS_SDR50] = 100000000,
199 [UHS_DDR50] = 50000000,
200 #ifdef MMC_SUPPORTS_TUNING
201 [UHS_SDR104] = 208000000,
204 [MMC_HS_52] = 52000000,
205 [MMC_DDR_52] = 52000000,
206 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
207 [MMC_HS_200] = 200000000,
211 if (mode == MMC_LEGACY)
212 return mmc->legacy_speed;
213 else if (mode >= MMC_MODES_END)
219 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
221 mmc->selected_mode = mode;
222 mmc->tran_speed = mmc_mode2freq(mmc, mode);
223 mmc->ddr_mode = mmc_is_mode_ddr(mode);
224 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
225 mmc->tran_speed / 1000000);
/*
 * Non-DM command dispatch: wrap the host driver's send_cmd op with the
 * optional trace hooks above.
 */
229 #if !CONFIG_IS_ENABLED(DM_MMC)
230 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
234 mmmc_trace_before_send(mmc, cmd);
235 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
236 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * Poll CMD13 (SEND_STATUS) until the card reports ready-for-data,
 * with a limited number of retries on command failure and an overall
 * timeout. Status error bits are reported via pr_err.
 */
242 int mmc_send_status(struct mmc *mmc, int timeout)
245 int err, retries = 5;
247 cmd.cmdidx = MMC_CMD_SEND_STATUS;
248 cmd.resp_type = MMC_RSP_R1;
/* RCA argument is only meaningful in native (non-SPI) mode. */
249 if (!mmc_host_is_spi(mmc))
250 cmd.cmdarg = mmc->rca << 16;
253 err = mmc_send_cmd(mmc, &cmd, NULL);
255 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
256 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
260 if (cmd.response[0] & MMC_STATUS_MASK) {
261 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
262 pr_err("Status Error: 0x%08X\n",
267 } else if (--retries < 0)
276 mmc_trace_state(mmc, &cmd);
278 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
279 pr_err("Timeout waiting card ready\n");
/*
 * Set the card's block length via CMD16. With CONFIG_MMC_QUIRKS,
 * retries the command for cards known to fail the first attempt.
 */
287 int mmc_set_blocklen(struct mmc *mmc, int len)
295 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
296 cmd.resp_type = MMC_RSP_R1;
299 err = mmc_send_cmd(mmc, &cmd, NULL);
301 #ifdef CONFIG_MMC_QUIRKS
302 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
305 * It has been seen that SET_BLOCKLEN may fail on the first
306 * attempt, let's try a few more time
309 err = mmc_send_cmd(mmc, &cmd, NULL);
/*
 * Standard tuning block patterns from the eMMC/SD specifications, used
 * to calibrate the sampling point for high-speed interfaces.
 */
319 #ifdef MMC_SUPPORTS_TUNING
320 static const u8 tuning_blk_pattern_4bit[] = {
321 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
322 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
323 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
324 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
325 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
326 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
327 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
328 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
331 static const u8 tuning_blk_pattern_8bit[] = {
332 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
333 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
334 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
335 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
336 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
337 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
338 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
339 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
340 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
341 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
342 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
343 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
344 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
345 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
346 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
347 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Issue one tuning command (CMD19/CMD21): read the tuning block sized
 * for the current bus width and compare it to the expected pattern.
 * Returns non-zero on send failure or pattern mismatch.
 */
350 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
353 struct mmc_data data;
354 const u8 *tuning_block_pattern;
/* Pattern/size depend on bus width: 128 bytes for 8-bit, 64 for 4-bit. */
357 if (mmc->bus_width == 8) {
358 tuning_block_pattern = tuning_blk_pattern_8bit;
359 size = sizeof(tuning_blk_pattern_8bit);
360 } else if (mmc->bus_width == 4) {
361 tuning_block_pattern = tuning_blk_pattern_4bit;
362 size = sizeof(tuning_blk_pattern_4bit);
367 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
371 cmd.resp_type = MMC_RSP_R1;
373 data.dest = (void *)data_buf;
375 data.blocksize = size;
376 data.flags = MMC_DATA_READ;
378 err = mmc_send_cmd(mmc, &cmd, &data);
382 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * Read blkcnt blocks starting at 'start' into dst. Uses single- or
 * multiple-block read depending on blkcnt; high-capacity cards address
 * by block number, others by byte offset. A multi-block read is
 * terminated with CMD12 (STOP_TRANSMISSION).
 */
389 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
393 struct mmc_data data;
396 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
398 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
/* High-capacity: argument is a block number, else a byte address. */
400 if (mmc->high_capacity)
403 cmd.cmdarg = start * mmc->read_bl_len;
405 cmd.resp_type = MMC_RSP_R1;
408 data.blocks = blkcnt;
409 data.blocksize = mmc->read_bl_len;
410 data.flags = MMC_DATA_READ;
412 if (mmc_send_cmd(mmc, &cmd, &data))
416 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
418 cmd.resp_type = MMC_RSP_R1b;
419 if (mmc_send_cmd(mmc, &cmd, NULL)) {
420 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
421 pr_err("mmc fail to send stop cmd\n");
/*
 * Block-device read entry point. Signature depends on whether the BLK
 * uclass is enabled (udevice vs. raw blk_desc). Validates the requested
 * range, selects the hardware partition, sets the block length, then
 * reads in chunks capped at the host's b_max.
 */
430 #if CONFIG_IS_ENABLED(BLK)
431 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
433 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
437 #if CONFIG_IS_ENABLED(BLK)
438 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
440 int dev_num = block_dev->devnum;
442 lbaint_t cur, blocks_todo = blkcnt;
447 struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY has no blk uclass, so switch hw partitions directly. */
451 if (CONFIG_IS_ENABLED(MMC_TINY))
452 err = mmc_switch_part(mmc, block_dev->hwpart);
454 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads that run past the end of the device. */
459 if ((start + blkcnt) > block_dev->lba) {
460 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
461 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
462 start + blkcnt, block_dev->lba);
467 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
468 debug("%s: Failed to set blocklen\n", __func__);
/* Transfer loop: at most cfg->b_max blocks per command. */
473 cur = (blocks_todo > mmc->cfg->b_max) ?
474 mmc->cfg->b_max : blocks_todo;
475 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
476 debug("%s: Failed to read blocks\n", __func__);
481 dst += cur * mmc->read_bl_len;
482 } while (blocks_todo > 0);
/* CMD0: reset the card to idle state. */
487 static int mmc_go_idle(struct mmc *mmc)
494 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
496 cmd.resp_type = MMC_RSP_NONE;
498 err = mmc_send_cmd(mmc, &cmd, NULL);
/*
 * UHS voltage switch sequence (SD spec): send CMD11, wait for the card
 * to drive dat[0:3] low, gate the clock, change the host signalling
 * voltage, re-enable the clock and wait for dat0 to go high again.
 */
508 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
509 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
515 * Send CMD11 only if the request is to switch the card to
/* Switching back to 3.3V needs no CMD11 — just set the host voltage. */
518 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
519 return mmc_set_signal_voltage(mmc, signal_voltage);
521 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
523 cmd.resp_type = MMC_RSP_R1;
525 err = mmc_send_cmd(mmc, &cmd, NULL);
529 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
533 * The card should drive cmd and dat[0:3] low immediately
534 * after the response of cmd11, but wait 100 us to be sure
536 err = mmc_wait_dat0(mmc, 0, 100);
543 * During a signal voltage level switch, the clock must be gated
544 * for 5 ms according to the SD spec
546 mmc_set_clock(mmc, mmc->clock, true);
548 err = mmc_set_signal_voltage(mmc, signal_voltage);
552 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
554 mmc_set_clock(mmc, mmc->clock, false);
557 * Failure to switch is indicated by the card holding
558 * dat[0:3] low. Wait for at least 1 ms according to spec
560 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * SD initialization: loop CMD55+ACMD41 until the card leaves busy,
 * advertising the host's voltage window, HCS for SDv2 cards and S18R
 * when the caller requests a UHS voltage switch. Records the OCR and
 * derived high_capacity flag on success.
 */
570 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
577 cmd.cmdidx = MMC_CMD_APP_CMD;
578 cmd.resp_type = MMC_RSP_R1;
581 err = mmc_send_cmd(mmc, &cmd, NULL);
586 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
587 cmd.resp_type = MMC_RSP_R3;
590 * Most cards do not answer if some reserved bits
591 * in the ocr are set. However, Some controller
592 * can set bit 7 (reserved for low voltages), but
593 * how to manage low voltages SD card is not yet
596 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
597 (mmc->cfg->voltages & 0xff8000);
599 if (mmc->version == SD_VERSION_2)
600 cmd.cmdarg |= OCR_HCS;
603 cmd.cmdarg |= OCR_S18R;
605 err = mmc_send_cmd(mmc, &cmd, NULL);
/* OCR_BUSY set means initialization is complete. */
610 if (cmd.response[0] & OCR_BUSY)
619 if (mmc->version != SD_VERSION_2)
620 mmc->version = SD_VERSION_1_0;
622 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
623 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
624 cmd.resp_type = MMC_RSP_R3;
627 err = mmc_send_cmd(mmc, &cmd, NULL);
633 mmc->ocr = cmd.response[0];
/* Card accepted S18R (bit 24) and is ready: do the 1.8V switch now. */
635 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
636 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
638 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
644 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * One CMD1 (SEND_OP_COND) iteration for eMMC. When use_arg is set (and
 * not in SPI mode) the argument echoes the card's voltage window and
 * access mode from the previously read OCR, plus HCS. Updates mmc->ocr.
 */
650 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
655 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
656 cmd.resp_type = MMC_RSP_R3;
658 if (use_arg && !mmc_host_is_spi(mmc))
659 cmd.cmdarg = OCR_HCS |
660 (mmc->cfg->voltages &
661 (mmc->ocr & OCR_VOLTAGE_MASK)) |
662 (mmc->ocr & OCR_ACCESS_MODE);
664 err = mmc_send_cmd(mmc, &cmd, NULL);
667 mmc->ocr = cmd.response[0];
/*
 * Start eMMC init: probe capabilities with two CMD1s; if the card is
 * still busy, defer completion (op_cond_pending) to a later call of
 * mmc_complete_op_cond().
 */
671 static int mmc_send_op_cond(struct mmc *mmc)
675 /* Some cards seem to need this */
678 /* Asking to the card its capabilities */
679 for (i = 0; i < 2; i++) {
680 err = mmc_send_op_cond_iter(mmc, i != 0);
684 /* exit if not busy (flag seems to be inverted) */
685 if (mmc->ocr & OCR_BUSY)
688 mmc->op_cond_pending = 1;
/*
 * Finish eMMC init: poll CMD1 until OCR_BUSY is set or the timeout
 * expires, read the OCR over SPI if applicable, then record version
 * and high-capacity status.
 */
692 static int mmc_complete_op_cond(struct mmc *mmc)
699 mmc->op_cond_pending = 0;
700 if (!(mmc->ocr & OCR_BUSY)) {
701 /* Some cards seem to need this */
704 start = get_timer(0);
706 err = mmc_send_op_cond_iter(mmc, 1);
709 if (mmc->ocr & OCR_BUSY)
711 if (get_timer(start) > timeout)
717 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
718 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
719 cmd.resp_type = MMC_RSP_R3;
722 err = mmc_send_cmd(mmc, &cmd, NULL);
727 mmc->ocr = cmd.response[0];
730 mmc->version = MMC_VERSION_UNKNOWN;
732 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * Read the 512-byte EXT_CSD register (CMD8 for eMMC) into ext_csd.
 * Caller must supply a cache-aligned MMC_MAX_BLOCK_LEN buffer.
 */
739 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
742 struct mmc_data data;
745 /* Get the Card Status Register */
746 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
747 cmd.resp_type = MMC_RSP_R1;
750 data.dest = (char *)ext_csd;
752 data.blocksize = MMC_MAX_BLOCK_LEN;
753 data.flags = MMC_DATA_READ;
755 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * Write one EXT_CSD byte via CMD6 (SWITCH), retrying on failure, then
 * wait for the card to return to ready via SEND_STATUS.
 */
760 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
767 cmd.cmdidx = MMC_CMD_SWITCH;
768 cmd.resp_type = MMC_RSP_R1b;
769 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
773 while (retries > 0) {
774 ret = mmc_send_cmd(mmc, &cmd, NULL);
776 /* Waiting for the ready status */
778 ret = mmc_send_status(mmc, timeout);
/*
 * Program the eMMC HS_TIMING field for the requested bus mode, then for
 * plain high-speed modes re-read EXT_CSD to verify the switch took.
 */
789 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
794 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
800 speed_bits = EXT_CSD_TIMING_HS;
802 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
804 speed_bits = EXT_CSD_TIMING_HS200;
808 speed_bits = EXT_CSD_TIMING_LEGACY;
813 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
818 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
819 /* Now check to see that it worked */
820 err = mmc_send_ext_csd(mmc, test_csd);
824 /* No high-speed support */
825 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Derive the eMMC card's capability mask (mmc->card_caps) from the
 * CARD_TYPE byte of a previously fetched EXT_CSD. SPI hosts and
 * pre-v4 cards only get the legacy capabilities.
 */
832 static int mmc_get_capabilities(struct mmc *mmc)
834 u8 *ext_csd = mmc->ext_csd;
837 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
839 if (mmc_host_is_spi(mmc))
842 /* Only version 4 supports high-speed */
843 if (mmc->version < MMC_VERSION_4)
847 pr_err("No ext_csd found!\n"); /* this should never happen */
851 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
853 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
854 mmc->cardtype = cardtype;
856 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
857 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
858 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
859 mmc->card_caps |= MMC_MODE_HS200;
862 if (cardtype & EXT_CSD_CARD_TYPE_52) {
863 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
864 mmc->card_caps |= MMC_MODE_DDR_52MHz;
865 mmc->card_caps |= MMC_MODE_HS_52MHz;
867 if (cardtype & EXT_CSD_CARD_TYPE_26)
868 mmc->card_caps |= MMC_MODE_HS;
/*
 * Update mmc->capacity (and the blk_desc lba) for the selected hw
 * partition: user area, boot, RPMB or one of the four GP partitions.
 */
873 static int mmc_set_capacity(struct mmc *mmc, int part_num)
877 mmc->capacity = mmc->capacity_user;
881 mmc->capacity = mmc->capacity_boot;
884 mmc->capacity = mmc->capacity_rpmb;
890 mmc->capacity = mmc->capacity_gp[part_num - 4];
896 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * HS200 is not permitted while a boot/RPMB partition is selected; if the
 * current mode is forbidden (or no longer optimal), re-run mode selection
 * with the forbidden capabilities masked out.
 */
901 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
902 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
907 if (part_num & PART_ACCESS_MASK)
908 forbidden = MMC_CAP(MMC_HS_200);
910 if (MMC_CAP(mmc->selected_mode) & forbidden) {
911 debug("selected mode (%s) is forbidden for part %d\n",
912 mmc_mode_name(mmc->selected_mode), part_num);
914 } else if (mmc->selected_mode != mmc->best_mode) {
915 debug("selected mode is not optimal\n");
920 return mmc_select_mode_and_width(mmc,
921 mmc->card_caps & ~forbidden);
/* No-op stand-in when HS200 support is compiled out. */
926 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
927 unsigned int part_num)
/*
 * Select a hardware partition via the PART_CONF EXT_CSD field, then
 * refresh the cached capacity/hwpart on success (or when returning to
 * the raw device even if the switch reported -ENODEV).
 */
933 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
937 ret = mmc_boot_part_access_chk(mmc, part_num);
941 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
942 (mmc->part_config & ~PART_ACCESS_MASK)
943 | (part_num & PART_ACCESS_MASK));
946 * Set the capacity if the switch succeeded or was intended
947 * to return to representing the raw device.
949 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
950 ret = mmc_set_capacity(mmc, part_num);
951 mmc_get_blk_desc(mmc)->hwpart = part_num;
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP
 * partitions, write-reliability) per 'conf'. 'mode' selects whether to
 * only validate (CHECK), write the layout (SET), or additionally set
 * PART_SETTING_COMPLETED (COMPLETE) — the latter is irreversible and
 * takes effect after a power cycle. Logic is left untouched here: the
 * sequence of EXT_CSD writes is order-sensitive per JESD84.
 */
957 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
958 int mmc_hwpart_config(struct mmc *mmc,
959 const struct mmc_hwpart_conf *conf,
960 enum mmc_hwpart_conf_mode mode)
966 u32 max_enh_size_mult;
967 u32 tot_enh_size_mult = 0;
970 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
972 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
975 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
976 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
980 if (!(mmc->part_support & PART_SUPPORT)) {
981 pr_err("Card does not support partitioning\n");
985 if (!mmc->hc_wp_grp_size) {
986 pr_err("Card does not define HC WP group size\n");
990 /* check partition alignment and total enhanced size */
991 if (conf->user.enh_size) {
992 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
993 conf->user.enh_start % mmc->hc_wp_grp_size) {
994 pr_err("User data enhanced area not HC WP group "
998 part_attrs |= EXT_CSD_ENH_USR;
999 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1000 if (mmc->high_capacity) {
1001 enh_start_addr = conf->user.enh_start;
1003 enh_start_addr = (conf->user.enh_start << 9);
1009 tot_enh_size_mult += enh_size_mult;
1011 for (pidx = 0; pidx < 4; pidx++) {
1012 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1013 pr_err("GP%i partition not HC WP group size "
1014 "aligned\n", pidx+1);
1017 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1018 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1019 part_attrs |= EXT_CSD_ENH_GP(pidx);
1020 tot_enh_size_mult += gp_size_mult[pidx];
1024 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1025 pr_err("Card does not support enhanced attribute\n");
1026 return -EMEDIUMTYPE;
1029 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field. */
1034 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1035 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1036 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1037 if (tot_enh_size_mult > max_enh_size_mult) {
1038 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1039 tot_enh_size_mult, max_enh_size_mult);
1040 return -EMEDIUMTYPE;
1043 /* The default value of EXT_CSD_WR_REL_SET is device
1044 * dependent, the values can only be changed if the
1045 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1046 * changed only once and before partitioning is completed. */
1047 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1048 if (conf->user.wr_rel_change) {
1049 if (conf->user.wr_rel_set)
1050 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1052 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1054 for (pidx = 0; pidx < 4; pidx++) {
1055 if (conf->gp_part[pidx].wr_rel_change) {
1056 if (conf->gp_part[pidx].wr_rel_set)
1057 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1059 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1063 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1064 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1065 puts("Card does not support host controlled partition write "
1066 "reliability settings\n");
1067 return -EMEDIUMTYPE;
1070 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1071 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1072 pr_err("Card already partitioned\n");
1076 if (mode == MMC_HWPART_CONF_CHECK)
1079 /* Partitioning requires high-capacity size definitions */
1080 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1081 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1082 EXT_CSD_ERASE_GROUP_DEF, 1);
1087 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1089 /* update erase group size to be high-capacity */
1090 mmc->erase_grp_size =
1091 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1095 /* all OK, write the configuration */
1096 for (i = 0; i < 4; i++) {
1097 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1098 EXT_CSD_ENH_START_ADDR+i,
1099 (enh_start_addr >> (i*8)) & 0xFF);
1103 for (i = 0; i < 3; i++) {
1104 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1105 EXT_CSD_ENH_SIZE_MULT+i,
1106 (enh_size_mult >> (i*8)) & 0xFF);
1110 for (pidx = 0; pidx < 4; pidx++) {
1111 for (i = 0; i < 3; i++) {
1112 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1113 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1114 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1119 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1120 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1124 if (mode == MMC_HWPART_CONF_SET)
1127 /* The WR_REL_SET is a write-once register but shall be
1128 * written before setting PART_SETTING_COMPLETED. As it is
1129 * write-once we can only write it when completing the
1131 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1132 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1133 EXT_CSD_WR_REL_SET, wr_rel_set);
1138 /* Setting PART_SETTING_COMPLETED confirms the partition
1139 * configuration but it only becomes effective after power
1140 * cycle, so we do not adjust the partition related settings
1141 * in the mmc struct. */
1143 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1144 EXT_CSD_PARTITION_SETTING,
1145 EXT_CSD_PARTITION_SETTING_COMPLETED);
/*
 * Non-DM card-detect query: board hook first, then the host driver's
 * getcd op when provided.
 */
1153 #if !CONFIG_IS_ENABLED(DM_MMC)
1154 int mmc_getcd(struct mmc *mmc)
1158 cd = board_mmc_getcd(mmc);
1161 if (mmc->cfg->ops->getcd)
1162 cd = mmc->cfg->ops->getcd(mmc);
/*
 * CMD6 (SWITCH_FUNC): set function 'value' in 'group' (check or switch
 * per 'mode', bit 31) and read back the 64-byte switch status into resp.
 */
1171 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1174 struct mmc_data data;
1176 /* Switch the frequency */
1177 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1178 cmd.resp_type = MMC_RSP_R1;
/* All groups default to 0xf (no change); then patch the target group. */
1179 cmd.cmdarg = (mode << 31) | 0xffffff;
1180 cmd.cmdarg &= ~(0xf << (group * 4));
1181 cmd.cmdarg |= value << (group * 4);
1183 data.dest = (char *)resp;
1184 data.blocksize = 64;
1186 data.flags = MMC_DATA_READ;
1188 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * Discover an SD card's capabilities: read the SCR (spec version,
 * 4-bit support), then probe CMD6 group 1 for high-speed, and for
 * SDv3 cards decode the UHS bus-mode bits from the switch status.
 */
1192 static int sd_get_capabilities(struct mmc *mmc)
1196 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1197 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1198 struct mmc_data data;
1200 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1204 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
/* SPI hosts stay on the legacy capabilities. */
1206 if (mmc_host_is_spi(mmc))
1209 /* Read the SCR to find out if this card supports higher speeds */
1210 cmd.cmdidx = MMC_CMD_APP_CMD;
1211 cmd.resp_type = MMC_RSP_R1;
1212 cmd.cmdarg = mmc->rca << 16;
1214 err = mmc_send_cmd(mmc, &cmd, NULL);
1219 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1220 cmd.resp_type = MMC_RSP_R1;
1226 data.dest = (char *)scr;
1229 data.flags = MMC_DATA_READ;
1231 err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is big-endian on the wire. */
1240 mmc->scr[0] = __be32_to_cpu(scr[0]);
1241 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC / SD_SPEC3 fields -> spec version. */
1243 switch ((mmc->scr[0] >> 24) & 0xf) {
1245 mmc->version = SD_VERSION_1_0;
1248 mmc->version = SD_VERSION_1_10;
1251 mmc->version = SD_VERSION_2;
1252 if ((mmc->scr[0] >> 15) & 0x1)
1253 mmc->version = SD_VERSION_3;
1256 mmc->version = SD_VERSION_1_0;
1260 if (mmc->scr[0] & SD_DATA_4BIT)
1261 mmc->card_caps |= MMC_MODE_4BIT;
1263 /* Version 1.0 doesn't support switching */
1264 if (mmc->version == SD_VERSION_1_0)
1269 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1270 (u8 *)switch_status);
1275 /* The high-speed function is busy. Try again */
1276 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1280 /* If high-speed isn't supported, we return */
1281 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1282 mmc->card_caps |= MMC_CAP(SD_HS);
1284 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1285 /* Version before 3.0 don't support UHS modes */
1286 if (mmc->version < SD_VERSION_3)
1289 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1290 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1291 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1292 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1293 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1294 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1295 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1296 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1297 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1298 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1299 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * Switch the SD card's bus speed (CMD6 group 1) to match 'mode' and
 * verify the card accepted the requested function.
 */
1305 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1309 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1314 speed = UHS_SDR12_BUS_SPEED;
1317 speed = HIGH_SPEED_BUS_SPEED;
1319 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1321 speed = UHS_SDR12_BUS_SPEED;
1324 speed = UHS_SDR25_BUS_SPEED;
1327 speed = UHS_SDR50_BUS_SPEED;
1330 speed = UHS_DDR50_BUS_SPEED;
1333 speed = UHS_SDR104_BUS_SPEED;
1340 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* Function-selection field of the switch status must echo 'speed'. */
1344 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
/*
 * Set the SD card's bus width (1 or 4 data lines) via ACMD6.
 */
1350 int sd_select_bus_width(struct mmc *mmc, int w)
1355 if ((w != 4) && (w != 1))
1358 cmd.cmdidx = MMC_CMD_APP_CMD;
1359 cmd.resp_type = MMC_RSP_R1;
1360 cmd.cmdarg = mmc->rca << 16;
1362 err = mmc_send_cmd(mmc, &cmd, NULL);
1366 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1367 cmd.resp_type = MMC_RSP_R1;
1372 err = mmc_send_cmd(mmc, &cmd, NULL);
/*
 * Read the 64-byte SD status (ACMD13) and extract erase-related
 * parameters: allocation-unit size plus erase timeout/offset, stored
 * in mmc->ssr for later erase-time estimation.
 */
1379 static int sd_read_ssr(struct mmc *mmc)
1383 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1384 struct mmc_data data;
1386 unsigned int au, eo, et, es;
1388 cmd.cmdidx = MMC_CMD_APP_CMD;
1389 cmd.resp_type = MMC_RSP_R1;
1390 cmd.cmdarg = mmc->rca << 16;
1392 err = mmc_send_cmd(mmc, &cmd, NULL);
1396 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1397 cmd.resp_type = MMC_RSP_R1;
1401 data.dest = (char *)ssr;
1402 data.blocksize = 64;
1404 data.flags = MMC_DATA_READ;
1406 err = mmc_send_cmd(mmc, &cmd, &data);
/* SSR words arrive big-endian; convert in place. */
1414 for (i = 0; i < 16; i++)
1415 ssr[i] = be32_to_cpu(ssr[i]);
1417 au = (ssr[2] >> 12) & 0xF;
/* AU codes above 9 are only defined from SD spec 3.0 onward. */
1418 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1419 mmc->ssr.au = sd_au_size[au];
1420 es = (ssr[3] >> 24) & 0xFF;
1421 es |= (ssr[2] & 0xFF) << 8;
1422 et = (ssr[3] >> 18) & 0x3F;
1424 eo = (ssr[3] >> 16) & 0x3;
1425 mmc->ssr.erase_timeout = (et * 1000) / es;
1426 mmc->ssr.erase_offset = eo * 1000;
1429 debug("Invalid Allocation Unit Size.\n");
1435 /* frequency bases */
1436 /* divided by 10 to be nice to platforms without floating point */
1437 static const int fbase[] = {
1444 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1445 * to platforms without floating point.
1447 static const u8 multipliers[] = {
1466 static inline int bus_width(uint cap)
1468 if (cap == MMC_MODE_8BIT)
1470 if (cap == MMC_MODE_4BIT)
1472 if (cap == MMC_MODE_1BIT)
1474 pr_warn("invalid bus witdh capability 0x%x\n", cap);
/*
 * Non-DM wrappers for optional host driver hooks: tuning execution and
 * the 74-clock init stream. Bodies are elided in this extraction;
 * presumably they dispatch to mmc->cfg->ops when set — verify upstream.
 */
1478 #if !CONFIG_IS_ENABLED(DM_MMC)
1479 #ifdef MMC_SUPPORTS_TUNING
1480 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1486 static void mmc_send_init_stream(struct mmc *mmc)
1490 static int mmc_set_ios(struct mmc *mmc)
1494 if (mmc->cfg->ops->set_ios)
1495 ret = mmc->cfg->ops->set_ios(mmc);
1501 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1503 if (clock > mmc->cfg->f_max)
1504 clock = mmc->cfg->f_max;
1506 if (clock < mmc->cfg->f_min)
1507 clock = mmc->cfg->f_min;
1510 mmc->clk_disable = disable;
1512 return mmc_set_ios(mmc);
1515 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1517 mmc->bus_width = width;
1519 return mmc_set_ios(mmc);
/* Debug helper: print a capability mask as bus widths plus mode names. */
1522 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1524 * helper function to display the capabilities in a human
1525 * friendly manner. The capabilities include bus width and
1528 void mmc_dump_capabilities(const char *text, uint caps)
1532 printf("%s: widths [", text);
1533 if (caps & MMC_MODE_8BIT)
1535 if (caps & MMC_MODE_4BIT)
1537 if (caps & MMC_MODE_1BIT)
/* \b\b erases the trailing ", " before closing the bracket. */
1539 printf("\b\b] modes [");
1540 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1541 if (MMC_CAP(mode) & caps)
1542 printf("%s, ", mmc_mode_name(mode));
/*
 * Per-mode descriptor used by the mode-selection tables below: a bus
 * mode, the widths it supports and (optionally) its tuning command.
 */
1547 struct mode_width_tuning {
1550 #ifdef MMC_SUPPORTS_TUNING
1555 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1556 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1559 case MMC_SIGNAL_VOLTAGE_000: return 0;
1560 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1561 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1562 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1567 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1571 if (mmc->signal_voltage == signal_voltage)
1574 mmc->signal_voltage = signal_voltage;
1575 err = mmc_set_ios(mmc);
1577 debug("unable to set voltage (err %d)\n", err);
1582 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
/*
 * SD bus modes ordered fastest-first; mode selection walks this table
 * and picks the first mode both card and host support. The iteration
 * macro below filters by the caller's capability mask.
 */
1588 static const struct mode_width_tuning sd_modes_by_pref[] = {
1589 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1590 #ifdef MMC_SUPPORTS_TUNING
1593 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1594 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1599 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1603 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1607 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1612 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1614 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1617 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1622 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1626 #define for_each_sd_mode_by_pref(caps, mwt) \
1627 for (mwt = sd_modes_by_pref;\
1628 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1630 if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the best working (mode, width) combination for an SD card:
 * intersect card and host capabilities, then try modes fastest-first
 * and widths widest-first, configuring card then host, running tuning
 * where required and validating with an SSR read. On failure, drop back
 * to SD_LEGACY and try the next candidate.
 */
1632 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1635 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1636 const struct mode_width_tuning *mwt;
1637 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1638 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1640 bool uhs_en = false;
1645 mmc_dump_capabilities("sd card", card_caps);
1646 mmc_dump_capabilities("host", mmc->host_caps);
1649 /* Restrict card's capabilities by what the host can do */
1650 caps = card_caps & mmc->host_caps;
1655 for_each_sd_mode_by_pref(caps, mwt) {
1658 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1659 if (*w & caps & mwt->widths) {
1660 debug("trying mode %s width %d (at %d MHz)\n",
1661 mmc_mode_name(mwt->mode),
1663 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1665 /* configure the bus width (card + host) */
1666 err = sd_select_bus_width(mmc, bus_width(*w));
1669 mmc_set_bus_width(mmc, bus_width(*w));
1671 /* configure the bus mode (card) */
1672 err = sd_set_card_speed(mmc, mwt->mode);
1676 /* configure the bus mode (host) */
1677 mmc_select_mode(mmc, mwt->mode);
1678 mmc_set_clock(mmc, mmc->tran_speed, false);
1680 #ifdef MMC_SUPPORTS_TUNING
1681 /* execute tuning if needed */
1682 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1683 err = mmc_execute_tuning(mmc,
1686 debug("tuning failed\n");
/* SSR read doubles as a sanity check of the new bus settings. */
1692 err = sd_read_ssr(mmc);
1696 pr_warn("bad ssr\n");
1699 /* revert to a safer bus speed */
1700 mmc_select_mode(mmc, SD_LEGACY);
1701 mmc_set_clock(mmc, mmc->tran_speed, false);
1706 printf("unable to select a mode\n");
1711 * read the compare the part of ext csd that is constant.
1712 * This can be used to check that the transfer is working
/*
 * Sanity-check a new bus configuration by re-reading EXT_CSD and
 * comparing the read-only fields against the cached copy; a mismatch
 * means the transfer path is corrupting data.
 */
1715 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1718 const u8 *ext_csd = mmc->ext_csd;
1719 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1721 if (mmc->version < MMC_VERSION_4)
1724 err = mmc_send_ext_csd(mmc, test_csd);
1728 /* Only compare read only fields */
1729 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1730 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1731 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1732 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1733 ext_csd[EXT_CSD_REV]
1734 == test_csd[EXT_CSD_REV] &&
1735 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1736 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1737 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1738 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
/*
 * Choose the lowest signalling voltage supported by both card (per its
 * EXT_CSD card type for the target mode) and the allowed mask, trying
 * candidates lowest-first via ffs().
 */
1744 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1745 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1746 uint32_t allowed_mask)
1752 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
1753 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1754 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
1755 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1758 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1759 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1760 MMC_SIGNAL_VOLTAGE_180;
1761 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1762 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1765 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1769 while (card_mask & allowed_mask) {
1770 enum mmc_voltage best_match;
/* Voltage flags are single bits, so ffs() picks the lowest voltage. */
1772 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1773 if (!mmc_set_signal_voltage(mmc, best_match))
1776 allowed_mask &= ~best_match;
/* No-op stand-in when I/O voltage switching is compiled out. */
1782 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1783 uint32_t allowed_mask)
/*
 * eMMC bus modes in decreasing order of preference; the selection loop
 * in mmc_select_mode_and_width() tries them first to last.
 * NOTE(review): the .mode initializers of each entry are elided here.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate over mmc_modes_by_pref entries whose mode is set in caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	     mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
		if (caps & MMC_CAP(mwt->mode))
/*
 * Map host bus-width capability bits (plus a DDR flag) to the matching
 * EXT_CSD BUS_WIDTH register value, widest/DDR entries first.
 * NOTE(review): the struct member declarations are elided in this view.
 */
static const struct ext_csd_bus_width {
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
/* Iterate over ext_csd_bus_width entries matching caps and DDR flag */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	     ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Negotiate the best bus mode and width for an eMMC device: walk the
 * preference-ordered mode table and, for each mode, the supported
 * widths; program the card then the host, and validate each candidate
 * with an EXT_CSD read-back. On failure, fall back to 1-bit legacy.
 * NOTE(review): several error checks and returns are elided in this view.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)

	if (!mmc->ext_csd) {
		debug("No ext_csd found!\n"); /* this should never happen */

	/* start from a safe, known-good clock before switching modes */
	mmc_set_clock(mmc, mmc->legacy_speed, false);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;

			debug("trying mode %s width %d (at %d MHz)\n",
			      mmc_mode_name(mwt->mode),
			      bus_width(ecbw->cap),
			      mmc_mode2freq(mmc, mwt->mode) / 1000000);

			/* remember the voltage to restore it on failure */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);

			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 ecbw->ext_csd_bits);

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, false);
#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
			err = mmc_execute_tuning(mmc, mwt->tuning);
				debug("tuning failed\n");

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);

			/* candidate failed: restore the previous voltage */
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);

	pr_err("unable to select a mode\n");
/*
 * Version-4+ eMMC setup: read and cache EXT_CSD, then derive the exact
 * capacity, spec revision, partition configuration/sizes and the
 * erase / write-protect group geometry from it. Not applicable to SD
 * cards or MMC earlier than v4.
 * NOTE(review): many error checks and returns are elided in this view.
 */
static int mmc_startup_v4(struct mmc *mmc)
	bool has_parts = false;
	bool part_completed;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))

	/* check ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);

	/* store the ext csd for future reference */
		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);

	if (ext_csd[EXT_CSD_REV] >= 2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
			| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
			| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
			| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		/* only trust SEC_CNT when it reports more than 2 GiB */
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;

	/* map the EXT_CSD revision to a spec version (cases elided) */
	switch (ext_csd[EXT_CSD_REV]) {
		mmc->version = MMC_VERSION_4_1;
		mmc->version = MMC_VERSION_4_2;
		mmc->version = MMC_VERSION_4_3;
		mmc->version = MMC_VERSION_4_41;
		mmc->version = MMC_VERSION_4_5;
		mmc->version = MMC_VERSION_5_0;
		mmc->version = MMC_VERSION_5_1;

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	/* store the partition info of emmc */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	/* boot and RPMB sizes are multiples of 128 KiB (mult << 17) */
	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	/* sizes of the four general-purpose partitions, 3 bytes each */
	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];

		if (!part_completed)
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		/* convert group count to bytes (groups of 512 KiB) */
		mmc->capacity_gp[i] <<= 19;

	if (part_completed) {
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* start address is in 512-byte sectors on HC devices */
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */

	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))

		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
		/* Read out group size from ext_csd */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				   (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				   (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				   (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;

		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)

	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	/* NOTE(review): error-path cleanup context elided in this view */
	mmc->ext_csd = NULL;
/*
 * Take an identified card through to Transfer state and read its static
 * configuration: CID, RCA, CSD-derived timing and capacity, optional
 * DSR, v4+ EXT_CSD handling, mode/width negotiation, and finally the
 * block-device descriptor.
 * NOTE(review): many error checks and returns are elided in this view.
 */
static int mmc_startup(struct mmc *mmc)
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;

		err = mmc_send_cmd(mmc, &cmd, NULL);

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
		/*
		 * It has been seen that SEND_CID may fail on the first
		 * attempt, let's try a few more times
		 */
			err = mmc_send_cmd(mmc, &cmd, NULL);
		} while (retries--);

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

			mmc->rca = (cmd.response[0] >> 16) & 0xffff;

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* decode the CSD spec-version field if not already known */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

			mmc->version = MMC_VERSION_1_2;
			mmc->version = MMC_VERSION_1_4;
			mmc->version = MMC_VERSION_2_2;
			mmc->version = MMC_VERSION_3;
			mmc->version = MMC_VERSION_4;
			mmc->version = MMC_VERSION_1_2;

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->legacy_speed = freq * mult;
	mmc_select_mode(mmc, MMC_LEGACY);

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

		mmc->write_bl_len = mmc->read_bl_len;

		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;

		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* program the optional Driver Stage Register if implemented */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			pr_warn("MMC: SET_DSR failed\n");

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;

	err = mmc_startup_v4(mmc);

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);

		err = sd_get_capabilities(mmc);
		err = sd_select_mode_and_width(mmc, mmc->card_caps);

		err = mmc_get_capabilities(mmc);
		mmc_select_mode_and_width(mmc, mmc->card_caps);

	mmc->best_mode = mmc->selected_mode;

	/* Fix the block length for DDR mode */
	if (mmc->ddr_mode) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
	(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
	 !defined(CONFIG_USE_TINY_PRINTF))
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);

	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2347 static int mmc_send_if_cond(struct mmc *mmc)
2352 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2353 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2354 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2355 cmd.resp_type = MMC_RSP_R7;
2357 err = mmc_send_cmd(mmc, &cmd, NULL);
2362 if ((cmd.response[0] & 0xff) != 0xaa)
2365 mmc->version = SD_VERSION_2;
#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)

/*
 * Resolve the card's power supplies. With driver model, look up the
 * vmmc/vqmmc regulators from the device tree (a missing supply is
 * logged but not fatal); without it, fall back to the board hook.
 */
static int mmc_power_init(struct mmc *mmc)
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	/* main card supply */
	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
		debug("%s: No vmmc supply\n", mmc->dev->name);

	/* I/O line supply, used for signal-voltage switching */
	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
		debug("%s: No vqmmc supply\n", mmc->dev->name);
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
2404 * put the host in the initial state:
2405 * - turn on Vdd (card power supply)
2406 * - configure the bus width and clock to minimal values
2408 static void mmc_set_initial_state(struct mmc *mmc)
2412 /* First try to set 3.3V. If it fails set to 1.8V */
2413 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2415 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2417 pr_warn("mmc: failed to set signal voltage\n");
2419 mmc_select_mode(mmc, MMC_LEGACY);
2420 mmc_set_bus_width(mmc, 1);
2421 mmc_set_clock(mmc, 0, false);
2424 static int mmc_power_on(struct mmc *mmc)
2426 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2427 if (mmc->vmmc_supply) {
2428 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2431 puts("Error enabling VMMC supply\n");
2439 static int mmc_power_off(struct mmc *mmc)
2441 mmc_set_clock(mmc, 1, true);
2442 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2443 if (mmc->vmmc_supply) {
2444 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2447 debug("Error disabling VMMC supply\n");
/*
 * Power the card fully off and back on, used to recover from failed
 * UHS/voltage negotiation.
 *
 * Return: 0 on success or the error from power off/on.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int ret;

	ret = mmc_power_off(mmc);
	if (ret)
		return ret;
	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);
	return mmc_power_on(mmc);
}
/*
 * First phase of card initialization: detect card presence, power the
 * card up, reset it (CMD0) and start the possibly-asynchronous
 * operating-condition negotiation. mmc_complete_init() finishes the
 * sequence.
 * NOTE(review): several error checks and returns are elided in this view.
 */
int mmc_start_init(struct mmc *mmc)
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings, so advertise them unconditionally
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);

#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();

	err = mmc_power_init(mmc);

#ifdef CONFIG_MMC_QUIRKS
	/* quirks default to enabled; drivers may clear them */
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;

	err = mmc_power_cycle(mmc);
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */

	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);

	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	/* The internal partition reset to user partition(0) at every CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS negotiation failed — power cycle before retrying */
		mmc_power_cycle(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");

	mmc->init_in_progress = 1;
2578 static int mmc_complete_init(struct mmc *mmc)
2582 mmc->init_in_progress = 0;
2583 if (mmc->op_cond_pending)
2584 err = mmc_complete_op_cond(mmc);
2587 err = mmc_startup(mmc);
/*
 * Fully initialize a card: run mmc_start_init() if an init is not
 * already in progress, then mmc_complete_init(); logs the result and
 * elapsed time.
 * NOTE(review): the early-out for already-initialized devices and the
 * return statement are elided in this view.
 */
int mmc_init(struct mmc *mmc)
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	/* presumably registers mmc in the uclass priv — confirm (elided) */
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

		err = mmc_complete_init(mmc);

	printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
/* Record a DSR value for the card — body elided in this view, confirm */
int mmc_set_dsr(struct mmc *mmc, u16 val)

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)

/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)

/* Request (or cancel) early init of this device in mmc_initialize() */
void mmc_set_preinit(struct mmc *mmc, int preinit)
	mmc->preinit = preinit;
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with driver model: probing happens on demand — stub */
static int mmc_probe(bd_t *bis)
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Driver model: enumerate and probe every device in the MMC uclass */
static int mmc_probe(bd_t *bis)
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
			pr_err("%s - probe failed: %d\n", dev->name, ret);

/* Legacy (non-DM): delegate device registration to board code */
static int mmc_probe(bd_t *bis)
	if (board_mmc_init(bis) < 0)
/*
 * One-time global MMC subsystem initialization: probe all devices and,
 * outside SPL, print the resulting device list. Subsequent calls are
 * guarded by the static flag and return immediately.
 */
int mmc_initialize(bd_t *bis)
	static int initialized = 0;

	if (initialized)	/* Avoid initializing mmc multiple times */

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)

	ret = mmc_probe(bis);

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
2712 #ifdef CONFIG_CMD_BKOPS_ENABLE
2713 int mmc_set_bkops_enable(struct mmc *mmc)
2716 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2718 err = mmc_send_ext_csd(mmc, ext_csd);
2720 puts("Could not get ext_csd register values\n");
2724 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2725 puts("Background operations not supported on device\n");
2726 return -EMEDIUMTYPE;
2729 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2730 puts("Background operations already enabled\n");
2734 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2736 puts("Failed to enable manual background operations\n");
2740 puts("Enabled manual background operations\n");