2 * Copyright 2008, Freescale Semiconductor, Inc
5 * Based vaguely on the Linux code
7 * SPDX-License-Identifier: GPL-2.0+
14 #include <dm/device-internal.h>
18 #include <power/regulator.h>
21 #include <linux/list.h>
23 #include "mmc_private.h"
/*
 * NOTE(review): this chunk is an elided extract -- each line carries its
 * original file line number and interior lines (e.g. the closing brace of
 * this array) are missing. Comments below describe only what is visible.
 */
/*
 * SD Status AU_SIZE field (4-bit code) -> allocation unit size expressed
 * in 512-byte sectors; index 0 means "not defined". Used by sd_read_ssr()
 * further down when decoding the SD Status register.
 */
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
37 #if CONFIG_IS_ENABLED(MMC_TINY)
38 static struct mmc mmc_static;
39 struct mmc *find_mmc_device(int dev_num)
44 void mmc_do_preinit(void)
46 struct mmc *m = &mmc_static;
47 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
48 mmc_set_preinit(m, 1);
54 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
56 return &mmc->block_dev;
60 #if !CONFIG_IS_ENABLED(DM_MMC)
62 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
63 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
69 __weak int board_mmc_getwp(struct mmc *mmc)
74 int mmc_getwp(struct mmc *mmc)
78 wp = board_mmc_getwp(mmc);
81 if (mmc->cfg->ops->getwp)
82 wp = mmc->cfg->ops->getwp(mmc);
90 __weak int board_mmc_getcd(struct mmc *mmc)
96 #ifdef CONFIG_MMC_TRACE
97 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
99 printf("CMD_SEND:%d\n", cmd->cmdidx);
100 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
103 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
109 printf("\t\tRET\t\t\t %d\n", ret);
111 switch (cmd->resp_type) {
113 printf("\t\tMMC_RSP_NONE\n");
116 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
120 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
124 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
126 printf("\t\t \t\t 0x%08X \n",
128 printf("\t\t \t\t 0x%08X \n",
130 printf("\t\t \t\t 0x%08X \n",
133 printf("\t\t\t\t\tDUMPING DATA\n");
134 for (i = 0; i < 4; i++) {
136 printf("\t\t\t\t\t%03d - ", i*4);
137 ptr = (u8 *)&cmd->response[i];
139 for (j = 0; j < 4; j++)
140 printf("%02X ", *ptr--);
145 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
149 printf("\t\tERROR MMC rsp not supported\n");
155 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
159 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
160 printf("CURR STATE:%d\n", status);
164 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
165 const char *mmc_mode_name(enum bus_mode mode)
167 static const char *const names[] = {
168 [MMC_LEGACY] = "MMC legacy",
169 [SD_LEGACY] = "SD Legacy",
170 [MMC_HS] = "MMC High Speed (26MHz)",
171 [SD_HS] = "SD High Speed (50MHz)",
172 [UHS_SDR12] = "UHS SDR12 (25MHz)",
173 [UHS_SDR25] = "UHS SDR25 (50MHz)",
174 [UHS_SDR50] = "UHS SDR50 (100MHz)",
175 [UHS_SDR104] = "UHS SDR104 (208MHz)",
176 [UHS_DDR50] = "UHS DDR50 (50MHz)",
177 [MMC_HS_52] = "MMC High Speed (52MHz)",
178 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
179 [MMC_HS_200] = "HS200 (200MHz)",
182 if (mode >= MMC_MODES_END)
183 return "Unknown mode";
189 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
191 static const int freqs[] = {
192 [SD_LEGACY] = 25000000,
195 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
196 [UHS_SDR12] = 25000000,
197 [UHS_SDR25] = 50000000,
198 [UHS_SDR50] = 100000000,
199 [UHS_DDR50] = 50000000,
200 #ifdef MMC_SUPPORTS_TUNING
201 [UHS_SDR104] = 208000000,
204 [MMC_HS_52] = 52000000,
205 [MMC_DDR_52] = 52000000,
206 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
207 [MMC_HS_200] = 200000000,
211 if (mode == MMC_LEGACY)
212 return mmc->legacy_speed;
213 else if (mode >= MMC_MODES_END)
219 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
221 mmc->selected_mode = mode;
222 mmc->tran_speed = mmc_mode2freq(mmc, mode);
223 mmc->ddr_mode = mmc_is_mode_ddr(mode);
224 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
225 mmc->tran_speed / 1000000);
229 #if !CONFIG_IS_ENABLED(DM_MMC)
230 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
234 mmmc_trace_before_send(mmc, cmd);
235 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
236 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data
 * and not in the programming state, or until an error/timeout.
 * NOTE(review): fragment -- the polling loop structure and return paths
 * are elided in this extract; only the visible statements are documented.
 */
242 int mmc_send_status(struct mmc *mmc, int timeout)
245 int err, retries = 5;
247 cmd.cmdidx = MMC_CMD_SEND_STATUS;
248 cmd.resp_type = MMC_RSP_R1;
/* RCA is only meaningful in native MMC/SD mode, not over SPI */
249 if (!mmc_host_is_spi(mmc))
250 cmd.cmdarg = mmc->rca << 16;
253 err = mmc_send_cmd(mmc, &cmd, NULL);
/* Card is usable once RDY_FOR_DATA is set and it left the busy state */
255 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
256 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
/* Any error bit in the status word aborts the wait */
260 if (cmd.response[0] & MMC_STATUS_MASK) {
261 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
262 pr_err("Status Error: 0x%08X\n",
/* A failed CMD13 itself is retried a few times before giving up */
267 } else if (--retries < 0)
276 mmc_trace_state(mmc, &cmd);
278 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
279 pr_err("Timeout waiting card ready\n");
287 int mmc_set_blocklen(struct mmc *mmc, int len)
295 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
296 cmd.resp_type = MMC_RSP_R1;
299 err = mmc_send_cmd(mmc, &cmd, NULL);
301 #ifdef CONFIG_MMC_QUIRKS
302 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
305 * It has been seen that SET_BLOCKLEN may fail on the first
306 * attempt, let's try a few more time
309 err = mmc_send_cmd(mmc, &cmd, NULL);
319 #ifdef MMC_SUPPORTS_TUNING
320 static const u8 tuning_blk_pattern_4bit[] = {
321 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
322 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
323 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
324 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
325 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
326 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
327 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
328 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
331 static const u8 tuning_blk_pattern_8bit[] = {
332 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
333 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
334 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
335 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
336 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
337 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
338 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
339 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
340 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
341 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
342 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
343 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
344 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
345 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
346 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
347 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Issue a tuning command (@opcode) and read back the fixed tuning block,
 * comparing it against the expected pattern for the current bus width.
 * Returns non-zero via memcmp mismatch path when tuning data is corrupt.
 * NOTE(review): fragment -- the 1-bit-width fallback, cmd.cmdidx
 * assignment and final return are elided in this extract.
 */
350 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
353 struct mmc_data data;
354 const u8 *tuning_block_pattern;
/* 8-bit bus uses the 128-byte pattern, 4-bit the 64-byte pattern */
357 if (mmc->bus_width == 8) {
358 tuning_block_pattern = tuning_blk_pattern_8bit;
359 size = sizeof(tuning_blk_pattern_8bit);
360 } else if (mmc->bus_width == 4) {
361 tuning_block_pattern = tuning_blk_pattern_4bit;
362 size = sizeof(tuning_blk_pattern_4bit);
/* DMA-capable, cache-line aligned receive buffer */
367 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
371 cmd.resp_type = MMC_RSP_R1;
373 data.dest = (void *)data_buf;
375 data.blocksize = size;
376 data.flags = MMC_DATA_READ;
378 err = mmc_send_cmd(mmc, &cmd, &data);
/* Tuning succeeded only if the received block matches the spec pattern */
382 if (memcmp(data_buf, tuning_block_pattern, size))
389 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
393 struct mmc_data data;
396 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
398 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
400 if (mmc->high_capacity)
403 cmd.cmdarg = start * mmc->read_bl_len;
405 cmd.resp_type = MMC_RSP_R1;
408 data.blocks = blkcnt;
409 data.blocksize = mmc->read_bl_len;
410 data.flags = MMC_DATA_READ;
412 if (mmc_send_cmd(mmc, &cmd, &data))
416 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
418 cmd.resp_type = MMC_RSP_R1b;
419 if (mmc_send_cmd(mmc, &cmd, NULL)) {
420 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
421 pr_err("mmc fail to send stop cmd\n");
430 #if CONFIG_IS_ENABLED(BLK)
431 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
433 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
437 #if CONFIG_IS_ENABLED(BLK)
438 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
440 int dev_num = block_dev->devnum;
442 lbaint_t cur, blocks_todo = blkcnt;
447 struct mmc *mmc = find_mmc_device(dev_num);
451 if (CONFIG_IS_ENABLED(MMC_TINY))
452 err = mmc_switch_part(mmc, block_dev->hwpart);
454 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
459 if ((start + blkcnt) > block_dev->lba) {
460 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
461 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
462 start + blkcnt, block_dev->lba);
467 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
468 debug("%s: Failed to set blocklen\n", __func__);
473 cur = (blocks_todo > mmc->cfg->b_max) ?
474 mmc->cfg->b_max : blocks_todo;
475 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
476 debug("%s: Failed to read blocks\n", __func__);
481 dst += cur * mmc->read_bl_len;
482 } while (blocks_todo > 0);
487 static int mmc_go_idle(struct mmc *mmc)
494 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
496 cmd.resp_type = MMC_RSP_NONE;
498 err = mmc_send_cmd(mmc, &cmd, NULL);
508 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
509 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
515 * Send CMD11 only if the request is to switch the card to
518 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
519 return mmc_set_signal_voltage(mmc, signal_voltage);
521 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
523 cmd.resp_type = MMC_RSP_R1;
525 err = mmc_send_cmd(mmc, &cmd, NULL);
529 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
533 * The card should drive cmd and dat[0:3] low immediately
534 * after the response of cmd11, but wait 100 us to be sure
536 err = mmc_wait_dat0(mmc, 0, 100);
543 * During a signal voltage level switch, the clock must be gated
544 * for 5 ms according to the SD spec
546 mmc_set_clock(mmc, mmc->clock, true);
548 err = mmc_set_signal_voltage(mmc, signal_voltage);
552 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
554 mmc_set_clock(mmc, mmc->clock, false);
557 * Failure to switch is indicated by the card holding
558 * dat[0:3] low. Wait for at least 1 ms according to spec
560 err = mmc_wait_dat0(mmc, 1, 1000);
570 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
577 cmd.cmdidx = MMC_CMD_APP_CMD;
578 cmd.resp_type = MMC_RSP_R1;
581 err = mmc_send_cmd(mmc, &cmd, NULL);
586 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
587 cmd.resp_type = MMC_RSP_R3;
590 * Most cards do not answer if some reserved bits
591 * in the ocr are set. However, Some controller
592 * can set bit 7 (reserved for low voltages), but
593 * how to manage low voltages SD card is not yet
596 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
597 (mmc->cfg->voltages & 0xff8000);
599 if (mmc->version == SD_VERSION_2)
600 cmd.cmdarg |= OCR_HCS;
603 cmd.cmdarg |= OCR_S18R;
605 err = mmc_send_cmd(mmc, &cmd, NULL);
610 if (cmd.response[0] & OCR_BUSY)
619 if (mmc->version != SD_VERSION_2)
620 mmc->version = SD_VERSION_1_0;
622 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
623 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
624 cmd.resp_type = MMC_RSP_R3;
627 err = mmc_send_cmd(mmc, &cmd, NULL);
633 mmc->ocr = cmd.response[0];
635 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
636 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
638 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
644 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * One iteration of CMD1 (SEND_OP_COND): sends the command and stores the
 * returned OCR in mmc->ocr. When @use_arg is set (and not in SPI mode)
 * the argument advertises host voltage window and sector-mode support.
 * NOTE(review): fragment -- cmd declaration, error return and final
 * return are elided in this extract.
 */
650 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
655 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
656 cmd.resp_type = MMC_RSP_R3;
/* Intersect host voltages with what the card reported on the probe pass */
658 if (use_arg && !mmc_host_is_spi(mmc))
659 cmd.cmdarg = OCR_HCS |
660 (mmc->cfg->voltages &
661 (mmc->ocr & OCR_VOLTAGE_MASK)) |
662 (mmc->ocr & OCR_ACCESS_MODE);
664 err = mmc_send_cmd(mmc, &cmd, NULL);
/* Cache the OCR so callers can test OCR_BUSY / capacity bits */
667 mmc->ocr = cmd.response[0];
671 static int mmc_send_op_cond(struct mmc *mmc)
675 /* Some cards seem to need this */
678 /* Asking to the card its capabilities */
679 for (i = 0; i < 2; i++) {
680 err = mmc_send_op_cond_iter(mmc, i != 0);
684 /* exit if not busy (flag seems to be inverted) */
685 if (mmc->ocr & OCR_BUSY)
688 mmc->op_cond_pending = 1;
692 static int mmc_complete_op_cond(struct mmc *mmc)
699 mmc->op_cond_pending = 0;
700 if (!(mmc->ocr & OCR_BUSY)) {
701 /* Some cards seem to need this */
704 start = get_timer(0);
706 err = mmc_send_op_cond_iter(mmc, 1);
709 if (mmc->ocr & OCR_BUSY)
711 if (get_timer(start) > timeout)
717 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
718 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
719 cmd.resp_type = MMC_RSP_R3;
722 err = mmc_send_cmd(mmc, &cmd, NULL);
727 mmc->ocr = cmd.response[0];
730 mmc->version = MMC_VERSION_UNKNOWN;
732 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * Read the card's 512-byte extended CSD register (CMD8 for eMMC) into
 * @ext_csd, which must be MMC_MAX_BLOCK_LEN bytes and cache-aligned.
 * NOTE(review): fragment -- cmd declaration and return are elided.
 */
739 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
742 struct mmc_data data;
/* Get the extended CSD register as a single data block */
746 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
747 cmd.resp_type = MMC_RSP_R1;
750 data.dest = (char *)ext_csd;
752 data.blocksize = MMC_MAX_BLOCK_LEN;
753 data.flags = MMC_DATA_READ;
755 err = mmc_send_cmd(mmc, &cmd, &data);
760 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
767 cmd.cmdidx = MMC_CMD_SWITCH;
768 cmd.resp_type = MMC_RSP_R1b;
769 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
773 while (retries > 0) {
774 ret = mmc_send_cmd(mmc, &cmd, NULL);
776 /* Waiting for the ready status */
778 ret = mmc_send_status(mmc, timeout);
789 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
794 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
800 speed_bits = EXT_CSD_TIMING_HS;
803 speed_bits = EXT_CSD_TIMING_HS200;
806 speed_bits = EXT_CSD_TIMING_LEGACY;
811 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
816 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
817 /* Now check to see that it worked */
818 err = mmc_send_ext_csd(mmc, test_csd);
822 /* No high-speed support */
823 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Derive mmc->card_caps (bus widths and speed modes) from the cached
 * EXT_CSD CARD_TYPE byte. SPI hosts and pre-v4 cards keep only the
 * legacy 1-bit capability set up at the top.
 * NOTE(review): fragment -- several early returns and closing braces
 * are elided in this extract.
 */
830 static int mmc_get_capabilities(struct mmc *mmc)
832 u8 *ext_csd = mmc->ext_csd;
/* Baseline every card supports */
835 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
837 if (mmc_host_is_spi(mmc))
840 /* Only version 4 supports high-speed */
841 if (mmc->version < MMC_VERSION_4)
845 pr_err("No ext_csd found!\n"); /* this should never happen */
849 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
/* Low 6 bits of CARD_TYPE encode the supported device types */
851 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
852 mmc->cardtype = cardtype;
/* HS200 capable at either 1.2V or 1.8V I/O */
854 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
855 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
856 mmc->card_caps |= MMC_MODE_HS200;
/* 52 MHz cards may additionally support DDR at 52 MHz */
858 if (cardtype & EXT_CSD_CARD_TYPE_52) {
859 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
860 mmc->card_caps |= MMC_MODE_DDR_52MHz;
861 mmc->card_caps |= MMC_MODE_HS_52MHz;
863 if (cardtype & EXT_CSD_CARD_TYPE_26)
864 mmc->card_caps |= MMC_MODE_HS;
869 static int mmc_set_capacity(struct mmc *mmc, int part_num)
873 mmc->capacity = mmc->capacity_user;
877 mmc->capacity = mmc->capacity_boot;
880 mmc->capacity = mmc->capacity_rpmb;
886 mmc->capacity = mmc->capacity_gp[part_num - 4];
892 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
897 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
898 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
903 if (part_num & PART_ACCESS_MASK)
904 forbidden = MMC_CAP(MMC_HS_200);
906 if (MMC_CAP(mmc->selected_mode) & forbidden) {
907 debug("selected mode (%s) is forbidden for part %d\n",
908 mmc_mode_name(mmc->selected_mode), part_num);
910 } else if (mmc->selected_mode != mmc->best_mode) {
911 debug("selected mode is not optimal\n");
916 return mmc_select_mode_and_width(mmc,
917 mmc->card_caps & ~forbidden);
922 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
923 unsigned int part_num)
929 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
933 ret = mmc_boot_part_access_chk(mmc, part_num);
937 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
938 (mmc->part_config & ~PART_ACCESS_MASK)
939 | (part_num & PART_ACCESS_MASK));
942 * Set the capacity if the switch succeeded or was intended
943 * to return to representing the raw device.
945 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
946 ret = mmc_set_capacity(mmc, part_num);
947 mmc_get_blk_desc(mmc)->hwpart = part_num;
953 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
954 int mmc_hwpart_config(struct mmc *mmc,
955 const struct mmc_hwpart_conf *conf,
956 enum mmc_hwpart_conf_mode mode)
962 u32 max_enh_size_mult;
963 u32 tot_enh_size_mult = 0;
966 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
968 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
971 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
972 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
976 if (!(mmc->part_support & PART_SUPPORT)) {
977 pr_err("Card does not support partitioning\n");
981 if (!mmc->hc_wp_grp_size) {
982 pr_err("Card does not define HC WP group size\n");
986 /* check partition alignment and total enhanced size */
987 if (conf->user.enh_size) {
988 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
989 conf->user.enh_start % mmc->hc_wp_grp_size) {
990 pr_err("User data enhanced area not HC WP group "
994 part_attrs |= EXT_CSD_ENH_USR;
995 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
996 if (mmc->high_capacity) {
997 enh_start_addr = conf->user.enh_start;
999 enh_start_addr = (conf->user.enh_start << 9);
1005 tot_enh_size_mult += enh_size_mult;
1007 for (pidx = 0; pidx < 4; pidx++) {
1008 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1009 pr_err("GP%i partition not HC WP group size "
1010 "aligned\n", pidx+1);
1013 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1014 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1015 part_attrs |= EXT_CSD_ENH_GP(pidx);
1016 tot_enh_size_mult += gp_size_mult[pidx];
1020 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1021 pr_err("Card does not support enhanced attribute\n");
1022 return -EMEDIUMTYPE;
1025 err = mmc_send_ext_csd(mmc, ext_csd);
1030 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1031 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1032 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1033 if (tot_enh_size_mult > max_enh_size_mult) {
1034 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1035 tot_enh_size_mult, max_enh_size_mult);
1036 return -EMEDIUMTYPE;
1039 /* The default value of EXT_CSD_WR_REL_SET is device
1040 * dependent, the values can only be changed if the
1041 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1042 * changed only once and before partitioning is completed. */
1043 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1044 if (conf->user.wr_rel_change) {
1045 if (conf->user.wr_rel_set)
1046 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1048 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1050 for (pidx = 0; pidx < 4; pidx++) {
1051 if (conf->gp_part[pidx].wr_rel_change) {
1052 if (conf->gp_part[pidx].wr_rel_set)
1053 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1055 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1059 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1060 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1061 puts("Card does not support host controlled partition write "
1062 "reliability settings\n");
1063 return -EMEDIUMTYPE;
1066 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1067 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1068 pr_err("Card already partitioned\n");
1072 if (mode == MMC_HWPART_CONF_CHECK)
1075 /* Partitioning requires high-capacity size definitions */
1076 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1077 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1078 EXT_CSD_ERASE_GROUP_DEF, 1);
1083 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1085 /* update erase group size to be high-capacity */
1086 mmc->erase_grp_size =
1087 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1091 /* all OK, write the configuration */
1092 for (i = 0; i < 4; i++) {
1093 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1094 EXT_CSD_ENH_START_ADDR+i,
1095 (enh_start_addr >> (i*8)) & 0xFF);
1099 for (i = 0; i < 3; i++) {
1100 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1101 EXT_CSD_ENH_SIZE_MULT+i,
1102 (enh_size_mult >> (i*8)) & 0xFF);
1106 for (pidx = 0; pidx < 4; pidx++) {
1107 for (i = 0; i < 3; i++) {
1108 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1109 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1110 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1115 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1116 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1120 if (mode == MMC_HWPART_CONF_SET)
1123 /* The WR_REL_SET is a write-once register but shall be
1124 * written before setting PART_SETTING_COMPLETED. As it is
1125 * write-once we can only write it when completing the
1127 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1128 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1129 EXT_CSD_WR_REL_SET, wr_rel_set);
1134 /* Setting PART_SETTING_COMPLETED confirms the partition
1135 * configuration but it only becomes effective after power
1136 * cycle, so we do not adjust the partition related settings
1137 * in the mmc struct. */
1139 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1140 EXT_CSD_PARTITION_SETTING,
1141 EXT_CSD_PARTITION_SETTING_COMPLETED);
1149 #if !CONFIG_IS_ENABLED(DM_MMC)
1150 int mmc_getcd(struct mmc *mmc)
1154 cd = board_mmc_getcd(mmc);
1157 if (mmc->cfg->ops->getcd)
1158 cd = mmc->cfg->ops->getcd(mmc);
/*
 * Issue SD CMD6 (SWITCH_FUNC): set function @value for function @group,
 * in check (mode=0) or set (mode=1) mode, reading the 64-byte switch
 * status block into @resp.
 * NOTE(review): fragment -- cmd declaration and data.blocks setup are
 * elided. Also note (mode << 31) left-shifts a signed int into the sign
 * bit when mode==1, which is UB in standard C; upstream relies on
 * implementation-defined behavior here -- worth a (u32) cast if touched.
 */
1167 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1170 struct mmc_data data;
/* Switch the frequency */
1173 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1174 cmd.resp_type = MMC_RSP_R1;
/* Default all 6 function groups to 0xf (no change), then patch @group */
1175 cmd.cmdarg = (mode << 31) | 0xffffff;
1176 cmd.cmdarg &= ~(0xf << (group * 4));
1177 cmd.cmdarg |= value << (group * 4);
1179 data.dest = (char *)resp;
1180 data.blocksize = 64;
1182 data.flags = MMC_DATA_READ;
1184 return mmc_send_cmd(mmc, &cmd, &data);
1188 static int sd_get_capabilities(struct mmc *mmc)
1192 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1193 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1194 struct mmc_data data;
1196 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1200 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1202 if (mmc_host_is_spi(mmc))
1205 /* Read the SCR to find out if this card supports higher speeds */
1206 cmd.cmdidx = MMC_CMD_APP_CMD;
1207 cmd.resp_type = MMC_RSP_R1;
1208 cmd.cmdarg = mmc->rca << 16;
1210 err = mmc_send_cmd(mmc, &cmd, NULL);
1215 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1216 cmd.resp_type = MMC_RSP_R1;
1222 data.dest = (char *)scr;
1225 data.flags = MMC_DATA_READ;
1227 err = mmc_send_cmd(mmc, &cmd, &data);
1236 mmc->scr[0] = __be32_to_cpu(scr[0]);
1237 mmc->scr[1] = __be32_to_cpu(scr[1]);
1239 switch ((mmc->scr[0] >> 24) & 0xf) {
1241 mmc->version = SD_VERSION_1_0;
1244 mmc->version = SD_VERSION_1_10;
1247 mmc->version = SD_VERSION_2;
1248 if ((mmc->scr[0] >> 15) & 0x1)
1249 mmc->version = SD_VERSION_3;
1252 mmc->version = SD_VERSION_1_0;
1256 if (mmc->scr[0] & SD_DATA_4BIT)
1257 mmc->card_caps |= MMC_MODE_4BIT;
1259 /* Version 1.0 doesn't support switching */
1260 if (mmc->version == SD_VERSION_1_0)
1265 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1266 (u8 *)switch_status);
1271 /* The high-speed function is busy. Try again */
1272 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1276 /* If high-speed isn't supported, we return */
1277 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1278 mmc->card_caps |= MMC_CAP(SD_HS);
1280 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1281 /* Version before 3.0 don't support UHS modes */
1282 if (mmc->version < SD_VERSION_3)
1285 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1286 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1287 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1288 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1289 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1290 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1291 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1292 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1293 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1294 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1295 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1301 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1305 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1311 speed = UHS_SDR12_BUS_SPEED;
1315 speed = UHS_SDR25_BUS_SPEED;
1318 speed = UHS_SDR50_BUS_SPEED;
1321 speed = UHS_DDR50_BUS_SPEED;
1324 speed = UHS_SDR104_BUS_SPEED;
1330 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1334 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
1340 int sd_select_bus_width(struct mmc *mmc, int w)
1345 if ((w != 4) && (w != 1))
1348 cmd.cmdidx = MMC_CMD_APP_CMD;
1349 cmd.resp_type = MMC_RSP_R1;
1350 cmd.cmdarg = mmc->rca << 16;
1352 err = mmc_send_cmd(mmc, &cmd, NULL);
1356 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1357 cmd.resp_type = MMC_RSP_R1;
1362 err = mmc_send_cmd(mmc, &cmd, NULL);
/*
 * Read the 64-byte SD Status register (ACMD13) and decode the erase
 * characteristics: allocation unit size (via sd_au_size[]), erase
 * timeout and erase offset, stored into mmc->ssr.
 * NOTE(review): fragment -- retry loop, data.blocks setup and returns
 * are elided in this extract.
 */
1369 static int sd_read_ssr(struct mmc *mmc)
1373 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1374 struct mmc_data data;
1376 unsigned int au, eo, et, es;
/* ACMD prefix: CMD55 with our RCA selects application-command mode */
1378 cmd.cmdidx = MMC_CMD_APP_CMD;
1379 cmd.resp_type = MMC_RSP_R1;
1380 cmd.cmdarg = mmc->rca << 16;
1382 err = mmc_send_cmd(mmc, &cmd, NULL);
1386 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1387 cmd.resp_type = MMC_RSP_R1;
1391 data.dest = (char *)ssr;
1392 data.blocksize = 64;
1394 data.flags = MMC_DATA_READ;
1396 err = mmc_send_cmd(mmc, &cmd, &data);
/* SD Status is big-endian on the wire; convert in place */
1404 for (i = 0; i < 16; i++)
1405 ssr[i] = be32_to_cpu(ssr[i]);
/* AU_SIZE code; codes >9 are only defined from SD spec 3.0 onward */
1407 au = (ssr[2] >> 12) & 0xF;
1408 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1409 mmc->ssr.au = sd_au_size[au];
1410 es = (ssr[3] >> 24) & 0xFF;
1411 es |= (ssr[2] & 0xFF) << 8;
1412 et = (ssr[3] >> 18) & 0x3F;
1414 eo = (ssr[3] >> 16) & 0x3;
/* erase_timeout in ms per AU; NOTE(review): divides by es -- the elided
 * code presumably guards es != 0, confirm upstream */
1415 mmc->ssr.erase_timeout = (et * 1000) / es;
1416 mmc->ssr.erase_offset = eo * 1000;
1419 debug("Invalid Allocation Unit Size.\n");
1425 /* frequency bases */
1426 /* divided by 10 to be nice to platforms without floating point */
1427 static const int fbase[] = {
1434 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1435 * to platforms without floating point.
1437 static const u8 multipliers[] = {
/*
 * Map a single bus-width capability flag to its width in bits
 * (8/4/1); warns on any other value.
 * NOTE(review): the warning string misspells "width" as "witdh" --
 * fixing it is a code (string literal) change, flagged here only.
 * Fragment: the return statements are elided in this extract.
 */
1456 static inline int bus_width(uint cap)
1458 if (cap == MMC_MODE_8BIT)
1460 if (cap == MMC_MODE_4BIT)
1462 if (cap == MMC_MODE_1BIT)
1464 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1468 #if !CONFIG_IS_ENABLED(DM_MMC)
1469 #ifdef MMC_SUPPORTS_TUNING
1470 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1476 static void mmc_send_init_stream(struct mmc *mmc)
1480 static int mmc_set_ios(struct mmc *mmc)
1484 if (mmc->cfg->ops->set_ios)
1485 ret = mmc->cfg->ops->set_ios(mmc);
/*
 * Request a bus clock of @clock Hz, clamped to the host controller's
 * [f_min, f_max] range, optionally gating the clock via @disable, then
 * push the new settings to the host with mmc_set_ios().
 * NOTE(review): fragment -- the assignment of mmc->clock itself is
 * elided in this extract.
 */
1491 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
/* Clamp to the controller's supported frequency window */
1493 if (clock > mmc->cfg->f_max)
1494 clock = mmc->cfg->f_max;
1496 if (clock < mmc->cfg->f_min)
1497 clock = mmc->cfg->f_min;
1500 mmc->clk_disable = disable;
1502 return mmc_set_ios(mmc);
1505 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1507 mmc->bus_width = width;
1509 return mmc_set_ios(mmc);
1512 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1514 * helper function to display the capabilities in a human
1515 * friendly manner. The capabilities include bus width and
1518 void mmc_dump_capabilities(const char *text, uint caps)
1522 printf("%s: widths [", text);
1523 if (caps & MMC_MODE_8BIT)
1525 if (caps & MMC_MODE_4BIT)
1527 if (caps & MMC_MODE_1BIT)
1529 printf("\b\b] modes [");
1530 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1531 if (MMC_CAP(mode) & caps)
1532 printf("%s, ", mmc_mode_name(mode));
1537 struct mode_width_tuning {
1540 #ifdef MMC_SUPPORTS_TUNING
1545 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Convert an mmc_voltage enum value to millivolts (0/3300/1800/1200).
 * NOTE(review): fragment -- the switch header, default case and closing
 * brace are elided in this extract.
 */
1546 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1549 case MMC_SIGNAL_VOLTAGE_000: return 0;
1550 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1551 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1552 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1557 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1561 if (mmc->signal_voltage == signal_voltage)
1564 mmc->signal_voltage = signal_voltage;
1565 err = mmc_set_ios(mmc);
1567 debug("unable to set voltage (err %d)\n", err);
1572 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1578 static const struct mode_width_tuning sd_modes_by_pref[] = {
1579 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1580 #ifdef MMC_SUPPORTS_TUNING
1583 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1584 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1589 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1593 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1597 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1602 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1604 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1607 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1612 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1616 #define for_each_sd_mode_by_pref(caps, mwt) \
1617 for (mwt = sd_modes_by_pref;\
1618 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1620 if (caps & MMC_CAP(mwt->mode))
1622 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1625 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1626 const struct mode_width_tuning *mwt;
1627 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1628 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1630 bool uhs_en = false;
1635 mmc_dump_capabilities("sd card", card_caps);
1636 mmc_dump_capabilities("host", mmc->host_caps);
1639 /* Restrict card's capabilities by what the host can do */
1640 caps = card_caps & mmc->host_caps;
1645 for_each_sd_mode_by_pref(caps, mwt) {
1648 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1649 if (*w & caps & mwt->widths) {
1650 debug("trying mode %s width %d (at %d MHz)\n",
1651 mmc_mode_name(mwt->mode),
1653 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1655 /* configure the bus width (card + host) */
1656 err = sd_select_bus_width(mmc, bus_width(*w));
1659 mmc_set_bus_width(mmc, bus_width(*w));
1661 /* configure the bus mode (card) */
1662 err = sd_set_card_speed(mmc, mwt->mode);
1666 /* configure the bus mode (host) */
1667 mmc_select_mode(mmc, mwt->mode);
1668 mmc_set_clock(mmc, mmc->tran_speed, false);
1670 #ifdef MMC_SUPPORTS_TUNING
1671 /* execute tuning if needed */
1672 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1673 err = mmc_execute_tuning(mmc,
1676 debug("tuning failed\n");
1682 err = sd_read_ssr(mmc);
1686 pr_warn("bad ssr\n");
1689 /* revert to a safer bus speed */
1690 mmc_select_mode(mmc, SD_LEGACY);
1691 mmc_set_clock(mmc, mmc->tran_speed, false);
1696 printf("unable to select a mode\n");
1701 * read the compare the part of ext csd that is constant.
1702 * This can be used to check that the transfer is working
1705 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1708 const u8 *ext_csd = mmc->ext_csd;
1709 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1711 if (mmc->version < MMC_VERSION_4)
1714 err = mmc_send_ext_csd(mmc, test_csd);
1718 /* Only compare read only fields */
1719 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1720 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1721 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1722 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1723 ext_csd[EXT_CSD_REV]
1724 == test_csd[EXT_CSD_REV] &&
1725 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1726 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1727 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1728 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1734 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - pick the lowest usable I/O signal voltage.
 *
 * Builds a mask of signal voltages the card supports for @mode from the
 * EXT_CSD card-type bits, then walks the intersection with @allowed_mask
 * lowest-set-bit-first (ffs), dropping each candidate that
 * mmc_set_signal_voltage() rejects.
 * NOTE(review): lowest bit appears to correspond to lowest voltage
 * (1.2V < 1.8V < 3.3V) — confirm against the mmc_voltage enum values.
 */
1735 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1736 uint32_t allowed_mask)
/* HS200 is specified at 1.8V and/or 1.2V signalling only */
1742 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
1743 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1744 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
1745 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* DDR52: 3.3V/1.8V card type, optionally 1.2V */
1748 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1749 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1750 MMC_SIGNAL_VOLTAGE_180;
1751 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1752 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* remaining (legacy/HS) modes run at 3.3V */
1755 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* try candidates until one sticks; drop each one that fails */
1759 while (card_mask & allowed_mask) {
1760 enum mmc_voltage best_match;
1762 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1763 if (!mmc_set_signal_voltage(mmc, best_match))
1766 allowed_mask &= ~best_match;
/* !MMC_IO_VOLTAGE: stub that accepts the current voltage as-is */
1772 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1773 uint32_t allowed_mask)
/*
 * Bus modes in order of decreasing preference; the mode/width selection
 * loop below tries the fastest mode the host and card both advertise
 * first.  Each entry lists the bus widths usable in that mode and, for
 * HS200, the tuning command to run after switching.
 * NOTE(review): the .mode initializers for each entry are not visible
 * in this excerpt (sampled listing).
 */
1779 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1780 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1783 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1784 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1789 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1793 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1797 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1801 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* iterate over the table, visiting only modes present in @caps */
1805 #define for_each_mmc_mode_by_pref(caps, mwt) \
1806 for (mwt = mmc_modes_by_pref;\
1807 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1809 if (caps & MMC_CAP(mwt->mode))
/*
 * Map from host capability bit + DDR flag to the EXT_CSD BUS_WIDTH
 * value to program into the card, widest first so the selection loop
 * prefers 8-bit over 4-bit over 1-bit.
 */
1811 static const struct ext_csd_bus_width {
1815 } ext_csd_bus_width[] = {
1816 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1817 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1818 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1819 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1820 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
/* iterate widths supported in @caps, filtered by DDR/SDR flag @ddr */
1823 #define for_each_supported_width(caps, ddr, ecbv) \
1824 for (ecbv = ext_csd_bus_width;\
1825 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1827 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - negotiate the best eMMC bus mode/width.
 *
 * Intersects @card_caps with the host capabilities, then walks
 * modes fastest-first and widths widest-first; for each candidate it
 * sets the lowest workable signal voltage, programs the bus width and
 * speed on the card, switches the host, optionally runs tuning, and
 * validates the result with mmc_read_and_compare_ext_csd().  On any
 * failure it reverts to 1-bit legacy mode and tries the next candidate.
 * Returns an error only if no mode/width combination works.
 */
1829 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1832 const struct mode_width_tuning *mwt;
1833 const struct ext_csd_bus_width *ecbw;
1836 mmc_dump_capabilities("mmc", card_caps);
1837 mmc_dump_capabilities("host", mmc->host_caps);
1840 /* Restrict card's capabilities by what the host can do */
1841 card_caps &= mmc->host_caps;
1843 /* Only version 4 of MMC supports wider bus widths */
1844 if (mmc->version < MMC_VERSION_4)
1847 if (!mmc->ext_csd) {
1848 debug("No ext_csd found!\n"); /* this should never happen */
/* start the search from a safe legacy clock */
1852 mmc_set_clock(mmc, mmc->legacy_speed, false);
1854 for_each_mmc_mode_by_pref(card_caps, mwt) {
1855 for_each_supported_width(card_caps & mwt->widths,
1856 mmc_is_mode_ddr(mwt->mode), ecbw) {
1857 enum mmc_voltage old_voltage;
1858 debug("trying mode %s width %d (at %d MHz)\n",
1859 mmc_mode_name(mwt->mode),
1860 bus_width(ecbw->cap),
1861 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so a failure can restore it */
1862 old_voltage = mmc->signal_voltage;
1863 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1864 MMC_ALL_SIGNAL_VOLTAGE);
1868 /* configure the bus width (card + host) */
1869 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1871 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1874 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1876 /* configure the bus speed (card) */
1877 err = mmc_set_card_speed(mmc, mwt->mode);
1882 * configure the bus width AND the ddr mode (card)
1883 * The host side will be taken care of in the next step
1885 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1886 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1888 ecbw->ext_csd_bits);
1893 /* configure the bus mode (host) */
1894 mmc_select_mode(mmc, mwt->mode);
1895 mmc_set_clock(mmc, mmc->tran_speed, false);
1896 #ifdef MMC_SUPPORTS_TUNING
1898 /* execute tuning if needed */
1900 err = mmc_execute_tuning(mmc, mwt->tuning);
1902 debug("tuning failed\n");
1908 /* do a transfer to check the configuration */
1909 err = mmc_read_and_compare_ext_csd(mmc);
1913 mmc_set_signal_voltage(mmc, old_voltage);
1914 /* if an error occurred, revert to a safer bus mode */
1915 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1916 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1917 mmc_select_mode(mmc, MMC_LEGACY);
1918 mmc_set_bus_width(mmc, 1);
/* every candidate failed */
1922 pr_err("unable to select a mode\n");
/*
 * mmc_startup_v4() - MMC v4+ specific startup: parse EXT_CSD.
 *
 * No-op for SD cards and MMC below v4.  Reads EXT_CSD, caches a copy in
 * mmc->ext_csd, and derives from it: the precise card version, user/boot/
 * RPMB/GP partition capacities, partition attributes, erase and
 * write-protect group sizes, and the write-reliability settings.
 * Also enables ERASE_GROUP_DEF on partitioned devices (the bit is
 * volatile across power cycles).  On error, frees nothing visible here
 * but clears mmc->ext_csd (cleanup path at the end of the excerpt).
 */
1927 static int mmc_startup_v4(struct mmc *mmc)
1931 bool has_parts = false;
1932 bool part_completed;
1933 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
/* only MMC v4+ has an EXT_CSD register */
1935 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1938 /* check ext_csd version and capacity */
1939 err = mmc_send_ext_csd(mmc, ext_csd);
1943 /* store the ext csd for future reference */
1945 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN)
1948 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
1950 if (ext_csd[EXT_CSD_REV] >= 2) {
1952 * According to the JEDEC Standard, the value of
1953 * ext_csd's capacity is valid if the value is more
/* assemble little-endian 32-bit SEC_CNT into sectors */
1956 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1957 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1958 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1959 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1960 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT only authoritative above 2GiB (see also 2GiB rule below) */
1961 if ((capacity >> 20) > 2 * 1024)
1962 mmc->capacity_user = capacity;
/* map EXT_CSD_REV to the exact spec version */
1965 switch (ext_csd[EXT_CSD_REV]) {
1967 mmc->version = MMC_VERSION_4_1;
1970 mmc->version = MMC_VERSION_4_2;
1973 mmc->version = MMC_VERSION_4_3;
1976 mmc->version = MMC_VERSION_4_41;
1979 mmc->version = MMC_VERSION_4_5;
1982 mmc->version = MMC_VERSION_5_0;
1985 mmc->version = MMC_VERSION_5_1;
1989 /* The partition data may be non-zero but it is only
1990 * effective if PARTITION_SETTING_COMPLETED is set in
1991 * EXT_CSD, so ignore any data if this bit is not set,
1992 * except for enabling the high-capacity group size
1993 * definition (see below).
1995 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1996 EXT_CSD_PARTITION_SETTING_COMPLETED);
1998 /* store the partition info of emmc */
1999 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2000 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2001 ext_csd[EXT_CSD_BOOT_MULT])
2002 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2003 if (part_completed &&
2004 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2005 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot/RPMB sizes are in 128KiB units (multiplier << 17) */
2007 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2009 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* compute the four general-purpose partition capacities */
2011 for (i = 0; i < 4; i++) {
2012 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2013 uint mult = (ext_csd[idx + 2] << 16) +
2014 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2017 if (!part_completed)
/* bytes = mult * erase_grp * wp_grp * 512KiB (<< 19) */
2019 mmc->capacity_gp[i] = mult;
2020 mmc->capacity_gp[i] *=
2021 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2022 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2023 mmc->capacity_gp[i] <<= 19;
2026 if (part_completed) {
2027 mmc->enh_user_size =
2028 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2029 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2030 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2031 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2032 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2033 mmc->enh_user_size <<= 19;
2034 mmc->enh_user_start =
2035 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2036 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2037 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2038 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity cards address in 512-byte sectors */
2039 if (mmc->high_capacity)
2040 mmc->enh_user_start <<= 9;
2044 * Host needs to enable ERASE_GRP_DEF bit if device is
2045 * partitioned. This bit will be lost every time after a reset
2046 * or power off. This will affect erase size.
2050 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2051 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2054 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2055 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy consistent with what we programmed */
2060 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2063 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2064 /* Read out group size from ext_csd */
2065 mmc->erase_grp_size =
2066 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2068 * if high capacity and partition setting completed
2069 * SEC_COUNT is valid even if it is smaller than 2 GiB
2070 * JEDEC Standard JESD84-B45, 6.2.4
2072 if (mmc->high_capacity && part_completed) {
2073 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2074 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2075 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2076 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2077 capacity *= MMC_MAX_BLOCK_LEN;
2078 mmc->capacity_user = capacity;
2081 /* Calculate the group size from the csd value. */
2082 int erase_gsz, erase_gmul;
2084 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2085 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2086 mmc->erase_grp_size = (erase_gsz + 1)
/* write-protect group size in sectors */
2090 mmc->hc_wp_grp_size = 1024
2091 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2092 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2094 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
/* error path: drop the cached EXT_CSD pointer */
2100 mmc->ext_csd = NULL;
/*
 * mmc_startup() - bring an identified card to Transfer State and
 * populate the struct mmc / block descriptor.
 *
 * Sequence visible in this excerpt: optional SPI CRC enable, CID read
 * (ALL_SEND_CID / SEND_CID), RCA assignment, CSD read and decode
 * (version, legacy speed, block lengths, capacity), optional DSR
 * programming, SELECT_CARD, v4 startup (mmc_startup_v4), capability
 * query and mode/width negotiation, then block-descriptor fill-in
 * (blksz, lba, vendor/product/revision strings from the CID).
 */
2105 static int mmc_startup(struct mmc *mmc)
2111 struct blk_desc *bdesc;
2113 #ifdef CONFIG_MMC_SPI_CRC_ON
2114 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2115 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2116 cmd.resp_type = MMC_RSP_R1;
2118 err = mmc_send_cmd(mmc, &cmd, NULL);
2124 /* Put the Card in Identify Mode */
2125 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2126 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2127 cmd.resp_type = MMC_RSP_R2;
2130 err = mmc_send_cmd(mmc, &cmd, NULL);
2132 #ifdef CONFIG_MMC_QUIRKS
2133 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2136 * It has been seen that SEND_CID may fail on the first
2137 * attempt, let's try a few more times
2140 err = mmc_send_cmd(mmc, &cmd, NULL);
2143 } while (retries--);
2150 memcpy(mmc->cid, cmd.response, 16);
2153 * For MMC cards, set the Relative Address.
2154 * For SD cards, get the Relative Address.
2155 * This also puts the cards into Standby State
2157 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2158 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2159 cmd.cmdarg = mmc->rca << 16;
2160 cmd.resp_type = MMC_RSP_R6;
2162 err = mmc_send_cmd(mmc, &cmd, NULL);
/* for SD the card picks the RCA and returns it in the response */
2168 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2171 /* Get the Card-Specific Data */
2172 cmd.cmdidx = MMC_CMD_SEND_CSD;
2173 cmd.resp_type = MMC_RSP_R2;
2174 cmd.cmdarg = mmc->rca << 16;
2176 err = mmc_send_cmd(mmc, &cmd, NULL);
2181 mmc->csd[0] = cmd.response[0];
2182 mmc->csd[1] = cmd.response[1];
2183 mmc->csd[2] = cmd.response[2];
2184 mmc->csd[3] = cmd.response[3];
/* decode SPEC_VERS from the CSD for raw MMC cards */
2186 if (mmc->version == MMC_VERSION_UNKNOWN) {
2187 int version = (cmd.response[0] >> 26) & 0xf;
2191 mmc->version = MMC_VERSION_1_2;
2194 mmc->version = MMC_VERSION_1_4;
2197 mmc->version = MMC_VERSION_2_2;
2200 mmc->version = MMC_VERSION_3;
2203 mmc->version = MMC_VERSION_4;
2206 mmc->version = MMC_VERSION_1_2;
2211 /* divide frequency by 10, since the mults are 10x bigger */
2212 freq = fbase[(cmd.response[0] & 0x7)];
2213 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2215 mmc->legacy_speed = freq * mult;
2216 mmc_select_mode(mmc, MMC_LEGACY);
2218 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2219 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2222 mmc->write_bl_len = mmc->read_bl_len;
2224 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* capacity from C_SIZE/C_SIZE_MULT; layout differs for SDHC/SDXC */
2226 if (mmc->high_capacity) {
2227 csize = (mmc->csd[1] & 0x3f) << 16
2228 | (mmc->csd[2] & 0xffff0000) >> 16;
2231 csize = (mmc->csd[1] & 0x3ff) << 2
2232 | (mmc->csd[2] & 0xc0000000) >> 30;
2233 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2236 mmc->capacity_user = (csize + 1) << (cmult + 2);
2237 mmc->capacity_user *= mmc->read_bl_len;
2238 mmc->capacity_boot = 0;
2239 mmc->capacity_rpmb = 0;
2240 for (i = 0; i < 4; i++)
2241 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the stack supports */
2243 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2244 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2246 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2247 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only if implemented and a value was configured */
2249 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2250 cmd.cmdidx = MMC_CMD_SET_DSR;
2251 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2252 cmd.resp_type = MMC_RSP_NONE;
2253 if (mmc_send_cmd(mmc, &cmd, NULL))
2254 pr_warn("MMC: SET_DSR failed\n");
2257 /* Select the card, and put it into Transfer Mode */
2258 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2259 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2260 cmd.resp_type = MMC_RSP_R1;
2261 cmd.cmdarg = mmc->rca << 16;
2262 err = mmc_send_cmd(mmc, &cmd, NULL);
2269 * For SD, its erase group is always one sector
2271 mmc->erase_grp_size = 1;
2272 mmc->part_config = MMCPART_NOAVAILABLE;
2274 err = mmc_startup_v4(mmc);
2278 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
/* negotiate the fastest mode/width both sides support */
2283 err = sd_get_capabilities(mmc);
2286 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2288 err = mmc_get_capabilities(mmc);
2291 mmc_select_mode_and_width(mmc, mmc->card_caps);
2297 mmc->best_mode = mmc->selected_mode;
2299 /* Fix the block length for DDR mode */
2300 if (mmc->ddr_mode) {
2301 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2302 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2305 /* fill in device description */
2306 bdesc = mmc_get_blk_desc(mmc);
2310 bdesc->blksz = mmc->read_bl_len;
2311 bdesc->log2blksz = LOG2(bdesc->blksz);
2312 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2313 #if !defined(CONFIG_SPL_BUILD) || \
2314 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2315 !defined(CONFIG_USE_TINY_PRINTF))
2316 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2317 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2318 (mmc->cid[3] >> 16) & 0xffff);
2319 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2320 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2321 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2322 (mmc->cid[2] >> 24) & 0xff);
2323 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2324 (mmc->cid[2] >> 16) & 0xf);
/* tiny-printf SPL builds: leave the strings empty */
2326 bdesc->vendor[0] = 0;
2327 bdesc->product[0] = 0;
2328 bdesc->revision[0] = 0;
2330 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
/*
 * mmc_send_if_cond() - probe for an SD v2 card with CMD8 (SEND_IF_COND).
 *
 * Sends the 2.7-3.6V supply-range bit plus the 0xAA check pattern; a
 * card that echoes 0xAA back is SD v2.00 or later, so mmc->version is
 * set to SD_VERSION_2.  Non-v2 cards simply fail to respond.
 */
2337 static int mmc_send_if_cond(struct mmc *mmc)
2342 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2343 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2344 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2345 cmd.resp_type = MMC_RSP_R7;
2347 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the 0xAA check pattern */
2352 if ((cmd.response[0] & 0xff) != 0xaa)
2355 mmc->version = SD_VERSION_2;
2360 #if !CONFIG_IS_ENABLED(DM_MMC)
2361 /* board-specific MMC power initializations. */
2362 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - locate the card's power supplies.
 *
 * With driver model + regulator support, resolves the optional
 * "vmmc-supply" (card power) and "vqmmc-supply" (I/O voltage)
 * regulators from the device tree; a missing supply is only logged.
 * Without DM, falls back to the weak board hook above.
 */
2367 static int mmc_power_init(struct mmc *mmc)
2369 #if CONFIG_IS_ENABLED(DM_MMC)
2370 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2373 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2376 debug("%s: No vmmc supply\n", mmc->dev->name);
2378 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2379 &mmc->vqmmc_supply);
2381 debug("%s: No vqmmc supply\n", mmc->dev->name);
2383 #else /* !CONFIG_DM_MMC */
2385 * Driver model should use a regulator, as above, rather than calling
2386 * out to board code.
2388 board_mmc_power_init();
2394 * put the host in the initial state:
2395 * - turn on Vdd (card power supply)
2396 * - configure the bus width and clock to minimal values
/*
 * mmc_set_initial_state() - reset the host to a safe baseline:
 * 3.3V signalling (falling back to 1.8V), legacy mode, 1-bit bus,
 * and a clock of 0 (host-minimum; see mmc_set_clock semantics).
 */
2398 static void mmc_set_initial_state(struct mmc *mmc)
2402 /* First try to set 3.3V. If it fails set to 1.8V */
2403 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2405 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2407 pr_warn("mmc: failed to set signal voltage\n");
2409 mmc_select_mode(mmc, MMC_LEGACY);
2410 mmc_set_bus_width(mmc, 1);
2411 mmc_set_clock(mmc, 0, false);
/* Enable the vmmc regulator (if one was found); error is fatal. */
2414 static int mmc_power_on(struct mmc *mmc)
2416 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2417 if (mmc->vmmc_supply) {
2418 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2421 puts("Error enabling VMMC supply\n");
/*
 * Drop the clock (to 1 Hz, disabled) and cut the vmmc regulator.
 * A disable failure is only logged, not propagated as fatal here.
 */
2429 static int mmc_power_off(struct mmc *mmc)
2431 mmc_set_clock(mmc, 1, true);
2432 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2433 if (mmc->vmmc_supply) {
2434 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2437 debug("Error disabling VMMC supply\n");
/*
 * Full power cycle: off, wait (2ms, per the comment below), on again.
 * Used to reset cards stuck after a failed UHS negotiation.
 */
2445 static int mmc_power_cycle(struct mmc *mmc)
2449 ret = mmc_power_off(mmc);
2453 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2454 * to be on the safer side.
2457 return mmc_power_on(mmc);
/*
 * mmc_start_init() - begin (possibly asynchronous) card initialization.
 *
 * Checks card presence, powers the card (with a full power cycle when
 * UHS is attempted — UHS is disabled if the cycle fails, since error
 * recovery would be impossible), resets the card with CMD0, then probes
 * for SD v2 (CMD8) / SD (ACMD41) / MMC (CMD1).  On success sets
 * init_in_progress; mmc_complete_init() finishes the job later.
 */
2460 int mmc_start_init(struct mmc *mmc)
2463 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2467 * all hosts are capable of 1 bit bus-width and able to use the legacy
2470 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2471 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2473 /* we pretend there's no card when init is NULL */
2474 no_card = mmc_getcd(mmc) == 0;
2475 #if !CONFIG_IS_ENABLED(DM_MMC)
2476 no_card = no_card || (mmc->cfg->ops->init == NULL);
2480 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2481 printf("MMC: no card present\n");
2489 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2490 mmc_adapter_card_type_ident();
2492 err = mmc_power_init(mmc);
2496 #ifdef CONFIG_MMC_QUIRKS
2497 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2498 MMC_QUIRK_RETRY_SEND_CID;
/* UHS requires the ability to power-cycle for error recovery */
2501 err = mmc_power_cycle(mmc);
2504 * if power cycling is not supported, we should not try
2505 * to use the UHS modes, because we wouldn't be able to
2506 * recover from an error during the UHS initialization.
2508 debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2510 mmc->host_caps &= ~UHS_CAPS;
2511 err = mmc_power_on(mmc);
2516 #if CONFIG_IS_ENABLED(DM_MMC)
2517 /* The device has already been probed ready for use */
2519 /* made sure it's not NULL earlier */
2520 err = mmc->cfg->ops->init(mmc);
2527 mmc_set_initial_state(mmc);
2528 mmc_send_init_stream(mmc);
2530 /* Reset the Card */
2531 err = mmc_go_idle(mmc);
2536 /* The internal partition reset to user partition(0) at every CMD0*/
2537 mmc_get_blk_desc(mmc)->hwpart = 0;
2539 /* Test for SD version 2 */
2540 err = mmc_send_if_cond(mmc);
2542 /* Now try to get the SD card's operating condition */
2543 err = sd_send_op_cond(mmc, uhs_en);
/* retry without UHS after a power cycle if UHS negotiation failed */
2544 if (err && uhs_en) {
2546 mmc_power_cycle(mmc);
2550 /* If the command timed out, we check for an MMC card */
2551 if (err == -ETIMEDOUT) {
2552 err = mmc_send_op_cond(mmc);
2555 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2556 pr_err("Card did not respond to voltage select!\n");
2563 mmc->init_in_progress = 1;
/*
 * mmc_complete_init() - finish an initialization started by
 * mmc_start_init(): complete the pending OP_COND handshake if any,
 * then run mmc_startup().  Clears init_in_progress.
 */
2568 static int mmc_complete_init(struct mmc *mmc)
2572 mmc->init_in_progress = 0;
2573 if (mmc->op_cond_pending)
2574 err = mmc_complete_op_cond(mmc);
2577 err = mmc_startup(mmc);
/*
 * mmc_init() - public entry point: run start + complete init in one
 * call (skipping start if already in progress) and report the elapsed
 * time.  Returns 0 on success.
 */
2585 int mmc_init(struct mmc *mmc)
2588 __maybe_unused unsigned start;
2589 #if CONFIG_IS_ENABLED(DM_MMC)
2590 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2597 start = get_timer(0);
2599 if (!mmc->init_in_progress)
2600 err = mmc_start_init(mmc);
2603 err = mmc_complete_init(mmc);
2605 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
/* record a DSR value to be programmed during startup */
2610 int mmc_set_dsr(struct mmc *mmc, u16 val)
2616 /* CPU-specific MMC initializations */
2617 __weak int cpu_mmc_init(bd_t *bis)
2622 /* board-specific MMC initializations. */
2623 __weak int board_mmc_init(bd_t *bis)
/* request that this controller be initialized early (before first use) */
2628 void mmc_set_preinit(struct mmc *mmc, int preinit)
2630 mmc->preinit = preinit;
2633 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with DM: probing happens on demand, nothing to do here */
2634 static int mmc_probe(bd_t *bis)
2638 #elif CONFIG_IS_ENABLED(DM_MMC)
/*
 * Full DM build: bind/probe every MMC device in sequence order.
 * A failed probe is reported but does not abort the loop.
 */
2639 static int mmc_probe(bd_t *bis)
2643 struct udevice *dev;
2645 ret = uclass_get(UCLASS_MMC, &uc);
2650 * Try to add them in sequence order. Really with driver model we
2651 * should allow holes, but the current MMC list does not allow that.
2652 * So if we request 0, 1, 3 we will get 0, 1, 2.
2654 for (i = 0; ; i++) {
2655 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2659 uclass_foreach_dev(dev, uc) {
2660 ret = device_probe(dev);
2662 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* legacy (non-DM) build: defer to the board hook */
2668 static int mmc_probe(bd_t *bis)
2670 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-time subsystem init: probe the controllers
 * and (in non-SPL builds) print the device list.  Guarded by a static
 * flag so repeated calls are no-ops.
 */
2677 int mmc_initialize(bd_t *bis)
2679 static int initialized = 0;
2681 if (initialized) /* Avoid initializing mmc multiple times */
2685 #if !CONFIG_IS_ENABLED(BLK)
2686 #if !CONFIG_IS_ENABLED(MMC_TINY)
2690 ret = mmc_probe(bis);
2694 #ifndef CONFIG_SPL_BUILD
2695 print_mmc_devices(',');
2702 #ifdef CONFIG_CMD_BKOPS_ENABLE
2703 int mmc_set_bkops_enable(struct mmc *mmc)
2706 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2708 err = mmc_send_ext_csd(mmc, ext_csd);
2710 puts("Could not get ext_csd register values\n");
2714 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2715 puts("Background operations not supported on device\n");
2716 return -EMEDIUMTYPE;
2719 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2720 puts("Background operations already enabled\n");
2724 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2726 puts("Failed to enable manual background operations\n");
2730 puts("Enabled manual background operations\n");