2 * Copyright 2008, Freescale Semiconductor, Inc
5 * Based vaguely on the Linux code
7 * SPDX-License-Identifier: GPL-2.0+
14 #include <dm/device-internal.h>
18 #include <power/regulator.h>
21 #include <linux/list.h>
23 #include "mmc_private.h"
25 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
26 static int mmc_power_cycle(struct mmc *mmc);
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
29 #if CONFIG_IS_ENABLED(MMC_TINY)
30 static struct mmc mmc_static;
31 struct mmc *find_mmc_device(int dev_num)
36 void mmc_do_preinit(void)
38 struct mmc *m = &mmc_static;
39 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
40 mmc_set_preinit(m, 1);
46 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
48 return &mmc->block_dev;
52 #if !CONFIG_IS_ENABLED(DM_MMC)
54 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
55 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
61 __weak int board_mmc_getwp(struct mmc *mmc)
66 int mmc_getwp(struct mmc *mmc)
70 wp = board_mmc_getwp(mmc);
73 if (mmc->cfg->ops->getwp)
74 wp = mmc->cfg->ops->getwp(mmc);
82 __weak int board_mmc_getcd(struct mmc *mmc)
88 #ifdef CONFIG_MMC_TRACE
89 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
91 printf("CMD_SEND:%d\n", cmd->cmdidx);
92 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
95 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
101 printf("\t\tRET\t\t\t %d\n", ret);
103 switch (cmd->resp_type) {
105 printf("\t\tMMC_RSP_NONE\n");
108 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
112 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
116 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
118 printf("\t\t \t\t 0x%08X \n",
120 printf("\t\t \t\t 0x%08X \n",
122 printf("\t\t \t\t 0x%08X \n",
125 printf("\t\t\t\t\tDUMPING DATA\n");
126 for (i = 0; i < 4; i++) {
128 printf("\t\t\t\t\t%03d - ", i*4);
129 ptr = (u8 *)&cmd->response[i];
131 for (j = 0; j < 4; j++)
132 printf("%02X ", *ptr--);
137 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
141 printf("\t\tERROR MMC rsp not supported\n");
147 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
151 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
152 printf("CURR STATE:%d\n", status);
156 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * mmc_mode_name() - map a bus_mode value to a printable name.
 * Compiled only when MMC_VERBOSE or DEBUG is enabled (see guard above).
 * NOTE(review): sparse excerpt - interior lines (braces, final return)
 * are not visible here.
 */
157 const char *mmc_mode_name(enum bus_mode mode)
159 	static const char *const names[] = {
/* Designated initializers: indices track the enum, gaps stay NULL. */
160 	      [MMC_LEGACY]	= "MMC legacy",
161 	      [SD_LEGACY]	= "SD Legacy",
162 	      [MMC_HS]		= "MMC High Speed (26MHz)",
163 	      [SD_HS]		= "SD High Speed (50MHz)",
164 	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
165 	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
166 	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
167 	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
168 	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
169 	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
170 	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
171 	      [MMC_HS_200]	= "HS200 (200MHz)",
/* Range check before indexing the table. */
174 	if (mode >= MMC_MODES_END)
175 		return "Unknown mode";
181 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
183 static const int freqs[] = {
184 [MMC_LEGACY] = 25000000,
185 [SD_LEGACY] = 25000000,
188 [MMC_HS_52] = 52000000,
189 [MMC_DDR_52] = 52000000,
190 [UHS_SDR12] = 25000000,
191 [UHS_SDR25] = 50000000,
192 [UHS_SDR50] = 100000000,
193 [UHS_DDR50] = 50000000,
194 [UHS_SDR104] = 208000000,
195 [MMC_HS_200] = 200000000,
198 if (mode == MMC_LEGACY)
199 return mmc->legacy_speed;
200 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the selected bus mode on the host side and
 * cache the derived transfer frequency and DDR flag in struct mmc.
 * NOTE(review): sparse excerpt - braces/return not visible here.
 */
206 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
208 	mmc->selected_mode = mode;
209 	mmc->tran_speed = mmc_mode2freq(mmc, mode);
210 	mmc->ddr_mode = mmc_is_mode_ddr(mode);
211 	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
212 		 mmc->tran_speed / 1000000);
216 #if !CONFIG_IS_ENABLED(DM_MMC)
217 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
221 mmmc_trace_before_send(mmc, cmd);
222 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
223 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * mmc_send_status() - poll the card with CMD13 (SEND_STATUS) until it
 * reports ready-for-data and has left the PRG state, or until the
 * timeout/retry budget is exhausted.
 * NOTE(review): sparse excerpt - the polling loop structure and returns
 * are not fully visible here.
 */
229 int mmc_send_status(struct mmc *mmc, int timeout)
232 	int err, retries = 5;
234 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
235 	cmd.resp_type = MMC_RSP_R1;
/* In SPI mode there is no RCA; the argument is only set for native mode. */
236 	if (!mmc_host_is_spi(mmc))
237 		cmd.cmdarg = mmc->rca << 16;
240 		err = mmc_send_cmd(mmc, &cmd, NULL);
/* Card is usable once RDY_FOR_DATA is set and CURR_STATE != (program). */
242 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
243 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
/* Any error bit in the status word aborts the wait. */
247 			if (cmd.response[0] & MMC_STATUS_MASK) {
248 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
249 				pr_err("Status Error: 0x%08X\n",
/* Transient send failures are retried a bounded number of times. */
254 		} else if (--retries < 0)
263 	mmc_trace_state(mmc, &cmd);
265 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
266 		pr_err("Timeout waiting card ready\n");
/*
 * mmc_set_blocklen() - issue CMD16 (SET_BLOCKLEN) to set the card's
 * read/write block length. With CONFIG_MMC_QUIRKS, retries the command
 * for cards known to fail it on the first attempt.
 * NOTE(review): sparse excerpt - guard conditions and the retry loop
 * body are not fully visible here.
 */
274 int mmc_set_blocklen(struct mmc *mmc, int len)
282 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
283 	cmd.resp_type = MMC_RSP_R1;
286 	err = mmc_send_cmd(mmc, &cmd, NULL);
288 #ifdef CONFIG_MMC_QUIRKS
289 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
292 		 * It has been seen that SET_BLOCKLEN may fail on the first
293 		 * attempt, let's try a few more time
296 			err = mmc_send_cmd(mmc, &cmd, NULL);
306 #ifdef MMC_SUPPORTS_TUNING
307 static const u8 tuning_blk_pattern_4bit[] = {
308 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
309 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
310 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
311 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
312 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
313 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
314 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
315 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
318 static const u8 tuning_blk_pattern_8bit[] = {
319 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
320 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
321 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
322 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
323 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
324 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
325 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
326 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
327 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
328 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
329 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
330 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
331 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
332 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
333 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
334 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
337 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
340 struct mmc_data data;
341 const u8 *tuning_block_pattern;
344 if (mmc->bus_width == 8) {
345 tuning_block_pattern = tuning_blk_pattern_8bit;
346 size = sizeof(tuning_blk_pattern_8bit);
347 } else if (mmc->bus_width == 4) {
348 tuning_block_pattern = tuning_blk_pattern_4bit;
349 size = sizeof(tuning_blk_pattern_4bit);
354 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
358 cmd.resp_type = MMC_RSP_R1;
360 data.dest = (void *)data_buf;
362 data.blocksize = size;
363 data.flags = MMC_DATA_READ;
365 err = mmc_send_cmd(mmc, &cmd, &data);
369 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * mmc_read_blocks() - read one or more blocks starting at @start into
 * @dst. Chooses single- vs multiple-block read by block count, and for
 * multi-block reads sends CMD12 (STOP_TRANSMISSION) afterwards.
 * NOTE(review): sparse excerpt - branch structure and returns are not
 * fully visible here.
 */
376 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
380 	struct mmc_data data;
383 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
385 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
/*
 * High-capacity cards address by block number; standard-capacity cards
 * address by byte offset, hence the multiply by the read block length.
 */
387 	if (mmc->high_capacity)
390 		cmd.cmdarg = start * mmc->read_bl_len;
392 	cmd.resp_type = MMC_RSP_R1;
395 	data.blocks = blkcnt;
396 	data.blocksize = mmc->read_bl_len;
397 	data.flags = MMC_DATA_READ;
399 	if (mmc_send_cmd(mmc, &cmd, &data))
/* Multi-block transfers must be terminated explicitly with CMD12. */
403 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
405 		cmd.resp_type = MMC_RSP_R1b;
406 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
407 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
408 			pr_err("mmc fail to send stop cmd\n");
417 #if CONFIG_IS_ENABLED(BLK)
418 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
420 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
424 #if CONFIG_IS_ENABLED(BLK)
425 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
427 int dev_num = block_dev->devnum;
429 lbaint_t cur, blocks_todo = blkcnt;
434 struct mmc *mmc = find_mmc_device(dev_num);
438 if (CONFIG_IS_ENABLED(MMC_TINY))
439 err = mmc_switch_part(mmc, block_dev->hwpart);
441 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
446 if ((start + blkcnt) > block_dev->lba) {
447 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
448 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
449 start + blkcnt, block_dev->lba);
454 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
455 pr_debug("%s: Failed to set blocklen\n", __func__);
460 cur = (blocks_todo > mmc->cfg->b_max) ?
461 mmc->cfg->b_max : blocks_todo;
462 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
463 pr_debug("%s: Failed to read blocks\n", __func__);
468 dst += cur * mmc->read_bl_len;
469 } while (blocks_todo > 0);
474 static int mmc_go_idle(struct mmc *mmc)
481 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
483 cmd.resp_type = MMC_RSP_NONE;
485 err = mmc_send_cmd(mmc, &cmd, NULL);
495 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - perform the SD voltage switch sequence (CMD11)
 * to move the signalling level, typically 3.3V -> 1.8V for UHS modes.
 * Follows the SD spec sequence: CMD11, wait for dat[0] low, gate the
 * clock, change the regulator, ungate, then wait for dat[0] high.
 * NOTE(review): sparse excerpt - error paths between fragments omitted.
 */
496 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
502 	 * Send CMD11 only if the request is to switch the card to
/* Switching back to 3.3V needs no CMD11 handshake, only the regulator. */
505 	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
506 		return mmc_set_signal_voltage(mmc, signal_voltage);
508 	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
510 	cmd.resp_type = MMC_RSP_R1;
512 	err = mmc_send_cmd(mmc, &cmd, NULL);
/* A status error bit in the R1 response means the card refused. */
516 	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
520 	 * The card should drive cmd and dat[0:3] low immediately
521 	 * after the response of cmd11, but wait 100 us to be sure
523 	err = mmc_wait_dat0(mmc, 0, 100);
530 	 * During a signal voltage level switch, the clock must be gated
531 	 * for 5 ms according to the SD spec
533 	mmc_set_clock(mmc, mmc->clock, true);
535 	err = mmc_set_signal_voltage(mmc, signal_voltage);
539 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
541 	mmc_set_clock(mmc, mmc->clock, false);
544 	 * Failure to switch is indicated by the card holding
545 	 * dat[0:3] low. Wait for at least 1 ms according to spec
547 	err = mmc_wait_dat0(mmc, 1, 1000);
557 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
564 cmd.cmdidx = MMC_CMD_APP_CMD;
565 cmd.resp_type = MMC_RSP_R1;
568 err = mmc_send_cmd(mmc, &cmd, NULL);
573 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
574 cmd.resp_type = MMC_RSP_R3;
577 * Most cards do not answer if some reserved bits
578 * in the ocr are set. However, Some controller
579 * can set bit 7 (reserved for low voltages), but
580 * how to manage low voltages SD card is not yet
583 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
584 (mmc->cfg->voltages & 0xff8000);
586 if (mmc->version == SD_VERSION_2)
587 cmd.cmdarg |= OCR_HCS;
590 cmd.cmdarg |= OCR_S18R;
592 err = mmc_send_cmd(mmc, &cmd, NULL);
597 if (cmd.response[0] & OCR_BUSY)
606 if (mmc->version != SD_VERSION_2)
607 mmc->version = SD_VERSION_1_0;
609 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
610 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
611 cmd.resp_type = MMC_RSP_R3;
614 err = mmc_send_cmd(mmc, &cmd, NULL);
620 mmc->ocr = cmd.response[0];
622 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
623 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
625 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
631 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - one CMD1 (SEND_OP_COND) iteration. When
 * @use_arg is set (and not in SPI mode), the argument advertises the
 * host's supported voltage window intersected with the card's OCR plus
 * the sector-addressing (HCS) and access-mode bits. The returned OCR
 * is cached in mmc->ocr.
 * NOTE(review): sparse excerpt - declarations/returns not visible here.
 */
637 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
642 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
643 	cmd.resp_type = MMC_RSP_R3;
645 	if (use_arg && !mmc_host_is_spi(mmc))
646 		cmd.cmdarg = OCR_HCS |
647 		(mmc->cfg->voltages &
648 		(mmc->ocr & OCR_VOLTAGE_MASK)) |
649 		(mmc->ocr & OCR_ACCESS_MODE);
651 	err = mmc_send_cmd(mmc, &cmd, NULL);
654 	mmc->ocr = cmd.response[0];
/*
 * mmc_send_op_cond() - start MMC card initialisation: probe the card's
 * capabilities with a couple of CMD1 iterations. If the card is still
 * busy afterwards, completion is deferred (op_cond_pending) so other
 * work can proceed; see mmc_complete_op_cond().
 * NOTE(review): sparse excerpt - loop body/returns not visible here.
 */
658 static int mmc_send_op_cond(struct mmc *mmc)
662 	/* Some cards seem to need this */
665 	/* Asking to the card its capabilities */
666 	for (i = 0; i < 2; i++) {
667 		err = mmc_send_op_cond_iter(mmc, i != 0);
671 		/* exit if not busy (flag seems to be inverted) */
672 		if (mmc->ocr & OCR_BUSY)
675 	mmc->op_cond_pending = 1;
679 static int mmc_complete_op_cond(struct mmc *mmc)
686 mmc->op_cond_pending = 0;
687 if (!(mmc->ocr & OCR_BUSY)) {
688 /* Some cards seem to need this */
691 start = get_timer(0);
693 err = mmc_send_op_cond_iter(mmc, 1);
696 if (mmc->ocr & OCR_BUSY)
698 if (get_timer(start) > timeout)
704 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
705 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
706 cmd.resp_type = MMC_RSP_R3;
709 err = mmc_send_cmd(mmc, &cmd, NULL);
714 mmc->ocr = cmd.response[0];
717 mmc->version = MMC_VERSION_UNKNOWN;
719 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_ext_csd() - read the 512-byte EXT_CSD register (CMD8 for
 * eMMC) into @ext_csd via a single-block data transfer.
 * NOTE(review): sparse excerpt - declarations/return not visible here.
 */
726 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
729 	struct mmc_data data;
732 	/* Get the Card Status Register */
733 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
734 	cmd.resp_type = MMC_RSP_R1;
737 	data.dest = (char *)ext_csd;
739 	data.blocksize = MMC_MAX_BLOCK_LEN;
740 	data.flags = MMC_DATA_READ;
742 	err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * mmc_switch() - write one EXT_CSD byte via CMD6 (SWITCH), then poll
 * with mmc_send_status() until the card is ready again. Retries the
 * whole sequence a bounded number of times.
 * NOTE(review): sparse excerpt - the cmdarg composition and the loop
 * exit conditions are not fully visible here.
 */
747 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
754 	cmd.cmdidx = MMC_CMD_SWITCH;
755 	cmd.resp_type = MMC_RSP_R1b;
756 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
760 	while (retries > 0) {
761 		ret = mmc_send_cmd(mmc, &cmd, NULL);
763 		/* Waiting for the ready status */
765 			ret = mmc_send_status(mmc, timeout);
776 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
781 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
787 speed_bits = EXT_CSD_TIMING_HS;
789 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
791 speed_bits = EXT_CSD_TIMING_HS200;
795 speed_bits = EXT_CSD_TIMING_LEGACY;
800 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
805 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
806 /* Now check to see that it worked */
807 err = mmc_send_ext_csd(mmc, test_csd);
811 /* No high-speed support */
812 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive the card's capability mask
 * (mmc->card_caps) from the cached EXT_CSD. Only eMMC >= v4 supports
 * high-speed modes; SPI hosts and older cards keep the legacy caps set
 * at the top of the function.
 * NOTE(review): sparse excerpt - early returns not visible here.
 */
819 static int mmc_get_capabilities(struct mmc *mmc)
821 	u8 *ext_csd = mmc->ext_csd;
/* Baseline every card supports: 1-bit bus, legacy timing. */
824 	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
826 	if (mmc_host_is_spi(mmc))
829 	/* Only version 4 supports high-speed */
830 	if (mmc->version < MMC_VERSION_4)
834 		pr_err("No ext_csd found!\n"); /* this should never happen */
838 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
/* EXT_CSD[CARD_TYPE]: bitmask of supported device timing modes. */
840 	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
841 	mmc->cardtype = cardtype;
843 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
844 	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
845 			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
846 		mmc->card_caps |= MMC_MODE_HS200;
849 	if (cardtype & EXT_CSD_CARD_TYPE_52) {
850 		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
851 			mmc->card_caps |= MMC_MODE_DDR_52MHz;
852 		mmc->card_caps |= MMC_MODE_HS_52MHz;
854 	if (cardtype & EXT_CSD_CARD_TYPE_26)
855 		mmc->card_caps |= MMC_MODE_HS;
860 static int mmc_set_capacity(struct mmc *mmc, int part_num)
864 mmc->capacity = mmc->capacity_user;
868 mmc->capacity = mmc->capacity_boot;
871 mmc->capacity = mmc->capacity_rpmb;
877 mmc->capacity = mmc->capacity_gp[part_num - 4];
883 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
888 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
889 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
894 if (part_num & PART_ACCESS_MASK)
895 forbidden = MMC_CAP(MMC_HS_200);
897 if (MMC_CAP(mmc->selected_mode) & forbidden) {
898 pr_debug("selected mode (%s) is forbidden for part %d\n",
899 mmc_mode_name(mmc->selected_mode), part_num);
901 } else if (mmc->selected_mode != mmc->best_mode) {
902 pr_debug("selected mode is not optimal\n");
907 return mmc_select_mode_and_width(mmc,
908 mmc->card_caps & ~forbidden);
913 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
914 unsigned int part_num)
/*
 * mmc_switch_part() - select hardware partition @part_num via the
 * EXT_CSD PARTITION_CONFIG byte, after checking the current bus mode is
 * allowed for that partition. On success (or when falling back to the
 * raw device), the capacity and the block descriptor's hwpart are
 * updated to match.
 * NOTE(review): sparse excerpt - early-return paths not visible here.
 */
920 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
924 	ret = mmc_boot_part_access_chk(mmc, part_num);
/* Only the PART_ACCESS bits change; the rest of PART_CONF is kept. */
928 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
929 			 (mmc->part_config & ~PART_ACCESS_MASK)
930 			 | (part_num & PART_ACCESS_MASK));
933 	 * Set the capacity if the switch succeeded or was intended
934 	 * to return to representing the raw device.
936 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
937 		ret = mmc_set_capacity(mmc, part_num);
938 		mmc_get_blk_desc(mmc)->hwpart = part_num;
944 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
945 int mmc_hwpart_config(struct mmc *mmc,
946 const struct mmc_hwpart_conf *conf,
947 enum mmc_hwpart_conf_mode mode)
953 u32 max_enh_size_mult;
954 u32 tot_enh_size_mult = 0;
957 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
959 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
962 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
963 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
967 if (!(mmc->part_support & PART_SUPPORT)) {
968 pr_err("Card does not support partitioning\n");
972 if (!mmc->hc_wp_grp_size) {
973 pr_err("Card does not define HC WP group size\n");
977 /* check partition alignment and total enhanced size */
978 if (conf->user.enh_size) {
979 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
980 conf->user.enh_start % mmc->hc_wp_grp_size) {
981 pr_err("User data enhanced area not HC WP group "
985 part_attrs |= EXT_CSD_ENH_USR;
986 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
987 if (mmc->high_capacity) {
988 enh_start_addr = conf->user.enh_start;
990 enh_start_addr = (conf->user.enh_start << 9);
996 tot_enh_size_mult += enh_size_mult;
998 for (pidx = 0; pidx < 4; pidx++) {
999 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1000 pr_err("GP%i partition not HC WP group size "
1001 "aligned\n", pidx+1);
1004 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1005 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1006 part_attrs |= EXT_CSD_ENH_GP(pidx);
1007 tot_enh_size_mult += gp_size_mult[pidx];
1011 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1012 pr_err("Card does not support enhanced attribute\n");
1013 return -EMEDIUMTYPE;
1016 err = mmc_send_ext_csd(mmc, ext_csd);
1021 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1022 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1023 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1024 if (tot_enh_size_mult > max_enh_size_mult) {
1025 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1026 tot_enh_size_mult, max_enh_size_mult);
1027 return -EMEDIUMTYPE;
1030 /* The default value of EXT_CSD_WR_REL_SET is device
1031 * dependent, the values can only be changed if the
1032 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1033 * changed only once and before partitioning is completed. */
1034 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1035 if (conf->user.wr_rel_change) {
1036 if (conf->user.wr_rel_set)
1037 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1039 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1041 for (pidx = 0; pidx < 4; pidx++) {
1042 if (conf->gp_part[pidx].wr_rel_change) {
1043 if (conf->gp_part[pidx].wr_rel_set)
1044 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1046 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1050 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1051 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1052 puts("Card does not support host controlled partition write "
1053 "reliability settings\n");
1054 return -EMEDIUMTYPE;
1057 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1058 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1059 pr_err("Card already partitioned\n");
1063 if (mode == MMC_HWPART_CONF_CHECK)
1066 /* Partitioning requires high-capacity size definitions */
1067 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1068 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1069 EXT_CSD_ERASE_GROUP_DEF, 1);
1074 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1076 /* update erase group size to be high-capacity */
1077 mmc->erase_grp_size =
1078 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1082 /* all OK, write the configuration */
1083 for (i = 0; i < 4; i++) {
1084 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1085 EXT_CSD_ENH_START_ADDR+i,
1086 (enh_start_addr >> (i*8)) & 0xFF);
1090 for (i = 0; i < 3; i++) {
1091 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1092 EXT_CSD_ENH_SIZE_MULT+i,
1093 (enh_size_mult >> (i*8)) & 0xFF);
1097 for (pidx = 0; pidx < 4; pidx++) {
1098 for (i = 0; i < 3; i++) {
1099 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1100 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1101 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1106 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1107 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1111 if (mode == MMC_HWPART_CONF_SET)
1114 /* The WR_REL_SET is a write-once register but shall be
1115 * written before setting PART_SETTING_COMPLETED. As it is
1116 * write-once we can only write it when completing the
1118 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1119 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1120 EXT_CSD_WR_REL_SET, wr_rel_set);
1125 /* Setting PART_SETTING_COMPLETED confirms the partition
1126 * configuration but it only becomes effective after power
1127 * cycle, so we do not adjust the partition related settings
1128 * in the mmc struct. */
1130 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1131 EXT_CSD_PARTITION_SETTING,
1132 EXT_CSD_PARTITION_SETTING_COMPLETED);
1140 #if !CONFIG_IS_ENABLED(DM_MMC)
1141 int mmc_getcd(struct mmc *mmc)
1145 cd = board_mmc_getcd(mmc);
1148 if (mmc->cfg->ops->getcd)
1149 cd = mmc->cfg->ops->getcd(mmc);
/*
 * sd_switch() - issue SD CMD6 (SWITCH_FUNC). @mode selects check (0) or
 * set (1); @value is programmed into the 4-bit field for function
 * @group, all other groups are left at 0xf (no change). The 64-byte
 * switch status block is returned in @resp.
 * NOTE(review): sparse excerpt - some data fields set between the
 * visible lines are omitted.
 */
1158 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1161 	struct mmc_data data;
1163 	/* Switch the frequency */
1164 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1165 	cmd.resp_type = MMC_RSP_R1;
/* Bit 31 = mode; low 24 bits default each group's nibble to 0xf. */
1166 	cmd.cmdarg = (mode << 31) | 0xffffff;
1167 	cmd.cmdarg &= ~(0xf << (group * 4));
1168 	cmd.cmdarg |= value << (group * 4);
1170 	data.dest = (char *)resp;
1171 	data.blocksize = 64;
1173 	data.flags = MMC_DATA_READ;
1175 	return mmc_send_cmd(mmc, &cmd, &data);
1179 static int sd_get_capabilities(struct mmc *mmc)
1183 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1184 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1185 struct mmc_data data;
1187 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1191 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1193 if (mmc_host_is_spi(mmc))
1196 /* Read the SCR to find out if this card supports higher speeds */
1197 cmd.cmdidx = MMC_CMD_APP_CMD;
1198 cmd.resp_type = MMC_RSP_R1;
1199 cmd.cmdarg = mmc->rca << 16;
1201 err = mmc_send_cmd(mmc, &cmd, NULL);
1206 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1207 cmd.resp_type = MMC_RSP_R1;
1213 data.dest = (char *)scr;
1216 data.flags = MMC_DATA_READ;
1218 err = mmc_send_cmd(mmc, &cmd, &data);
1227 mmc->scr[0] = __be32_to_cpu(scr[0]);
1228 mmc->scr[1] = __be32_to_cpu(scr[1]);
1230 switch ((mmc->scr[0] >> 24) & 0xf) {
1232 mmc->version = SD_VERSION_1_0;
1235 mmc->version = SD_VERSION_1_10;
1238 mmc->version = SD_VERSION_2;
1239 if ((mmc->scr[0] >> 15) & 0x1)
1240 mmc->version = SD_VERSION_3;
1243 mmc->version = SD_VERSION_1_0;
1247 if (mmc->scr[0] & SD_DATA_4BIT)
1248 mmc->card_caps |= MMC_MODE_4BIT;
1250 /* Version 1.0 doesn't support switching */
1251 if (mmc->version == SD_VERSION_1_0)
1256 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1257 (u8 *)switch_status);
1262 /* The high-speed function is busy. Try again */
1263 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1267 /* If high-speed isn't supported, we return */
1268 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1269 mmc->card_caps |= MMC_CAP(SD_HS);
1271 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1272 /* Version before 3.0 don't support UHS modes */
1273 if (mmc->version < SD_VERSION_3)
1276 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1277 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1278 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1279 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1280 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1281 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1282 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1283 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1284 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1285 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1286 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1292 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1296 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1301 speed = UHS_SDR12_BUS_SPEED;
1304 speed = HIGH_SPEED_BUS_SPEED;
1306 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1308 speed = UHS_SDR12_BUS_SPEED;
1311 speed = UHS_SDR25_BUS_SPEED;
1314 speed = UHS_SDR50_BUS_SPEED;
1317 speed = UHS_DDR50_BUS_SPEED;
1320 speed = UHS_SDR104_BUS_SPEED;
1327 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1331 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * sd_select_bus_width() - set the SD card's bus width to @w (1 or 4)
 * using the ACMD6 sequence: CMD55 (APP_CMD) addressed by RCA, then
 * SD_CMD_APP_SET_BUS_WIDTH.
 * NOTE(review): sparse excerpt - the width-encoding of cmdarg and the
 * error returns are not visible here.
 */
1337 int sd_select_bus_width(struct mmc *mmc, int w)
/* SD supports only 1-bit and 4-bit widths; reject anything else. */
1342 	if ((w != 4) && (w != 1))
1345 	cmd.cmdidx = MMC_CMD_APP_CMD;
1346 	cmd.resp_type = MMC_RSP_R1;
1347 	cmd.cmdarg = mmc->rca << 16;
1349 	err = mmc_send_cmd(mmc, &cmd, NULL);
1353 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1354 	cmd.resp_type = MMC_RSP_R1;
1359 	err = mmc_send_cmd(mmc, &cmd, NULL);
1366 #if CONFIG_IS_ENABLED(MMC_WRITE)
1367 static int sd_read_ssr(struct mmc *mmc)
1369 static const unsigned int sd_au_size[] = {
1370 0, SZ_16K / 512, SZ_32K / 512,
1371 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1372 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1373 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1374 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1379 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1380 struct mmc_data data;
1382 unsigned int au, eo, et, es;
1384 cmd.cmdidx = MMC_CMD_APP_CMD;
1385 cmd.resp_type = MMC_RSP_R1;
1386 cmd.cmdarg = mmc->rca << 16;
1388 err = mmc_send_cmd(mmc, &cmd, NULL);
1392 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1393 cmd.resp_type = MMC_RSP_R1;
1397 data.dest = (char *)ssr;
1398 data.blocksize = 64;
1400 data.flags = MMC_DATA_READ;
1402 err = mmc_send_cmd(mmc, &cmd, &data);
1410 for (i = 0; i < 16; i++)
1411 ssr[i] = be32_to_cpu(ssr[i]);
1413 au = (ssr[2] >> 12) & 0xF;
1414 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1415 mmc->ssr.au = sd_au_size[au];
1416 es = (ssr[3] >> 24) & 0xFF;
1417 es |= (ssr[2] & 0xFF) << 8;
1418 et = (ssr[3] >> 18) & 0x3F;
1420 eo = (ssr[3] >> 16) & 0x3;
1421 mmc->ssr.erase_timeout = (et * 1000) / es;
1422 mmc->ssr.erase_offset = eo * 1000;
1425 pr_debug("Invalid Allocation Unit Size.\n");
1431 /* frequency bases */
1432 /* divided by 10 to be nice to platforms without floating point */
1433 static const int fbase[] = {
1440 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1441 * to platforms without floating point.
1443 static const u8 multipliers[] = {
1462 static inline int bus_width(uint cap)
1464 if (cap == MMC_MODE_8BIT)
1466 if (cap == MMC_MODE_4BIT)
1468 if (cap == MMC_MODE_1BIT)
1470 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1474 #if !CONFIG_IS_ENABLED(DM_MMC)
1475 #ifdef MMC_SUPPORTS_TUNING
1476 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1482 static void mmc_send_init_stream(struct mmc *mmc)
1486 static int mmc_set_ios(struct mmc *mmc)
1490 if (mmc->cfg->ops->set_ios)
1491 ret = mmc->cfg->ops->set_ios(mmc);
/*
 * mmc_set_clock() - request a new bus clock, clamped to the host
 * controller's [f_min, f_max] range, and optionally gate the clock
 * (@disable). The change is pushed to the driver via mmc_set_ios().
 * NOTE(review): sparse excerpt - the line storing the clamped clock
 * into mmc->clock is not visible here.
 */
1497 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1500 	if (clock > mmc->cfg->f_max)
1501 		clock = mmc->cfg->f_max;
1503 	if (clock < mmc->cfg->f_min)
1504 		clock = mmc->cfg->f_min;
1508 	mmc->clk_disable = disable;
1510 	return mmc_set_ios(mmc);
1513 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1515 mmc->bus_width = width;
1517 return mmc_set_ios(mmc);
1520 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1522 * helper function to display the capabilities in a human
1523 * friendly manner. The capabilities include bus width and
1526 void mmc_dump_capabilities(const char *text, uint caps)
1530 pr_debug("%s: widths [", text);
1531 if (caps & MMC_MODE_8BIT)
1533 if (caps & MMC_MODE_4BIT)
1535 if (caps & MMC_MODE_1BIT)
1537 pr_debug("\b\b] modes [");
1538 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1539 if (MMC_CAP(mode) & caps)
1540 pr_debug("%s, ", mmc_mode_name(mode));
1541 pr_debug("\b\b]\n");
1545 struct mode_width_tuning {
1548 #ifdef MMC_SUPPORTS_TUNING
1553 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1554 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1557 case MMC_SIGNAL_VOLTAGE_000: return 0;
1558 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1559 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1560 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1565 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1569 if (mmc->signal_voltage == signal_voltage)
1572 mmc->signal_voltage = signal_voltage;
1573 err = mmc_set_ios(mmc);
1575 pr_debug("unable to set voltage (err %d)\n", err);
1580 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1586 static const struct mode_width_tuning sd_modes_by_pref[] = {
1587 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1588 #ifdef MMC_SUPPORTS_TUNING
1591 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1592 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1597 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1601 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1605 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1610 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1612 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1615 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1620 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1624 #define for_each_sd_mode_by_pref(caps, mwt) \
1625 for (mwt = sd_modes_by_pref;\
1626 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1628 if (caps & MMC_CAP(mwt->mode))
1630 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1633 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1634 const struct mode_width_tuning *mwt;
1635 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1636 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1638 bool uhs_en = false;
1643 mmc_dump_capabilities("sd card", card_caps);
1644 mmc_dump_capabilities("host", mmc->host_caps);
1647 /* Restrict card's capabilities by what the host can do */
1648 caps = card_caps & mmc->host_caps;
1653 for_each_sd_mode_by_pref(caps, mwt) {
1656 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1657 if (*w & caps & mwt->widths) {
1658 pr_debug("trying mode %s width %d (at %d MHz)\n",
1659 mmc_mode_name(mwt->mode),
1661 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1663 /* configure the bus width (card + host) */
1664 err = sd_select_bus_width(mmc, bus_width(*w));
1667 mmc_set_bus_width(mmc, bus_width(*w));
1669 /* configure the bus mode (card) */
1670 err = sd_set_card_speed(mmc, mwt->mode);
1674 /* configure the bus mode (host) */
1675 mmc_select_mode(mmc, mwt->mode);
1676 mmc_set_clock(mmc, mmc->tran_speed, false);
1678 #ifdef MMC_SUPPORTS_TUNING
1679 /* execute tuning if needed */
1680 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1681 err = mmc_execute_tuning(mmc,
1684 pr_debug("tuning failed\n");
1690 #if CONFIG_IS_ENABLED(MMC_WRITE)
1691 err = sd_read_ssr(mmc);
1693 pr_warn("unable to read ssr\n");
1699 /* revert to a safer bus speed */
1700 mmc_select_mode(mmc, SD_LEGACY);
1701 mmc_set_clock(mmc, mmc->tran_speed, false);
1706 pr_err("unable to select a mode\n");
1711 * read the compare the part of ext csd that is constant.
1712 * This can be used to check that the transfer is working
/*
 * mmc_read_and_compare_ext_csd() - sanity-check a newly configured bus
 * mode by re-reading EXT_CSD and comparing a set of read-only fields
 * against the cached copy. A mismatch indicates the transfer itself is
 * corrupting data at the new speed/width.
 * NOTE(review): sparse excerpt - return statements not visible here.
 */
1715 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1718 	const u8 *ext_csd = mmc->ext_csd;
1719 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* Pre-v4 cards have no EXT_CSD, nothing to verify. */
1721 	if (mmc->version < MMC_VERSION_4)
1724 	err = mmc_send_ext_csd(mmc, test_csd);
1728 	/* Only compare read only fields */
1729 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1730 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1731 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1732 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1733 	    ext_csd[EXT_CSD_REV]
1734 		== test_csd[EXT_CSD_REV] &&
1735 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1736 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1737 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1738 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1744 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select the lowest signal voltage that both the card (per its
 * EXT_CSD card type bits) and the caller's allowed_mask support,
 * trying candidates from lowest to highest via ffs().
 */
1745 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1746 uint32_t allowed_mask)
/* HS200 is specified for 1.8V and 1.2V signaling only. */
1752 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
1753 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1754 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
1755 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* DDR52 cards may additionally support 3.3V signaling. */
1758 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1759 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1760 MMC_SIGNAL_VOLTAGE_180;
1761 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1762 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1765 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/*
 * The voltage bits are ordered so the lowest voltage has the
 * lowest bit; ffs() therefore picks the lowest remaining option.
 */
1769 while (card_mask & allowed_mask) {
1770 enum mmc_voltage best_match;
1772 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1773 if (!mmc_set_signal_voltage(mmc, best_match))
/* That voltage failed; drop it and try the next one up. */
1776 allowed_mask &= ~best_match;
/* Stub used when MMC_IO_VOLTAGE is disabled: voltage switching is a no-op. */
1782 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1783 uint32_t allowed_mask)
/*
 * Bus modes to try during mmc_select_mode_and_width(), listed from
 * most to least preferred. Each entry carries the bus widths usable
 * in that mode and, where needed, the tuning command to run.
 */
1789 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1790 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/* HS200 requires at least a 4-bit bus and a tuning sequence. */
1793 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1794 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1799 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1803 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1807 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1811 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate mwt over every entry of mmc_modes_by_pref whose mode is
 * present in the caps bitmask, in preference order.
 */
1815 #define for_each_mmc_mode_by_pref(caps, mwt) \
1816 for (mwt = mmc_modes_by_pref;\
1817 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1819 if (caps & MMC_CAP(mwt->mode))
/*
 * Map host capability bits to the EXT_CSD BUS_WIDTH register values,
 * widest first, with separate entries for DDR and SDR operation.
 */
1821 static const struct ext_csd_bus_width {
1825 } ext_csd_bus_width[] = {
1826 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1827 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1828 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1829 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1830 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
/*
 * Iterate ecbv over every ext_csd_bus_width entry matching the given
 * DDR flag whose width bit is set in caps, widest width first.
 */
1833 #define for_each_supported_width(caps, ddr, ecbv) \
1834 for (ecbv = ext_csd_bus_width;\
1835 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1837 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Negotiate the best bus mode and width for an (e)MMC card: walk the
 * mode/width combinations in preference order, program card and host
 * for each, and verify the result by re-reading EXT_CSD. On any
 * failure, fall back to a safe 1-bit legacy configuration and try the
 * next combination.
 */
1839 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1842 const struct mode_width_tuning *mwt;
1843 const struct ext_csd_bus_width *ecbw;
1846 mmc_dump_capabilities("mmc", card_caps);
1847 mmc_dump_capabilities("host", mmc->host_caps);
1850 /* Restrict card's capabilities by what the host can do */
1851 card_caps &= mmc->host_caps;
1853 /* Only version 4 of MMC supports wider bus widths */
1854 if (mmc->version < MMC_VERSION_4)
1857 if (!mmc->ext_csd) {
1858 pr_debug("No ext_csd found!\n"); /* this should never happen */
/* Start negotiation from the safe legacy clock rate. */
1862 mmc_set_clock(mmc, mmc->legacy_speed, false);
1864 for_each_mmc_mode_by_pref(card_caps, mwt) {
1865 for_each_supported_width(card_caps & mwt->widths,
1866 mmc_is_mode_ddr(mwt->mode), ecbw) {
1867 enum mmc_voltage old_voltage;
1868 pr_debug("trying mode %s width %d (at %d MHz)\n",
1869 mmc_mode_name(mwt->mode),
1870 bus_width(ecbw->cap),
1871 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* Remember the voltage so it can be restored on failure. */
1872 old_voltage = mmc->signal_voltage;
1873 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1874 MMC_ALL_SIGNAL_VOLTAGE);
1878 /* configure the bus width (card + host) */
1879 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1881 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1884 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1886 /* configure the bus speed (card) */
1887 err = mmc_set_card_speed(mmc, mwt->mode);
1892 * configure the bus width AND the ddr mode (card)
1893 * The host side will be taken care of in the next step
1895 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1896 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1898 ecbw->ext_csd_bits);
1903 /* configure the bus mode (host) */
1904 mmc_select_mode(mmc, mwt->mode);
1905 mmc_set_clock(mmc, mmc->tran_speed, false);
1906 #ifdef MMC_SUPPORTS_TUNING
1908 /* execute tuning if needed */
1910 err = mmc_execute_tuning(mmc, mwt->tuning);
1912 pr_debug("tuning failed\n");
1918 /* do a transfer to check the configuration */
1919 err = mmc_read_and_compare_ext_csd(mmc);
/* Error path: undo the voltage switch before retrying. */
1923 mmc_set_signal_voltage(mmc, old_voltage);
1924 /* if an error occurred, revert to a safer bus mode */
1925 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1926 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1927 mmc_select_mode(mmc, MMC_LEGACY);
1928 mmc_set_bus_width(mmc, 1);
1932 pr_err("unable to select a mode\n");
/*
 * MMC v4+ specific startup: read and cache EXT_CSD, derive the exact
 * card version, the user/boot/RPMB/GP partition capacities, and the
 * erase/write-protect group sizes. Returns 0 for SD cards and
 * pre-v4 MMC, which have none of this.
 */
1937 static int mmc_startup_v4(struct mmc *mmc)
1941 bool has_parts = false;
1942 bool part_completed;
1943 static const u32 mmc_versions[] = {
1955 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1957 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1960 /* check ext_csd version and capacity */
1961 err = mmc_send_ext_csd(mmc, ext_csd);
1965 /* store the ext csd for future reference */
1967 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1970 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD_REV indexes mmc_versions[]; reject unknown revisions. */
1972 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
1975 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
1977 if (mmc->version >= MMC_VERSION_4_2) {
1979 * According to the JEDEC Standard, the value of
1980 * ext_csd's capacity is valid if the value is more
/* SEC_CNT is stored little-endian across four EXT_CSD bytes. */
1983 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1984 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1985 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1986 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1987 capacity *= MMC_MAX_BLOCK_LEN;
/* Only trust SEC_CNT when it reports more than 2 GiB. */
1988 if ((capacity >> 20) > 2 * 1024)
1989 mmc->capacity_user = capacity;
1992 /* The partition data may be non-zero but it is only
1993 * effective if PARTITION_SETTING_COMPLETED is set in
1994 * EXT_CSD, so ignore any data if this bit is not set,
1995 * except for enabling the high-capacity group size
1996 * definition (see below).
1998 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1999 EXT_CSD_PARTITION_SETTING_COMPLETED);
2001 /* store the partition info of emmc */
2002 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2003 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2004 ext_csd[EXT_CSD_BOOT_MULT])
2005 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2006 if (part_completed &&
2007 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2008 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* Boot and RPMB partition sizes come in 128 KiB units (<< 17). */
2010 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2012 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* Each of the 4 general-purpose partitions has a 3-byte size mult. */
2014 for (i = 0; i < 4; i++) {
2015 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2016 uint mult = (ext_csd[idx + 2] << 16) +
2017 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2020 if (!part_completed)
/* GP capacity = mult * erase-group * wp-group * 512 KiB (<< 19). */
2022 mmc->capacity_gp[i] = mult;
2023 mmc->capacity_gp[i] *=
2024 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2025 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2026 mmc->capacity_gp[i] <<= 19;
2029 #ifndef CONFIG_SPL_BUILD
2030 if (part_completed) {
/* Enhanced user area size/start, same group-multiple encoding. */
2031 mmc->enh_user_size =
2032 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2033 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2034 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2035 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2036 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2037 mmc->enh_user_size <<= 19;
2038 mmc->enh_user_start =
2039 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2040 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2041 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2042 ext_csd[EXT_CSD_ENH_START_ADDR];
/* High-capacity cards address in 512-byte sectors (<< 9). */
2043 if (mmc->high_capacity)
2044 mmc->enh_user_start <<= 9;
2049 * Host needs to enable ERASE_GRP_DEF bit if device is
2050 * partitioned. This bit will be lost every time after a reset
2051 * or power off. This will affect erase size.
2055 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2056 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2059 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2060 EXT_CSD_ERASE_GROUP_DEF, 1);
/* Keep our cached copy in sync with what was just written. */
2065 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2068 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2069 #if CONFIG_IS_ENABLED(MMC_WRITE)
2070 /* Read out group size from ext_csd */
2071 mmc->erase_grp_size =
2072 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2075 * if high capacity and partition setting completed
2076 * SEC_COUNT is valid even if it is smaller than 2 GiB
2077 * JEDEC Standard JESD84-B45, 6.2.4
2079 if (mmc->high_capacity && part_completed) {
2080 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2081 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2082 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2083 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2084 capacity *= MMC_MAX_BLOCK_LEN;
2085 mmc->capacity_user = capacity;
2088 #if CONFIG_IS_ENABLED(MMC_WRITE)
2090 /* Calculate the group size from the csd value. */
2091 int erase_gsz, erase_gmul;
2093 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2094 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2095 mmc->erase_grp_size = (erase_gsz + 1)
2099 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2100 mmc->hc_wp_grp_size = 1024
2101 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2102 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2105 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
/* Error path: drop the cached EXT_CSD so stale data is never used. */
2111 mmc->ext_csd = NULL;
/*
 * Bring an identified card from Identify to Transfer state: read CID,
 * assign/fetch RCA, parse CSD (version, speeds, block lengths,
 * capacity), select the card, run v4 startup, negotiate mode/width,
 * and fill in the block device descriptor.
 */
2116 static int mmc_startup(struct mmc *mmc)
2122 struct blk_desc *bdesc;
2124 #ifdef CONFIG_MMC_SPI_CRC_ON
2125 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2126 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2127 cmd.resp_type = MMC_RSP_R1;
2129 err = mmc_send_cmd(mmc, &cmd, NULL);
2135 /* Put the Card in Identify Mode */
2136 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2137 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2138 cmd.resp_type = MMC_RSP_R2;
2141 err = mmc_send_cmd(mmc, &cmd, NULL);
2143 #ifdef CONFIG_MMC_QUIRKS
2144 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2147 * It has been seen that SEND_CID may fail on the first
2148 * attempt, let's try a few more times
2151 err = mmc_send_cmd(mmc, &cmd, NULL);
2154 } while (retries--);
/* CID is the full 128-bit response. */
2161 memcpy(mmc->cid, cmd.response, 16);
2164 * For MMC cards, set the Relative Address.
2165 * For SD cards, get the Relative Address.
2166 * This also puts the cards into Standby State
2168 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2169 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2170 cmd.cmdarg = mmc->rca << 16;
2171 cmd.resp_type = MMC_RSP_R6;
2173 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD cards report their own RCA in the R6 response. */
2179 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2182 /* Get the Card-Specific Data */
2183 cmd.cmdidx = MMC_CMD_SEND_CSD;
2184 cmd.resp_type = MMC_RSP_R2;
2185 cmd.cmdarg = mmc->rca << 16;
2187 err = mmc_send_cmd(mmc, &cmd, NULL);
2192 mmc->csd[0] = cmd.response[0];
2193 mmc->csd[1] = cmd.response[1];
2194 mmc->csd[2] = cmd.response[2];
2195 mmc->csd[3] = cmd.response[3];
/* MMC only: derive the spec version from CSD SPEC_VERS bits. */
2197 if (mmc->version == MMC_VERSION_UNKNOWN) {
2198 int version = (cmd.response[0] >> 26) & 0xf;
2202 mmc->version = MMC_VERSION_1_2;
2205 mmc->version = MMC_VERSION_1_4;
2208 mmc->version = MMC_VERSION_2_2;
2211 mmc->version = MMC_VERSION_3;
2214 mmc->version = MMC_VERSION_4;
2217 mmc->version = MMC_VERSION_1_2;
2222 /* divide frequency by 10, since the mults are 10x bigger */
2223 freq = fbase[(cmd.response[0] & 0x7)];
2224 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2226 mmc->legacy_speed = freq * mult;
2227 mmc_select_mode(mmc, MMC_LEGACY);
2229 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2230 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2231 #if CONFIG_IS_ENABLED(MMC_WRITE)
2234 mmc->write_bl_len = mmc->read_bl_len;
2236 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* High-capacity (CSD v2) encodes C_SIZE differently. */
2239 if (mmc->high_capacity) {
2240 csize = (mmc->csd[1] & 0x3f) << 16
2241 | (mmc->csd[2] & 0xffff0000) >> 16;
2244 csize = (mmc->csd[1] & 0x3ff) << 2
2245 | (mmc->csd[2] & 0xc0000000) >> 30;
2246 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2249 mmc->capacity_user = (csize + 1) << (cmult + 2);
2250 mmc->capacity_user *= mmc->read_bl_len;
2251 mmc->capacity_boot = 0;
2252 mmc->capacity_rpmb = 0;
2253 for (i = 0; i < 4; i++)
2254 mmc->capacity_gp[i] = 0;
/* Clamp block lengths to what the core can transfer. */
2256 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2257 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2259 #if CONFIG_IS_ENABLED(MMC_WRITE)
2260 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2261 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the DSR only if the card implements it and one was set. */
2264 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2265 cmd.cmdidx = MMC_CMD_SET_DSR;
2266 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2267 cmd.resp_type = MMC_RSP_NONE;
2268 if (mmc_send_cmd(mmc, &cmd, NULL))
2269 pr_warn("MMC: SET_DSR failed\n");
2272 /* Select the card, and put it into Transfer Mode */
2273 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2274 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2275 cmd.resp_type = MMC_RSP_R1;
2276 cmd.cmdarg = mmc->rca << 16;
2277 err = mmc_send_cmd(mmc, &cmd, NULL);
2284 * For SD, its erase group is always one sector
2286 #if CONFIG_IS_ENABLED(MMC_WRITE)
2287 mmc->erase_grp_size = 1;
2289 mmc->part_config = MMCPART_NOAVAILABLE;
2291 err = mmc_startup_v4(mmc);
2295 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
/* SD and MMC use different capability probing and negotiation paths. */
2300 err = sd_get_capabilities(mmc);
2303 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2305 err = mmc_get_capabilities(mmc);
2308 mmc_select_mode_and_width(mmc, mmc->card_caps);
2314 mmc->best_mode = mmc->selected_mode;
2316 /* Fix the block length for DDR mode */
2317 if (mmc->ddr_mode) {
2318 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2319 #if CONFIG_IS_ENABLED(MMC_WRITE)
2320 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2324 /* fill in device description */
2325 bdesc = mmc_get_blk_desc(mmc);
2329 bdesc->blksz = mmc->read_bl_len;
2330 bdesc->log2blksz = LOG2(bdesc->blksz);
2331 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2332 #if !defined(CONFIG_SPL_BUILD) || \
2333 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2334 !defined(CONFIG_USE_TINY_PRINTF))
/* Manufacturer/serial/name/revision strings are decoded from the CID. */
2335 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2336 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2337 (mmc->cid[3] >> 16) & 0xffff);
2338 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2339 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2340 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2341 (mmc->cid[2] >> 24) & 0xff);
2342 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2343 (mmc->cid[2] >> 16) & 0xf);
2345 bdesc->vendor[0] = 0;
2346 bdesc->product[0] = 0;
2347 bdesc->revision[0] = 0;
2349 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
/*
 * Send SD CMD8 (SEND_IF_COND) to probe for an SD v2.00+ card. A card
 * that echoes the 0xaa check pattern is marked SD_VERSION_2.
 */
2356 static int mmc_send_if_cond(struct mmc *mmc)
2361 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2362 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2363 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2364 cmd.resp_type = MMC_RSP_R7;
2366 err = mmc_send_cmd(mmc, &cmd, NULL);
/* The card must echo back the 0xaa check pattern. */
2371 if ((cmd.response[0] & 0xff) != 0xaa)
2374 mmc->version = SD_VERSION_2;
2379 #if !CONFIG_IS_ENABLED(DM_MMC)
2380 /* Board-specific MMC power initialization; weak no-op by default. */
2381 __weak void board_mmc_power_init(void)
/*
 * Look up the card (vmmc) and I/O (vqmmc) supply regulators via the
 * device tree under driver model; without DM, defer to board code.
 * Missing supplies are logged at debug level and are not fatal.
 */
2386 static int mmc_power_init(struct mmc *mmc)
2388 #if CONFIG_IS_ENABLED(DM_MMC)
2389 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2392 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2395 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2397 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2398 &mmc->vqmmc_supply);
2400 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2402 #else /* !CONFIG_DM_MMC */
2404 * Driver model should use a regulator, as above, rather than calling
2405 * out to board code.
2407 board_mmc_power_init();
2413 * put the host in the initial state:
2414 * - turn on Vdd (card power supply)
2415 * - configure the bus width and clock to minimal values
2417 static void mmc_set_initial_state(struct mmc *mmc)
2421 /* First try to set 3.3V. If it fails set to 1.8V */
2422 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2424 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2426 pr_warn("mmc: failed to set signal voltage\n");
/* Legacy mode, 1-bit bus, minimum clock: safe for any card. */
2428 mmc_select_mode(mmc, MMC_LEGACY);
2429 mmc_set_bus_width(mmc, 1);
2430 mmc_set_clock(mmc, 0, false);
/* Enable the card power supply (vmmc) if one was found via DM. */
2433 static int mmc_power_on(struct mmc *mmc)
2435 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2436 if (mmc->vmmc_supply) {
2437 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2440 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the card power supply (vmmc), if any. */
2448 static int mmc_power_off(struct mmc *mmc)
2450 mmc_set_clock(mmc, 0, true);
2451 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2452 if (mmc->vmmc_supply) {
2453 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2456 pr_debug("Error disabling VMMC supply\n");
/* Power the card off, wait, then back on — needed to retry UHS init. */
2464 static int mmc_power_cycle(struct mmc *mmc)
2468 ret = mmc_power_off(mmc);
2472 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2473 * to be on the safer side.
2476 return mmc_power_on(mmc);
/*
 * First phase of card initialization: detect card presence, power up
 * the host, reset the card (CMD0), and issue the SD/MMC operating
 * condition commands. Completion (CSD parsing etc.) happens later in
 * mmc_complete_init(), so callers can overlap the OCR busy-wait.
 */
2479 int mmc_start_init(struct mmc *mmc)
2482 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2486 * all hosts are capable of 1 bit bus-width and able to use the legacy
2489 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2490 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2492 #if !defined(CONFIG_MMC_BROKEN_CD)
2493 /* we pretend there's no card when init is NULL */
2494 no_card = mmc_getcd(mmc) == 0;
2498 #if !CONFIG_IS_ENABLED(DM_MMC)
2499 no_card = no_card || (mmc->cfg->ops->init == NULL);
2503 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2504 pr_err("MMC: no card present\n");
2512 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2513 mmc_adapter_card_type_ident();
2515 err = mmc_power_init(mmc);
2519 #ifdef CONFIG_MMC_QUIRKS
2520 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2521 MMC_QUIRK_RETRY_SEND_CID;
2524 err = mmc_power_cycle(mmc);
2527 * if power cycling is not supported, we should not try
2528 * to use the UHS modes, because we wouldn't be able to
2529 * recover from an error during the UHS initialization.
2531 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2533 mmc->host_caps &= ~UHS_CAPS;
2534 err = mmc_power_on(mmc);
2539 #if CONFIG_IS_ENABLED(DM_MMC)
2540 /* The device has already been probed ready for use */
2542 /* made sure it's not NULL earlier */
2543 err = mmc->cfg->ops->init(mmc);
2550 mmc_set_initial_state(mmc);
2551 mmc_send_init_stream(mmc);
2553 /* Reset the Card */
2554 err = mmc_go_idle(mmc);
2559 /* The internal partition reset to user partition(0) at every CMD0*/
2560 mmc_get_blk_desc(mmc)->hwpart = 0;
2562 /* Test for SD version 2 */
2563 err = mmc_send_if_cond(mmc);
2565 /* Now try to get the SD card's operating condition */
2566 err = sd_send_op_cond(mmc, uhs_en);
/* UHS negotiation failed: power cycle and retry without UHS. */
2567 if (err && uhs_en) {
2569 mmc_power_cycle(mmc);
2573 /* If the command timed out, we check for an MMC card */
2574 if (err == -ETIMEDOUT) {
2575 err = mmc_send_op_cond(mmc);
2578 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2579 pr_err("Card did not respond to voltage select!\n");
/* Mark init as started so mmc_init() knows to only complete it. */
2586 mmc->init_in_progress = 1;
/*
 * Second phase of initialization: finish any pending operating
 * condition handshake, then run the full startup sequence.
 */
2591 static int mmc_complete_init(struct mmc *mmc)
2595 mmc->init_in_progress = 0;
2596 if (mmc->op_cond_pending)
2597 err = mmc_complete_op_cond(mmc);
2600 err = mmc_startup(mmc);
/*
 * Full card initialization: start init if it is not already in
 * progress, then complete it, reporting the elapsed time.
 */
2608 int mmc_init(struct mmc *mmc)
2611 __maybe_unused unsigned start;
2612 #if CONFIG_IS_ENABLED(DM_MMC)
2613 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2620 start = get_timer(0);
2622 if (!mmc->init_in_progress)
2623 err = mmc_start_init(mmc);
2626 err = mmc_complete_init(mmc);
2628 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
/*
 * Set the Driver Stage Register value to program during startup.
 * NOTE(review): body elided in this view — presumably stores val in
 * mmc->dsr for the SET_DSR command issued by mmc_startup(); confirm.
 */
2633 int mmc_set_dsr(struct mmc *mmc, u16 val)
2639 /* CPU-specific MMC initialization; weak no-op overridable by SoC code. */
2640 __weak int cpu_mmc_init(bd_t *bis)
2645 /* Board-specific MMC initialization; weak no-op overridable by boards. */
2646 __weak int board_mmc_init(bd_t *bis)
/* Flag this device for early (pre-command) initialization. */
2651 void mmc_set_preinit(struct mmc *mmc, int preinit)
2653 mmc->preinit = preinit;
2656 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Driver-model probe: bind MMC devices in sequence order, then probe
 * each one. Individual probe failures are reported but do not abort
 * the loop.
 */
2657 static int mmc_probe(bd_t *bis)
2661 struct udevice *dev;
2663 ret = uclass_get(UCLASS_MMC, &uc);
2668 * Try to add them in sequence order. Really with driver model we
2669 * should allow holes, but the current MMC list does not allow that.
2670 * So if we request 0, 1, 3 we will get 0, 1, 2.
2672 for (i = 0; ; i++) {
2673 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2677 uclass_foreach_dev(dev, uc) {
2678 ret = device_probe(dev);
2680 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM probe: just run the board's MMC registration hook. */
2686 static int mmc_probe(bd_t *bis)
2688 if (board_mmc_init(bis) < 0)
/*
 * One-time global MMC subsystem initialization: probe all devices and
 * (outside SPL) print the device list. Guarded so repeat calls are
 * no-ops.
 */
2695 int mmc_initialize(bd_t *bis)
2697 static int initialized = 0;
2699 if (initialized) /* Avoid initializing mmc multiple times */
2703 #if !CONFIG_IS_ENABLED(BLK)
2704 #if !CONFIG_IS_ENABLED(MMC_TINY)
2708 ret = mmc_probe(bis);
2712 #ifndef CONFIG_SPL_BUILD
2713 print_mmc_devices(',');
2720 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Permanently enable manual background operations (BKOPS_EN) on an
 * eMMC device, after checking via EXT_CSD that the device supports
 * them and that they are not already enabled.
 */
2721 int mmc_set_bkops_enable(struct mmc *mmc)
2724 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2726 err = mmc_send_ext_csd(mmc, ext_csd);
2728 puts("Could not get ext_csd register values\n");
2732 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2733 puts("Background operations not supported on device\n");
2734 return -EMEDIUMTYPE;
2737 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2738 puts("Background operations already enabled\n");
/* BKOPS_EN bit 0 is one-time programmable — this cannot be undone. */
2742 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2744 puts("Failed to enable manual background operations\n");
2748 puts("Enabled manual background operations\n");