2 * Copyright 2008, Freescale Semiconductor, Inc
5 * Based vaguely on the Linux code
7 * SPDX-License-Identifier: GPL-2.0+
14 #include <dm/device-internal.h>
18 #include <power/regulator.h>
21 #include <linux/list.h>
23 #include "mmc_private.h"
/*
 * Map of the 4-bit AU_SIZE field from the SD Status register to the
 * allocation-unit size in 512-byte sectors; index 0 means "not defined".
 * NOTE(review): this capture embeds original line numbers and elides
 * intervening lines (e.g. the table's closing brace); code is kept verbatim.
 */
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
/* Forward declarations for static helpers defined later in this file. */
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
/*
 * MMC_TINY support: a single statically allocated device, so device
 * lookup ignores dev_num and always resolves to &mmc_static.
 */
37 #if CONFIG_IS_ENABLED(MMC_TINY)
38 static struct mmc mmc_static;
/* Return the one static device (MMC_TINY supports exactly one). */
39 struct mmc *find_mmc_device(int dev_num)
/* Mark the static device for pre-initialization where the board needs it. */
44 void mmc_do_preinit(void)
46 struct mmc *m = &mmc_static;
47 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
48 mmc_set_preinit(m, 1);
/* Accessor for the block descriptor embedded in struct mmc. */
54 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
56 return &mmc->block_dev;
/* Non-driver-model fallbacks for card-detect / write-protect queries. */
60 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Stub: without DM there is no generic way to watch the DAT0 line. */
62 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
/* Weak default; boards may override to report write-protect state. */
67 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * Query write-protect: board hook first, then the host controller's
 * getwp op if the board did not give an answer.
 */
72 int mmc_getwp(struct mmc *mmc)
76 wp = board_mmc_getwp(mmc);
79 if (mmc->cfg->ops->getwp)
80 wp = mmc->cfg->ops->getwp(mmc);
/* Weak default; boards may override to report card-detect state. */
88 __weak int board_mmc_getcd(struct mmc *mmc)
/* Command tracing, compiled in only with CONFIG_MMC_TRACE. */
94 #ifdef CONFIG_MMC_TRACE
/* Log a command index and argument before it is sent to the card. */
95 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
97 printf("CMD_SEND:%d\n", cmd->cmdidx);
98 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
/* Log the driver return code and decode the response by resp_type. */
101 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
107 printf("\t\tRET\t\t\t %d\n", ret);
109 switch (cmd->resp_type) {
111 printf("\t\tMMC_RSP_NONE\n");
114 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
118 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
/* R2 (CID/CSD) is 128 bits: print all four response words. */
122 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
124 printf("\t\t \t\t 0x%08X \n",
126 printf("\t\t \t\t 0x%08X \n",
128 printf("\t\t \t\t 0x%08X \n",
/* Also dump the R2 response as raw bytes, highest byte first. */
131 printf("\t\t\t\t\tDUMPING DATA\n");
132 for (i = 0; i < 4; i++) {
134 printf("\t\t\t\t\t%03d - ", i*4);
135 ptr = (u8 *)&cmd->response[i];
137 for (j = 0; j < 4; j++)
138 printf("%02X ", *ptr--);
143 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
147 printf("\t\tERROR MMC rsp not supported\n");
/* Extract and print the CURRENT_STATE field (bits 12:9) of R1 status. */
153 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
157 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
158 printf("CURR STATE:%d\n", status);
162 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/* Human-readable name for a bus_mode value; "Unknown mode" if out of range. */
163 const char *mmc_mode_name(enum bus_mode mode)
165 static const char *const names[] = {
166 [MMC_LEGACY] = "MMC legacy",
167 [SD_LEGACY] = "SD Legacy",
168 [MMC_HS] = "MMC High Speed (26MHz)",
169 [SD_HS] = "SD High Speed (50MHz)",
170 [UHS_SDR12] = "UHS SDR12 (25MHz)",
171 [UHS_SDR25] = "UHS SDR25 (50MHz)",
172 [UHS_SDR50] = "UHS SDR50 (100MHz)",
173 [UHS_SDR104] = "UHS SDR104 (208MHz)",
174 [UHS_DDR50] = "UHS DDR50 (50MHz)",
175 [MMC_HS_52] = "MMC High Speed (52MHz)",
176 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
177 [MMC_HS_200] = "HS200 (200MHz)",
180 if (mode >= MMC_MODES_END)
181 return "Unknown mode";
/*
 * Nominal bus clock for a mode. MMC_LEGACY is special-cased because its
 * speed is device-specific (mmc->legacy_speed) rather than fixed.
 */
187 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
189 static const int freqs[] = {
190 [SD_LEGACY] = 25000000,
193 [UHS_SDR12] = 25000000,
194 [UHS_SDR25] = 50000000,
195 [UHS_SDR50] = 100000000,
196 [UHS_SDR104] = 208000000,
197 [UHS_DDR50] = 50000000,
198 [MMC_HS_52] = 52000000,
199 [MMC_DDR_52] = 52000000,
200 [MMC_HS_200] = 200000000,
203 if (mode == MMC_LEGACY)
204 return mmc->legacy_speed;
205 else if (mode >= MMC_MODES_END)
/*
 * Record the chosen bus mode on the host side: selected mode, target
 * clock and whether the mode is a DDR one.
 */
211 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
213 mmc->selected_mode = mode;
214 mmc->tran_speed = mmc_mode2freq(mmc, mode);
215 mmc->ddr_mode = mmc_is_mode_ddr(mode);
216 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
217 mmc->tran_speed / 1000000);
221 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Send one command via the host ops, wrapped with optional tracing. */
222 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
226 mmmc_trace_before_send(mmc, cmd);
227 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
228 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * Poll CMD13 (SEND_STATUS) until the card reports ready-for-data and has
 * left the programming state, the retry budget runs out, or the timeout
 * expires. Status error bits abort with an error print.
 */
234 int mmc_send_status(struct mmc *mmc, int timeout)
237 int err, retries = 5;
239 cmd.cmdidx = MMC_CMD_SEND_STATUS;
240 cmd.resp_type = MMC_RSP_R1;
/* SPI hosts do not use an RCA in the argument. */
241 if (!mmc_host_is_spi(mmc))
242 cmd.cmdarg = mmc->rca << 16;
245 err = mmc_send_cmd(mmc, &cmd, NULL);
247 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
248 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
252 if (cmd.response[0] & MMC_STATUS_MASK) {
253 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
254 printf("Status Error: 0x%08X\n",
259 } else if (--retries < 0)
268 mmc_trace_state(mmc, &cmd);
270 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
271 printf("Timeout waiting card ready\n");
/* Issue CMD16 (SET_BLOCKLEN), with an optional retry quirk. */
279 int mmc_set_blocklen(struct mmc *mmc, int len)
287 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
288 cmd.resp_type = MMC_RSP_R1;
291 err = mmc_send_cmd(mmc, &cmd, NULL);
293 #ifdef CONFIG_MMC_QUIRKS
294 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
297 * It has been seen that SET_BLOCKLEN may fail on the first
298 * attempt, so let's retry a few more times
301 err = mmc_send_cmd(mmc, &cmd, NULL);
/*
 * Read blkcnt blocks starting at LBA 'start' into dst, choosing
 * single- vs multiple-block read and byte- vs block-addressing
 * (high_capacity) as appropriate. Multi-block reads are terminated
 * with CMD12 (STOP_TRANSMISSION).
 */
311 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
315 struct mmc_data data;
318 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
320 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
/* High-capacity cards take a block address, others a byte address. */
322 if (mmc->high_capacity)
325 cmd.cmdarg = start * mmc->read_bl_len;
327 cmd.resp_type = MMC_RSP_R1;
330 data.blocks = blkcnt;
331 data.blocksize = mmc->read_bl_len;
332 data.flags = MMC_DATA_READ;
334 if (mmc_send_cmd(mmc, &cmd, &data))
338 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
340 cmd.resp_type = MMC_RSP_R1b;
341 if (mmc_send_cmd(mmc, &cmd, NULL)) {
342 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
343 printf("mmc fail to send stop cmd\n");
/*
 * Block-device read entry point. With BLK enabled it is a udevice op;
 * otherwise it takes a blk_desc directly. Validates the range against
 * the device size, selects the hw partition, then reads in chunks of
 * at most cfg->b_max blocks via mmc_read_blocks().
 */
352 #if CONFIG_IS_ENABLED(BLK)
353 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
355 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
359 #if CONFIG_IS_ENABLED(BLK)
360 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
362 int dev_num = block_dev->devnum;
364 lbaint_t cur, blocks_todo = blkcnt;
369 struct mmc *mmc = find_mmc_device(dev_num);
373 if (CONFIG_IS_ENABLED(MMC_TINY))
374 err = mmc_switch_part(mmc, block_dev->hwpart);
376 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads extending beyond the device capacity. */
381 if ((start + blkcnt) > block_dev->lba) {
382 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
383 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
384 start + blkcnt, block_dev->lba);
389 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
390 debug("%s: Failed to set blocklen\n", __func__);
/* Cap each transfer at the host's maximum block count. */
395 cur = (blocks_todo > mmc->cfg->b_max) ?
396 mmc->cfg->b_max : blocks_todo;
397 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
398 debug("%s: Failed to read blocks\n", __func__);
403 dst += cur * mmc->read_bl_len;
404 } while (blocks_todo > 0);
/* Reset the card to idle state with CMD0 (no response expected). */
409 static int mmc_go_idle(struct mmc *mmc)
416 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
418 cmd.resp_type = MMC_RSP_NONE;
420 err = mmc_send_cmd(mmc, &cmd, NULL);
/*
 * Perform the SD voltage switch sequence (CMD11): ask the card to move
 * to 1.8 V signalling, gate the clock while the host switches its own
 * signalling voltage, then verify the card released DAT[0].
 */
430 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
436 * Send CMD11 only if the request is to switch the card to
/* 3.3 V needs no CMD11 handshake — just set the host voltage. */
439 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
440 return mmc_set_signal_voltage(mmc, signal_voltage);
442 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
444 cmd.resp_type = MMC_RSP_R1;
446 err = mmc_send_cmd(mmc, &cmd, NULL);
450 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
454 * The card should drive cmd and dat[0:3] low immediately
455 * after the response of cmd11, but wait 100 us to be sure
457 err = mmc_wait_dat0(mmc, 0, 100);
464 * During a signal voltage level switch, the clock must be gated
465 * for 5 ms according to the SD spec
467 mmc_set_clock(mmc, mmc->clock, true);
469 err = mmc_set_signal_voltage(mmc, signal_voltage);
473 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
475 mmc_set_clock(mmc, mmc->clock, false);
478 * Failure to switch is indicated by the card holding
479 * dat[0:3] low. Wait for at least 1 ms according to spec
481 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * SD initialization: loop ACMD41 (CMD55 + SD_CMD_APP_SEND_OP_COND) until
 * the card reports power-up complete (OCR_BUSY), optionally requesting
 * 1.8 V signalling (OCR_S18R) when the host supports UHS. Records card
 * version, OCR and high-capacity (OCR_HCS) status.
 */
490 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
497 cmd.cmdidx = MMC_CMD_APP_CMD;
498 cmd.resp_type = MMC_RSP_R1;
501 err = mmc_send_cmd(mmc, &cmd, NULL);
506 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
507 cmd.resp_type = MMC_RSP_R3;
510 * Most cards do not answer if some reserved bits
511 * in the ocr are set. However, some controllers
512 * can set bit 7 (reserved for low voltages), but
513 * how to manage low voltages SD card is not yet
516 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
517 (mmc->cfg->voltages & 0xff8000);
/* Only SD 2.0+ cards may be told the host supports high capacity. */
519 if (mmc->version == SD_VERSION_2)
520 cmd.cmdarg |= OCR_HCS;
523 cmd.cmdarg |= OCR_S18R;
525 err = mmc_send_cmd(mmc, &cmd, NULL);
530 if (cmd.response[0] & OCR_BUSY)
539 if (mmc->version != SD_VERSION_2)
540 mmc->version = SD_VERSION_1_0;
542 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
543 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
544 cmd.resp_type = MMC_RSP_R3;
547 err = mmc_send_cmd(mmc, &cmd, NULL);
553 mmc->ocr = cmd.response[0];
/* Card accepted the 1.8 V request: run the CMD11 switch sequence. */
555 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
557 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
562 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * One CMD1 (SEND_OP_COND) iteration for eMMC; with use_arg set, echo
 * back the voltage window and access mode from the previous OCR.
 */
568 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
573 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
574 cmd.resp_type = MMC_RSP_R3;
576 if (use_arg && !mmc_host_is_spi(mmc))
577 cmd.cmdarg = OCR_HCS |
578 (mmc->cfg->voltages &
579 (mmc->ocr & OCR_VOLTAGE_MASK)) |
580 (mmc->ocr & OCR_ACCESS_MODE);
582 err = mmc_send_cmd(mmc, &cmd, NULL);
585 mmc->ocr = cmd.response[0];
/*
 * Start eMMC init: probe with CMD1; if the card is still busy, defer
 * completion (op_cond_pending) to mmc_complete_op_cond().
 */
589 static int mmc_send_op_cond(struct mmc *mmc)
593 /* Some cards seem to need this */
596 /* Ask the card for its capabilities */
597 for (i = 0; i < 2; i++) {
598 err = mmc_send_op_cond_iter(mmc, i != 0);
602 /* exit if not busy (flag seems to be inverted) */
603 if (mmc->ocr & OCR_BUSY)
606 mmc->op_cond_pending = 1;
/*
 * Finish eMMC init: keep issuing CMD1 until OCR_BUSY is set or the
 * timeout elapses, then record OCR / version / high-capacity state.
 */
610 static int mmc_complete_op_cond(struct mmc *mmc)
617 mmc->op_cond_pending = 0;
618 if (!(mmc->ocr & OCR_BUSY)) {
619 /* Some cards seem to need this */
622 start = get_timer(0);
624 err = mmc_send_op_cond_iter(mmc, 1);
627 if (mmc->ocr & OCR_BUSY)
629 if (get_timer(start) > timeout)
635 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
636 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
637 cmd.resp_type = MMC_RSP_R3;
640 err = mmc_send_cmd(mmc, &cmd, NULL);
645 mmc->ocr = cmd.response[0];
648 mmc->version = MMC_VERSION_UNKNOWN;
650 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * Read the 512-byte EXT_CSD register (CMD8 / SEND_EXT_CSD) into the
 * caller-provided buffer.
 */
657 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
660 struct mmc_data data;
663 /* Read the EXT_CSD register as a single data block */
664 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
665 cmd.resp_type = MMC_RSP_R1;
668 data.dest = (char *)ext_csd;
670 data.blocksize = MMC_MAX_BLOCK_LEN;
671 data.flags = MMC_DATA_READ;
673 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * Write one byte of EXT_CSD via CMD6 (SWITCH) in write-byte mode and
 * wait for the card to become ready again, retrying on failure.
 */
678 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
685 cmd.cmdidx = MMC_CMD_SWITCH;
686 cmd.resp_type = MMC_RSP_R1b;
687 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
691 while (retries > 0) {
692 ret = mmc_send_cmd(mmc, &cmd, NULL);
694 /* Waiting for the ready status */
696 ret = mmc_send_status(mmc, timeout);
/*
 * Program EXT_CSD HS_TIMING for the requested bus mode and, for the
 * HS/HS-52 modes, read EXT_CSD back to confirm the switch took effect.
 */
707 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
712 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
718 speed_bits = EXT_CSD_TIMING_HS;
721 speed_bits = EXT_CSD_TIMING_HS200;
724 speed_bits = EXT_CSD_TIMING_LEGACY;
729 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
734 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
735 /* Now check to see that it worked */
736 err = mmc_send_ext_csd(mmc, test_csd);
740 /* No high-speed support */
741 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Derive the card's capability mask (bus widths and speed modes) from
 * the cached EXT_CSD CARD_TYPE field. SPI and pre-v4 cards get only
 * the 1-bit legacy capability.
 */
748 static int mmc_get_capabilities(struct mmc *mmc)
750 u8 *ext_csd = mmc->ext_csd;
753 mmc->card_caps = MMC_MODE_1BIT;
755 if (mmc_host_is_spi(mmc))
758 /* Only version 4 supports high-speed */
759 if (mmc->version < MMC_VERSION_4)
763 printf("No ext_csd found!\n"); /* this should never happen */
767 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
769 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
770 mmc->cardtype = cardtype;
772 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
773 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
774 mmc->card_caps |= MMC_MODE_HS200;
776 if (cardtype & EXT_CSD_CARD_TYPE_52) {
777 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
778 mmc->card_caps |= MMC_MODE_DDR_52MHz;
779 mmc->card_caps |= MMC_MODE_HS_52MHz;
781 if (cardtype & EXT_CSD_CARD_TYPE_26)
782 mmc->card_caps |= MMC_MODE_HS;
/*
 * Select the capacity matching the hardware partition: user area,
 * boot partitions, RPMB, or one of the four general-purpose areas;
 * then refresh the block descriptor's LBA count.
 */
787 static int mmc_set_capacity(struct mmc *mmc, int part_num)
791 mmc->capacity = mmc->capacity_user;
795 mmc->capacity = mmc->capacity_boot;
798 mmc->capacity = mmc->capacity_rpmb;
804 mmc->capacity = mmc->capacity_gp[part_num - 4];
810 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * Before switching to a boot/special partition, drop out of any bus
 * mode (e.g. HS200) that is not allowed for that partition; otherwise
 * prefer the best available mode.
 */
815 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
820 if (part_num & PART_ACCESS_MASK)
821 forbidden = MMC_CAP(MMC_HS_200);
823 if (MMC_CAP(mmc->selected_mode) & forbidden) {
824 debug("selected mode (%s) is forbidden for part %d\n",
825 mmc_mode_name(mmc->selected_mode), part_num);
827 } else if (mmc->selected_mode != mmc->best_mode) {
828 debug("selected mode is not optimal\n");
833 return mmc_select_mode_and_width(mmc,
834 mmc->card_caps & ~forbidden);
/*
 * Switch the active hardware partition by rewriting the access bits of
 * EXT_CSD PARTITION_CONFIG, then update capacity/hwpart bookkeeping.
 */
839 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
843 ret = mmc_boot_part_access_chk(mmc, part_num);
847 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
848 (mmc->part_config & ~PART_ACCESS_MASK)
849 | (part_num & PART_ACCESS_MASK));
852 * Set the capacity if the switch succeeded or was intended
853 * to return to representing the raw device.
855 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
856 ret = mmc_set_capacity(mmc, part_num);
857 mmc_get_blk_desc(mmc)->hwpart = part_num;
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP
 * partitions, write reliability). Validates the requested layout
 * against the card's limits, then — depending on 'mode' — only checks,
 * writes the EXT_CSD configuration, or also marks partitioning
 * complete (which is irreversible and takes effect after power cycle).
 */
863 int mmc_hwpart_config(struct mmc *mmc,
864 const struct mmc_hwpart_conf *conf,
865 enum mmc_hwpart_conf_mode mode)
871 u32 max_enh_size_mult;
872 u32 tot_enh_size_mult = 0;
875 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
877 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
/* Partitioning needs eMMC >= 4.41 with partitioning support. */
880 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
881 printf("eMMC >= 4.4 required for enhanced user data area\n");
885 if (!(mmc->part_support & PART_SUPPORT)) {
886 printf("Card does not support partitioning\n");
890 if (!mmc->hc_wp_grp_size) {
891 printf("Card does not define HC WP group size\n");
895 /* check partition alignment and total enhanced size */
896 if (conf->user.enh_size) {
897 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
898 conf->user.enh_start % mmc->hc_wp_grp_size) {
899 printf("User data enhanced area not HC WP group "
903 part_attrs |= EXT_CSD_ENH_USR;
904 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
/* Byte-addressed cards need the start expressed in bytes (<< 9). */
905 if (mmc->high_capacity) {
906 enh_start_addr = conf->user.enh_start;
908 enh_start_addr = (conf->user.enh_start << 9);
914 tot_enh_size_mult += enh_size_mult;
916 for (pidx = 0; pidx < 4; pidx++) {
917 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
918 printf("GP%i partition not HC WP group size "
919 "aligned\n", pidx+1);
922 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
923 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
924 part_attrs |= EXT_CSD_ENH_GP(pidx);
925 tot_enh_size_mult += gp_size_mult[pidx];
929 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
930 printf("Card does not support enhanced attribute\n");
934 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field. */
939 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
940 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
941 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
942 if (tot_enh_size_mult > max_enh_size_mult) {
943 printf("Total enhanced size exceeds maximum (%u > %u)\n",
944 tot_enh_size_mult, max_enh_size_mult);
948 /* The default value of EXT_CSD_WR_REL_SET is device
949 * dependent, the values can only be changed if the
950 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
951 * changed only once and before partitioning is completed. */
952 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
953 if (conf->user.wr_rel_change) {
954 if (conf->user.wr_rel_set)
955 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
957 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
959 for (pidx = 0; pidx < 4; pidx++) {
960 if (conf->gp_part[pidx].wr_rel_change) {
961 if (conf->gp_part[pidx].wr_rel_set)
962 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
964 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
968 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
969 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
970 puts("Card does not support host controlled partition write "
971 "reliability settings\n");
/* Partitioning may only be configured once per device. */
975 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
976 EXT_CSD_PARTITION_SETTING_COMPLETED) {
977 printf("Card already partitioned\n");
981 if (mode == MMC_HWPART_CONF_CHECK)
984 /* Partitioning requires high-capacity size definitions */
985 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
986 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
987 EXT_CSD_ERASE_GROUP_DEF, 1);
992 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
994 /* update erase group size to be high-capacity */
995 mmc->erase_grp_size =
996 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1000 /* all OK, write the configuration */
1001 for (i = 0; i < 4; i++) {
1002 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1003 EXT_CSD_ENH_START_ADDR+i,
1004 (enh_start_addr >> (i*8)) & 0xFF);
1008 for (i = 0; i < 3; i++) {
1009 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1010 EXT_CSD_ENH_SIZE_MULT+i,
1011 (enh_size_mult >> (i*8)) & 0xFF);
1015 for (pidx = 0; pidx < 4; pidx++) {
1016 for (i = 0; i < 3; i++) {
1017 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1018 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1019 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1024 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1025 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1029 if (mode == MMC_HWPART_CONF_SET)
1032 /* The WR_REL_SET is a write-once register but shall be
1033 * written before setting PART_SETTING_COMPLETED. As it is
1034 * write-once we can only write it when completing the
1036 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1037 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1038 EXT_CSD_WR_REL_SET, wr_rel_set);
1043 /* Setting PART_SETTING_COMPLETED confirms the partition
1044 * configuration but it only becomes effective after power
1045 * cycle, so we do not adjust the partition related settings
1046 * in the mmc struct. */
1048 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1049 EXT_CSD_PARTITION_SETTING,
1050 EXT_CSD_PARTITION_SETTING_COMPLETED);
1057 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Card-detect: board hook first, then the host controller's getcd op. */
1058 int mmc_getcd(struct mmc *mmc)
1062 cd = board_mmc_getcd(mmc);
1065 if (mmc->cfg->ops->getcd)
1066 cd = mmc->cfg->ops->getcd(mmc);
/*
 * Issue SD CMD6 (SWITCH_FUNC): set 'value' for function group 'group'
 * in check (mode=0) or set (mode=1) mode, reading the 64-byte switch
 * status block into 'resp'.
 */
1075 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1078 struct mmc_data data;
1080 /* Switch the frequency */
1081 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1082 cmd.resp_type = MMC_RSP_R1;
/* All groups default to 0xf (no change); then patch the target group. */
1083 cmd.cmdarg = (mode << 31) | 0xffffff;
1084 cmd.cmdarg &= ~(0xf << (group * 4));
1085 cmd.cmdarg |= value << (group * 4);
1087 data.dest = (char *)resp;
1088 data.blocksize = 64;
1090 data.flags = MMC_DATA_READ;
1092 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * Probe an SD card's capabilities: read the SCR to determine the spec
 * version and 4-bit support, then use CMD6 in check mode to discover
 * high-speed and (for SD 3.0+) UHS bus speed modes.
 */
1096 static int sd_get_capabilities(struct mmc *mmc)
1100 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1101 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1102 struct mmc_data data;
1106 mmc->card_caps = MMC_MODE_1BIT;
1108 if (mmc_host_is_spi(mmc))
1111 /* Read the SCR to find out if this card supports higher speeds */
1112 cmd.cmdidx = MMC_CMD_APP_CMD;
1113 cmd.resp_type = MMC_RSP_R1;
1114 cmd.cmdarg = mmc->rca << 16;
1116 err = mmc_send_cmd(mmc, &cmd, NULL);
1121 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1122 cmd.resp_type = MMC_RSP_R1;
1128 data.dest = (char *)scr;
1131 data.flags = MMC_DATA_READ;
1133 err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is big-endian on the wire; convert before decoding. */
1142 mmc->scr[0] = __be32_to_cpu(scr[0]);
1143 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC field (SCR bits 59:56) selects the spec version. */
1145 switch ((mmc->scr[0] >> 24) & 0xf) {
1147 mmc->version = SD_VERSION_1_0;
1150 mmc->version = SD_VERSION_1_10;
1153 mmc->version = SD_VERSION_2;
1154 if ((mmc->scr[0] >> 15) & 0x1)
1155 mmc->version = SD_VERSION_3;
1158 mmc->version = SD_VERSION_1_0;
1162 if (mmc->scr[0] & SD_DATA_4BIT)
1163 mmc->card_caps |= MMC_MODE_4BIT;
1165 /* Version 1.0 doesn't support switching */
1166 if (mmc->version == SD_VERSION_1_0)
1171 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1172 (u8 *)switch_status);
1177 /* The high-speed function is busy. Try again */
1178 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1182 /* If high-speed isn't supported, we return */
1183 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1184 mmc->card_caps |= MMC_CAP(SD_HS);
1186 /* Versions before 3.0 don't support UHS modes */
1187 if (mmc->version < SD_VERSION_3)
1190 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1191 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1192 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1193 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1194 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1195 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1196 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1197 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1198 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1199 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1200 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * Switch the SD card to the access mode matching 'mode' via CMD6 in
 * set mode, and verify the card accepted the requested function.
 */
1205 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1209 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1215 speed = UHS_SDR12_BUS_SPEED;
1219 speed = UHS_SDR25_BUS_SPEED;
1222 speed = UHS_SDR50_BUS_SPEED;
1225 speed = UHS_DDR50_BUS_SPEED;
1228 speed = UHS_SDR104_BUS_SPEED;
1234 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* The function group 1 result must echo the requested speed. */
1238 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
/*
 * Set the SD card's bus width (1 or 4 only) with ACMD6
 * (SET_BUS_WIDTH); the host side is configured separately.
 */
1244 int sd_select_bus_width(struct mmc *mmc, int w)
1249 if ((w != 4) && (w != 1))
1252 cmd.cmdidx = MMC_CMD_APP_CMD;
1253 cmd.resp_type = MMC_RSP_R1;
1254 cmd.cmdarg = mmc->rca << 16;
1256 err = mmc_send_cmd(mmc, &cmd, NULL);
1260 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1261 cmd.resp_type = MMC_RSP_R1;
1266 err = mmc_send_cmd(mmc, &cmd, NULL);
/*
 * Read the 64-byte SD Status (ACMD13) and decode the allocation unit
 * size and erase timing fields into mmc->ssr.
 */
1273 static int sd_read_ssr(struct mmc *mmc)
1277 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1278 struct mmc_data data;
1280 unsigned int au, eo, et, es;
1282 cmd.cmdidx = MMC_CMD_APP_CMD;
1283 cmd.resp_type = MMC_RSP_R1;
1284 cmd.cmdarg = mmc->rca << 16;
1286 err = mmc_send_cmd(mmc, &cmd, NULL);
1290 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1291 cmd.resp_type = MMC_RSP_R1;
1295 data.dest = (char *)ssr;
1296 data.blocksize = 64;
1298 data.flags = MMC_DATA_READ;
1300 err = mmc_send_cmd(mmc, &cmd, &data);
/* SD Status is big-endian; normalize in place before decoding. */
1308 for (i = 0; i < 16; i++)
1309 ssr[i] = be32_to_cpu(ssr[i]);
1311 au = (ssr[2] >> 12) & 0xF;
1312 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1313 mmc->ssr.au = sd_au_size[au];
1314 es = (ssr[3] >> 24) & 0xFF;
1315 es |= (ssr[2] & 0xFF) << 8;
1316 et = (ssr[3] >> 18) & 0x3F;
1318 eo = (ssr[3] >> 16) & 0x3;
1319 mmc->ssr.erase_timeout = (et * 1000) / es;
1320 mmc->ssr.erase_offset = eo * 1000;
1323 debug("Invalid Allocation Unit Size.\n");
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
1331 static const int fbase[] = {
/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 */
1341 static const u8 multipliers[] = {
/*
 * Convert a single-bit width capability flag into a bus width in bits.
 * NOTE(review): "witdh" typo below is in a runtime string and is
 * deliberately left untouched here.
 */
1360 static inline int bus_width(uint cap)
1362 if (cap == MMC_MODE_8BIT)
1364 if (cap == MMC_MODE_4BIT)
1366 if (cap == MMC_MODE_1BIT)
1368 printf("invalid bus witdh capability 0x%x\n", cap);
1372 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM stubs for tuning and the init clock stream. */
1373 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1378 static void mmc_send_init_stream(struct mmc *mmc)
/* Push the current clock/width/voltage settings down to the host driver. */
1382 static int mmc_set_ios(struct mmc *mmc)
1386 if (mmc->cfg->ops->set_ios)
1387 ret = mmc->cfg->ops->set_ios(mmc);
/*
 * Set the bus clock, clamped to the host's [f_min, f_max] range;
 * 'disable' requests the clock be gated.
 */
1393 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1395 if (clock > mmc->cfg->f_max)
1396 clock = mmc->cfg->f_max;
1398 if (clock < mmc->cfg->f_min)
1399 clock = mmc->cfg->f_min;
1402 mmc->clk_disable = disable;
1404 return mmc_set_ios(mmc);
/* Record the bus width and push it to the host driver. */
1407 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1409 mmc->bus_width = width;
1411 return mmc_set_ios(mmc);
1414 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/* helper function to display the capabilities in a human
 * friendly manner. The capabilities include bus width and
 */
1420 void mmc_dump_capabilities(const char *text, uint caps)
1424 printf("%s: widths [", text);
1425 if (caps & MMC_MODE_8BIT)
1427 if (caps & MMC_MODE_4BIT)
1429 if (caps & MMC_MODE_1BIT)
/* "\b\b" erases the trailing ", " before closing the bracket. */
1431 printf("\b\b] modes [");
1432 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1433 if (MMC_CAP(mode) & caps)
1434 printf("%s, ", mmc_mode_name(mode));
/* One preferred (mode, widths, tuning command) entry per bus mode. */
1439 struct mode_width_tuning {
/* Convert an mmc_voltage enum value to millivolts. */
1445 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1448 case MMC_SIGNAL_VOLTAGE_000: return 0;
1449 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1450 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1451 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/* Update the host's signalling voltage; no-op if already set. */
1456 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1460 if (mmc->signal_voltage == signal_voltage)
1463 mmc->signal_voltage = signal_voltage;
1464 err = mmc_set_ios(mmc);
1466 debug("unable to set voltage (err %d)\n", err);
/* SD bus modes in order of preference, fastest first. */
1471 static const struct mode_width_tuning sd_modes_by_pref[] = {
1474 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1475 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1479 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1483 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1487 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1491 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1495 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1499 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate sd_modes_by_pref, visiting only modes present in 'caps'. */
1503 #define for_each_sd_mode_by_pref(caps, mwt) \
1504 for (mwt = sd_modes_by_pref;\
1505 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1507 if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the fastest (mode, width) combination supported by both card
 * and host: program width and speed on the card, mirror them on the
 * host, run tuning when the mode requires it, and sanity-check with an
 * SSR read. On SSR failure, fall back to SD_LEGACY and retry.
 */
1509 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1512 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1513 const struct mode_width_tuning *mwt;
1514 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1518 /* Restrict card's capabilities by what the host can do */
1519 caps = card_caps & (mmc->host_caps | MMC_MODE_1BIT);
1524 for_each_sd_mode_by_pref(caps, mwt) {
1527 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1528 if (*w & caps & mwt->widths) {
1529 debug("trying mode %s width %d (at %d MHz)\n",
1530 mmc_mode_name(mwt->mode),
1532 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1534 /* configure the bus width (card + host) */
1535 err = sd_select_bus_width(mmc, bus_width(*w));
1538 mmc_set_bus_width(mmc, bus_width(*w));
1540 /* configure the bus mode (card) */
1541 err = sd_set_card_speed(mmc, mwt->mode);
1545 /* configure the bus mode (host) */
1546 mmc_select_mode(mmc, mwt->mode);
1547 mmc_set_clock(mmc, mmc->tran_speed, false);
1549 /* execute tuning if needed */
1550 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1551 err = mmc_execute_tuning(mmc,
1554 debug("tuning failed\n");
1559 err = sd_read_ssr(mmc);
1563 printf("bad ssr\n");
1566 /* revert to a safer bus speed */
1567 mmc_select_mode(mmc, SD_LEGACY);
1568 mmc_set_clock(mmc, mmc->tran_speed, false);
1573 printf("unable to select a mode\n");
 * Read and compare the part of ext csd that is constant.
 * This can be used to check that the transfer is working
/*
 * Re-read EXT_CSD and compare read-only fields against the cached
 * copy; a mismatch indicates the new bus configuration corrupts data.
 */
1582 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1585 const u8 *ext_csd = mmc->ext_csd;
1586 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1588 err = mmc_send_ext_csd(mmc, test_csd);
1592 /* Only compare read only fields */
1593 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1594 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1595 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1596 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1597 ext_csd[EXT_CSD_REV]
1598 == test_csd[EXT_CSD_REV] &&
1599 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1600 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1601 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1602 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
/*
 * Build the mask of signalling voltages the card supports for 'mode'
 * (from EXT_CSD CARD_TYPE bits), then try each voltage allowed by
 * 'allowed_mask' from lowest to highest until one can be set.
 */
1608 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1609 uint32_t allowed_mask)
1615 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
1616 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1617 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
1618 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1621 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1622 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1623 MMC_SIGNAL_VOLTAGE_180;
1624 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1625 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1628 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1632 while (card_mask & allowed_mask) {
1633 enum mmc_voltage best_match;
/* ffs() picks the lowest set bit, i.e. the lowest candidate voltage. */
1635 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1636 if (!mmc_set_signal_voltage(mmc, best_match))
1639 allowed_mask &= ~best_match;
/* eMMC bus modes in order of preference, fastest first. */
1645 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1648 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1649 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1653 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1657 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1661 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1665 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate mmc_modes_by_pref, visiting only modes present in 'caps'. */
1669 #define for_each_mmc_mode_by_pref(caps, mwt) \
1670 for (mwt = mmc_modes_by_pref;\
1671 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1673 if (caps & MMC_CAP(mwt->mode))
/* Map (capability bit, ddr flag) to the EXT_CSD BUS_WIDTH value. */
1675 static const struct ext_csd_bus_width {
1679 } ext_csd_bus_width[] = {
1680 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1681 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1682 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1683 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1684 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
/* Iterate bus-width entries matching the ddr flag and caps mask. */
1687 #define for_each_supported_width(caps, ddr, ecbv) \
1688 for (ecbv = ext_csd_bus_width;\
1689 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1691 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the fastest eMMC (mode, width) supported by card and host:
 * for each candidate, set the lowest workable signalling voltage,
 * program bus width then timing on the card, mirror on the host, run
 * tuning if required, and verify with an EXT_CSD read-back compare.
 * On failure, restore the previous voltage and drop back to 1-bit
 * legacy before trying the next candidate.
 */
1693 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1696 const struct mode_width_tuning *mwt;
1697 const struct ext_csd_bus_width *ecbw;
1699 /* Restrict card's capabilities by what the host can do */
1700 card_caps &= (mmc->host_caps | MMC_MODE_1BIT);
1702 /* Only version 4 of MMC supports wider bus widths */
1703 if (mmc->version < MMC_VERSION_4)
1706 if (!mmc->ext_csd) {
1707 debug("No ext_csd found!\n"); /* this should never happen */
1711 mmc_set_clock(mmc, mmc->legacy_speed, false);
1713 for_each_mmc_mode_by_pref(card_caps, mwt) {
1714 for_each_supported_width(card_caps & mwt->widths,
1715 mmc_is_mode_ddr(mwt->mode), ecbw) {
1716 enum mmc_voltage old_voltage;
1717 debug("trying mode %s width %d (at %d MHz)\n",
1718 mmc_mode_name(mwt->mode),
1719 bus_width(ecbw->cap),
1720 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1721 old_voltage = mmc->signal_voltage;
1722 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1723 MMC_ALL_SIGNAL_VOLTAGE);
1727 /* configure the bus width (card + host) */
1728 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1730 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1733 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1735 /* configure the bus speed (card) */
1736 err = mmc_set_card_speed(mmc, mwt->mode);
1741 * configure the bus width AND the ddr mode (card)
1742 * The host side will be taken care of in the next step
1744 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1745 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1747 ecbw->ext_csd_bits);
1752 /* configure the bus mode (host) */
1753 mmc_select_mode(mmc, mwt->mode);
1754 mmc_set_clock(mmc, mmc->tran_speed, false);
1756 /* execute tuning if needed */
1758 err = mmc_execute_tuning(mmc, mwt->tuning);
1760 debug("tuning failed\n");
1765 /* do a transfer to check the configuration */
1766 err = mmc_read_and_compare_ext_csd(mmc);
1770 mmc_set_signal_voltage(mmc, old_voltage);
1771 /* if an error occurred, revert to a safer bus mode */
1772 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1773 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1774 mmc_select_mode(mmc, MMC_LEGACY);
1775 mmc_set_bus_width(mmc, 1);
1779 printf("unable to select a mode\n");
/*
 * mmc_startup_v4() - parse EXT_CSD for MMC version 4+ devices
 *
 * Reads the EXT_CSD register, refines mmc->version from EXT_CSD_REV,
 * and derives capacities (user/boot/RPMB/GP partitions), partition
 * attributes, erase group size and write-reliability settings.
 * Returns early (no-op) for SD cards and MMC < v4.
 *
 * NOTE(review): several original lines (error checks, frees on the
 * failure paths, final return) are elided in this excerpt.
 */
1784 static int mmc_startup_v4(struct mmc *mmc)
1788 bool has_parts = false;
1789 bool part_completed;
1792 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
/* EXT_CSD is a full 512-byte block; keep it cache-aligned for DMA */
1795 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
1799 mmc->ext_csd = ext_csd;
1801 /* check ext_csd version and capacity */
1802 err = mmc_send_ext_csd(mmc, ext_csd);
1805 if (ext_csd[EXT_CSD_REV] >= 2) {
1807 * According to the JEDEC Standard, the value of
1808 * ext_csd's capacity is valid if the value is more
/* SEC_CNT is little-endian across four EXT_CSD bytes */
1811 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1812 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1813 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1814 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1815 capacity *= MMC_MAX_BLOCK_LEN;
/* Only trust SEC_CNT for devices larger than 2 GiB (high capacity) */
1816 if ((capacity >> 20) > 2 * 1024)
1817 mmc->capacity_user = capacity;
/* Map EXT_CSD_REV to the exact eMMC spec version */
1820 switch (ext_csd[EXT_CSD_REV]) {
1822 mmc->version = MMC_VERSION_4_1;
1825 mmc->version = MMC_VERSION_4_2;
1828 mmc->version = MMC_VERSION_4_3;
1831 mmc->version = MMC_VERSION_4_41;
1834 mmc->version = MMC_VERSION_4_5;
1837 mmc->version = MMC_VERSION_5_0;
1840 mmc->version = MMC_VERSION_5_1;
1844 /* The partition data may be non-zero but it is only
1845 * effective if PARTITION_SETTING_COMPLETED is set in
1846 * EXT_CSD, so ignore any data if this bit is not set,
1847 * except for enabling the high-capacity group size
1848 * definition (see below).
1850 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1851 EXT_CSD_PARTITION_SETTING_COMPLETED);
1853 /* store the partition info of emmc */
1854 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1855 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1856 ext_csd[EXT_CSD_BOOT_MULT])
1857 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1858 if (part_completed &&
1859 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1860 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT/RPMB sizes are given in 128 KiB units (<< 17) */
1862 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1864 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* General-purpose partitions: 3 size bytes each, 4 partitions */
1866 for (i = 0; i < 4; i++) {
1867 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1868 uint mult = (ext_csd[idx + 2] << 16) +
1869 (ext_csd[idx + 1] << 8) + ext_csd[idx];
1872 if (!part_completed)
1874 mmc->capacity_gp[i] = mult;
1875 mmc->capacity_gp[i] *=
1876 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1877 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* convert from 512 KiB units to bytes (<< 19) */
1878 mmc->capacity_gp[i] <<= 19;
1881 if (part_completed) {
1882 mmc->enh_user_size =
1883 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
1884 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
1885 ext_csd[EXT_CSD_ENH_SIZE_MULT];
1886 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1887 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1888 mmc->enh_user_size <<= 19;
1889 mmc->enh_user_start =
1890 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
1891 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
1892 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
1893 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
1894 if (mmc->high_capacity)
1895 mmc->enh_user_start <<= 9;
1899 * Host needs to enable ERASE_GRP_DEF bit if device is
1900 * partitioned. This bit will be lost every time after a reset
1901 * or power off. This will affect erase size.
1905 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1906 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1909 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1910 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached EXT_CSD copy consistent with the card */
1915 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1918 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1919 /* Read out group size from ext_csd */
1920 mmc->erase_grp_size =
1921 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1923 * if high capacity and partition setting completed
1924 * SEC_COUNT is valid even if it is smaller than 2 GiB
1925 * JEDEC Standard JESD84-B45, 6.2.4
1927 if (mmc->high_capacity && part_completed) {
1928 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1929 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1930 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1931 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1932 capacity *= MMC_MAX_BLOCK_LEN;
1933 mmc->capacity_user = capacity;
1936 /* Calculate the group size from the csd value. */
1937 int erase_gsz, erase_gmul;
1939 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1940 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1941 mmc->erase_grp_size = (erase_gsz + 1)
1945 mmc->hc_wp_grp_size = 1024
1946 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1947 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1949 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
/*
 * mmc_startup() - take an identified card through to Transfer State
 *
 * Sequence (per the MMC/SD state machine): optional SPI CRC enable,
 * ALL_SEND_CID, set/get the relative card address, SEND_CSD (timing and
 * capacity are decoded from the CSD), optional SET_DSR, SELECT_CARD,
 * then v4+ EXT_CSD parsing, capability query and bus mode/width
 * negotiation.  Finally the block descriptor is filled in.
 *
 * NOTE(review): error-check lines between the visible statements are
 * elided in this excerpt.
 */
1954 static int mmc_startup(struct mmc *mmc)
1960 struct blk_desc *bdesc;
1962 #ifdef CONFIG_MMC_SPI_CRC_ON
1963 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1964 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1965 cmd.resp_type = MMC_RSP_R1;
1967 err = mmc_send_cmd(mmc, &cmd, NULL);
1973 /* Put the Card in Identify Mode */
1974 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1975 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1976 cmd.resp_type = MMC_RSP_R2;
1979 err = mmc_send_cmd(mmc, &cmd, NULL);
1981 #ifdef CONFIG_MMC_QUIRKS
1982 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
1985 * It has been seen that SEND_CID may fail on the first
1986 * attempt, let's try a few more time
1989 err = mmc_send_cmd(mmc, &cmd, NULL);
1992 } while (retries--);
1999 memcpy(mmc->cid, cmd.response, 16);
2002 * For MMC cards, set the Relative Address.
2003 * For SD cards, get the Relative Address.
2004 * This also puts the cards into Standby State
2006 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2007 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2008 cmd.cmdarg = mmc->rca << 16;
2009 cmd.resp_type = MMC_RSP_R6;
2011 err = mmc_send_cmd(mmc, &cmd, NULL);
/* For SD, the card chooses the RCA and returns it in the response */
2017 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2020 /* Get the Card-Specific Data */
2021 cmd.cmdidx = MMC_CMD_SEND_CSD;
2022 cmd.resp_type = MMC_RSP_R2;
2023 cmd.cmdarg = mmc->rca << 16;
2025 err = mmc_send_cmd(mmc, &cmd, NULL);
2030 mmc->csd[0] = cmd.response[0];
2031 mmc->csd[1] = cmd.response[1];
2032 mmc->csd[2] = cmd.response[2];
2033 mmc->csd[3] = cmd.response[3];
/* CSD_STRUCTURE/SPEC_VERS field gives a first version estimate */
2035 if (mmc->version == MMC_VERSION_UNKNOWN) {
2036 int version = (cmd.response[0] >> 26) & 0xf;
2040 mmc->version = MMC_VERSION_1_2;
2043 mmc->version = MMC_VERSION_1_4;
2046 mmc->version = MMC_VERSION_2_2;
2049 mmc->version = MMC_VERSION_3;
2052 mmc->version = MMC_VERSION_4;
2055 mmc->version = MMC_VERSION_1_2;
2060 /* divide frequency by 10, since the mults are 10x bigger */
2061 freq = fbase[(cmd.response[0] & 0x7)];
2062 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2064 mmc->legacy_speed = freq * mult;
2065 mmc_select_mode(mmc, MMC_LEGACY);
2067 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2068 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2071 mmc->write_bl_len = mmc->read_bl_len;
2073 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* Decode capacity: high-capacity and standard CSD layouts differ */
2075 if (mmc->high_capacity) {
2076 csize = (mmc->csd[1] & 0x3f) << 16
2077 | (mmc->csd[2] & 0xffff0000) >> 16;
2080 csize = (mmc->csd[1] & 0x3ff) << 2
2081 | (mmc->csd[2] & 0xc0000000) >> 30;
2082 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2085 mmc->capacity_user = (csize + 1) << (cmult + 2);
2086 mmc->capacity_user *= mmc->read_bl_len;
2087 mmc->capacity_boot = 0;
2088 mmc->capacity_rpmb = 0;
2089 for (i = 0; i < 4; i++)
2090 mmc->capacity_gp[i] = 0;
/* Clamp block lengths to the controller maximum */
2092 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2093 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2095 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2096 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the driver stage register only when implemented and set */
2098 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2099 cmd.cmdidx = MMC_CMD_SET_DSR;
2100 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2101 cmd.resp_type = MMC_RSP_NONE;
2102 if (mmc_send_cmd(mmc, &cmd, NULL))
2103 printf("MMC: SET_DSR failed\n");
2106 /* Select the card, and put it into Transfer Mode */
2107 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2108 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2109 cmd.resp_type = MMC_RSP_R1;
2110 cmd.cmdarg = mmc->rca << 16;
2111 err = mmc_send_cmd(mmc, &cmd, NULL);
2118 * For SD, its erase group is always one sector
2120 mmc->erase_grp_size = 1;
2121 mmc->part_config = MMCPART_NOAVAILABLE;
2123 err = mmc_startup_v4(mmc);
2127 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2132 err = sd_get_capabilities(mmc);
2135 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2137 err = mmc_get_capabilities(mmc);
2140 mmc_select_mode_and_width(mmc, mmc->card_caps);
2146 mmc->best_mode = mmc->selected_mode;
2148 /* Fix the block length for DDR mode */
2149 if (mmc->ddr_mode) {
2150 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2151 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2154 /* fill in device description */
2155 bdesc = mmc_get_blk_desc(mmc);
2159 bdesc->blksz = mmc->read_bl_len;
2160 bdesc->log2blksz = LOG2(bdesc->blksz);
2161 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2162 #if !defined(CONFIG_SPL_BUILD) || \
2163 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2164 !defined(CONFIG_USE_TINY_PRINTF))
/* Decode vendor/product/revision strings from the CID register */
2165 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2166 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2167 (mmc->cid[3] >> 16) & 0xffff);
2168 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2169 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2170 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2171 (mmc->cid[2] >> 24) & 0xff);
2172 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2173 (mmc->cid[2] >> 16) & 0xf);
2175 bdesc->vendor[0] = 0;
2176 bdesc->product[0] = 0;
2177 bdesc->revision[0] = 0;
2179 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
/*
 * mmc_send_if_cond() - send SD CMD8 (SEND_IF_COND) to detect SD v2.00+
 *
 * The 0xaa check pattern must be echoed back in the response; if it is,
 * the card supports SD spec 2.00 or later and mmc->version is updated.
 */
2186 static int mmc_send_if_cond(struct mmc *mmc)
2191 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2192 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2193 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2194 cmd.resp_type = MMC_RSP_R7;
2196 err = mmc_send_cmd(mmc, &cmd, NULL);
/* Card must echo the check pattern back, else it is not SD v2 */
2201 if ((cmd.response[0] & 0xff) != 0xaa)
2204 mmc->version = SD_VERSION_2;
2209 #if !CONFIG_IS_ENABLED(DM_MMC)
2210 /* board-specific MMC power initializations; weak no-op by default. */
2211 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - look up the card's power supplies
 *
 * With driver model + regulators, resolves the optional "vmmc-supply"
 * (card power) and "vqmmc-supply" (I/O voltage) regulators; a missing
 * supply is only debug-logged, not an error.  Without DM_MMC the board
 * hook board_mmc_power_init() is called instead.
 */
2216 static int mmc_power_init(struct mmc *mmc)
2218 #if CONFIG_IS_ENABLED(DM_MMC)
2219 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2222 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2225 debug("%s: No vmmc supply\n", mmc->dev->name);
2227 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2228 &mmc->vqmmc_supply);
2230 debug("%s: No vqmmc supply\n", mmc->dev->name);
2232 #else /* !CONFIG_DM_MMC */
2234 * Driver model should use a regulator, as above, rather than calling
2235 * out to board code.
2237 board_mmc_power_init();
2243 * put the host in the initial state:
2244 * - turn on Vdd (card power supply)
2245 * - configure the bus width and clock to minimal values
2247 static void mmc_set_initial_state(struct mmc *mmc)
2251 /* First try to set 3.3V. If it fails set to 1.8V */
2252 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2254 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2256 printf("mmc: failed to set signal voltage\n");
/* Legacy mode, 1-bit bus, minimum clock: the card-identification state */
2258 mmc_select_mode(mmc, MMC_LEGACY);
2259 mmc_set_bus_width(mmc, 1);
2260 mmc_set_clock(mmc, 0, false);
/* mmc_power_on() - enable the card's Vdd regulator (if one is bound). */
2263 static int mmc_power_on(struct mmc *mmc)
2265 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2266 if (mmc->vmmc_supply) {
2267 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2270 puts("Error enabling VMMC supply\n");
/*
 * mmc_power_off() - stop the clock and disable the card's Vdd regulator.
 * Without a controllable regulator this may only gate the clock.
 */
2278 static int mmc_power_off(struct mmc *mmc)
2280 mmc_set_clock(mmc, 1, true);
2281 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2282 if (mmc->vmmc_supply) {
2283 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2286 debug("Error disabling VMMC supply\n");
/*
 * mmc_power_cycle() - power the card off, wait, then power it back on.
 * Used to reset cards stuck in UHS modes before re-initialization.
 */
2294 static int mmc_power_cycle(struct mmc *mmc)
2298 ret = mmc_power_off(mmc);
2302 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2303 * to be on the safer side.
2306 return mmc_power_on(mmc);
/*
 * mmc_start_init() - begin (possibly asynchronous) card initialization
 *
 * Checks card presence, powers up the card (with an initial power cycle
 * when supported; otherwise UHS modes are disabled for safety), resets
 * it with CMD0, probes for SD v2 via CMD8, then issues the SD or MMC
 * operating-condition command.  Completion is finished later by
 * mmc_complete_init(); init_in_progress marks the pending state.
 *
 * NOTE(review): error-handling lines between the visible statements are
 * elided in this excerpt.
 */
2309 int mmc_start_init(struct mmc *mmc)
2312 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2315 mmc->host_caps = mmc->cfg->host_caps;
2317 /* we pretend there's no card when init is NULL */
2318 no_card = mmc_getcd(mmc) == 0;
2319 #if !CONFIG_IS_ENABLED(DM_MMC)
2320 no_card = no_card || (mmc->cfg->ops->init == NULL);
2324 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2325 printf("MMC: no card present\n")
2333 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2334 mmc_adapter_card_type_ident();
2336 err = mmc_power_init(mmc);
2340 #ifdef CONFIG_MMC_QUIRKS
2341 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2342 MMC_QUIRK_RETRY_SEND_CID;
2345 err = mmc_power_cycle(mmc);
2348 * if power cycling is not supported, we should not try
2349 * to use the UHS modes, because we wouldn't be able to
2350 * recover from an error during the UHS initialization.
2352 debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2354 mmc->host_caps &= ~UHS_CAPS;
2355 err = mmc_power_on(mmc);
2360 #if CONFIG_IS_ENABLED(DM_MMC)
2361 /* The device has already been probed ready for use */
2363 /* made sure it's not NULL earlier */
2364 err = mmc->cfg->ops->init(mmc);
2371 mmc_set_initial_state(mmc);
2372 mmc_send_init_stream(mmc);
2374 /* Reset the Card */
2375 err = mmc_go_idle(mmc);
2380 /* The internal partition reset to user partition(0) at every CMD0*/
2381 mmc_get_blk_desc(mmc)->hwpart = 0;
2383 /* Test for SD version 2 */
2384 err = mmc_send_if_cond(mmc);
2386 /* Now try to get the SD card's operating condition */
2387 err = sd_send_op_cond(mmc, uhs_en);
2388 if (err && uhs_en) {
2390 mmc_power_cycle(mmc);
2394 /* If the command timed out, we check for an MMC card */
2395 if (err == -ETIMEDOUT) {
2396 err = mmc_send_op_cond(mmc);
2399 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2400 printf("Card did not respond to voltage select!\n");
2407 mmc->init_in_progress = 1;
/*
 * mmc_complete_init() - finish an initialization begun by mmc_start_init()
 * Completes a pending operating-condition wait, then runs mmc_startup().
 */
2412 static int mmc_complete_init(struct mmc *mmc)
2416 mmc->init_in_progress = 0;
2417 if (mmc->op_cond_pending)
2418 err = mmc_complete_op_cond(mmc);
2421 err = mmc_startup(mmc);
/*
 * mmc_init() - full card initialization (start + complete), timed.
 * Skips the start phase when an asynchronous init is already pending.
 */
2429 int mmc_init(struct mmc *mmc)
2432 __maybe_unused unsigned start;
2433 #if CONFIG_IS_ENABLED(DM_MMC)
2434 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2441 start = get_timer(0);
2443 if (!mmc->init_in_progress)
2444 err = mmc_start_init(mmc);
2447 err = mmc_complete_init(mmc);
2449 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
/* mmc_set_dsr() - record a driver stage register value to program later. */
2454 int mmc_set_dsr(struct mmc *mmc, u16 val)
2460 /* CPU-specific MMC initializations; weak no-op by default. */
2461 __weak int cpu_mmc_init(bd_t *bis)
2466 /* board-specific MMC initializations; weak no-op by default. */
2467 __weak int board_mmc_init(bd_t *bis)
/* mmc_set_preinit() - mark a device for initialization before first use. */
2472 void mmc_set_preinit(struct mmc *mmc, int preinit)
2474 mmc->preinit = preinit;
/*
 * mmc_probe() - discover and probe all MMC devices.
 * Three variants: a no-op for DM in SPL, a DM uclass walk that probes
 * each device in sequence order, and a legacy board_mmc_init() fallback.
 */
2477 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2478 static int mmc_probe(bd_t *bis)
2482 #elif CONFIG_IS_ENABLED(DM_MMC)
2483 static int mmc_probe(bd_t *bis)
2487 struct udevice *dev;
2489 ret = uclass_get(UCLASS_MMC, &uc);
2494 * Try to add them in sequence order. Really with driver model we
2495 * should allow holes, but the current MMC list does not allow that.
2496 * So if we request 0, 1, 3 we will get 0, 1, 2.
2498 for (i = 0; ; i++) {
2499 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2503 uclass_foreach_dev(dev, uc) {
2504 ret = device_probe(dev);
/* a single probe failure is reported but does not abort the walk */
2506 printf("%s - probe failed: %d\n", dev->name, ret);
2512 static int mmc_probe(bd_t *bis)
2514 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-time MMC subsystem setup
 * Guarded by a static flag so repeated calls are harmless; probes all
 * devices and (outside SPL) prints the device list.
 */
2521 int mmc_initialize(bd_t *bis)
2523 static int initialized = 0;
2525 if (initialized) /* Avoid initializing mmc multiple times */
2529 #if !CONFIG_IS_ENABLED(BLK)
2530 #if !CONFIG_IS_ENABLED(MMC_TINY)
2534 ret = mmc_probe(bis);
2538 #ifndef CONFIG_SPL_BUILD
2539 print_mmc_devices(',');
2546 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * mmc_set_bkops_enable() - permanently enable manual background operations
 *
 * Reads EXT_CSD to verify the device supports BKOPS and that it is not
 * already enabled, then sets EXT_CSD_BKOPS_EN via CMD6.  Note that
 * BKOPS_EN is a write-once (one-time programmable) EXT_CSD field.
 */
2547 int mmc_set_bkops_enable(struct mmc *mmc)
2550 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2552 err = mmc_send_ext_csd(mmc, ext_csd);
2554 puts("Could not get ext_csd register values\n");
2558 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2559 puts("Background operations not supported on device\n");
2560 return -EMEDIUMTYPE;
2563 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2564 puts("Background operations already enabled\n");
2568 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2570 puts("Failed to enable manual background operations\n");
2574 puts("Enabled manual background operations\n");