1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 #if !CONFIG_IS_ENABLED(MMC_TINY)
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
#if !CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Non-DM fallback: wait for DAT0 to reach @state (body elided in this excerpt). */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
/* Weak board hook for the write-protect switch; boards may override. */
__weak int board_mmc_getwp(struct mmc *mmc)
/*
 * Report the card's write-protect state.  Queries the board hook first,
 * then the controller's getwp op when one is provided.
 * NOTE(review): lines are elided here — presumably the op is only consulted
 * when the board hook gives no answer; confirm against the full source.
 */
int mmc_getwp(struct mmc *mmc)
wp = board_mmc_getwp(mmc);
if (mmc->cfg->ops->getwp)
wp = mmc->cfg->ops->getwp(mmc);
/* Weak board hook for card-detect; boards may override. */
__weak int board_mmc_getcd(struct mmc *mmc)
#ifdef CONFIG_MMC_TRACE
/* Trace: dump command index and argument before the command is sent. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
printf("CMD_SEND:%d\n", cmd->cmdidx);
printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
/* Trace: dump the driver's return code and decode the response by type. */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
printf("\t\tRET\t\t\t %d\n", ret);
switch (cmd->resp_type) {
printf("\t\tMMC_RSP_NONE\n");
printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
/* R2 responses span all four response words; print each one. */
printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
printf("\t\t \t\t 0x%08X \n",
printf("\t\t \t\t 0x%08X \n",
printf("\t\t \t\t 0x%08X \n",
printf("\t\t\t\t\tDUMPING DATA\n");
for (i = 0; i < 4; i++) {
printf("\t\t\t\t\t%03d - ", i*4);
ptr = (u8 *)&cmd->response[i];
/*
 * NOTE(review): ptr is decremented while printing — an intermediate
 * adjustment (e.g. advancing ptr to the word's last byte) appears to
 * be elided from this excerpt; verify against the full source.
 */
for (j = 0; j < 4; j++)
printf("%02X ", *ptr--);
printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
printf("\t\tERROR MMC rsp not supported\n");
/* Trace: extract and print CURRENT_STATE (bits 12:9) from the card status. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
printf("CURR STATE:%d\n", status);
#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Return a human-readable name for a bus mode, or "Unknown mode" for
 * out-of-range values.  Only built for verbose/debug configurations.
 */
const char *mmc_mode_name(enum bus_mode mode)
static const char *const names[] = {
[MMC_LEGACY] = "MMC legacy",
[SD_LEGACY] = "SD Legacy",
[MMC_HS] = "MMC High Speed (26MHz)",
[SD_HS] = "SD High Speed (50MHz)",
[UHS_SDR12] = "UHS SDR12 (25MHz)",
[UHS_SDR25] = "UHS SDR25 (50MHz)",
[UHS_SDR50] = "UHS SDR50 (100MHz)",
[UHS_SDR104] = "UHS SDR104 (208MHz)",
[UHS_DDR50] = "UHS DDR50 (50MHz)",
[MMC_HS_52] = "MMC High Speed (52MHz)",
[MMC_DDR_52] = "MMC DDR52 (52MHz)",
[MMC_HS_200] = "HS200 (200MHz)",
[MMC_HS_400] = "HS400 (200MHz)",
/* Guard against values past the table before indexing it. */
if (mode >= MMC_MODES_END)
return "Unknown mode";
/*
 * Map a bus mode to its nominal maximum clock frequency in Hz.
 * MMC_LEGACY defers to the card's recorded legacy speed.
 */
static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
static const int freqs[] = {
[MMC_LEGACY] = 25000000,
[SD_LEGACY] = 25000000,
[MMC_HS_52] = 52000000,
[MMC_DDR_52] = 52000000,
[UHS_SDR12] = 25000000,
[UHS_SDR25] = 50000000,
[UHS_SDR50] = 100000000,
[UHS_DDR50] = 50000000,
[UHS_SDR104] = 208000000,
[MMC_HS_200] = 200000000,
[MMC_HS_400] = 200000000,
if (mode == MMC_LEGACY)
return mmc->legacy_speed;
/* Out-of-range modes fall through to a safe default (lines elided). */
else if (mode >= MMC_MODES_END)
/*
 * Record the chosen bus mode on the card: set selected_mode, derive the
 * transfer clock from the mode table, and flag DDR modes.
 */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
mmc->selected_mode = mode;
mmc->tran_speed = mmc_mode2freq(mmc, mode);
mmc->ddr_mode = mmc_is_mode_ddr(mode);
pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
mmc->tran_speed / 1000000);
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Non-DM command dispatch: forward the command (and optional data phase)
 * to the controller driver, wrapped in the optional trace hooks.
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
mmmc_trace_before_send(mmc, cmd);
ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
mmmc_trace_after_send(mmc, cmd, ret);
/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data
 * and has left the programming state, or the timeout/retry budget expires.
 * Status errors are logged when the SPL build keeps libcommon support.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
int err, retries = 5;
cmd.cmdidx = MMC_CMD_SEND_STATUS;
cmd.resp_type = MMC_RSP_R1;
/* RCA is only meaningful on a real MMC/SD bus, not in SPI mode. */
if (!mmc_host_is_spi(mmc))
cmd.cmdarg = mmc->rca << 16;
err = mmc_send_cmd(mmc, &cmd, NULL);
if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
(cmd.response[0] & MMC_STATUS_CURR_STATE) !=
if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
pr_err("Status Error: 0x%08X\n",
} else if (--retries < 0)
mmc_trace_state(mmc, &cmd);
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
pr_err("Timeout waiting card ready\n");
/*
 * Set the card's block length with CMD16.  Some cards need a retry
 * (MMC_QUIRK_RETRY_SET_BLOCKLEN) because the first attempt can fail.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
cmd.resp_type = MMC_RSP_R1;
err = mmc_send_cmd(mmc, &cmd, NULL);
#ifdef CONFIG_MMC_QUIRKS
if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
/*
 * It has been seen that SET_BLOCKLEN may fail on the first
 * attempt, let's try a few more times
 */
err = mmc_send_cmd(mmc, &cmd, NULL);
#ifdef MMC_SUPPORTS_TUNING
/* Standard tuning block pattern sent by the card on a 4-bit bus. */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard tuning block pattern sent by the card on an 8-bit bus. */
static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Issue one tuning command (@opcode) and read back the tuning block,
 * then compare it against the expected pattern for the current bus
 * width.  A mismatch means the sampling point is wrong.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
struct mmc_data data;
const u8 *tuning_block_pattern;
/* Pick the reference pattern matching the active bus width. */
if (mmc->bus_width == 8) {
tuning_block_pattern = tuning_blk_pattern_8bit;
size = sizeof(tuning_blk_pattern_8bit);
} else if (mmc->bus_width == 4) {
tuning_block_pattern = tuning_blk_pattern_4bit;
size = sizeof(tuning_blk_pattern_4bit);
ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
cmd.resp_type = MMC_RSP_R1;
data.dest = (void *)data_buf;
data.blocksize = size;
data.flags = MMC_DATA_READ;
err = mmc_send_cmd(mmc, &cmd, &data);
/* Non-zero memcmp -> received block differs from the reference pattern. */
if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17/CMD18.
 * High-capacity cards address by block number, byte-addressed cards by
 * byte offset.  Multi-block reads are terminated with CMD12.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
struct mmc_data data;
cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
/* High-capacity cards take a block address, others a byte address. */
if (mmc->high_capacity)
cmd.cmdarg = start * mmc->read_bl_len;
cmd.resp_type = MMC_RSP_R1;
data.blocks = blkcnt;
data.blocksize = mmc->read_bl_len;
data.flags = MMC_DATA_READ;
if (mmc_send_cmd(mmc, &cmd, &data))
/* Multi-block transfers must be closed with STOP_TRANSMISSION. */
cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
cmd.resp_type = MMC_RSP_R1b;
if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
pr_err("mmc fail to send stop cmd\n");
#if CONFIG_IS_ENABLED(BLK)
/* Block-device read entry point (driver-model BLK variant). */
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
/* Block-device read entry point (legacy blk_desc variant). */
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
#if CONFIG_IS_ENABLED(BLK)
struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
int dev_num = block_dev->devnum;
lbaint_t cur, blocks_todo = blkcnt;
struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY has no hwpart layer, so switch the partition directly. */
if (CONFIG_IS_ENABLED(MMC_TINY))
err = mmc_switch_part(mmc, block_dev->hwpart);
err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Refuse reads that would run past the end of the device. */
if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
start + blkcnt, block_dev->lba);
if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
pr_debug("%s: Failed to set blocklen\n", __func__);
/* Chunk the transfer so no single read exceeds the host's b_max limit. */
cur = (blocks_todo > mmc->cfg->b_max) ?
mmc->cfg->b_max : blocks_todo;
if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
pr_debug("%s: Failed to read blocks\n", __func__);
dst += cur * mmc->read_bl_len;
} while (blocks_todo > 0);
/* Reset the card to idle state with CMD0 (no response expected). */
static int mmc_go_idle(struct mmc *mmc)
cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
cmd.resp_type = MMC_RSP_NONE;
err = mmc_send_cmd(mmc, &cmd, NULL);
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD voltage-switch sequence (CMD11): gate the clock,
 * change the host signal voltage, re-enable the clock and verify the
 * card released DAT[0:3], per the SD specification's timing rules.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
/*
 * Send CMD11 only if the request is to switch the card to
 * a lower voltage; 3.3V needs no CMD11 handshake.
 */
if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
return mmc_set_signal_voltage(mmc, signal_voltage);
cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
cmd.resp_type = MMC_RSP_R1;
err = mmc_send_cmd(mmc, &cmd, NULL);
/* A status error in the CMD11 response means the card refused the switch. */
if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
/*
 * The card should drive cmd and dat[0:3] low immediately
 * after the response of cmd11, but wait 100 us to be sure
 */
err = mmc_wait_dat0(mmc, 0, 100);
/*
 * During a signal voltage level switch, the clock must be gated
 * for 5 ms according to the SD spec
 */
mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
err = mmc_set_signal_voltage(mmc, signal_voltage);
/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
/*
 * Failure to switch is indicated by the card holding
 * dat[0:3] low. Wait for at least 1 ms according to spec
 */
err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * SD initialization: loop ACMD41 (APP_CMD + SD_SEND_OP_COND) until the
 * card leaves busy, record version/OCR/high-capacity state, and start
 * the UHS voltage switch when both host and card request 1.8V.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
cmd.cmdidx = MMC_CMD_APP_CMD;
cmd.resp_type = MMC_RSP_R1;
err = mmc_send_cmd(mmc, &cmd, NULL);
cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
cmd.resp_type = MMC_RSP_R3;
/*
 * Most cards do not answer if some reserved bits
 * in the ocr are set. However, Some controller
 * can set bit 7 (reserved for low voltages), but
 * how to manage low voltages SD card is not yet
 * specified.
 */
cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
(mmc->cfg->voltages & 0xff8000);
/* SD 2.0 cards may report high capacity; request it via HCS. */
if (mmc->version == SD_VERSION_2)
cmd.cmdarg |= OCR_HCS;
cmd.cmdarg |= OCR_S18R;
err = mmc_send_cmd(mmc, &cmd, NULL);
/* OCR busy bit set means initialization finished. */
if (cmd.response[0] & OCR_BUSY)
if (mmc->version != SD_VERSION_2)
mmc->version = SD_VERSION_1_0;
if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
cmd.resp_type = MMC_RSP_R3;
err = mmc_send_cmd(mmc, &cmd, NULL);
mmc->ocr = cmd.response[0];
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Card accepted the 1.8V request (S18A + busy) -> do the voltage switch. */
if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * Single CMD1 (SEND_OP_COND) iteration.  With @use_arg on a non-SPI
 * host, echo back the negotiated voltage window and access mode from
 * the previously read OCR.  Updates mmc->ocr from the response.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
cmd.cmdidx = MMC_CMD_SEND_OP_COND;
cmd.resp_type = MMC_RSP_R3;
if (use_arg && !mmc_host_is_spi(mmc))
cmd.cmdarg = OCR_HCS |
(mmc->cfg->voltages &
(mmc->ocr & OCR_VOLTAGE_MASK)) |
(mmc->ocr & OCR_ACCESS_MODE);
err = mmc_send_cmd(mmc, &cmd, NULL);
mmc->ocr = cmd.response[0];
/*
 * Start eMMC initialization: probe capabilities with two CMD1 cycles.
 * If the card is still busy afterwards, mark op_cond_pending so
 * mmc_complete_op_cond() can finish the handshake later.
 */
static int mmc_send_op_cond(struct mmc *mmc)
/* Some cards seem to need this */
/* Asking to the card its capabilities */
for (i = 0; i < 2; i++) {
err = mmc_send_op_cond_iter(mmc, i != 0);
/* exit if not busy (flag seems to be inverted) */
if (mmc->ocr & OCR_BUSY)
mmc->op_cond_pending = 1;
/*
 * Finish a deferred CMD1 handshake: poll SEND_OP_COND until the busy
 * flag clears or the timer expires, read the OCR over SPI if needed,
 * then record version and high-capacity state.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
mmc->op_cond_pending = 0;
if (!(mmc->ocr & OCR_BUSY)) {
/* Some cards seem to need this */
start = get_timer(0);
err = mmc_send_op_cond_iter(mmc, 1);
if (mmc->ocr & OCR_BUSY)
if (get_timer(start) > timeout)
if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
cmd.resp_type = MMC_RSP_R3;
err = mmc_send_cmd(mmc, &cmd, NULL);
mmc->ocr = cmd.response[0];
/* Exact version is refined later from CSD/EXT_CSD. */
mmc->version = MMC_VERSION_UNKNOWN;
mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * Read the 512-byte EXT_CSD register into @ext_csd via CMD8 with a
 * single-block data phase.
 */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
struct mmc_data data;
/* Get the Card Status Register */
cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
cmd.resp_type = MMC_RSP_R1;
data.dest = (char *)ext_csd;
data.blocksize = MMC_MAX_BLOCK_LEN;
data.flags = MMC_DATA_READ;
err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * Write one EXT_CSD byte with CMD6 (SWITCH, write-byte mode) and wait
 * for the card to return to ready via SEND_STATUS, retrying on failure.
 */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
cmd.cmdidx = MMC_CMD_SWITCH;
cmd.resp_type = MMC_RSP_R1b;
cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
while (retries > 0) {
ret = mmc_send_cmd(mmc, &cmd, NULL);
/* Waiting for the ready status */
ret = mmc_send_status(mmc, timeout);
#if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the eMMC HS_TIMING field for the requested bus mode, then
 * (for HS/HS52) re-read EXT_CSD to confirm the card actually switched.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
speed_bits = EXT_CSD_TIMING_HS;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
speed_bits = EXT_CSD_TIMING_HS200;
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
speed_bits = EXT_CSD_TIMING_HS400;
speed_bits = EXT_CSD_TIMING_LEGACY;
err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
/* Now check to see that it worked */
err = mmc_send_ext_csd(mmc, test_csd);
/* No high-speed support */
if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Derive the card's capability mask (bus widths and speed modes) from
 * the EXT_CSD card-type field.  SPI hosts and pre-v4 cards only get
 * the baseline legacy capabilities.
 */
static int mmc_get_capabilities(struct mmc *mmc)
u8 *ext_csd = mmc->ext_csd;
mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
if (mmc_host_is_spi(mmc))
/* Only version 4 supports high-speed */
if (mmc->version < MMC_VERSION_4)
pr_err("No ext_csd found!\n"); /* this should never happen */
mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
cardtype = ext_csd[EXT_CSD_CARD_TYPE];
mmc->cardtype = cardtype;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
EXT_CSD_CARD_TYPE_HS200_1_8V)) {
mmc->card_caps |= MMC_MODE_HS200;
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
EXT_CSD_CARD_TYPE_HS400_1_8V)) {
mmc->card_caps |= MMC_MODE_HS400;
if (cardtype & EXT_CSD_CARD_TYPE_52) {
if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
mmc->card_caps |= MMC_MODE_DDR_52MHz;
mmc->card_caps |= MMC_MODE_HS_52MHz;
if (cardtype & EXT_CSD_CARD_TYPE_26)
mmc->card_caps |= MMC_MODE_HS;
/*
 * Select the capacity matching the active hardware partition (user,
 * boot, RPMB or GP1-4) and refresh the block descriptor's LBA count.
 */
static int mmc_set_capacity(struct mmc *mmc, int part_num)
mmc->capacity = mmc->capacity_user;
mmc->capacity = mmc->capacity_boot;
mmc->capacity = mmc->capacity_rpmb;
/* GP partitions are numbered 4..7 in part_num space. */
mmc->capacity = mmc->capacity_gp[part_num - 4];
mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * Boot/RPMB partitions cannot be accessed in HS200; if the current mode
 * is forbidden for @part_num, reselect mode/width with that capability
 * masked out.  Compiled to a no-op stub without HS200 support.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
if (part_num & PART_ACCESS_MASK)
forbidden = MMC_CAP(MMC_HS_200);
if (MMC_CAP(mmc->selected_mode) & forbidden) {
pr_debug("selected mode (%s) is forbidden for part %d\n",
mmc_mode_name(mmc->selected_mode), part_num);
} else if (mmc->selected_mode != mmc->best_mode) {
pr_debug("selected mode is not optimal\n");
return mmc_select_mode_and_width(mmc,
mmc->card_caps & ~forbidden);
/* Stub used when HS200 support is compiled out: no restriction applies. */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
unsigned int part_num)
/*
 * Switch the active hardware partition via EXT_CSD PART_CONF, after
 * checking the current bus mode is allowed for that partition, and
 * update capacity/hwpart bookkeeping on success.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
ret = mmc_boot_part_access_chk(mmc, part_num);
ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
(mmc->part_config & ~PART_ACCESS_MASK)
| (part_num & PART_ACCESS_MASK));
/*
 * Set the capacity if the switch succeeded or was intended
 * to return to representing the raw device.
 */
if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
ret = mmc_set_capacity(mmc, part_num);
mmc_get_blk_desc(mmc)->hwpart = part_num;
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Validate and (optionally) apply an eMMC hardware partition layout:
 * enhanced user area, GP partitions and write-reliability settings.
 * @mode selects check-only, set, or set-and-complete (the last one
 * writes PARTITION_SETTING_COMPLETED, which is irreversible and only
 * takes effect after a power cycle).
 */
int mmc_hwpart_config(struct mmc *mmc,
const struct mmc_hwpart_conf *conf,
enum mmc_hwpart_conf_mode mode)
u32 max_enh_size_mult;
u32 tot_enh_size_mult = 0;
ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
/* Feature requires eMMC >= 4.41 with partitioning support advertised. */
if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
pr_err("eMMC >= 4.4 required for enhanced user data area\n");
if (!(mmc->part_support & PART_SUPPORT)) {
pr_err("Card does not support partitioning\n");
if (!mmc->hc_wp_grp_size) {
pr_err("Card does not define HC WP group size\n");
/* check partition alignment and total enhanced size */
if (conf->user.enh_size) {
if (conf->user.enh_size % mmc->hc_wp_grp_size ||
conf->user.enh_start % mmc->hc_wp_grp_size) {
pr_err("User data enhanced area not HC WP group "
part_attrs |= EXT_CSD_ENH_USR;
enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
/* Byte-addressed cards need the start address in bytes (<< 9). */
if (mmc->high_capacity) {
enh_start_addr = conf->user.enh_start;
enh_start_addr = (conf->user.enh_start << 9);
tot_enh_size_mult += enh_size_mult;
for (pidx = 0; pidx < 4; pidx++) {
if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
pr_err("GP%i partition not HC WP group size "
"aligned\n", pidx+1);
gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
part_attrs |= EXT_CSD_ENH_GP(pidx);
tot_enh_size_mult += gp_size_mult[pidx];
if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
pr_err("Card does not support enhanced attribute\n");
return -EMEDIUMTYPE;
err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field in EXT_CSD. */
(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
if (tot_enh_size_mult > max_enh_size_mult) {
pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
tot_enh_size_mult, max_enh_size_mult);
return -EMEDIUMTYPE;
/* The default value of EXT_CSD_WR_REL_SET is device
 * dependent, the values can only be changed if the
 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
 * changed only once and before partitioning is completed. */
wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
if (conf->user.wr_rel_change) {
if (conf->user.wr_rel_set)
wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
for (pidx = 0; pidx < 4; pidx++) {
if (conf->gp_part[pidx].wr_rel_change) {
if (conf->gp_part[pidx].wr_rel_set)
wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
!(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
puts("Card does not support host controlled partition write "
"reliability settings\n");
return -EMEDIUMTYPE;
/* PARTITION_SETTING_COMPLETED already set -> layout is frozen. */
if (ext_csd[EXT_CSD_PARTITION_SETTING] &
EXT_CSD_PARTITION_SETTING_COMPLETED) {
pr_err("Card already partitioned\n");
if (mode == MMC_HWPART_CONF_CHECK)
/* Partitioning requires high-capacity size definitions */
if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_ERASE_GROUP_DEF, 1);
ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
/* update erase group size to be high-capacity */
mmc->erase_grp_size =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
/* all OK, write the configuration */
for (i = 0; i < 4; i++) {
err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_ENH_START_ADDR+i,
(enh_start_addr >> (i*8)) & 0xFF);
for (i = 0; i < 3; i++) {
err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_ENH_SIZE_MULT+i,
(enh_size_mult >> (i*8)) & 0xFF);
for (pidx = 0; pidx < 4; pidx++) {
for (i = 0; i < 3; i++) {
err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_GP_SIZE_MULT+pidx*3+i,
(gp_size_mult[pidx] >> (i*8)) & 0xFF);
err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
if (mode == MMC_HWPART_CONF_SET)
/* The WR_REL_SET is a write-once register but shall be
 * written before setting PART_SETTING_COMPLETED. As it is
 * write-once we can only write it when completing the
 * partitioning. */
if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_WR_REL_SET, wr_rel_set);
/* Setting PART_SETTING_COMPLETED confirms the partition
 * configuration but it only becomes effective after power
 * cycle, so we do not adjust the partition related settings
 * in the mmc struct. */
err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_PARTITION_SETTING,
EXT_CSD_PARTITION_SETTING_COMPLETED);
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Report card-detect state: query the board hook, then the controller's
 * getcd op when one is provided.
 */
int mmc_getcd(struct mmc *mmc)
cd = board_mmc_getcd(mmc);
if (mmc->cfg->ops->getcd)
cd = mmc->cfg->ops->getcd(mmc);
#if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Issue SD CMD6 (SWITCH_FUNC) setting @value in function @group, with
 * @mode choosing check vs. switch, and read the 64-byte status block
 * into @resp.
 */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
struct mmc_data data;
/* Switch the frequency */
cmd.cmdidx = SD_CMD_SWITCH_FUNC;
cmd.resp_type = MMC_RSP_R1;
/* Start from "no change" (0xf) in every group, then set ours. */
cmd.cmdarg = (mode << 31) | 0xffffff;
cmd.cmdarg &= ~(0xf << (group * 4));
cmd.cmdarg |= value << (group * 4);
data.dest = (char *)resp;
data.blocksize = 64;
data.flags = MMC_DATA_READ;
return mmc_send_cmd(mmc, &cmd, &data);
/*
 * Discover an SD card's capabilities: read the SCR to learn the spec
 * version and 4-bit support, probe CMD6 for high-speed, and (with UHS
 * support) decode the SDR/DDR bus-mode bits from the switch status.
 */
static int sd_get_capabilities(struct mmc *mmc)
ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
struct mmc_data data;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
if (mmc_host_is_spi(mmc))
/* Read the SCR to find out if this card supports higher speeds */
cmd.cmdidx = MMC_CMD_APP_CMD;
cmd.resp_type = MMC_RSP_R1;
cmd.cmdarg = mmc->rca << 16;
err = mmc_send_cmd(mmc, &cmd, NULL);
cmd.cmdidx = SD_CMD_APP_SEND_SCR;
cmd.resp_type = MMC_RSP_R1;
data.dest = (char *)scr;
data.flags = MMC_DATA_READ;
err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is big-endian on the wire; convert before decoding. */
mmc->scr[0] = __be32_to_cpu(scr[0]);
mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC field (SCR bits 59:56) selects the physical-layer version. */
switch ((mmc->scr[0] >> 24) & 0xf) {
mmc->version = SD_VERSION_1_0;
mmc->version = SD_VERSION_1_10;
mmc->version = SD_VERSION_2;
if ((mmc->scr[0] >> 15) & 0x1)
mmc->version = SD_VERSION_3;
mmc->version = SD_VERSION_1_0;
if (mmc->scr[0] & SD_DATA_4BIT)
mmc->card_caps |= MMC_MODE_4BIT;
/* Version 1.0 doesn't support switching */
if (mmc->version == SD_VERSION_1_0)
err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
(u8 *)switch_status);
/* The high-speed function is busy. Try again */
if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
/* If high-speed isn't supported, we return */
if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
mmc->card_caps |= MMC_CAP(SD_HS);
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Version before 3.0 don't support UHS modes */
if (mmc->version < SD_VERSION_3)
sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
if (sd3_bus_mode & SD_MODE_UHS_SDR104)
mmc->card_caps |= MMC_CAP(UHS_SDR104);
if (sd3_bus_mode & SD_MODE_UHS_SDR50)
mmc->card_caps |= MMC_CAP(UHS_SDR50);
if (sd3_bus_mode & SD_MODE_UHS_SDR25)
mmc->card_caps |= MMC_CAP(UHS_SDR25);
if (sd3_bus_mode & SD_MODE_UHS_SDR12)
mmc->card_caps |= MMC_CAP(UHS_SDR12);
if (sd3_bus_mode & SD_MODE_UHS_DDR50)
mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * Switch the SD card to the CMD6 function group-1 speed matching the
 * requested bus mode, then verify the card accepted it by checking the
 * selected-function nibble in the switch status.
 */
static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
/* SD version 1.00 and 1.01 does not support CMD 6 */
if (mmc->version == SD_VERSION_1_0)
speed = UHS_SDR12_BUS_SPEED;
speed = HIGH_SPEED_BUS_SPEED;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
speed = UHS_SDR12_BUS_SPEED;
speed = UHS_SDR25_BUS_SPEED;
speed = UHS_SDR50_BUS_SPEED;
speed = UHS_DDR50_BUS_SPEED;
speed = UHS_SDR104_BUS_SPEED;
err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* Selected-function nibble must echo the requested speed code. */
if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * Set the SD card's bus width (1 or 4 bits only) via ACMD6
 * (APP_CMD + SET_BUS_WIDTH).
 */
static int sd_select_bus_width(struct mmc *mmc, int w)
if ((w != 4) && (w != 1))
cmd.cmdidx = MMC_CMD_APP_CMD;
cmd.resp_type = MMC_RSP_R1;
cmd.cmdarg = mmc->rca << 16;
err = mmc_send_cmd(mmc, &cmd, NULL);
cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
cmd.resp_type = MMC_RSP_R1;
err = mmc_send_cmd(mmc, &cmd, NULL);
#if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the SD Status register (ACMD13) and decode the allocation unit
 * size plus erase timeout/offset, caching them in mmc->ssr for use by
 * the erase path.
 */
static int sd_read_ssr(struct mmc *mmc)
/* AU_SIZE code -> allocation unit size in 512-byte sectors. */
static const unsigned int sd_au_size[] = {
0, SZ_16K / 512, SZ_32K / 512,
SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
struct mmc_data data;
unsigned int au, eo, et, es;
cmd.cmdidx = MMC_CMD_APP_CMD;
cmd.resp_type = MMC_RSP_R1;
cmd.cmdarg = mmc->rca << 16;
err = mmc_send_cmd(mmc, &cmd, NULL);
cmd.cmdidx = SD_CMD_APP_SD_STATUS;
cmd.resp_type = MMC_RSP_R1;
data.dest = (char *)ssr;
data.blocksize = 64;
data.flags = MMC_DATA_READ;
err = mmc_send_cmd(mmc, &cmd, &data);
/* SD status arrives big-endian; normalize all 16 words. */
for (i = 0; i < 16; i++)
ssr[i] = be32_to_cpu(ssr[i]);
au = (ssr[2] >> 12) & 0xF;
/* Codes above 9 are only defined from SD 3.0 on. */
if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
mmc->ssr.au = sd_au_size[au];
es = (ssr[3] >> 24) & 0xFF;
es |= (ssr[2] & 0xFF) << 8;
et = (ssr[3] >> 18) & 0x3F;
eo = (ssr[3] >> 16) & 0x3;
mmc->ssr.erase_timeout = (et * 1000) / es;
mmc->ssr.erase_offset = eo * 1000;
pr_debug("Invalid Allocation Unit Size.\n");
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 */
static const u8 multipliers[] = {
/*
 * Translate a single bus-width capability flag into the numeric width.
 * NOTE(review): the log message misspells "width" as "witdh"; fixing it
 * changes a runtime string, so it is only flagged here.
 */
static inline int bus_width(uint cap)
if (cap == MMC_MODE_8BIT)
if (cap == MMC_MODE_4BIT)
if (cap == MMC_MODE_1BIT)
pr_warn("invalid bus witdh capability 0x%x\n", cap);
#if !CONFIG_IS_ENABLED(DM_MMC)
#ifdef MMC_SUPPORTS_TUNING
/* Non-DM tuning entry: forward @opcode to the controller (body elided here). */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
/* Non-DM init-stream hook (body elided in this excerpt). */
static void mmc_send_init_stream(struct mmc *mmc)
/* Push the current ios (clock/width/voltage) to the controller driver. */
static int mmc_set_ios(struct mmc *mmc)
if (mmc->cfg->ops->set_ios)
ret = mmc->cfg->ops->set_ios(mmc);
/*
 * Set the bus clock, clamped into the host's [f_min, f_max] window;
 * @disable gates the clock off while keeping the frequency recorded.
 */
int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
if (clock > mmc->cfg->f_max)
clock = mmc->cfg->f_max;
if (clock < mmc->cfg->f_min)
clock = mmc->cfg->f_min;
mmc->clk_disable = disable;
debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
return mmc_set_ios(mmc);
/* Record the bus width and push it to the controller via set_ios. */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
mmc->bus_width = width;
return mmc_set_ios(mmc);
#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * helper function to display the capabilities in a human
 * friendly manner. The capabilities include bus width and
 * supported modes.
 */
void mmc_dump_capabilities(const char *text, uint caps)
pr_debug("%s: widths [", text);
if (caps & MMC_MODE_8BIT)
if (caps & MMC_MODE_4BIT)
if (caps & MMC_MODE_1BIT)
/* "\b\b" erases the trailing ", " separator before closing the list. */
pr_debug("\b\b] modes [");
for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
if (MMC_CAP(mode) & caps)
pr_debug("%s, ", mmc_mode_name(mode));
pr_debug("\b\b]\n");
/* One candidate (mode, widths, tuning-cmd) entry for mode selection tables. */
struct mode_width_tuning {
#ifdef MMC_SUPPORTS_TUNING
#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Convert a signal-voltage enum to millivolts; 0 for the "off" level. */
int mmc_voltage_to_mv(enum mmc_voltage voltage)
case MMC_SIGNAL_VOLTAGE_000: return 0;
case MMC_SIGNAL_VOLTAGE_330: return 3300;
case MMC_SIGNAL_VOLTAGE_180: return 1800;
case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * Change the I/O signal voltage via set_ios; no-op when the requested
 * level is already active.  A stub is used without MMC_IO_VOLTAGE.
 */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
if (mmc->signal_voltage == signal_voltage)
mmc->signal_voltage = signal_voltage;
err = mmc_set_ios(mmc);
pr_debug("unable to set voltage (err %d)\n", err);
/* Stub used when I/O voltage switching is compiled out. */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
#if !CONFIG_IS_ENABLED(MMC_TINY)
/* SD bus modes in descending order of preference for mode selection. */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
.tuning = MMC_CMD_SEND_TUNING_BLOCK
.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate preference-ordered SD modes, skipping ones @caps lacks. */
#define for_each_sd_mode_by_pref(caps, mwt) \
for (mwt = sd_modes_by_pref;\
mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
if (caps & MMC_CAP(mwt->mode))
/*
 * Negotiate the best SD bus mode and width: intersect card and host
 * capabilities, then walk modes by preference and widths (4-bit first),
 * configuring card and host, running tuning when required, and reading
 * the SSR.  On failure, fall back to SD_LEGACY at a safe clock.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS only possible if the card granted the 1.8V request (S18R). */
bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
bool uhs_en = false;
mmc_dump_capabilities("sd card", card_caps);
mmc_dump_capabilities("host", mmc->host_caps);
/* Restrict card's capabilities by what the host can do */
caps = card_caps & mmc->host_caps;
for_each_sd_mode_by_pref(caps, mwt) {
for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
if (*w & caps & mwt->widths) {
pr_debug("trying mode %s width %d (at %d MHz)\n",
mmc_mode_name(mwt->mode),
mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* configure the bus width (card + host) */
err = sd_select_bus_width(mmc, bus_width(*w));
mmc_set_bus_width(mmc, bus_width(*w));
/* configure the bus mode (card) */
err = sd_set_card_speed(mmc, mwt->mode);
/* configure the bus mode (host) */
mmc_select_mode(mmc, mwt->mode);
mmc_set_clock(mmc, mmc->tran_speed,
#ifdef MMC_SUPPORTS_TUNING
/* execute tuning if needed */
if (mwt->tuning && !mmc_host_is_spi(mmc)) {
err = mmc_execute_tuning(mmc,
pr_debug("tuning failed\n");
#if CONFIG_IS_ENABLED(MMC_WRITE)
err = sd_read_ssr(mmc);
pr_warn("unable to read ssr\n");
/* revert to a safer bus speed */
mmc_select_mode(mmc, SD_LEGACY);
mmc_set_clock(mmc, mmc->tran_speed,
pr_err("unable to select a mode\n");
/*
 * read and compare the part of ext csd that is constant.
 * This can be used to check that the transfer is working
 * as expected.
 */
static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
const u8 *ext_csd = mmc->ext_csd;
ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* Pre-v4 cards have no EXT_CSD to compare. */
if (mmc->version < MMC_VERSION_4)
err = mmc_send_ext_csd(mmc, test_csd);
/* Only compare read only fields */
if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
ext_csd[EXT_CSD_REV]
== test_csd[EXT_CSD_REV] &&
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
memcmp(&ext_csd[EXT_CSD_SEC_CNT],
&test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1747 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Build a mask of I/O voltages the card advertises (via its EXT_CSD card
 * type bits), intersect it with allowed_mask, and try candidates via
 * mmc_set_signal_voltage() until one succeeds.
 */
1748 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1749 uint32_t allowed_mask)
1756 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1757 EXT_CSD_CARD_TYPE_HS400_1_8V))
1758 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1759 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1760 EXT_CSD_CARD_TYPE_HS400_1_2V))
1761 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1764 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1765 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1766 MMC_SIGNAL_VOLTAGE_180;
1767 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1768 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1771 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/*
 * ffs() picks the lowest set bit of the intersection; presumably the
 * MMC_SIGNAL_VOLTAGE_* bits are ordered lowest-voltage-first so this
 * matches the function name — confirm against the enum in mmc.h.
 */
1775 while (card_mask & allowed_mask) {
1776 enum mmc_voltage best_match;
1778 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1779 if (!mmc_set_signal_voltage(mmc, best_match))
/* This candidate failed; drop it and try the next one */
1782 allowed_mask &= ~best_match;
/* Stub variant used when MMC_IO_VOLTAGE support is compiled out */
1788 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1789 uint32_t allowed_mask)
/*
 * Table of MMC bus modes in order of preference (fastest/most capable
 * first, judging by the HS400/HS200 guards at the top). The .mode field
 * initializers are not visible in this sampled excerpt.
 */
1795 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1796 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1799 .widths = MMC_MODE_8BIT,
1800 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1803 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1806 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1807 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1812 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1816 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1820 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1824 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate over mmc_modes_by_pref, visiting only entries whose mode is
 * present in the caps bitmask.
 */
1828 #define for_each_mmc_mode_by_pref(caps, mwt) \
1829 for (mwt = mmc_modes_by_pref;\
1830 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1832 if (caps & MMC_CAP(mwt->mode))
/*
 * Map host width capability bits (+ DDR flag) to the EXT_CSD BUS_WIDTH
 * values that must be written to the card. Ordered widest-first.
 */
1834 static const struct ext_csd_bus_width {
1838 } ext_csd_bus_width[] = {
1839 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1840 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1841 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1842 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1843 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1846 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch an eMMC device into HS400 mode. The visible sequence is:
 * enter HS200 and tune, drop back to HS, switch the card to 8-bit DDR
 * bus width, then set HS400 timing and raise the clock.
 * NOTE(review): sampled excerpt — intermediate error checks are not
 * visible here.
 */
1847 static int mmc_select_hs400(struct mmc *mmc)
1851 /* Set timing to HS200 for tuning */
1852 err = mmc_set_card_speed(mmc, MMC_HS_200);
1856 /* configure the bus mode (host) */
1857 mmc_select_mode(mmc, MMC_HS_200);
1858 mmc_set_clock(mmc, mmc->tran_speed, false);
1860 /* execute tuning if needed */
1861 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1863 debug("tuning failed\n");
1867 /* Set back to HS before switching the bus width to DDR */
1868 mmc_set_card_speed(mmc, MMC_HS);
1869 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
1871 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1872 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1876 err = mmc_set_card_speed(mmc, MMC_HS_400);
1880 mmc_select_mode(mmc, MMC_HS_400);
1881 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub variant when HS400 support is compiled out */
1888 static int mmc_select_hs400(struct mmc *mmc)
/*
 * Iterate over ext_csd_bus_width entries that match the requested DDR
 * setting and whose width capability is present in caps.
 */
1894 #define for_each_supported_width(caps, ddr, ecbv) \
1895 for (ecbv = ext_csd_bus_width;\
1896 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1898 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Select the best working (e)MMC bus mode and width: intersect card and
 * host capabilities, then walk modes in preference order and widths
 * widest-first, programming card then host, tuning if required, and
 * verifying with an EXT_CSD read-back. On verification failure the
 * signal voltage and bus are reverted and the next candidate is tried.
 * NOTE(review): sampled excerpt — error-check lines between the visible
 * statements are missing from this listing.
 */
1900 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1903 const struct mode_width_tuning *mwt;
1904 const struct ext_csd_bus_width *ecbw;
1907 mmc_dump_capabilities("mmc", card_caps);
1908 mmc_dump_capabilities("host", mmc->host_caps);
1911 /* Restrict card's capabilities by what the host can do */
1912 card_caps &= mmc->host_caps;
1914 /* Only version 4 of MMC supports wider bus widths */
1915 if (mmc->version < MMC_VERSION_4)
1918 if (!mmc->ext_csd) {
1919 pr_debug("No ext_csd found!\n"); /* this should never happen */
/* Start from a known-safe legacy clock before probing faster modes */
1923 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1925 for_each_mmc_mode_by_pref(card_caps, mwt) {
1926 for_each_supported_width(card_caps & mwt->widths,
1927 mmc_is_mode_ddr(mwt->mode), ecbw) {
1928 enum mmc_voltage old_voltage;
1929 pr_debug("trying mode %s width %d (at %d MHz)\n",
1930 mmc_mode_name(mwt->mode),
1931 bus_width(ecbw->cap),
1932 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* Remember the voltage so it can be restored on failure below */
1933 old_voltage = mmc->signal_voltage;
1934 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1935 MMC_ALL_SIGNAL_VOLTAGE);
1939 /* configure the bus width (card + host) */
1940 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1942 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1945 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1947 if (mwt->mode == MMC_HS_400) {
1948 err = mmc_select_hs400(mmc);
1950 printf("Select HS400 failed %d\n", err);
1954 /* configure the bus speed (card) */
1955 err = mmc_set_card_speed(mmc, mwt->mode);
1960 * configure the bus width AND the ddr mode
1961 * (card). The host side will be taken care
1962 * of in the next step
1964 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1965 err = mmc_switch(mmc,
1966 EXT_CSD_CMD_SET_NORMAL,
1968 ecbw->ext_csd_bits);
1973 /* configure the bus mode (host) */
1974 mmc_select_mode(mmc, mwt->mode);
1975 mmc_set_clock(mmc, mmc->tran_speed,
1977 #ifdef MMC_SUPPORTS_TUNING
1979 /* execute tuning if needed */
1981 err = mmc_execute_tuning(mmc,
1984 pr_debug("tuning failed\n");
1991 /* do a transfer to check the configuration */
1992 err = mmc_read_and_compare_ext_csd(mmc);
1996 mmc_set_signal_voltage(mmc, old_voltage);
1997 /* if an error occurred, revert to a safer bus mode */
1998 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1999 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2000 mmc_select_mode(mmc, MMC_LEGACY);
2001 mmc_set_bus_width(mmc, 1);
2005 pr_err("unable to select a mode\n");
2011 #if CONFIG_IS_ENABLED(MMC_TINY)
/* Static EXT_CSD backing store for MMC_TINY builds (avoids malloc) */
2012 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ specific startup: fetch and cache EXT_CSD, derive the spec
 * version, user/boot/RPMB/GP partition capacities, erase and write-
 * protect group sizes, and enable ERASE_GRP_DEF when the device is
 * partitioned.
 * NOTE(review): sampled excerpt — error checks and some declarations
 * are not visible in this listing.
 */
2015 static int mmc_startup_v4(struct mmc *mmc)
2019 bool has_parts = false;
2020 bool part_completed;
2021 static const u32 mmc_versions[] = {
2033 #if CONFIG_IS_ENABLED(MMC_TINY)
2034 u8 *ext_csd = ext_csd_bkup;
2036 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
/*
 * NOTE(review): if ext_csd_bkup is declared via
 * DEFINE_CACHE_ALIGN_BUFFER it may be a pointer, in which case
 * sizeof() here is the pointer size rather than MMC_MAX_BLOCK_LEN —
 * verify against the macro definition.
 */
2040 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2042 err = mmc_send_ext_csd(mmc, ext_csd);
2046 /* store the ext csd for future reference */
2048 mmc->ext_csd = ext_csd;
2050 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2052 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2055 /* check ext_csd version and capacity */
2056 err = mmc_send_ext_csd(mmc, ext_csd);
2060 /* store the ext csd for future reference */
2062 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2065 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* Guard against EXT_CSD_REV values newer than the lookup table */
2067 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2070 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2072 if (mmc->version >= MMC_VERSION_4_2) {
2074 * According to the JEDEC Standard, the value of
2075 * ext_csd's capacity is valid if the value is more
/* SEC_CNT is a little-endian 32-bit sector count */
2078 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2079 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2080 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2081 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2082 capacity *= MMC_MAX_BLOCK_LEN;
/* Only trust EXT_CSD capacity above 2 GiB (see JEDEC note above) */
2083 if ((capacity >> 20) > 2 * 1024)
2084 mmc->capacity_user = capacity;
2087 /* The partition data may be non-zero but it is only
2088 * effective if PARTITION_SETTING_COMPLETED is set in
2089 * EXT_CSD, so ignore any data if this bit is not set,
2090 * except for enabling the high-capacity group size
2091 * definition (see below).
2093 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2094 EXT_CSD_PARTITION_SETTING_COMPLETED);
2096 /* store the partition info of emmc */
2097 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2098 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2099 ext_csd[EXT_CSD_BOOT_MULT])
2100 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2101 if (part_completed &&
2102 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2103 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT/RPMB multipliers are in 128 KiB units (hence << 17) */
2105 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2107 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* Each of the 4 general-purpose partitions has a 3-byte size mult */
2109 for (i = 0; i < 4; i++) {
2110 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2111 uint mult = (ext_csd[idx + 2] << 16) +
2112 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2115 if (!part_completed)
2117 mmc->capacity_gp[i] = mult;
2118 mmc->capacity_gp[i] *=
2119 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2120 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2121 mmc->capacity_gp[i] <<= 19;
2124 #ifndef CONFIG_SPL_BUILD
2125 if (part_completed) {
2126 mmc->enh_user_size =
2127 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2128 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2129 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2130 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2131 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2132 mmc->enh_user_size <<= 19;
2133 mmc->enh_user_start =
2134 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2135 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2136 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2137 ext_csd[EXT_CSD_ENH_START_ADDR];
/* High-capacity devices address in 512-byte sectors */
2138 if (mmc->high_capacity)
2139 mmc->enh_user_start <<= 9;
2144 * Host needs to enable ERASE_GRP_DEF bit if device is
2145 * partitioned. This bit will be lost every time after a reset
2146 * or power off. This will affect erase size.
2150 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2151 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2154 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2155 EXT_CSD_ERASE_GROUP_DEF, 1);
/* Keep the cached copy consistent with what was written */
2160 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2163 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2164 #if CONFIG_IS_ENABLED(MMC_WRITE)
2165 /* Read out group size from ext_csd */
2166 mmc->erase_grp_size =
2167 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2170 * if high capacity and partition setting completed
2171 * SEC_COUNT is valid even if it is smaller than 2 GiB
2172 * JEDEC Standard JESD84-B45, 6.2.4
2174 if (mmc->high_capacity && part_completed) {
2175 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2176 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2177 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2178 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2179 capacity *= MMC_MAX_BLOCK_LEN;
2180 mmc->capacity_user = capacity;
2183 #if CONFIG_IS_ENABLED(MMC_WRITE)
2185 /* Calculate the group size from the csd value. */
2186 int erase_gsz, erase_gmul;
2188 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2189 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2190 mmc->erase_grp_size = (erase_gsz + 1)
2194 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2195 mmc->hc_wp_grp_size = 1024
2196 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2197 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2200 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2205 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* Error path: drop the cached EXT_CSD reference */
2208 mmc->ext_csd = NULL;
/*
 * Bring an identified card up to the transfer state and fill in the
 * mmc/blk_desc structures: read CID and CSD, set/get the relative card
 * address, derive version, legacy speed, block lengths and capacity,
 * select the card, run v4 startup, pick the best bus mode/width, and
 * finally populate the block-device description strings.
 * NOTE(review): sampled excerpt — declarations, error checks and some
 * control-flow lines are missing from this listing.
 */
2213 static int mmc_startup(struct mmc *mmc)
2219 struct blk_desc *bdesc;
2221 #ifdef CONFIG_MMC_SPI_CRC_ON
2222 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2223 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2224 cmd.resp_type = MMC_RSP_R1;
2226 err = mmc_send_cmd(mmc, &cmd, NULL);
2232 /* Put the Card in Identify Mode */
2233 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2234 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2235 cmd.resp_type = MMC_RSP_R2;
2238 err = mmc_send_cmd(mmc, &cmd, NULL);
2240 #ifdef CONFIG_MMC_QUIRKS
2241 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2244 * It has been seen that SEND_CID may fail on the first
2245 * attempt, let's try a few more times
2248 err = mmc_send_cmd(mmc, &cmd, NULL);
2251 } while (retries--);
2258 memcpy(mmc->cid, cmd.response, 16);
2261 * For MMC cards, set the Relative Address.
2262 * For SD cards, get the Relative Address.
2263 * This also puts the cards into Standby State
2265 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2266 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2267 cmd.cmdarg = mmc->rca << 16;
2268 cmd.resp_type = MMC_RSP_R6;
2270 err = mmc_send_cmd(mmc, &cmd, NULL);
/* For SD the card assigns the RCA; it is in response bits 31:16 */
2276 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2279 /* Get the Card-Specific Data */
2280 cmd.cmdidx = MMC_CMD_SEND_CSD;
2281 cmd.resp_type = MMC_RSP_R2;
2282 cmd.cmdarg = mmc->rca << 16;
2284 err = mmc_send_cmd(mmc, &cmd, NULL);
2289 mmc->csd[0] = cmd.response[0];
2290 mmc->csd[1] = cmd.response[1];
2291 mmc->csd[2] = cmd.response[2];
2292 mmc->csd[3] = cmd.response[3];
/* Derive the MMC spec version from the CSD SPEC_VERS field */
2294 if (mmc->version == MMC_VERSION_UNKNOWN) {
2295 int version = (cmd.response[0] >> 26) & 0xf;
2299 mmc->version = MMC_VERSION_1_2;
2302 mmc->version = MMC_VERSION_1_4;
2305 mmc->version = MMC_VERSION_2_2;
2308 mmc->version = MMC_VERSION_3;
2311 mmc->version = MMC_VERSION_4;
2314 mmc->version = MMC_VERSION_1_2;
2319 /* divide frequency by 10, since the mults are 10x bigger */
2320 freq = fbase[(cmd.response[0] & 0x7)];
2321 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2323 mmc->legacy_speed = freq * mult;
2324 mmc_select_mode(mmc, MMC_LEGACY);
2326 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2327 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2328 #if CONFIG_IS_ENABLED(MMC_WRITE)
2331 mmc->write_bl_len = mmc->read_bl_len;
2333 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* Compute user capacity from the CSD C_SIZE/C_SIZE_MULT fields */
2336 if (mmc->high_capacity) {
2337 csize = (mmc->csd[1] & 0x3f) << 16
2338 | (mmc->csd[2] & 0xffff0000) >> 16;
2341 csize = (mmc->csd[1] & 0x3ff) << 2
2342 | (mmc->csd[2] & 0xc0000000) >> 30;
2343 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2346 mmc->capacity_user = (csize + 1) << (cmult + 2);
2347 mmc->capacity_user *= mmc->read_bl_len;
2348 mmc->capacity_boot = 0;
2349 mmc->capacity_rpmb = 0;
2350 for (i = 0; i < 4; i++)
2351 mmc->capacity_gp[i] = 0;
/* Clamp block lengths to the driver's maximum */
2353 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2354 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2356 #if CONFIG_IS_ENABLED(MMC_WRITE)
2357 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2358 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the DSR only if the card says it implements one */
2361 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2362 cmd.cmdidx = MMC_CMD_SET_DSR;
2363 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2364 cmd.resp_type = MMC_RSP_NONE;
2365 if (mmc_send_cmd(mmc, &cmd, NULL))
2366 pr_warn("MMC: SET_DSR failed\n");
2369 /* Select the card, and put it into Transfer Mode */
2370 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2371 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2372 cmd.resp_type = MMC_RSP_R1;
2373 cmd.cmdarg = mmc->rca << 16;
2374 err = mmc_send_cmd(mmc, &cmd, NULL);
2381 * For SD, its erase group is always one sector
2383 #if CONFIG_IS_ENABLED(MMC_WRITE)
2384 mmc->erase_grp_size = 1;
2386 mmc->part_config = MMCPART_NOAVAILABLE;
2388 err = mmc_startup_v4(mmc);
2392 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2396 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, skip mode negotiation */
2397 mmc_set_clock(mmc, mmc->legacy_speed, false);
2398 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2399 mmc_set_bus_width(mmc, 1);
2402 err = sd_get_capabilities(mmc);
2405 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2407 err = mmc_get_capabilities(mmc);
2410 mmc_select_mode_and_width(mmc, mmc->card_caps);
2416 mmc->best_mode = mmc->selected_mode;
2418 /* Fix the block length for DDR mode */
2419 if (mmc->ddr_mode) {
2420 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2421 #if CONFIG_IS_ENABLED(MMC_WRITE)
2422 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2426 /* fill in device description */
2427 bdesc = mmc_get_blk_desc(mmc);
2431 bdesc->blksz = mmc->read_bl_len;
2432 bdesc->log2blksz = LOG2(bdesc->blksz);
2433 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2434 #if !defined(CONFIG_SPL_BUILD) || \
2435 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2436 !defined(CONFIG_USE_TINY_PRINTF))
/* Vendor/product/revision strings decoded from the CID register */
2437 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2438 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2439 (mmc->cid[3] >> 16) & 0xffff);
2440 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2441 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2442 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2443 (mmc->cid[2] >> 24) & 0xff);
2444 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2445 (mmc->cid[2] >> 16) & 0xf);
2447 bdesc->vendor[0] = 0;
2448 bdesc->product[0] = 0;
2449 bdesc->revision[0] = 0;
2452 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * Send SD CMD8 (SEND_IF_COND) with check pattern 0xaa; if the card
 * echoes the pattern back it is an SD version 2 card.
 */
2459 static int mmc_send_if_cond(struct mmc *mmc)
2464 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2465 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2466 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2467 cmd.resp_type = MMC_RSP_R7;
2469 err = mmc_send_cmd(mmc, &cmd, NULL);
/* Card must echo the 0xaa check pattern for a valid response */
2474 if ((cmd.response[0] & 0xff) != 0xaa)
2477 mmc->version = SD_VERSION_2;
2482 #if !CONFIG_IS_ENABLED(DM_MMC)
2483 /* board-specific MMC power initializations; weak default is a no-op */
2484 __weak void board_mmc_power_init(void)
/*
 * Look up the vmmc/vqmmc supply regulators from the device tree (DM
 * builds); non-DM builds fall back to the board_mmc_power_init() hook.
 * A missing regulator is logged at debug level only, not fatal.
 */
2489 static int mmc_power_init(struct mmc *mmc)
2491 #if CONFIG_IS_ENABLED(DM_MMC)
2492 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2495 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2498 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2500 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2501 &mmc->vqmmc_supply);
2503 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2505 #else /* !CONFIG_DM_MMC */
2507 * Driver model should use a regulator, as above, rather than calling
2508 * out to board code.
2510 board_mmc_power_init();
2516  * put the host in the initial state:
2517  * - turn on Vdd (card power supply)
2518  * - configure the bus width and clock to minimal values
2520 static void mmc_set_initial_state(struct mmc *mmc)
2524 /* First try to set 3.3V. If it fails set to 1.8V */
2525 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2527 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2529 pr_warn("mmc: failed to set signal voltage\n");
/* Minimal bus: legacy mode, 1-bit width, identification clock */
2531 mmc_select_mode(mmc, MMC_LEGACY);
2532 mmc_set_bus_width(mmc, 1);
2533 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card's vmmc supply regulator, if one was found */
2536 static int mmc_power_on(struct mmc *mmc)
2538 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2539 if (mmc->vmmc_supply) {
2540 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2543 puts("Error enabling VMMC supply\n");
/* Gate the clock, then disable the vmmc supply regulator if present */
2551 static int mmc_power_off(struct mmc *mmc)
2553 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2554 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2555 if (mmc->vmmc_supply) {
2556 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2559 pr_debug("Error disabling VMMC supply\n");
/* Power the card off, wait briefly, then power it back on */
2567 static int mmc_power_cycle(struct mmc *mmc)
2571 ret = mmc_power_off(mmc);
2575 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2576 * to be on the safer side.
2579 return mmc_power_on(mmc);
/*
 * Power up and identify the card type: initialize power, optionally
 * power-cycle (disabling UHS modes if that is impossible), run the host
 * init hook, reset the card with CMD0, then probe SD (CMD8 + ACMD41)
 * and fall back to MMC (CMD1) on timeout.
 * NOTE(review): sampled excerpt — error checks between the visible
 * statements are missing from this listing.
 */
2582 int mmc_get_op_cond(struct mmc *mmc)
2584 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2590 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2591 mmc_adapter_card_type_ident();
2593 err = mmc_power_init(mmc);
2597 #ifdef CONFIG_MMC_QUIRKS
2598 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2599 MMC_QUIRK_RETRY_SEND_CID;
2602 err = mmc_power_cycle(mmc);
2605 * if power cycling is not supported, we should not try
2606 * to use the UHS modes, because we wouldn't be able to
2607 * recover from an error during the UHS initialization.
2609 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2611 mmc->host_caps &= ~UHS_CAPS;
2612 err = mmc_power_on(mmc);
2617 #if CONFIG_IS_ENABLED(DM_MMC)
2618 /* The device has already been probed ready for use */
2620 /* made sure it's not NULL earlier */
2621 err = mmc->cfg->ops->init(mmc);
2628 mmc_set_initial_state(mmc);
2629 mmc_send_init_stream(mmc);
2631 /* Reset the Card */
2632 err = mmc_go_idle(mmc);
2637 /* The internal partition reset to user partition(0) at every CMD0*/
2638 mmc_get_blk_desc(mmc)->hwpart = 0;
2640 /* Test for SD version 2 */
2641 err = mmc_send_if_cond(mmc);
2643 /* Now try to get the SD card's operating condition */
2644 err = sd_send_op_cond(mmc, uhs_en);
/* If UHS negotiation failed, power-cycle and retry (without UHS) */
2645 if (err && uhs_en) {
2647 mmc_power_cycle(mmc);
2651 /* If the command timed out, we check for an MMC card */
2652 if (err == -ETIMEDOUT) {
2653 err = mmc_send_op_cond(mmc);
2656 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2657 pr_err("Card did not respond to voltage select!\n");
/*
 * Begin (possibly asynchronous) card initialization: seed the host caps
 * with the always-available legacy/1-bit modes, check card presence,
 * then run mmc_get_op_cond() and mark init as in progress.
 */
2666 int mmc_start_init(struct mmc *mmc)
2672 * all hosts are capable of 1 bit bus-width and able to use the legacy
2675 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2676 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2678 #if !defined(CONFIG_MMC_BROKEN_CD)
2679 /* we pretend there's no card when init is NULL */
2680 no_card = mmc_getcd(mmc) == 0;
2684 #if !CONFIG_IS_ENABLED(DM_MMC)
2685 no_card = no_card || (mmc->cfg->ops->init == NULL);
2689 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2690 pr_err("MMC: no card present\n")
2695 err = mmc_get_op_cond(mmc);
2698 mmc->init_in_progress = 1;
/*
 * Finish initialization started by mmc_start_init(): complete the
 * pending operating-conditions handshake if needed, then run
 * mmc_startup().
 */
2703 static int mmc_complete_init(struct mmc *mmc)
2707 mmc->init_in_progress = 0;
2708 if (mmc->op_cond_pending)
2709 err = mmc_complete_op_cond(mmc);
2712 err = mmc_startup(mmc);
/*
 * Full synchronous init: start initialization if it is not already in
 * progress, then complete it, logging the elapsed time.
 */
2720 int mmc_init(struct mmc *mmc)
2723 __maybe_unused ulong start;
2724 #if CONFIG_IS_ENABLED(DM_MMC)
2725 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2732 start = get_timer(0);
2734 if (!mmc->init_in_progress)
2735 err = mmc_start_init(mmc);
2738 err = mmc_complete_init(mmc);
2740 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2745 int mmc_set_dsr(struct mmc *mmc, u16 val)
2751 /* CPU-specific MMC initializations; weak default is overridable */
2752 __weak int cpu_mmc_init(bd_t *bis)
2757 /* board-specific MMC initializations; weak default is overridable */
2758 __weak int board_mmc_init(bd_t *bis)
/* Set the flag requesting this device be initialized during preinit */
2763 void mmc_set_preinit(struct mmc *mmc, int preinit)
2765 mmc->preinit = preinit;
2768 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * DM variant: enumerate all UCLASS_MMC devices in sequence order and
 * probe each one; probe failures are logged, not fatal.
 */
2769 static int mmc_probe(bd_t *bis)
2773 struct udevice *dev;
2775 ret = uclass_get(UCLASS_MMC, &uc);
2780 * Try to add them in sequence order. Really with driver model we
2781 * should allow holes, but the current MMC list does not allow that.
2782 * So if we request 0, 1, 3 we will get 0, 1, 2.
2784 for (i = 0; ; i++) {
2785 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2789 uclass_foreach_dev(dev, uc) {
2790 ret = device_probe(dev);
2792 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM variant: delegate registration to the board hook */
2798 static int mmc_probe(bd_t *bis)
2800 if (board_mmc_init(bis) < 0)
/*
 * One-time subsystem initialization: guarded by a static flag so
 * repeated calls are no-ops; probes devices and (non-SPL) lists them.
 */
2807 int mmc_initialize(bd_t *bis)
2809 static int initialized = 0;
2811 if (initialized) /* Avoid initializing mmc multiple times */
2815 #if !CONFIG_IS_ENABLED(BLK)
2816 #if !CONFIG_IS_ENABLED(MMC_TINY)
2820 ret = mmc_probe(bis);
2824 #ifndef CONFIG_SPL_BUILD
2825 print_mmc_devices(',');
2832 #ifdef CONFIG_CMD_BKOPS_ENABLE
2833 int mmc_set_bkops_enable(struct mmc *mmc)
2836 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2838 err = mmc_send_ext_csd(mmc, ext_csd);
2840 puts("Could not get ext_csd register values\n");
2844 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2845 puts("Background operations not supported on device\n");
2846 return -EMEDIUMTYPE;
2849 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2850 puts("Background operations already enabled\n");
2854 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2856 puts("Failed to enable manual background operations\n");
2860 puts("Enabled manual background operations\n");