2 * Copyright 2008, Freescale Semiconductor, Inc
5 * Based vaguely on the Linux code
7 * SPDX-License-Identifier: GPL-2.0+
14 #include <dm/device-internal.h>
18 #include <power/regulator.h>
21 #include <linux/list.h>
23 #include "mmc_private.h"
25 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
26 static int mmc_power_cycle(struct mmc *mmc);
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
29 #if CONFIG_IS_ENABLED(MMC_TINY)
30 static struct mmc mmc_static;
31 struct mmc *find_mmc_device(int dev_num)
36 void mmc_do_preinit(void)
38 struct mmc *m = &mmc_static;
39 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
40 mmc_set_preinit(m, 1);
46 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
48 return &mmc->block_dev;
52 #if !CONFIG_IS_ENABLED(DM_MMC)
54 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
55 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
61 __weak int board_mmc_getwp(struct mmc *mmc)
66 int mmc_getwp(struct mmc *mmc)
70 wp = board_mmc_getwp(mmc);
73 if (mmc->cfg->ops->getwp)
74 wp = mmc->cfg->ops->getwp(mmc);
82 __weak int board_mmc_getcd(struct mmc *mmc)
88 #ifdef CONFIG_MMC_TRACE
89 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
91 printf("CMD_SEND:%d\n", cmd->cmdidx);
92 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
95 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
101 printf("\t\tRET\t\t\t %d\n", ret);
103 switch (cmd->resp_type) {
105 printf("\t\tMMC_RSP_NONE\n");
108 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
112 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
116 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
118 printf("\t\t \t\t 0x%08X \n",
120 printf("\t\t \t\t 0x%08X \n",
122 printf("\t\t \t\t 0x%08X \n",
125 printf("\t\t\t\t\tDUMPING DATA\n");
126 for (i = 0; i < 4; i++) {
128 printf("\t\t\t\t\t%03d - ", i*4);
129 ptr = (u8 *)&cmd->response[i];
131 for (j = 0; j < 4; j++)
132 printf("%02X ", *ptr--);
137 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
141 printf("\t\tERROR MMC rsp not supported\n");
147 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
151 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
152 printf("CURR STATE:%d\n", status);
156 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
157 const char *mmc_mode_name(enum bus_mode mode)
159 static const char *const names[] = {
160 [MMC_LEGACY] = "MMC legacy",
161 [SD_LEGACY] = "SD Legacy",
162 [MMC_HS] = "MMC High Speed (26MHz)",
163 [SD_HS] = "SD High Speed (50MHz)",
164 [UHS_SDR12] = "UHS SDR12 (25MHz)",
165 [UHS_SDR25] = "UHS SDR25 (50MHz)",
166 [UHS_SDR50] = "UHS SDR50 (100MHz)",
167 [UHS_SDR104] = "UHS SDR104 (208MHz)",
168 [UHS_DDR50] = "UHS DDR50 (50MHz)",
169 [MMC_HS_52] = "MMC High Speed (52MHz)",
170 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
171 [MMC_HS_200] = "HS200 (200MHz)",
174 if (mode >= MMC_MODES_END)
175 return "Unknown mode";
181 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
183 static const int freqs[] = {
184 [SD_LEGACY] = 25000000,
187 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
188 [UHS_SDR12] = 25000000,
189 [UHS_SDR25] = 50000000,
190 [UHS_SDR50] = 100000000,
191 [UHS_DDR50] = 50000000,
192 #ifdef MMC_SUPPORTS_TUNING
193 [UHS_SDR104] = 208000000,
196 [MMC_HS_52] = 52000000,
197 [MMC_DDR_52] = 52000000,
198 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
199 [MMC_HS_200] = 200000000,
203 if (mode == MMC_LEGACY)
204 return mmc->legacy_speed;
205 else if (mode >= MMC_MODES_END)
211 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
213 mmc->selected_mode = mode;
214 mmc->tran_speed = mmc_mode2freq(mmc, mode);
215 mmc->ddr_mode = mmc_is_mode_ddr(mode);
216 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
217 mmc->tran_speed / 1000000);
221 #if !CONFIG_IS_ENABLED(DM_MMC)
222 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
226 mmmc_trace_before_send(mmc, cmd);
227 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
228 mmmc_trace_after_send(mmc, cmd, ret);
234 int mmc_send_status(struct mmc *mmc, int timeout)
237 int err, retries = 5;
239 cmd.cmdidx = MMC_CMD_SEND_STATUS;
240 cmd.resp_type = MMC_RSP_R1;
241 if (!mmc_host_is_spi(mmc))
242 cmd.cmdarg = mmc->rca << 16;
245 err = mmc_send_cmd(mmc, &cmd, NULL);
247 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
248 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
252 if (cmd.response[0] & MMC_STATUS_MASK) {
253 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
254 pr_err("Status Error: 0x%08X\n",
259 } else if (--retries < 0)
268 mmc_trace_state(mmc, &cmd);
270 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
271 pr_err("Timeout waiting card ready\n");
279 int mmc_set_blocklen(struct mmc *mmc, int len)
287 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
288 cmd.resp_type = MMC_RSP_R1;
291 err = mmc_send_cmd(mmc, &cmd, NULL);
293 #ifdef CONFIG_MMC_QUIRKS
294 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
297 * It has been seen that SET_BLOCKLEN may fail on the first
298 * attempt, let's try a few more time
301 err = mmc_send_cmd(mmc, &cmd, NULL);
311 #ifdef MMC_SUPPORTS_TUNING
312 static const u8 tuning_blk_pattern_4bit[] = {
313 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
314 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
315 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
316 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
317 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
318 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
319 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
320 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
323 static const u8 tuning_blk_pattern_8bit[] = {
324 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
325 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
326 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
327 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
328 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
329 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
330 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
331 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
332 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
333 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
334 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
335 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
336 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
337 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
338 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
339 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
342 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
345 struct mmc_data data;
346 const u8 *tuning_block_pattern;
349 if (mmc->bus_width == 8) {
350 tuning_block_pattern = tuning_blk_pattern_8bit;
351 size = sizeof(tuning_blk_pattern_8bit);
352 } else if (mmc->bus_width == 4) {
353 tuning_block_pattern = tuning_blk_pattern_4bit;
354 size = sizeof(tuning_blk_pattern_4bit);
359 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
363 cmd.resp_type = MMC_RSP_R1;
365 data.dest = (void *)data_buf;
367 data.blocksize = size;
368 data.flags = MMC_DATA_READ;
370 err = mmc_send_cmd(mmc, &cmd, &data);
374 if (memcmp(data_buf, tuning_block_pattern, size))
381 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
385 struct mmc_data data;
388 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
390 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
392 if (mmc->high_capacity)
395 cmd.cmdarg = start * mmc->read_bl_len;
397 cmd.resp_type = MMC_RSP_R1;
400 data.blocks = blkcnt;
401 data.blocksize = mmc->read_bl_len;
402 data.flags = MMC_DATA_READ;
404 if (mmc_send_cmd(mmc, &cmd, &data))
408 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
410 cmd.resp_type = MMC_RSP_R1b;
411 if (mmc_send_cmd(mmc, &cmd, NULL)) {
412 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
413 pr_err("mmc fail to send stop cmd\n");
422 #if CONFIG_IS_ENABLED(BLK)
423 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
425 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
429 #if CONFIG_IS_ENABLED(BLK)
430 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
432 int dev_num = block_dev->devnum;
434 lbaint_t cur, blocks_todo = blkcnt;
439 struct mmc *mmc = find_mmc_device(dev_num);
443 if (CONFIG_IS_ENABLED(MMC_TINY))
444 err = mmc_switch_part(mmc, block_dev->hwpart);
446 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
451 if ((start + blkcnt) > block_dev->lba) {
452 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
453 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
454 start + blkcnt, block_dev->lba);
459 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
460 pr_debug("%s: Failed to set blocklen\n", __func__);
465 cur = (blocks_todo > mmc->cfg->b_max) ?
466 mmc->cfg->b_max : blocks_todo;
467 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
468 pr_debug("%s: Failed to read blocks\n", __func__);
473 dst += cur * mmc->read_bl_len;
474 } while (blocks_todo > 0);
479 static int mmc_go_idle(struct mmc *mmc)
486 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
488 cmd.resp_type = MMC_RSP_NONE;
490 err = mmc_send_cmd(mmc, &cmd, NULL);
500 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
501 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
507 * Send CMD11 only if the request is to switch the card to
510 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
511 return mmc_set_signal_voltage(mmc, signal_voltage);
513 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
515 cmd.resp_type = MMC_RSP_R1;
517 err = mmc_send_cmd(mmc, &cmd, NULL);
521 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
525 * The card should drive cmd and dat[0:3] low immediately
526 * after the response of cmd11, but wait 100 us to be sure
528 err = mmc_wait_dat0(mmc, 0, 100);
535 * During a signal voltage level switch, the clock must be gated
536 * for 5 ms according to the SD spec
538 mmc_set_clock(mmc, mmc->clock, true);
540 err = mmc_set_signal_voltage(mmc, signal_voltage);
544 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
546 mmc_set_clock(mmc, mmc->clock, false);
549 * Failure to switch is indicated by the card holding
550 * dat[0:3] low. Wait for at least 1 ms according to spec
552 err = mmc_wait_dat0(mmc, 1, 1000);
562 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
569 cmd.cmdidx = MMC_CMD_APP_CMD;
570 cmd.resp_type = MMC_RSP_R1;
573 err = mmc_send_cmd(mmc, &cmd, NULL);
578 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
579 cmd.resp_type = MMC_RSP_R3;
582 * Most cards do not answer if some reserved bits
583 * in the ocr are set. However, Some controller
584 * can set bit 7 (reserved for low voltages), but
585 * how to manage low voltages SD card is not yet
588 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
589 (mmc->cfg->voltages & 0xff8000);
591 if (mmc->version == SD_VERSION_2)
592 cmd.cmdarg |= OCR_HCS;
595 cmd.cmdarg |= OCR_S18R;
597 err = mmc_send_cmd(mmc, &cmd, NULL);
602 if (cmd.response[0] & OCR_BUSY)
611 if (mmc->version != SD_VERSION_2)
612 mmc->version = SD_VERSION_1_0;
614 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
615 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
616 cmd.resp_type = MMC_RSP_R3;
619 err = mmc_send_cmd(mmc, &cmd, NULL);
625 mmc->ocr = cmd.response[0];
627 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
628 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
630 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
636 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
642 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
647 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
648 cmd.resp_type = MMC_RSP_R3;
650 if (use_arg && !mmc_host_is_spi(mmc))
651 cmd.cmdarg = OCR_HCS |
652 (mmc->cfg->voltages &
653 (mmc->ocr & OCR_VOLTAGE_MASK)) |
654 (mmc->ocr & OCR_ACCESS_MODE);
656 err = mmc_send_cmd(mmc, &cmd, NULL);
659 mmc->ocr = cmd.response[0];
663 static int mmc_send_op_cond(struct mmc *mmc)
667 /* Some cards seem to need this */
670 /* Asking to the card its capabilities */
671 for (i = 0; i < 2; i++) {
672 err = mmc_send_op_cond_iter(mmc, i != 0);
676 /* exit if not busy (flag seems to be inverted) */
677 if (mmc->ocr & OCR_BUSY)
680 mmc->op_cond_pending = 1;
684 static int mmc_complete_op_cond(struct mmc *mmc)
691 mmc->op_cond_pending = 0;
692 if (!(mmc->ocr & OCR_BUSY)) {
693 /* Some cards seem to need this */
696 start = get_timer(0);
698 err = mmc_send_op_cond_iter(mmc, 1);
701 if (mmc->ocr & OCR_BUSY)
703 if (get_timer(start) > timeout)
709 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
710 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
711 cmd.resp_type = MMC_RSP_R3;
714 err = mmc_send_cmd(mmc, &cmd, NULL);
719 mmc->ocr = cmd.response[0];
722 mmc->version = MMC_VERSION_UNKNOWN;
724 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
731 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
734 struct mmc_data data;
737 /* Get the Card Status Register */
738 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
739 cmd.resp_type = MMC_RSP_R1;
742 data.dest = (char *)ext_csd;
744 data.blocksize = MMC_MAX_BLOCK_LEN;
745 data.flags = MMC_DATA_READ;
747 err = mmc_send_cmd(mmc, &cmd, &data);
752 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
759 cmd.cmdidx = MMC_CMD_SWITCH;
760 cmd.resp_type = MMC_RSP_R1b;
761 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
765 while (retries > 0) {
766 ret = mmc_send_cmd(mmc, &cmd, NULL);
768 /* Waiting for the ready status */
770 ret = mmc_send_status(mmc, timeout);
781 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
786 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
792 speed_bits = EXT_CSD_TIMING_HS;
794 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
796 speed_bits = EXT_CSD_TIMING_HS200;
800 speed_bits = EXT_CSD_TIMING_LEGACY;
805 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
810 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
811 /* Now check to see that it worked */
812 err = mmc_send_ext_csd(mmc, test_csd);
816 /* No high-speed support */
817 if (!test_csd[EXT_CSD_HS_TIMING])
824 static int mmc_get_capabilities(struct mmc *mmc)
826 u8 *ext_csd = mmc->ext_csd;
829 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
831 if (mmc_host_is_spi(mmc))
834 /* Only version 4 supports high-speed */
835 if (mmc->version < MMC_VERSION_4)
839 pr_err("No ext_csd found!\n"); /* this should enver happen */
843 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
845 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
846 mmc->cardtype = cardtype;
848 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
849 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
850 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
851 mmc->card_caps |= MMC_MODE_HS200;
854 if (cardtype & EXT_CSD_CARD_TYPE_52) {
855 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
856 mmc->card_caps |= MMC_MODE_DDR_52MHz;
857 mmc->card_caps |= MMC_MODE_HS_52MHz;
859 if (cardtype & EXT_CSD_CARD_TYPE_26)
860 mmc->card_caps |= MMC_MODE_HS;
865 static int mmc_set_capacity(struct mmc *mmc, int part_num)
869 mmc->capacity = mmc->capacity_user;
873 mmc->capacity = mmc->capacity_boot;
876 mmc->capacity = mmc->capacity_rpmb;
882 mmc->capacity = mmc->capacity_gp[part_num - 4];
888 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
893 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
894 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
899 if (part_num & PART_ACCESS_MASK)
900 forbidden = MMC_CAP(MMC_HS_200);
902 if (MMC_CAP(mmc->selected_mode) & forbidden) {
903 pr_debug("selected mode (%s) is forbidden for part %d\n",
904 mmc_mode_name(mmc->selected_mode), part_num);
906 } else if (mmc->selected_mode != mmc->best_mode) {
907 pr_debug("selected mode is not optimal\n");
912 return mmc_select_mode_and_width(mmc,
913 mmc->card_caps & ~forbidden);
918 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
919 unsigned int part_num)
925 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
929 ret = mmc_boot_part_access_chk(mmc, part_num);
933 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
934 (mmc->part_config & ~PART_ACCESS_MASK)
935 | (part_num & PART_ACCESS_MASK));
938 * Set the capacity if the switch succeeded or was intended
939 * to return to representing the raw device.
941 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
942 ret = mmc_set_capacity(mmc, part_num);
943 mmc_get_blk_desc(mmc)->hwpart = part_num;
949 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
950 int mmc_hwpart_config(struct mmc *mmc,
951 const struct mmc_hwpart_conf *conf,
952 enum mmc_hwpart_conf_mode mode)
958 u32 max_enh_size_mult;
959 u32 tot_enh_size_mult = 0;
962 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
964 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
967 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
968 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
972 if (!(mmc->part_support & PART_SUPPORT)) {
973 pr_err("Card does not support partitioning\n");
977 if (!mmc->hc_wp_grp_size) {
978 pr_err("Card does not define HC WP group size\n");
982 /* check partition alignment and total enhanced size */
983 if (conf->user.enh_size) {
984 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
985 conf->user.enh_start % mmc->hc_wp_grp_size) {
986 pr_err("User data enhanced area not HC WP group "
990 part_attrs |= EXT_CSD_ENH_USR;
991 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
992 if (mmc->high_capacity) {
993 enh_start_addr = conf->user.enh_start;
995 enh_start_addr = (conf->user.enh_start << 9);
1001 tot_enh_size_mult += enh_size_mult;
1003 for (pidx = 0; pidx < 4; pidx++) {
1004 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1005 pr_err("GP%i partition not HC WP group size "
1006 "aligned\n", pidx+1);
1009 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1010 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1011 part_attrs |= EXT_CSD_ENH_GP(pidx);
1012 tot_enh_size_mult += gp_size_mult[pidx];
1016 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1017 pr_err("Card does not support enhanced attribute\n");
1018 return -EMEDIUMTYPE;
1021 err = mmc_send_ext_csd(mmc, ext_csd);
1026 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1027 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1028 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1029 if (tot_enh_size_mult > max_enh_size_mult) {
1030 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1031 tot_enh_size_mult, max_enh_size_mult);
1032 return -EMEDIUMTYPE;
1035 /* The default value of EXT_CSD_WR_REL_SET is device
1036 * dependent, the values can only be changed if the
1037 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1038 * changed only once and before partitioning is completed. */
1039 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1040 if (conf->user.wr_rel_change) {
1041 if (conf->user.wr_rel_set)
1042 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1044 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1046 for (pidx = 0; pidx < 4; pidx++) {
1047 if (conf->gp_part[pidx].wr_rel_change) {
1048 if (conf->gp_part[pidx].wr_rel_set)
1049 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1051 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1055 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1056 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1057 puts("Card does not support host controlled partition write "
1058 "reliability settings\n");
1059 return -EMEDIUMTYPE;
1062 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1063 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1064 pr_err("Card already partitioned\n");
1068 if (mode == MMC_HWPART_CONF_CHECK)
1071 /* Partitioning requires high-capacity size definitions */
1072 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1073 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1074 EXT_CSD_ERASE_GROUP_DEF, 1);
1079 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1081 /* update erase group size to be high-capacity */
1082 mmc->erase_grp_size =
1083 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1087 /* all OK, write the configuration */
1088 for (i = 0; i < 4; i++) {
1089 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1090 EXT_CSD_ENH_START_ADDR+i,
1091 (enh_start_addr >> (i*8)) & 0xFF);
1095 for (i = 0; i < 3; i++) {
1096 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1097 EXT_CSD_ENH_SIZE_MULT+i,
1098 (enh_size_mult >> (i*8)) & 0xFF);
1102 for (pidx = 0; pidx < 4; pidx++) {
1103 for (i = 0; i < 3; i++) {
1104 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1105 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1106 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1111 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1112 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1116 if (mode == MMC_HWPART_CONF_SET)
1119 /* The WR_REL_SET is a write-once register but shall be
1120 * written before setting PART_SETTING_COMPLETED. As it is
1121 * write-once we can only write it when completing the
1123 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1124 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1125 EXT_CSD_WR_REL_SET, wr_rel_set);
1130 /* Setting PART_SETTING_COMPLETED confirms the partition
1131 * configuration but it only becomes effective after power
1132 * cycle, so we do not adjust the partition related settings
1133 * in the mmc struct. */
1135 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1136 EXT_CSD_PARTITION_SETTING,
1137 EXT_CSD_PARTITION_SETTING_COMPLETED);
1145 #if !CONFIG_IS_ENABLED(DM_MMC)
1146 int mmc_getcd(struct mmc *mmc)
1150 cd = board_mmc_getcd(mmc);
1153 if (mmc->cfg->ops->getcd)
1154 cd = mmc->cfg->ops->getcd(mmc);
1163 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1166 struct mmc_data data;
1168 /* Switch the frequency */
1169 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1170 cmd.resp_type = MMC_RSP_R1;
1171 cmd.cmdarg = (mode << 31) | 0xffffff;
1172 cmd.cmdarg &= ~(0xf << (group * 4));
1173 cmd.cmdarg |= value << (group * 4);
1175 data.dest = (char *)resp;
1176 data.blocksize = 64;
1178 data.flags = MMC_DATA_READ;
1180 return mmc_send_cmd(mmc, &cmd, &data);
1184 static int sd_get_capabilities(struct mmc *mmc)
1188 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1189 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1190 struct mmc_data data;
1192 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1196 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1198 if (mmc_host_is_spi(mmc))
1201 /* Read the SCR to find out if this card supports higher speeds */
1202 cmd.cmdidx = MMC_CMD_APP_CMD;
1203 cmd.resp_type = MMC_RSP_R1;
1204 cmd.cmdarg = mmc->rca << 16;
1206 err = mmc_send_cmd(mmc, &cmd, NULL);
1211 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1212 cmd.resp_type = MMC_RSP_R1;
1218 data.dest = (char *)scr;
1221 data.flags = MMC_DATA_READ;
1223 err = mmc_send_cmd(mmc, &cmd, &data);
1232 mmc->scr[0] = __be32_to_cpu(scr[0]);
1233 mmc->scr[1] = __be32_to_cpu(scr[1]);
1235 switch ((mmc->scr[0] >> 24) & 0xf) {
1237 mmc->version = SD_VERSION_1_0;
1240 mmc->version = SD_VERSION_1_10;
1243 mmc->version = SD_VERSION_2;
1244 if ((mmc->scr[0] >> 15) & 0x1)
1245 mmc->version = SD_VERSION_3;
1248 mmc->version = SD_VERSION_1_0;
1252 if (mmc->scr[0] & SD_DATA_4BIT)
1253 mmc->card_caps |= MMC_MODE_4BIT;
1255 /* Version 1.0 doesn't support switching */
1256 if (mmc->version == SD_VERSION_1_0)
1261 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1262 (u8 *)switch_status);
1267 /* The high-speed function is busy. Try again */
1268 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1272 /* If high-speed isn't supported, we return */
1273 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1274 mmc->card_caps |= MMC_CAP(SD_HS);
1276 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1277 /* Version before 3.0 don't support UHS modes */
1278 if (mmc->version < SD_VERSION_3)
1281 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1282 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1283 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1284 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1285 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1286 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1287 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1288 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1289 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1290 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1291 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1297 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1301 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1306 speed = UHS_SDR12_BUS_SPEED;
1309 speed = HIGH_SPEED_BUS_SPEED;
1311 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1313 speed = UHS_SDR12_BUS_SPEED;
1316 speed = UHS_SDR25_BUS_SPEED;
1319 speed = UHS_SDR50_BUS_SPEED;
1322 speed = UHS_DDR50_BUS_SPEED;
1325 speed = UHS_SDR104_BUS_SPEED;
1332 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1336 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1342 int sd_select_bus_width(struct mmc *mmc, int w)
1347 if ((w != 4) && (w != 1))
1350 cmd.cmdidx = MMC_CMD_APP_CMD;
1351 cmd.resp_type = MMC_RSP_R1;
1352 cmd.cmdarg = mmc->rca << 16;
1354 err = mmc_send_cmd(mmc, &cmd, NULL);
1358 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1359 cmd.resp_type = MMC_RSP_R1;
1364 err = mmc_send_cmd(mmc, &cmd, NULL);
1371 #if CONFIG_IS_ENABLED(MMC_WRITE)
1372 static int sd_read_ssr(struct mmc *mmc)
1374 static const unsigned int sd_au_size[] = {
1375 0, SZ_16K / 512, SZ_32K / 512,
1376 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1377 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1378 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1379 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1384 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1385 struct mmc_data data;
1387 unsigned int au, eo, et, es;
1389 cmd.cmdidx = MMC_CMD_APP_CMD;
1390 cmd.resp_type = MMC_RSP_R1;
1391 cmd.cmdarg = mmc->rca << 16;
1393 err = mmc_send_cmd(mmc, &cmd, NULL);
1397 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1398 cmd.resp_type = MMC_RSP_R1;
1402 data.dest = (char *)ssr;
1403 data.blocksize = 64;
1405 data.flags = MMC_DATA_READ;
1407 err = mmc_send_cmd(mmc, &cmd, &data);
1415 for (i = 0; i < 16; i++)
1416 ssr[i] = be32_to_cpu(ssr[i]);
1418 au = (ssr[2] >> 12) & 0xF;
1419 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1420 mmc->ssr.au = sd_au_size[au];
1421 es = (ssr[3] >> 24) & 0xFF;
1422 es |= (ssr[2] & 0xFF) << 8;
1423 et = (ssr[3] >> 18) & 0x3F;
1425 eo = (ssr[3] >> 16) & 0x3;
1426 mmc->ssr.erase_timeout = (et * 1000) / es;
1427 mmc->ssr.erase_offset = eo * 1000;
1430 pr_debug("Invalid Allocation Unit Size.\n");
1436 /* frequency bases */
1437 /* divided by 10 to be nice to platforms without floating point */
1438 static const int fbase[] = {
1445 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1446 * to platforms without floating point.
1448 static const u8 multipliers[] = {
1467 static inline int bus_width(uint cap)
1469 if (cap == MMC_MODE_8BIT)
1471 if (cap == MMC_MODE_4BIT)
1473 if (cap == MMC_MODE_1BIT)
1475 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1479 #if !CONFIG_IS_ENABLED(DM_MMC)
1480 #ifdef MMC_SUPPORTS_TUNING
1481 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1487 static void mmc_send_init_stream(struct mmc *mmc)
1491 static int mmc_set_ios(struct mmc *mmc)
1495 if (mmc->cfg->ops->set_ios)
1496 ret = mmc->cfg->ops->set_ios(mmc);
1502 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1505 if (clock > mmc->cfg->f_max)
1506 clock = mmc->cfg->f_max;
1508 if (clock < mmc->cfg->f_min)
1509 clock = mmc->cfg->f_min;
1513 mmc->clk_disable = disable;
1515 return mmc_set_ios(mmc);
1518 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1520 mmc->bus_width = width;
1522 return mmc_set_ios(mmc);
1525 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1527 * helper function to display the capabilities in a human
1528 * friendly manner. The capabilities include bus width and
1531 void mmc_dump_capabilities(const char *text, uint caps)
1535 pr_debug("%s: widths [", text);
1536 if (caps & MMC_MODE_8BIT)
1538 if (caps & MMC_MODE_4BIT)
1540 if (caps & MMC_MODE_1BIT)
1542 pr_debug("\b\b] modes [");
1543 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1544 if (MMC_CAP(mode) & caps)
1545 pr_debug("%s, ", mmc_mode_name(mode));
1546 pr_debug("\b\b]\n");
1550 struct mode_width_tuning {
1553 #ifdef MMC_SUPPORTS_TUNING
1558 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1559 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1562 case MMC_SIGNAL_VOLTAGE_000: return 0;
1563 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1564 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1565 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1570 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1574 if (mmc->signal_voltage == signal_voltage)
1577 mmc->signal_voltage = signal_voltage;
1578 err = mmc_set_ios(mmc);
1580 pr_debug("unable to set voltage (err %d)\n", err);
1585 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1591 static const struct mode_width_tuning sd_modes_by_pref[] = {
1592 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1593 #ifdef MMC_SUPPORTS_TUNING
1596 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1597 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1602 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1606 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1610 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1615 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1617 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1620 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1625 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1629 #define for_each_sd_mode_by_pref(caps, mwt) \
1630 for (mwt = sd_modes_by_pref;\
1631 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1633 if (caps & MMC_CAP(mwt->mode))
1635 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1638 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1639 const struct mode_width_tuning *mwt;
1640 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1641 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1643 bool uhs_en = false;
1648 mmc_dump_capabilities("sd card", card_caps);
1649 mmc_dump_capabilities("host", mmc->host_caps);
1652 /* Restrict card's capabilities by what the host can do */
1653 caps = card_caps & mmc->host_caps;
1658 for_each_sd_mode_by_pref(caps, mwt) {
1661 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1662 if (*w & caps & mwt->widths) {
1663 pr_debug("trying mode %s width %d (at %d MHz)\n",
1664 mmc_mode_name(mwt->mode),
1666 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1668 /* configure the bus width (card + host) */
1669 err = sd_select_bus_width(mmc, bus_width(*w));
1672 mmc_set_bus_width(mmc, bus_width(*w));
1674 /* configure the bus mode (card) */
1675 err = sd_set_card_speed(mmc, mwt->mode);
1679 /* configure the bus mode (host) */
1680 mmc_select_mode(mmc, mwt->mode);
1681 mmc_set_clock(mmc, mmc->tran_speed, false);
1683 #ifdef MMC_SUPPORTS_TUNING
1684 /* execute tuning if needed */
1685 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1686 err = mmc_execute_tuning(mmc,
1689 pr_debug("tuning failed\n");
1695 #if CONFIG_IS_ENABLED(MMC_WRITE)
1696 err = sd_read_ssr(mmc);
1698 pr_warn("unable to read ssr\n");
1704 /* revert to a safer bus speed */
1705 mmc_select_mode(mmc, SD_LEGACY);
1706 mmc_set_clock(mmc, mmc->tran_speed, false);
1711 pr_err("unable to select a mode\n");
1716 * read the compare the part of ext csd that is constant.
1717 * This can be used to check that the transfer is working
1720 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1723 const u8 *ext_csd = mmc->ext_csd;
1724 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1726 if (mmc->version < MMC_VERSION_4)
1729 err = mmc_send_ext_csd(mmc, test_csd);
1733 /* Only compare read only fields */
1734 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1735 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1736 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1737 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1738 ext_csd[EXT_CSD_REV]
1739 == test_csd[EXT_CSD_REV] &&
1740 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1741 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1742 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1743 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1749 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Build the set of I/O signal voltages the card supports for @mode (from
 * mmc->cardtype) and try to program one of them, restricted to
 * @allowed_mask.  Voltages are attempted in ffs() order, i.e. lowest set
 * bit first — presumably the MMC_SIGNAL_VOLTAGE_* bit layout makes that
 * the lowest voltage; verify against the enum definitions.
 */
1750 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1751 uint32_t allowed_mask)
/* HS200 is specified only at 1.8 V / 1.2 V */
1757 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
1758 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1759 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
1760 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* DDR52 cards advertising 1.8 V also work at 3.3 V */
1763 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1764 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1765 MMC_SIGNAL_VOLTAGE_180;
1766 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1767 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1770 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* try each candidate voltage until mmc_set_signal_voltage() succeeds */
1774 while (card_mask & allowed_mask) {
1775 enum mmc_voltage best_match;
1777 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1778 if (!mmc_set_signal_voltage(mmc, best_match))
/* that voltage failed — drop it from the candidate set and retry */
1781 allowed_mask &= ~best_match;
/* stub when I/O voltage switching support is compiled out */
1787 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1788 uint32_t allowed_mask)
/*
 * Bus modes ordered by preference (fastest first); each entry lists the
 * bus widths usable with that mode and, where needed, the tuning opcode.
 */
1794 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1795 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1798 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1799 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1804 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1808 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1812 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1816 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* iterate mmc_modes_by_pref, skipping modes absent from @caps */
1820 #define for_each_mmc_mode_by_pref(caps, mwt) \
1821 for (mwt = mmc_modes_by_pref;\
1822 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1824 if (caps & MMC_CAP(mwt->mode))
/*
 * Map of host width capability + DDR flag to the EXT_CSD BUS_WIDTH value
 * to program, ordered widest-first so the best width wins.
 */
1826 static const struct ext_csd_bus_width {
1830 } ext_csd_bus_width[] = {
1831 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1832 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1833 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1834 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1835 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
/* iterate ext_csd_bus_width entries matching @caps and the @ddr flag */
1838 #define for_each_supported_width(caps, ddr, ecbv) \
1839 for (ecbv = ext_csd_bus_width;\
1840 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1842 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the best (mode, bus width) combination supported by both the card
 * (@card_caps) and the host, program card and host accordingly, and
 * verify the result with an EXT_CSD read-back.  On any failure the bus is
 * reverted to 1-bit legacy mode and the next candidate is tried.
 */
1844 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1847 const struct mode_width_tuning *mwt;
1848 const struct ext_csd_bus_width *ecbw;
1851 mmc_dump_capabilities("mmc", card_caps);
1852 mmc_dump_capabilities("host", mmc->host_caps);
1855 /* Restrict card's capabilities by what the host can do */
1856 card_caps &= mmc->host_caps;
1858 /* Only version 4 of MMC supports wider bus widths */
1859 if (mmc->version < MMC_VERSION_4)
1862 if (!mmc->ext_csd) {
1863 pr_debug("No ext_csd found!\n"); /* this should never happen */
/* start from a safe legacy clock before probing faster modes */
1867 mmc_set_clock(mmc, mmc->legacy_speed, false);
1869 for_each_mmc_mode_by_pref(card_caps, mwt) {
1870 for_each_supported_width(card_caps & mwt->widths,
1871 mmc_is_mode_ddr(mwt->mode), ecbw) {
1872 enum mmc_voltage old_voltage;
1873 pr_debug("trying mode %s width %d (at %d MHz)\n",
1874 mmc_mode_name(mwt->mode),
1875 bus_width(ecbw->cap),
1876 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so we can restore it on failure */
1877 old_voltage = mmc->signal_voltage;
1878 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1879 MMC_ALL_SIGNAL_VOLTAGE);
1883 /* configure the bus width (card + host) */
1884 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
/* width first without the DDR flag; DDR is enabled below */
1886 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1889 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1891 /* configure the bus speed (card) */
1892 err = mmc_set_card_speed(mmc, mwt->mode);
1897 * configure the bus width AND the ddr mode (card)
1898 * The host side will be taken care of in the next step
1900 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1901 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1903 ecbw->ext_csd_bits);
1908 /* configure the bus mode (host) */
1909 mmc_select_mode(mmc, mwt->mode);
1910 mmc_set_clock(mmc, mmc->tran_speed, false);
1911 #ifdef MMC_SUPPORTS_TUNING
1913 /* execute tuning if needed */
1915 err = mmc_execute_tuning(mmc, mwt->tuning);
1917 pr_debug("tuning failed\n");
1923 /* do a transfer to check the configuration */
1924 err = mmc_read_and_compare_ext_csd(mmc);
1928 mmc_set_signal_voltage(mmc, old_voltage);
1929 /* if an error occurred, revert to a safer bus mode */
1930 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1931 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1932 mmc_select_mode(mmc, MMC_LEGACY);
1933 mmc_set_bus_width(mmc, 1);
/* all candidates exhausted */
1937 pr_err("unable to select a mode\n");
/*
 * MMC v4+ specific part of card startup: read EXT_CSD, cache it in
 * mmc->ext_csd, derive the exact spec version, user/boot/RPMB/GP
 * partition capacities, erase/WP group sizes and write-reliability
 * settings.  No-op for SD cards and pre-v4 MMC.
 */
1942 static int mmc_startup_v4(struct mmc *mmc)
1946 bool has_parts = false;
1947 bool part_completed;
/* maps EXT_CSD_REV to the MMC_VERSION_* constant */
1948 static const u32 mmc_versions[] = {
1960 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1962 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1965 /* check ext_csd version and capacity */
1966 err = mmc_send_ext_csd(mmc, ext_csd);
1970 /* store the ext csd for future reference */
1972 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1975 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/*
 * NOTE(review): '>' lets EXT_CSD_REV == ARRAY_SIZE(mmc_versions) through,
 * which would index one past the end of mmc_versions below — this looks
 * like an off-by-one; '>=' seems intended.  Confirm before changing.
 */
1977 if (ext_csd[EXT_CSD_REV] > ARRAY_SIZE(mmc_versions))
1980 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
1982 if (mmc->version >= MMC_VERSION_4_2) {
1984 * According to the JEDEC Standard, the value of
1985 * ext_csd's capacity is valid if the value is more
/* assemble 32-bit little-endian SEC_CNT into a sector count */
1988 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1989 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1990 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1991 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1992 capacity *= MMC_MAX_BLOCK_LEN;
/* only trust SEC_CNT-derived capacity when it exceeds 2 GiB */
1993 if ((capacity >> 20) > 2 * 1024)
1994 mmc->capacity_user = capacity;
1997 /* The partition data may be non-zero but it is only
1998 * effective if PARTITION_SETTING_COMPLETED is set in
1999 * EXT_CSD, so ignore any data if this bit is not set,
2000 * except for enabling the high-capacity group size
2001 * definition (see below).
2003 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2004 EXT_CSD_PARTITION_SETTING_COMPLETED);
2006 /* store the partition info of emmc */
2007 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2008 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2009 ext_csd[EXT_CSD_BOOT_MULT])
2010 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2011 if (part_completed &&
2012 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2013 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT_MULT / RPMB_MULT are in 128 KiB units, hence << 17 */
2015 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2017 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* compute the size of each of the four general-purpose partitions */
2019 for (i = 0; i < 4; i++) {
2020 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2021 uint mult = (ext_csd[idx + 2] << 16) +
2022 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2025 if (!part_completed)
2027 mmc->capacity_gp[i] = mult;
2028 mmc->capacity_gp[i] *=
2029 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2030 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* group units are 512 KiB, hence << 19 */
2031 mmc->capacity_gp[i] <<= 19;
2034 #ifndef CONFIG_SPL_BUILD
/* enhanced user area size/start, only valid once partitioning is done */
2035 if (part_completed) {
2036 mmc->enh_user_size =
2037 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2038 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2039 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2040 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2041 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2042 mmc->enh_user_size <<= 19;
2043 mmc->enh_user_start =
2044 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2045 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2046 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2047 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2048 if (mmc->high_capacity)
2049 mmc->enh_user_start <<= 9;
2054 * Host needs to enable ERASE_GRP_DEF bit if device is
2055 * partitioned. This bit will be lost every time after a reset
2056 * or power off. This will affect erase size.
2060 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2061 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2064 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2065 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy consistent with what we just programmed */
2070 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2073 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2074 #if CONFIG_IS_ENABLED(MMC_WRITE)
2075 /* Read out group size from ext_csd */
2076 mmc->erase_grp_size =
2077 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2080 * if high capacity and partition setting completed
2081 * SEC_COUNT is valid even if it is smaller than 2 GiB
2082 * JEDEC Standard JESD84-B45, 6.2.4
2084 if (mmc->high_capacity && part_completed) {
2085 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2086 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2087 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2088 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2089 capacity *= MMC_MAX_BLOCK_LEN;
2090 mmc->capacity_user = capacity;
2093 #if CONFIG_IS_ENABLED(MMC_WRITE)
2095 /* Calculate the group size from the csd value. */
2096 int erase_gsz, erase_gmul;
2098 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2099 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2100 mmc->erase_grp_size = (erase_gsz + 1)
2104 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/* write-protect group size in sectors */
2105 mmc->hc_wp_grp_size = 1024
2106 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2107 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2110 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
/* error path: drop the cached EXT_CSD */
2116 mmc->ext_csd = NULL;
/*
 * Full card identification and configuration sequence: CID, RCA, CSD,
 * legacy timing, DSR, card selection, v4 EXT_CSD handling, capability
 * probing and mode/width selection, then fills in the block descriptor
 * (blksz, lba, vendor/product/revision strings).
 */
2121 static int mmc_startup(struct mmc *mmc)
2127 struct blk_desc *bdesc;
2129 #ifdef CONFIG_MMC_SPI_CRC_ON
2130 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2131 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2132 cmd.resp_type = MMC_RSP_R1;
2134 err = mmc_send_cmd(mmc, &cmd, NULL);
2140 /* Put the Card in Identify Mode */
2141 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2142 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2143 cmd.resp_type = MMC_RSP_R2;
2146 err = mmc_send_cmd(mmc, &cmd, NULL);
2148 #ifdef CONFIG_MMC_QUIRKS
2149 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2152 * It has been seen that SEND_CID may fail on the first
2153 * attempt, let's try a few more times
2156 err = mmc_send_cmd(mmc, &cmd, NULL);
2159 } while (retries--);
/* cache the 128-bit CID for the block-descriptor strings below */
2166 memcpy(mmc->cid, cmd.response, 16);
2169 * For MMC cards, set the Relative Address.
2170 * For SD cards, get the Relative Address.
2171 * This also puts the cards into Standby State
2173 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2174 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2175 cmd.cmdarg = mmc->rca << 16;
2176 cmd.resp_type = MMC_RSP_R6;
2178 err = mmc_send_cmd(mmc, &cmd, NULL);
/* for SD the card assigns the RCA; read it from the response */
2184 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2187 /* Get the Card-Specific Data */
2188 cmd.cmdidx = MMC_CMD_SEND_CSD;
2189 cmd.resp_type = MMC_RSP_R2;
2190 cmd.cmdarg = mmc->rca << 16;
2192 err = mmc_send_cmd(mmc, &cmd, NULL);
2197 mmc->csd[0] = cmd.response[0];
2198 mmc->csd[1] = cmd.response[1];
2199 mmc->csd[2] = cmd.response[2];
2200 mmc->csd[3] = cmd.response[3];
/* derive the MMC spec version from the CSD SPEC_VERS field */
2202 if (mmc->version == MMC_VERSION_UNKNOWN) {
2203 int version = (cmd.response[0] >> 26) & 0xf;
2207 mmc->version = MMC_VERSION_1_2;
2210 mmc->version = MMC_VERSION_1_4;
2213 mmc->version = MMC_VERSION_2_2;
2216 mmc->version = MMC_VERSION_3;
2219 mmc->version = MMC_VERSION_4;
2222 mmc->version = MMC_VERSION_1_2;
2227 /* divide frequency by 10, since the mults are 10x bigger */
2228 freq = fbase[(cmd.response[0] & 0x7)];
2229 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
/* TRAN_SPEED from CSD gives the maximum legacy clock */
2231 mmc->legacy_speed = freq * mult;
2232 mmc_select_mode(mmc, MMC_LEGACY);
2234 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2235 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2236 #if CONFIG_IS_ENABLED(MMC_WRITE)
2239 mmc->write_bl_len = mmc->read_bl_len;
2241 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* capacity: CSD v2 (high capacity) vs CSD v1 encodings */
2244 if (mmc->high_capacity) {
2245 csize = (mmc->csd[1] & 0x3f) << 16
2246 | (mmc->csd[2] & 0xffff0000) >> 16;
2249 csize = (mmc->csd[1] & 0x3ff) << 2
2250 | (mmc->csd[2] & 0xc0000000) >> 30;
2251 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2254 mmc->capacity_user = (csize + 1) << (cmult + 2);
2255 mmc->capacity_user *= mmc->read_bl_len;
2256 mmc->capacity_boot = 0;
2257 mmc->capacity_rpmb = 0;
2258 for (i = 0; i < 4; i++)
2259 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the stack can handle */
2261 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2262 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2264 #if CONFIG_IS_ENABLED(MMC_WRITE)
2265 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2266 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only when implemented and a value was configured */
2269 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2270 cmd.cmdidx = MMC_CMD_SET_DSR;
2271 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2272 cmd.resp_type = MMC_RSP_NONE;
2273 if (mmc_send_cmd(mmc, &cmd, NULL))
2274 pr_warn("MMC: SET_DSR failed\n");
2277 /* Select the card, and put it into Transfer Mode */
2278 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2279 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2280 cmd.resp_type = MMC_RSP_R1;
2281 cmd.cmdarg = mmc->rca << 16;
2282 err = mmc_send_cmd(mmc, &cmd, NULL);
2289 * For SD, its erase group is always one sector
2291 #if CONFIG_IS_ENABLED(MMC_WRITE)
2292 mmc->erase_grp_size = 1;
2294 mmc->part_config = MMCPART_NOAVAILABLE;
/* MMC v4+ specifics (EXT_CSD, partitions) */
2296 err = mmc_startup_v4(mmc);
2300 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
/* probe capabilities and pick the best bus mode/width (SD vs MMC) */
2305 err = sd_get_capabilities(mmc);
2308 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2310 err = mmc_get_capabilities(mmc);
2313 mmc_select_mode_and_width(mmc, mmc->card_caps);
2319 mmc->best_mode = mmc->selected_mode;
2321 /* Fix the block length for DDR mode */
2322 if (mmc->ddr_mode) {
2323 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2324 #if CONFIG_IS_ENABLED(MMC_WRITE)
2325 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2329 /* fill in device description */
2330 bdesc = mmc_get_blk_desc(mmc);
2334 bdesc->blksz = mmc->read_bl_len;
2335 bdesc->log2blksz = LOG2(bdesc->blksz);
2336 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2337 #if !defined(CONFIG_SPL_BUILD) || \
2338 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2339 !defined(CONFIG_USE_TINY_PRINTF))
/* decode manufacturer/serial/product/revision from the CID */
2340 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2341 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2342 (mmc->cid[3] >> 16) & 0xffff);
2343 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2344 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2345 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2346 (mmc->cid[2] >> 24) & 0xff);
2347 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2348 (mmc->cid[2] >> 16) & 0xf);
/* tiny-printf builds: leave the strings empty instead */
2350 bdesc->vendor[0] = 0;
2351 bdesc->product[0] = 0;
2352 bdesc->revision[0] = 0;
2354 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
/*
 * Send SD CMD8 (SEND_IF_COND) with the 0xaa check pattern.  A matching
 * echo of the pattern identifies an SD v2.00+ card; older cards do not
 * respond to CMD8 at all.
 */
2361 static int mmc_send_if_cond(struct mmc *mmc)
2366 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2367 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2368 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2369 cmd.resp_type = MMC_RSP_R7;
2371 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the check pattern back */
2376 if ((cmd.response[0] & 0xff) != 0xaa)
2379 mmc->version = SD_VERSION_2;
2384 #if !CONFIG_IS_ENABLED(DM_MMC)
2385 /* board-specific MMC power initializations. */
2386 __weak void board_mmc_power_init(void)
/*
 * Resolve the card's power supplies.  With driver model + regulators,
 * look up the optional vmmc/vqmmc supplies from the device tree (absence
 * is not an error); otherwise fall back to the legacy board hook.
 */
2391 static int mmc_power_init(struct mmc *mmc)
2393 #if CONFIG_IS_ENABLED(DM_MMC)
2394 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2397 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2400 pr_debug("%s: No vmmc supply\n", mmc->dev->name)
2402 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2403 &mmc->vqmmc_supply);
2405 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2407 #else /* !CONFIG_DM_MMC */
2409 * Driver model should use a regulator, as above, rather than calling
2410 * out to board code.
2412 board_mmc_power_init();
2418 * put the host in the initial state:
2419 * - turn on Vdd (card power supply)
2420 * - configure the bus width and clock to minimal values
2422 static void mmc_set_initial_state(struct mmc *mmc)
2426 /* First try to set 3.3V. If it fails set to 1.8V */
2427 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2429 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2431 pr_warn("mmc: failed to set signal voltage\n");
/* 1-bit legacy mode at minimum clock is safe for any card */
2433 mmc_select_mode(mmc, MMC_LEGACY);
2434 mmc_set_bus_width(mmc, 1);
2435 mmc_set_clock(mmc, 0, false);
/* Enable card power: turn on the vmmc regulator when one was found. */
2438 static int mmc_power_on(struct mmc *mmc)
2440 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2441 if (mmc->vmmc_supply) {
2442 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2445 puts("Error enabling VMMC supply\n");
/* Disable card power: gate the clock, then turn off vmmc. */
2453 static int mmc_power_off(struct mmc *mmc)
2455 mmc_set_clock(mmc, 0, true);
2456 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2457 if (mmc->vmmc_supply) {
2458 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2461 pr_debug("Error disabling VMMC supply\n");
/*
 * Full power cycle (off, short delay, on) — needed to recover a card
 * that is stuck, e.g. after a failed UHS voltage switch.
 */
2469 static int mmc_power_cycle(struct mmc *mmc)
2473 ret = mmc_power_off(mmc);
2477 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2478 * to be on the safer side.
2481 return mmc_power_on(mmc);
/*
 * First phase of card initialization: card-detect, power-up, host init,
 * CMD0 reset, CMD8 (SD v2 test) and the SD/MMC operating-condition
 * handshake.  Sets mmc->init_in_progress so mmc_complete_init() can
 * finish the job later (allows overlapping init of several cards).
 */
2484 int mmc_start_init(struct mmc *mmc)
2487 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2491 * all hosts are capable of 1 bit bus-width and able to use the legacy
2494 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2495 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2497 #if !defined(CONFIG_MMC_BROKEN_CD)
2498 /* we pretend there's no card when init is NULL */
2499 no_card = mmc_getcd(mmc) == 0;
2503 #if !CONFIG_IS_ENABLED(DM_MMC)
2504 no_card = no_card || (mmc->cfg->ops->init == NULL);
2508 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2509 pr_err("MMC: no card present\n");
2517 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2518 mmc_adapter_card_type_ident();
2520 err = mmc_power_init(mmc);
2524 #ifdef CONFIG_MMC_QUIRKS
/* enable the known retry workarounds by default */
2525 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2526 MMC_QUIRK_RETRY_SEND_CID;
2529 err = mmc_power_cycle(mmc);
2532 * if power cycling is not supported, we should not try
2533 * to use the UHS modes, because we wouldn't be able to
2534 * recover from an error during the UHS initialization.
2536 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2538 mmc->host_caps &= ~UHS_CAPS;
2539 err = mmc_power_on(mmc);
2544 #if CONFIG_IS_ENABLED(DM_MMC)
2545 /* The device has already been probed ready for use */
2547 /* made sure it's not NULL earlier */
2548 err = mmc->cfg->ops->init(mmc);
2555 mmc_set_initial_state(mmc);
2556 mmc_send_init_stream(mmc);
2558 /* Reset the Card */
2559 err = mmc_go_idle(mmc);
2564 /* The internal partition reset to user partition(0) at every CMD0*/
2565 mmc_get_blk_desc(mmc)->hwpart = 0;
2567 /* Test for SD version 2 */
2568 err = mmc_send_if_cond(mmc);
2570 /* Now try to get the SD card's operating condition */
2571 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS negotiation needs a power cycle before retrying */
2572 if (err && uhs_en) {
2574 mmc_power_cycle(mmc);
2578 /* If the command timed out, we check for an MMC card */
2579 if (err == -ETIMEDOUT) {
2580 err = mmc_send_op_cond(mmc);
2583 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2584 pr_err("Card did not respond to voltage select!\n");
/* phase one done; mmc_complete_init() finishes the sequence */
2591 mmc->init_in_progress = 1;
/*
 * Second phase of initialization: finish a pending OCR negotiation if
 * one was left by mmc_start_init(), then run the full mmc_startup()
 * sequence.  Clears init_in_progress in all cases.
 */
2596 static int mmc_complete_init(struct mmc *mmc)
2600 mmc->init_in_progress = 0;
2601 if (mmc->op_cond_pending)
2602 err = mmc_complete_op_cond(mmc);
2605 err = mmc_startup(mmc);
/*
 * Public entry point: run both init phases back to back (start phase is
 * skipped when already in progress) and report the elapsed time.
 */
2613 int mmc_init(struct mmc *mmc)
2616 __maybe_unused unsigned start;
2617 #if CONFIG_IS_ENABLED(DM_MMC)
2618 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2625 start = get_timer(0);
2627 if (!mmc->init_in_progress)
2628 err = mmc_start_init(mmc);
2631 err = mmc_complete_init(mmc);
2633 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
/* Store a DSR value to be programmed into the card during startup. */
2638 int mmc_set_dsr(struct mmc *mmc, u16 val)
2644 /* CPU-specific MMC initializations */
2645 __weak int cpu_mmc_init(bd_t *bis)
2650 /* board-specific MMC initializations. */
2651 __weak int board_mmc_init(bd_t *bis)
/* Mark this device for early init from mmc_do_preinit(). */
2656 void mmc_set_preinit(struct mmc *mmc, int preinit)
2658 mmc->preinit = preinit;
/* SPL with driver model: devices are probed on demand, nothing to do. */
2661 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2662 static int mmc_probe(bd_t *bis)
/* Full U-Boot with driver model: probe every device in the MMC uclass. */
2666 #elif CONFIG_IS_ENABLED(DM_MMC)
2667 static int mmc_probe(bd_t *bis)
2671 struct udevice *dev;
2673 ret = uclass_get(UCLASS_MMC, &uc);
2678 * Try to add them in sequence order. Really with driver model we
2679 * should allow holes, but the current MMC list does not allow that.
2680 * So if we request 0, 1, 3 we will get 0, 1, 2.
2682 for (i = 0; ; i++) {
2683 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2687 uclass_foreach_dev(dev, uc) {
2688 ret = device_probe(dev);
2690 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Legacy (non-DM) path: defer to the board's registration hook. */
2696 static int mmc_probe(bd_t *bis)
2698 if (board_mmc_init(bis) < 0)
/*
 * One-time MMC subsystem bring-up: probe all controllers and, outside
 * SPL, print the detected device list.  Safe to call repeatedly.
 */
2705 int mmc_initialize(bd_t *bis)
2707 static int initialized = 0;
2709 if (initialized) /* Avoid initializing mmc multiple times */
2713 #if !CONFIG_IS_ENABLED(BLK)
2714 #if !CONFIG_IS_ENABLED(MMC_TINY)
2718 ret = mmc_probe(bis);
2722 #ifndef CONFIG_SPL_BUILD
2723 print_mmc_devices(',');
2730 #ifdef CONFIG_CMD_BKOPS_ENABLE
2731 int mmc_set_bkops_enable(struct mmc *mmc)
2734 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2736 err = mmc_send_ext_csd(mmc, ext_csd);
2738 puts("Could not get ext_csd register values\n");
2742 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2743 puts("Background operations not supported on device\n");
2744 return -EMEDIUMTYPE;
2747 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2748 puts("Background operations already enabled\n");
2752 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2754 puts("Failed to enable manual background operations\n");
2758 puts("Enabled manual background operations\n");