1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
7 * Based vaguely on the Linux code
16 #include <dm/device-internal.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <power/regulator.h>
25 #include <linux/list.h>
27 #include "mmc_private.h"
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
31 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
/*
 * NOTE(review): this view is decimated — intermediate original lines
 * (braces, returns, locals) are missing between the numbered lines below.
 */
/*
 * Non-DM fallback for DAT0 busy polling.  Body not visible here;
 * presumably returns -ENOSYS so callers fall back to CMD13 — TODO confirm.
 */
33 #if !CONFIG_IS_ENABLED(DM_MMC)
35 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
/* Weak board-level write-protect hook; boards may override. */
40 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * mmc_getwp() - report the card's write-protect state.
 * Asks the board hook first, then lets the host driver's getwp op
 * override the answer when one is provided.
 */
45 int mmc_getwp(struct mmc *mmc)
49 	wp = board_mmc_getwp(mmc);
52 	if (mmc->cfg->ops->getwp)
53 		wp = mmc->cfg->ops->getwp(mmc);
/* Weak board-level card-detect hook; boards may override. */
61 __weak int board_mmc_getcd(struct mmc *mmc)
/* Command tracing, compiled in only with CONFIG_MMC_TRACE. */
67 #ifdef CONFIG_MMC_TRACE
/* Print command index and argument before the command goes out. */
68 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
70 	printf("CMD_SEND:%d\n", cmd->cmdidx);
71 	printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
/*
 * Print the driver return code and decode the response according to
 * cmd->resp_type.  (Decimated: case labels and response[] arguments
 * between the printf lines are not visible here.)
 */
74 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
80 	printf("\t\tRET\t\t\t %d\n", ret);
82 		switch (cmd->resp_type) {
84 			printf("\t\tMMC_RSP_NONE\n");
87 			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
91 			printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
95 			printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
97 			printf("\t\t \t\t 0x%08x \n",
99 			printf("\t\t \t\t 0x%08x \n",
101 			printf("\t\t \t\t 0x%08x \n",
/* R2 (CSD/CID) responses: hex-dump the 16 response bytes, 4 per row. */
104 			printf("\t\t\t\t\tDUMPING DATA\n");
105 			for (i = 0; i < 4; i++) {
107 				printf("\t\t\t\t\t%03d - ", i*4);
108 				ptr = (u8 *)&cmd->response[i];
/* Bytes are walked backwards (*ptr--), i.e. printed MSB-first per word. */
110 				for (j = 0; j < 4; j++)
111 					printf("%02x ", *ptr--);
116 			printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
120 			printf("\t\tERROR MMC rsp not supported\n");
/* Extract and print CURRENT_STATE (bits 12:9) from an R1 status word. */
126 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
130 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
131 	printf("CURR STATE:%d\n", status);
135 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * mmc_mode_name() - human-readable name for a bus mode.
 * Table is indexed by enum bus_mode; out-of-range values are rejected
 * before the lookup (return of names[mode] itself is not visible here).
 */
136 const char *mmc_mode_name(enum bus_mode mode)
138 	static const char *const names[] = {
139 	      [MMC_LEGACY]	= "MMC legacy",
140 	      [MMC_HS]		= "MMC High Speed (26MHz)",
141 	      [SD_HS]		= "SD High Speed (50MHz)",
142 	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
143 	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
144 	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
145 	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
146 	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
147 	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
148 	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
149 	      [MMC_HS_200]	= "HS200 (200MHz)",
150 	      [MMC_HS_400]	= "HS400 (200MHz)",
151 	      [MMC_HS_400_ES]	= "HS400ES (200MHz)",
/* Guard against values past the table end. */
154 	if (mode >= MMC_MODES_END)
155 		return "Unknown mode";
/*
 * mmc_mode2freq() - nominal clock frequency (Hz) for a bus mode.
 * MMC_LEGACY uses the per-card legacy_speed instead of the table;
 * out-of-range modes are rejected (fallback return not visible here).
 */
161 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
163 	static const int freqs[] = {
164 	      [MMC_LEGACY]	= 25000000,
167 	      [MMC_HS_52]	= 52000000,
168 	      [MMC_DDR_52]	= 52000000,
169 	      [UHS_SDR12]	= 25000000,
170 	      [UHS_SDR25]	= 50000000,
171 	      [UHS_SDR50]	= 100000000,
172 	      [UHS_DDR50]	= 50000000,
173 	      [UHS_SDR104]	= 208000000,
174 	      [MMC_HS_200]	= 200000000,
175 	      [MMC_HS_400]	= 200000000,
176 	      [MMC_HS_400_ES]	= 200000000,
179 	if (mode == MMC_LEGACY)
180 		return mmc->legacy_speed;
181 	else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the chosen bus mode on the mmc struct.
 * Updates selected_mode, the target tran_speed and the DDR flag.
 */
187 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
189 	mmc->selected_mode = mode;
190 	mmc->tran_speed = mmc_mode2freq(mmc, mode);
191 	mmc->ddr_mode = mmc_is_mode_ddr(mode);
192 	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
193 		 mmc->tran_speed / 1000000);
197 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_send_cmd() - non-DM path: send one command via the host driver's
 * send_cmd op, bracketed by the optional trace hooks above.
 */
198 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
202 	mmmc_trace_before_send(mmc, cmd);
203 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
204 	mmmc_trace_after_send(mmc, cmd, ret);
211  * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
213  * @dev:	device to receive the command
214  * @cmd:	command to send
215  * @data:	additional data to send/receive
216  * @retries:	how many times to retry; mmc_send_cmd is always called at least
218  * @return 0 if ok, -ve on error
220 static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
221 			      struct mmc_data *data, uint retries)
/* do/while: the command is always attempted once, plus `retries` retries. */
226 		ret = mmc_send_cmd(mmc, cmd, data);
227 	} while (ret && retries--);
233  * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
234  *			   specific quirk is enabled
236  * @dev:	device to receive the command
237  * @cmd:	command to send
238  * @data:	additional data to send/receive
239  * @quirk:	retry only if this quirk is enabled
240  * @retries:	how many times to retry; mmc_send_cmd is always called at least
242  * @return 0 if ok, -ve on error
244 static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
245 			       struct mmc_data *data, u32 quirk, uint retries)
/* Retries only when quirk support is built in AND this card has the quirk. */
247 	if (CONFIG_IS_ENABLED(MMC_QUIRKS) && mmc->quirks & quirk)
248 		return mmc_send_cmd_retry(mmc, cmd, data, retries);
250 	return mmc_send_cmd(mmc, cmd, data);
/*
 * mmc_send_status() - issue CMD13 and return the R1 status word.
 * RCA argument is skipped for SPI hosts (no addressing in SPI mode).
 */
253 int mmc_send_status(struct mmc *mmc, unsigned int *status)
258 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
259 	cmd.resp_type = MMC_RSP_R1;
260 	if (!mmc_host_is_spi(mmc))
261 		cmd.cmdarg = mmc->rca << 16;
263 	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
264 	mmc_trace_state(mmc, &cmd);
266 		*status = cmd.response[0];
/*
 * mmc_poll_for_busy() - wait until the card leaves busy state.
 * Prefers DAT0 polling (mmc_wait_dat0); otherwise loops on CMD13 until
 * the card is ready-for-data and out of PRG state, decrementing the
 * millisecond budget each iteration.
 */
271 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
276 	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
281 		err = mmc_send_status(mmc, &status);
285 		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
286 		    (status & MMC_STATUS_CURR_STATE) !=
/* Any error bit in the status mask aborts the wait with a message. */
290 		if (status & MMC_STATUS_MASK) {
291 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
292 			pr_err("Status Error: 0x%08x\n", status);
297 		if (timeout_ms-- <= 0)
303 	if (timeout_ms <= 0) {
304 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
305 		pr_err("Timeout waiting card ready\n");
/*
 * mmc_set_blocklen() - issue CMD16; retried via the SET_BLOCKLEN quirk
 * for controllers/cards known to need it.
 */
313 int mmc_set_blocklen(struct mmc *mmc, int len)
320 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
321 	cmd.resp_type = MMC_RSP_R1;
324 	return mmc_send_cmd_quirks(mmc, &cmd, NULL,
325 				   MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
328 #ifdef MMC_SUPPORTS_TUNING
/* Standard 4-bit-bus tuning block pattern (64 bytes, JEDEC/SD defined). */
329 static const u8 tuning_blk_pattern_4bit[] = {
330 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
331 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
332 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
333 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
334 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
335 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
336 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
337 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard 8-bit-bus tuning block pattern (128 bytes). */
340 static const u8 tuning_blk_pattern_8bit[] = {
341 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
342 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
343 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
344 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
345 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
346 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
347 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
348 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
349 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
350 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
351 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
352 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
353 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
354 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
355 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
356 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * mmc_send_tuning() - read one tuning block and compare against the
 * expected pattern for the current bus width; non-zero memcmp means a
 * failed tuning pass.  (Error return value not visible in this view.)
 */
359 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
362 	struct mmc_data data;
363 	const u8 *tuning_block_pattern;
366 	if (mmc->bus_width == 8) {
367 		tuning_block_pattern = tuning_blk_pattern_8bit;
368 		size = sizeof(tuning_blk_pattern_8bit);
369 	} else if (mmc->bus_width == 4) {
370 		tuning_block_pattern = tuning_blk_pattern_4bit;
371 		size = sizeof(tuning_blk_pattern_4bit);
/* Cache-aligned bounce buffer so DMA-capable hosts can read into it. */
376 	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
380 	cmd.resp_type = MMC_RSP_R1;
382 	data.dest = (void *)data_buf;
384 	data.blocksize = size;
385 	data.flags = MMC_DATA_READ;
387 	err = mmc_send_cmd(mmc, &cmd, &data);
391 	if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * mmc_read_blocks() - read blkcnt blocks starting at `start` into dst.
 * Uses CMD18 for multi-block and CMD17 for single-block reads; the
 * argument is a block number for high-capacity cards and a byte offset
 * otherwise.  Multi-block reads are terminated with CMD12.
 */
398 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
402 	struct mmc_data data;
405 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
407 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
409 	if (mmc->high_capacity)
/* Byte-addressed cards: scale the LBA by the read block length. */
412 		cmd.cmdarg = start * mmc->read_bl_len;
414 	cmd.resp_type = MMC_RSP_R1;
417 	data.blocks = blkcnt;
418 	data.blocksize = mmc->read_bl_len;
419 	data.flags = MMC_DATA_READ;
421 	if (mmc_send_cmd(mmc, &cmd, &data))
/* Close an open multi-block transfer with STOP_TRANSMISSION (R1b). */
425 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
427 		cmd.resp_type = MMC_RSP_R1b;
428 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
429 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
430 			pr_err("mmc fail to send stop cmd\n");
439 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_get_b_max() - max blocks per transfer: driver-provided get_b_max
 * op when available, else the static cfg->b_max limit.
 */
440 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
442 	if (mmc->cfg->ops->get_b_max)
443 		return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
445 	return mmc->cfg->b_max;
/*
 * mmc_bread() - block-device read entry point.
 * Two signatures: udevice-based under CONFIG_BLK, legacy blk_desc
 * otherwise.  Validates the range, sets the block length, then reads
 * in chunks of at most b_max blocks via mmc_read_blocks().
 */
449 #if CONFIG_IS_ENABLED(BLK)
450 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
452 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
456 #if CONFIG_IS_ENABLED(BLK)
457 	struct blk_desc *block_dev = dev_get_uclass_plat(dev);
459 	int dev_num = block_dev->devnum;
461 	lbaint_t cur, blocks_todo = blkcnt;
467 	struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY has no hwpart uclass plumbing; switch the partition directly. */
471 	if (CONFIG_IS_ENABLED(MMC_TINY))
472 		err = mmc_switch_part(mmc, block_dev->hwpart);
474 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads that would run past the device capacity. */
479 	if ((start + blkcnt) > block_dev->lba) {
480 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
481 		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
482 		       start + blkcnt, block_dev->lba);
487 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
488 		pr_debug("%s: Failed to set blocklen\n", __func__);
492 	b_max = mmc_get_b_max(mmc, dst, blkcnt);
/* Chunked transfer loop: at most b_max blocks per mmc_read_blocks(). */
495 		cur = (blocks_todo > b_max) ? b_max : blocks_todo;
496 		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
497 			pr_debug("%s: Failed to read blocks\n", __func__);
502 		dst += cur * mmc->read_bl_len;
503 	} while (blocks_todo > 0);
/* mmc_go_idle() - CMD0: reset the card to idle state (no response). */
508 static int mmc_go_idle(struct mmc *mmc)
515 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
517 	cmd.resp_type = MMC_RSP_NONE;
519 	err = mmc_send_cmd(mmc, &cmd, NULL);
529 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - perform the SD CMD11 signal-voltage switch
 * sequence: CMD11, wait for the card to drive DAT0 low, gate the clock,
 * change the host signalling voltage, re-enable the clock, then wait
 * for the card to release DAT0 (failure = card holds dat[0:3] low).
 */
530 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
536 	 * Send CMD11 only if the request is to switch the card to
/* Switching back to 3.3 V needs no CMD11, just a host voltage change. */
539 	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
540 		return mmc_set_signal_voltage(mmc, signal_voltage);
542 	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
544 	cmd.resp_type = MMC_RSP_R1;
546 	err = mmc_send_cmd(mmc, &cmd, NULL);
550 	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
554 	 * The card should drive cmd and dat[0:3] low immediately
555 	 * after the response of cmd11, but wait 100 us to be sure
557 	err = mmc_wait_dat0(mmc, 0, 100);
564 	 * During a signal voltage level switch, the clock must be gated
565 	 * for 5 ms according to the SD spec
567 	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
569 	err = mmc_set_signal_voltage(mmc, signal_voltage);
573 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
575 	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
578 	 * Failure to switch is indicated by the card holding
579 	 * dat[0:3] low. Wait for at least 1 ms according to spec
581 	err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * sd_send_op_cond() - SD initialization: loop ACMD41 (CMD55 + APP_SEND_
 * OP_COND) until the card reports not-busy, then derive version, OCR,
 * high-capacity flag, and optionally start the UHS voltage switch.
 */
591 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
598 		cmd.cmdidx = MMC_CMD_APP_CMD;
599 		cmd.resp_type = MMC_RSP_R1;
602 		err = mmc_send_cmd(mmc, &cmd, NULL);
607 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
608 		cmd.resp_type = MMC_RSP_R3;
611 		 * Most cards do not answer if some reserved bits
612 		 * in the ocr are set. However, Some controller
613 		 * can set bit 7 (reserved for low voltages), but
614 		 * how to manage low voltages SD card is not yet
617 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
618 			(mmc->cfg->voltages & 0xff8000);
/* SD 2.0+ cards may be high capacity: advertise HCS support. */
620 		if (mmc->version == SD_VERSION_2)
621 			cmd.cmdarg |= OCR_HCS;
/* Request 1.8 V signalling (S18R) when UHS is enabled. */
624 			cmd.cmdarg |= OCR_S18R;
626 		err = mmc_send_cmd(mmc, &cmd, NULL);
631 		if (cmd.response[0] & OCR_BUSY)
640 	if (mmc->version != SD_VERSION_2)
641 		mmc->version = SD_VERSION_1_0;
643 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
644 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
645 		cmd.resp_type = MMC_RSP_R3;
648 		err = mmc_send_cmd(mmc, &cmd, NULL);
654 	mmc->ocr = cmd.response[0];
656 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* 0x41000000 = card ready + S18A accepted — begin the voltage switch. */
657 	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
659 		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
665 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - one CMD1 (SEND_OP_COND) iteration; when
 * use_arg is set (and not SPI) the argument advertises HCS plus the
 * voltage window previously reported by the card.
 */
671 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
676 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
677 	cmd.resp_type = MMC_RSP_R3;
679 	if (use_arg && !mmc_host_is_spi(mmc))
680 		cmd.cmdarg = OCR_HCS |
681 			(mmc->cfg->voltages &
682 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
683 			(mmc->ocr & OCR_ACCESS_MODE);
685 	err = mmc_send_cmd(mmc, &cmd, NULL);
688 	mmc->ocr = cmd.response[0];
/*
 * mmc_send_op_cond() - start eMMC init: poll CMD1 until the busy flag
 * clears or the timeout elapses; completion is deferred via
 * op_cond_pending (finished later by mmc_complete_op_cond()).
 */
692 static int mmc_send_op_cond(struct mmc *mmc)
698 	/* Some cards seem to need this */
701 	start = get_timer(0);
702 	/* Asking to the card its capabilities */
704 		err = mmc_send_op_cond_iter(mmc, i != 0);
708 		/* exit if not busy (flag seems to be inverted) */
709 		if (mmc->ocr & OCR_BUSY)
712 		if (get_timer(start) > timeout)
716 	mmc->op_cond_pending = 1;
/*
 * mmc_complete_op_cond() - finish deferred CMD1 polling, read the OCR
 * (via CMD58 on SPI hosts) and set version/high-capacity fields.
 */
720 static int mmc_complete_op_cond(struct mmc *mmc)
727 	mmc->op_cond_pending = 0;
728 	if (!(mmc->ocr & OCR_BUSY)) {
729 		/* Some cards seem to need this */
732 		start = get_timer(0);
734 			err = mmc_send_op_cond_iter(mmc, 1);
737 			if (mmc->ocr & OCR_BUSY)
739 			if (get_timer(start) > timeout)
745 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
746 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
747 		cmd.resp_type = MMC_RSP_R3;
750 		err = mmc_send_cmd(mmc, &cmd, NULL);
755 		mmc->ocr = cmd.response[0];
758 	mmc->version = MMC_VERSION_UNKNOWN;
760 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_ext_csd() - CMD8: read the 512-byte Extended CSD register
 * into ext_csd (caller supplies a MMC_MAX_BLOCK_LEN buffer).
 */
767 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
770 	struct mmc_data data;
773 	/* Get the Card Status Register */
774 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
775 	cmd.resp_type = MMC_RSP_R1;
778 	data.dest = (char *)ext_csd;
780 	data.blocksize = MMC_MAX_BLOCK_LEN;
781 	data.flags = MMC_DATA_READ;
783 	err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * __mmc_switch() - CMD6: write one EXT_CSD byte, then wait for the
 * card to finish.  Timeout is GENERIC_CMD6_TIME (x10 ms) or
 * PARTITION_SWITCH_TIME for partition switches, falling back to
 * DEFAULT_CMD6_TIMEOUT_MS.  Completion is detected by DAT0 polling
 * when the host supports it, otherwise by CMD13 status polling
 * (or a plain delay when send_status is false).
 */
788 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
791 	unsigned int status, start;
793 	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
794 	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
795 			      (index == EXT_CSD_PART_CONF);
798 	if (mmc->gen_cmd6_time)
799 		timeout_ms = mmc->gen_cmd6_time * 10;
801 	if (is_part_switch && mmc->part_switch_time)
802 		timeout_ms = mmc->part_switch_time * 10;
804 	cmd.cmdidx = MMC_CMD_SWITCH;
805 	cmd.resp_type = MMC_RSP_R1b;
806 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
810 	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
814 	start = get_timer(0);
816 	/* poll dat0 for rdy/buys status */
817 	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
818 	if (ret && ret != -ENOSYS)
822 	 * In cases when not allowed to poll by using CMD13 or because we aren't
823 	 * capable of polling by using mmc_wait_dat0, then rely on waiting the
824 	 * stated timeout to be sufficient.
826 	if (ret == -ENOSYS || !send_status) {
831 	/* Finally wait until the card is ready or indicates a failure
832 	 * to switch. It doesn't hurt to use CMD13 here even if send_status
833 	 * is false, because by now (after 'timeout_ms' ms) the bus should be
837 		ret = mmc_send_status(mmc, &status);
839 		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
840 			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
844 		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA) &&
845 		    (status & MMC_STATUS_CURR_STATE) == MMC_STATE_TRANS)
848 	} while (get_timer(start) < timeout_ms);
/* Public wrapper: CMD6 switch with CMD13 status polling enabled. */
853 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
855 	return __mmc_switch(mmc, set, index, value, true);
/* Permanently write-protect the boot area via EXT_CSD BOOT_WP. */
858 int mmc_boot_wp(struct mmc *mmc)
860 	return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
863 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * mmc_set_card_speed() - program EXT_CSD HS_TIMING for the requested
 * bus mode.  When downgrading from HS200/HS400 (hsdowngrade) the
 * controller clock is dropped to HS first so EXT_CSD stays readable;
 * for HS/HS_52 the new timing is read back to confirm it took effect.
 */
864 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
870 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
876 		speed_bits = EXT_CSD_TIMING_HS;
878 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
880 		speed_bits = EXT_CSD_TIMING_HS200;
883 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
885 		speed_bits = EXT_CSD_TIMING_HS400;
888 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* HS400ES uses the HS400 timing value; ES is enabled elsewhere. */
890 		speed_bits = EXT_CSD_TIMING_HS400;
894 		speed_bits = EXT_CSD_TIMING_LEGACY;
/* Skip the CMD13 poll while downgrading (status not reliable yet). */
900 	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
901 			   speed_bits, !hsdowngrade);
905 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
906 	CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
908 	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
909 	 * to HS mode, the card clock are still running much faster than
910 	 * the supported HS mode clock, so we can not reliably read out
911 	 * Extended CSD. Reconfigure the controller to run at HS mode.
914 		mmc_select_mode(mmc, MMC_HS);
915 		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
919 	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
920 		/* Now check to see that it worked */
921 		err = mmc_send_ext_csd(mmc, test_csd);
925 		/* No high-speed support */
926 		if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive card_caps from the cached EXT_CSD:
 * bus widths, HS/HS_52/DDR_52 and (when compiled in) HS200/HS400/
 * HS400ES support, based on the CARD_TYPE and STROBE_SUPPORT bytes.
 */
933 static int mmc_get_capabilities(struct mmc *mmc)
935 	u8 *ext_csd = mmc->ext_csd;
938 	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
940 	if (mmc_host_is_spi(mmc))
943 	/* Only version 4 supports high-speed */
944 	if (mmc->version < MMC_VERSION_4)
948 		pr_err("No ext_csd found!\n"); /* this should enver happen */
952 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
954 	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
955 	mmc->cardtype = cardtype;
957 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
958 	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
959 			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
960 		mmc->card_caps |= MMC_MODE_HS200;
963 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
964 	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
965 	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
966 			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
967 		mmc->card_caps |= MMC_MODE_HS400;
970 	if (cardtype & EXT_CSD_CARD_TYPE_52) {
971 		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
972 			mmc->card_caps |= MMC_MODE_DDR_52MHz;
973 		mmc->card_caps |= MMC_MODE_HS_52MHz;
975 	if (cardtype & EXT_CSD_CARD_TYPE_26)
976 		mmc->card_caps |= MMC_MODE_HS;
978 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* HS400ES additionally requires enhanced-strobe support in EXT_CSD. */
979 	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
980 	    (mmc->card_caps & MMC_MODE_HS400)) {
981 		mmc->card_caps |= MMC_MODE_HS400_ES;
/*
 * mmc_set_capacity() - publish the capacity of the selected hardware
 * partition (user/boot/RPMB/GP) and refresh the blk_desc LBA count.
 */
989 static int mmc_set_capacity(struct mmc *mmc, int part_num)
993 		mmc->capacity = mmc->capacity_user;
997 		mmc->capacity = mmc->capacity_boot;
1000 		mmc->capacity = mmc->capacity_rpmb;
/* General-purpose partitions are numbered from 4; index into gp[]. */
1006 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1012 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * mmc_switch_part() - select a hardware partition via EXT_CSD
 * PARTITION_CONFIG (retried on failure), then update capacity and the
 * blk_desc hwpart — also when a failed switch targeted the raw device
 * (part 0), since -ENODEV there still leaves the user area selected.
 */
1017 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1023 		ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1025 				 (mmc->part_config & ~PART_ACCESS_MASK)
1026 				 | (part_num & PART_ACCESS_MASK));
1027 	} while (ret && retry--);
1030 	 * Set the capacity if the switch succeeded or was intended
1031 	 * to return to representing the raw device.
1033 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1034 		ret = mmc_set_capacity(mmc, part_num);
1035 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1041 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * mmc_hwpart_config() - validate and (optionally) program an eMMC
 * hardware-partition layout: enhanced user area, GP partition sizes,
 * write-reliability settings, and PARTITION_SETTING_COMPLETED.
 * `mode` selects check-only, set, or set-and-complete.  Partitioning
 * is one-shot: most of this is up-front validation because completing
 * it is irreversible (effective after a power cycle).
 */
1042 int mmc_hwpart_config(struct mmc *mmc,
1043 		      const struct mmc_hwpart_conf *conf,
1044 		      enum mmc_hwpart_conf_mode mode)
1049 	u32 gp_size_mult[4];
1050 	u32 max_enh_size_mult;
1051 	u32 tot_enh_size_mult = 0;
1054 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1056 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
/* Feature gate: needs eMMC >= 4.41, partitioning support, HC WP size. */
1059 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1060 		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1061 		return -EMEDIUMTYPE;
1064 	if (!(mmc->part_support & PART_SUPPORT)) {
1065 		pr_err("Card does not support partitioning\n");
1066 		return -EMEDIUMTYPE;
1069 	if (!mmc->hc_wp_grp_size) {
1070 		pr_err("Card does not define HC WP group size\n");
1071 		return -EMEDIUMTYPE;
1074 	/* check partition alignment and total enhanced size */
1075 	if (conf->user.enh_size) {
1076 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1077 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
1078 			pr_err("User data enhanced area not HC WP group "
1082 		part_attrs |= EXT_CSD_ENH_USR;
1083 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
/* Byte-addressed (non-HC) cards need the start address shifted to bytes. */
1084 		if (mmc->high_capacity) {
1085 			enh_start_addr = conf->user.enh_start;
1087 			enh_start_addr = (conf->user.enh_start << 9);
1093 	tot_enh_size_mult += enh_size_mult;
/* Validate each GP partition's size/alignment and sum enhanced sizes. */
1095 	for (pidx = 0; pidx < 4; pidx++) {
1096 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1097 			pr_err("GP%i partition not HC WP group size "
1098 			       "aligned\n", pidx+1);
1101 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1102 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1103 			part_attrs |= EXT_CSD_ENH_GP(pidx);
1104 			tot_enh_size_mult += gp_size_mult[pidx];
1108 	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1109 		pr_err("Card does not support enhanced attribute\n");
1110 		return -EMEDIUMTYPE;
1113 	err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field in EXT_CSD. */
1118 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1119 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1120 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1121 	if (tot_enh_size_mult > max_enh_size_mult) {
1122 		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1123 		       tot_enh_size_mult, max_enh_size_mult);
1124 		return -EMEDIUMTYPE;
1127 	/* The default value of EXT_CSD_WR_REL_SET is device
1128 	 * dependent, the values can only be changed if the
1129 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1130 	 * changed only once and before partitioning is completed. */
1131 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1132 	if (conf->user.wr_rel_change) {
1133 		if (conf->user.wr_rel_set)
1134 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1136 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1138 	for (pidx = 0; pidx < 4; pidx++) {
1139 		if (conf->gp_part[pidx].wr_rel_change) {
1140 			if (conf->gp_part[pidx].wr_rel_set)
1141 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1143 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1147 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1148 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1149 		puts("Card does not support host controlled partition write "
1150 		     "reliability settings\n");
1151 		return -EMEDIUMTYPE;
/* Refuse to touch a card whose partitioning was already completed. */
1154 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1155 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
1156 		pr_err("Card already partitioned\n");
1160 	if (mode == MMC_HWPART_CONF_CHECK)
1163 	/* Partitioning requires high-capacity size definitions */
1164 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1165 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1166 				 EXT_CSD_ERASE_GROUP_DEF, 1);
1171 			ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1173 #if CONFIG_IS_ENABLED(MMC_WRITE)
1174 		/* update erase group size to be high-capacity */
1175 		mmc->erase_grp_size =
1176 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1181 	/* all OK, write the configuration */
1182 	for (i = 0; i < 4; i++) {
1183 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1184 				 EXT_CSD_ENH_START_ADDR+i,
1185 				 (enh_start_addr >> (i*8)) & 0xFF);
1189 	for (i = 0; i < 3; i++) {
1190 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1191 				 EXT_CSD_ENH_SIZE_MULT+i,
1192 				 (enh_size_mult >> (i*8)) & 0xFF);
1196 	for (pidx = 0; pidx < 4; pidx++) {
1197 		for (i = 0; i < 3; i++) {
1198 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1199 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1200 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1205 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1206 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1210 	if (mode == MMC_HWPART_CONF_SET)
1213 	/* The WR_REL_SET is a write-once register but shall be
1214 	 * written before setting PART_SETTING_COMPLETED. As it is
1215 	 * write-once we can only write it when completing the
1217 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1218 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1219 				 EXT_CSD_WR_REL_SET, wr_rel_set);
1224 	/* Setting PART_SETTING_COMPLETED confirms the partition
1225 	 * configuration but it only becomes effective after power
1226 	 * cycle, so we do not adjust the partition related settings
1227 	 * in the mmc struct. */
1229 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1230 			 EXT_CSD_PARTITION_SETTING,
1231 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
1239 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_getcd() - card-detect state: board hook first, then the host
 * driver's getcd op when provided (mirrors mmc_getwp above).
 */
1240 int mmc_getcd(struct mmc *mmc)
1244 	cd = board_mmc_getcd(mmc);
1247 		if (mmc->cfg->ops->getcd)
1248 			cd = mmc->cfg->ops->getcd(mmc);
1257 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * sd_switch() - SD CMD6 SWITCH_FUNC: set `value` for function group
 * `group` (mode 0 = check, 1 = switch) and read the 64-byte switch
 * status block into resp.
 */
1258 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1261 	struct mmc_data data;
1263 	/* Switch the frequency */
1264 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1265 	cmd.resp_type = MMC_RSP_R1;
/* Argument: mode bit 31, all groups "no change" (0xf), then our group. */
1266 	cmd.cmdarg = (mode << 31) | 0xffffff;
1267 	cmd.cmdarg &= ~(0xf << (group * 4));
1268 	cmd.cmdarg |= value << (group * 4);
1270 	data.dest = (char *)resp;
1271 	data.blocksize = 64;
1273 	data.flags = MMC_DATA_READ;
1275 	return mmc_send_cmd(mmc, &cmd, &data);
/*
 * sd_get_capabilities() - read the SCR to determine SD version and
 * 4-bit support, then probe CMD6 for high-speed and (SD 3.0+) the
 * UHS bus-speed modes, accumulating everything into mmc->card_caps.
 */
1278 static int sd_get_capabilities(struct mmc *mmc)
1282 	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1283 	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1284 	struct mmc_data data;
1286 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1290 	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1292 	if (mmc_host_is_spi(mmc))
1295 	/* Read the SCR to find out if this card supports higher speeds */
1296 	cmd.cmdidx = MMC_CMD_APP_CMD;
1297 	cmd.resp_type = MMC_RSP_R1;
1298 	cmd.cmdarg = mmc->rca << 16;
1300 	err = mmc_send_cmd(mmc, &cmd, NULL);
1305 	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1306 	cmd.resp_type = MMC_RSP_R1;
1309 	data.dest = (char *)scr;
1312 	data.flags = MMC_DATA_READ;
1314 	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
/* SCR is big-endian on the wire; convert before decoding. */
1319 	mmc->scr[0] = __be32_to_cpu(scr[0]);
1320 	mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SCR_STRUCTURE / SD_SPEC nibble selects the spec version. */
1322 	switch ((mmc->scr[0] >> 24) & 0xf) {
1324 		mmc->version = SD_VERSION_1_0;
1327 		mmc->version = SD_VERSION_1_10;
1330 		mmc->version = SD_VERSION_2;
1331 		if ((mmc->scr[0] >> 15) & 0x1)
1332 			mmc->version = SD_VERSION_3;
1335 		mmc->version = SD_VERSION_1_0;
1339 	if (mmc->scr[0] & SD_DATA_4BIT)
1340 		mmc->card_caps |= MMC_MODE_4BIT;
1342 	/* Version 1.0 doesn't support switching */
1343 	if (mmc->version == SD_VERSION_1_0)
/* CMD6 check-mode probe; repeated while the HS function reports busy. */
1348 		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1349 				(u8 *)switch_status);
1354 		/* The high-speed function is busy.  Try again */
1355 		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1359 	/* If high-speed isn't supported, we return */
1360 	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1361 		mmc->card_caps |= MMC_CAP(SD_HS);
1363 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1364 	/* Version before 3.0 don't support UHS modes */
1365 	if (mmc->version < SD_VERSION_3)
/* Group 1 support bits (bus speed modes) live in switch_status[3]. */
1368 	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1369 	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1370 		mmc->card_caps |= MMC_CAP(UHS_SDR104);
1371 	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1372 		mmc->card_caps |= MMC_CAP(UHS_SDR50);
1373 	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1374 		mmc->card_caps |= MMC_CAP(UHS_SDR25);
1375 	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1376 		mmc->card_caps |= MMC_CAP(UHS_SDR12);
1377 	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1378 		mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * sd_set_card_speed() - map the bus mode to a CMD6 group-1 speed code
 * and switch the card; verifies the status block echoes the selected
 * function back (nibble in word 4).
 */
1384 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1388 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1391 	/* SD version 1.00 and 1.01 does not support CMD 6 */
1392 	if (mmc->version == SD_VERSION_1_0)
1397 		speed = UHS_SDR12_BUS_SPEED;
1400 		speed = HIGH_SPEED_BUS_SPEED;
1402 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1404 		speed = UHS_SDR12_BUS_SPEED;
1407 		speed = UHS_SDR25_BUS_SPEED;
1410 		speed = UHS_SDR50_BUS_SPEED;
1413 		speed = UHS_DDR50_BUS_SPEED;
1416 		speed = UHS_SDR104_BUS_SPEED;
1423 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* Confirm the card accepted the requested function. */
1427 	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * sd_select_bus_width() - ACMD6: set the card's bus width (1 or 4).
 * Only widths 1 and 4 are legal for SD.
 */
1433 static int sd_select_bus_width(struct mmc *mmc, int w)
1438 	if ((w != 4) && (w != 1))
1441 	cmd.cmdidx = MMC_CMD_APP_CMD;
1442 	cmd.resp_type = MMC_RSP_R1;
1443 	cmd.cmdarg = mmc->rca << 16;
1445 	err = mmc_send_cmd(mmc, &cmd, NULL);
1449 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1450 	cmd.resp_type = MMC_RSP_R1;
1455 	err = mmc_send_cmd(mmc, &cmd, NULL);
1463 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * sd_read_ssr() - read the 64-byte SD Status (ACMD13) and decode the
 * allocation unit size plus erase timeout/offset used by erase code.
 */
1464 static int sd_read_ssr(struct mmc *mmc)
/* AU_SIZE code -> allocation unit size in 512-byte sectors. */
1466 	static const unsigned int sd_au_size[] = {
1467 		0,		SZ_16K / 512,		SZ_32K / 512,
1468 		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
1469 		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
1470 		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
1471 		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
1476 	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1477 	struct mmc_data data;
1478 	unsigned int au, eo, et, es;
1480 	cmd.cmdidx = MMC_CMD_APP_CMD;
1481 	cmd.resp_type = MMC_RSP_R1;
1482 	cmd.cmdarg = mmc->rca << 16;
/* Some cards need the APP_CMD retried (MMC_QUIRK_RETRY_APP_CMD). */
1484 	err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
1488 	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1489 	cmd.resp_type = MMC_RSP_R1;
1492 	data.dest = (char *)ssr;
1493 	data.blocksize = 64;
1495 	data.flags = MMC_DATA_READ;
1497 	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
/* SSR arrives big-endian; normalize all 16 words in place. */
1501 	for (i = 0; i < 16; i++)
1502 		ssr[i] = be32_to_cpu(ssr[i]);
1504 	au = (ssr[2] >> 12) & 0xF;
/* Codes above 9 are only defined from SD 3.0 onwards. */
1505 	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1506 		mmc->ssr.au = sd_au_size[au];
1507 		es = (ssr[3] >> 24) & 0xFF;
1508 		es |= (ssr[2] & 0xFF) << 8;
1509 		et = (ssr[3] >> 18) & 0x3F;
1511 			eo = (ssr[3] >> 16) & 0x3;
1512 			mmc->ssr.erase_timeout = (et * 1000) / es;
1513 			mmc->ssr.erase_offset = eo * 1000;
1516 		pr_debug("Invalid Allocation Unit Size.\n");
1522 /* frequency bases */
1523 /* divided by 10 to be nice to platforms without floating point */
/* Used with `multipliers` below to decode CSD TRAN_SPEED (decimated:
 * the table entries themselves are not visible in this view). */
1524 static const int fbase[] = {
1531 /* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
1532  * to platforms without floating point.
1534 static const u8 multipliers[] = {
/*
 * bus_width() - convert a single MMC_MODE_*BIT capability flag to the
 * numeric bus width; warns on anything else (return values for the
 * matched cases are not visible in this view).
 */
1553 static inline int bus_width(uint cap)
1555 	if (cap == MMC_MODE_8BIT)
1557 	if (cap == MMC_MODE_4BIT)
1559 	if (cap == MMC_MODE_1BIT)
1561 	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1565 #if !CONFIG_IS_ENABLED(DM_MMC)
1566 #ifdef MMC_SUPPORTS_TUNING
/* Non-DM tuning hook; body not visible in this view. */
1567 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
/* Push the current mmc state (clock/width/voltage) to the host driver. */
1573 static int mmc_set_ios(struct mmc *mmc)
1577 	if (mmc->cfg->ops->set_ios)
1578 		ret = mmc->cfg->ops->set_ios(mmc);
/* Optional host power-cycle op (used for retrying failed init). */
1583 static int mmc_host_power_cycle(struct mmc *mmc)
1587 	if (mmc->cfg->ops->host_power_cycle)
1588 		ret = mmc->cfg->ops->host_power_cycle(mmc);
/*
 * mmc_set_clock() - clamp the requested clock into [f_min, f_max],
 * record the enable/disable state, and apply it via set_ios.
 */
1594 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1597 	if (clock > mmc->cfg->f_max)
1598 		clock = mmc->cfg->f_max;
1600 	if (clock < mmc->cfg->f_min)
1601 		clock = mmc->cfg->f_min;
1605 	mmc->clk_disable = disable;
1607 	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1609 	return mmc_set_ios(mmc);
/* Record the bus width and apply it via set_ios. */
1612 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1614 	mmc->bus_width = width;
1616 	return mmc_set_ios(mmc);
1619 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1621  * helper function to display the capabilities in a human
1622  * friendly manner. The capabilities include bus width and
1625 void mmc_dump_capabilities(const char *text, uint caps)
1629 	pr_debug("%s: widths [", text);
1630 	if (caps & MMC_MODE_8BIT)
1632 	if (caps & MMC_MODE_4BIT)
1634 	if (caps & MMC_MODE_1BIT)
/* \b\b erases the trailing ", " from the last printed item. */
1636 	pr_debug("\b\b] modes [");
1637 	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1638 		if (MMC_CAP(mode) & caps)
1639 			pr_debug("%s, ", mmc_mode_name(mode));
1640 	pr_debug("\b\b]\n");
/* Mode/width/tuning tuple used by the mode-selection tables below. */
1644 struct mode_width_tuning {
1647 #ifdef MMC_SUPPORTS_TUNING
1652 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* mmc_voltage_to_mv() - signal-voltage enum to millivolts. */
1653 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1656 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1657 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1658 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1659 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * mmc_set_signal_voltage() - change the I/O signalling voltage.
 * No-op when already at the requested voltage; otherwise records it
 * and pushes the change to the host via set_ios.
 */
1664 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1668 	if (mmc->signal_voltage == signal_voltage)
1671 	mmc->signal_voltage = signal_voltage;
1672 	err = mmc_set_ios(mmc);
1674 		pr_debug("unable to set voltage (err %d)\n", err);
/* Stub when MMC_IO_VOLTAGE is not compiled in. */
1679 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1685 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD modes ordered fastest-first; iteration tries each mode the card
 * and host both support.  (Decimated: the .mode fields of most entries
 * are not visible here.)
 */
1686 static const struct mode_width_tuning sd_modes_by_pref[] = {
1687 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1688 #ifdef MMC_SUPPORTS_TUNING
1691 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1692 		.tuning = MMC_CMD_SEND_TUNING_BLOCK
1697 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1701 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1705 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1710 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1712 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1715 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1720 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate sd_modes_by_pref, skipping modes the caps mask lacks. */
1724 #define for_each_sd_mode_by_pref(caps, mwt) \
1725 	for (mwt = sd_modes_by_pref;\
1726 	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1728 		if (caps & MMC_CAP(mwt->mode))
1730 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1733 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1734 const struct mode_width_tuning *mwt;
1735 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1736 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1738 bool uhs_en = false;
1743 mmc_dump_capabilities("sd card", card_caps);
1744 mmc_dump_capabilities("host", mmc->host_caps);
1747 if (mmc_host_is_spi(mmc)) {
1748 mmc_set_bus_width(mmc, 1);
1749 mmc_select_mode(mmc, MMC_LEGACY);
1750 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1751 #if CONFIG_IS_ENABLED(MMC_WRITE)
1752 err = sd_read_ssr(mmc);
1754 pr_warn("unable to read ssr\n");
1759 /* Restrict card's capabilities by what the host can do */
1760 caps = card_caps & mmc->host_caps;
1765 for_each_sd_mode_by_pref(caps, mwt) {
1768 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1769 if (*w & caps & mwt->widths) {
1770 pr_debug("trying mode %s width %d (at %d MHz)\n",
1771 mmc_mode_name(mwt->mode),
1773 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1775 /* configure the bus width (card + host) */
1776 err = sd_select_bus_width(mmc, bus_width(*w));
1779 mmc_set_bus_width(mmc, bus_width(*w));
1781 /* configure the bus mode (card) */
1782 err = sd_set_card_speed(mmc, mwt->mode);
1786 /* configure the bus mode (host) */
1787 mmc_select_mode(mmc, mwt->mode);
1788 mmc_set_clock(mmc, mmc->tran_speed,
1791 #ifdef MMC_SUPPORTS_TUNING
1792 /* execute tuning if needed */
1793 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1794 err = mmc_execute_tuning(mmc,
1797 pr_debug("tuning failed\n");
1803 #if CONFIG_IS_ENABLED(MMC_WRITE)
1804 err = sd_read_ssr(mmc);
1806 pr_warn("unable to read ssr\n");
1812 /* revert to a safer bus speed */
1813 mmc_select_mode(mmc, MMC_LEGACY);
1814 mmc_set_clock(mmc, mmc->tran_speed,
1820 pr_err("unable to select a mode\n");
1825 * read the compare the part of ext csd that is constant.
1826 * This can be used to check that the transfer is working
1829 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1832 const u8 *ext_csd = mmc->ext_csd;
1833 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1835 if (mmc->version < MMC_VERSION_4)
1838 err = mmc_send_ext_csd(mmc, test_csd);
1842 /* Only compare read only fields */
1843 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1844 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1845 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1846 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1847 ext_csd[EXT_CSD_REV]
1848 == test_csd[EXT_CSD_REV] &&
1849 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1850 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1851 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1852 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1858 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1859 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1860 uint32_t allowed_mask)
1868 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1869 EXT_CSD_CARD_TYPE_HS400_1_8V))
1870 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1871 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1872 EXT_CSD_CARD_TYPE_HS400_1_2V))
1873 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1876 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1877 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1878 MMC_SIGNAL_VOLTAGE_180;
1879 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1880 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1883 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1887 while (card_mask & allowed_mask) {
1888 enum mmc_voltage best_match;
1890 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1891 if (!mmc_set_signal_voltage(mmc, best_match))
1894 allowed_mask &= ~best_match;
1900 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1901 uint32_t allowed_mask)
1907 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1908 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1910 .mode = MMC_HS_400_ES,
1911 .widths = MMC_MODE_8BIT,
1914 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1917 .widths = MMC_MODE_8BIT,
1918 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1921 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1924 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1925 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1930 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1934 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1938 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1942 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1946 #define for_each_mmc_mode_by_pref(caps, mwt) \
1947 for (mwt = mmc_modes_by_pref;\
1948 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1950 if (caps & MMC_CAP(mwt->mode))
1952 static const struct ext_csd_bus_width {
1956 } ext_csd_bus_width[] = {
1957 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1958 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1959 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1960 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1961 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1964 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1965 static int mmc_select_hs400(struct mmc *mmc)
1969 /* Set timing to HS200 for tuning */
1970 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1974 /* configure the bus mode (host) */
1975 mmc_select_mode(mmc, MMC_HS_200);
1976 mmc_set_clock(mmc, mmc->tran_speed, false);
1978 /* execute tuning if needed */
1979 mmc->hs400_tuning = 1;
1980 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1981 mmc->hs400_tuning = 0;
1983 debug("tuning failed\n");
1987 /* Set back to HS */
1988 mmc_set_card_speed(mmc, MMC_HS, true);
1990 err = mmc_hs400_prepare_ddr(mmc);
1994 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1995 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1999 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
2003 mmc_select_mode(mmc, MMC_HS_400);
2004 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2011 static int mmc_select_hs400(struct mmc *mmc)
2017 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2018 #if !CONFIG_IS_ENABLED(DM_MMC)
2019 static int mmc_set_enhanced_strobe(struct mmc *mmc)
2024 static int mmc_select_hs400es(struct mmc *mmc)
2028 err = mmc_set_card_speed(mmc, MMC_HS, true);
2032 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2033 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2034 EXT_CSD_BUS_WIDTH_STROBE);
2036 printf("switch to bus width for hs400 failed\n");
2039 /* TODO: driver strength */
2040 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2044 mmc_select_mode(mmc, MMC_HS_400_ES);
2045 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2049 return mmc_set_enhanced_strobe(mmc);
2052 static int mmc_select_hs400es(struct mmc *mmc)
2058 #define for_each_supported_width(caps, ddr, ecbv) \
2059 for (ecbv = ext_csd_bus_width;\
2060 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2062 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2064 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2067 const struct mode_width_tuning *mwt;
2068 const struct ext_csd_bus_width *ecbw;
2071 mmc_dump_capabilities("mmc", card_caps);
2072 mmc_dump_capabilities("host", mmc->host_caps);
2075 if (mmc_host_is_spi(mmc)) {
2076 mmc_set_bus_width(mmc, 1);
2077 mmc_select_mode(mmc, MMC_LEGACY);
2078 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2082 /* Restrict card's capabilities by what the host can do */
2083 card_caps &= mmc->host_caps;
2085 /* Only version 4 of MMC supports wider bus widths */
2086 if (mmc->version < MMC_VERSION_4)
2089 if (!mmc->ext_csd) {
2090 pr_debug("No ext_csd found!\n"); /* this should enver happen */
2094 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2095 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
2096 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2098 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2099 * before doing anything else, since a transition from either of
2100 * the HS200/HS400 mode directly to legacy mode is not supported.
2102 if (mmc->selected_mode == MMC_HS_200 ||
2103 mmc->selected_mode == MMC_HS_400 ||
2104 mmc->selected_mode == MMC_HS_400_ES)
2105 mmc_set_card_speed(mmc, MMC_HS, true);
2108 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2110 for_each_mmc_mode_by_pref(card_caps, mwt) {
2111 for_each_supported_width(card_caps & mwt->widths,
2112 mmc_is_mode_ddr(mwt->mode), ecbw) {
2113 enum mmc_voltage old_voltage;
2114 pr_debug("trying mode %s width %d (at %d MHz)\n",
2115 mmc_mode_name(mwt->mode),
2116 bus_width(ecbw->cap),
2117 mmc_mode2freq(mmc, mwt->mode) / 1000000);
2118 old_voltage = mmc->signal_voltage;
2119 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2120 MMC_ALL_SIGNAL_VOLTAGE);
2124 /* configure the bus width (card + host) */
2125 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2127 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2130 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2132 if (mwt->mode == MMC_HS_400) {
2133 err = mmc_select_hs400(mmc);
2135 printf("Select HS400 failed %d\n", err);
2138 } else if (mwt->mode == MMC_HS_400_ES) {
2139 err = mmc_select_hs400es(mmc);
2141 printf("Select HS400ES failed %d\n",
2146 /* configure the bus speed (card) */
2147 err = mmc_set_card_speed(mmc, mwt->mode, false);
2152 * configure the bus width AND the ddr mode
2153 * (card). The host side will be taken care
2154 * of in the next step
2156 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2157 err = mmc_switch(mmc,
2158 EXT_CSD_CMD_SET_NORMAL,
2160 ecbw->ext_csd_bits);
2165 /* configure the bus mode (host) */
2166 mmc_select_mode(mmc, mwt->mode);
2167 mmc_set_clock(mmc, mmc->tran_speed,
2169 #ifdef MMC_SUPPORTS_TUNING
2171 /* execute tuning if needed */
2173 err = mmc_execute_tuning(mmc,
2176 pr_debug("tuning failed : %d\n", err);
2183 /* do a transfer to check the configuration */
2184 err = mmc_read_and_compare_ext_csd(mmc);
2188 mmc_set_signal_voltage(mmc, old_voltage);
2189 /* if an error occurred, revert to a safer bus mode */
2190 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2191 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2192 mmc_select_mode(mmc, MMC_LEGACY);
2193 mmc_set_bus_width(mmc, 1);
2197 pr_err("unable to select a mode : %d\n", err);
2203 #if CONFIG_IS_ENABLED(MMC_TINY)
2204 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2207 static int mmc_startup_v4(struct mmc *mmc)
2211 bool has_parts = false;
2212 bool part_completed;
2213 static const u32 mmc_versions[] = {
2225 #if CONFIG_IS_ENABLED(MMC_TINY)
2226 u8 *ext_csd = ext_csd_bkup;
2228 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2232 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2234 err = mmc_send_ext_csd(mmc, ext_csd);
2238 /* store the ext csd for future reference */
2240 mmc->ext_csd = ext_csd;
2242 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2244 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2247 /* check ext_csd version and capacity */
2248 err = mmc_send_ext_csd(mmc, ext_csd);
2252 /* store the ext csd for future reference */
2254 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2257 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2259 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2262 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2264 if (mmc->version >= MMC_VERSION_4_2) {
2266 * According to the JEDEC Standard, the value of
2267 * ext_csd's capacity is valid if the value is more
2270 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2271 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2272 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2273 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2274 capacity *= MMC_MAX_BLOCK_LEN;
2275 if ((capacity >> 20) > 2 * 1024)
2276 mmc->capacity_user = capacity;
2279 if (mmc->version >= MMC_VERSION_4_5)
2280 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2282 /* The partition data may be non-zero but it is only
2283 * effective if PARTITION_SETTING_COMPLETED is set in
2284 * EXT_CSD, so ignore any data if this bit is not set,
2285 * except for enabling the high-capacity group size
2286 * definition (see below).
2288 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2289 EXT_CSD_PARTITION_SETTING_COMPLETED);
2291 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2292 /* Some eMMC set the value too low so set a minimum */
2293 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2294 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2296 /* store the partition info of emmc */
2297 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2298 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2299 ext_csd[EXT_CSD_BOOT_MULT])
2300 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2301 if (part_completed &&
2302 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2303 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2305 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2307 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2309 for (i = 0; i < 4; i++) {
2310 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2311 uint mult = (ext_csd[idx + 2] << 16) +
2312 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2315 if (!part_completed)
2317 mmc->capacity_gp[i] = mult;
2318 mmc->capacity_gp[i] *=
2319 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2320 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2321 mmc->capacity_gp[i] <<= 19;
2324 #ifndef CONFIG_SPL_BUILD
2325 if (part_completed) {
2326 mmc->enh_user_size =
2327 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2328 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2329 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2330 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2331 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2332 mmc->enh_user_size <<= 19;
2333 mmc->enh_user_start =
2334 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2335 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2336 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2337 ext_csd[EXT_CSD_ENH_START_ADDR];
2338 if (mmc->high_capacity)
2339 mmc->enh_user_start <<= 9;
2344 * Host needs to enable ERASE_GRP_DEF bit if device is
2345 * partitioned. This bit will be lost every time after a reset
2346 * or power off. This will affect erase size.
2350 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2351 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2354 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2355 EXT_CSD_ERASE_GROUP_DEF, 1);
2360 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2363 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2364 #if CONFIG_IS_ENABLED(MMC_WRITE)
2365 /* Read out group size from ext_csd */
2366 mmc->erase_grp_size =
2367 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2370 * if high capacity and partition setting completed
2371 * SEC_COUNT is valid even if it is smaller than 2 GiB
2372 * JEDEC Standard JESD84-B45, 6.2.4
2374 if (mmc->high_capacity && part_completed) {
2375 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2376 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2377 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2378 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2379 capacity *= MMC_MAX_BLOCK_LEN;
2380 mmc->capacity_user = capacity;
2383 #if CONFIG_IS_ENABLED(MMC_WRITE)
2385 /* Calculate the group size from the csd value. */
2386 int erase_gsz, erase_gmul;
2388 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2389 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2390 mmc->erase_grp_size = (erase_gsz + 1)
2394 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2395 mmc->hc_wp_grp_size = 1024
2396 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2397 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2400 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2405 #if !CONFIG_IS_ENABLED(MMC_TINY)
2408 mmc->ext_csd = NULL;
2413 static int mmc_startup(struct mmc *mmc)
2419 struct blk_desc *bdesc;
2421 #ifdef CONFIG_MMC_SPI_CRC_ON
2422 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2423 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2424 cmd.resp_type = MMC_RSP_R1;
2426 err = mmc_send_cmd(mmc, &cmd, NULL);
2432 /* Put the Card in Identify Mode */
2433 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2434 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2435 cmd.resp_type = MMC_RSP_R2;
2438 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
2442 memcpy(mmc->cid, cmd.response, 16);
2445 * For MMC cards, set the Relative Address.
2446 * For SD cards, get the Relatvie Address.
2447 * This also puts the cards into Standby State
2449 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2450 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2451 cmd.cmdarg = mmc->rca << 16;
2452 cmd.resp_type = MMC_RSP_R6;
2454 err = mmc_send_cmd(mmc, &cmd, NULL);
2460 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2463 /* Get the Card-Specific Data */
2464 cmd.cmdidx = MMC_CMD_SEND_CSD;
2465 cmd.resp_type = MMC_RSP_R2;
2466 cmd.cmdarg = mmc->rca << 16;
2468 err = mmc_send_cmd(mmc, &cmd, NULL);
2473 mmc->csd[0] = cmd.response[0];
2474 mmc->csd[1] = cmd.response[1];
2475 mmc->csd[2] = cmd.response[2];
2476 mmc->csd[3] = cmd.response[3];
2478 if (mmc->version == MMC_VERSION_UNKNOWN) {
2479 int version = (cmd.response[0] >> 26) & 0xf;
2483 mmc->version = MMC_VERSION_1_2;
2486 mmc->version = MMC_VERSION_1_4;
2489 mmc->version = MMC_VERSION_2_2;
2492 mmc->version = MMC_VERSION_3;
2495 mmc->version = MMC_VERSION_4;
2498 mmc->version = MMC_VERSION_1_2;
2503 /* divide frequency by 10, since the mults are 10x bigger */
2504 freq = fbase[(cmd.response[0] & 0x7)];
2505 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2507 mmc->legacy_speed = freq * mult;
2508 mmc_select_mode(mmc, MMC_LEGACY);
2510 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2511 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2512 #if CONFIG_IS_ENABLED(MMC_WRITE)
2515 mmc->write_bl_len = mmc->read_bl_len;
2517 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2520 if (mmc->high_capacity) {
2521 csize = (mmc->csd[1] & 0x3f) << 16
2522 | (mmc->csd[2] & 0xffff0000) >> 16;
2525 csize = (mmc->csd[1] & 0x3ff) << 2
2526 | (mmc->csd[2] & 0xc0000000) >> 30;
2527 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2530 mmc->capacity_user = (csize + 1) << (cmult + 2);
2531 mmc->capacity_user *= mmc->read_bl_len;
2532 mmc->capacity_boot = 0;
2533 mmc->capacity_rpmb = 0;
2534 for (i = 0; i < 4; i++)
2535 mmc->capacity_gp[i] = 0;
2537 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2538 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2540 #if CONFIG_IS_ENABLED(MMC_WRITE)
2541 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2542 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2545 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2546 cmd.cmdidx = MMC_CMD_SET_DSR;
2547 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2548 cmd.resp_type = MMC_RSP_NONE;
2549 if (mmc_send_cmd(mmc, &cmd, NULL))
2550 pr_warn("MMC: SET_DSR failed\n");
2553 /* Select the card, and put it into Transfer Mode */
2554 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2555 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2556 cmd.resp_type = MMC_RSP_R1;
2557 cmd.cmdarg = mmc->rca << 16;
2558 err = mmc_send_cmd(mmc, &cmd, NULL);
2565 * For SD, its erase group is always one sector
2567 #if CONFIG_IS_ENABLED(MMC_WRITE)
2568 mmc->erase_grp_size = 1;
2570 mmc->part_config = MMCPART_NOAVAILABLE;
2572 err = mmc_startup_v4(mmc);
2576 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2580 #if CONFIG_IS_ENABLED(MMC_TINY)
2581 mmc_set_clock(mmc, mmc->legacy_speed, false);
2582 mmc_select_mode(mmc, MMC_LEGACY);
2583 mmc_set_bus_width(mmc, 1);
2586 err = sd_get_capabilities(mmc);
2589 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2591 err = mmc_get_capabilities(mmc);
2594 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2600 mmc->best_mode = mmc->selected_mode;
2602 /* Fix the block length for DDR mode */
2603 if (mmc->ddr_mode) {
2604 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2605 #if CONFIG_IS_ENABLED(MMC_WRITE)
2606 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2610 /* fill in device description */
2611 bdesc = mmc_get_blk_desc(mmc);
2615 bdesc->blksz = mmc->read_bl_len;
2616 bdesc->log2blksz = LOG2(bdesc->blksz);
2617 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2618 #if !defined(CONFIG_SPL_BUILD) || \
2619 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2620 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
2621 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2622 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2623 (mmc->cid[3] >> 16) & 0xffff);
2624 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2625 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2626 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2627 (mmc->cid[2] >> 24) & 0xff);
2628 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2629 (mmc->cid[2] >> 16) & 0xf);
2631 bdesc->vendor[0] = 0;
2632 bdesc->product[0] = 0;
2633 bdesc->revision[0] = 0;
2636 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2643 static int mmc_send_if_cond(struct mmc *mmc)
2648 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2649 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2650 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2651 cmd.resp_type = MMC_RSP_R7;
2653 err = mmc_send_cmd(mmc, &cmd, NULL);
2658 if ((cmd.response[0] & 0xff) != 0xaa)
2661 mmc->version = SD_VERSION_2;
2666 #if !CONFIG_IS_ENABLED(DM_MMC)
2667 /* board-specific MMC power initializations. */
2668 __weak void board_mmc_power_init(void)
2673 static int mmc_power_init(struct mmc *mmc)
2675 #if CONFIG_IS_ENABLED(DM_MMC)
2676 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2679 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2682 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2684 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2685 &mmc->vqmmc_supply);
2687 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2689 #else /* !CONFIG_DM_MMC */
2691 * Driver model should use a regulator, as above, rather than calling
2692 * out to board code.
2694 board_mmc_power_init();
2700 * put the host in the initial state:
2701 * - turn on Vdd (card power supply)
2702 * - configure the bus width and clock to minimal values
2704 static void mmc_set_initial_state(struct mmc *mmc)
2708 /* First try to set 3.3V. If it fails set to 1.8V */
2709 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2711 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2713 pr_warn("mmc: failed to set signal voltage\n");
2715 mmc_select_mode(mmc, MMC_LEGACY);
2716 mmc_set_bus_width(mmc, 1);
2717 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2720 static int mmc_power_on(struct mmc *mmc)
2722 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2723 if (mmc->vmmc_supply) {
2724 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2726 if (ret && ret != -EACCES) {
2727 printf("Error enabling VMMC supply : %d\n", ret);
2735 static int mmc_power_off(struct mmc *mmc)
2737 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2738 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2739 if (mmc->vmmc_supply) {
2740 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2742 if (ret && ret != -EACCES) {
2743 pr_debug("Error disabling VMMC supply : %d\n", ret);
2751 static int mmc_power_cycle(struct mmc *mmc)
2755 ret = mmc_power_off(mmc);
2759 ret = mmc_host_power_cycle(mmc);
2764 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2765 * to be on the safer side.
2768 return mmc_power_on(mmc);
2771 int mmc_get_op_cond(struct mmc *mmc, bool quiet)
2773 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2779 err = mmc_power_init(mmc);
2783 #ifdef CONFIG_MMC_QUIRKS
2784 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2785 MMC_QUIRK_RETRY_SEND_CID |
2786 MMC_QUIRK_RETRY_APP_CMD;
2789 err = mmc_power_cycle(mmc);
2792 * if power cycling is not supported, we should not try
2793 * to use the UHS modes, because we wouldn't be able to
2794 * recover from an error during the UHS initialization.
2796 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2798 mmc->host_caps &= ~UHS_CAPS;
2799 err = mmc_power_on(mmc);
2804 #if CONFIG_IS_ENABLED(DM_MMC)
2806 * Re-initialization is needed to clear old configuration for
2809 err = mmc_reinit(mmc);
2811 /* made sure it's not NULL earlier */
2812 err = mmc->cfg->ops->init(mmc);
2819 mmc_set_initial_state(mmc);
2821 /* Reset the Card */
2822 err = mmc_go_idle(mmc);
2827 /* The internal partition reset to user partition(0) at every CMD0 */
2828 mmc_get_blk_desc(mmc)->hwpart = 0;
2830 /* Test for SD version 2 */
2831 err = mmc_send_if_cond(mmc);
2833 /* Now try to get the SD card's operating condition */
2834 err = sd_send_op_cond(mmc, uhs_en);
2835 if (err && uhs_en) {
2837 mmc_power_cycle(mmc);
2841 /* If the command timed out, we check for an MMC card */
2842 if (err == -ETIMEDOUT) {
2843 err = mmc_send_op_cond(mmc);
2846 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2848 pr_err("Card did not respond to voltage select! : %d\n", err);
2857 int mmc_start_init(struct mmc *mmc)
2863 * all hosts are capable of 1 bit bus-width and able to use the legacy
2866 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2869 if (IS_ENABLED(CONFIG_MMC_SPEED_MODE_SET)) {
2870 if (mmc->user_speed_mode != MMC_MODES_END) {
2873 if (mmc->host_caps & MMC_CAP(mmc->user_speed_mode)) {
2874 /* Remove all existing speed capabilities */
2875 for (i = MMC_LEGACY; i < MMC_MODES_END; i++)
2876 mmc->host_caps &= ~MMC_CAP(i);
2877 mmc->host_caps |= (MMC_CAP(mmc->user_speed_mode)
2878 | MMC_CAP(MMC_LEGACY) |
2881 pr_err("bus_mode requested is not supported\n");
2886 #if CONFIG_IS_ENABLED(DM_MMC)
2887 mmc_deferred_probe(mmc);
2889 #if !defined(CONFIG_MMC_BROKEN_CD)
2890 no_card = mmc_getcd(mmc) == 0;
2894 #if !CONFIG_IS_ENABLED(DM_MMC)
2895 /* we pretend there's no card when init is NULL */
2896 no_card = no_card || (mmc->cfg->ops->init == NULL);
2900 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2901 pr_err("MMC: no card present\n");
2906 err = mmc_get_op_cond(mmc, false);
2909 mmc->init_in_progress = 1;
2914 static int mmc_complete_init(struct mmc *mmc)
2918 mmc->init_in_progress = 0;
2919 if (mmc->op_cond_pending)
2920 err = mmc_complete_op_cond(mmc);
2923 err = mmc_startup(mmc);
2931 int mmc_init(struct mmc *mmc)
2934 __maybe_unused ulong start;
2935 #if CONFIG_IS_ENABLED(DM_MMC)
2936 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2943 start = get_timer(0);
2945 if (!mmc->init_in_progress)
2946 err = mmc_start_init(mmc);
2949 err = mmc_complete_init(mmc);
2951 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2956 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2957 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2958 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2959 int mmc_deinit(struct mmc *mmc)
2967 caps_filtered = mmc->card_caps &
2968 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2969 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2970 MMC_CAP(UHS_SDR104));
2972 return sd_select_mode_and_width(mmc, caps_filtered);
2974 caps_filtered = mmc->card_caps &
2975 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400) | MMC_CAP(MMC_HS_400_ES));
2977 return mmc_select_mode_and_width(mmc, caps_filtered);
2982 int mmc_set_dsr(struct mmc *mmc, u16 val)
2988 /* CPU-specific MMC initializations */
2989 __weak int cpu_mmc_init(struct bd_info *bis)
2994 /* board-specific MMC initializations. */
2995 __weak int board_mmc_init(struct bd_info *bis)
3000 void mmc_set_preinit(struct mmc *mmc, int preinit)
3002 mmc->preinit = preinit;
3005 #if CONFIG_IS_ENABLED(DM_MMC)
3006 static int mmc_probe(struct bd_info *bis)
3010 struct udevice *dev;
3012 ret = uclass_get(UCLASS_MMC, &uc);
3017 * Try to add them in sequence order. Really with driver model we
3018 * should allow holes, but the current MMC list does not allow that.
3019 * So if we request 0, 1, 3 we will get 0, 1, 2.
3021 for (i = 0; ; i++) {
3022 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3026 uclass_foreach_dev(dev, uc) {
3027 ret = device_probe(dev);
3029 pr_err("%s - probe failed: %d\n", dev->name, ret);
3035 static int mmc_probe(struct bd_info *bis)
3037 if (board_mmc_init(bis) < 0)
3044 int mmc_initialize(struct bd_info *bis)
3046 static int initialized = 0;
3048 if (initialized) /* Avoid initializing mmc multiple times */
3052 #if !CONFIG_IS_ENABLED(BLK)
3053 #if !CONFIG_IS_ENABLED(MMC_TINY)
3057 ret = mmc_probe(bis);
3061 #ifndef CONFIG_SPL_BUILD
3062 print_mmc_devices(',');
3069 #if CONFIG_IS_ENABLED(DM_MMC)
3070 int mmc_init_device(int num)
3072 struct udevice *dev;
3076 if (uclass_get_device_by_seq(UCLASS_MMC, num, &dev)) {
3077 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3082 m = mmc_get_mmc_dev(dev);
3083 m->user_speed_mode = MMC_MODES_END; /* Initialising user set speed mode */
3094 #ifdef CONFIG_CMD_BKOPS_ENABLE
3095 int mmc_set_bkops_enable(struct mmc *mmc)
3098 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3100 err = mmc_send_ext_csd(mmc, ext_csd);
3102 puts("Could not get ext_csd register values\n");
3106 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3107 puts("Background operations not supported on device\n");
3108 return -EMEDIUMTYPE;
3111 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3112 puts("Background operations already enabled\n");
3116 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3118 puts("Failed to enable manual background operations\n");
3122 puts("Enabled manual background operations\n");
3128 __weak int mmc_get_env_dev(void)
3130 #ifdef CONFIG_SYS_MMC_ENV_DEV
3131 return CONFIG_SYS_MMC_ENV_DEV;