1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
7 * Based vaguely on the Linux code
16 #include <dm/device-internal.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <power/regulator.h>
25 #include <linux/list.h>
27 #include "mmc_private.h"
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
31 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
33 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Legacy (non-driver-model) dispatch helpers: each forwards to the matching
 * callback in mmc->cfg->ops when the host driver provides one.
 * NOTE(review): the source is elided in this view — fallback paths, local
 * declarations and final returns are not visible here.
 */
/* Wait for DAT0 to reach @state, bounded by @timeout_us, via the host op. */
35 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
37 if (mmc->cfg->ops->wait_dat0)
38 return mmc->cfg->ops->wait_dat0(mmc, state, timeout_us);
/* Weak board hook so boards without a write-protect line need not define it. */
43 __weak int board_mmc_getwp(struct mmc *mmc)
/* Query write-protect: board hook first, host controller op may override. */
48 int mmc_getwp(struct mmc *mmc)
52 wp = board_mmc_getwp(mmc);
55 if (mmc->cfg->ops->getwp)
56 wp = mmc->cfg->ops->getwp(mmc);
/* Weak board hook for card-detect, mirroring board_mmc_getwp() above. */
64 __weak int board_mmc_getcd(struct mmc *mmc)
70 #ifdef CONFIG_MMC_TRACE
/* Trace helper: log command index and argument before it is sent. */
71 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
73 printf("CMD_SEND:%d\n", cmd->cmdidx);
74 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
/* Trace helper: log the driver return code and decode the response words
 * according to cmd->resp_type.
 * NOTE(review): case labels and closing braces are elided in this view.
 */
77 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
83 printf("\t\tRET\t\t\t %d\n", ret);
85 switch (cmd->resp_type) {
87 printf("\t\tMMC_RSP_NONE\n");
90 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
94 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
98 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
102 printf("\t\t \t\t 0x%08x \n",
104 printf("\t\t \t\t 0x%08x \n",
107 printf("\t\t\t\t\tDUMPING DATA\n");
/* R2 (CSD/CID) dump: 4 response words, bytes printed via a decrementing
 * pointer (*ptr--) — presumably to emit them in bus order; confirm against
 * the elided ptr initialisation. */
108 for (i = 0; i < 4; i++) {
110 printf("\t\t\t\t\t%03d - ", i*4);
111 ptr = (u8 *)&cmd->response[i];
113 for (j = 0; j < 4; j++)
114 printf("%02x ", *ptr--);
119 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
123 printf("\t\tERROR MMC rsp not supported\n");
/* Trace helper: extract CURRENT_STATE (bits 12:9 of the R1 status word). */
129 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
133 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
134 printf("CURR STATE:%d\n", status);
138 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG) || CONFIG_VAL(LOGLEVEL) >= LOGL_DEBUG
/* Map a bus_mode enum value to a human-readable name (for logs only). */
139 const char *mmc_mode_name(enum bus_mode mode)
141 static const char *const names[] = {
142 [MMC_LEGACY] = "MMC legacy",
143 [MMC_HS] = "MMC High Speed (26MHz)",
144 [SD_HS] = "SD High Speed (50MHz)",
145 [UHS_SDR12] = "UHS SDR12 (25MHz)",
146 [UHS_SDR25] = "UHS SDR25 (50MHz)",
147 [UHS_SDR50] = "UHS SDR50 (100MHz)",
148 [UHS_SDR104] = "UHS SDR104 (208MHz)",
149 [UHS_DDR50] = "UHS DDR50 (50MHz)",
150 [MMC_HS_52] = "MMC High Speed (52MHz)",
151 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
152 [MMC_HS_200] = "HS200 (200MHz)",
153 [MMC_HS_400] = "HS400 (200MHz)",
154 [MMC_HS_400_ES] = "HS400ES (200MHz)",
/* Guard against out-of-range modes before indexing the table. */
157 if (mode >= MMC_MODES_END)
158 return "Unknown mode";
/*
 * Map a bus_mode to its nominal clock frequency in Hz.  MMC_LEGACY is
 * special-cased to the card's probed legacy speed; out-of-range modes are
 * rejected (elided branch).
 */
164 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
166 static const int freqs[] = {
167 [MMC_LEGACY] = 25000000,
170 [MMC_HS_52] = 52000000,
171 [MMC_DDR_52] = 52000000,
172 [UHS_SDR12] = 25000000,
173 [UHS_SDR25] = 50000000,
174 [UHS_SDR50] = 100000000,
175 [UHS_DDR50] = 50000000,
176 [UHS_SDR104] = 208000000,
177 [MMC_HS_200] = 200000000,
178 [MMC_HS_400] = 200000000,
179 [MMC_HS_400_ES] = 200000000,
182 if (mode == MMC_LEGACY)
183 return mmc->legacy_speed;
184 else if (mode >= MMC_MODES_END)
/* Record the selected mode on the mmc struct and derive speed/DDR flags. */
190 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
192 mmc->selected_mode = mode;
193 mmc->tran_speed = mmc_mode2freq(mmc, mode);
194 mmc->ddr_mode = mmc_is_mode_ddr(mode);
195 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
196 mmc->tran_speed / 1000000);
200 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Send one command (plus optional data phase) through the legacy host ops,
 * with trace hooks around the transfer.
 */
201 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
205 mmmc_trace_before_send(mmc, cmd);
206 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
207 mmmc_trace_after_send(mmc, cmd, ret);
214 * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
216 * @dev: device to receive the command
217 * @cmd: command to send
218 * @data: additional data to send/receive
219 * @retries: how many times to retry; mmc_send_cmd is always called at least
221 * Return: 0 if ok, -ve on error
223 static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
224 struct mmc_data *data, uint retries)
229 ret = mmc_send_cmd(mmc, cmd, data);
/* Loop until success or the retry budget is exhausted. */
230 } while (ret && retries--);
236 * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
237 * specific quirk is enabled
239 * @dev: device to receive the command
240 * @cmd: command to send
241 * @data: additional data to send/receive
242 * @quirk: retry only if this quirk is enabled
243 * @retries: how many times to retry; mmc_send_cmd is always called at least
245 * Return: 0 if ok, -ve on error
247 static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
248 struct mmc_data *data, u32 quirk, uint retries)
/* Retry only when quirk support is compiled in AND this card has the quirk. */
250 if (CONFIG_IS_ENABLED(MMC_QUIRKS) && mmc->quirks & quirk)
251 return mmc_send_cmd_retry(mmc, cmd, data, retries)
253 return mmc_send_cmd(mmc, cmd, data);
/* Issue CMD13 (SEND_STATUS) and return the R1 status word via @status. */
256 int mmc_send_status(struct mmc *mmc, unsigned int *status)
261 cmd.cmdidx = MMC_CMD_SEND_STATUS;
262 cmd.resp_type = MMC_RSP_R1;
/* SPI hosts do not address by RCA; native mode puts RCA in arg[31:16]. */
263 if (!mmc_host_is_spi(mmc))
264 cmd.cmdarg = mmc->rca << 16;
266 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
267 mmc_trace_state(mmc, &cmd);
269 *status = cmd.response[0];
/*
 * Poll the card until it is ready for data and out of the programming
 * state, or @timeout_ms expires.  Prefers hardware DAT0 busy detection
 * (mmc_wait_dat0) and falls back to CMD13 polling.
 * NOTE(review): loop structure and some exits are elided in this view.
 */
274 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
279 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
284 err = mmc_send_status(mmc, &status);
288 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
289 (status & MMC_STATUS_CURR_STATE) !=
/* Any error bit set in the status mask aborts the poll loudly. */
293 if (status & MMC_STATUS_MASK) {
294 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
295 pr_err("Status Error: 0x%08x\n", status);
300 if (timeout_ms-- <= 0)
306 if (timeout_ms <= 0) {
307 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
308 pr_err("Timeout waiting card ready\n");
/* Set the card's block length via CMD16; retried when the
 * MMC_QUIRK_RETRY_SET_BLOCKLEN quirk is active. */
316 int mmc_set_blocklen(struct mmc *mmc, int len)
323 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
324 cmd.resp_type = MMC_RSP_R1;
327 return mmc_send_cmd_quirks(mmc, &cmd, NULL,
328 MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
331 #ifdef MMC_SUPPORTS_TUNING
/* Standard 64-byte tuning block pattern for 4-bit bus (per the eMMC/SD
 * tuning procedure). */
332 static const u8 tuning_blk_pattern_4bit[] = {
333 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
334 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
335 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
336 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
337 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
338 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
339 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
340 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard 128-byte tuning block pattern for 8-bit bus. */
343 static const u8 tuning_blk_pattern_8bit[] = {
344 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
345 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
346 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
347 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
348 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
349 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
350 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
351 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
352 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
353 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
354 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
355 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
356 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
357 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
358 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
359 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Send a tuning command (@opcode) and read back one tuning block, then
 * compare it against the expected pattern for the current bus width.
 * NOTE(review): the error path for unsupported bus widths is elided.
 */
362 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
365 struct mmc_data data;
366 const u8 *tuning_block_pattern;
/* Pattern/size selection follows the active bus width (8 vs 4 bit). */
369 if (mmc->bus_width == 8) {
370 tuning_block_pattern = tuning_blk_pattern_8bit;
371 size = sizeof(tuning_blk_pattern_8bit);
372 } else if (mmc->bus_width == 4) {
373 tuning_block_pattern = tuning_blk_pattern_4bit;
374 size = sizeof(tuning_blk_pattern_4bit);
/* DMA-safe, cache-aligned receive buffer. */
379 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
383 cmd.resp_type = MMC_RSP_R1;
385 data.dest = (void *)data_buf;
387 data.blocksize = size;
388 data.flags = MMC_DATA_READ;
390 err = mmc_send_cmd(mmc, &cmd, &data);
/* A mismatch means this tuning phase setting is bad. */
394 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17/CMD18.
 * Multi-block reads are terminated with CMD12 (STOP_TRANSMISSION).
 * NOTE(review): return statements are elided in this view; per the caller
 * below it appears to return the number of blocks read — confirm.
 */
401 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
405 struct mmc_data data;
408 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
410 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
/* High-capacity cards address by block number, standard by byte offset. */
412 if (mmc->high_capacity)
415 cmd.cmdarg = start * mmc->read_bl_len;
417 cmd.resp_type = MMC_RSP_R1;
420 data.blocks = blkcnt;
421 data.blocksize = mmc->read_bl_len;
422 data.flags = MMC_DATA_READ;
424 if (mmc_send_cmd(mmc, &cmd, &data))
428 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
430 cmd.resp_type = MMC_RSP_R1b;
431 if (mmc_send_cmd(mmc, &cmd, NULL)) {
432 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
433 pr_err("mmc fail to send stop cmd\n");
442 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Per-transfer max block count: host op if provided, else static config. */
443 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
445 if (mmc->cfg->ops->get_b_max)
446 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
448 return mmc->cfg->b_max;
/*
 * Block-device read entry point.  Two signatures: driver-model (udevice)
 * vs legacy (blk_desc).  Validates the range, selects the hw partition,
 * sets the block length, then reads in chunks of at most b_max blocks.
 */
452 #if CONFIG_IS_ENABLED(BLK)
453 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
455 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
459 #if CONFIG_IS_ENABLED(BLK)
460 struct blk_desc *block_dev = dev_get_uclass_plat(dev);
462 int dev_num = block_dev->devnum;
464 lbaint_t cur, blocks_todo = blkcnt;
470 struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY skips the generic blk layer and switches partitions directly. */
474 if (CONFIG_IS_ENABLED(MMC_TINY))
475 err = mmc_switch_part(mmc, block_dev->hwpart);
477 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads past the end of the device. */
482 if ((start + blkcnt) > block_dev->lba) {
483 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
484 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
485 start + blkcnt, block_dev->lba);
490 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
491 pr_debug("%s: Failed to set blocklen\n", __func__);
495 b_max = mmc_get_b_max(mmc, dst, blkcnt);
/* Chunked transfer loop: at most b_max blocks per mmc_read_blocks() call. */
498 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
499 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
500 pr_debug("%s: Failed to read blocks\n", __func__);
505 dst += cur * mmc->read_bl_len;
506 } while (blocks_todo > 0);
/* Reset the card to idle state with CMD0 (no response expected). */
511 static int mmc_go_idle(struct mmc *mmc)
518 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
520 cmd.resp_type = MMC_RSP_NONE;
522 err = mmc_send_cmd(mmc, &cmd, NULL);
532 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD voltage-switch sequence (CMD11) to move signalling to
 * the requested level, gating the clock around the transition as the SD
 * spec requires.
 * NOTE(review): several intermediate checks/returns are elided here.
 */
533 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
539 * Send CMD11 only if the request is to switch the card to
/* 3.3V needs no CMD11 handshake — just program the host. */
542 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
543 return mmc_set_signal_voltage(mmc, signal_voltage);
545 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
547 cmd.resp_type = MMC_RSP_R1;
549 err = mmc_send_cmd(mmc, &cmd, NULL);
553 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
557 * The card should drive cmd and dat[0:3] low immediately
558 * after the response of cmd11, but wait 100 us to be sure
560 err = mmc_wait_dat0(mmc, 0, 100);
567 * During a signal voltage level switch, the clock must be gated
568 * for 5 ms according to the SD spec
570 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
572 err = mmc_set_signal_voltage(mmc, signal_voltage);
576 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
578 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
581 * Failure to switch is indicated by the card holding
582 * dat[0:3] low. Wait for at least 1 ms according to spec
584 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * SD initialisation: loop ACMD41 (APP_CMD + SD_SEND_OP_COND) until the
 * card reports ready (OCR_BUSY set), negotiating HCS and optionally the
 * 1.8V request (OCR_S18R) when @uhs_en.
 * NOTE(review): the surrounding retry loop and timeouts are elided.
 */
594 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
601 cmd.cmdidx = MMC_CMD_APP_CMD;
602 cmd.resp_type = MMC_RSP_R1;
605 err = mmc_send_cmd(mmc, &cmd, NULL);
610 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
611 cmd.resp_type = MMC_RSP_R3;
614 * Most cards do not answer if some reserved bits
615 * in the ocr are set. However, Some controller
616 * can set bit 7 (reserved for low voltages), but
617 * how to manage low voltages SD card is not yet
/* Mask host voltages to the defined OCR window (bits 15-23). */
620 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
621 (mmc->cfg->voltages & 0xff8000);
623 if (mmc->version == SD_VERSION_2)
624 cmd.cmdarg |= OCR_HCS;
627 cmd.cmdarg |= OCR_S18R;
629 err = mmc_send_cmd(mmc, &cmd, NULL);
634 if (cmd.response[0] & OCR_BUSY)
/* Cards that never claimed v2 are treated as SD 1.0. */
643 if (mmc->version != SD_VERSION_2)
644 mmc->version = SD_VERSION_1_0;
646 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
647 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
648 cmd.resp_type = MMC_RSP_R3;
651 err = mmc_send_cmd(mmc, &cmd, NULL);
657 mmc->ocr = cmd.response[0];
659 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Card accepted the 1.8V request (S18A + CCS pattern 0x41000000). */
660 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
662 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
668 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * One CMD1 (SEND_OP_COND) iteration for eMMC; when @use_arg is set the
 * argument echoes back the voltage window/access mode from the previous
 * OCR read, per the eMMC init handshake.
 */
674 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
679 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
680 cmd.resp_type = MMC_RSP_R3;
682 if (use_arg && !mmc_host_is_spi(mmc))
683 cmd.cmdarg = OCR_HCS |
684 (mmc->cfg->voltages &
685 (mmc->ocr & OCR_VOLTAGE_MASK)) |
686 (mmc->ocr & OCR_ACCESS_MODE);
688 err = mmc_send_cmd(mmc, &cmd, NULL);
691 mmc->ocr = cmd.response[0];
/*
 * Start the eMMC CMD1 handshake: poll until not busy or timeout, then
 * mark op_cond_pending so mmc_complete_op_cond() can finish later
 * (supports deferred/background init).
 */
695 static int mmc_send_op_cond(struct mmc *mmc)
701 /* Some cards seem to need this */
704 start = get_timer(0);
705 /* Asking to the card its capabilities */
707 err = mmc_send_op_cond_iter(mmc, i != 0);
711 /* exit if not busy (flag seems to be inverted) */
712 if (mmc->ocr & OCR_BUSY)
715 if (get_timer(start) > timeout)
719 mmc->op_cond_pending = 1;
/*
 * Finish deferred eMMC init: keep issuing CMD1 until OCR_BUSY, read the
 * OCR over SPI when needed, then latch version/high_capacity.
 */
723 static int mmc_complete_op_cond(struct mmc *mmc)
730 mmc->op_cond_pending = 0;
731 if (!(mmc->ocr & OCR_BUSY)) {
732 /* Some cards seem to need this */
735 start = get_timer(0);
737 err = mmc_send_op_cond_iter(mmc, 1);
740 if (mmc->ocr & OCR_BUSY)
742 if (get_timer(start) > timeout)
748 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
749 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
750 cmd.resp_type = MMC_RSP_R3;
753 err = mmc_send_cmd(mmc, &cmd, NULL);
758 mmc->ocr = cmd.response[0];
761 mmc->version = MMC_VERSION_UNKNOWN;
763 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/* Read the 512-byte Extended CSD register (CMD8 for eMMC) into @ext_csd. */
770 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
773 struct mmc_data data;
776 /* Get the Card Status Register */
777 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
778 cmd.resp_type = MMC_RSP_R1;
781 data.dest = (char *)ext_csd;
783 data.blocksize = MMC_MAX_BLOCK_LEN;
784 data.flags = MMC_DATA_READ;
786 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * Core CMD6 (SWITCH) implementation: write one EXT_CSD byte and wait for
 * the card to leave busy, preferring DAT0 polling over CMD13.  Timeout
 * comes from GENERIC_CMD6_TIME / PARTITION_SWITCH_TIME (units of 10 ms)
 * with DEFAULT_CMD6_TIMEOUT_MS as fallback.
 * NOTE(review): parts of the polling loop are elided in this view.
 */
791 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
794 unsigned int status, start;
796 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
797 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
798 (index == EXT_CSD_PART_CONF);
801 if (mmc->gen_cmd6_time)
802 timeout_ms = mmc->gen_cmd6_time * 10;
/* Partition switches may need a longer, dedicated timeout. */
804 if (is_part_switch && mmc->part_switch_time)
805 timeout_ms = mmc->part_switch_time * 10;
807 cmd.cmdidx = MMC_CMD_SWITCH;
808 cmd.resp_type = MMC_RSP_R1b;
809 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
813 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
817 start = get_timer(0);
819 /* poll dat0 for rdy/buys status */
820 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
821 if (ret && ret != -ENOSYS)
825 * In cases when neiter allowed to poll by using CMD13 nor we are
826 * capable of polling by using mmc_wait_dat0, then rely on waiting the
827 * stated timeout to be sufficient.
829 if (ret == -ENOSYS && !send_status) {
837 /* Finally wait until the card is ready or indicates a failure
838 * to switch. It doesn't hurt to use CMD13 here even if send_status
839 * is false, because by now (after 'timeout_ms' ms) the bus should be
843 ret = mmc_send_status(mmc, &status);
845 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
846 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
850 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA) &&
851 (status & MMC_STATUS_CURR_STATE) == MMC_STATE_TRANS)
854 } while (get_timer(start) < timeout_ms);
/* Public CMD6 wrapper: always verifies with CMD13 (send_status=true). */
859 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
861 return __mmc_switch(mmc, set, index, value, true);
/* Power-on write-protect both boot partitions (BOOT_WP = 1). */
864 int mmc_boot_wp(struct mmc *mmc)
866 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
/*
 * Power-on write-protect a single boot partition (0 or 1) using the
 * B_SEC_WP_SEL selection bits; any other @partition protects both.
 */
869 int mmc_boot_wp_single_partition(struct mmc *mmc, int partition)
874 value = EXT_CSD_BOOT_WP_B_PWR_WP_EN;
876 if (partition == 0) {
877 value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
878 ret = mmc_switch(mmc,
879 EXT_CSD_CMD_SET_NORMAL,
882 } else if (partition == 1) {
883 value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
884 value |= EXT_CSD_BOOT_WP_B_PWR_WP_SEC_SEL;
885 ret = mmc_switch(mmc,
886 EXT_CSD_CMD_SET_NORMAL,
890 ret = mmc_boot_wp(mmc);
896 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the eMMC HS_TIMING field for the requested bus mode, then (for
 * HS/HS_52) read EXT_CSD back to confirm the card accepted high speed.
 * When downgrading from HS200/HS400, the host clock is dropped first so
 * the verification read is reliable.
 * NOTE(review): switch-case labels and some returns are elided here.
 */
897 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
903 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
909 speed_bits = EXT_CSD_TIMING_HS;
911 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
913 speed_bits = EXT_CSD_TIMING_HS200;
916 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
918 speed_bits = EXT_CSD_TIMING_HS400;
921 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
923 speed_bits = EXT_CSD_TIMING_HS400;
927 speed_bits = EXT_CSD_TIMING_LEGACY;
/* Skip CMD13 status verification during a downgrade (bus too fast). */
933 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
934 speed_bits, !hsdowngrade);
938 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
939 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
941 * In case the eMMC is in HS200/HS400 mode and we are downgrading
942 * to HS mode, the card clock are still running much faster than
943 * the supported HS mode clock, so we can not reliably read out
944 * Extended CSD. Reconfigure the controller to run at HS mode.
947 mmc_select_mode(mmc, MMC_HS);
948 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
952 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
953 /* Now check to see that it worked */
954 err = mmc_send_ext_csd(mmc, test_csd);
958 /* No high-speed support */
959 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Derive mmc->card_caps (bus widths and speed modes) from the cached
 * EXT_CSD CARD_TYPE field.  SPI hosts and pre-v4 cards get legacy only.
 */
966 static int mmc_get_capabilities(struct mmc *mmc)
968 u8 *ext_csd = mmc->ext_csd;
971 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
973 if (mmc_host_is_spi(mmc))
976 /* Only version 4 supports high-speed */
977 if (mmc->version < MMC_VERSION_4)
981 pr_err("No ext_csd found!\n"); /* this should enver happen */
985 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
987 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
988 mmc->cardtype = cardtype;
990 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
991 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
992 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
993 mmc->card_caps |= MMC_MODE_HS200;
996 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
997 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
998 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
999 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
1000 mmc->card_caps |= MMC_MODE_HS400;
1003 if (cardtype & EXT_CSD_CARD_TYPE_52) {
1004 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
1005 mmc->card_caps |= MMC_MODE_DDR_52MHz;
1006 mmc->card_caps |= MMC_MODE_HS_52MHz;
1008 if (cardtype & EXT_CSD_CARD_TYPE_26)
1009 mmc->card_caps |= MMC_MODE_HS;
1011 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* HS400ES additionally requires the enhanced-strobe support flag. */
1012 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
1013 (mmc->card_caps & MMC_MODE_HS400)) {
1014 mmc->card_caps |= MMC_MODE_HS400_ES;
/*
 * Update mmc->capacity (and the blk descriptor's lba) to reflect the
 * currently selected hardware partition: user, boot, RPMB, or GP1-4.
 */
1022 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1026 mmc->capacity = mmc->capacity_user;
1030 mmc->capacity = mmc->capacity_boot;
1033 mmc->capacity = mmc->capacity_rpmb;
/* GP partitions are part numbers 4-7 → capacity_gp[0..3]. */
1039 mmc->capacity = mmc->capacity_gp[part_num - 4];
1045 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * Switch the active hardware partition via PART_CONF (CMD6), with retry.
 * On success — or on -ENODEV when returning to the raw device (part 0) —
 * refresh the capacity and the block descriptor's hwpart.
 */
1050 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1056 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1058 (mmc->part_config & ~PART_ACCESS_MASK)
1059 | (part_num & PART_ACCESS_MASK));
1060 } while (ret && retry--);
1063 * Set the capacity if the switch succeeded or was intended
1064 * to return to representing the raw device.
1066 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1067 ret = mmc_set_capacity(mmc, part_num);
1068 mmc_get_blk_desc(mmc)->hwpart = part_num;
1074 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP partitions,
 * write-reliability) per @conf.  @mode selects dry-run check, writing the
 * settings, or writing plus setting PARTITION_SETTING_COMPLETED (which is
 * irreversible and only takes effect after a power cycle).
 * NOTE(review): some declarations, returns and closing braces are elided
 * in this view; comments below describe only what is visible.
 */
1075 int mmc_hwpart_config(struct mmc *mmc,
1076 const struct mmc_hwpart_conf *conf,
1077 enum mmc_hwpart_conf_mode mode)
1082 u32 gp_size_mult[4];
1083 u32 max_enh_size_mult;
1084 u32 tot_enh_size_mult = 0;
1087 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
/* --- validation phase: host-side checks before touching the card --- */
1089 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1092 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1093 pr_err("eMMC >= 4.4 required for enhanced user data area\n")
1094 return -EMEDIUMTYPE;
1097 if (!(mmc->part_support & PART_SUPPORT)) {
1098 pr_err("Card does not support partitioning\n");
1099 return -EMEDIUMTYPE;
1102 if (!mmc->hc_wp_grp_size) {
1103 pr_err("Card does not define HC WP group size\n");
1104 return -EMEDIUMTYPE;
1107 /* check partition alignment and total enhanced size */
1108 if (conf->user.enh_size) {
1109 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1110 conf->user.enh_start % mmc->hc_wp_grp_size) {
1111 pr_err("User data enhanced area not HC WP group "
1115 part_attrs |= EXT_CSD_ENH_USR;
1116 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
/* Byte-addressed (non-HC) cards store the start address <<9 (sectors→bytes). */
1117 if (mmc->high_capacity) {
1118 enh_start_addr = conf->user.enh_start;
1120 enh_start_addr = (conf->user.enh_start << 9);
1126 tot_enh_size_mult += enh_size_mult;
/* Validate each of the four general-purpose partitions. */
1128 for (pidx = 0; pidx < 4; pidx++) {
1129 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1130 pr_err("GP%i partition not HC WP group size "
1131 "aligned\n", pidx+1);
1134 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1135 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1136 part_attrs |= EXT_CSD_ENH_GP(pidx);
1137 tot_enh_size_mult += gp_size_mult[pidx];
1141 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1142 pr_err("Card does not support enhanced attribute\n");
1143 return -EMEDIUMTYPE;
/* --- card-side checks: read EXT_CSD and compare limits --- */
1146 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field. */
1151 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1152 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1153 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1154 if (tot_enh_size_mult > max_enh_size_mult) {
1155 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1156 tot_enh_size_mult, max_enh_size_mult);
1157 return -EMEDIUMTYPE;
1160 /* The default value of EXT_CSD_WR_REL_SET is device
1161 * dependent, the values can only be changed if the
1162 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1163 * changed only once and before partitioning is completed. */
1164 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1165 if (conf->user.wr_rel_change) {
1166 if (conf->user.wr_rel_set)
1167 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1169 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1171 for (pidx = 0; pidx < 4; pidx++) {
1172 if (conf->gp_part[pidx].wr_rel_change) {
1173 if (conf->gp_part[pidx].wr_rel_set)
1174 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1176 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1180 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1181 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1182 puts("Card does not support host controlled partition write "
1183 "reliability settings\n");
1184 return -EMEDIUMTYPE;
/* Partitioning is one-shot: refuse if already completed. */
1187 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1188 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1189 pr_err("Card already partitioned\n");
/* CHECK mode stops after validation, before any writes. */
1193 if (mode == MMC_HWPART_CONF_CHECK)
1196 /* Partitioning requires high-capacity size definitions */
1197 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1198 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1199 EXT_CSD_ERASE_GROUP_DEF, 1);
1204 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1206 #if CONFIG_IS_ENABLED(MMC_WRITE)
1207 /* update erase group size to be high-capacity */
1208 mmc->erase_grp_size =
1209 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1214 /* all OK, write the configuration */
/* ENH_START_ADDR: 4 bytes, written LSB first. */
1215 for (i = 0; i < 4; i++) {
1216 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1217 EXT_CSD_ENH_START_ADDR+i,
1218 (enh_start_addr >> (i*8)) & 0xFF);
/* ENH_SIZE_MULT: 3 bytes, LSB first. */
1222 for (i = 0; i < 3; i++) {
1223 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1224 EXT_CSD_ENH_SIZE_MULT+i,
1225 (enh_size_mult >> (i*8)) & 0xFF);
/* GP_SIZE_MULT: 3 bytes per partition, 4 partitions. */
1229 for (pidx = 0; pidx < 4; pidx++) {
1230 for (i = 0; i < 3; i++) {
1231 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1232 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1233 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1238 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1239 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
/* SET mode writes the layout but defers the irreversible completion bit. */
1243 if (mode == MMC_HWPART_CONF_SET)
1246 /* The WR_REL_SET is a write-once register but shall be
1247 * written before setting PART_SETTING_COMPLETED. As it is
1248 * write-once we can only write it when completing the
1250 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1251 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1252 EXT_CSD_WR_REL_SET, wr_rel_set);
1257 /* Setting PART_SETTING_COMPLETED confirms the partition
1258 * configuration but it only becomes effective after power
1259 * cycle, so we do not adjust the partition related settings
1260 * in the mmc struct. */
1262 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1263 EXT_CSD_PARTITION_SETTING,
1264 EXT_CSD_PARTITION_SETTING_COMPLETED);
1272 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Card-detect query: board hook first, host controller op may override. */
1273 int mmc_getcd(struct mmc *mmc)
1277 cd = board_mmc_getcd(mmc);
1280 if (mmc->cfg->ops->getcd)
1281 cd = mmc->cfg->ops->getcd(mmc);
1290 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Issue SD CMD6 (SWITCH_FUNC): set function @value in function @group,
 * in check (@mode=0) or switch (@mode=1) mode, reading the 64-byte
 * switch-status block into @resp.
 */
1291 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1294 struct mmc_data data;
1296 /* Switch the frequency */
1297 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1298 cmd.resp_type = MMC_RSP_R1;
/* All groups default to 0xf (no change); only @group gets @value. */
1299 cmd.cmdarg = (mode << 31) | 0xffffff;
1300 cmd.cmdarg &= ~(0xf << (group * 4));
1301 cmd.cmdarg |= value << (group * 4);
1303 data.dest = (char *)resp;
1304 data.blocksize = 64;
1306 data.flags = MMC_DATA_READ;
1308 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * Probe SD card capabilities: read the SCR (ACMD51) to get the spec
 * version and 4-bit support, then use CMD6 check mode to detect
 * high-speed and (for SD 3.0+) the UHS bus modes.
 * NOTE(review): the CMD6 retry loop and some early returns are elided.
 */
1311 static int sd_get_capabilities(struct mmc *mmc)
1315 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1316 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1317 struct mmc_data data;
1319 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1323 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
/* SPI hosts get legacy capabilities only. */
1325 if (mmc_host_is_spi(mmc))
1328 /* Read the SCR to find out if this card supports higher speeds */
1329 cmd.cmdidx = MMC_CMD_APP_CMD;
1330 cmd.resp_type = MMC_RSP_R1;
1331 cmd.cmdarg = mmc->rca << 16;
1333 err = mmc_send_cmd(mmc, &cmd, NULL);
1338 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1339 cmd.resp_type = MMC_RSP_R1;
1342 data.dest = (char *)scr;
1345 data.flags = MMC_DATA_READ;
1347 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
/* SCR is big-endian on the wire. */
1352 mmc->scr[0] = __be32_to_cpu(scr[0]);
1353 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC field (SCR bits 59:56) → SD physical-layer version. */
1355 switch ((mmc->scr[0] >> 24) & 0xf) {
1357 mmc->version = SD_VERSION_1_0;
1360 mmc->version = SD_VERSION_1_10;
1363 mmc->version = SD_VERSION_2;
/* SD_SPEC3 bit distinguishes 3.0 from plain 2.0. */
1364 if ((mmc->scr[0] >> 15) & 0x1)
1365 mmc->version = SD_VERSION_3;
1368 mmc->version = SD_VERSION_1_0;
1372 if (mmc->scr[0] & SD_DATA_4BIT)
1373 mmc->card_caps |= MMC_MODE_4BIT;
1375 /* Version 1.0 doesn't support switching */
1376 if (mmc->version == SD_VERSION_1_0)
1381 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1382 (u8 *)switch_status);
1387 /* The high-speed function is busy. Try again */
1388 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1392 /* If high-speed isn't supported, we return */
1393 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1394 mmc->card_caps |= MMC_CAP(SD_HS);
1396 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1397 /* Version before 3.0 don't support UHS modes */
1398 if (mmc->version < SD_VERSION_3)
/* Group 1 support bits (word 3, upper half) → UHS mode capabilities. */
1401 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1402 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1403 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1404 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1405 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1406 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1407 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1408 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1409 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1410 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1411 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * Switch the SD card to the access mode matching @mode via CMD6 switch
 * mode, then verify the selected function in the returned status block.
 */
1417 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1421 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1424 /* SD version 1.00 and 1.01 does not support CMD 6 */
1425 if (mmc->version == SD_VERSION_1_0)
1430 speed = UHS_SDR12_BUS_SPEED;
1433 speed = HIGH_SPEED_BUS_SPEED;
1435 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1437 speed = UHS_SDR12_BUS_SPEED;
1440 speed = UHS_SDR25_BUS_SPEED;
1443 speed = UHS_SDR50_BUS_SPEED;
1446 speed = UHS_DDR50_BUS_SPEED;
1449 speed = UHS_SDR104_BUS_SPEED;
1456 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* Function-group-1 result nibble must echo the requested speed. */
1460 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * Set the SD bus width via ACMD6 (SET_BUS_WIDTH); only 1-bit and 4-bit
 * are valid for SD.
 */
1466 static int sd_select_bus_width(struct mmc *mmc, int w)
1471 if ((w != 4) && (w != 1))
1474 cmd.cmdidx = MMC_CMD_APP_CMD;
1475 cmd.resp_type = MMC_RSP_R1;
1476 cmd.cmdarg = mmc->rca << 16;
1478 err = mmc_send_cmd(mmc, &cmd, NULL);
1482 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1483 cmd.resp_type = MMC_RSP_R1;
1488 err = mmc_send_cmd(mmc, &cmd, NULL);
1496 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the SD Status Register (ACMD13) and decode allocation-unit size
 * and erase timing into mmc->ssr (used for aligned erase operations).
 */
1497 static int sd_read_ssr(struct mmc *mmc)
/* AU_SIZE code → AU size in 512-byte sectors (table from the SD spec). */
1499 static const unsigned int sd_au_size[] = {
1500 0, SZ_16K / 512, SZ_32K / 512,
1501 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1502 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1503 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1504 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1509 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1510 struct mmc_data data;
1511 unsigned int au, eo, et, es;
1513 cmd.cmdidx = MMC_CMD_APP_CMD;
1514 cmd.resp_type = MMC_RSP_R1;
1515 cmd.cmdarg = mmc->rca << 16;
1517 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
1521 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1522 cmd.resp_type = MMC_RSP_R1;
1525 data.dest = (char *)ssr;
1526 data.blocksize = 64;
1528 data.flags = MMC_DATA_READ;
1530 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
/* SSR words arrive big-endian; convert in place before field extraction. */
1534 for (i = 0; i < 16; i++)
1535 ssr[i] = be32_to_cpu(ssr[i]);
1537 au = (ssr[2] >> 12) & 0xF;
/* AU codes above 9 are only defined from SD 3.0 onward. */
1538 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1539 mmc->ssr.au = sd_au_size[au];
1540 es = (ssr[3] >> 24) & 0xFF;
1541 es |= (ssr[2] & 0xFF) << 8;
1542 et = (ssr[3] >> 18) & 0x3F;
1544 eo = (ssr[3] >> 16) & 0x3;
1545 mmc->ssr.erase_timeout = (et * 1000) / es;
1546 mmc->ssr.erase_offset = eo * 1000;
1549 pr_debug("Invalid Allocation Unit Size.\n");
1555 /* frequency bases */
1556 /* divided by 10 to be nice to platforms without floating point */
1557 static const int fbase[] = {
1564 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1565 * to platforms without floating point.
1567 static const u8 multipliers[] = {
/* Map a single-bit width capability flag to its bus width in bits. */
1586 static inline int bus_width(uint cap)
1588 if (cap == MMC_MODE_8BIT)
1590 if (cap == MMC_MODE_4BIT)
1592 if (cap == MMC_MODE_1BIT)
1594 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1598 #if !CONFIG_IS_ENABLED(DM_MMC)
1599 #ifdef MMC_SUPPORTS_TUNING
/* Legacy tuning entry point (body elided in this view). */
1600 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
/* Push the current clock/width/mode settings to the host controller. */
1606 static int mmc_set_ios(struct mmc *mmc)
1610 if (mmc->cfg->ops->set_ios)
1611 ret = mmc->cfg->ops->set_ios(mmc);
/* Power-cycle the card via the host op, when the driver provides one. */
1616 static int mmc_host_power_cycle(struct mmc *mmc)
1620 if (mmc->cfg->ops->host_power_cycle)
1621 ret = mmc->cfg->ops->host_power_cycle(mmc);
/*
 * Set the bus clock, clamped to the host's [f_min, f_max] range, and
 * record the enable/disable state before applying via mmc_set_ios().
 */
1627 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1630 if (clock > mmc->cfg->f_max)
1631 clock = mmc->cfg->f_max;
1633 if (clock < mmc->cfg->f_min)
1634 clock = mmc->cfg->f_min;
1638 mmc->clk_disable = disable;
1640 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1642 return mmc_set_ios(mmc);
/* Record and apply the data bus width. */
1645 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1647 mmc->bus_width = width;
1649 return mmc_set_ios(mmc);
1652 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1654 * helper function to display the capabilities in a human
1655 * friendly manner. The capabilities include bus width and
1658 void mmc_dump_capabilities(const char *text, uint caps)
1662 pr_debug("%s: widths [", text);
1663 if (caps & MMC_MODE_8BIT)
1665 if (caps & MMC_MODE_4BIT)
1667 if (caps & MMC_MODE_1BIT)
/* \b\b erases the trailing ", " from the list just printed. */
1669 pr_debug("\b\b] modes [");
1670 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1671 if (MMC_CAP(mode) & caps)
1672 pr_debug("%s, ", mmc_mode_name(mode));
1673 pr_debug("\b\b]\n");
/* One candidate bus mode: which widths it supports and (when tuning is
 * compiled in) the tuning command to run after selecting it. */
1677 struct mode_width_tuning {
1680 #ifdef MMC_SUPPORTS_TUNING
1685 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Map the signal-voltage enum to millivolts; VOLTAGE_000 (0 mV) means off. */
1686 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1689 case MMC_SIGNAL_VOLTAGE_000: return 0;
1690 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1691 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1692 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/* Switch the I/O signal voltage. No-op if already at the requested level;
 * otherwise record it and apply through mmc_set_ios(). */
1697 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1701 if (mmc->signal_voltage == signal_voltage)
1704 mmc->signal_voltage = signal_voltage;
1705 err = mmc_set_ios(mmc);
1707 pr_debug("unable to set voltage (err %d)\n", err);
/* Stub used when MMC_IO_VOLTAGE support is compiled out. */
1712 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1718 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* SD bus modes ordered from most to least preferred; the selection loop in
 * sd_select_mode_and_width() walks this table front to back. */
1719 static const struct mode_width_tuning sd_modes_by_pref[] = {
1720 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1721 #ifdef MMC_SUPPORTS_TUNING
1724 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1725 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1730 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1734 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1738 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1743 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1745 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1748 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1753 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate sd_modes_by_pref, visiting only entries whose mode bit is in caps.
 * The trailing if-guard applies to the statement that follows the macro use. */
1757 #define for_each_sd_mode_by_pref(caps, mwt) \
1758 for (mwt = sd_modes_by_pref;\
1759 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1761 if (caps & MMC_CAP(mwt->mode))
/* Pick the fastest SD bus mode/width the card and host both support.
 * Tries modes in preference order, and for each mode tries 4-bit before
 * 1-bit. On success reads the SSR (when MMC_WRITE is enabled); on failure
 * falls back to a safer speed before retrying the next candidate. */
1763 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1766 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1767 const struct mode_width_tuning *mwt;
1768 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1769 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1771 bool uhs_en = false;
1776 mmc_dump_capabilities("sd card", card_caps);
1777 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts are fixed at 1-bit legacy mode; nothing to negotiate. */
1780 if (mmc_host_is_spi(mmc)) {
1781 mmc_set_bus_width(mmc, 1);
1782 mmc_select_mode(mmc, MMC_LEGACY);
1783 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1784 #if CONFIG_IS_ENABLED(MMC_WRITE)
1785 err = sd_read_ssr(mmc);
1787 pr_warn("unable to read ssr\n");
1792 /* Restrict card's capabilities by what the host can do */
1793 caps = card_caps & mmc->host_caps;
1798 for_each_sd_mode_by_pref(caps, mwt) {
1801 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1802 if (*w & caps & mwt->widths) {
1803 pr_debug("trying mode %s width %d (at %d MHz)\n",
1804 mmc_mode_name(mwt->mode),
1806 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1808 /* configure the bus width (card + host) */
1809 err = sd_select_bus_width(mmc, bus_width(*w));
1812 mmc_set_bus_width(mmc, bus_width(*w));
1814 /* configure the bus mode (card) */
1815 err = sd_set_card_speed(mmc, mwt->mode);
1819 /* configure the bus mode (host) */
1820 mmc_select_mode(mmc, mwt->mode);
1821 mmc_set_clock(mmc, mmc->tran_speed,
1824 #ifdef MMC_SUPPORTS_TUNING
1825 /* execute tuning if needed */
1826 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1827 err = mmc_execute_tuning(mmc,
1830 pr_debug("tuning failed\n");
1836 #if CONFIG_IS_ENABLED(MMC_WRITE)
1837 err = sd_read_ssr(mmc);
1839 pr_warn("unable to read ssr\n");
1845 /* revert to a safer bus speed */
1846 mmc_select_mode(mmc, MMC_LEGACY);
1847 mmc_set_clock(mmc, mmc->tran_speed,
1853 pr_err("unable to select a mode\n");
1858 * read and compare the part of ext_csd that is constant.
1859 * This can be used to check that the transfer is working
1862 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1865 const u8 *ext_csd = mmc->ext_csd;
1866 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists from MMC v4 onward; older cards trivially pass. */
1868 if (mmc->version < MMC_VERSION_4)
/* Re-read EXT_CSD over the newly configured bus into a scratch buffer. */
1871 err = mmc_send_ext_csd(mmc, test_csd);
1875 /* Only compare read only fields */
1876 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1877 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1878 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1879 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1880 ext_csd[EXT_CSD_REV]
1881 == test_csd[EXT_CSD_REV] &&
1882 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1883 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1884 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1885 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1891 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Build the set of signal voltages the card's EXT_CSD card type advertises
 * for the requested mode, then try them — ffs() picks the least-significant
 * set bit of the intersection with allowed_mask first, and failed choices
 * are removed from allowed_mask before retrying. */
1892 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1893 uint32_t allowed_mask)
1901 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1902 EXT_CSD_CARD_TYPE_HS400_1_8V))
1903 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1904 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1905 EXT_CSD_CARD_TYPE_HS400_1_2V))
1906 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1909 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1910 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1911 MMC_SIGNAL_VOLTAGE_180;
1912 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1913 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1916 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1920 while (card_mask & allowed_mask) {
1921 enum mmc_voltage best_match;
1923 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1924 if (!mmc_set_signal_voltage(mmc, best_match))
1927 allowed_mask &= ~best_match;
/* Stub used when MMC_IO_VOLTAGE support is compiled out. */
1933 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1934 uint32_t allowed_mask)
/* eMMC bus modes ordered from most to least preferred (HS400ES first,
 * legacy last); walked by for_each_mmc_mode_by_pref() below. */
1940 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1941 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1943 .mode = MMC_HS_400_ES,
1944 .widths = MMC_MODE_8BIT,
1947 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1950 .widths = MMC_MODE_8BIT,
1951 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1954 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1957 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1958 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1963 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1967 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1971 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1975 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate mmc_modes_by_pref, visiting only entries whose mode bit is in
 * caps; the trailing if-guard applies to the statement after the macro. */
1979 #define for_each_mmc_mode_by_pref(caps, mwt) \
1980 for (mwt = mmc_modes_by_pref;\
1981 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1983 if (caps & MMC_CAP(mwt->mode))
/* Map a width capability + DDR flag to the EXT_CSD BUS_WIDTH register
 * value, widest (and DDR) entries first. */
1985 static const struct ext_csd_bus_width {
1989 } ext_csd_bus_width[] = {
1990 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1991 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1992 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1993 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1994 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1997 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/* Enter HS400: tune in HS200 first (HS400 itself cannot be tuned), drop
 * back to HS, switch the card to DDR 8-bit, then raise timing to HS400. */
1998 static int mmc_select_hs400(struct mmc *mmc)
2002 /* Set timing to HS200 for tuning */
2003 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
2007 /* configure the bus mode (host) */
2008 mmc_select_mode(mmc, MMC_HS_200);
2009 mmc_set_clock(mmc, mmc->tran_speed, false);
2011 /* execute tuning if needed */
2012 mmc->hs400_tuning = 1;
2013 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
2014 mmc->hs400_tuning = 0;
2016 debug("tuning failed\n");
2020 /* Set back to HS */
2021 mmc_set_card_speed(mmc, MMC_HS, true);
/* Host-specific preparation for the DDR transition, if any. */
2023 err = mmc_hs400_prepare_ddr(mmc);
2027 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2028 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
2032 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
2036 mmc_select_mode(mmc, MMC_HS_400);
2037 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is compiled out. */
2044 static int mmc_select_hs400(struct mmc *mmc)
2050 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2051 #if !CONFIG_IS_ENABLED(DM_MMC)
2052 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/* Enter HS400 Enhanced Strobe: no tuning needed — switch to HS, enable the
 * strobe bit along with DDR 8-bit width, then raise timing to HS400ES and
 * tell the host to use the enhanced strobe. */
2057 static int mmc_select_hs400es(struct mmc *mmc)
2061 err = mmc_set_card_speed(mmc, MMC_HS, true);
2065 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2066 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2067 EXT_CSD_BUS_WIDTH_STROBE);
2069 printf("switch to bus width for hs400 failed\n");
2072 /* TODO: driver strength */
2073 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2077 mmc_select_mode(mmc, MMC_HS_400_ES);
2078 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2082 return mmc_set_enhanced_strobe(mmc);
/* Stub when HS400ES support is compiled out. */
2085 static int mmc_select_hs400es(struct mmc *mmc)
/* Iterate ext_csd_bus_width entries matching the requested DDR flag and
 * caps; the trailing if-guard applies to the statement after the macro. */
2091 #define for_each_supported_width(caps, ddr, ecbv) \
2092 for (ecbv = ext_csd_bus_width;\
2093 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2095 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/* Pick the fastest eMMC bus mode/width the card and host both support.
 * Walks mmc_modes_by_pref and, per mode, the matching bus-width table;
 * verifies each candidate with mmc_read_and_compare_ext_csd() and reverts
 * width/voltage/mode on any failure before trying the next candidate. */
2097 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2100 const struct mode_width_tuning *mwt;
2101 const struct ext_csd_bus_width *ecbw;
2104 mmc_dump_capabilities("mmc", card_caps);
2105 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts are fixed at 1-bit legacy mode; nothing to negotiate. */
2108 if (mmc_host_is_spi(mmc)) {
2109 mmc_set_bus_width(mmc, 1);
2110 mmc_select_mode(mmc, MMC_LEGACY);
2111 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2115 /* Restrict card's capabilities by what the host can do */
2116 card_caps &= mmc->host_caps;
2118 /* Only version 4 of MMC supports wider bus widths */
2119 if (mmc->version < MMC_VERSION_4)
2122 if (!mmc->ext_csd) {
2123 pr_debug("No ext_csd found!\n"); /* this should never happen */
2127 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2128 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
2129 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2131 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2132 * before doing anything else, since a transition from either of
2133 * the HS200/HS400 mode directly to legacy mode is not supported.
2135 if (mmc->selected_mode == MMC_HS_200 ||
2136 mmc->selected_mode == MMC_HS_400 ||
2137 mmc->selected_mode == MMC_HS_400_ES)
2138 mmc_set_card_speed(mmc, MMC_HS, true);
2141 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2143 for_each_mmc_mode_by_pref(card_caps, mwt) {
2144 for_each_supported_width(card_caps & mwt->widths,
2145 mmc_is_mode_ddr(mwt->mode), ecbw) {
2146 enum mmc_voltage old_voltage;
2147 pr_debug("trying mode %s width %d (at %d MHz)\n",
2148 mmc_mode_name(mwt->mode),
2149 bus_width(ecbw->cap),
2150 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* Remember the voltage so a failed attempt can restore it below. */
2151 old_voltage = mmc->signal_voltage;
2152 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2153 MMC_ALL_SIGNAL_VOLTAGE);
2157 /* configure the bus width (card + host) */
2158 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2160 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2163 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400/HS400ES have dedicated multi-step selection sequences. */
2165 if (mwt->mode == MMC_HS_400) {
2166 err = mmc_select_hs400(mmc);
2168 printf("Select HS400 failed %d\n", err);
2171 } else if (mwt->mode == MMC_HS_400_ES) {
2172 err = mmc_select_hs400es(mmc);
2174 printf("Select HS400ES failed %d\n",
2179 /* configure the bus speed (card) */
2180 err = mmc_set_card_speed(mmc, mwt->mode, false);
2185 * configure the bus width AND the ddr mode
2186 * (card). The host side will be taken care
2187 * of in the next step
2189 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2190 err = mmc_switch(mmc,
2191 EXT_CSD_CMD_SET_NORMAL,
2193 ecbw->ext_csd_bits);
2198 /* configure the bus mode (host) */
2199 mmc_select_mode(mmc, mwt->mode);
2200 mmc_set_clock(mmc, mmc->tran_speed,
2202 #ifdef MMC_SUPPORTS_TUNING
2204 /* execute tuning if needed */
2206 err = mmc_execute_tuning(mmc,
2209 pr_debug("tuning failed : %d\n", err);
2216 /* do a transfer to check the configuration */
2217 err = mmc_read_and_compare_ext_csd(mmc);
/* Failure path: restore voltage, 1-bit width and legacy mode. */
2221 mmc_set_signal_voltage(mmc, old_voltage);
2222 /* if an error occurred, revert to a safer bus mode */
2223 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2224 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2225 mmc_select_mode(mmc, MMC_LEGACY);
2226 mmc_set_bus_width(mmc, 1);
2230 pr_err("unable to select a mode : %d\n", err);
2236 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY avoids malloc: EXT_CSD is kept in this static backup buffer. */
2237 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/* Parse the MMC v4+ EXT_CSD register: version, capacity, partition layout,
 * boot/RPMB/GP partition sizes, erase/WP group sizes and switch timeouts.
 * No-op for SD cards and pre-v4 MMC. */
2240 static int mmc_startup_v4(struct mmc *mmc)
2244 bool has_parts = false;
2245 bool part_completed;
2246 static const u32 mmc_versions[] = {
2258 #if CONFIG_IS_ENABLED(MMC_TINY)
2259 u8 *ext_csd = ext_csd_bkup;
2261 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2265 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2267 err = mmc_send_ext_csd(mmc, ext_csd);
2271 /* store the ext csd for future reference */
2273 mmc->ext_csd = ext_csd;
2275 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2277 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2280 /* check ext_csd version and capacity */
2281 err = mmc_send_ext_csd(mmc, ext_csd);
2285 /* store the ext csd for future reference */
2287 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN)
2290 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD_REV indexes the mmc_versions table; reject unknown revisions. */
2292 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2295 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2297 if (mmc->version >= MMC_VERSION_4_2) {
2299 * According to the JEDEC Standard, the value of
2300 * ext_csd's capacity is valid if the value is more
2303 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2304 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2305 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2306 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2307 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT is only trusted above 2 GiB (see JEDEC note above). */
2308 if ((capacity >> 20) > 2 * 1024)
2309 mmc->capacity_user = capacity;
2312 if (mmc->version >= MMC_VERSION_4_5)
2313 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2315 /* The partition data may be non-zero but it is only
2316 * effective if PARTITION_SETTING_COMPLETED is set in
2317 * EXT_CSD, so ignore any data if this bit is not set,
2318 * except for enabling the high-capacity group size
2319 * definition (see below).
2321 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2322 EXT_CSD_PARTITION_SETTING_COMPLETED);
2324 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2325 /* Some eMMC set the value too low so set a minimum */
2326 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2327 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2329 /* store the partition info of emmc */
2330 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2331 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2332 ext_csd[EXT_CSD_BOOT_MULT])
2333 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2334 if (part_completed &&
2335 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2336 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* Boot and RPMB partition sizes come in 128 KiB units (<< 17). */
2338 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2340 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* Four general-purpose partitions, each sized by a 3-byte multiplier. */
2342 for (i = 0; i < 4; i++) {
2343 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2344 uint mult = (ext_csd[idx + 2] << 16) +
2345 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2348 if (!part_completed)
2350 mmc->capacity_gp[i] = mult;
2351 mmc->capacity_gp[i] *=
2352 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2353 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2354 mmc->capacity_gp[i] <<= 19;
2357 #ifndef CONFIG_SPL_BUILD
2358 if (part_completed) {
2359 mmc->enh_user_size =
2360 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2361 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2362 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2363 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2364 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2365 mmc->enh_user_size <<= 19;
2366 mmc->enh_user_start =
2367 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2368 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2369 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2370 ext_csd[EXT_CSD_ENH_START_ADDR];
2371 if (mmc->high_capacity)
2372 mmc->enh_user_start <<= 9;
2377 * Host needs to enable ERASE_GRP_DEF bit if device is
2378 * partitioned. This bit will be lost every time after a reset
2379 * or power off. This will affect erase size.
2383 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2384 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2387 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2388 EXT_CSD_ERASE_GROUP_DEF, 1);
/* Mirror the switch into our cached copy so later reads agree. */
2393 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2396 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2397 #if CONFIG_IS_ENABLED(MMC_WRITE)
2398 /* Read out group size from ext_csd */
2399 mmc->erase_grp_size =
2400 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2403 * if high capacity and partition setting completed
2404 * SEC_COUNT is valid even if it is smaller than 2 GiB
2405 * JEDEC Standard JESD84-B45, 6.2.4
2407 if (mmc->high_capacity && part_completed) {
2408 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2409 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2410 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2411 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2412 capacity *= MMC_MAX_BLOCK_LEN;
2413 mmc->capacity_user = capacity;
2416 #if CONFIG_IS_ENABLED(MMC_WRITE)
2418 /* Calculate the group size from the csd value. */
2419 int erase_gsz, erase_gmul;
2421 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2422 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2423 mmc->erase_grp_size = (erase_gsz + 1)
2427 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2428 mmc->hc_wp_grp_size = 1024
2429 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2430 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2433 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2438 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* Error path (non-TINY): drop the cached EXT_CSD copy. */
2441 mmc->ext_csd = NULL;
/* Bring an identified card to transfer state and fill in the mmc/blk
 * descriptors: read CID/CSD, set RCA, decode legacy speed and block sizes,
 * select the card, run v4 EXT_CSD parsing, negotiate mode/width, and
 * populate the block-device description (vendor/product/revision). */
2446 static int mmc_startup(struct mmc *mmc)
2452 struct blk_desc *bdesc;
2454 #ifdef CONFIG_MMC_SPI_CRC_ON
2455 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2456 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2457 cmd.resp_type = MMC_RSP_R1;
2459 err = mmc_send_cmd(mmc, &cmd, NULL);
2465 /* Put the Card in Identify Mode */
2466 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2467 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2468 cmd.resp_type = MMC_RSP_R2;
/* Some cards need the CID command retried (MMC_QUIRK_RETRY_SEND_CID). */
2471 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
2475 memcpy(mmc->cid, cmd.response, 16);
2478 * For MMC cards, set the Relative Address.
2479 * For SD cards, get the Relative Address.
2480 * This also puts the cards into Standby State
2482 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2483 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2484 cmd.cmdarg = mmc->rca << 16;
2485 cmd.resp_type = MMC_RSP_R6;
2487 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD assigns the RCA; take it from the R6 response. */
2493 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2496 /* Get the Card-Specific Data */
2497 cmd.cmdidx = MMC_CMD_SEND_CSD;
2498 cmd.resp_type = MMC_RSP_R2;
2499 cmd.cmdarg = mmc->rca << 16;
2501 err = mmc_send_cmd(mmc, &cmd, NULL);
2506 mmc->csd[0] = cmd.response[0];
2507 mmc->csd[1] = cmd.response[1];
2508 mmc->csd[2] = cmd.response[2];
2509 mmc->csd[3] = cmd.response[3];
/* SPEC_VERS field of the CSD gives the MMC spec version if not known yet. */
2511 if (mmc->version == MMC_VERSION_UNKNOWN) {
2512 int version = (cmd.response[0] >> 26) & 0xf;
2516 mmc->version = MMC_VERSION_1_2;
2519 mmc->version = MMC_VERSION_1_4;
2522 mmc->version = MMC_VERSION_2_2;
2525 mmc->version = MMC_VERSION_3;
2528 mmc->version = MMC_VERSION_4;
2531 mmc->version = MMC_VERSION_1_2;
2536 /* divide frequency by 10, since the mults are 10x bigger */
2537 freq = fbase[(cmd.response[0] & 0x7)];
2538 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2540 mmc->legacy_speed = freq * mult;
2541 mmc_select_mode(mmc, MMC_LEGACY);
2543 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2544 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2545 #if CONFIG_IS_ENABLED(MMC_WRITE)
2548 mmc->write_bl_len = mmc->read_bl_len;
2550 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* Capacity from CSD: high-capacity (CSD v2) vs standard (CSD v1) layout. */
2553 if (mmc->high_capacity) {
2554 csize = (mmc->csd[1] & 0x3f) << 16
2555 | (mmc->csd[2] & 0xffff0000) >> 16;
2558 csize = (mmc->csd[1] & 0x3ff) << 2
2559 | (mmc->csd[2] & 0xc0000000) >> 30;
2560 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2563 mmc->capacity_user = (csize + 1) << (cmult + 2);
2564 mmc->capacity_user *= mmc->read_bl_len;
2565 mmc->capacity_boot = 0;
2566 mmc->capacity_rpmb = 0;
2567 for (i = 0; i < 4; i++)
2568 mmc->capacity_gp[i] = 0;
/* Clamp block lengths to the driver's MMC_MAX_BLOCK_LEN limit. */
2570 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2571 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2573 #if CONFIG_IS_ENABLED(MMC_WRITE)
2574 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2575 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the DSR only when the card declares it and a value was set. */
2578 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2579 cmd.cmdidx = MMC_CMD_SET_DSR;
2580 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2581 cmd.resp_type = MMC_RSP_NONE;
2582 if (mmc_send_cmd(mmc, &cmd, NULL))
2583 pr_warn("MMC: SET_DSR failed\n");
2586 /* Select the card, and put it into Transfer Mode */
2587 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2588 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2589 cmd.resp_type = MMC_RSP_R1;
2590 cmd.cmdarg = mmc->rca << 16;
2591 err = mmc_send_cmd(mmc, &cmd, NULL);
2598 * For SD, its erase group is always one sector
2600 #if CONFIG_IS_ENABLED(MMC_WRITE)
2601 mmc->erase_grp_size = 1;
2603 mmc->part_config = MMCPART_NOAVAILABLE;
/* MMC v4+: read EXT_CSD and derive capacities/partitions from it. */
2605 err = mmc_startup_v4(mmc);
2609 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2613 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, skip mode negotiation entirely. */
2614 mmc_set_clock(mmc, mmc->legacy_speed, false);
2615 mmc_select_mode(mmc, MMC_LEGACY);
2616 mmc_set_bus_width(mmc, 1);
2619 err = sd_get_capabilities(mmc);
2622 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2624 err = mmc_get_capabilities(mmc);
2627 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2633 mmc->best_mode = mmc->selected_mode;
2635 /* Fix the block length for DDR mode */
2636 if (mmc->ddr_mode) {
2637 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2638 #if CONFIG_IS_ENABLED(MMC_WRITE)
2639 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2643 /* fill in device description */
2644 bdesc = mmc_get_blk_desc(mmc);
2648 bdesc->blksz = mmc->read_bl_len;
2649 bdesc->log2blksz = LOG2(bdesc->blksz);
2650 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2651 #if !defined(CONFIG_SPL_BUILD) || \
2652 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2653 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* Decode vendor/product/revision strings from the raw CID words. */
2654 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2655 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2656 (mmc->cid[3] >> 16) & 0xffff);
2657 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2658 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2659 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2660 (mmc->cid[2] >> 24) & 0xff);
2661 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2662 (mmc->cid[2] >> 16) & 0xf);
2664 bdesc->vendor[0] = 0;
2665 bdesc->product[0] = 0;
2666 bdesc->revision[0] = 0;
2669 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/* SD CMD8 (SEND_IF_COND): probe for an SD v2 card. The 0xaa check pattern
 * must be echoed back; a valid echo marks the card as SD_VERSION_2. */
2676 static int mmc_send_if_cond(struct mmc *mmc)
2681 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2682 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2683 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2684 cmd.resp_type = MMC_RSP_R7;
2686 err = mmc_send_cmd(mmc, &cmd, NULL);
/* Card must echo the check pattern back in the R7 response. */
2691 if ((cmd.response[0] & 0xff) != 0xaa)
2694 mmc->version = SD_VERSION_2;
2699 #if !CONFIG_IS_ENABLED(DM_MMC)
2700 /* board-specific MMC power initializations. */
2701 __weak void board_mmc_power_init(void)
/* Locate the card's power supplies: vmmc/vqmmc regulators under driver
 * model, or the board hook otherwise. Missing supplies are non-fatal. */
2706 static int mmc_power_init(struct mmc *mmc)
2708 #if CONFIG_IS_ENABLED(DM_MMC)
2709 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2712 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2715 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2717 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2718 &mmc->vqmmc_supply);
2720 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2722 #else /* !CONFIG_DM_MMC */
2724 * Driver model should use a regulator, as above, rather than calling
2725 * out to board code.
2727 board_mmc_power_init();
2733 * put the host in the initial state:
2734 * - turn on Vdd (card power supply)
2735 * - configure the bus width and clock to minimal values
2737 static void mmc_set_initial_state(struct mmc *mmc)
2741 /* First try to set 3.3V. If it fails set to 1.8V */
2742 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2744 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2746 pr_warn("mmc: failed to set signal voltage\n");
/* Start from the lowest common denominator: legacy mode, 1-bit bus. */
2748 mmc_select_mode(mmc, MMC_LEGACY);
2749 mmc_set_bus_width(mmc, 1);
2750 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card's Vdd regulator (DM + regulator builds only).
 * -EACCES (e.g. always-on regulator) is tolerated. */
2753 static int mmc_power_on(struct mmc *mmc)
2755 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2756 if (mmc->vmmc_supply) {
2757 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2759 if (ret && ret != -EACCES) {
2760 printf("Error enabling VMMC supply : %d\n", ret);
/* Gate the clock, then disable Vdd where a regulator is available. */
2768 static int mmc_power_off(struct mmc *mmc)
2770 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2771 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2772 if (mmc->vmmc_supply) {
2773 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2775 if (ret && ret != -EACCES) {
2776 pr_debug("Error disabling VMMC supply : %d\n", ret);
/* Full power cycle: off, host-specific cycle hook, delay, then on. */
2784 static int mmc_power_cycle(struct mmc *mmc)
2788 ret = mmc_power_off(mmc);
2792 ret = mmc_host_power_cycle(mmc);
2797 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2798 * to be on the safer side.
2801 return mmc_power_on(mmc);
/* Power up the card and obtain its operating conditions: power-cycle (or
 * disable UHS if that is not possible), reset with CMD0, probe SD v2 via
 * CMD8, then try SD ACMD41; on timeout fall back to MMC CMD1. The quiet
 * flag suppresses the "no response" error print. */
2804 int mmc_get_op_cond(struct mmc *mmc, bool quiet)
2806 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2812 err = mmc_power_init(mmc);
2816 #ifdef CONFIG_MMC_QUIRKS
2817 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2818 MMC_QUIRK_RETRY_SEND_CID |
2819 MMC_QUIRK_RETRY_APP_CMD;
2822 err = mmc_power_cycle(mmc);
2825 * if power cycling is not supported, we should not try
2826 * to use the UHS modes, because we wouldn't be able to
2827 * recover from an error during the UHS initialization.
2829 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2831 mmc->host_caps &= ~UHS_CAPS;
2832 err = mmc_power_on(mmc);
2837 #if CONFIG_IS_ENABLED(DM_MMC)
2839 * Re-initialization is needed to clear old configuration for
2842 err = mmc_reinit(mmc);
2844 /* made sure it's not NULL earlier */
2845 err = mmc->cfg->ops->init(mmc);
2852 mmc_set_initial_state(mmc);
2854 /* Reset the Card */
2855 err = mmc_go_idle(mmc);
2860 /* The internal partition reset to user partition(0) at every CMD0 */
2861 mmc_get_blk_desc(mmc)->hwpart = 0;
2863 /* Test for SD version 2 */
2864 err = mmc_send_if_cond(mmc);
2866 /* Now try to get the SD card's operating condition */
2867 err = sd_send_op_cond(mmc, uhs_en);
/* A failed UHS negotiation gets one retry after a power cycle. */
2868 if (err && uhs_en) {
2870 mmc_power_cycle(mmc);
2874 /* If the command timed out, we check for an MMC card */
2875 if (err == -ETIMEDOUT) {
2876 err = mmc_send_op_cond(mmc);
2879 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2881 pr_err("Card did not respond to voltage select! : %d\n", err);
/* First stage of init: build the host capability mask (optionally narrowed
 * to a user-forced speed mode), check card presence, and run
 * mmc_get_op_cond(). Completed later by mmc_complete_init(). */
2890 int mmc_start_init(struct mmc *mmc)
2896 * all hosts are capable of 1 bit bus-width and able to use the legacy
2899 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
/* If the user forced a speed mode, keep only that mode (plus legacy). */
2902 if (IS_ENABLED(CONFIG_MMC_SPEED_MODE_SET)) {
2903 if (mmc->user_speed_mode != MMC_MODES_END) {
2906 if (mmc->host_caps & MMC_CAP(mmc->user_speed_mode)) {
2907 /* Remove all existing speed capabilities */
2908 for (i = MMC_LEGACY; i < MMC_MODES_END; i++)
2909 mmc->host_caps &= ~MMC_CAP(i);
2910 mmc->host_caps |= (MMC_CAP(mmc->user_speed_mode)
2911 | MMC_CAP(MMC_LEGACY) |
2914 pr_err("bus_mode requested is not supported\n");
2919 #if CONFIG_IS_ENABLED(DM_MMC)
2920 mmc_deferred_probe(mmc);
2922 #if !defined(CONFIG_MMC_BROKEN_CD)
2923 no_card = mmc_getcd(mmc) == 0;
2927 #if !CONFIG_IS_ENABLED(DM_MMC)
2928 /* we pretend there's no card when init is NULL */
2929 no_card = no_card || (mmc->cfg->ops->init == NULL);
2933 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2934 pr_err("MMC: no card present\n");
2939 err = mmc_get_op_cond(mmc, false);
/* Success: remember init is underway so mmc_init() can finish it. */
2942 mmc->init_in_progress = 1;
/* Second stage of init: finish a pending op_cond negotiation (if any),
 * then run mmc_startup() to bring the card to transfer state. */
2947 static int mmc_complete_init(struct mmc *mmc)
2951 mmc->init_in_progress = 0;
2952 if (mmc->op_cond_pending)
2953 err = mmc_complete_op_cond(mmc);
2956 err = mmc_startup(mmc);
/* Public entry: run both init stages (start stage only if not already
 * in progress) and report the total elapsed time. */
2964 int mmc_init(struct mmc *mmc)
2967 __maybe_unused ulong start;
2968 #if CONFIG_IS_ENABLED(DM_MMC)
2969 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2976 start = get_timer(0);
2978 if (!mmc->init_in_progress)
2979 err = mmc_start_init(mmc);
2982 err = mmc_complete_init(mmc);
2984 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2989 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2990 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2991 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/* Drop the card back out of high-speed modes by re-running mode selection
 * with the UHS (SD) or HS200/HS400 (eMMC) capabilities masked off. */
2992 int mmc_deinit(struct mmc *mmc)
3000 caps_filtered = mmc->card_caps &
3001 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
3002 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
3003 MMC_CAP(UHS_SDR104));
3005 return sd_select_mode_and_width(mmc, caps_filtered);
3007 caps_filtered = mmc->card_caps &
3008 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400) | MMC_CAP(MMC_HS_400_ES));
3010 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Store a DSR value to be programmed into the card during startup. */
3015 int mmc_set_dsr(struct mmc *mmc, u16 val)
3021 /* CPU-specific MMC initializations */
3022 __weak int cpu_mmc_init(struct bd_info *bis)
3027 /* board-specific MMC initializations. */
3028 __weak int board_mmc_init(struct bd_info *bis)
/* Request (or cancel) early initialization of this device before use. */
3033 void mmc_set_preinit(struct mmc *mmc, int preinit)
3035 mmc->preinit = preinit;
3038 #if CONFIG_IS_ENABLED(DM_MMC)
/* Driver-model probe: bind MMC devices in sequence order, then probe each
 * device in the uclass, reporting (but not aborting on) probe failures. */
3039 static int mmc_probe(struct bd_info *bis)
3043 struct udevice *dev;
3045 ret = uclass_get(UCLASS_MMC, &uc);
3050 * Try to add them in sequence order. Really with driver model we
3051 * should allow holes, but the current MMC list does not allow that.
3052 * So if we request 0, 1, 3 we will get 0, 1, 2.
3054 for (i = 0; ; i++) {
3055 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3059 uclass_foreach_dev(dev, uc) {
3060 ret = device_probe(dev);
3062 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM probe: delegate to the board hook. */
3068 static int mmc_probe(struct bd_info *bis)
3070 if (board_mmc_init(bis) < 0)
/* One-time subsystem init: probe all MMC devices and (outside SPL) print
 * the device list. Guarded so repeated calls are no-ops. */
3077 int mmc_initialize(struct bd_info *bis)
3079 static int initialized = 0;
3081 if (initialized) /* Avoid initializing mmc multiple times */
3085 #if !CONFIG_IS_ENABLED(BLK)
3086 #if !CONFIG_IS_ENABLED(MMC_TINY)
3090 ret = mmc_probe(bis);
3094 #ifndef CONFIG_SPL_BUILD
3095 print_mmc_devices(',');
3102 #if CONFIG_IS_ENABLED(DM_MMC)
/* Look up MMC device `num` (by sequence, falling back to uclass index) and
 * prepare its struct mmc, defaulting the user speed mode to "unset". */
3103 int mmc_init_device(int num)
3105 struct udevice *dev;
3109 if (uclass_get_device_by_seq(UCLASS_MMC, num, &dev)) {
3110 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3115 m = mmc_get_mmc_dev(dev);
3119 /* Initialising user set speed mode */
3120 m->user_speed_mode = MMC_MODES_END;
3129 #ifdef CONFIG_CMD_BKOPS_ENABLE
/* Permanently enable manual background operations (BKOPS_EN) on an eMMC.
 * Checks EXT_CSD for support and for an already-set enable bit first. */
3130 int mmc_set_bkops_enable(struct mmc *mmc)
3133 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3135 err = mmc_send_ext_csd(mmc, ext_csd);
3137 puts("Could not get ext_csd register values\n");
3141 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3142 puts("Background operations not supported on device\n");
3143 return -EMEDIUMTYPE;
3146 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3147 puts("Background operations already enabled\n");
/* NOTE(review): BKOPS_EN is a write-once EXT_CSD field per JEDEC — this
 * switch is irreversible on most parts; confirm before enabling. */
3151 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3153 puts("Failed to enable manual background operations\n");
3157 puts("Enabled manual background operations\n");
3163 __weak int mmc_get_env_dev(void)
3165 #ifdef CONFIG_SYS_MMC_ENV_DEV
3166 return CONFIG_SYS_MMC_ENV_DEV;