// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2008, Freescale Semiconductor, Inc
 *
 * Based vaguely on the Linux code
 */
16 #include <dm/device-internal.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <power/regulator.h>
25 #include <linux/list.h>
27 #include "mmc_private.h"
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
31 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
33 #if !CONFIG_IS_ENABLED(DM_MMC)
35 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
37 if (mmc->cfg->ops->wait_dat0)
38 return mmc->cfg->ops->wait_dat0(mmc, state, timeout_us);
43 __weak int board_mmc_getwp(struct mmc *mmc)
48 int mmc_getwp(struct mmc *mmc)
52 wp = board_mmc_getwp(mmc);
55 if (mmc->cfg->ops->getwp)
56 wp = mmc->cfg->ops->getwp(mmc);
64 __weak int board_mmc_getcd(struct mmc *mmc)
70 #ifdef CONFIG_MMC_TRACE
71 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
73 printf("CMD_SEND:%d\n", cmd->cmdidx);
74 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
77 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
83 printf("\t\tRET\t\t\t %d\n", ret);
85 switch (cmd->resp_type) {
87 printf("\t\tMMC_RSP_NONE\n");
90 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
94 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
98 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
102 printf("\t\t \t\t 0x%08x \n",
104 printf("\t\t \t\t 0x%08x \n",
107 printf("\t\t\t\t\tDUMPING DATA\n");
108 for (i = 0; i < 4; i++) {
110 printf("\t\t\t\t\t%03d - ", i*4);
111 ptr = (u8 *)&cmd->response[i];
113 for (j = 0; j < 4; j++)
114 printf("%02x ", *ptr--);
119 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
123 printf("\t\tERROR MMC rsp not supported\n");
129 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
133 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
134 printf("CURR STATE:%d\n", status);
138 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG) || CONFIG_VAL(LOGLEVEL) >= LOGL_DEBUG
139 const char *mmc_mode_name(enum bus_mode mode)
141 static const char *const names[] = {
142 [MMC_LEGACY] = "MMC legacy",
143 [MMC_HS] = "MMC High Speed (26MHz)",
144 [SD_HS] = "SD High Speed (50MHz)",
145 [UHS_SDR12] = "UHS SDR12 (25MHz)",
146 [UHS_SDR25] = "UHS SDR25 (50MHz)",
147 [UHS_SDR50] = "UHS SDR50 (100MHz)",
148 [UHS_SDR104] = "UHS SDR104 (208MHz)",
149 [UHS_DDR50] = "UHS DDR50 (50MHz)",
150 [MMC_HS_52] = "MMC High Speed (52MHz)",
151 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
152 [MMC_HS_200] = "HS200 (200MHz)",
153 [MMC_HS_400] = "HS400 (200MHz)",
154 [MMC_HS_400_ES] = "HS400ES (200MHz)",
157 if (mode >= MMC_MODES_END)
158 return "Unknown mode";
164 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
166 static const int freqs[] = {
167 [MMC_LEGACY] = 25000000,
170 [MMC_HS_52] = 52000000,
171 [MMC_DDR_52] = 52000000,
172 [UHS_SDR12] = 25000000,
173 [UHS_SDR25] = 50000000,
174 [UHS_SDR50] = 100000000,
175 [UHS_DDR50] = 50000000,
176 [UHS_SDR104] = 208000000,
177 [MMC_HS_200] = 200000000,
178 [MMC_HS_400] = 200000000,
179 [MMC_HS_400_ES] = 200000000,
182 if (mode == MMC_LEGACY)
183 return mmc->legacy_speed;
184 else if (mode >= MMC_MODES_END)
190 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
192 mmc->selected_mode = mode;
193 mmc->tran_speed = mmc_mode2freq(mmc, mode);
194 mmc->ddr_mode = mmc_is_mode_ddr(mode);
195 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
196 mmc->tran_speed / 1000000);
200 #if !CONFIG_IS_ENABLED(DM_MMC)
201 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
205 mmmc_trace_before_send(mmc, cmd);
206 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
207 mmmc_trace_after_send(mmc, cmd, ret);
214 * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
216 * @dev: device to receive the command
217 * @cmd: command to send
218 * @data: additional data to send/receive
219 * @retries: how many times to retry; mmc_send_cmd is always called at least
221 * Return: 0 if ok, -ve on error
223 static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
224 struct mmc_data *data, uint retries)
229 ret = mmc_send_cmd(mmc, cmd, data);
230 } while (ret && retries--);
236 * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
237 * specific quirk is enabled
239 * @dev: device to receive the command
240 * @cmd: command to send
241 * @data: additional data to send/receive
242 * @quirk: retry only if this quirk is enabled
243 * @retries: how many times to retry; mmc_send_cmd is always called at least
245 * Return: 0 if ok, -ve on error
247 static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
248 struct mmc_data *data, u32 quirk, uint retries)
250 if (IS_ENABLED(CONFIG_MMC_QUIRKS) && mmc->quirks & quirk)
251 return mmc_send_cmd_retry(mmc, cmd, data, retries);
253 return mmc_send_cmd(mmc, cmd, data);
256 int mmc_send_status(struct mmc *mmc, unsigned int *status)
261 cmd.cmdidx = MMC_CMD_SEND_STATUS;
262 cmd.resp_type = MMC_RSP_R1;
263 if (!mmc_host_is_spi(mmc))
264 cmd.cmdarg = mmc->rca << 16;
266 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
267 mmc_trace_state(mmc, &cmd);
269 *status = cmd.response[0];
274 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
279 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
284 err = mmc_send_status(mmc, &status);
288 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
289 (status & MMC_STATUS_CURR_STATE) !=
293 if (status & MMC_STATUS_MASK) {
294 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
295 pr_err("Status Error: 0x%08x\n", status);
300 if (timeout_ms-- <= 0)
306 if (timeout_ms <= 0) {
307 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
308 pr_err("Timeout waiting card ready\n");
316 int mmc_set_blocklen(struct mmc *mmc, int len)
323 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
324 cmd.resp_type = MMC_RSP_R1;
327 return mmc_send_cmd_quirks(mmc, &cmd, NULL,
328 MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
331 #ifdef MMC_SUPPORTS_TUNING
332 static const u8 tuning_blk_pattern_4bit[] = {
333 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
334 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
335 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
336 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
337 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
338 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
339 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
340 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
343 static const u8 tuning_blk_pattern_8bit[] = {
344 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
345 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
346 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
347 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
348 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
349 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
350 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
351 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
352 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
353 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
354 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
355 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
356 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
357 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
358 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
359 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
362 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
365 struct mmc_data data;
366 const u8 *tuning_block_pattern;
369 if (mmc->bus_width == 8) {
370 tuning_block_pattern = tuning_blk_pattern_8bit;
371 size = sizeof(tuning_blk_pattern_8bit);
372 } else if (mmc->bus_width == 4) {
373 tuning_block_pattern = tuning_blk_pattern_4bit;
374 size = sizeof(tuning_blk_pattern_4bit);
379 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
383 cmd.resp_type = MMC_RSP_R1;
385 data.dest = (void *)data_buf;
387 data.blocksize = size;
388 data.flags = MMC_DATA_READ;
390 err = mmc_send_cmd(mmc, &cmd, &data);
394 if (memcmp(data_buf, tuning_block_pattern, size))
401 int mmc_send_stop_transmission(struct mmc *mmc, bool write)
405 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
408 * JEDEC Standard No. 84-B51 Page 126
409 * CMD12 STOP_TRANSMISSION R1/R1b[3]
410 * NOTE 3 R1 for read cases and R1b for write cases.
412 * Physical Layer Simplified Specification Version 9.00
413 * 7.3.1.3 Detailed Command Description
416 cmd.resp_type = (IS_SD(mmc) || write) ? MMC_RSP_R1b : MMC_RSP_R1;
418 return mmc_send_cmd(mmc, &cmd, NULL);
421 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
425 struct mmc_data data;
428 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
430 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
432 if (mmc->high_capacity)
435 cmd.cmdarg = start * mmc->read_bl_len;
437 cmd.resp_type = MMC_RSP_R1;
440 data.blocks = blkcnt;
441 data.blocksize = mmc->read_bl_len;
442 data.flags = MMC_DATA_READ;
444 if (mmc_send_cmd(mmc, &cmd, &data))
448 if (mmc_send_stop_transmission(mmc, false)) {
449 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
450 pr_err("mmc fail to send stop cmd\n");
459 #if !CONFIG_IS_ENABLED(DM_MMC)
460 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
462 if (mmc->cfg->ops->get_b_max)
463 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
465 return mmc->cfg->b_max;
469 #if CONFIG_IS_ENABLED(BLK)
470 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
472 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
476 #if CONFIG_IS_ENABLED(BLK)
477 struct blk_desc *block_dev = dev_get_uclass_plat(dev);
479 int dev_num = block_dev->devnum;
481 lbaint_t cur, blocks_todo = blkcnt;
487 struct mmc *mmc = find_mmc_device(dev_num);
491 if (CONFIG_IS_ENABLED(MMC_TINY))
492 err = mmc_switch_part(mmc, block_dev->hwpart);
494 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
499 if ((start + blkcnt) > block_dev->lba) {
500 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
501 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
502 start + blkcnt, block_dev->lba);
507 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
508 pr_debug("%s: Failed to set blocklen\n", __func__);
512 b_max = mmc_get_b_max(mmc, dst, blkcnt);
515 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
516 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
517 pr_debug("%s: Failed to read blocks\n", __func__);
522 dst += cur * mmc->read_bl_len;
523 } while (blocks_todo > 0);
528 static int mmc_go_idle(struct mmc *mmc)
535 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
537 cmd.resp_type = MMC_RSP_NONE;
539 err = mmc_send_cmd(mmc, &cmd, NULL);
549 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
550 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
556 * Send CMD11 only if the request is to switch the card to
559 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
560 return mmc_set_signal_voltage(mmc, signal_voltage);
562 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
564 cmd.resp_type = MMC_RSP_R1;
566 err = mmc_send_cmd(mmc, &cmd, NULL);
570 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
574 * The card should drive cmd and dat[0:3] low immediately
575 * after the response of cmd11, but wait 100 us to be sure
577 err = mmc_wait_dat0(mmc, 0, 100);
584 * During a signal voltage level switch, the clock must be gated
585 * for 5 ms according to the SD spec
587 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
589 err = mmc_set_signal_voltage(mmc, signal_voltage);
593 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
595 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
598 * Failure to switch is indicated by the card holding
599 * dat[0:3] low. Wait for at least 1 ms according to spec
601 err = mmc_wait_dat0(mmc, 1, 1000);
611 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
618 cmd.cmdidx = MMC_CMD_APP_CMD;
619 cmd.resp_type = MMC_RSP_R1;
622 err = mmc_send_cmd(mmc, &cmd, NULL);
627 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
628 cmd.resp_type = MMC_RSP_R3;
631 * Most cards do not answer if some reserved bits
632 * in the ocr are set. However, Some controller
633 * can set bit 7 (reserved for low voltages), but
634 * how to manage low voltages SD card is not yet
637 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
638 (mmc->cfg->voltages & 0xff8000);
640 if (mmc->version == SD_VERSION_2)
641 cmd.cmdarg |= OCR_HCS;
644 cmd.cmdarg |= OCR_S18R;
646 err = mmc_send_cmd(mmc, &cmd, NULL);
651 if (cmd.response[0] & OCR_BUSY)
660 if (mmc->version != SD_VERSION_2)
661 mmc->version = SD_VERSION_1_0;
663 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
664 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
665 cmd.resp_type = MMC_RSP_R3;
668 err = mmc_send_cmd(mmc, &cmd, NULL);
674 mmc->ocr = cmd.response[0];
676 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
677 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
679 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
685 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
691 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
696 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
697 cmd.resp_type = MMC_RSP_R3;
699 if (use_arg && !mmc_host_is_spi(mmc))
700 cmd.cmdarg = OCR_HCS |
701 (mmc->cfg->voltages &
702 (mmc->ocr & OCR_VOLTAGE_MASK)) |
703 (mmc->ocr & OCR_ACCESS_MODE);
705 err = mmc_send_cmd(mmc, &cmd, NULL);
708 mmc->ocr = cmd.response[0];
712 static int mmc_send_op_cond(struct mmc *mmc)
718 /* Some cards seem to need this */
721 start = get_timer(0);
722 /* Asking to the card its capabilities */
724 err = mmc_send_op_cond_iter(mmc, i != 0);
728 /* exit if not busy (flag seems to be inverted) */
729 if (mmc->ocr & OCR_BUSY)
732 if (get_timer(start) > timeout)
736 mmc->op_cond_pending = 1;
740 static int mmc_complete_op_cond(struct mmc *mmc)
747 mmc->op_cond_pending = 0;
748 if (!(mmc->ocr & OCR_BUSY)) {
749 /* Some cards seem to need this */
752 start = get_timer(0);
754 err = mmc_send_op_cond_iter(mmc, 1);
757 if (mmc->ocr & OCR_BUSY)
759 if (get_timer(start) > timeout)
765 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
766 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
767 cmd.resp_type = MMC_RSP_R3;
770 err = mmc_send_cmd(mmc, &cmd, NULL);
775 mmc->ocr = cmd.response[0];
778 mmc->version = MMC_VERSION_UNKNOWN;
780 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
787 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
790 struct mmc_data data;
793 /* Get the Card Status Register */
794 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
795 cmd.resp_type = MMC_RSP_R1;
798 data.dest = (char *)ext_csd;
800 data.blocksize = MMC_MAX_BLOCK_LEN;
801 data.flags = MMC_DATA_READ;
803 err = mmc_send_cmd(mmc, &cmd, &data);
808 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
811 unsigned int status, start;
813 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
814 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
815 (index == EXT_CSD_PART_CONF);
818 if (mmc->gen_cmd6_time)
819 timeout_ms = mmc->gen_cmd6_time * 10;
821 if (is_part_switch && mmc->part_switch_time)
822 timeout_ms = mmc->part_switch_time * 10;
824 cmd.cmdidx = MMC_CMD_SWITCH;
825 cmd.resp_type = MMC_RSP_R1b;
826 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
830 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
834 start = get_timer(0);
836 /* poll dat0 for rdy/buys status */
837 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
838 if (ret && ret != -ENOSYS)
842 * In cases when neiter allowed to poll by using CMD13 nor we are
843 * capable of polling by using mmc_wait_dat0, then rely on waiting the
844 * stated timeout to be sufficient.
846 if (ret == -ENOSYS && !send_status) {
854 /* Finally wait until the card is ready or indicates a failure
855 * to switch. It doesn't hurt to use CMD13 here even if send_status
856 * is false, because by now (after 'timeout_ms' ms) the bus should be
860 ret = mmc_send_status(mmc, &status);
862 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
863 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
867 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA) &&
868 (status & MMC_STATUS_CURR_STATE) == MMC_STATE_TRANS)
871 } while (get_timer(start) < timeout_ms);
876 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
878 return __mmc_switch(mmc, set, index, value, true);
881 int mmc_boot_wp(struct mmc *mmc)
883 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
886 int mmc_boot_wp_single_partition(struct mmc *mmc, int partition)
891 value = EXT_CSD_BOOT_WP_B_PWR_WP_EN;
893 if (partition == 0) {
894 value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
895 ret = mmc_switch(mmc,
896 EXT_CSD_CMD_SET_NORMAL,
899 } else if (partition == 1) {
900 value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
901 value |= EXT_CSD_BOOT_WP_B_PWR_WP_SEC_SEL;
902 ret = mmc_switch(mmc,
903 EXT_CSD_CMD_SET_NORMAL,
907 ret = mmc_boot_wp(mmc);
913 #if !CONFIG_IS_ENABLED(MMC_TINY)
914 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
920 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
926 speed_bits = EXT_CSD_TIMING_HS;
928 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
930 speed_bits = EXT_CSD_TIMING_HS200;
933 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
935 speed_bits = EXT_CSD_TIMING_HS400;
938 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
940 speed_bits = EXT_CSD_TIMING_HS400;
944 speed_bits = EXT_CSD_TIMING_LEGACY;
950 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
951 speed_bits, !hsdowngrade);
955 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
956 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
958 * In case the eMMC is in HS200/HS400 mode and we are downgrading
959 * to HS mode, the card clock are still running much faster than
960 * the supported HS mode clock, so we can not reliably read out
961 * Extended CSD. Reconfigure the controller to run at HS mode.
964 mmc_select_mode(mmc, MMC_HS);
965 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
969 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
970 /* Now check to see that it worked */
971 err = mmc_send_ext_csd(mmc, test_csd);
975 /* No high-speed support */
976 if (!test_csd[EXT_CSD_HS_TIMING])
983 static int mmc_get_capabilities(struct mmc *mmc)
985 u8 *ext_csd = mmc->ext_csd;
988 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
990 if (mmc_host_is_spi(mmc))
993 /* Only version 4 supports high-speed */
994 if (mmc->version < MMC_VERSION_4)
998 pr_err("No ext_csd found!\n"); /* this should enver happen */
1002 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
1004 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
1005 mmc->cardtype = cardtype;
1007 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1008 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1009 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
1010 mmc->card_caps |= MMC_MODE_HS200;
1013 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
1014 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1015 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
1016 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
1017 mmc->card_caps |= MMC_MODE_HS400;
1020 if (cardtype & EXT_CSD_CARD_TYPE_52) {
1021 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
1022 mmc->card_caps |= MMC_MODE_DDR_52MHz;
1023 mmc->card_caps |= MMC_MODE_HS_52MHz;
1025 if (cardtype & EXT_CSD_CARD_TYPE_26)
1026 mmc->card_caps |= MMC_MODE_HS;
1028 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1029 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
1030 (mmc->card_caps & MMC_MODE_HS400)) {
1031 mmc->card_caps |= MMC_MODE_HS400_ES;
1039 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1043 mmc->capacity = mmc->capacity_user;
1047 mmc->capacity = mmc->capacity_boot;
1050 mmc->capacity = mmc->capacity_rpmb;
1056 mmc->capacity = mmc->capacity_gp[part_num - 4];
1062 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1067 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1073 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1075 (mmc->part_config & ~PART_ACCESS_MASK)
1076 | (part_num & PART_ACCESS_MASK));
1077 } while (ret && retry--);
1080 * Set the capacity if the switch succeeded or was intended
1081 * to return to representing the raw device.
1083 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1084 ret = mmc_set_capacity(mmc, part_num);
1085 mmc_get_blk_desc(mmc)->hwpart = part_num;
1091 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1092 int mmc_hwpart_config(struct mmc *mmc,
1093 const struct mmc_hwpart_conf *conf,
1094 enum mmc_hwpart_conf_mode mode)
1099 u32 gp_size_mult[4];
1100 u32 max_enh_size_mult;
1101 u32 tot_enh_size_mult = 0;
1104 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1106 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1109 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1110 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1111 return -EMEDIUMTYPE;
1114 if (!(mmc->part_support & PART_SUPPORT)) {
1115 pr_err("Card does not support partitioning\n");
1116 return -EMEDIUMTYPE;
1119 if (!mmc->hc_wp_grp_size) {
1120 pr_err("Card does not define HC WP group size\n");
1121 return -EMEDIUMTYPE;
1124 /* check partition alignment and total enhanced size */
1125 if (conf->user.enh_size) {
1126 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1127 conf->user.enh_start % mmc->hc_wp_grp_size) {
1128 pr_err("User data enhanced area not HC WP group "
1132 part_attrs |= EXT_CSD_ENH_USR;
1133 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1134 if (mmc->high_capacity) {
1135 enh_start_addr = conf->user.enh_start;
1137 enh_start_addr = (conf->user.enh_start << 9);
1143 tot_enh_size_mult += enh_size_mult;
1145 for (pidx = 0; pidx < 4; pidx++) {
1146 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1147 pr_err("GP%i partition not HC WP group size "
1148 "aligned\n", pidx+1);
1151 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1152 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1153 part_attrs |= EXT_CSD_ENH_GP(pidx);
1154 tot_enh_size_mult += gp_size_mult[pidx];
1158 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1159 pr_err("Card does not support enhanced attribute\n");
1160 return -EMEDIUMTYPE;
1163 err = mmc_send_ext_csd(mmc, ext_csd);
1168 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1169 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1170 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1171 if (tot_enh_size_mult > max_enh_size_mult) {
1172 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1173 tot_enh_size_mult, max_enh_size_mult);
1174 return -EMEDIUMTYPE;
1177 /* The default value of EXT_CSD_WR_REL_SET is device
1178 * dependent, the values can only be changed if the
1179 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1180 * changed only once and before partitioning is completed. */
1181 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1182 if (conf->user.wr_rel_change) {
1183 if (conf->user.wr_rel_set)
1184 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1186 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1188 for (pidx = 0; pidx < 4; pidx++) {
1189 if (conf->gp_part[pidx].wr_rel_change) {
1190 if (conf->gp_part[pidx].wr_rel_set)
1191 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1193 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1197 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1198 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1199 puts("Card does not support host controlled partition write "
1200 "reliability settings\n");
1201 return -EMEDIUMTYPE;
1204 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1205 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1206 pr_err("Card already partitioned\n");
1210 if (mode == MMC_HWPART_CONF_CHECK)
1213 /* Partitioning requires high-capacity size definitions */
1214 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1215 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1216 EXT_CSD_ERASE_GROUP_DEF, 1);
1221 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1223 #if CONFIG_IS_ENABLED(MMC_WRITE)
1224 /* update erase group size to be high-capacity */
1225 mmc->erase_grp_size =
1226 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1231 /* all OK, write the configuration */
1232 for (i = 0; i < 4; i++) {
1233 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1234 EXT_CSD_ENH_START_ADDR+i,
1235 (enh_start_addr >> (i*8)) & 0xFF);
1239 for (i = 0; i < 3; i++) {
1240 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1241 EXT_CSD_ENH_SIZE_MULT+i,
1242 (enh_size_mult >> (i*8)) & 0xFF);
1246 for (pidx = 0; pidx < 4; pidx++) {
1247 for (i = 0; i < 3; i++) {
1248 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1249 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1250 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1255 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1256 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1260 if (mode == MMC_HWPART_CONF_SET)
1263 /* The WR_REL_SET is a write-once register but shall be
1264 * written before setting PART_SETTING_COMPLETED. As it is
1265 * write-once we can only write it when completing the
1267 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1268 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1269 EXT_CSD_WR_REL_SET, wr_rel_set);
1274 /* Setting PART_SETTING_COMPLETED confirms the partition
1275 * configuration but it only becomes effective after power
1276 * cycle, so we do not adjust the partition related settings
1277 * in the mmc struct. */
1279 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1280 EXT_CSD_PARTITION_SETTING,
1281 EXT_CSD_PARTITION_SETTING_COMPLETED);
1289 #if !CONFIG_IS_ENABLED(DM_MMC)
1290 int mmc_getcd(struct mmc *mmc)
1294 cd = board_mmc_getcd(mmc);
1297 if (mmc->cfg->ops->getcd)
1298 cd = mmc->cfg->ops->getcd(mmc);
1307 #if !CONFIG_IS_ENABLED(MMC_TINY)
1308 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1311 struct mmc_data data;
1313 /* Switch the frequency */
1314 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1315 cmd.resp_type = MMC_RSP_R1;
1316 cmd.cmdarg = (mode << 31) | 0xffffff;
1317 cmd.cmdarg &= ~(0xf << (group * 4));
1318 cmd.cmdarg |= value << (group * 4);
1320 data.dest = (char *)resp;
1321 data.blocksize = 64;
1323 data.flags = MMC_DATA_READ;
1325 return mmc_send_cmd(mmc, &cmd, &data);
1328 static int sd_get_capabilities(struct mmc *mmc)
1332 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1333 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1334 struct mmc_data data;
1336 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1340 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1342 if (mmc_host_is_spi(mmc))
1345 /* Read the SCR to find out if this card supports higher speeds */
1346 cmd.cmdidx = MMC_CMD_APP_CMD;
1347 cmd.resp_type = MMC_RSP_R1;
1348 cmd.cmdarg = mmc->rca << 16;
1350 err = mmc_send_cmd(mmc, &cmd, NULL);
1355 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1356 cmd.resp_type = MMC_RSP_R1;
1359 data.dest = (char *)scr;
1362 data.flags = MMC_DATA_READ;
1364 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
1369 mmc->scr[0] = __be32_to_cpu(scr[0]);
1370 mmc->scr[1] = __be32_to_cpu(scr[1]);
1372 switch ((mmc->scr[0] >> 24) & 0xf) {
1374 mmc->version = SD_VERSION_1_0;
1377 mmc->version = SD_VERSION_1_10;
1380 mmc->version = SD_VERSION_2;
1381 if ((mmc->scr[0] >> 15) & 0x1)
1382 mmc->version = SD_VERSION_3;
1385 mmc->version = SD_VERSION_1_0;
1389 if (mmc->scr[0] & SD_DATA_4BIT)
1390 mmc->card_caps |= MMC_MODE_4BIT;
1392 /* Version 1.0 doesn't support switching */
1393 if (mmc->version == SD_VERSION_1_0)
1398 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1399 (u8 *)switch_status);
1404 /* The high-speed function is busy. Try again */
1405 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1409 /* If high-speed isn't supported, we return */
1410 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1411 mmc->card_caps |= MMC_CAP(SD_HS);
1413 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1414 /* Version before 3.0 don't support UHS modes */
1415 if (mmc->version < SD_VERSION_3)
1418 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1419 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1420 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1421 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1422 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1423 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1424 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1425 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1426 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1427 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1428 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1434 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1438 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1441 /* SD version 1.00 and 1.01 does not support CMD 6 */
1442 if (mmc->version == SD_VERSION_1_0)
1447 speed = UHS_SDR12_BUS_SPEED;
1450 speed = HIGH_SPEED_BUS_SPEED;
1452 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1454 speed = UHS_SDR12_BUS_SPEED;
1457 speed = UHS_SDR25_BUS_SPEED;
1460 speed = UHS_SDR50_BUS_SPEED;
1463 speed = UHS_DDR50_BUS_SPEED;
1466 speed = UHS_SDR104_BUS_SPEED;
1473 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1477 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1483 static int sd_select_bus_width(struct mmc *mmc, int w)
1488 if ((w != 4) && (w != 1))
1491 cmd.cmdidx = MMC_CMD_APP_CMD;
1492 cmd.resp_type = MMC_RSP_R1;
1493 cmd.cmdarg = mmc->rca << 16;
1495 err = mmc_send_cmd(mmc, &cmd, NULL);
1499 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1500 cmd.resp_type = MMC_RSP_R1;
1505 err = mmc_send_cmd(mmc, &cmd, NULL);
1513 #if CONFIG_IS_ENABLED(MMC_WRITE)
1514 static int sd_read_ssr(struct mmc *mmc)
1516 static const unsigned int sd_au_size[] = {
1517 0, SZ_16K / 512, SZ_32K / 512,
1518 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1519 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1520 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1521 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1526 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1527 struct mmc_data data;
1528 unsigned int au, eo, et, es;
1530 cmd.cmdidx = MMC_CMD_APP_CMD;
1531 cmd.resp_type = MMC_RSP_R1;
1532 cmd.cmdarg = mmc->rca << 16;
1534 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
1538 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1539 cmd.resp_type = MMC_RSP_R1;
1542 data.dest = (char *)ssr;
1543 data.blocksize = 64;
1545 data.flags = MMC_DATA_READ;
1547 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
1551 for (i = 0; i < 16; i++)
1552 ssr[i] = be32_to_cpu(ssr[i]);
1554 au = (ssr[2] >> 12) & 0xF;
1555 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1556 mmc->ssr.au = sd_au_size[au];
1557 es = (ssr[3] >> 24) & 0xFF;
1558 es |= (ssr[2] & 0xFF) << 8;
1559 et = (ssr[3] >> 18) & 0x3F;
1561 eo = (ssr[3] >> 16) & 0x3;
1562 mmc->ssr.erase_timeout = (et * 1000) / es;
1563 mmc->ssr.erase_offset = eo * 1000;
1566 pr_debug("Invalid Allocation Unit Size.\n");
1572 /* frequency bases */
1573 /* divided by 10 to be nice to platforms without floating point */
1574 static const int fbase[] = {
1581 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1582 * to platforms without floating point.
1584 static const u8 multipliers[] = {
1603 static inline int bus_width(uint cap)
1605 if (cap == MMC_MODE_8BIT)
1607 if (cap == MMC_MODE_4BIT)
1609 if (cap == MMC_MODE_1BIT)
1611 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1615 #if !CONFIG_IS_ENABLED(DM_MMC)
1616 #ifdef MMC_SUPPORTS_TUNING
1617 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1623 static int mmc_set_ios(struct mmc *mmc)
1627 if (mmc->cfg->ops->set_ios)
1628 ret = mmc->cfg->ops->set_ios(mmc);
1633 static int mmc_host_power_cycle(struct mmc *mmc)
1637 if (mmc->cfg->ops->host_power_cycle)
1638 ret = mmc->cfg->ops->host_power_cycle(mmc);
1644 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1647 if (clock > mmc->cfg->f_max)
1648 clock = mmc->cfg->f_max;
1650 if (clock < mmc->cfg->f_min)
1651 clock = mmc->cfg->f_min;
1655 mmc->clk_disable = disable;
1657 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1659 return mmc_set_ios(mmc);
1662 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1664 mmc->bus_width = width;
1666 return mmc_set_ios(mmc);
1669 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1671 * helper function to display the capabilities in a human
1672 * friendly manner. The capabilities include bus width and
/*
 * Emits "<text>: widths [8, 4, 1, ] modes [...]" at pr_debug level.
 * The "\b\b" sequences erase the trailing ", " separator on terminals
 * that honour backspace; output looks odd in plain log captures.
 */
1675 void mmc_dump_capabilities(const char *text, uint caps)
1679 pr_debug("%s: widths [", text);
1680 if (caps & MMC_MODE_8BIT)
1682 if (caps & MMC_MODE_4BIT)
1684 if (caps & MMC_MODE_1BIT)
1686 pr_debug("\b\b] modes [");
1687 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1688 if (MMC_CAP(mode) & caps)
1689 pr_debug("%s, ", mmc_mode_name(mode));
1690 pr_debug("\b\b]\n");
/*
 * One entry per bus mode the selection loops may try: the mode, the
 * bus-width capability mask it supports and (when tuning is compiled
 * in) the tuning command to issue.  Field list partially elided here.
 */
1694 struct mode_width_tuning {
1697 #ifdef MMC_SUPPORTS_TUNING
1702 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Translate an mmc_voltage enum value to millivolts (switch elided). */
1703 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1706 case MMC_SIGNAL_VOLTAGE_000: return 0;
1707 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1708 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1709 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * mmc_set_signal_voltage() - change the I/O signalling voltage.
 * No-ops when the requested voltage is already active; otherwise
 * records it and lets mmc_set_ios() program the host.
 */
1714 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1718 if (mmc->signal_voltage == signal_voltage)
1721 mmc->signal_voltage = signal_voltage;
1722 err = mmc_set_ios(mmc);
1724 pr_debug("unable to set voltage (err %d)\n", err);
/* Stub used when MMC_IO_VOLTAGE is disabled (body elided; likely 0). */
1729 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1735 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD bus modes in descending order of preference; the selection loop
 * below walks this table and picks the first mode both card and host
 * support.  The .mode fields are elided from this listing — each entry
 * visibly carries only its width mask (and tuning opcode for UHS).
 */
1736 static const struct mode_width_tuning sd_modes_by_pref[] = {
1737 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1738 #ifdef MMC_SUPPORTS_TUNING
1741 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1742 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1747 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1751 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1755 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1760 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1762 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1765 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1770 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate table entries whose mode is present in 'caps'. */
1774 #define for_each_sd_mode_by_pref(caps, mwt) \
1775 for (mwt = sd_modes_by_pref;\
1776 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1778 if (caps & MMC_CAP(mwt->mode))
/*
 * sd_select_mode_and_width() - negotiate the fastest bus mode/width an
 * SD card and the host both support.
 *
 * SPI hosts are special-cased: fixed 1-bit legacy mode, then done.
 * Otherwise, for each preferred mode and each width (4-bit first),
 * configure card width, card speed, then host mode/clock, optionally
 * run tuning, and read the SSR as a sanity transfer.  On any failure
 * the code drops back to MMC_LEGACY and tries the next candidate.
 * Error-path gotos and several closing braces are elided from this
 * listing, so the exact retry flow should be confirmed in full source.
 */
1780 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1783 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1784 const struct mode_width_tuning *mwt;
1785 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS only if the card acknowledged the 1.8V switch request (S18R). */
1786 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1788 bool uhs_en = false;
1793 mmc_dump_capabilities("sd card", card_caps);
1794 mmc_dump_capabilities("host", mmc->host_caps);
1797 if (mmc_host_is_spi(mmc)) {
1798 mmc_set_bus_width(mmc, 1);
1799 mmc_select_mode(mmc, MMC_LEGACY);
1800 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1801 #if CONFIG_IS_ENABLED(MMC_WRITE)
1802 err = sd_read_ssr(mmc);
1804 pr_warn("unable to read ssr\n");
1809 /* Restrict card's capabilities by what the host can do */
1810 caps = card_caps & mmc->host_caps;
1815 for_each_sd_mode_by_pref(caps, mwt) {
1818 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1819 if (*w & caps & mwt->widths) {
1820 pr_debug("trying mode %s width %d (at %d MHz)\n",
1821 mmc_mode_name(mwt->mode),
1823 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1825 /* configure the bus width (card + host) */
1826 err = sd_select_bus_width(mmc, bus_width(*w));
1829 mmc_set_bus_width(mmc, bus_width(*w));
1831 /* configure the bus mode (card) */
1832 err = sd_set_card_speed(mmc, mwt->mode);
1836 /* configure the bus mode (host) */
1837 mmc_select_mode(mmc, mwt->mode);
1838 mmc_set_clock(mmc, mmc->tran_speed,
1841 #ifdef MMC_SUPPORTS_TUNING
1842 /* execute tuning if needed */
1843 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1844 err = mmc_execute_tuning(mmc,
1847 pr_debug("tuning failed\n");
1853 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* Sanity data transfer: reading the SD Status Register. */
1854 err = sd_read_ssr(mmc);
1856 pr_warn("unable to read ssr\n");
1862 /* revert to a safer bus speed */
1863 mmc_select_mode(mmc, MMC_LEGACY);
1864 mmc_set_clock(mmc, mmc->tran_speed,
1870 pr_err("unable to select a mode\n");
1875 * read and compare the part of ext csd that is constant.
1876 * This can be used to check that the transfer is working
/*
 * Re-reads EXT_CSD into a bounce buffer and compares read-only fields
 * (partitioning support, WP group size, revision, erase group size,
 * sector count) against the cached copy.  A mismatch means the newly
 * selected bus mode is corrupting transfers.  Pre-v4 cards have no
 * EXT_CSD, so they trivially pass (return value lines elided).
 */
1879 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1882 const u8 *ext_csd = mmc->ext_csd;
1883 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1885 if (mmc->version < MMC_VERSION_4)
1888 err = mmc_send_ext_csd(mmc, test_csd);
1892 /* Only compare read only fields */
1893 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1894 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1895 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1896 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1897 ext_csd[EXT_CSD_REV]
1898 == test_csd[EXT_CSD_REV] &&
1899 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1900 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1901 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1902 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1908 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - pick the lowest signalling voltage that
 * both the card (per EXT_CSD card type bits for the target mode) and
 * the caller's allowed_mask permit, and program it.
 *
 * The switch over 'mode' that routes HS200/HS400 vs DDR_52 vs default
 * is elided from this listing; only the per-branch mask building is
 * visible.  ffs() returns the lowest set bit, and the voltage enum is
 * ordered so lower bits mean lower voltages — hence "lowest first".
 * Each failed attempt removes that candidate from allowed_mask.
 */
1909 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1910 uint32_t allowed_mask)
1918 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1919 EXT_CSD_CARD_TYPE_HS400_1_8V))
1920 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1921 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1922 EXT_CSD_CARD_TYPE_HS400_1_2V))
1923 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1926 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1927 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1928 MMC_SIGNAL_VOLTAGE_180;
1929 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1930 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1933 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1937 while (card_mask & allowed_mask) {
1938 enum mmc_voltage best_match;
1940 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1941 if (!mmc_set_signal_voltage(mmc, best_match))
1944 allowed_mask &= ~best_match;
/* Stub when MMC_IO_VOLTAGE is disabled (body elided). */
1950 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1951 uint32_t allowed_mask)
/*
 * eMMC bus modes in descending order of preference (HS400ES, HS400,
 * HS200, DDR52, HS52, HS26, legacy — several .mode fields elided from
 * this listing).  Fastest modes are 8-bit only; slower ones allow
 * narrower widths.
 */
1957 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1958 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1960 .mode = MMC_HS_400_ES,
1961 .widths = MMC_MODE_8BIT,
1964 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1967 .widths = MMC_MODE_8BIT,
1968 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1971 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1974 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1975 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1980 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1984 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1988 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1992 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate table entries whose mode is present in 'caps'. */
1996 #define for_each_mmc_mode_by_pref(caps, mwt) \
1997 for (mwt = mmc_modes_by_pref;\
1998 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
2000 if (caps & MMC_CAP(mwt->mode))
/*
 * Width candidates for the EXT_CSD BUS_WIDTH switch, widest first,
 * DDR variants before their SDR counterparts of the same width.
 */
2002 static const struct ext_csd_bus_width {
2006 } ext_csd_bus_width[] = {
2007 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
2008 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
2009 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
2010 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
2011 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
2014 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - HS400 bring-up sequence mandated by JESD84:
 * tuning can only run in HS200, so go HS200 -> tune -> drop to HS ->
 * switch bus width to 8-bit DDR -> switch timing to HS400 -> raise the
 * host clock.  Early-return error checks between steps are elided from
 * this listing.  hs400_tuning flags the tuning pass so host drivers
 * can apply HS400-specific tuning quirks.
 */
2015 static int mmc_select_hs400(struct mmc *mmc)
2019 /* Set timing to HS200 for tuning */
2020 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
2024 /* configure the bus mode (host) */
2025 mmc_select_mode(mmc, MMC_HS_200);
2026 mmc_set_clock(mmc, mmc->tran_speed, false);
2028 /* execute tuning if needed */
2029 mmc->hs400_tuning = 1;
2030 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
2031 mmc->hs400_tuning = 0;
2033 debug("tuning failed\n");
2037 /* Set back to HS */
2038 mmc_set_card_speed(mmc, MMC_HS, true);
2040 err = mmc_hs400_prepare_ddr(mmc);
2044 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2045 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
2049 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
2053 mmc_select_mode(mmc, MMC_HS_400);
2054 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is compiled out (body elided). */
2061 static int mmc_select_hs400(struct mmc *mmc)
2067 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2068 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM hook for enabling enhanced strobe on the host (body elided). */
2069 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * mmc_select_hs400es() - HS400 Enhanced Strobe bring-up: no tuning is
 * required; switch card to HS, set 8-bit DDR width with the strobe
 * bit, switch timing to HS400ES, raise the clock, then enable the
 * strobe on the host side.  Error checks between steps are elided.
 */
2074 static int mmc_select_hs400es(struct mmc *mmc)
2078 err = mmc_set_card_speed(mmc, MMC_HS, true);
2082 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2083 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2084 EXT_CSD_BUS_WIDTH_STROBE);
2086 printf("switch to bus width for hs400 failed\n");
2089 /* TODO: driver strength */
2090 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2094 mmc_select_mode(mmc, MMC_HS_400_ES);
2095 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2099 return mmc_set_enhanced_strobe(mmc);
/* Stub when HS400ES support is compiled out (body elided). */
2102 static int mmc_select_hs400es(struct mmc *mmc)
/* Iterate width candidates matching the requested DDR-ness and caps. */
2108 #define for_each_supported_width(caps, ddr, ecbv) \
2109 for (ecbv = ext_csd_bus_width;\
2110 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2112 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - negotiate the fastest eMMC bus
 * mode/width supported by both card and host.
 *
 * Flow: SPI hosts get fixed 1-bit legacy.  Otherwise, after dropping
 * any active HS200/HS400(ES) mode back to HS (a direct transition to
 * legacy is not allowed by the standard), walk modes by preference and
 * widths by preference; for each candidate: set lowest usable voltage,
 * switch card bus width (SDR first), hand HS400/HS400ES to their
 * dedicated helpers, else set card speed, optionally re-switch to the
 * DDR width, program the host, tune if required, and verify with an
 * EXT_CSD read-back.  On failure, restore the old voltage and fall
 * back to 1-bit legacy before trying the next candidate.  goto labels
 * and several closing braces are elided from this listing.
 */
2114 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2117 const struct mode_width_tuning *mwt;
2118 const struct ext_csd_bus_width *ecbw;
2121 mmc_dump_capabilities("mmc", card_caps);
2122 mmc_dump_capabilities("host", mmc->host_caps);
2125 if (mmc_host_is_spi(mmc)) {
2126 mmc_set_bus_width(mmc, 1);
2127 mmc_select_mode(mmc, MMC_LEGACY);
2128 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2132 /* Restrict card's capabilities by what the host can do */
2133 card_caps &= mmc->host_caps;
2135 /* Only version 4 of MMC supports wider bus widths */
2136 if (mmc->version < MMC_VERSION_4)
2139 if (!mmc->ext_csd) {
2140 pr_debug("No ext_csd found!\n"); /* this should never happen */
2144 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2145 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
2146 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2148 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2149 * before doing anything else, since a transition from either of
2150 * the HS200/HS400 mode directly to legacy mode is not supported.
2152 if (mmc->selected_mode == MMC_HS_200 ||
2153 mmc->selected_mode == MMC_HS_400 ||
2154 mmc->selected_mode == MMC_HS_400_ES)
2155 mmc_set_card_speed(mmc, MMC_HS, true);
2158 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2160 for_each_mmc_mode_by_pref(card_caps, mwt) {
2161 for_each_supported_width(card_caps & mwt->widths,
2162 mmc_is_mode_ddr(mwt->mode), ecbw) {
2163 enum mmc_voltage old_voltage;
2164 pr_debug("trying mode %s width %d (at %d MHz)\n",
2165 mmc_mode_name(mwt->mode),
2166 bus_width(ecbw->cap),
2167 mmc_mode2freq(mmc, mwt->mode) / 1000000);
2168 old_voltage = mmc->signal_voltage;
2169 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2170 MMC_ALL_SIGNAL_VOLTAGE);
2174 /* configure the bus width (card + host) */
2175 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2177 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2180 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2182 if (mwt->mode == MMC_HS_400) {
2183 err = mmc_select_hs400(mmc);
2185 printf("Select HS400 failed %d\n", err);
2188 } else if (mwt->mode == MMC_HS_400_ES) {
2189 err = mmc_select_hs400es(mmc);
2191 printf("Select HS400ES failed %d\n",
2196 /* configure the bus speed (card) */
2197 err = mmc_set_card_speed(mmc, mwt->mode, false);
2202 * configure the bus width AND the ddr mode
2203 * (card). The host side will be taken care
2204 * of in the next step
2206 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2207 err = mmc_switch(mmc,
2208 EXT_CSD_CMD_SET_NORMAL,
2210 ecbw->ext_csd_bits);
2215 /* configure the bus mode (host) */
2216 mmc_select_mode(mmc, mwt->mode);
2217 mmc_set_clock(mmc, mmc->tran_speed,
2219 #ifdef MMC_SUPPORTS_TUNING
2221 /* execute tuning if needed */
2223 err = mmc_execute_tuning(mmc,
2226 pr_debug("tuning failed : %d\n", err);
2233 /* do a transfer to check the configuration */
2234 err = mmc_read_and_compare_ext_csd(mmc);
2238 mmc_set_signal_voltage(mmc, old_voltage);
2239 /* if an error occurred, revert to a safer bus mode */
2240 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2241 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2242 mmc_select_mode(mmc, MMC_LEGACY);
2243 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2244 mmc_set_bus_width(mmc, 1);
2248 pr_err("unable to select a mode : %d\n", err);
2254 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: one static EXT_CSD buffer instead of a malloc'd copy. */
2255 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * mmc_startup_v4() - MMC v4+ specific init: read EXT_CSD and derive
 * version, capacities (user/boot/RPMB/GP partitions), partition
 * configuration, erase/WP group sizes and reliable-write settings.
 * Returns early (success) for SD cards and pre-v4 MMC.  Many error
 * checks, braces and a few statements are elided from this listing.
 */
2258 static int mmc_startup_v4(struct mmc *mmc)
2262 bool has_parts = false;
2263 bool part_completed;
/* Index = EXT_CSD_REV; maps revision byte to MMC_VERSION_* constant. */
2264 static const u32 mmc_versions[] = {
2276 #if CONFIG_IS_ENABLED(MMC_TINY)
2277 u8 *ext_csd = ext_csd_bkup;
2279 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2283 memset(ext_csd_bkup, 0, MMC_MAX_BLOCK_LEN)
2285 err = mmc_send_ext_csd(mmc, ext_csd);
2289 /* store the ext csd for future reference */
2291 mmc->ext_csd = ext_csd;
2293 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2295 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2298 /* check ext_csd version and capacity */
2299 err = mmc_send_ext_csd(mmc, ext_csd);
2303 /* store the ext csd for future reference */
2305 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2308 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* Unknown future revisions are rejected rather than misdecoded. */
2310 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2313 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2315 if (mmc->version >= MMC_VERSION_4_2) {
2317 * According to the JEDEC Standard, the value of
2318 * ext_csd's capacity is valid if the value is more
2321 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2322 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2323 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2324 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2325 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT is only authoritative for cards > 2 GiB. */
2326 if ((capacity >> 20) > 2 * 1024)
2327 mmc->capacity_user = capacity;
2330 if (mmc->version >= MMC_VERSION_4_5)
2331 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2333 /* The partition data may be non-zero but it is only
2334 * effective if PARTITION_SETTING_COMPLETED is set in
2335 * EXT_CSD, so ignore any data if this bit is not set,
2336 * except for enabling the high-capacity group size
2337 * definition (see below).
2339 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2340 EXT_CSD_PARTITION_SETTING_COMPLETED);
2342 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2343 /* Some eMMC set the value too low so set a minimum */
2344 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2345 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2347 /* store the partition info of emmc */
2348 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2349 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2350 ext_csd[EXT_CSD_BOOT_MULT])
2351 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2352 if (part_completed &&
2353 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2354 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT/RPMB sizes are in 128 KiB units, hence the << 17. */
2356 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2358 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* Up to four general-purpose partitions, 3-byte multiplier each. */
2360 for (i = 0; i < 4; i++) {
2361 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2362 uint mult = (ext_csd[idx + 2] << 16) +
2363 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2366 if (!part_completed)
2368 mmc->capacity_gp[i] = mult;
2369 mmc->capacity_gp[i] *=
2370 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2371 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2372 mmc->capacity_gp[i] <<= 19;
2375 #ifndef CONFIG_SPL_BUILD
2376 if (part_completed) {
2377 mmc->enh_user_size =
2378 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2379 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2380 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2381 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2382 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2383 mmc->enh_user_size <<= 19;
2384 mmc->enh_user_start =
2385 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2386 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2387 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2388 ext_csd[EXT_CSD_ENH_START_ADDR];
/* High-capacity cards address in 512-byte sectors, not bytes. */
2389 if (mmc->high_capacity)
2390 mmc->enh_user_start <<= 9;
2395 * Host needs to enable ERASE_GRP_DEF bit if device is
2396 * partitioned. This bit will be lost every time after a reset
2397 * or power off. This will affect erase size.
2401 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2402 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2405 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2406 EXT_CSD_ERASE_GROUP_DEF, 1);
/* Keep cached copy in sync with the switch we just did. */
2411 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2414 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2415 #if CONFIG_IS_ENABLED(MMC_WRITE)
2416 /* Read out group size from ext_csd */
2417 mmc->erase_grp_size =
2418 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2421 * if high capacity and partition setting completed
2422 * SEC_COUNT is valid even if it is smaller than 2 GiB
2423 * JEDEC Standard JESD84-B45, 6.2.4
2425 if (mmc->high_capacity && part_completed) {
2426 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2427 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2428 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2429 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2430 capacity *= MMC_MAX_BLOCK_LEN;
2431 mmc->capacity_user = capacity;
2434 #if CONFIG_IS_ENABLED(MMC_WRITE)
2436 /* Calculate the group size from the csd value. */
2437 int erase_gsz, erase_gmul;
2439 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2440 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2441 mmc->erase_grp_size = (erase_gsz + 1)
2445 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2446 mmc->hc_wp_grp_size = 1024
2447 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2448 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2451 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2454 !!(ext_csd[EXT_CSD_SEC_FEATURE] & EXT_CSD_SEC_FEATURE_TRIM_EN);
/* Error path: drop the cached EXT_CSD so later code won't use it. */
2459 #if !CONFIG_IS_ENABLED(MMC_TINY)
2462 mmc->ext_csd = NULL;
/*
 * mmc_startup() - identification-to-transfer-state bring-up common to
 * SD and MMC: read CID, assign/get RCA, read and decode CSD (version,
 * legacy speed, block lengths, capacity), optionally program DSR,
 * select the card, run the v4 EXT_CSD path, then negotiate the final
 * bus mode/width and fill in the block-device descriptor.  Numerous
 * error checks, braces and a few statements (e.g. the MMC branch of
 * the RCA handling) are elided from this listing.
 */
2467 static int mmc_startup(struct mmc *mmc)
2473 struct blk_desc *bdesc;
2475 #ifdef CONFIG_MMC_SPI_CRC_ON
2476 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2477 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2478 cmd.resp_type = MMC_RSP_R1;
2480 err = mmc_send_cmd(mmc, &cmd, NULL);
2486 /* Put the Card in Identify Mode */
2487 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2488 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2489 cmd.resp_type = MMC_RSP_R2;
/* Some cards need CMD2 retried — hence the quirk with 4 attempts. */
2492 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
2496 memcpy(mmc->cid, cmd.response, 16);
2499 * For MMC cards, set the Relative Address.
2500 * For SD cards, get the Relative Address.
2501 * This also puts the cards into Standby State
2503 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2504 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2505 cmd.cmdarg = mmc->rca << 16;
2506 cmd.resp_type = MMC_RSP_R6;
2508 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD: the card chose its own RCA; record it (MMC branch elided). */
2514 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2517 /* Get the Card-Specific Data */
2518 cmd.cmdidx = MMC_CMD_SEND_CSD;
2519 cmd.resp_type = MMC_RSP_R2;
2520 cmd.cmdarg = mmc->rca << 16;
2522 err = mmc_send_cmd(mmc, &cmd, NULL);
2527 mmc->csd[0] = cmd.response[0];
2528 mmc->csd[1] = cmd.response[1];
2529 mmc->csd[2] = cmd.response[2];
2530 mmc->csd[3] = cmd.response[3];
/* MMC only: SPEC_VERS lives in CSD bits [29:26]. */
2532 if (mmc->version == MMC_VERSION_UNKNOWN) {
2533 int version = (cmd.response[0] >> 26) & 0xf;
2537 mmc->version = MMC_VERSION_1_2;
2540 mmc->version = MMC_VERSION_1_4;
2543 mmc->version = MMC_VERSION_2_2;
2546 mmc->version = MMC_VERSION_3;
2549 mmc->version = MMC_VERSION_4;
2552 mmc->version = MMC_VERSION_1_2;
2557 /* divide frequency by 10, since the mults are 10x bigger */
2558 freq = fbase[(cmd.response[0] & 0x7)];
2559 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2561 mmc->legacy_speed = freq * mult;
2562 mmc_select_mode(mmc, MMC_LEGACY);
2564 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2565 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2566 #if CONFIG_IS_ENABLED(MMC_WRITE)
2569 mmc->write_bl_len = mmc->read_bl_len;
2571 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2574 if (mmc->high_capacity) {
2575 csize = (mmc->csd[1] & 0x3f) << 16
2576 | (mmc->csd[2] & 0xffff0000) >> 16;
2579 csize = (mmc->csd[1] & 0x3ff) << 2
2580 | (mmc->csd[2] & 0xc0000000) >> 30;
2581 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2584 mmc->capacity_user = (csize + 1) << (cmult + 2);
2585 mmc->capacity_user *= mmc->read_bl_len;
2586 mmc->capacity_boot = 0;
2587 mmc->capacity_rpmb = 0;
2588 for (i = 0; i < 4; i++)
2589 mmc->capacity_gp[i] = 0;
/* Clamp block lengths to what the stack can transfer. */
2591 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2592 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2594 #if CONFIG_IS_ENABLED(MMC_WRITE)
2595 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2596 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the Driver Stage Register only if implemented and set. */
2599 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2600 cmd.cmdidx = MMC_CMD_SET_DSR;
2601 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2602 cmd.resp_type = MMC_RSP_NONE;
2603 if (mmc_send_cmd(mmc, &cmd, NULL))
2604 pr_warn("MMC: SET_DSR failed\n");
2607 /* Select the card, and put it into Transfer Mode */
2608 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2609 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2610 cmd.resp_type = MMC_RSP_R1;
2611 cmd.cmdarg = mmc->rca << 16;
2612 err = mmc_send_cmd(mmc, &cmd, NULL);
2619 * For SD, its erase group is always one sector
2621 #if CONFIG_IS_ENABLED(MMC_WRITE)
2622 mmc->erase_grp_size = 1;
2624 mmc->part_config = MMCPART_NOAVAILABLE;
2626 err = mmc_startup_v4(mmc);
2630 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2634 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay at 1-bit legacy; skip full mode negotiation. */
2635 mmc_set_clock(mmc, mmc->legacy_speed, false);
2636 mmc_select_mode(mmc, MMC_LEGACY);
2637 mmc_set_bus_width(mmc, 1);
2640 err = sd_get_capabilities(mmc);
2643 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2645 err = mmc_get_capabilities(mmc);
2648 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2654 mmc->best_mode = mmc->selected_mode;
2656 /* Fix the block length for DDR mode */
2657 if (mmc->ddr_mode) {
2658 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2659 #if CONFIG_IS_ENABLED(MMC_WRITE)
2660 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2664 /* fill in device description */
2665 bdesc = mmc_get_blk_desc(mmc);
2669 bdesc->blksz = mmc->read_bl_len;
2670 bdesc->log2blksz = LOG2(bdesc->blksz);
2671 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2672 #if !defined(CONFIG_SPL_BUILD) || \
2673 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2674 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* Decode manufacturer ID, serial, product name and revision from CID. */
2675 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2676 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2677 (mmc->cid[3] >> 16) & 0xffff);
2678 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2679 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2680 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2681 (mmc->cid[2] >> 24) & 0xff);
2682 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2683 (mmc->cid[2] >> 16) & 0xf);
2685 bdesc->vendor[0] = 0;
2686 bdesc->product[0] = 0;
2687 bdesc->revision[0] = 0;
2690 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * mmc_send_if_cond() - CMD8 probe (SD spec 2.0).  A valid echo of the
 * 0xaa check pattern marks the card as SD v2+; no/invalid response
 * (error handling elided here) means SD v1 or MMC.
 */
2697 static int mmc_send_if_cond(struct mmc *mmc)
2702 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2703 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2704 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2705 cmd.resp_type = MMC_RSP_R7;
2707 err = mmc_send_cmd(mmc, &cmd, NULL);
2712 if ((cmd.response[0] & 0xff) != 0xaa)
2715 mmc->version = SD_VERSION_2;
2720 #if !CONFIG_IS_ENABLED(DM_MMC)
2721 /* board-specific MMC power initializations. */
2722 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - locate the card's supplies.  With DM + regulator
 * support, look up the optional vmmc/vqmmc regulators from the device
 * tree (absence is not an error — only a debug note).  Legacy builds
 * fall back to the weak board hook above.
 */
2727 static int mmc_power_init(struct mmc *mmc)
2729 #if CONFIG_IS_ENABLED(DM_MMC)
2730 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2733 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2736 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2738 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2739 &mmc->vqmmc_supply);
2741 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2743 #else /* !CONFIG_DM_MMC */
2745 * Driver model should use a regulator, as above, rather than calling
2746 * out to board code.
2748 board_mmc_power_init();
2754 * put the host in the initial state:
2755 * - turn on Vdd (card power supply)
2756 * - configure the bus width and clock to minimal values
2758 static void mmc_set_initial_state(struct mmc *mmc)
2762 /* First try to set 3.3V. If it fails set to 1.8V */
2763 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2765 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2767 pr_warn("mmc: failed to set signal voltage\n");
2769 mmc_select_mode(mmc, MMC_LEGACY);
2770 mmc_set_bus_width(mmc, 1);
/* Clock 0 lets mmc_set_clock() clamp up to the host's f_min. */
2771 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/*
 * mmc_power_on() - enable the VMMC regulator if one was found.
 * -EACCES (regulator state fixed by hardware) is tolerated.
 */
2774 static int mmc_power_on(struct mmc *mmc)
2776 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2777 if (mmc->vmmc_supply) {
2778 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2780 if (ret && ret != -EACCES) {
2781 printf("Error enabling VMMC supply : %d\n", ret);
/* Counterpart of mmc_power_on(): gate the clock, drop VMMC. */
2789 static int mmc_power_off(struct mmc *mmc)
2791 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2792 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2793 if (mmc->vmmc_supply) {
2794 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2796 if (ret && ret != -EACCES) {
2797 pr_debug("Error disabling VMMC supply : %d\n", ret);
/*
 * mmc_power_cycle() - off, host-assisted cycle, delay (the udelay/
 * mdelay call itself is elided from this listing), then on again.
 * Needed to recover a card stuck in UHS signalling.
 */
2805 static int mmc_power_cycle(struct mmc *mmc)
2809 ret = mmc_power_off(mmc);
2813 ret = mmc_host_power_cycle(mmc);
2818 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2819 * to be on the safer side.
2822 return mmc_power_on(mmc);
/*
 * mmc_get_op_cond() - power up the card and obtain its operating
 * conditions: power/regulator init, optional quirks, power cycle (UHS
 * is disabled if the host can't fully power-cycle, since a failed UHS
 * negotiation would then be unrecoverable), host (re)init, CMD0 reset,
 * CMD8 probe, then ACMD41 (SD) falling back to CMD1 (MMC) on timeout.
 * The 'quiet' flag suppresses the final error print.  Several error
 * checks and the retry label are elided from this listing.
 */
2825 int mmc_get_op_cond(struct mmc *mmc, bool quiet)
2827 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2833 err = mmc_power_init(mmc);
2837 #ifdef CONFIG_MMC_QUIRKS
2838 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2839 MMC_QUIRK_RETRY_SEND_CID |
2840 MMC_QUIRK_RETRY_APP_CMD;
2843 err = mmc_power_cycle(mmc);
2846 * if power cycling is not supported, we should not try
2847 * to use the UHS modes, because we wouldn't be able to
2848 * recover from an error during the UHS initialization.
2850 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2852 mmc->host_caps &= ~UHS_CAPS;
2853 err = mmc_power_on(mmc);
2858 #if CONFIG_IS_ENABLED(DM_MMC)
2860 * Re-initialization is needed to clear old configuration for
2863 err = mmc_reinit(mmc);
2865 /* made sure it's not NULL earlier */
2866 err = mmc->cfg->ops->init(mmc);
2873 mmc_set_initial_state(mmc);
2875 /* Reset the Card */
2876 err = mmc_go_idle(mmc);
2881 /* The internal partition reset to user partition(0) at every CMD0 */
2882 mmc_get_blk_desc(mmc)->hwpart = 0;
2884 /* Test for SD version 2 */
2885 err = mmc_send_if_cond(mmc);
2887 /* Now try to get the SD card's operating condition */
2888 err = sd_send_op_cond(mmc, uhs_en);
/* UHS handshake failed: power-cycle and retry without UHS (elided). */
2889 if (err && uhs_en) {
2891 mmc_power_cycle(mmc);
2895 /* If the command timed out, we check for an MMC card */
2896 if (err == -ETIMEDOUT) {
2897 err = mmc_send_op_cond(mmc);
2900 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2902 pr_err("Card did not respond to voltage select! : %d\n", err);
/*
 * mmc_start_init() - first (non-blocking-capable) half of init:
 * build host_caps, optionally restrict them to a user-forced speed
 * mode, check card presence, and run mmc_get_op_cond().  Sets
 * init_in_progress so mmc_init() knows to complete rather than
 * restart.  Error checks between steps are elided from this listing.
 */
2911 int mmc_start_init(struct mmc *mmc)
2917 * all hosts are capable of 1 bit bus-width and able to use the legacy
2920 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2923 if (IS_ENABLED(CONFIG_MMC_SPEED_MODE_SET)) {
2924 if (mmc->user_speed_mode != MMC_MODES_END) {
2927 if (mmc->host_caps & MMC_CAP(mmc->user_speed_mode)) {
2928 /* Remove all existing speed capabilities */
2929 for (i = MMC_LEGACY; i < MMC_MODES_END; i++)
2930 mmc->host_caps &= ~MMC_CAP(i);
2931 mmc->host_caps |= (MMC_CAP(mmc->user_speed_mode)
2932 | MMC_CAP(MMC_LEGACY) |
2935 pr_err("bus_mode requested is not supported\n");
2940 #if CONFIG_IS_ENABLED(DM_MMC)
2941 mmc_deferred_probe(mmc);
2943 #if !defined(CONFIG_MMC_BROKEN_CD)
2944 no_card = mmc_getcd(mmc) == 0;
2948 #if !CONFIG_IS_ENABLED(DM_MMC)
2949 /* we pretend there's no card when init is NULL */
2950 no_card = no_card || (mmc->cfg->ops->init == NULL);
2954 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2955 pr_err("MMC: no card present\n");
2960 err = mmc_get_op_cond(mmc, false);
2963 mmc->init_in_progress = 1;
/*
 * Second half: finish a pending CMD1 poll if one is outstanding,
 * then run the full mmc_startup() identification sequence.
 */
2968 static int mmc_complete_init(struct mmc *mmc)
2972 mmc->init_in_progress = 0;
2973 if (mmc->op_cond_pending)
2974 err = mmc_complete_op_cond(mmc);
2977 err = mmc_startup(mmc);
/*
 * mmc_init() - public entry point: starts init unless already in
 * progress, completes it, and logs the elapsed time.  The early-return
 * for an already-initialized device (upriv check) is elided from this
 * listing.
 */
2985 int mmc_init(struct mmc *mmc)
2988 __maybe_unused ulong start;
2989 #if CONFIG_IS_ENABLED(DM_MMC)
2990 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2997 start = get_timer(0);
2999 if (!mmc->init_in_progress)
3000 err = mmc_start_init(mmc);
3003 err = mmc_complete_init(mmc);
3005 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
3010 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
3011 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
3012 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_deinit() - downgrade the card out of UHS/HS200/HS400 modes
 * (e.g. before handing off to an OS) by re-running mode selection
 * with those capabilities masked out.  The SD-vs-MMC branch lines
 * are elided from this listing.
 */
3013 int mmc_deinit(struct mmc *mmc)
3021 caps_filtered = mmc->card_caps &
3022 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
3023 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
3024 MMC_CAP(UHS_SDR104));
3026 return sd_select_mode_and_width(mmc, caps_filtered);
3028 caps_filtered = mmc->card_caps &
3029 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400) | MMC_CAP(MMC_HS_400_ES));
3031 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Record a DSR value to be programmed during startup (body elided). */
3036 int mmc_set_dsr(struct mmc *mmc, u16 val)
3042 /* CPU-specific MMC initializations */
3043 __weak int cpu_mmc_init(struct bd_info *bis)
3048 /* board-specific MMC initializations. */
3049 __weak int board_mmc_init(struct bd_info *bis)
/* Mark the device for initialization before first use. */
3054 void mmc_set_preinit(struct mmc *mmc, int preinit)
3056 mmc->preinit = preinit;
3059 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * DM variant: bind all MMC devices in sequence order, then probe each
 * one, logging (but not aborting on) individual probe failures.
 */
3060 static int mmc_probe(struct bd_info *bis)
3064 struct udevice *dev;
3066 ret = uclass_get(UCLASS_MMC, &uc);
3071 * Try to add them in sequence order. Really with driver model we
3072 * should allow holes, but the current MMC list does not allow that.
3073 * So if we request 0, 1, 3 we will get 0, 1, 2.
3075 for (i = 0; ; i++) {
3076 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3080 uclass_foreach_dev(dev, uc) {
3081 ret = device_probe(dev);
3083 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Legacy variant: defer entirely to the weak board hook. */
3089 static int mmc_probe(struct bd_info *bis)
3091 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-shot subsystem init guarded by a static
 * flag; probes devices and (outside SPL) prints the device list.
 */
3098 int mmc_initialize(struct bd_info *bis)
3100 static int initialized = 0;
3102 if (initialized) /* Avoid initializing mmc multiple times */
3106 #if !CONFIG_IS_ENABLED(BLK)
3107 #if !CONFIG_IS_ENABLED(MMC_TINY)
3111 ret = mmc_probe(bis);
3115 #ifndef CONFIG_SPL_BUILD
3116 print_mmc_devices(',');
3123 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_init_device() - look up MMC device 'num' (by sequence, falling
 * back to plain uclass index), reset the user speed-mode override,
 * and presumably run init unless preinit is deferred (tail elided).
 */
3124 int mmc_init_device(int num)
3126 struct udevice *dev;
3130 if (uclass_get_device_by_seq(UCLASS_MMC, num, &dev)) {
3131 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3136 m = mmc_get_mmc_dev(dev);
3140 /* Initialising user set speed mode */
3141 m->user_speed_mode = MMC_MODES_END;
3150 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * mmc_set_bkops_enable() - enable/disable (auto or manual) background
 * operations via the EXT_CSD BKOPS_EN field.  Checks that the device
 * supports BKOPS and that manual BKOPS is not already on.  Bit 0 =
 * manual, bit 1 = auto.  The disable-path mask computation is elided
 * from this listing.
 */
3151 int mmc_set_bkops_enable(struct mmc *mmc, bool autobkops, bool enable)
3154 u32 bit = autobkops ? BIT(1) : BIT(0);
3155 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3157 err = mmc_send_ext_csd(mmc, ext_csd);
3159 puts("Could not get ext_csd register values\n");
3163 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3164 puts("Background operations not supported on device\n");
3165 return -EMEDIUMTYPE;
3168 if (enable && (ext_csd[EXT_CSD_BKOPS_EN] & bit)) {
3169 puts("Background operations already enabled\n");
3173 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
3176 printf("Failed to %sable manual background operations\n",
3177 enable ? "en" : "dis");
3181 printf("%sabled %s background operations\n",
3182 enable ? "En" : "Dis", autobkops ? "auto" : "manual");
3188 __weak int mmc_get_env_dev(void)
3190 #ifdef CONFIG_SYS_MMC_ENV_DEV
3191 return CONFIG_SYS_MMC_ENV_DEV;