1 // SPDX-License-Identifier: GPL-2.0+
3 * (C) Copyright 2012 SAMSUNG Electronics
4 * Jaehoon Chung <jh80.chung@samsung.com>
5 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
/*
 * Per-descriptor bounce-buffer stride used by dwmci_prepare_data()
 * when splitting a transfer across IDMAC descriptors.
 */
16 #define PAGE_SIZE 4096
/*
 * dwmci_wait_reset() - program a reset request into DWMCI_CTRL and poll
 * until the controller clears the reset bits.
 *
 * @host:  controller state
 * @value: reset bit(s) to write into the CTRL register
 *
 * NOTE(review): several lines of this function are elided in this
 * excerpt (the poll loop's braces and the return statements are not
 * visible); the comments below describe only the visible statements.
 */
18 static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
/* Iteration budget for the poll below. */
20 unsigned long timeout = 1000;
/* Request the reset. */
23 dwmci_writel(host, DWMCI_CTRL, value);
/* Reset has completed once none of the DWMCI_RESET_ALL bits remain set. */
26 ctrl = dwmci_readl(host, DWMCI_CTRL);
27 if (!(ctrl & DWMCI_RESET_ALL))
/*
 * dwmci_set_idma_desc() - fill one internal-DMA (IDMAC) descriptor.
 *
 * @idmac: descriptor to fill
 * @desc0: raw value for the first descriptor word (flags, judging by
 *         the call site in dwmci_prepare_data())
 * @desc1: raw value for the second word (byte count at the call site)
 * @desc2: raw value for the third word (buffer address at the call site)
 *
 * NOTE(review): the stores of desc0..desc2 are elided in this excerpt;
 * only the next-pointer store is visible.
 */
33 static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
34 u32 desc0, u32 desc1, u32 desc2)
36 struct dwmci_idmac *desc = idmac;
/* Chain to the next descriptor, laid out contiguously after this one. */
41 desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
/*
 * dwmci_prepare_data() - build the IDMAC descriptor chain for @data and
 * arm the controller's internal DMA engine.
 *
 * @host:          controller state
 * @data:          transfer geometry (block size and count)
 * @cur_idmac:     first entry of a cache-aligned descriptor array
 * @bounce_buffer: DMA-safe buffer the descriptors point into
 *
 * NOTE(review): the descriptor-building loop's control lines are elided
 * in this excerpt; comments describe only the visible statements.
 */
44 static void dwmci_prepare_data(struct dwmci_host *host,
45 struct mmc_data *data,
46 struct dwmci_idmac *cur_idmac,
50 unsigned int i = 0, flags, cnt, blk_cnt;
51 ulong data_start, data_end;
54 blk_cnt = data->blocks;
/* Reset the FIFO before programming the new transfer. */
56 dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
/* Tell the IDMAC where the descriptor list starts. */
58 data_start = (ulong)cur_idmac;
59 dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);
/* Each descriptor: owned by the IDMAC, chained (CH); the first one is
 * additionally marked "first segment" (FS). */
62 flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
63 flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
/* Final descriptor: mark "last segment" and carry the remaining bytes. */
65 flags |= DWMCI_IDMAC_LD;
66 cnt = data->blocksize * blk_cnt;
/* Non-final descriptors each cover 8 blocks of the bounce buffer. */
68 cnt = data->blocksize * 8;
70 dwmci_set_idma_desc(cur_idmac, flags, cnt,
71 (ulong)bounce_buffer + (i * PAGE_SIZE));
/* Write the descriptors back so the DMA engine sees them. */
80 data_end = (ulong)cur_idmac;
81 flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);
/* Enable DMA and the internal DMA controller in CTRL... */
83 ctrl = dwmci_readl(host, DWMCI_CTRL);
84 ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
85 dwmci_writel(host, DWMCI_CTRL, ctrl);
/* ...and select fixed-burst IDMAC operation in BMOD. */
87 ctrl = dwmci_readl(host, DWMCI_BMOD);
88 ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
89 dwmci_writel(host, DWMCI_BMOD, ctrl);
/* Program the transfer geometry. */
91 dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
92 dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
/*
 * dwmci_fifo_ready() - poll DWMCI_STATUS until @bit is clear.
 *
 * @len is both output and scratch: on return it holds the last STATUS
 * value read, from which the caller extracts the FIFO fill count.
 * Logs (and presumably returns an error -- return elided in this
 * excerpt) when the poll budget runs out. The `timeout` declaration is
 * also elided here.
 */
95 static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
99 *len = dwmci_readl(host, DWMCI_STATUS);
100 while (--timeout && (*len & bit)) {
102 *len = dwmci_readl(host, DWMCI_STATUS);
106 debug("%s: FIFO underflow timeout\n", __func__);
/*
 * dwmci_data_transfer() - wait for the data phase of a transfer.
 *
 * In fifo_mode the CPU moves data itself through the DATA register
 * (PIO), paced by the RXDR/TXDR watermark status bits; otherwise the
 * IDMAC programmed by dwmci_prepare_data() moves the data and this
 * function only waits for completion or errors.
 *
 * NOTE(review): the surrounding loop structure and the stores/loads
 * into `buf` are elided in this excerpt; error/return paths are not
 * fully visible.
 */
113 static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
/* Overall budget compared against get_timer(start) below -- presumably
 * milliseconds; confirm against get_timer()'s unit. */
116 u32 timeout = 240000;
117 u32 mask, size, i, len = 0;
119 ulong start = get_timer(0);
/* FIFO depth in 32-bit words, reconstructed from the RX watermark. */
120 u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
121 RX_WMARK_SHIFT) + 1) * 2;
/* Remaining transfer size in 32-bit words. */
123 size = data->blocksize * data->blocks / 4;
124 if (data->flags == MMC_DATA_READ)
125 buf = (unsigned int *)data->dest;
127 buf = (unsigned int *)data->src;
130 mask = dwmci_readl(host, DWMCI_RINTSTS);
131 /* Error during data transfer. */
132 if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
133 debug("%s: DATA ERROR!\n", __func__);
/* PIO path: service the FIFO while words remain to move. */
138 if (host->fifo_mode && size) {
140 if (data->flags == MMC_DATA_READ &&
141 (mask & DWMCI_INTMSK_RXDR)) {
143 ret = dwmci_fifo_ready(host,
/* Word count currently held in the FIFO. */
149 len = (len >> DWMCI_FIFO_SHIFT) &
151 len = min(size, len);
/* Pop words from the FIFO (the stores into buf are elided here). */
152 for (i = 0; i < len; i++)
154 dwmci_readl(host, DWMCI_DATA);
155 size = size > len ? (size - len) : 0;
/* Acknowledge the RX watermark status. */
157 dwmci_writel(host, DWMCI_RINTSTS,
159 } else if (data->flags == MMC_DATA_WRITE &&
160 (mask & DWMCI_INTMSK_TXDR)) {
162 ret = dwmci_fifo_ready(host,
/* Free space in the FIFO, in words. */
168 len = fifo_depth - ((len >>
171 len = min(size, len);
/* Push words into the FIFO (the source operand is elided here). */
172 for (i = 0; i < len; i++)
173 dwmci_writel(host, DWMCI_DATA,
175 size = size > len ? (size - len) : 0;
/* Acknowledge the TX watermark status. */
177 dwmci_writel(host, DWMCI_RINTSTS,
182 /* Data arrived correctly. */
183 if (mask & DWMCI_INTMSK_DTO) {
188 /* Check for timeout. */
189 if (get_timer(start) > timeout) {
190 debug("%s: Timeout waiting for data!\n",
/* Clear the interrupt status bits observed this round. */
197 dwmci_writel(host, DWMCI_RINTSTS, mask);
/*
 * dwmci_set_transfer_mode() - compute the CMD-register transfer bits
 * for @data: "data expected" always, plus the write-direction bit for
 * writes. (The declaration and return of `mode` are elided in this
 * excerpt; @host is unused in the visible lines.)
 */
202 static int dwmci_set_transfer_mode(struct dwmci_host *host,
203 struct mmc_data *data)
207 mode = DWMCI_CMD_DATA_EXP;
208 if (data->flags & MMC_DATA_WRITE)
209 mode |= DWMCI_CMD_RW;
/*
 * dwmci_send_cmd() - issue a single MMC command, optionally with data.
 *
 * Two signatures are visible below: a driver-model variant taking a
 * struct udevice and a legacy variant taking a struct mmc directly.
 * They are presumably selected by CONFIG_DM_MMC preprocessor guards
 * elided from this excerpt (dwmci_setup_cfg() further down visibly
 * tests CONFIG_DM_MMC) -- confirm against the full file.
 *
 * NOTE(review): many intermediate lines (braces, returns, #if/#else
 * markers) are elided; comments describe only the visible statements.
 */
215 static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
216 struct mmc_data *data)
218 struct mmc *mmc = mmc_get_mmc_dev(dev);
220 static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
221 struct mmc_data *data)
224 struct dwmci_host *host = mmc->priv;
/* One IDMAC descriptor covers up to 8 blocks (see dwmci_prepare_data). */
225 ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
226 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
227 int ret = 0, flags = 0, i;
/* Busy-wait budget compared against get_timer() -- presumably ms. */
228 unsigned int timeout = 500;
231 ulong start = get_timer(0);
232 struct bounce_buffer bbstate;
/* Wait for any previous data activity to finish. */
234 while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
235 if (get_timer(start) > timeout) {
236 debug("%s: Timeout on data busy\n", __func__);
/* Clear all pending interrupt status before issuing the command. */
241 dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
/* Data setup: PIO (fifo_mode) programs the geometry directly; the DMA
 * path routes the transfer through a bounce buffer and the IDMAC. */
244 if (host->fifo_mode) {
245 dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
246 dwmci_writel(host, DWMCI_BYTCNT,
247 data->blocksize * data->blocks);
248 dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
250 if (data->flags == MMC_DATA_READ) {
251 bounce_buffer_start(&bbstate, (void*)data->dest,
253 data->blocks, GEN_BB_WRITE);
255 bounce_buffer_start(&bbstate, (void*)data->src,
257 data->blocks, GEN_BB_READ);
259 dwmci_prepare_data(host, data, cur_idmac,
260 bbstate.bounce_buffer);
264 dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
267 flags = dwmci_set_transfer_mode(host, data);
/* 136-bit response combined with busy is rejected (action elided). */
269 if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
/* STOP must abort outstanding data; other commands wait for it. */
272 if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
273 flags |= DWMCI_CMD_ABORT_STOP;
275 flags |= DWMCI_CMD_PRV_DAT_WAIT;
/* Translate the MMC core's response type into controller flags. */
277 if (cmd->resp_type & MMC_RSP_PRESENT) {
278 flags |= DWMCI_CMD_RESP_EXP;
279 if (cmd->resp_type & MMC_RSP_136)
280 flags |= DWMCI_CMD_RESP_LENGTH;
283 if (cmd->resp_type & MMC_RSP_CRC)
284 flags |= DWMCI_CMD_CHECK_CRC;
286 flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
288 debug("Sending CMD%d\n",cmd->cmdidx);
/* Fire the command. */
290 dwmci_writel(host, DWMCI_CMD, flags);
/* Poll for command-done (retry is declared in elided lines). */
292 for (i = 0; i < retry; i++) {
293 mask = dwmci_readl(host, DWMCI_RINTSTS);
294 if (mask & DWMCI_INTMSK_CDONE) {
296 dwmci_writel(host, DWMCI_RINTSTS, mask);
302 debug("%s: Timeout.\n", __func__);
/* Classify response-phase errors. */
306 if (mask & DWMCI_INTMSK_RTO) {
308 * Timeout here is not necessarily fatal. (e)MMC cards
309 * will splat here when they receive CMD55 as they do
310 * not support this command and that is exactly the way
311 * to tell them apart from SD cards. Thus, this output
312 * below shall be debug(). eMMC cards also do not favor
313 * CMD8, please keep that in mind.
315 debug("%s: Response Timeout.\n", __func__);
317 } else if (mask & DWMCI_INTMSK_RE) {
318 debug("%s: Response Error.\n", __func__);
320 } else if ((cmd->resp_type & MMC_RSP_CRC) &&
321 (mask & DWMCI_INTMSK_RCRC)) {
322 debug("%s: Response CRC Error.\n", __func__);
/* Fetch the response; 136-bit responses span RESP0..RESP3, stored
 * most-significant word first. */
327 if (cmd->resp_type & MMC_RSP_PRESENT) {
328 if (cmd->resp_type & MMC_RSP_136) {
329 cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
330 cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
331 cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
332 cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
334 cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
/* Run the data phase, then tear down DMA state. */
339 ret = dwmci_data_transfer(host, data);
341 /* only dma mode need it */
342 if (!host->fifo_mode) {
343 ctrl = dwmci_readl(host, DWMCI_CTRL);
344 ctrl &= ~(DWMCI_DMA_EN);
345 dwmci_writel(host, DWMCI_CTRL, ctrl);
346 bounce_buffer_stop(&bbstate);
/*
 * dwmci_setup_bus() - program the card clock to @freq.
 *
 * Derives the divider from the source clock (host->get_mmc_clk or
 * host->bus_hz), then performs the clock-update sequence twice: gate
 * clock -> set divider -> CMD_UPD_CLK, then enable the clock with
 * low-power mode -> CMD_UPD_CLK again, each time waiting for the
 * controller to clear CMD_START. (Timeout setup and returns are elided
 * in this excerpt.)
 */
355 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
/* Nothing to do when already at this rate, or when asked for 0 Hz. */
361 if ((freq == host->clock) || (freq == 0))
364 * If host->get_mmc_clk isn't defined,
365 * then assume that host->bus_hz is source clock value.
366 * host->bus_hz should be set by user.
368 if (host->get_mmc_clk)
369 sclk = host->get_mmc_clk(host, freq);
370 else if (host->bus_hz)
373 debug("%s: Didn't get source clock value.\n", __func__);
378 div = 0; /* bypass mode */
/* The 2*freq here implies the divider yields sclk/(2*div) on this IP
 * -- confirm against the databook. */
380 div = DIV_ROUND_UP(sclk, 2 * freq);
/* Gate the card clock while changing the divider. */
382 dwmci_writel(host, DWMCI_CLKENA, 0);
383 dwmci_writel(host, DWMCI_CLKSRC, 0);
385 dwmci_writel(host, DWMCI_CLKDIV, div);
/* Ask the controller to latch the new clock setting. */
386 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
387 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
/* Update is accepted once CMD_START self-clears. */
390 status = dwmci_readl(host, DWMCI_CMD);
392 debug("%s: Timeout!\n", __func__);
395 } while (status & DWMCI_CMD_START);
/* Re-enable the card clock, with low-power (clock gating) mode. */
397 dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
398 DWMCI_CLKEN_LOW_PWR);
/* Second update cycle to latch the enable. */
400 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
401 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
405 status = dwmci_readl(host, DWMCI_CMD);
407 debug("%s: Timeout!\n", __func__);
410 } while (status & DWMCI_CMD_START);
/*
 * dwmci_set_ios() - apply bus width, clock and DDR mode requested by
 * the MMC core.
 *
 * Two signatures are visible (driver-model taking a udevice, legacy
 * taking a struct mmc); presumably selected by CONFIG_DM_MMC guards
 * elided from this excerpt -- confirm against the full file.
 */
418 static int dwmci_set_ios(struct udevice *dev)
420 struct mmc *mmc = mmc_get_mmc_dev(dev);
422 static int dwmci_set_ios(struct mmc *mmc)
425 struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
428 debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
430 dwmci_setup_bus(host, mmc->clock);
/* Map the requested bus width onto the CTYPE encoding (case labels
 * are elided in this excerpt). */
431 switch (mmc->bus_width) {
433 ctype = DWMCI_CTYPE_8BIT;
436 ctype = DWMCI_CTYPE_4BIT;
439 ctype = DWMCI_CTYPE_1BIT;
443 dwmci_writel(host, DWMCI_CTYPE, ctype);
/* Toggle the DDR bit in the UHS register (the condition choosing
 * between set/clear is elided here). */
445 regs = dwmci_readl(host, DWMCI_UHS_REG);
447 regs |= DWMCI_DDR_MODE;
449 regs &= ~DWMCI_DDR_MODE;
451 dwmci_writel(host, DWMCI_UHS_REG, regs);
/*
 * dwmci_init() - one-time controller bring-up.
 *
 * Runs the board hook, powers the card, resets the controller, sets the
 * enumeration clock, masks interrupts and programs FIFO thresholds.
 * (Return statements are elided in this excerpt.)
 */
459 static int dwmci_init(struct mmc *mmc)
461 struct dwmci_host *host = mmc->priv;
/* Give the board a chance to do SoC-specific setup first. */
463 if (host->board_init)
464 host->board_init(host);
/* Power on the card. */
466 dwmci_writel(host, DWMCI_PWREN, 1);
468 if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
469 debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
473 /* Enumerate at 400KHz */
474 dwmci_setup_bus(host, mmc->cfg->f_min);
/* Clear and mask all interrupts; max out the data timeout counter. */
476 dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
477 dwmci_writel(host, DWMCI_INTMASK, 0);
479 dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);
/* Disable IDMAC interrupts; BMOD bit 0 is presumably the IDMAC
 * software-reset bit -- confirm against the databook. */
481 dwmci_writel(host, DWMCI_IDINTEN, 0);
482 dwmci_writel(host, DWMCI_BMOD, 1);
/* Derive default FIFO watermarks from the hardware FIFO depth when
 * the platform did not supply fifoth_val. */
484 if (!host->fifoth_val) {
487 fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
488 fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
489 host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
490 TX_WMARK(fifo_size / 2);
492 dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
/* Leave the card clock gated; dwmci_setup_bus() re-enables it. */
494 dwmci_writel(host, DWMCI_CLKENA, 0);
495 dwmci_writel(host, DWMCI_CLKSRC, 0);
/*
 * dwmci_probe() - driver-model probe hook; delegates the actual
 * controller bring-up to dwmci_init().
 */
501 int dwmci_probe(struct udevice *dev)
503 struct mmc *mmc = mmc_get_mmc_dev(dev);
505 return dwmci_init(mmc);
/* Driver-model ops table -- presumably compiled under CONFIG_DM_MMC
 * (guards elided in this excerpt). */
508 const struct dm_mmc_ops dm_dwmci_ops = {
509 .send_cmd = dwmci_send_cmd,
510 .set_ios = dwmci_set_ios,
/* Legacy (non-DM) ops table, referenced by dwmci_setup_cfg() below. */
514 static const struct mmc_ops dwmci_ops = {
515 .send_cmd = dwmci_send_cmd,
516 .set_ios = dwmci_set_ios,
/*
 * dwmci_setup_cfg() - populate a struct mmc_config for this controller.
 *
 * @cfg:     configuration to fill
 * @host:    controller state (supplies name, caps and bus width)
 * @max_clk: maximum card clock, stored as cfg->f_max
 * @min_clk: minimum card clock, stored as cfg->f_min
 */
521 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
522 u32 max_clk, u32 min_clk)
524 cfg->name = host->name;
525 #ifndef CONFIG_DM_MMC
/* Legacy builds dispatch through the static ops table. */
526 cfg->ops = &dwmci_ops;
528 cfg->f_min = min_clk;
529 cfg->f_max = max_clk;
531 cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
533 cfg->host_caps = host->caps;
/* 8-bit and 4-bit bus modes are advertised mutually exclusively. */
535 if (host->buswidth == 8) {
536 cfg->host_caps |= MMC_MODE_8BIT;
537 cfg->host_caps &= ~MMC_MODE_4BIT;
539 cfg->host_caps |= MMC_MODE_4BIT;
540 cfg->host_caps &= ~MMC_MODE_8BIT;
542 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
544 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
/*
 * dwmci_bind() - driver-model bind hook; registers @mmc with the MMC
 * core using the supplied @cfg.
 */
548 int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
550 return mmc_bind(dev, mmc, cfg);
553 int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
555 dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);
557 host->mmc = mmc_create(&host->cfg, host);
558 if (host->mmc == NULL)