// SPDX-License-Identifier: GPL-2.0-only
/*
 * Amlogic SD/eMMC driver for the GX/S905 family SoCs
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/interrupt.h>
#include <linux/bitfield.h>
#include <linux/pinctrl/consumer.h>
#define DRIVER_NAME "meson-gx-mmc"

#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_MASK GENMASK(5, 0)
#define CLK_SRC_MASK GENMASK(7, 6)
#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define CLK_TX_PHASE_MASK GENMASK(11, 10)
#define CLK_RX_PHASE_MASK GENMASK(13, 12)
#define CLK_PHASE_0 0
#define CLK_PHASE_180 2
#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
#define CLK_V2_ALWAYS_ON BIT(24)
#define CLK_V2_IRQ_SDIO_SLEEP BIT(25)

#define CLK_V3_TX_DELAY_MASK GENMASK(21, 16)
#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
#define CLK_V3_ALWAYS_ON BIT(28)
#define CLK_V3_IRQ_SDIO_SLEEP BIT(29)

#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask)
#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask)
#define CLK_ALWAYS_ON(h) (h->data->always_on)
#define CLK_IRQ_SDIO_SLEEP(h) (h->data->irq_sdio_sleep)
#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define ADJUST_ADJ_DELAY_MASK GENMASK(21, 16)
#define ADJUST_DS_EN BIT(15)
#define ADJUST_ADJ_EN BIT(13)

#define SD_EMMC_DELAY1 0x4
#define SD_EMMC_DELAY2 0x8
#define SD_EMMC_V3_ADJUST 0xc

#define SD_EMMC_CALOUT 0x10
#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_MASK GENMASK(31, 2)
#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_MASK GENMASK(7, 4)
#define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define CFG_RC_CC_MASK GENMASK(15, 12)
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_CHK_DS BIT(20)
#define CFG_AUTO_CLK BIT(23)
#define CFG_ERR_ABORT BIT(27)

#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)
#define STATUS_DESC_BUSY BIT(30)
#define STATUS_DATI GENMASK(23, 16)
#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_CRC_ERR \
	(IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_TIMEOUTS \
	(IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)
#define IRQ_EN_MASK \
	(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN)
#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_SRAM_DATA_BUF_LEN 1536
#define SD_EMMC_SRAM_DATA_BUF_OFF 0x200

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2
struct meson_mmc_data {
	unsigned int tx_delay_mask;
	unsigned int rx_delay_mask;
	unsigned int always_on;
	unsigned int adjust;
	unsigned int irq_sdio_sleep;
};

struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

struct meson_host {
	struct device *dev;
	struct meson_mmc_data *data;
	struct mmc_host *mmc;
	struct mmc_command *cmd;

	void __iomem *regs;
	struct clk *core_clk;
	struct clk *mux_clk;
	struct clk *mmc_clk;
	unsigned long req_rate;
	bool ddr;

	bool dram_access_quirk;

	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_clk_gate;

	unsigned int bounce_buf_size;
	void *bounce_buf;
	void __iomem *bounce_iomem_buf;
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;
	dma_addr_t descs_dma_addr;

	int irq;

	bool vqmmc_enabled;
	bool needs_pre_post_req;

	spinlock_t lock;
};
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)
static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
{
	unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;

	if (!timeout)
		return SD_EMMC_CMD_TIMEOUT_DATA;

	timeout = roundup_pow_of_two(timeout);

	return min(timeout, 32768U); /* max. 2^15 ms */
}
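
/*
 * Worked example (illustrative): a card requesting a 3s data timeout
 * yields 3000ms, rounded up to the next power of two, i.e. 4096ms.
 * The power-of-two form matters because callers store
 * ilog2(timeout) in the 4-bit CMD_CFG_TIMEOUT_MASK field, and the
 * 32768U clamp keeps that encoded value within 2^15.
 */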
static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
{
	if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
		return cmd->mrq->cmd;
	else if (mmc_op_multi(cmd->opcode) &&
		 (!cmd->mrq->sbc || cmd->error || cmd->data->error))
		return cmd->mrq->stop;
	else
		return NULL;
}
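
/*
 * Example flow (illustrative): a pre-defined multi-block read chains
 * CMD23 (MMC_SET_BLOCK_COUNT) -> CMD18 (mrq->cmd) with no stop command.
 * For an open-ended multi-block transfer (no sbc), or after an error,
 * the stop command (CMD12, mrq->stop) is issued instead.
 */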
static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
					struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct scatterlist *sg;
	int i;

	/*
	 * When Controller DMA cannot directly access DDR memory, disable
	 * support for Chain Mode to directly use the internal SRAM using
	 * the bounce buffer mode.
	 */
	if (host->dram_access_quirk)
		return;

	/* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
	if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
		/*
		 * In block mode DMA descriptor format, "length" field indicates
		 * number of blocks and there is no way to pass a DMA size that
		 * is not a multiple of the SDIO block size, making it impossible
		 * to tie more than one memory buffer to a single SDIO block.
		 * Block mode sg buffer size should be aligned with the SDIO
		 * block size, otherwise chain mode could not be used.
		 */
		for_each_sg(data->sg, sg, data->sg_len, i) {
			if (sg->length % data->blksz) {
				dev_warn_once(mmc_dev(mmc),
					      "unaligned sg len %u blksize %u, disabling descriptor DMA for transfer\n",
					      sg->length, data->blksz);
				return;
			}
		}
	}

	for_each_sg(data->sg, sg, data->sg_len, i) {
		/* check for 8 byte alignment */
		if (sg->offset % 8) {
			dev_warn_once(mmc_dev(mmc),
				      "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
				      sg->offset);
			return;
		}
	}

	data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}
static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
{
	return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
{
	return data && data->flags & MMC_DATA_READ &&
	       !meson_mmc_desc_chain_mode(data);
}
static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	meson_mmc_get_transfer_mode(mmc, mrq);
	data->host_cookie |= SD_EMMC_PRE_REQ_DONE;

	if (!meson_mmc_desc_chain_mode(data))
		return;

	data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				    mmc_get_dma_dir(data));
	if (!data->sg_count)
		dev_err(mmc_dev(mmc), "dma_map_sg failed");
}

static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       int err)
{
	struct mmc_data *data = mrq->data;

	if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
}
/*
 * Gating the clock on this controller is tricky. It seems the mmc clock
 * is also used by the controller, and it may crash during some operations
 * if the clock is stopped. The safest thing to do, whenever possible, is
 * to keep the clock running and stop it at the pad using the pinmux.
 */
static void meson_mmc_clk_gate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate) {
		pinctrl_select_state(host->pinctrl, host->pins_clk_gate);
	} else {
		/*
		 * If the pinmux is not provided - default to the classic and
		 * unsafe method
		 */
		cfg = readl(host->regs + SD_EMMC_CFG);
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}
}

static void meson_mmc_clk_ungate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate)
		pinctrl_select_default_state(host->dev);

	/* Make sure the clock is not stopped in the controller */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);
}
static int meson_mmc_clk_set(struct meson_host *host, unsigned long rate,
			     bool ddr)
{
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 cfg;

	/* Same request - bail-out */
	if (host->ddr == ddr && host->req_rate == rate)
		return 0;

	/* stop clock */
	meson_mmc_clk_gate(host);
	host->req_rate = 0;
	mmc->actual_clock = 0;

	/* return with clock being stopped */
	if (!rate)
		return 0;

	/* Stop the clock during rate change to avoid glitches */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg |= CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	if (ddr) {
		/* DDR modes require higher module clock */
		rate <<= 1;
		cfg |= CFG_DDR;
	} else {
		cfg &= ~CFG_DDR;
	}
	writel(cfg, host->regs + SD_EMMC_CFG);
	host->ddr = ddr;

	ret = clk_set_rate(host->mmc_clk, rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			rate, ret);
		return ret;
	}

	host->req_rate = rate;
	mmc->actual_clock = clk_get_rate(host->mmc_clk);

	/* We should report the real output frequency of the controller */
	if (ddr) {
		host->req_rate >>= 1;
		mmc->actual_clock >>= 1;
	}

	dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock);
	if (rate != mmc->actual_clock)
		dev_dbg(host->dev, "requested rate was %lu\n", rate);

	/* (re)start clock */
	meson_mmc_clk_ungate(host);

	return 0;
}
/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock. Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	struct clk_mux *mux;
	struct clk_divider *div;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_parent[1];
	u32 clk_reg;

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = CLK_ALWAYS_ON(host);
	clk_reg |= CLK_DIV_MASK;
	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
	clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk))
			return dev_err_probe(host->dev, PTR_ERR(clk),
					     "Missing clock %s\n", name);

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;

	mux->reg = host->regs + SD_EMMC_CLOCK;
	mux->shift = __ffs(CLK_SRC_MASK);
	mux->mask = CLK_SRC_MASK >> mux->shift;
	mux->hw.init = &init;

	host->mux_clk = devm_clk_register(host->dev, &mux->hw);
	if (WARN_ON(IS_ERR(host->mux_clk)))
		return PTR_ERR(host->mux_clk);

	/* create the divider */
	div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_parent[0] = __clk_get_name(host->mux_clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	div->reg = host->regs + SD_EMMC_CLOCK;
	div->shift = __ffs(CLK_DIV_MASK);
	div->width = __builtin_popcountl(CLK_DIV_MASK);
	div->hw.init = &init;
	div->flags = CLK_DIVIDER_ONE_BASED;

	host->mmc_clk = devm_clk_register(host->dev, &div->hw);
	if (WARN_ON(IS_ERR(host->mmc_clk)))
		return PTR_ERR(host->mmc_clk);

	/* init the MMC clock to its minimum rate */
	host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000);
	ret = clk_set_rate(host->mmc_clk, host->mmc->f_min);
	if (ret)
		return ret;

	return clk_prepare_enable(host->mmc_clk);
}
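
/*
 * Divider math (illustrative, assuming the usual 24MHz xtal on clkin0):
 * CLK_DIV_MASK is 6 bits wide and the divider is one-based, so
 * mmc_clk = clkin / div with div in [1, 63]. 24MHz / 60 = 400kHz,
 * which is why clk_round_rate(host->mmc_clk, 400000) above lands on a
 * usable f_min for card initialization.
 */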
static void meson_mmc_disable_resampling(struct meson_host *host)
{
	unsigned int val = readl(host->regs + host->data->adjust);

	val &= ~ADJUST_ADJ_EN;
	writel(val, host->regs + host->data->adjust);
}

static void meson_mmc_reset_resampling(struct meson_host *host)
{
	unsigned int val;

	meson_mmc_disable_resampling(host);

	val = readl(host->regs + host->data->adjust);
	val &= ~ADJUST_ADJ_DELAY_MASK;
	writel(val, host->regs + host->data->adjust);
}
static int meson_mmc_resampling_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);
	unsigned int val, dly, max_dly, i;
	int ret;

	/* Resampling is done using the source clock */
	max_dly = DIV_ROUND_UP(clk_get_rate(host->mux_clk),
			       clk_get_rate(host->mmc_clk));

	val = readl(host->regs + host->data->adjust);
	val |= ADJUST_ADJ_EN;
	writel(val, host->regs + host->data->adjust);

	if (mmc_doing_retune(mmc))
		dly = FIELD_GET(ADJUST_ADJ_DELAY_MASK, val) + 1;
	else
		dly = 0;

	for (i = 0; i < max_dly; i++) {
		val &= ~ADJUST_ADJ_DELAY_MASK;
		val |= FIELD_PREP(ADJUST_ADJ_DELAY_MASK, (dly + i) % max_dly);
		writel(val, host->regs + host->data->adjust);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (!ret) {
			dev_dbg(mmc_dev(mmc), "resampling delay: %u\n",
				(dly + i) % max_dly);
			return 0;
		}
	}

	meson_mmc_reset_resampling(host);
	return -EIO;
}
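
/*
 * Sizing example (illustrative): with the mux parent running at 1GHz
 * and mmc_clk at 200MHz, max_dly = DIV_ROUND_UP(1000, 200) = 5, so the
 * loop above sweeps five resampling phases, one per period of the
 * source clock, until one of them passes the tuning command.
 */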
static int meson_mmc_prepare_ios_clock(struct meson_host *host,
				       struct mmc_ios *ios)
{
	bool ddr;

	switch (ios->timing) {
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_UHS_DDR50:
		ddr = true;
		break;

	default:
		ddr = false;
		break;
	}

	return meson_mmc_clk_set(host, ios->clock, ddr);
}
static void meson_mmc_check_resampling(struct meson_host *host,
				       struct mmc_ios *ios)
{
	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
	case MMC_TIMING_MMC_DDR52:
		meson_mmc_disable_resampling(host);
		break;
	}
}
static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct meson_host *host = mmc_priv(mmc);
	int err;
	u32 bus_width, val;

	/*
	 * GPIO regulator, only controls switching between 1v8 and
	 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
	 */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;

	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		break;

	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			int ret = regulator_enable(mmc->supply.vqmmc);

			if (ret < 0)
				dev_err(host->dev,
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		break;
	}

	/* Bus width */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = CFG_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = CFG_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = CFG_BUS_WIDTH_8;
		break;
	default:
		dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
			ios->bus_width);
		bus_width = CFG_BUS_WIDTH_4;
	}

	val = readl(host->regs + SD_EMMC_CFG);
	val &= ~CFG_BUS_WIDTH_MASK;
	val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);
	writel(val, host->regs + SD_EMMC_CFG);

	meson_mmc_check_resampling(host, ios);
	err = meson_mmc_prepare_ios_clock(host, ios);
	if (err)
		dev_err(host->dev, "Failed to set clock: %d\n", err);

	dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val);
}
static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	host->cmd = NULL;
	if (host->needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
	mmc_request_done(host->mmc, mrq);
}
static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 cfg, blksz_old;

	cfg = readl(host->regs + SD_EMMC_CFG);
	blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);

	if (!is_power_of_2(blksz))
		dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);

	blksz = ilog2(blksz);

	/* check if block-size matches, if not update */
	if (blksz == blksz_old)
		return;

	dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
		blksz_old, blksz);

	cfg &= ~CFG_BLK_LEN_MASK;
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
	writel(cfg, host->regs + SD_EMMC_CFG);
}
static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
{
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			*cmd_cfg |= CMD_CFG_RESP_128;
		*cmd_cfg |= CMD_CFG_RESP_NUM;

		if (!(cmd->flags & MMC_RSP_CRC))
			*cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			*cmd_cfg |= CMD_CFG_R1B;
	} else {
		*cmd_cfg |= CMD_CFG_NO_RESP;
	}
}
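
/*
 * Mapping example (illustrative): an R2 response (CMD2/CMD9) has
 * MMC_RSP_136 set, so it gets CMD_CFG_RESP_128; an R3 response (CMD1)
 * lacks MMC_RSP_CRC, so it gets CMD_CFG_RESP_NOCRC; an R1b response
 * (e.g. CMD6) sets CMD_CFG_R1B so the controller waits out the busy
 * signaling on DAT0.
 */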
static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}
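
/*
 * Chain layout example (illustrative): an 8-block read split across two
 * scatterlist entries of 4 blocks each becomes two descriptors, both
 * with CMD_CFG_BLOCK_MODE and length = 4; the second one carries
 * CMD_CFG_NO_CMD (data only, no new command) and CMD_CFG_END_OF_CHAIN
 * so the controller stops after it.
 */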
/* local sg copy for dram_access_quirk */
static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
				  size_t buflen, bool to_buffer)
{
	unsigned int sg_flags = SG_MITER_ATOMIC;
	struct scatterlist *sgl = data->sg;
	unsigned int nents = data->sg_len;
	struct sg_mapping_iter miter;
	unsigned int offset = 0;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int buf_offset = 0;
		unsigned int len, left;
		u32 *buf = miter.addr;

		len = min(miter.length, buflen - offset);
		left = len;

		if (to_buffer) {
			while (left >= sizeof(u32)) {
				writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
				buf_offset += sizeof(u32);
				left -= sizeof(u32);
			}
		} else {
			while (left >= sizeof(u32)) {
				*buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
				buf_offset += sizeof(u32);
				left -= sizeof(u32);
			}
		}

		offset += len;
	}

	sg_miter_stop(&miter);
}
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
	cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			if (host->dram_access_quirk)
				meson_mmc_copy_buffer(host, data, xfer_bytes, true);
			else
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}
static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	/* Reject request if any element offset or size is not 32bit aligned */
	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
		    !IS_ALIGNED(sg->length, sizeof(u32))) {
			dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
				data->sg->offset, data->sg->length);
			return -EINVAL;
		}
	}

	return 0;
}
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	host->needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	/*
	 * The memory at the end of the controller used as bounce buffer for
	 * the dram_access_quirk only accepts 32bit read/write access,
	 * check the alignment and length of the data before starting the request.
	 */
	if (host->dram_access_quirk && mrq->data) {
		mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
		if (mrq->cmd->error) {
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	if (host->needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		if (!meson_mmc_desc_chain_mode(mrq->data))
			host->needs_pre_post_req = false;
	}

	if (host->needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);
}
static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
		cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
		cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}
}
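
/*
 * Note the word order above: for 136-bit responses the hardware places
 * the most significant word in SD_EMMC_CMD_RSP3, while the mmc core
 * expects it in resp[0], hence the reversed copy.
 */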
static void __meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 reg_irqen = IRQ_EN_MASK;

	if (enable)
		reg_irqen |= IRQ_SDIO;
	writel(reg_irqen, host->regs + SD_EMMC_IRQ_EN);
}
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *cmd;
	u32 status, raw_status;
	irqreturn_t ret = IRQ_NONE;

	raw_status = readl(host->regs + SD_EMMC_STATUS);
	status = raw_status & (IRQ_EN_MASK | IRQ_SDIO);

	if (!status) {
		dev_dbg(host->dev,
			"Unexpected IRQ! irq_en 0x%08lx - status 0x%08x\n",
			IRQ_EN_MASK | IRQ_SDIO, raw_status);
		return IRQ_NONE;
	}

	if (WARN_ON(!host))
		return IRQ_NONE;

	/* ack all raised interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);

	cmd = host->cmd;

	if (status & IRQ_SDIO) {
		spin_lock(&host->lock);
		__meson_mmc_enable_sdio_irq(host->mmc, 0);
		sdio_signal_irq(host->mmc);
		spin_unlock(&host->lock);
		status &= ~IRQ_SDIO;
		if (!status)
			return IRQ_HANDLED;
	}

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	cmd->error = 0;
	if (status & IRQ_CRC_ERR) {
		dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
		cmd->error = -EILSEQ;
		ret = IRQ_WAKE_THREAD;
		goto out;
	}

	if (status & IRQ_TIMEOUTS) {
		dev_dbg(host->dev, "Timeout - status 0x%08x\n", status);
		cmd->error = -ETIMEDOUT;
		ret = IRQ_WAKE_THREAD;
		goto out;
	}

	meson_mmc_read_resp(host->mmc, cmd);

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		struct mmc_data *data = cmd->data;

		if (data && !cmd->error)
			data->bytes_xfered = data->blksz * data->blocks;
		if (meson_mmc_bounce_buf_read(data) ||
		    meson_mmc_get_next_command(cmd))
			ret = IRQ_WAKE_THREAD;
		else
			ret = IRQ_HANDLED;
	}

out:
	if (cmd->error) {
		/* Stop desc in case of errors */
		u32 start = readl(host->regs + SD_EMMC_START);

		start &= ~START_DESC_BUSY;
		writel(start, host->regs + SD_EMMC_START);
	}

	if (ret == IRQ_HANDLED)
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return ret;
}
static int meson_mmc_wait_desc_stop(struct meson_host *host)
{
	u32 status;

	/*
	 * The descriptor engine may sometimes take a while to actually halt.
	 * Here, we are giving it 5ms to comply.
	 *
	 * If we don't confirm the descriptor is stopped, it might raise new
	 * IRQs after we have called mmc_request_done() which is bad.
	 */

	return readl_poll_timeout(host->regs + SD_EMMC_STATUS, status,
				  !(status & (STATUS_BUSY | STATUS_DESC_BUSY)),
				  100, 5000);
}
static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	if (cmd->error) {
		meson_mmc_wait_desc_stop(host);
		meson_mmc_request_done(host->mmc, cmd->mrq);

		return IRQ_HANDLED;
	}

	data = cmd->data;
	if (meson_mmc_bounce_buf_read(data)) {
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		if (host->dram_access_quirk)
			meson_mmc_copy_buffer(host, data, xfer_bytes, false);
		else
			sg_copy_from_buffer(data->sg, data->sg_len,
					    host->bounce_buf, xfer_bytes);
	}

	next_cmd = meson_mmc_get_next_command(cmd);
	if (next_cmd)
		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return IRQ_HANDLED;
}
/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts. For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
	int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS)
		return 1; /* assume present */

	return status;
}
static void meson_mmc_cfg_init(struct meson_host *host)
{
	u32 cfg = 0;

	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
	cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));

	/* abort chain on R/W errors */
	cfg |= CFG_ERR_ABORT;

	writel(cfg, host->regs + SD_EMMC_CFG);
}
static int meson_mmc_card_busy(struct mmc_host *mmc)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 regval;

	regval = readl(host->regs + SD_EMMC_STATUS);

	/* We are only interested in lines 0 to 3, so mask the other ones */
	return !(FIELD_GET(STATUS_DATI, regval) & 0xf);
}
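
/*
 * Illustrative reading of STATUS_DATI: the field mirrors the DAT bus,
 * and a card signals busy by driving DAT0 low, so the check above
 * reports busy only while the four masked lines all read low.
 */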
static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret;

	/* vqmmc regulator is available */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		/*
		 * The usual amlogic setup uses a GPIO to switch from one
		 * regulator to the other. While the voltage ramp up is
		 * pretty fast, care must be taken when switching from 3.3v
		 * to 1.8v. Please make sure the regulator framework is aware
		 * of your own regulator constraints.
		 */
		ret = mmc_regulator_set_vqmmc(mmc, ios);
		return ret < 0 ? ret : 0;
	}

	/* no vqmmc regulator, assume fixed regulator at 3/3.3V */
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return 0;

	return -EINVAL;
}
static void meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct meson_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	__meson_mmc_enable_sdio_irq(mmc, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void meson_mmc_ack_sdio_irq(struct mmc_host *mmc)
{
	meson_mmc_enable_sdio_irq(mmc, 1);
}
static const struct mmc_host_ops meson_mmc_ops = {
	.request = meson_mmc_request,
	.set_ios = meson_mmc_set_ios,
	.get_cd = meson_mmc_get_cd,
	.pre_req = meson_mmc_pre_req,
	.post_req = meson_mmc_post_req,
	.execute_tuning = meson_mmc_resampling_tuning,
	.card_busy = meson_mmc_card_busy,
	.start_signal_voltage_switch = meson_mmc_voltage_switch,
	.enable_sdio_irq = meson_mmc_enable_sdio_irq,
	.ack_sdio_irq = meson_mmc_ack_sdio_irq,
};
static int meson_mmc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	/* The G12A SDIO Controller needs an SRAM bounce buffer */
	host->dram_access_quirk = device_property_read_bool(&pdev->dev,
					"amlogic,dram-access-quirk");

	/* Get regulators and the supported OCR mask */
	host->vqmmc_enabled = false;
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto free_host;

	ret = mmc_of_parse(mmc);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
		goto free_host;
	}

	host->data = (struct meson_mmc_data *)
		of_device_get_match_data(&pdev->dev);
	if (!host->data) {
		ret = -EINVAL;
		goto free_host;
	}

	ret = device_reset_optional(&pdev->dev);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "device reset failed\n");
		goto free_host;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs)) {
		ret = PTR_ERR(host->regs);
		goto free_host;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0) {
		ret = -EINVAL;
		goto free_host;
	}

	host->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);
		goto free_host;
	}

	host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
						   "clk-gate");
	if (IS_ERR(host->pins_clk_gate)) {
		dev_warn(&pdev->dev,
			 "can't get clk-gate pinctrl, using clk_stop bit\n");
		host->pins_clk_gate = NULL;
	}

	host->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(host->core_clk)) {
		ret = PTR_ERR(host->core_clk);
		goto free_host;
	}

	ret = clk_prepare_enable(host->core_clk);
	if (ret)
		goto free_host;

	ret = meson_mmc_clk_init(host);
	if (ret)
		goto err_core_clk;

	/* set config to sane default */
	meson_mmc_cfg_init(host);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* clear, ack and enable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);

	ret = request_threaded_irq(host->irq, meson_mmc_irq,
				   meson_mmc_irq_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), host);
	if (ret)
		goto err_init_clk;

	spin_lock_init(&host->lock);

	mmc->caps |= MMC_CAP_CMD23;

	if (mmc->caps & MMC_CAP_SDIO_IRQ)
		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->dram_access_quirk) {
		/* Limit segments to 1 due to low available sram memory */
		mmc->max_segs = 1;
		/* Limit to the available sram memory */
		mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN /
				     mmc->max_blk_size;
	} else {
		mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
		mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
				sizeof(struct sd_emmc_desc);
	}
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * At the moment, we don't know how to reliably enable HS400.
	 * From the different datasheets, it is not even clear if this mode
	 * is officially supported by any of the SoCs.
	 */
	mmc->caps2 &= ~MMC_CAP2_HS400;

	if (host->dram_access_quirk) {
		/*
		 * The MMC Controller embeds 1,5KiB of internal SRAM
		 * that can be used as a bounce buffer.
		 * In the case of the G12A SDIO controller, use these
		 * instead of the DDR memory.
		 */
		host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
		host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
		host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
	} else {
		/* data bounce buffer */
		host->bounce_buf_size = mmc->max_req_size;
		host->bounce_buf =
			dmam_alloc_coherent(host->dev, host->bounce_buf_size,
					    &host->bounce_dma_addr, GFP_KERNEL);
		if (host->bounce_buf == NULL) {
			dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
			ret = -ENOMEM;
			goto err_free_irq;
		}
	}

	host->descs = dmam_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
					  &host->descs_dma_addr, GFP_KERNEL);
	if (!host->descs) {
		dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
		ret = -ENOMEM;
		goto err_free_irq;
	}

	mmc->ops = &meson_mmc_ops;
	ret = mmc_add_host(mmc);
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(host->irq, host);
err_init_clk:
	clk_disable_unprepare(host->mmc_clk);
err_core_clk:
	clk_disable_unprepare(host->core_clk);
free_host:
	mmc_free_host(mmc);
	return ret;
}
static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	mmc_remove_host(host->mmc);

	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	free_irq(host->irq, host);

	clk_disable_unprepare(host->mmc_clk);
	clk_disable_unprepare(host->core_clk);

	mmc_free_host(host->mmc);
	return 0;
}
static const struct meson_mmc_data meson_gx_data = {
	.tx_delay_mask = CLK_V2_TX_DELAY_MASK,
	.rx_delay_mask = CLK_V2_RX_DELAY_MASK,
	.always_on = CLK_V2_ALWAYS_ON,
	.adjust = SD_EMMC_ADJUST,
	.irq_sdio_sleep = CLK_V2_IRQ_SDIO_SLEEP,
};

static const struct meson_mmc_data meson_axg_data = {
	.tx_delay_mask = CLK_V3_TX_DELAY_MASK,
	.rx_delay_mask = CLK_V3_RX_DELAY_MASK,
	.always_on = CLK_V3_ALWAYS_ON,
	.adjust = SD_EMMC_V3_ADJUST,
	.irq_sdio_sleep = CLK_V3_IRQ_SDIO_SLEEP,
};

static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxbb-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxl-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxm-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-axg-mmc", .data = &meson_axg_data },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
static struct platform_driver meson_mmc_driver = {
	.probe = meson_mmc_probe,
	.remove = meson_mmc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = meson_mmc_of_match,
	},
};

module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX*/AXG SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");