// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

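/*
 * Device-side quirks applied after the attached UFS device is identified,
 * matched by wManufacturerID and model string.
 */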
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		   UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};

static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
	"NAC_RECEIVED",
	"TCx_REPLAY_TIMER_EXPIRED",
	"AFCx_REQUEST_TIMER_EXPIRED",
	"FCx_PROTECTION_TIMER_EXPIRED",
	"CRC_ERROR",
	"RX_BUFFER_OVERFLOW",
	"MAX_FRAME_LENGTH_EXCEEDED",
	"WRONG_SEQUENCE_NUMBER",
	"AFC_FRAME_SYNTAX_ERROR",
	"NAC_FRAME_SYNTAX_ERROR",
	"EOF_SYNTAX_ERROR",
	"FRAME_SYNTAX_ERROR",
	"BAD_CTRL_SYMBOL_TYPE",
	"PA_INIT_ERROR",
	"PA_ERROR_IND_RECEIVED",
	"PA_INIT"
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

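/*
 * Enable or disable UniPro local clock gating by updating the
 * VS_SAVEPOWERCONTROL and VS_DEBUGCLOCKENABLE vendor attributes.
 */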
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}

		/*
		 * Turn on CLK_CG early to bypass an abnormal ERR_CHK
		 * signal and prevent a host hang.
		 */
		ufshcd_writel(hba,
			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
			      REG_UFS_XOUFS_CTRL);
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case, defer probing by returning -EPROBE_DEFER.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}

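/*
 * Request or release the device reference clock through
 * REG_UFS_REFCLK_CTRL and poll until the ACK bit matches the request,
 * honoring the vendor-specific gating/ungating wait times.
 */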
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until the ack bit equals the req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}

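/*
 * Poll the controller state machine through the debug probe register
 * until it leaves the Hibern8 enter/exit states and settles back to
 * the idle (HCE base) state, or the timeout expires.
 */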
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
				    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a specific time after check base */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

		/*
		 * If the state is H8 enter or H8 enter confirm,
		 * wait until it returns to the idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	if (val == state)
		return 0;

	return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret)
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (!host || !host->pm_qos_init)
		return;

	cpu_latency_qos_update_request(&host->pm_qos_req,
				       boost ? 0 : PM_QOS_DEFAULT_VALUE);
}

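/*
 * Power up resources in bottom-up order (mphy, ref-clk, crypt boost,
 * PM QoS) and release them in the reverse order on power down.
 */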
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		ufs_mtk_boost_crypt(hba, on);
		ufs_mtk_boost_pm_qos(hba, on);
	} else {
		ufs_mtk_boost_pm_qos(hba, on);
		ufs_mtk_boost_crypt(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}

/**
 * ufs_mtk_setup_clocks - enable or disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			   (!ufshcd_can_hibern8_during_gating(hba) &&
			    ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
	}

	return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 ver = 0;
	int ret;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

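/*
 * Look up the proper VCC supply when it is not described directly in the
 * device tree: either by an option number queried from SiP firmware
 * ("vcc-opt<N>") or by the UFS specification version ("vcc-ufs<N>").
 */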
#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	u32 ver;
	int err;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}

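/*
 * UFS 3.x devices use VCCQ while older devices use VCCQ2: keep the rail
 * that is actually needed always-on and release the unused one.
 */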
static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling
 * its clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	return 0;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

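/*
 * Use FASTAUTO_MODE for the power mode change only when the platform
 * advertises the capability, the HS rate actually changes, and both
 * directions target FAST_MODE or at least HS-G4.
 */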
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (!ufs_mtk_is_pmc_via_fastauto(hba))
		return false;

	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
		return false;

	if (dev_req_params->pwr_tx != FAST_MODE &&
	    dev_req_params->gear_tx < UFS_HS_G4)
		return false;

	if (dev_req_params->pwr_rx != FAST_MODE &&
	    dev_req_params->gear_rx < UFS_HS_G4)
		return false;

	return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G5;
	host_cap.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

		ret = ufshcd_uic_change_pwr_mode(hba,
						 FASTAUTO_MODE << 4 | FASTAUTO_MODE);

		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
						 dev_req_params->gear_tx,
						 PA_INITIAL_ADAPT);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * If the UIC command fails, forcibly stay in non-LPM mode
		 * so that the default hba_enable_delay_us value is used
		 * when re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
	}
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on the safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	/* Disable reset confirm feature by UniPro */
	ufshcd_writel(hba,
		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
		      REG_UFS_XOUFS_CTRL);

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}

static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct ufs_vreg *vccqx = NULL;

	if (hba->vreg_info.vccq)
		vccqx = hba->vreg_info.vccq;
	else
		vccqx = hba->vreg_info.vccq2;

	regulator_set_mode(vccqx->reg,
			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}

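/*
 * Switch device regulators between low-power and normal mode: VCCQ/VCCQ2
 * is handled first when entering LPM and last when leaving it, with the
 * VSX rails controlled through SiP firmware in between.
 */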
static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
		return;

	/* Skip if VCC is assumed always-on */
	if (!hba->vreg_info.vcc)
		return;

	/* Bypass LPM when device is still active */
	if (lpm && ufshcd_is_ufs_dev_active(hba))
		return;

	/* Bypass LPM if VCC is enabled */
	if (lpm && hba->vreg_info.vcc->enabled)
		return;

	if (lpm) {
		ufs_mtk_vccqx_set_lpm(hba, lpm);
		ufs_mtk_vsx_set_lpm(hba, lpm);
	} else {
		ufs_mtk_vsx_set_lpm(hba, lpm);
		ufs_mtk_vccqx_set_lpm(hba, lpm);
	}
}

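/*
 * Disable auto-hibern8 and wait for the host to leave the Hibern8 state,
 * so that later link state transitions start from a known state.
 */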
static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* wait host return to idle state when auto-hibern8 off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			   enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (!ufshcd_is_auto_hibern8_supported(hba))
			return 0;
		ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

	return 0;
fail:
	/*
	 * Forcibly set the link to the off state so that ufshcd_suspend()
	 * triggers ufshcd_host_reset_and_restore() and performs a complete
	 * host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
	}

	/*
	 * Decide how long to wait before gating the reference clock and
	 * after ungating it, according to each vendor's requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);
	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on, thus no delay is needed
		 * during regulator operations.
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}

	ufs_mtk_vreg_fix_vcc(hba);
	ufs_mtk_vreg_fix_vccqx(hba);
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;
	unsigned long reg = 0;
	unsigned int bit;

	trace_ufs_mtk_event(evt, val);

	/* Print details of UIC Errors */
	if (evt <= UFS_EVT_DME_ERR) {
		dev_info(hba->dev,
			 "Host UIC Error Code (%s): %08x\n",
			 ufs_uic_err_str[evt], val);
		reg = val;
	}

	if (evt == UFS_EVT_PA_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
	}

	if (evt == UFS_EVT_DL_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
	}
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Returns zero for success and non-zero for failure.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
			       DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_system_resume(dev);
}
#endif

static int ufs_mtk_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_runtime_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_runtime_resume(dev);
}

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);