scsi: ufs: ufs-mediatek: Provide detailed description for UIC errors
drivers/ufs/host/ufs-mediatek.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *      Stanley Chu <stanley.chu@mediatek.com>
 *      Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
        { .wmanufacturerid = UFS_ANY_VENDOR,
          .model = UFS_ANY_MODEL,
          .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
        { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
          .model = "H9HQ21AFAMZDAR",
          .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
        {}
};

static const struct of_device_id ufs_mtk_of_match[] = {
        { .compatible = "mediatek,mt8183-ufshci" },
        {},
};

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
        "PHY Adapter Layer",
        "Data Link Layer",
        "Network Link Layer",
        "Transport Link Layer",
        "DME"
};

static const char *const ufs_uic_pa_err_str[] = {
        "PHY error on Lane 0",
        "PHY error on Lane 1",
        "PHY error on Lane 2",
        "PHY error on Lane 3",
        "Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
        "NAC_RECEIVED",
        "TCx_REPLAY_TIMER_EXPIRED",
        "AFCx_REQUEST_TIMER_EXPIRED",
        "FCx_PROTECTION_TIMER_EXPIRED",
        "CRC_ERROR",
        "RX_BUFFER_OVERFLOW",
        "MAX_FRAME_LENGTH_EXCEEDED",
        "WRONG_SEQUENCE_NUMBER",
        "AFC_FRAME_SYNTAX_ERROR",
        "NAC_FRAME_SYNTAX_ERROR",
        "EOF_SYNTAX_ERROR",
        "FRAME_SYNTAX_ERROR",
        "BAD_CTRL_SYMBOL_TYPE",
        "PA_INIT_ERROR",
        "PA_ERROR_IND_RECEIVED",
        "PA_INIT"
};
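
/*
 * Both ufs_uic_pa_err_str[] and ufs_uic_dl_err_str[] are indexed by
 * bit position within the raw PA/DL error register value reported
 * through the UFS event interface; see ufs_mtk_event_notify() below.
 */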

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

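/*
 * Enable or disable UniPro local clock gating: toggle the RX symbol,
 * SYS and TX clock-gate enable bits in VS_SAVEPOWERCONTROL and,
 * inversely, the TX symbol clock force-request bit in
 * VS_DEBUGCLOCKENABLE.
 */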
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
        u32 tmp;

        if (enable) {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp |
                      (1 << RX_SYMBOL_CLK_GATE_EN) |
                      (1 << SYS_CLK_GATE_EN) |
                      (1 << TX_CLK_GATE_EN);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        } else {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
                              (1 << SYS_CLK_GATE_EN) |
                              (1 << TX_CLK_GATE_EN));
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        }
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        ufs_mtk_crypto_ctrl(res, 1);
        if (res.a0) {
                dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
                         __func__, res.a0);
                hba->caps &= ~UFSHCD_CAP_CRYPTO;
        }
}

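/*
 * Full host-side reset: assert the HCI, crypto and UniPro resets,
 * hold them briefly, then release them in the reverse order.
 */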
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        reset_control_assert(host->hci_reset);
        reset_control_assert(host->crypto_reset);
        reset_control_assert(host->unipro_reset);

        usleep_range(100, 110);

        reset_control_deassert(host->unipro_reset);
        reset_control_deassert(host->crypto_reset);
        reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
                                       struct reset_control **rc,
                                       char *str)
{
        *rc = devm_reset_control_get(hba->dev, str);
        if (IS_ERR(*rc)) {
                dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
                         str, PTR_ERR(*rc));
                *rc = NULL;
        }
}

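/*
 * Grab the three named reset lines. A device tree node would reference
 * them roughly as below (illustrative only; see the MediaTek UFS
 * binding for the authoritative layout):
 *
 *      resets = <...>, <...>, <...>;
 *      reset-names = "hci_rst", "unipro_rst", "crypto_rst";
 */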
static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ufs_mtk_init_reset_control(hba, &host->hci_reset,
                                   "hci_rst");
        ufs_mtk_init_reset_control(hba, &host->unipro_reset,
                                   "unipro_rst");
        ufs_mtk_init_reset_control(hba, &host->crypto_reset,
                                   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (status == PRE_CHANGE) {
                if (host->unipro_lpm) {
                        hba->vps->hba_enable_delay_us = 0;
                } else {
                        hba->vps->hba_enable_delay_us = 600;
                        ufs_mtk_host_reset(hba);
                }

                if (hba->caps & UFSHCD_CAP_CRYPTO)
                        ufs_mtk_crypto_enable(hba);

                if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
                        ufshcd_writel(hba, 0,
                                      REG_AUTO_HIBERNATE_IDLE_TIMER);
                        hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
                        hba->ahit = 0;
                }

                /*
                 * Turn on CLK_CG early to bypass the abnormal ERR_CHK
                 * signal and prevent the host from hanging.
                 */
                ufshcd_writel(hba,
                              ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
                              REG_UFS_XOUFS_CTRL);
        }

        return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct device *dev = hba->dev;
        struct device_node *np = dev->of_node;
        int err = 0;

        host->mphy = devm_of_phy_get_by_index(dev, np, 0);

        if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * The UFS driver might be probed before the phy driver.
                 * In that case, return -EPROBE_DEFER so probing is
                 * retried later.
                 */
                err = -EPROBE_DEFER;
                dev_info(dev,
                         "%s: required phy hasn't probed yet. err = %d\n",
                        __func__, err);
        } else if (IS_ERR(host->mphy)) {
                err = PTR_ERR(host->mphy);
                if (err != -ENODEV) {
                        dev_info(dev, "%s: PHY get failed %d\n", __func__,
                                 err);
                }
        }

        if (err)
                host->mphy = NULL;
        /*
         * Allow unbound mphy because not every platform needs specific
         * mphy control.
         */
        if (err == -ENODEV)
                err = 0;

        return err;
}

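/*
 * Request or release the device reference clock via the handshake in
 * REG_UFS_REFCLK_CTRL: write the request (or release) bit, then poll
 * until the ack bit mirrors it, giving up after REFCLK_REQ_TIMEOUT_US.
 * The gating/ungating waits around the handshake honor the delays set
 * up in ufs_mtk_setup_ref_clk_wait_us().
 */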
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct arm_smccc_res res;
        ktime_t timeout, time_checked;
        u32 value;

        if (host->ref_clk_enabled == on)
                return 0;

        ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

        if (on) {
                ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
        } else {
                ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
                ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
        }

        /* Wait for ack */
        timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
        do {
                time_checked = ktime_get();
                value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

                /* Wait until the ack bit equals the req bit */
                if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
                        goto out;

                usleep_range(100, 200);
        } while (ktime_before(time_checked, timeout));

        dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

        ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

        return -ETIMEDOUT;

out:
        host->ref_clk_enabled = on;
        if (on)
                ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

        ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

        return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
                                          u16 gating_us)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (hba->dev_info.clk_gating_wait_us) {
                host->ref_clk_gating_wait_us =
                        hba->dev_info.clk_gating_wait_us;
        } else {
                host->ref_clk_gating_wait_us = gating_us;
        }

        host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
                ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
                ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
                ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
                ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
                ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
        } else {
                ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
        }
}

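/*
 * The two helpers below sample the host state machine through the
 * debug probe: ufs_mtk_dbg_sel() routes the signals of interest to
 * REG_UFS_PROBE, whose low 5 bits hold the VS state and whose top
 * nibble holds the link state.
 */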
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
                            unsigned long retry_ms)
{
        u64 timeout, time_checked;
        u32 val, sm;
        bool wait_idle;

        /* cannot use plain ktime_get() in suspend */
        timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

        /* wait a short while before the first state check */
        udelay(10);
        wait_idle = false;

        do {
                time_checked = ktime_get_mono_fast_ns();
                ufs_mtk_dbg_sel(hba);
                val = ufshcd_readl(hba, REG_UFS_PROBE);

                sm = val & 0x1f;

                /*
                 * If the state machine is anywhere between hibern8
                 * enter and hibern8 exit, wait until it returns to
                 * the idle state.
                 */
                if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
                        wait_idle = true;
                        udelay(50);
                        continue;
                } else if (!wait_idle)
                        break;

                if (wait_idle && (sm == VS_HCE_BASE))
                        break;
        } while (time_checked < timeout);

        if (wait_idle && sm != VS_HCE_BASE)
                dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
                                   unsigned long max_wait_ms)
{
        ktime_t timeout, time_checked;
        u32 val;

        timeout = ktime_add_ms(ktime_get(), max_wait_ms);
        do {
                time_checked = ktime_get();
                ufs_mtk_dbg_sel(hba);
                val = ufshcd_readl(hba, REG_UFS_PROBE);
                val = val >> 28;

                if (val == state)
                        return 0;

                /* Sleep for max. 200us */
                usleep_range(100, 200);
        } while (ktime_before(time_checked, timeout));

        if (val == state)
                return 0;

        return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct phy *mphy = host->mphy;
        struct arm_smccc_res res;
        int ret = 0;

        if (!mphy || !(on ^ host->mphy_powered_on))
                return 0;

        if (on) {
                if (ufs_mtk_is_va09_supported(hba)) {
                        ret = regulator_enable(host->reg_va09);
                        if (ret < 0)
                                goto out;
                        /* wait 200 us to stabilize VA09 */
                        usleep_range(200, 210);
                        ufs_mtk_va09_pwr_ctrl(res, 1);
                }
                phy_power_on(mphy);
        } else {
                phy_power_off(mphy);
                if (ufs_mtk_is_va09_supported(hba)) {
                        ufs_mtk_va09_pwr_ctrl(res, 0);
                        ret = regulator_disable(host->reg_va09);
                        if (ret < 0)
                                goto out;
                }
        }
out:
        if (ret) {
                dev_info(hba->dev,
                         "failed to %s va09: %d\n",
                         on ? "enable" : "disable",
                         ret);
        } else {
                host->mphy_powered_on = on;
        }

        return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
                                struct clk **clk_out)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk))
                err = PTR_ERR(clk);
        else
                *clk_out = clk;

        return err;
}

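/*
 * Boost (or restore) the crypto engine: when boosting, raise vcore to
 * the configured minimum first and then switch the crypt clock mux to
 * the performance parent; when unboosting, switch back to the
 * low-power parent before letting vcore drop.
 */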
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_mtk_crypt_cfg *cfg;
        struct regulator *reg;
        int volt, ret;

        if (!ufs_mtk_is_boost_crypt_enabled(hba))
                return;

        cfg = host->crypt;
        volt = cfg->vcore_volt;
        reg = cfg->reg_vcore;

        ret = clk_prepare_enable(cfg->clk_crypt_mux);
        if (ret) {
                dev_info(hba->dev, "clk_prepare_enable(): %d\n",
                         ret);
                return;
        }

        if (boost) {
                ret = regulator_set_voltage(reg, volt, INT_MAX);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set vcore to %d\n", volt);
                        goto out;
                }

                ret = clk_set_parent(cfg->clk_crypt_mux,
                                     cfg->clk_crypt_perf);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set clk_crypt_perf\n");
                        regulator_set_voltage(reg, 0, INT_MAX);
                        goto out;
                }
        } else {
                ret = clk_set_parent(cfg->clk_crypt_mux,
                                     cfg->clk_crypt_lp);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set clk_crypt_lp\n");
                        goto out;
                }

                ret = regulator_set_voltage(reg, 0, INT_MAX);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set vcore to MIN\n");
                }
        }
out:
        clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
                                 struct clk **clk)
{
        int ret;

        ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
        if (ret) {
                dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
                         name, ret);
        }

        return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_mtk_crypt_cfg *cfg;
        struct device *dev = hba->dev;
        struct regulator *reg;
        u32 volt;

        host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
                                   GFP_KERNEL);
        if (!host->crypt)
                goto disable_caps;

        reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
        if (IS_ERR(reg)) {
                dev_info(dev, "failed to get dvfsrc-vcore: %ld",
                         PTR_ERR(reg));
                goto disable_caps;
        }

        if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
                                 &volt)) {
                dev_info(dev, "failed to get boost-crypt-vcore-min");
                goto disable_caps;
        }

        cfg = host->crypt;
        if (ufs_mtk_init_host_clk(hba, "crypt_mux",
                                  &cfg->clk_crypt_mux))
                goto disable_caps;

        if (ufs_mtk_init_host_clk(hba, "crypt_lp",
                                  &cfg->clk_crypt_lp))
                goto disable_caps;

        if (ufs_mtk_init_host_clk(hba, "crypt_perf",
                                  &cfg->clk_crypt_perf))
                goto disable_caps;

        cfg->reg_vcore = reg;
        cfg->vcore_volt = volt;
        host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
        return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        host->reg_va09 = regulator_get(hba->dev, "va09");
        if (IS_ERR(host->reg_va09))
                dev_info(hba->dev, "failed to get va09");
        else
                host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

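/*
 * Translate the MediaTek-specific device tree properties into
 * UFS_MTK_CAP_* bits: "mediatek,ufs-boost-crypt",
 * "mediatek,ufs-support-va09", "mediatek,ufs-disable-ah8",
 * "mediatek,ufs-broken-vcc" and "mediatek,ufs-pmc-via-fastauto".
 */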
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct device_node *np = hba->dev->of_node;

        if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
                ufs_mtk_init_boost_crypt(hba);

        if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
                ufs_mtk_init_va09_pwr_ctrl(hba);

        if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
                host->caps |= UFS_MTK_CAP_DISABLE_AH8;

        if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
                host->caps |= UFS_MTK_CAP_BROKEN_VCC;

        if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
                host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

        dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (!host || !host->pm_qos_init)
                return;

        cpu_latency_qos_update_request(&host->pm_qos_req,
                                       boost ? 0 : PM_QOS_DEFAULT_VALUE);
}

static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (on) {
                phy_power_on(host->mphy);
                ufs_mtk_setup_ref_clk(hba, on);
                ufs_mtk_boost_crypt(hba, on);
                ufs_mtk_boost_pm_qos(hba, on);
        } else {
                ufs_mtk_boost_pm_qos(hba, on);
                ufs_mtk_boost_crypt(hba, on);
                ufs_mtk_setup_ref_clk(hba, on);
                phy_power_off(host->mphy);
        }
}

/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
                                enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        bool clk_pwr_off = false;
        int ret = 0;

        /*
         * If ufs_mtk_init() is not done yet, simply ignore.
         * ufs_mtk_setup_clocks() is called again from
         * ufs_mtk_init() once initialization completes.
         */
        if (!host)
                return 0;

        if (!on && status == PRE_CHANGE) {
                if (ufshcd_is_link_off(hba)) {
                        clk_pwr_off = true;
                } else if (ufshcd_is_link_hibern8(hba) ||
                         (!ufshcd_can_hibern8_during_gating(hba) &&
                         ufshcd_is_auto_hibern8_enabled(hba))) {
                        /*
                         * Gate ref-clk and poweroff mphy if link state is in
                         * OFF or Hibern8 by either Auto-Hibern8 or
                         * ufshcd_link_state_transition().
                         */
                        ret = ufs_mtk_wait_link_state(hba,
                                                      VS_LINK_HIBERN8,
                                                      15);
                        if (!ret)
                                clk_pwr_off = true;
                }

                if (clk_pwr_off)
                        ufs_mtk_pwr_ctrl(hba, false);
        } else if (on && status == POST_CHANGE) {
                ufs_mtk_pwr_ctrl(hba, true);
        }

        return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        int ret, ver = 0;

        if (host->hw_ver.major)
                return;

        /* Set default (minimum) version anyway */
        host->hw_ver.major = 2;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
        if (!ret) {
                if (ver >= UFS_UNIPRO_VER_1_8) {
                        host->hw_ver.major = 3;
                        /*
                         * Fix HCI version for some platforms with
                         * incorrect version
                         */
                        if (hba->ufs_version < ufshci_version(3, 0))
                                hba->ufs_version = ufshci_version(3, 0);
                }
        }
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
        return hba->ufs_version;
}

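/*
 * Late VCC fixup: if no VCC regulator was populated at probe time,
 * derive its supply name either from a firmware-provided index
 * ("vcc-optN", via the ufs_mtk_get_vcc_num() SMC query) or from the
 * UFS specification version ("vcc-ufsN"), then get and enable it.
 */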
#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
        struct ufs_vreg_info *info = &hba->vreg_info;
        struct device_node *np = hba->dev->of_node;
        struct device *dev = hba->dev;
        char vcc_name[MAX_VCC_NAME];
        struct arm_smccc_res res;
        int err, ver;

        if (hba->vreg_info.vcc)
                return 0;

        if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
                ufs_mtk_get_vcc_num(res);
                if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
                        snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
                else
                        return -ENODEV;
        } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
                ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
                snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
        } else {
                return 0;
        }

        err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
        if (err)
                return err;

        err = ufshcd_get_vreg(dev, info->vcc);
        if (err)
                return err;

        err = regulator_enable(info->vcc->reg);
        if (!err) {
                info->vcc->enabled = true;
                dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
        }

        return err;
}

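/*
 * UFS 3.x devices are powered by VCCQ, earlier devices by VCCQ2.
 * Keep the regulator the attached device actually uses always-on and
 * release the unused one.
 */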
static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
        struct ufs_vreg_info *info = &hba->vreg_info;
        struct ufs_vreg **vreg_on, **vreg_off;

        if (hba->dev_info.wspecversion >= 0x0300) {
                vreg_on = &info->vccq;
                vreg_off = &info->vccq2;
        } else {
                vreg_on = &info->vccq2;
                vreg_off = &info->vccq;
        }

        if (*vreg_on)
                (*vreg_on)->always_on = true;

        if (*vreg_off) {
                regulator_disable((*vreg_off)->reg);
                devm_kfree(hba->dev, (*vreg_off)->name);
                devm_kfree(hba->dev, *vreg_off);
                *vreg_off = NULL;
        }
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if PHY binding should be retried, a negative
 * error code on PHY power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
        const struct of_device_id *id;
        struct device *dev = hba->dev;
        struct ufs_mtk_host *host;
        int err = 0;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
                goto out;
        }

        host->hba = hba;
        ufshcd_set_variant(hba, host);

        id = of_match_device(ufs_mtk_of_match, dev);
        if (!id) {
                err = -EINVAL;
                goto out;
        }

        /* Initialize host capability */
        ufs_mtk_init_host_caps(hba);

        err = ufs_mtk_bind_mphy(hba);
        if (err)
                goto out_variant_clear;

        ufs_mtk_init_reset(hba);

        /* Enable runtime autosuspend */
        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        /* Enable clock-gating */
        hba->caps |= UFSHCD_CAP_CLK_GATING;

        /* Enable inline encryption */
        hba->caps |= UFSHCD_CAP_CRYPTO;

        /* Enable WriteBooster */
        hba->caps |= UFSHCD_CAP_WB_EN;
        hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
        hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

        if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
                hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

        /*
         * ufshcd_vops_init() is invoked after
         * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
         * phy clock setup is skipped.
         *
         * Enable phy clocks specifically here.
         */
        ufs_mtk_mphy_power_on(hba, true);
        ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

        host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

        goto out;

out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}

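/*
 * Use FASTAUTO for the power-mode change only when the capability is
 * enabled, the HS rate actually changes, and each direction either
 * targets FAST_MODE or requests HS-G4 or above.
 */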
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
                                     struct ufs_pa_layer_attr *dev_req_params)
{
        if (!ufs_mtk_is_pmc_via_fastauto(hba))
                return false;

        if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
                return false;

        if (dev_req_params->pwr_tx != FAST_MODE &&
            dev_req_params->gear_tx < UFS_HS_G4)
                return false;

        if (dev_req_params->pwr_rx != FAST_MODE &&
            dev_req_params->gear_rx < UFS_HS_G4)
                return false;

        return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
                                  struct ufs_pa_layer_attr *dev_max_params,
                                  struct ufs_pa_layer_attr *dev_req_params)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_dev_params host_cap;
        int ret;

        ufshcd_init_pwr_dev_param(&host_cap);
        host_cap.hs_rx_gear = UFS_HS_G5;
        host_cap.hs_tx_gear = UFS_HS_G5;

        ret = ufshcd_get_pwr_dev_param(&host_cap,
                                       dev_max_params,
                                       dev_req_params);
        if (ret) {
                pr_info("%s: failed to determine capabilities\n",
                        __func__);
        }

        if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
                               dev_req_params->lane_tx);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
                               dev_req_params->lane_rx);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
                               dev_req_params->hs_rate);

                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
                               PA_NO_ADAPT);

                ret = ufshcd_uic_change_pwr_mode(hba,
                                        FASTAUTO_MODE << 4 | FASTAUTO_MODE);

                if (ret) {
                        dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
                                __func__, ret);
                }
        }

        if (host->hw_ver.major >= 3) {
                ret = ufshcd_dme_configure_adapt(hba,
                                           dev_req_params->gear_tx,
                                           PA_INITIAL_ADAPT);
        }

        return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status stage,
                                     struct ufs_pa_layer_attr *dev_max_params,
                                     struct ufs_pa_layer_attr *dev_req_params)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
                                             dev_req_params);
                break;
        case POST_CHANGE:
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
        int ret;
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ret = ufshcd_dme_set(hba,
                             UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
                             lpm ? 1 : 0);
        if (!ret || !lpm) {
                /*
                 * Forcibly record non-LPM mode if the UIC command
                 * failed, so that the default hba_enable_delay_us
                 * value is used when re-enabling the host.
                 */
                host->unipro_lpm = lpm;
        }

        return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
        int ret;
        u32 tmp;

        ufs_mtk_get_controller_version(hba);

        ret = ufs_mtk_unipro_set_lpm(hba, false);
        if (ret)
                return ret;

        /*
         * Setting PA_Local_TX_LCC_Enable to 0 before link startup
         * to make sure that both host and device TX LCC are disabled
         * once link startup is completed.
         */
        ret = ufshcd_disable_host_tx_lcc(hba);
        if (ret)
                return ret;

        /* disable deep stall */
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
        if (ret)
                return ret;

        tmp &= ~(1 << 6);

        ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

        return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
        u32 ah_ms;

        if (ufshcd_is_clkgating_allowed(hba)) {
                if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
                        ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
                                          hba->ahit);
                else
                        ah_ms = 10;
                ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
        }
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
        /* enable unipro clock gating feature */
        ufs_mtk_cfg_unipro_cg(hba, true);

        /* will be configured during probing the hba */
        if (ufshcd_is_auto_hibern8_supported(hba))
                hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
                        FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

        ufs_mtk_setup_clk_gating(hba);

        return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status stage)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_link(hba);
                break;
        case POST_CHANGE:
                ret = ufs_mtk_post_link(hba);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        /* disable hba before device reset */
        ufshcd_hba_stop(hba);

        ufs_mtk_device_reset_ctrl(0, res);

        /*
         * The reset signal is active low. UFS devices shall detect a
         * positive or negative RST_n pulse width of 1 us or more.
         *
         * To be on the safe side, keep reset low for at least 10 us.
         */
        usleep_range(10, 15);

        ufs_mtk_device_reset_ctrl(1, res);

        /* Some devices may need time to respond to rst_n */
        usleep_range(10000, 15000);

        dev_info(hba->dev, "device reset done\n");

        return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
        int err;

        err = ufshcd_hba_enable(hba);
        if (err)
                return err;

        err = ufs_mtk_unipro_set_lpm(hba, false);
        if (err)
                return err;

        err = ufshcd_uic_hibern8_exit(hba);
        if (!err)
                ufshcd_set_link_active(hba);
        else
                return err;

        err = ufshcd_make_hba_operational(hba);
        if (err)
                return err;

        return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
        int err;

        /* Disable reset confirm feature by UniPro */
        ufshcd_writel(hba,
                      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
                      REG_UFS_XOUFS_CTRL);

        err = ufs_mtk_unipro_set_lpm(hba, true);
        if (err) {
                /* Resume UniPro state for subsequent error recovery */
                ufs_mtk_unipro_set_lpm(hba, false);
                return err;
        }

        return 0;
}

static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
        struct ufs_vreg *vccqx = NULL;

        if (hba->vreg_info.vccq)
                vccqx = hba->vreg_info.vccq;
        else
                vccqx = hba->vreg_info.vccq2;

        regulator_set_mode(vccqx->reg,
                           lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
        struct arm_smccc_res res;

        ufs_mtk_device_pwr_ctrl(!lpm,
                                (unsigned long)hba->dev_info.wspecversion,
                                res);
}

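/*
 * Put the device rails into (or out of) low-power mode. Entering LPM
 * is skipped while the device is still active or while VCC remains
 * enabled; the VCCQx and VSx rails are sequenced in opposite order on
 * entry and exit.
 */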
static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
        if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
                return;

        /* Skip if VCC is assumed always-on */
        if (!hba->vreg_info.vcc)
                return;

        /* Bypass LPM when device is still active */
        if (lpm && ufshcd_is_ufs_dev_active(hba))
                return;

        /* Bypass LPM if VCC is enabled */
        if (lpm && hba->vreg_info.vcc->enabled)
                return;

        if (lpm) {
                ufs_mtk_vccqx_set_lpm(hba, lpm);
                ufs_mtk_vsx_set_lpm(hba, lpm);
        } else {
                ufs_mtk_vsx_set_lpm(hba, lpm);
                ufs_mtk_vccqx_set_lpm(hba, lpm);
        }
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
        int ret;

        /* disable auto-hibern8 */
        ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

        /* wait for the host to return to idle once auto-hibern8 is off */
        ufs_mtk_wait_idle_state(hba, 5);

        ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
        if (ret)
                dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
        enum ufs_notify_change_status status)
{
        int err;
        struct arm_smccc_res res;

        if (status == PRE_CHANGE) {
                if (!ufshcd_is_auto_hibern8_supported(hba))
                        return 0;
                ufs_mtk_auto_hibern8_disable(hba);
                return 0;
        }

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_lpm(hba);
                if (err)
                        goto fail;
        }

        if (!ufshcd_is_link_active(hba)) {
                /*
                 * Make sure no error is returned here, to prevent
                 * ufshcd_suspend() from re-enabling regulators while
                 * the vreg is still in low-power mode.
                 */
                err = ufs_mtk_mphy_power_on(hba, false);
                if (err)
                        goto fail;
        }

        if (ufshcd_is_link_off(hba))
                ufs_mtk_device_reset_ctrl(0, res);

        ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

        return 0;
fail:
        /*
         * Forcibly set the link to the off state to trigger
         * ufshcd_host_reset_and_restore() in ufshcd_suspend()
         * for a complete host reset.
         */
        ufshcd_set_link_off(hba);
        return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        int err;
        struct arm_smccc_res res;

        if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
                ufs_mtk_dev_vreg_set_lpm(hba, false);

        ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

        err = ufs_mtk_mphy_power_on(hba, true);
        if (err)
                goto fail;

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_hpm(hba);
                if (err)
                        goto fail;
        }

        return 0;
fail:
        return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

        ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

        ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
                         REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
                         "MPHY Ctrl ");

        /* Direct debugging information to REG_UFS_PROBE */
        ufs_mtk_dbg_sel(hba);
        ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
        struct ufs_dev_info *dev_info = &hba->dev_info;
        u16 mid = dev_info->wmanufacturerid;

        if (mid == UFS_VENDOR_SAMSUNG) {
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
        }

        /*
         * Decide waiting time before gating reference clock and
         * after ungating reference clock according to vendors'
         * requirements.
         */
        if (mid == UFS_VENDOR_SAMSUNG)
                ufs_mtk_setup_ref_clk_wait_us(hba, 1);
        else if (mid == UFS_VENDOR_SKHYNIX)
                ufs_mtk_setup_ref_clk_wait_us(hba, 30);
        else if (mid == UFS_VENDOR_TOSHIBA)
                ufs_mtk_setup_ref_clk_wait_us(hba, 100);
        else
                ufs_mtk_setup_ref_clk_wait_us(hba,
                                              REFCLK_DEFAULT_WAIT_US);
        return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
        ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

        if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
            (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
                hba->vreg_info.vcc->always_on = true;
                /*
                 * VCC will be kept always-on thus we don't
                 * need any delay during regulator operations
                 */
                hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
                        UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
        }

        ufs_mtk_vreg_fix_vcc(hba);
        ufs_mtk_vreg_fix_vccqx(hba);
}

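/*
 * Trace every UFS event and, for UIC errors, decode the raw register
 * value: the event type selects the layer name from ufs_uic_err_str[],
 * and each set bit of a PA or DL error value is translated through
 * ufs_uic_pa_err_str[] / ufs_uic_dl_err_str[].
 */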
static void ufs_mtk_event_notify(struct ufs_hba *hba,
                                 enum ufs_event_type evt, void *data)
{
        unsigned int val = *(u32 *)data;
        unsigned long reg;
        u8 bit;

        trace_ufs_mtk_event(evt, val);

        /* Print details of UIC Errors */
        if (evt <= UFS_EVT_DME_ERR) {
                dev_info(hba->dev,
                         "Host UIC Error Code (%s): %08x\n",
                         ufs_uic_err_str[evt], val);
                reg = val;
        }

        if (evt == UFS_EVT_PA_ERR) {
                for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
                        dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
        }

        if (evt == UFS_EVT_DL_ERR) {
                for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
                        dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
        }
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .name                = "mediatek.ufshci",
        .init                = ufs_mtk_init,
        .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
        .setup_clocks        = ufs_mtk_setup_clocks,
        .hce_enable_notify   = ufs_mtk_hce_enable_notify,
        .link_startup_notify = ufs_mtk_link_startup_notify,
        .pwr_change_notify   = ufs_mtk_pwr_change_notify,
        .apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
        .fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
        .suspend             = ufs_mtk_suspend,
        .resume              = ufs_mtk_resume,
        .dbg_register_dump   = ufs_mtk_dbg_register_dump,
        .device_reset        = ufs_mtk_device_reset,
        .event_notify        = ufs_mtk_event_notify,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;
        struct device_node *reset_node;
        struct platform_device *reset_pdev;
        struct device_link *link;

        reset_node = of_find_compatible_node(NULL, NULL,
                                             "ti,syscon-reset");
        if (!reset_node) {
                dev_notice(dev, "find ti,syscon-reset fail\n");
                goto skip_reset;
        }
        reset_pdev = of_find_device_by_node(reset_node);
        if (!reset_pdev) {
                dev_notice(dev, "find reset_pdev fail\n");
                goto skip_reset;
        }
        link = device_link_add(dev, &reset_pdev->dev,
                DL_FLAG_AUTOPROBE_CONSUMER);
        put_device(&reset_pdev->dev);
        if (!link) {
                dev_notice(dev, "add reset device_link fail\n");
                goto skip_reset;
        }
        /* supplier is not probed */
        if (link->status == DL_STATE_DORMANT) {
                err = -EPROBE_DEFER;
                goto out;
        }

skip_reset:
        /* perform generic probe */
        err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
        if (err)
                dev_info(dev, "probe failed %d\n", err);

        of_node_put(reset_node);
        return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always return 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&(pdev)->dev);
        ufshcd_remove(hba);
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret;

        ret = ufshcd_system_suspend(dev);
        if (ret)
                return ret;

        ufs_mtk_dev_vreg_set_lpm(hba, true);

        return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        ufs_mtk_dev_vreg_set_lpm(hba, false);

        return ufshcd_system_resume(dev);
}
#endif

static int ufs_mtk_runtime_suspend(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret = 0;

        ret = ufshcd_runtime_suspend(dev);
        if (ret)
                return ret;

        ufs_mtk_dev_vreg_set_lpm(hba, true);

        return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        ufs_mtk_dev_vreg_set_lpm(hba, false);

        return ufshcd_runtime_resume(dev);
}

static const struct dev_pm_ops ufs_mtk_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
                                ufs_mtk_system_resume)
        SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
                           ufs_mtk_runtime_resume, NULL)
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
        .probe      = ufs_mtk_probe,
        .remove     = ufs_mtk_remove,
        .shutdown   = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "ufshcd-mtk",
                .pm     = &ufs_mtk_pm_ops,
                .of_match_table = ufs_mtk_of_match,
        },
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);