// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *      Stanley Chu <stanley.chu@mediatek.com>
 *      Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sched/clock.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
        { .wmanufacturerid = UFS_ANY_VENDOR,
          .model = UFS_ANY_MODEL,
          .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
        { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
          .model = "H9HQ21AFAMZDAR",
          .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
        {}
};

static const struct of_device_id ufs_mtk_of_match[] = {
        { .compatible = "mediatek,mt8183-ufshci" },
        {},
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

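/*
 * UniPro clock gating is controlled through the vendor-specific
 * VS_SAVEPOWERCONTROL and VS_DEBUGCLOCKENABLE MIB attributes: enabling
 * sets the RX symbol, system and TX clock-gate bits and releases the
 * forced TX symbol clock request; disabling does the reverse.
 */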
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
        u32 tmp;

        if (enable) {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp |
                      (1 << RX_SYMBOL_CLK_GATE_EN) |
                      (1 << SYS_CLK_GATE_EN) |
                      (1 << TX_CLK_GATE_EN);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        } else {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
                              (1 << SYS_CLK_GATE_EN) |
                              (1 << TX_CLK_GATE_EN));
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        }
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        ufs_mtk_crypto_ctrl(res, 1);
        if (res.a0) {
                dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
                         __func__, res.a0);
                hba->caps &= ~UFSHCD_CAP_CRYPTO;
        }
}

static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        reset_control_assert(host->hci_reset);
        reset_control_assert(host->crypto_reset);
        reset_control_assert(host->unipro_reset);

        usleep_range(100, 110);

        reset_control_deassert(host->unipro_reset);
        reset_control_deassert(host->crypto_reset);
        reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
                                       struct reset_control **rc,
                                       char *str)
{
        *rc = devm_reset_control_get(hba->dev, str);
        if (IS_ERR(*rc)) {
                dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
                         str, PTR_ERR(*rc));
                *rc = NULL;
        }
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ufs_mtk_init_reset_control(hba, &host->hci_reset,
                                   "hci_rst");
        ufs_mtk_init_reset_control(hba, &host->unipro_reset,
                                   "unipro_rst");
        ufs_mtk_init_reset_control(hba, &host->crypto_reset,
                                   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (status == PRE_CHANGE) {
                if (host->unipro_lpm) {
                        hba->vps->hba_enable_delay_us = 0;
                } else {
                        hba->vps->hba_enable_delay_us = 600;
                        ufs_mtk_host_reset(hba);
                }

                if (hba->caps & UFSHCD_CAP_CRYPTO)
                        ufs_mtk_crypto_enable(hba);

                if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
                        ufshcd_writel(hba, 0,
                                      REG_AUTO_HIBERNATE_IDLE_TIMER);
                        hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
                        hba->ahit = 0;
                }

                /*
                 * Turn on CLK_CG early to bypass an abnormal ERR_CHK
                 * signal and prevent a host hang.
                 */
                ufshcd_writel(hba,
                              ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
                              REG_UFS_XOUFS_CTRL);
        }

        return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct device *dev = hba->dev;
        struct device_node *np = dev->of_node;
        int err = 0;

        host->mphy = devm_of_phy_get_by_index(dev, np, 0);

        if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * The UFS driver might be probed before the phy driver.
                 * In that case, return -EPROBE_DEFER so that probing is
                 * retried once the phy driver is available.
                 */
                err = -EPROBE_DEFER;
                dev_info(dev,
                         "%s: required phy hasn't probed yet. err = %d\n",
                         __func__, err);
        } else if (IS_ERR(host->mphy)) {
                err = PTR_ERR(host->mphy);
                if (err != -ENODEV) {
                        dev_info(dev, "%s: PHY get failed %d\n", __func__,
                                 err);
                }
        }

        if (err)
                host->mphy = NULL;
        /*
         * Allow unbound mphy because not every platform needs specific
         * mphy control.
         */
        if (err == -ENODEV)
                err = 0;

        return err;
}

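/*
 * Reference-clock handshake: the host writes a request (or release) to
 * REG_UFS_REFCLK_CTRL, then polls the same register until the ack bit
 * mirrors the request bit, giving up after REFCLK_REQ_TIMEOUT_US. The
 * "(value & REFCLK_ACK) >> 1" comparison below implies the ack flag
 * sits one bit above the request flag.
 */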
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct arm_smccc_res res;
        ktime_t timeout, time_checked;
        u32 value;

        if (host->ref_clk_enabled == on)
                return 0;

        ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

        if (on) {
                ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
        } else {
                ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
                ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
        }

        /* Wait for ack */
        timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
        do {
                time_checked = ktime_get();
                value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

                /* Wait until the ack bit equals the req bit */
                if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
                        goto out;

                usleep_range(100, 200);
        } while (ktime_before(time_checked, timeout));

        dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

        ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

        return -ETIMEDOUT;

out:
        host->ref_clk_enabled = on;
        if (on)
                ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

        ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

        return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
                                          u16 gating_us)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (hba->dev_info.clk_gating_wait_us) {
                host->ref_clk_gating_wait_us =
                        hba->dev_info.clk_gating_wait_us;
        } else {
                host->ref_clk_gating_wait_us = gating_us;
        }

        host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
                ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
                ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
                ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
                ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
                ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
        } else {
                ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
        }
}

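/*
 * Note: this poll runs in the suspend path, so it uses
 * ktime_get_mono_fast_ns() (safe while timekeeping is suspended) rather
 * than plain ktime_get(), and it samples the controller state machine
 * through the debug probe register selected by ufs_mtk_dbg_sel().
 */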
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
                            unsigned long retry_ms)
{
        u64 timeout, time_checked;
        u32 val, sm;
        bool wait_idle;

        /* cannot use plain ktime_get() in suspend */
        timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

        /* wait a short time after capturing the time base */
        udelay(10);
        wait_idle = false;

        do {
                time_checked = ktime_get_mono_fast_ns();
                ufs_mtk_dbg_sel(hba);
                val = ufshcd_readl(hba, REG_UFS_PROBE);

                sm = val & 0x1f;

                /*
                 * If the state machine is in H8 enter or H8 enter confirm,
                 * wait until it returns to the idle state.
                 */
                if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
                        wait_idle = true;
                        udelay(50);
                        continue;
                } else if (!wait_idle)
                        break;

                if (wait_idle && (sm == VS_HCE_BASE))
                        break;
        } while (time_checked < timeout);

        if (wait_idle && sm != VS_HCE_BASE)
                dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
                                   unsigned long max_wait_ms)
{
        ktime_t timeout, time_checked;
        u32 val;

        timeout = ktime_add_ms(ktime_get(), max_wait_ms);
        do {
                time_checked = ktime_get();
                ufs_mtk_dbg_sel(hba);
                val = ufshcd_readl(hba, REG_UFS_PROBE);
                val = val >> 28;

                if (val == state)
                        return 0;

                /* Sleep for max. 200us */
                usleep_range(100, 200);
        } while (ktime_before(time_checked, timeout));

        if (val == state)
                return 0;

        return -ETIMEDOUT;
}

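/*
 * MPHY power sequencing: when VA09 power control is supported, the VA09
 * regulator is enabled and given ~200 us to settle, then VA09 power is
 * switched on via an SMC call before the PHY itself is powered on;
 * powering off reverses this order.
 */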
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct phy *mphy = host->mphy;
        struct arm_smccc_res res;
        int ret = 0;

        if (!mphy || !(on ^ host->mphy_powered_on))
                return 0;

        if (on) {
                if (ufs_mtk_is_va09_supported(hba)) {
                        ret = regulator_enable(host->reg_va09);
                        if (ret < 0)
                                goto out;
                        /* wait 200 us to stabilize VA09 */
                        usleep_range(200, 210);
                        ufs_mtk_va09_pwr_ctrl(res, 1);
                }
                phy_power_on(mphy);
        } else {
                phy_power_off(mphy);
                if (ufs_mtk_is_va09_supported(hba)) {
                        ufs_mtk_va09_pwr_ctrl(res, 0);
                        ret = regulator_disable(host->reg_va09);
                        if (ret < 0)
                                goto out;
                }
        }
out:
        if (ret) {
                dev_info(hba->dev,
                         "failed to %s va09: %d\n",
                         on ? "enable" : "disable",
                         ret);
        } else {
                host->mphy_powered_on = on;
        }

        return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
                                struct clk **clk_out)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk))
                err = PTR_ERR(clk);
        else
                *clk_out = clk;

        return err;
}

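/*
 * "Boost" mode for the inline crypto engine raises the vcore rail to the
 * platform-specified minimum voltage and reparents the crypt clock mux to
 * its high-performance parent; leaving boost reparents the mux to the
 * low-power clock and lets vcore drop back to its minimum.
 */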
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_mtk_crypt_cfg *cfg;
        struct regulator *reg;
        int volt, ret;

        if (!ufs_mtk_is_boost_crypt_enabled(hba))
                return;

        cfg = host->crypt;
        volt = cfg->vcore_volt;
        reg = cfg->reg_vcore;

        ret = clk_prepare_enable(cfg->clk_crypt_mux);
        if (ret) {
                dev_info(hba->dev, "clk_prepare_enable(): %d\n",
                         ret);
                return;
        }

        if (boost) {
                ret = regulator_set_voltage(reg, volt, INT_MAX);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set vcore to %d\n", volt);
                        goto out;
                }

                ret = clk_set_parent(cfg->clk_crypt_mux,
                                     cfg->clk_crypt_perf);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set clk_crypt_perf\n");
                        regulator_set_voltage(reg, 0, INT_MAX);
                        goto out;
                }
        } else {
                ret = clk_set_parent(cfg->clk_crypt_mux,
                                     cfg->clk_crypt_lp);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set clk_crypt_lp\n");
                        goto out;
                }

                ret = regulator_set_voltage(reg, 0, INT_MAX);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set vcore to MIN\n");
                }
        }
out:
        clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
                                 struct clk **clk)
{
        int ret;

        ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
        if (ret) {
                dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
                         name, ret);
        }

        return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_mtk_crypt_cfg *cfg;
        struct device *dev = hba->dev;
        struct regulator *reg;
        u32 volt;

        host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
                                   GFP_KERNEL);
        if (!host->crypt)
                goto disable_caps;

        reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
        if (IS_ERR(reg)) {
                dev_info(dev, "failed to get dvfsrc-vcore: %ld",
                         PTR_ERR(reg));
                goto disable_caps;
        }

        if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
                                 &volt)) {
                dev_info(dev, "failed to get boost-crypt-vcore-min");
                goto disable_caps;
        }

        cfg = host->crypt;
        if (ufs_mtk_init_host_clk(hba, "crypt_mux",
                                  &cfg->clk_crypt_mux))
                goto disable_caps;

        if (ufs_mtk_init_host_clk(hba, "crypt_lp",
                                  &cfg->clk_crypt_lp))
                goto disable_caps;

        if (ufs_mtk_init_host_clk(hba, "crypt_perf",
                                  &cfg->clk_crypt_perf))
                goto disable_caps;

        cfg->reg_vcore = reg;
        cfg->vcore_volt = volt;
        host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
        return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        host->reg_va09 = regulator_get(hba->dev, "va09");
        if (IS_ERR(host->reg_va09))
                dev_info(hba->dev, "failed to get va09");
        else
                host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct device_node *np = hba->dev->of_node;

        if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
                ufs_mtk_init_boost_crypt(hba);

        if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
                ufs_mtk_init_va09_pwr_ctrl(hba);

        if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
                host->caps |= UFS_MTK_CAP_DISABLE_AH8;

        if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
                host->caps |= UFS_MTK_CAP_BROKEN_VCC;

        if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
                host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

        dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (!host || !host->pm_qos_init)
                return;

        cpu_latency_qos_update_request(&host->pm_qos_req,
                                       boost ? 0 : PM_QOS_DEFAULT_VALUE);
}

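/*
 * Power control helpers are applied symmetrically: powering up goes
 * PHY -> reference clock -> crypto boost -> PM QoS, and powering down
 * releases them in the reverse order.
 */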
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (on) {
                phy_power_on(host->mphy);
                ufs_mtk_setup_ref_clk(hba, on);
                ufs_mtk_boost_crypt(hba, on);
                ufs_mtk_boost_pm_qos(hba, on);
        } else {
                ufs_mtk_boost_pm_qos(hba, on);
                ufs_mtk_boost_crypt(hba, on);
                ufs_mtk_setup_ref_clk(hba, on);
                phy_power_off(host->mphy);
        }
}

/**
 * ufs_mtk_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: if true, enable clocks; otherwise disable them
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
                                enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        bool clk_pwr_off = false;
        int ret = 0;

        /*
         * If ufs_mtk_init() has not completed yet, simply ignore the
         * request. ufs_mtk_setup_clocks() will be called again from
         * ufs_mtk_init() once initialization is done.
         */
        if (!host)
                return 0;

        if (!on && status == PRE_CHANGE) {
                if (ufshcd_is_link_off(hba)) {
                        clk_pwr_off = true;
                } else if (ufshcd_is_link_hibern8(hba) ||
                         (!ufshcd_can_hibern8_during_gating(hba) &&
                         ufshcd_is_auto_hibern8_enabled(hba))) {
                        /*
                         * Gate ref-clk and poweroff mphy if link state is in
                         * OFF or Hibern8 by either Auto-Hibern8 or
                         * ufshcd_link_state_transition().
                         */
                        ret = ufs_mtk_wait_link_state(hba,
                                                      VS_LINK_HIBERN8,
                                                      15);
                        if (!ret)
                                clk_pwr_off = true;
                }

                if (clk_pwr_off)
                        ufs_mtk_pwr_ctrl(hba, false);
        } else if (on && status == POST_CHANGE) {
                ufs_mtk_pwr_ctrl(hba, true);
        }

        return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        int ret, ver = 0;

        if (host->hw_ver.major)
                return;

        /* Set default (minimum) version anyway */
        host->hw_ver.major = 2;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
        if (!ret) {
                if (ver >= UFS_UNIPRO_VER_1_8) {
                        host->hw_ver.major = 3;
                        /*
                         * Fix HCI version for some platforms with
                         * incorrect version
                         */
                        if (hba->ufs_version < ufshci_version(3, 0))
                                hba->ufs_version = ufshci_version(3, 0);
                }
        }
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
        return hba->ufs_version;
}

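/*
 * The VCC supply may be named either by an option number queried from
 * firmware via an SMC call ("vcc-optN", with mediatek,ufs-vcc-by-num) or
 * by the major UFS spec version of the attached device ("vcc-ufsN", with
 * mediatek,ufs-vcc-by-ver); ufs_mtk_vreg_fix_vcc() below resolves and
 * enables the matching regulator.
 */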
#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
        struct ufs_vreg_info *info = &hba->vreg_info;
        struct device_node *np = hba->dev->of_node;
        struct device *dev = hba->dev;
        char vcc_name[MAX_VCC_NAME];
        struct arm_smccc_res res;
        int err, ver;

        if (hba->vreg_info.vcc)
                return 0;

        if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
                ufs_mtk_get_vcc_num(res);
                if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
                        snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
                else
                        return -ENODEV;
        } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
                ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
                snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
        } else {
                return 0;
        }

        err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
        if (err)
                return err;

        err = ufshcd_get_vreg(dev, info->vcc);
        if (err)
                return err;

        err = regulator_enable(info->vcc->reg);
        if (!err) {
                info->vcc->enabled = true;
                dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
        }

        return err;
}

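/*
 * UFS 3.x devices use VCCQ while older devices use VCCQ2: keep the rail
 * the device actually needs always-on and release the unused one.
 */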
static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
        struct ufs_vreg_info *info = &hba->vreg_info;
        struct ufs_vreg **vreg_on, **vreg_off;

        if (hba->dev_info.wspecversion >= 0x0300) {
                vreg_on = &info->vccq;
                vreg_off = &info->vccq2;
        } else {
                vreg_on = &info->vccq2;
                vreg_off = &info->vccq;
        }

        if (*vreg_on)
                (*vreg_on)->always_on = true;

        if (*vreg_off) {
                regulator_disable((*vreg_off)->reg);
                devm_kfree(hba->dev, (*vreg_off)->name);
                devm_kfree(hba->dev, *vreg_off);
                *vreg_off = NULL;
        }
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Bind the PHY to the controller and power it up, enabling the
 * required clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on
 * PHY power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
        const struct of_device_id *id;
        struct device *dev = hba->dev;
        struct ufs_mtk_host *host;
        int err = 0;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
                goto out;
        }

        host->hba = hba;
        ufshcd_set_variant(hba, host);

        id = of_match_device(ufs_mtk_of_match, dev);
        if (!id) {
                err = -EINVAL;
                goto out;
        }

        /* Initialize host capability */
        ufs_mtk_init_host_caps(hba);

        err = ufs_mtk_bind_mphy(hba);
        if (err)
                goto out_variant_clear;

        ufs_mtk_init_reset(hba);

        /* Enable runtime autosuspend */
        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        /* Enable clock-gating */
        hba->caps |= UFSHCD_CAP_CLK_GATING;

        /* Enable inline encryption */
        hba->caps |= UFSHCD_CAP_CRYPTO;

        /* Enable WriteBooster */
        hba->caps |= UFSHCD_CAP_WB_EN;
        hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
        hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

        if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
                hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

        /*
         * ufshcd_vops_init() is invoked after
         * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
         * phy clock setup is skipped.
         *
         * Enable phy clocks specifically here.
         */
        ufs_mtk_mphy_power_on(hba, true);
        ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

        host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

        goto out;

out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}

static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
                                     struct ufs_pa_layer_attr *dev_req_params)
{
        if (!ufs_mtk_is_pmc_via_fastauto(hba))
                return false;

        if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
                return false;

        if (dev_req_params->pwr_tx != FAST_MODE &&
            dev_req_params->gear_tx < UFS_HS_G4)
                return false;

        if (dev_req_params->pwr_rx != FAST_MODE &&
            dev_req_params->gear_rx < UFS_HS_G4)
                return false;

        return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
                                  struct ufs_pa_layer_attr *dev_max_params,
                                  struct ufs_pa_layer_attr *dev_req_params)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_dev_params host_cap;
        int ret;

        ufshcd_init_pwr_dev_param(&host_cap);
        host_cap.hs_rx_gear = UFS_HS_G5;
        host_cap.hs_tx_gear = UFS_HS_G5;

        ret = ufshcd_get_pwr_dev_param(&host_cap,
                                       dev_max_params,
                                       dev_req_params);
        if (ret) {
                pr_info("%s: failed to determine capabilities\n",
                        __func__);
        }

        if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
                               dev_req_params->lane_tx);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
                               dev_req_params->lane_rx);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
                               dev_req_params->hs_rate);

                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
                               PA_NO_ADAPT);

                ret = ufshcd_uic_change_pwr_mode(hba,
                                        FASTAUTO_MODE << 4 | FASTAUTO_MODE);

                if (ret) {
                        dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
                                __func__, ret);
                }
        }

        if (host->hw_ver.major >= 3) {
                ret = ufshcd_dme_configure_adapt(hba,
                                           dev_req_params->gear_tx,
                                           PA_INITIAL_ADAPT);
        }

        return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status stage,
                                     struct ufs_pa_layer_attr *dev_max_params,
                                     struct ufs_pa_layer_attr *dev_req_params)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
                                             dev_req_params);
                break;
        case POST_CHANGE:
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
        int ret;
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ret = ufshcd_dme_set(hba,
                             UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
                             lpm ? 1 : 0);
        if (!ret || !lpm) {
                /*
                 * If the UIC command failed, forcibly record non-LPM mode
                 * so that the default hba_enable_delay_us value is used
                 * when re-enabling the host.
                 */
                host->unipro_lpm = lpm;
        }

        return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
        int ret;
        u32 tmp;

        ufs_mtk_get_controller_version(hba);

        ret = ufs_mtk_unipro_set_lpm(hba, false);
        if (ret)
                return ret;

        /*
         * Setting PA_Local_TX_LCC_Enable to 0 before link startup
         * to make sure that both host and device TX LCC are disabled
         * once link startup is completed.
         */
        ret = ufshcd_disable_host_tx_lcc(hba);
        if (ret)
                return ret;

        /* disable deep stall */
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
        if (ret)
                return ret;

        tmp &= ~(1 << 6);

        ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

        return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
        u32 ah_ms;

        if (ufshcd_is_clkgating_allowed(hba)) {
                if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
                        ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
                                          hba->ahit);
                else
                        ah_ms = 10;
                ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
        }
}

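/*
 * The auto-hibern8 idle timer set below encodes a value of 10 with scale
 * code 3; per the UFSHCI AHIT field encoding, scale 3 selects 1 ms units,
 * so the link auto-enters hibern8 after roughly 10 ms of idle time.
 */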
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
        /* enable unipro clock gating feature */
        ufs_mtk_cfg_unipro_cg(hba, true);

        /* will be configured during probe hba */
        if (ufshcd_is_auto_hibern8_supported(hba))
                hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
                        FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

        ufs_mtk_setup_clk_gating(hba);

        return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status stage)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_link(hba);
                break;
        case POST_CHANGE:
                ret = ufs_mtk_post_link(hba);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        /* disable hba before device reset */
        ufshcd_hba_stop(hba);

        ufs_mtk_device_reset_ctrl(0, res);

        /*
         * The reset signal is active low. UFS devices are required to
         * detect a positive or negative RST_n pulse of at least 1 us.
         *
         * To be on the safe side, keep the reset low for at least 10 us.
         */
        usleep_range(10, 15);

        ufs_mtk_device_reset_ctrl(1, res);

        /* Some devices may need time to respond to rst_n */
        usleep_range(10000, 15000);

        dev_info(hba->dev, "device reset done\n");

        return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
        int err;

        err = ufshcd_hba_enable(hba);
        if (err)
                return err;

        err = ufs_mtk_unipro_set_lpm(hba, false);
        if (err)
                return err;

        err = ufshcd_uic_hibern8_exit(hba);
        if (!err)
                ufshcd_set_link_active(hba);
        else
                return err;

        err = ufshcd_make_hba_operational(hba);
        if (err)
                return err;

        return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
        int err;

        /* Disable reset confirm feature by UniPro */
        ufshcd_writel(hba,
                      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
                      REG_UFS_XOUFS_CTRL);

        err = ufs_mtk_unipro_set_lpm(hba, true);
        if (err) {
                /* Resume UniPro state for following error recovery */
                ufs_mtk_unipro_set_lpm(hba, false);
                return err;
        }

        return 0;
}

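/*
 * Device-regulator low-power handling: the VCCQ/VCCQ2 rail is switched
 * between REGULATOR_MODE_IDLE and REGULATOR_MODE_NORMAL, while the VSx
 * rails are toggled via an SMC call into firmware.
 */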
static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
        struct ufs_vreg *vccqx = NULL;

        if (hba->vreg_info.vccq)
                vccqx = hba->vreg_info.vccq;
        else
                vccqx = hba->vreg_info.vccq2;

        regulator_set_mode(vccqx->reg,
                           lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
        struct arm_smccc_res res;

        ufs_mtk_device_pwr_ctrl(!lpm,
                                (unsigned long)hba->dev_info.wspecversion,
                                res);
}

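/*
 * On LPM entry the VCCQx rail is idled before the VSx rails are dropped;
 * on exit the order is reversed so VSx is restored before VCCQx returns
 * to normal mode.
 */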
static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
        if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
                return;

        /* Skip if VCC is assumed always-on */
        if (!hba->vreg_info.vcc)
                return;

        /* Bypass LPM when device is still active */
        if (lpm && ufshcd_is_ufs_dev_active(hba))
                return;

        /* Bypass LPM if VCC is enabled */
        if (lpm && hba->vreg_info.vcc->enabled)
                return;

        if (lpm) {
                ufs_mtk_vccqx_set_lpm(hba, lpm);
                ufs_mtk_vsx_set_lpm(hba, lpm);
        } else {
                ufs_mtk_vsx_set_lpm(hba, lpm);
                ufs_mtk_vccqx_set_lpm(hba, lpm);
        }
}

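/*
 * Disabling auto-hibern8 takes more than clearing the idle timer: the
 * host is polled back to an idle state and the link is confirmed up
 * (VS_LINK_UP) before hibern8 can safely be considered off.
 */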
static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
        int ret;

        /* disable auto-hibern8 */
        ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

        /* wait host return to idle state when auto-hibern8 off */
        ufs_mtk_wait_idle_state(hba, 5);

        ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
        if (ret)
                dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
        enum ufs_notify_change_status status)
{
        int err;
        struct arm_smccc_res res;

        if (status == PRE_CHANGE) {
                if (!ufshcd_is_auto_hibern8_supported(hba))
                        return 0;
                ufs_mtk_auto_hibern8_disable(hba);
                return 0;
        }

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_lpm(hba);
                if (err)
                        goto fail;
        }

        if (!ufshcd_is_link_active(hba)) {
                /*
                 * Make sure no error will be returned to prevent
                 * ufshcd_suspend() re-enabling regulators while vreg is still
                 * in low-power mode.
                 */
                err = ufs_mtk_mphy_power_on(hba, false);
                if (err)
                        goto fail;
        }

        if (ufshcd_is_link_off(hba))
                ufs_mtk_device_reset_ctrl(0, res);

        ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

        return 0;
fail:
        /*
         * Forcibly set the link to the off state to trigger
         * ufshcd_host_reset_and_restore() in ufshcd_suspend()
         * for a complete host reset.
         */
        ufshcd_set_link_off(hba);
        return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        int err;
        struct arm_smccc_res res;

        if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
                ufs_mtk_dev_vreg_set_lpm(hba, false);

        ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

        err = ufs_mtk_mphy_power_on(hba, true);
        if (err)
                goto fail;

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_hpm(hba);
                if (err)
                        goto fail;
        }

        return 0;
fail:
        return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

        ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

        ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
                         REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
                         "MPHY Ctrl ");

        /* Direct debugging information to REG_MTK_PROBE */
        ufs_mtk_dbg_sel(hba);
        ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
        struct ufs_dev_info *dev_info = &hba->dev_info;
        u16 mid = dev_info->wmanufacturerid;

        if (mid == UFS_VENDOR_SAMSUNG) {
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
        }

        /*
         * Decide waiting time before gating reference clock and
         * after ungating reference clock according to vendors'
         * requirements.
         */
        if (mid == UFS_VENDOR_SAMSUNG)
                ufs_mtk_setup_ref_clk_wait_us(hba, 1);
        else if (mid == UFS_VENDOR_SKHYNIX)
                ufs_mtk_setup_ref_clk_wait_us(hba, 30);
        else if (mid == UFS_VENDOR_TOSHIBA)
                ufs_mtk_setup_ref_clk_wait_us(hba, 100);
        else
                ufs_mtk_setup_ref_clk_wait_us(hba,
                                              REFCLK_DEFAULT_WAIT_US);
        return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
        ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

        if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
            (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
                hba->vreg_info.vcc->always_on = true;
                /*
                 * VCC will be kept always-on thus we don't
                 * need any delay during regulator operations
                 */
                hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
                        UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
        }

        ufs_mtk_vreg_fix_vcc(hba);
        ufs_mtk_vreg_fix_vccqx(hba);
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
                                 enum ufs_event_type evt, void *data)
{
        unsigned int val = *(u32 *)data;

        trace_ufs_mtk_event(evt, val);
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .name                = "mediatek.ufshci",
        .init                = ufs_mtk_init,
        .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
        .setup_clocks        = ufs_mtk_setup_clocks,
        .hce_enable_notify   = ufs_mtk_hce_enable_notify,
        .link_startup_notify = ufs_mtk_link_startup_notify,
        .pwr_change_notify   = ufs_mtk_pwr_change_notify,
        .apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
        .fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
        .suspend             = ufs_mtk_suspend,
        .resume              = ufs_mtk_resume,
        .dbg_register_dump   = ufs_mtk_dbg_register_dump,
        .device_reset        = ufs_mtk_device_reset,
        .event_notify        = ufs_mtk_event_notify,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;
        struct device_node *reset_node;
        struct platform_device *reset_pdev;
        struct device_link *link;

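        /*
         * Link this device to the external reset controller, if one is
         * described, so that the reset provider is probed first; defer
         * probing if the supplier has not been bound yet.
         */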
        reset_node = of_find_compatible_node(NULL, NULL,
                                             "ti,syscon-reset");
        if (!reset_node) {
                dev_notice(dev, "find ti,syscon-reset fail\n");
                goto skip_reset;
        }
        reset_pdev = of_find_device_by_node(reset_node);
        if (!reset_pdev) {
                dev_notice(dev, "find reset_pdev fail\n");
                goto skip_reset;
        }
        link = device_link_add(dev, &reset_pdev->dev,
                DL_FLAG_AUTOPROBE_CONSUMER);
        put_device(&reset_pdev->dev);
        if (!link) {
                dev_notice(dev, "add reset device_link fail\n");
                goto skip_reset;
        }
        /* supplier is not probed */
        if (link->status == DL_STATE_DORMANT) {
                err = -EPROBE_DEFER;
                goto out;
        }

skip_reset:
        /* perform generic probe */
        err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
        if (err)
                dev_info(dev, "probe failed %d\n", err);

        of_node_put(reset_node);
        return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always return 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&(pdev)->dev);
        ufshcd_remove(hba);
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret;

        ret = ufshcd_system_suspend(dev);
        if (ret)
                return ret;

        ufs_mtk_dev_vreg_set_lpm(hba, true);

        return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        ufs_mtk_dev_vreg_set_lpm(hba, false);

        return ufshcd_system_resume(dev);
}
#endif

static int ufs_mtk_runtime_suspend(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret = 0;

        ret = ufshcd_runtime_suspend(dev);
        if (ret)
                return ret;

        ufs_mtk_dev_vreg_set_lpm(hba, true);

        return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        ufs_mtk_dev_vreg_set_lpm(hba, false);

        return ufshcd_runtime_resume(dev);
}

static const struct dev_pm_ops ufs_mtk_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
                                ufs_mtk_system_resume)
        SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
                           ufs_mtk_runtime_resume, NULL)
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
        .probe      = ufs_mtk_probe,
        .remove     = ufs_mtk_remove,
        .shutdown   = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "ufshcd-mtk",
                .pm     = &ufs_mtk_pm_ops,
                .of_match_table = ufs_mtk_of_match,
        },
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);