// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * This code is based on drivers/scsi/ufs/ufshcd-pci.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include "ufshcd.h"
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>

struct ufs_host {
	void (*late_init)(struct ufs_hba *hba);
};

enum {
	INTEL_DSM_FNS		=  0,
	INTEL_DSM_RESET		=  1,
};

struct intel_host {
	struct ufs_host ufs_host;
	u32		dsm_fns;
	u32		active_ltr;
	u32		idle_ltr;
	struct dentry	*debugfs_root;
	struct gpio_desc *reset_gpio;
};

static const guid_t intel_dsm_guid =
	GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
		  0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);

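/*
 * Note: per the ACPI _DSM convention, function 0 returns a bitmask in which
 * bit n set means that function n is supported. intel_dsm() below therefore
 * checks (1 << fn) against the mask cached by intel_dsm_init() before
 * evaluating a function.
 */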
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	int err = 0;
	size_t len;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
		err = -EINVAL;
		goto out;
	}

	len = min_t(size_t, obj->buffer.length, 4);

	*result = 0;
	memcpy(result, obj->buffer.pointer, len);
out:
	ACPI_FREE(obj);

	return err;
}

static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
{
	if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
		return -EOPNOTSUPP;

	return __intel_dsm(intel_host, dev, fn, result);
}

static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
	int err;

	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
	dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}

static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status status)
{
	/* Cannot enable ICE (inline crypto engine) until after HC enable */
	if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
		u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

		hce |= CRYPTO_GENERAL_ENABLE;
		ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
	}

	return 0;
}

static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
	u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
	u32 lcc_enable = 0;

	ufshcd_dme_get(hba, attr, &lcc_enable);
	if (lcc_enable)
		ufshcd_disable_host_tx_lcc(hba);

	return 0;
}

static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
					 enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_intel_disable_lcc(hba);
		break;
	case POST_CHANGE:
		break;
	default:
		break;
	}

	return err;
}

#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)

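/*
 * The ACTIVELTR/IDLELTR registers encode a latency tolerance request as a
 * 10-bit value (bits 9:0), a scale field (bits 11:10) selecting 1 us or
 * 32 us units, and a requirement bit (bit 15) that activates the request.
 */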
static void intel_cache_ltr(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct intel_host *host = ufshcd_get_variant(hba);
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program the latency tolerance (LTR) according to what the PM QoS
	 * layer has requested, or disable it if we were passed a negative
	 * value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == host->active_ltr)
		goto out;

	writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
	writel(ltr, hba->mmio_base + INTEL_IDLELTR);

	/* Cache the values into intel_host structure */
	intel_cache_ltr(hba);
out:
	pm_runtime_put(dev);
}

static void intel_ltr_expose(struct device *dev)
{
	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}

static void intel_add_debugfs(struct ufs_hba *hba)
{
	struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
	struct intel_host *host = ufshcd_get_variant(hba);

	intel_cache_ltr(hba);

	host->debugfs_root = dir;
	debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
	debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	debugfs_remove_recursive(host->debugfs_root);
}

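/*
 * Device reset: prefer the ACPI _DSM reset function when the platform
 * advertises it; otherwise fall back to pulsing the optional reset GPIO
 * described in _DSD.
 */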
static int ufs_intel_device_reset(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	/* dsm_fns is a bitmask: bit n set means DSM function n is supported */
	if (host->dsm_fns & (1 << INTEL_DSM_RESET)) {
		u32 result = 0;
		int err;

		err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
		if (!err && !result)
			err = -EIO;
		if (err)
			dev_err(hba->dev, "%s: DSM error %d result %u\n",
				__func__, err, result);
		return err;
	}

	if (!host->reset_gpio)
		return -EOPNOTSUPP;

	gpiod_set_value_cansleep(host->reset_gpio, 1);
	usleep_range(10, 15);

	gpiod_set_value_cansleep(host->reset_gpio, 0);
	usleep_range(10, 15);

	return 0;
}

static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
	/* GPIO in _DSD has active low setting */
	return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}

static int ufs_intel_common_init(struct ufs_hba *hba)
{
	struct intel_host *host;

	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	ufshcd_set_variant(hba, host);
	intel_dsm_init(host, hba->dev);
	if (host->dsm_fns & (1 << INTEL_DSM_RESET)) {
		if (hba->vops->device_reset)
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
	} else {
		if (hba->vops->device_reset)
			host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
		if (IS_ERR(host->reset_gpio)) {
			dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
				__func__, PTR_ERR(host->reset_gpio));
			host->reset_gpio = NULL;
		}
		if (host->reset_gpio) {
			gpiod_set_value_cansleep(host->reset_gpio, 0);
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
		}
	}
	intel_ltr_expose(hba->dev);
	intel_add_debugfs(hba);
	return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
	intel_remove_debugfs(hba);
	intel_ltr_hide(hba->dev);
}

295
296 static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
297 {
298         /*
299          * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
300          * address registers must be restored because the restore kernel can
301          * have used different addresses.
302          */
303         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
304                       REG_UTP_TRANSFER_REQ_LIST_BASE_L);
305         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
306                       REG_UTP_TRANSFER_REQ_LIST_BASE_H);
307         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
308                       REG_UTP_TASK_REQ_LIST_BASE_L);
309         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
310                       REG_UTP_TASK_REQ_LIST_BASE_H);
311
312         if (ufshcd_is_link_hibern8(hba)) {
313                 int ret = ufshcd_uic_hibern8_exit(hba);
314
315                 if (!ret) {
316                         ufshcd_set_link_active(hba);
317                 } else {
318                         dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
319                                 __func__, ret);
320                         /*
321                          * Force reset and restore. Any other actions can lead
322                          * to an unrecoverable state.
323                          */
324                         ufshcd_set_link_off(hba);
325                 }
326         }
327
328         return 0;
329 }
330
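/*
 * UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8: the controller advertises the
 * auto-hibernate capability, but it does not work, so the core is told
 * not to use it.
 */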
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}

static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
	/* LKF always needs a full reset, so set PM accordingly */
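	/*
	 * UFS_PM_LVL_5 powers the device down with the link off, while
	 * UFS_PM_LVL_6 uses DeepSleep instead of a plain power-down and
	 * therefore requires a working device reset (UFSHCD_CAP_DEEPSLEEP).
	 */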
	if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
		hba->spm_lvl = UFS_PM_LVL_6;
		hba->rpm_lvl = UFS_PM_LVL_6;
	} else {
		hba->spm_lvl = UFS_PM_LVL_5;
		hba->rpm_lvl = UFS_PM_LVL_5;
	}
}

static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
	struct ufs_host *ufs_host;
	int err;

	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	hba->caps |= UFSHCD_CAP_CRYPTO;
	err = ufs_intel_common_init(hba);
	ufs_host = ufshcd_get_variant(hba);
	ufs_host->late_init = ufs_intel_lkf_late_init;
	return err;
}

static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
	.name                   = "intel-pci",
	.init                   = ufs_intel_common_init,
	.exit                   = ufs_intel_common_exit,
	.link_startup_notify    = ufs_intel_link_startup_notify,
	.resume                 = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
	.name                   = "intel-pci",
	.init                   = ufs_intel_ehl_init,
	.exit                   = ufs_intel_common_exit,
	.link_startup_notify    = ufs_intel_link_startup_notify,
	.resume                 = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
	.name                   = "intel-pci",
	.init                   = ufs_intel_lkf_init,
	.exit                   = ufs_intel_common_exit,
	.hce_enable_notify      = ufs_intel_hce_enable_notify,
	.link_startup_notify    = ufs_intel_link_startup_notify,
	.resume                 = ufs_intel_resume,
	.device_reset           = ufs_intel_device_reset,
};

/**
 * ufshcd_pci_shutdown - put the controller into a reset state
 * @pdev: pointer to the PCI device handle
 */
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
}

/**
 * ufshcd_pci_remove - de-allocate the SCSI host and host memory data
 *		structures
 * @pdev: pointer to the PCI device handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
}

/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ufs_host *ufs_host;
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pcim_enable_device failed\n");
		return err;
	}

	pci_set_master(pdev);

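	/* Map BAR 0 (the 1 << 0 mask), which holds the UFSHCI register space */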
	err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
	if (err < 0) {
		dev_err(&pdev->dev, "request and iomap failed\n");
		return err;
	}

	mmio_base = pcim_iomap_table(pdev)[0];

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		return err;
	}

	pci_set_drvdata(pdev, hba);

	hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

	err = ufshcd_init(hba, mmio_base, pdev->irq);
	if (err) {
		dev_err(&pdev->dev, "Initialization failed\n");
		ufshcd_dealloc_host(hba);
		return err;
	}

	ufs_host = ufshcd_get_variant(hba);
	if (ufs_host && ufs_host->late_init)
		ufs_host->late_init(hba);

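	/*
	 * The PCI core holds a runtime PM usage reference across probe; drop
	 * it without triggering an immediate idle transition, and lift the
	 * default pm_runtime_forbid() so the device may runtime suspend.
	 * ufshcd_pci_remove() does the converse.
	 */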
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}

static const struct dev_pm_ops ufshcd_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.prepare	= ufshcd_suspend_prepare,
	.complete	= ufshcd_resume_complete,
#endif
};

static const struct pci_device_id ufshcd_pci_tbl[] = {
	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);

static struct pci_driver ufshcd_pci_driver = {
	.name = UFSHCD,
	.id_table = ufshcd_pci_tbl,
	.probe = ufshcd_pci_probe,
	.remove = ufshcd_pci_remove,
	.shutdown = ufshcd_pci_shutdown,
	.driver = {
		.pm = &ufshcd_pci_pm_ops
	},
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);