scsi: ufs: core: Combine 32-bit command_desc_base_addr_lo/hi
drivers/ufs/core/ufshcd.c (platform/kernel/linux-rpi.git)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Universal Flash Storage Host controller driver Core
4  * Copyright (C) 2011-2013 Samsung India Software Operations
5  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6  *
7  * Authors:
8  *      Santosh Yaraganavi <santosh.sy@samsung.com>
9  *      Vinayak Holikatti <h.vinayak@samsung.com>
10  */
11
12 #include <linux/async.h>
13 #include <linux/devfreq.h>
14 #include <linux/nls.h>
15 #include <linux/of.h>
16 #include <linux/bitfield.h>
17 #include <linux/blk-pm.h>
18 #include <linux/blkdev.h>
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/sched/clock.h>
25 #include <scsi/scsi_cmnd.h>
26 #include <scsi/scsi_dbg.h>
27 #include <scsi/scsi_driver.h>
28 #include <scsi/scsi_eh.h>
29 #include "ufshcd-priv.h"
30 #include <ufs/ufs_quirks.h>
31 #include <ufs/unipro.h>
32 #include "ufs-sysfs.h"
33 #include "ufs-debugfs.h"
34 #include "ufs-fault-injection.h"
35 #include "ufs_bsg.h"
36 #include "ufshcd-crypto.h"
37 #include "ufshpb.h"
38 #include <asm/unaligned.h>
39
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/ufs.h>
42
43 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
44                                  UTP_TASK_REQ_COMPL |\
45                                  UFSHCD_ERROR_MASK)
46
47 #define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
48                                  UFSHCD_ERROR_MASK |\
49                                  MCQ_CQ_EVENT_STATUS)
50
51
52 /* UIC command timeout, unit: ms */
53 #define UIC_CMD_TIMEOUT 500
54
55 /* NOP OUT retries waiting for NOP IN response */
56 #define NOP_OUT_RETRIES    10
57 /* Timeout after 50 msecs if NOP OUT hangs without response */
58 #define NOP_OUT_TIMEOUT    50 /* msecs */
59
60 /* Query request retries */
61 #define QUERY_REQ_RETRIES 3
62 /* Query request timeout */
63 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
64
65 /* Advanced RPMB request timeout */
66 #define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */
67
68 /* Task management command timeout */
69 #define TM_CMD_TIMEOUT  100 /* msecs */
70
71 /* maximum number of retries for a general UIC command  */
72 #define UFS_UIC_COMMAND_RETRIES 3
73
74 /* maximum number of link-startup retries */
75 #define DME_LINKSTARTUP_RETRIES 3
76
77 /* maximum number of reset retries before giving up */
78 #define MAX_HOST_RESET_RETRIES 5
79
80 /* Maximum number of error handler retries before giving up */
81 #define MAX_ERR_HANDLER_RETRIES 5
82
83 /* Expose the flag value from utp_upiu_query.value */
84 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
85
86 /* Interrupt aggregation default timeout, unit: 40us */
87 #define INT_AGGR_DEF_TO 0x02
88
89 /* default delay of autosuspend: 2000 ms */
90 #define RPM_AUTOSUSPEND_DELAY_MS 2000
91
92 /* Default delay of RPM device flush delayed work */
93 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
94
95 /* Default value of wait time before gating device ref clock */
96 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
97
98 /* Polling time to wait for fDeviceInit */
99 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
100
101 /* UFSHCI 4.0 compliant HCs support this mode; refer to param_set_mcq_mode() */
102 static bool use_mcq_mode = true;
103
104 static bool is_mcq_supported(struct ufs_hba *hba)
105 {
106         return hba->mcq_sup && use_mcq_mode;
107 }
108
109 static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
110 {
111         int ret;
112
113         ret = param_set_bool(val, kp);
114         if (ret)
115                 return ret;
116
117         return 0;
118 }
119
120 static const struct kernel_param_ops mcq_mode_ops = {
121         .set = param_set_mcq_mode,
122         .get = param_get_bool,
123 };
124
125 module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
126 MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
127
128 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
129         ({                                                              \
130                 int _ret;                                               \
131                 if (_on)                                                \
132                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
133                 else                                                    \
134                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
135                 _ret;                                                   \
136         })
137
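/*
 * Usage sketch (illustrative only, not part of the driver logic here): since
 * ufshcd_toggle_vreg() is a statement expression, it evaluates to the return
 * value of the underlying enable/disable helper. Assuming some
 * struct ufs_vreg *vreg taken from the host's regulator info:
 *
 *	int ret = ufshcd_toggle_vreg(hba->dev, vreg, true);
 *
 *	if (!ret)
 *		ret = ufshcd_toggle_vreg(hba->dev, vreg, false);
 */
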
138 #define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
139         size_t __len = (len);                                            \
140         print_hex_dump(KERN_ERR, prefix_str,                             \
141                        __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
142                        16, 4, buf, __len, false);                        \
143 } while (0)
144
145 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
146                      const char *prefix)
147 {
148         u32 *regs;
149         size_t pos;
150
151         if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
152                 return -EINVAL;
153
154         regs = kzalloc(len, GFP_ATOMIC);
155         if (!regs)
156                 return -ENOMEM;
157
158         for (pos = 0; pos < len; pos += 4) {
159                 if (offset == 0 &&
160                     pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
161                     pos <= REG_UIC_ERROR_CODE_DME)
162                         continue;
163                 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
164         }
165
166         ufshcd_hex_dump(prefix, regs, len);
167         kfree(regs);
168
169         return 0;
170 }
171 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
172
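/*
 * Usage sketch (illustrative only): both offset and len must be 4-byte
 * aligned, otherwise ufshcd_dump_regs() returns -EINVAL. The error paths
 * later in this file dump the whole UFSHCI register space like this:
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 *
 * A vendor driver can pass its own offset/length window and prefix string.
 */
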
173 enum {
174         UFSHCD_MAX_CHANNEL      = 0,
175         UFSHCD_MAX_ID           = 1,
176         UFSHCD_NUM_RESERVED     = 1,
177         UFSHCD_CMD_PER_LUN      = 32 - UFSHCD_NUM_RESERVED,
178         UFSHCD_CAN_QUEUE        = 32 - UFSHCD_NUM_RESERVED,
179 };
180
181 static const char *const ufshcd_state_name[] = {
182         [UFSHCD_STATE_RESET]                    = "reset",
183         [UFSHCD_STATE_OPERATIONAL]              = "operational",
184         [UFSHCD_STATE_ERROR]                    = "error",
185         [UFSHCD_STATE_EH_SCHEDULED_FATAL]       = "eh_fatal",
186         [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]   = "eh_non_fatal",
187 };
188
189 /* UFSHCD error handling flags */
190 enum {
191         UFSHCD_EH_IN_PROGRESS = (1 << 0),
192 };
193
194 /* UFSHCD UIC layer error flags */
195 enum {
196         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
197         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
198         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
199         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
200         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
201         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
202         UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
203 };
204
205 #define ufshcd_set_eh_in_progress(h) \
206         ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
207 #define ufshcd_eh_in_progress(h) \
208         ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
209 #define ufshcd_clear_eh_in_progress(h) \
210         ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
211
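/*
 * Usage sketch (illustrative only) for the helpers above: the error handler
 * brackets recovery with set/clear, while other paths, such as
 * ufshcd_devfreq_target() later in this file, only test the flag:
 *
 *	ufshcd_set_eh_in_progress(hba);
 *	... perform recovery ...
 *	ufshcd_clear_eh_in_progress(hba);
 *
 *	if (ufshcd_eh_in_progress(hba))
 *		return;
 */
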
212 const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
213         [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
214         [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
215         [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
216         [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
217         [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
218         [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
219         /*
220          * For DeepSleep, the link is first put in hibern8 and then off.
221          * Leaving the link in hibern8 is not supported.
222          */
223         [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
224 };
225
226 static inline enum ufs_dev_pwr_mode
227 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
228 {
229         return ufs_pm_lvl_states[lvl].dev_state;
230 }
231
232 static inline enum uic_link_state
233 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
234 {
235         return ufs_pm_lvl_states[lvl].link_state;
236 }
237
238 static inline enum ufs_pm_level
239 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
240                                         enum uic_link_state link_state)
241 {
242         enum ufs_pm_level lvl;
243
244         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
245                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
246                         (ufs_pm_lvl_states[lvl].link_state == link_state))
247                         return lvl;
248         }
249
250         /* if no match is found, return level 0 */
251         return UFS_PM_LVL_0;
252 }
253
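/*
 * Example (follows directly from the ufs_pm_lvl_states[] table above):
 *
 *	enum ufs_pm_level lvl =
 *		ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 *							   UIC_LINK_HIBERN8_STATE);
 *
 * returns UFS_PM_LVL_3, since that entry pairs the sleep device power mode
 * with the hibern8 link state.
 */
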
254 static const struct ufs_dev_quirk ufs_fixups[] = {
255         /* UFS cards deviations table */
256         { .wmanufacturerid = UFS_VENDOR_MICRON,
257           .model = UFS_ANY_MODEL,
258           .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
259                    UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
260         { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
261           .model = UFS_ANY_MODEL,
262           .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
263                    UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
264                    UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
265         { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
266           .model = UFS_ANY_MODEL,
267           .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
268         { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
269           .model = "hB8aL1" /*H28U62301AMR*/,
270           .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
271         { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
272           .model = UFS_ANY_MODEL,
273           .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
274         { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
275           .model = "THGLF2G9C8KBADG",
276           .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
277         { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
278           .model = "THGLF2G9D8KBADG",
279           .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
280         {}
281 };
282
283 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
284 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
285 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
286 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
287 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
288 static void ufshcd_hba_exit(struct ufs_hba *hba);
289 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
290 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
291 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
292 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
293 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
294 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
295 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
296 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
297 static irqreturn_t ufshcd_intr(int irq, void *__hba);
298 static int ufshcd_change_power_mode(struct ufs_hba *hba,
299                              struct ufs_pa_layer_attr *pwr_mode);
300 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
301 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
302 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
303                                          struct ufs_vreg *vreg);
304 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
305 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
306                                                  bool enable);
307 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
308 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
309
310 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
311 {
312         if (!hba->is_irq_enabled) {
313                 enable_irq(hba->irq);
314                 hba->is_irq_enabled = true;
315         }
316 }
317
318 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
319 {
320         if (hba->is_irq_enabled) {
321                 disable_irq(hba->irq);
322                 hba->is_irq_enabled = false;
323         }
324 }
325
326 static void ufshcd_configure_wb(struct ufs_hba *hba)
327 {
328         if (!ufshcd_is_wb_allowed(hba))
329                 return;
330
331         ufshcd_wb_toggle(hba, true);
332
333         ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
334
335         if (ufshcd_is_wb_buf_flush_allowed(hba))
336                 ufshcd_wb_toggle_buf_flush(hba, true);
337 }
338
339 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
340 {
341         if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
342                 scsi_unblock_requests(hba->host);
343 }
344
345 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
346 {
347         if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
348                 scsi_block_requests(hba->host);
349 }
350
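/*
 * Usage sketch (illustrative only): the two helpers above are refcounted via
 * scsi_block_reqs_cnt, so nested block/unblock pairs are safe; only the first
 * block and the last unblock touch the SCSI host:
 *
 *	ufshcd_scsi_block_requests(hba);	count 0 -> 1, host blocked
 *	ufshcd_scsi_block_requests(hba);	count 1 -> 2, no-op
 *	ufshcd_scsi_unblock_requests(hba);	count 2 -> 1, no-op
 *	ufshcd_scsi_unblock_requests(hba);	count 1 -> 0, host unblocked
 */
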
351 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
352                                       enum ufs_trace_str_t str_t)
353 {
354         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
355         struct utp_upiu_header *header;
356
357         if (!trace_ufshcd_upiu_enabled())
358                 return;
359
360         if (str_t == UFS_CMD_SEND)
361                 header = &rq->header;
362         else
363                 header = &hba->lrb[tag].ucd_rsp_ptr->header;
364
365         trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
366                           UFS_TSF_CDB);
367 }
368
369 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
370                                         enum ufs_trace_str_t str_t,
371                                         struct utp_upiu_req *rq_rsp)
372 {
373         if (!trace_ufshcd_upiu_enabled())
374                 return;
375
376         trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
377                           &rq_rsp->qr, UFS_TSF_OSF);
378 }
379
380 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
381                                      enum ufs_trace_str_t str_t)
382 {
383         struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
384
385         if (!trace_ufshcd_upiu_enabled())
386                 return;
387
388         if (str_t == UFS_TM_SEND)
389                 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
390                                   &descp->upiu_req.req_header,
391                                   &descp->upiu_req.input_param1,
392                                   UFS_TSF_TM_INPUT);
393         else
394                 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
395                                   &descp->upiu_rsp.rsp_header,
396                                   &descp->upiu_rsp.output_param1,
397                                   UFS_TSF_TM_OUTPUT);
398 }
399
400 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
401                                          const struct uic_command *ucmd,
402                                          enum ufs_trace_str_t str_t)
403 {
404         u32 cmd;
405
406         if (!trace_ufshcd_uic_command_enabled())
407                 return;
408
409         if (str_t == UFS_CMD_SEND)
410                 cmd = ucmd->command;
411         else
412                 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
413
414         trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
415                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
416                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
417                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
418 }
419
420 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
421                                      enum ufs_trace_str_t str_t)
422 {
423         u64 lba = 0;
424         u8 opcode = 0, group_id = 0;
425         u32 doorbell = 0;
426         u32 intr;
427         int hwq_id = -1;
428         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
429         struct scsi_cmnd *cmd = lrbp->cmd;
430         struct request *rq = scsi_cmd_to_rq(cmd);
431         int transfer_len = -1;
432
433         if (!cmd)
434                 return;
435
436         /* trace UPIU also */
437         ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
438         if (!trace_ufshcd_command_enabled())
439                 return;
440
441         opcode = cmd->cmnd[0];
442
443         if (opcode == READ_10 || opcode == WRITE_10) {
444                 /*
445                  * Currently we only fully trace read(10) and write(10) commands
446                  */
447                 transfer_len =
448                        be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
449                 lba = scsi_get_lba(cmd);
450                 if (opcode == WRITE_10)
451                         group_id = lrbp->cmd->cmnd[6];
452         } else if (opcode == UNMAP) {
453                 /*
454                  * The number of Bytes to be unmapped beginning with the lba.
455                  */
456                 transfer_len = blk_rq_bytes(rq);
457                 lba = scsi_get_lba(cmd);
458         }
459
460         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
461
462         if (is_mcq_enabled(hba)) {
463                 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
464
465                 hwq_id = hwq->id;
466         } else {
467                 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
468         }
469         trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
470                         doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id);
471 }
472
473 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
474 {
475         struct ufs_clk_info *clki;
476         struct list_head *head = &hba->clk_list_head;
477
478         if (list_empty(head))
479                 return;
480
481         list_for_each_entry(clki, head, list) {
482                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
483                                 clki->max_freq)
484                         dev_err(hba->dev, "clk: %s, rate: %u\n",
485                                         clki->name, clki->curr_freq);
486         }
487 }
488
489 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
490                              const char *err_name)
491 {
492         int i;
493         bool found = false;
494         const struct ufs_event_hist *e;
495
496         if (id >= UFS_EVT_CNT)
497                 return;
498
499         e = &hba->ufs_stats.event[id];
500
501         for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
502                 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
503
504                 if (e->tstamp[p] == 0)
505                         continue;
506                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
507                         e->val[p], div_u64(e->tstamp[p], 1000));
508                 found = true;
509         }
510
511         if (!found)
512                 dev_err(hba->dev, "No record of %s\n", err_name);
513         else
514                 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
515 }
516
517 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
518 {
519         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
520
521         ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
522         ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
523         ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
524         ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
525         ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
526         ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
527                          "auto_hibern8_err");
528         ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
529         ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
530                          "link_startup_fail");
531         ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
532         ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
533                          "suspend_fail");
534         ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
535         ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
536                          "wlun suspend_fail");
537         ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
538         ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
539         ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
540
541         ufshcd_vops_dbg_register_dump(hba);
542 }
543
544 static
545 void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
546 {
547         const struct ufshcd_lrb *lrbp;
548         int prdt_length;
549
550         lrbp = &hba->lrb[tag];
551
552         dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
553                         tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
554         dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
555                         tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
556         dev_err(hba->dev,
557                 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
558                 tag, (u64)lrbp->utrd_dma_addr);
559
560         ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
561                         sizeof(struct utp_transfer_req_desc));
562         dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
563                 (u64)lrbp->ucd_req_dma_addr);
564         ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
565                         sizeof(struct utp_upiu_req));
566         dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
567                 (u64)lrbp->ucd_rsp_dma_addr);
568         ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
569                         sizeof(struct utp_upiu_rsp));
570
571         prdt_length = le16_to_cpu(
572                 lrbp->utr_descriptor_ptr->prd_table_length);
573         if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
574                 prdt_length /= ufshcd_sg_entry_size(hba);
575
576         dev_err(hba->dev,
577                 "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
578                 tag, prdt_length,
579                 (u64)lrbp->ucd_prdt_dma_addr);
580
581         if (pr_prdt)
582                 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
583                         ufshcd_sg_entry_size(hba) * prdt_length);
584 }
585
586 static bool ufshcd_print_tr_iter(struct request *req, void *priv)
587 {
588         struct scsi_device *sdev = req->q->queuedata;
589         struct Scsi_Host *shost = sdev->host;
590         struct ufs_hba *hba = shost_priv(shost);
591
592         ufshcd_print_tr(hba, req->tag, *(bool *)priv);
593
594         return true;
595 }
596
597 /**
598  * ufshcd_print_trs_all - print the TRs for all started requests.
599  * @hba: per-adapter instance.
600  * @pr_prdt: whether to print the PRDT or not.
601  */
602 static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
603 {
604         blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
605 }
606
607 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
608 {
609         int tag;
610
611         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
612                 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
613
614                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
615                 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
616         }
617 }
618
619 static void ufshcd_print_host_state(struct ufs_hba *hba)
620 {
621         const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
622
623         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
624         dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
625                 hba->outstanding_reqs, hba->outstanding_tasks);
626         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
627                 hba->saved_err, hba->saved_uic_err);
628         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
629                 hba->curr_dev_pwr_mode, hba->uic_link_state);
630         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
631                 hba->pm_op_in_progress, hba->is_sys_suspended);
632         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
633                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
634         dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
635         dev_err(hba->dev,
636                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
637                 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
638                 hba->ufs_stats.hibern8_exit_cnt);
639         dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
640                 div_u64(hba->ufs_stats.last_intr_ts, 1000),
641                 hba->ufs_stats.last_intr_status);
642         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
643                 hba->eh_flags, hba->req_abort_count);
644         dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
645                 hba->ufs_version, hba->capabilities, hba->caps);
646         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
647                 hba->dev_quirks);
648         if (sdev_ufs)
649                 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
650                         sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
651
652         ufshcd_print_clk_freqs(hba);
653 }
654
655 /**
656  * ufshcd_print_pwr_info - print power params as saved in hba
657  * power info
658  * @hba: per-adapter instance
659  */
660 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
661 {
662         static const char * const names[] = {
663                 "INVALID MODE",
664                 "FAST MODE",
665                 "SLOW_MODE",
666                 "INVALID MODE",
667                 "FASTAUTO_MODE",
668                 "SLOWAUTO_MODE",
669                 "INVALID MODE",
670         };
671
672         /*
673          * Use dev_dbg to avoid messages during runtime PM, which would otherwise
674          * create a never-ending cycle: messages written back to storage by user
675          * space cause runtime resume, which causes more messages, and so on.
676          */
677         dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
678                  __func__,
679                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
680                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
681                  names[hba->pwr_info.pwr_rx],
682                  names[hba->pwr_info.pwr_tx],
683                  hba->pwr_info.hs_rate);
684 }
685
686 static void ufshcd_device_reset(struct ufs_hba *hba)
687 {
688         int err;
689
690         err = ufshcd_vops_device_reset(hba);
691
692         if (!err) {
693                 ufshcd_set_ufs_dev_active(hba);
694                 if (ufshcd_is_wb_allowed(hba)) {
695                         hba->dev_info.wb_enabled = false;
696                         hba->dev_info.wb_buf_flush_enabled = false;
697                 }
698         }
699         if (err != -EOPNOTSUPP)
700                 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
701 }
702
703 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
704 {
705         if (!us)
706                 return;
707
708         if (us < 10)
709                 udelay(us);
710         else
711                 usleep_range(us, us + tolerance);
712 }
713 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
714
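/*
 * Usage sketch (illustrative values): delays below 10 us busy-wait with
 * udelay(), anything longer sleeps within [us, us + tolerance]:
 *
 *	ufshcd_delay_us(5, 0);		busy-waits ~5 us
 *	ufshcd_delay_us(100, 10);	sleeps between 100 and 110 us
 */
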
715 /**
716  * ufshcd_wait_for_register - wait for register value to change
717  * @hba: per-adapter interface
718  * @reg: mmio register offset
719  * @mask: mask to apply to the read register value
720  * @val: value to wait for
721  * @interval_us: polling interval in microseconds
722  * @timeout_ms: timeout in milliseconds
723  *
724  * Return:
725  * -ETIMEDOUT on error, zero on success.
726  */
727 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
728                                 u32 val, unsigned long interval_us,
729                                 unsigned long timeout_ms)
730 {
731         int err = 0;
732         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
733
734         /* ignore bits that we don't intend to wait on */
735         val = val & mask;
736
737         while ((ufshcd_readl(hba, reg) & mask) != val) {
738                 usleep_range(interval_us, interval_us + 50);
739                 if (time_after(jiffies, timeout)) {
740                         if ((ufshcd_readl(hba, reg) & mask) != val)
741                                 err = -ETIMEDOUT;
742                         break;
743                 }
744         }
745
746         return err;
747 }
748
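/*
 * Usage sketch (illustrative values): poll every 10 us, for at most 10 ms,
 * until the HCE bit in REG_CONTROLLER_ENABLE reads back as zero:
 *
 *	int err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 *					   CONTROLLER_ENABLE, 0, 10, 10);
 *
 * err is -ETIMEDOUT if the masked value never matched within the timeout.
 */
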
749 /**
750  * ufshcd_get_intr_mask - Get the interrupt bit mask
751  * @hba: Pointer to adapter instance
752  *
753  * Returns interrupt bit mask per version
754  */
755 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
756 {
757         if (hba->ufs_version == ufshci_version(1, 0))
758                 return INTERRUPT_MASK_ALL_VER_10;
759         if (hba->ufs_version <= ufshci_version(2, 0))
760                 return INTERRUPT_MASK_ALL_VER_11;
761
762         return INTERRUPT_MASK_ALL_VER_21;
763 }
764
765 /**
766  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
767  * @hba: Pointer to adapter instance
768  *
769  * Returns UFSHCI version supported by the controller
770  */
771 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
772 {
773         u32 ufshci_ver;
774
775         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
776                 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
777         else
778                 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
779
780         /*
781          * UFSHCI v1.x uses a different version scheme. To allow comparisons
782          * with the ufshci_version() helper, convert it to the same scheme as
783          * UFSHCI 2.0+.
784          */
785         if (ufshci_ver & 0x00010000)
786                 return ufshci_version(1, ufshci_ver & 0x00000100);
787
788         return ufshci_ver;
789 }
790
791 /**
792  * ufshcd_is_device_present - Check if any device is connected to
793  *                            the host controller
794  * @hba: pointer to adapter instance
795  *
796  * Returns true if a device is present, false if no device is detected
797  */
798 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
799 {
800         return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
801 }
802
803 /**
804  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
805  * @lrbp: pointer to local command reference block
806  * @cqe: pointer to the completion queue entry
807  *
808  * This function is used to get the OCS field from UTRD
809  * Returns the OCS field in the UTRD
810  */
811 static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
812                                       struct cq_entry *cqe)
813 {
814         if (cqe)
815                 return le32_to_cpu(cqe->status) & MASK_OCS;
816
817         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
818 }
819
820 /**
821  * ufshcd_utrl_clear() - Clear requests from the controller request list.
822  * @hba: per adapter instance
823  * @mask: mask with one bit set for each request to be cleared
824  */
825 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
826 {
827         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
828                 mask = ~mask;
829         /*
830          * From the UFSHCI specification: "UTP Transfer Request List CLear
831          * Register (UTRLCLR): This field is bit significant. Each bit
832          * corresponds to a slot in the UTP Transfer Request List, where bit 0
833          * corresponds to request slot 0. A bit in this field is set to ‘0’
834          * by host software to indicate to the host controller that a transfer
835          * request slot is cleared. The host controller
836          * shall free up any resources associated to the request slot
837          * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
838          * host software indicates no change to request slots by setting the
839          * associated bits in this field to ‘1’. Bits in this field shall only
840          * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
841          */
842         ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
843 }
844
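/*
 * Worked example for ufshcd_utrl_clear() (illustrative only): to clear
 * transfer request slot 3 on a spec-compliant controller, pass mask = BIT(3);
 * the function then writes ~BIT(3), i.e. every bit set except bit 3, to
 * UTRLCLR. With UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR the mask is inverted first,
 * so the register ends up with only bit 3 set instead:
 *
 *	ufshcd_utrl_clear(hba, BIT(3));
 */
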
845 /**
846  * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
847  * @hba: per adapter instance
848  * @pos: position of the bit to be cleared
849  */
850 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
851 {
852         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
853                 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
854         else
855                 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
856 }
857
858 /**
859  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
860  * @reg: Register value of host controller status
861  *
862  * Returns 0 on success and a positive value on failure
863  */
864 static inline int ufshcd_get_lists_status(u32 reg)
865 {
866         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
867 }
868
869 /**
870  * ufshcd_get_uic_cmd_result - Get the UIC command result
871  * @hba: Pointer to adapter instance
872  *
873  * This function gets the result of UIC command completion
874  * Returns 0 on success, non zero value on error
875  */
876 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
877 {
878         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
879                MASK_UIC_COMMAND_RESULT;
880 }
881
882 /**
883  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
884  * @hba: Pointer to adapter instance
885  *
886  * This function reads UIC command argument 3
887  * Returns the value of UIC command argument 3 (the attribute value)
888  */
889 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
890 {
891         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
892 }
893
894 /**
895  * ufshcd_get_req_rsp - returns the TR response transaction type
896  * @ucd_rsp_ptr: pointer to response UPIU
897  */
898 static inline int
899 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
900 {
901         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
902 }
903
904 /**
905  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
906  * @ucd_rsp_ptr: pointer to response UPIU
907  *
908  * This function gets the response status and scsi_status from response UPIU
909  * Returns the response result code.
910  */
911 static inline int
912 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
913 {
914         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
915 }
916
917 /*
918  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
919  *                              from response UPIU
920  * @ucd_rsp_ptr: pointer to response UPIU
921  *
922  * Return the data segment length.
923  */
924 static inline unsigned int
925 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
926 {
927         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
928                 MASK_RSP_UPIU_DATA_SEG_LEN;
929 }
930
931 /**
932  * ufshcd_is_exception_event - Check if the device raised an exception event
933  * @ucd_rsp_ptr: pointer to response UPIU
934  *
935  * The function checks if the device raised an exception event indicated in
936  * the Device Information field of response UPIU.
937  *
938  * Returns true if exception is raised, false otherwise.
939  */
940 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
941 {
942         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
943                         MASK_RSP_EXCEPTION_EVENT;
944 }
945
946 /**
947  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
948  * @hba: per adapter instance
949  */
950 static inline void
951 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
952 {
953         ufshcd_writel(hba, INT_AGGR_ENABLE |
954                       INT_AGGR_COUNTER_AND_TIMER_RESET,
955                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
956 }
957
958 /**
959  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
960  * @hba: per adapter instance
961  * @cnt: Interrupt aggregation counter threshold
962  * @tmout: Interrupt aggregation timeout value
963  */
964 static inline void
965 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
966 {
967         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
968                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
969                       INT_AGGR_TIMEOUT_VAL(tmout),
970                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
971 }
972
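/*
 * Usage sketch (illustrative counter value): raise an interrupt once 8
 * requests have completed, or after the default timeout INT_AGGR_DEF_TO
 * (0x02 units of 40 us, i.e. 80 us), whichever comes first:
 *
 *	ufshcd_config_intr_aggr(hba, 8, INT_AGGR_DEF_TO);
 */
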
973 /**
974  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
975  * @hba: per adapter instance
976  */
977 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
978 {
979         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
980 }
981
982 /**
983  * ufshcd_enable_run_stop_reg - Enable run-stop registers.
984  *                      Setting the run-stop registers to 1 indicates to the
985  *                      host controller that it can process requests.
986  * @hba: per adapter instance
987  */
988 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
989 {
990         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
991                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
992         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
993                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
994 }
995
996 /**
997  * ufshcd_hba_start - Start controller initialization sequence
998  * @hba: per adapter instance
999  */
1000 static inline void ufshcd_hba_start(struct ufs_hba *hba)
1001 {
1002         u32 val = CONTROLLER_ENABLE;
1003
1004         if (ufshcd_crypto_enable(hba))
1005                 val |= CRYPTO_GENERAL_ENABLE;
1006
1007         ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
1008 }
1009
1010 /**
1011  * ufshcd_is_hba_active - Get controller state
1012  * @hba: per adapter instance
1013  *
1014  * Returns true if and only if the controller is active.
1015  */
1016 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
1017 {
1018         return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
1019 }
1020
1021 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
1022 {
1023         /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
1024         if (hba->ufs_version <= ufshci_version(1, 1))
1025                 return UFS_UNIPRO_VER_1_41;
1026         else
1027                 return UFS_UNIPRO_VER_1_6;
1028 }
1029 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
1030
1031 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
1032 {
1033         /*
1034          * If both host and device support UniPro ver1.6 or later, PA layer
1035          * parameters tuning happens during link startup itself.
1036          *
1037          * We can manually tune PA layer parameters if either host or device
1038          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1039          * logic simple, we will only do manual tuning if local unipro version
1040          * doesn't support ver1.6 or later.
1041          */
1042         return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
1043 }
1044
1045 /**
1046  * ufshcd_set_clk_freq - set UFS controller clock frequencies
1047  * @hba: per adapter instance
1048  * @scale_up: If true, set the maximum possible frequency, otherwise set a low frequency
1049  *
1050  * Returns 0 if successful
1051  * Returns < 0 for any other errors
1052  */
1053 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
1054 {
1055         int ret = 0;
1056         struct ufs_clk_info *clki;
1057         struct list_head *head = &hba->clk_list_head;
1058
1059         if (list_empty(head))
1060                 goto out;
1061
1062         list_for_each_entry(clki, head, list) {
1063                 if (!IS_ERR_OR_NULL(clki->clk)) {
1064                         if (scale_up && clki->max_freq) {
1065                                 if (clki->curr_freq == clki->max_freq)
1066                                         continue;
1067
1068                                 ret = clk_set_rate(clki->clk, clki->max_freq);
1069                                 if (ret) {
1070                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1071                                                 __func__, clki->name,
1072                                                 clki->max_freq, ret);
1073                                         break;
1074                                 }
1075                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1076                                                 "scaled up", clki->name,
1077                                                 clki->curr_freq,
1078                                                 clki->max_freq);
1079
1080                                 clki->curr_freq = clki->max_freq;
1081
1082                         } else if (!scale_up && clki->min_freq) {
1083                                 if (clki->curr_freq == clki->min_freq)
1084                                         continue;
1085
1086                                 ret = clk_set_rate(clki->clk, clki->min_freq);
1087                                 if (ret) {
1088                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1089                                                 __func__, clki->name,
1090                                                 clki->min_freq, ret);
1091                                         break;
1092                                 }
1093                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1094                                                 "scaled down", clki->name,
1095                                                 clki->curr_freq,
1096                                                 clki->min_freq);
1097                                 clki->curr_freq = clki->min_freq;
1098                         }
1099                 }
1100                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1101                                 clki->name, clk_get_rate(clki->clk));
1102         }
1103
1104 out:
1105         return ret;
1106 }
1107
1108 /**
1109  * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1110  * @hba: per adapter instance
1111  * @scale_up: True if scaling up and false if scaling down
1112  *
1113  * Returns 0 if successful
1114  * Returns < 0 for any other errors
1115  */
1116 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1117 {
1118         int ret = 0;
1119         ktime_t start = ktime_get();
1120
1121         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1122         if (ret)
1123                 goto out;
1124
1125         ret = ufshcd_set_clk_freq(hba, scale_up);
1126         if (ret)
1127                 goto out;
1128
1129         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1130         if (ret)
1131                 ufshcd_set_clk_freq(hba, !scale_up);
1132
1133 out:
1134         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1135                         (scale_up ? "up" : "down"),
1136                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1137         return ret;
1138 }
1139
1140 /**
1141  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1142  * @hba: per adapter instance
1143  * @scale_up: True if scaling up and false if scaling down
1144  *
1145  * Returns true if scaling is required, false otherwise.
1146  */
1147 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1148                                                bool scale_up)
1149 {
1150         struct ufs_clk_info *clki;
1151         struct list_head *head = &hba->clk_list_head;
1152
1153         if (list_empty(head))
1154                 return false;
1155
1156         list_for_each_entry(clki, head, list) {
1157                 if (!IS_ERR_OR_NULL(clki->clk)) {
1158                         if (scale_up && clki->max_freq) {
1159                                 if (clki->curr_freq == clki->max_freq)
1160                                         continue;
1161                                 return true;
1162                         } else if (!scale_up && clki->min_freq) {
1163                                 if (clki->curr_freq == clki->min_freq)
1164                                         continue;
1165                                 return true;
1166                         }
1167                 }
1168         }
1169
1170         return false;
1171 }
1172
1173 /*
1174  * Determine the number of pending commands by counting the bits in the SCSI
1175  * device budget maps. This approach has been selected because a bit is set in
1176  * the budget map before scsi_host_queue_ready() checks the host_self_blocked
1177  * flag. The host_self_blocked flag can be modified by calling
1178  * scsi_block_requests() or scsi_unblock_requests().
1179  */
1180 static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
1181 {
1182         const struct scsi_device *sdev;
1183         u32 pending = 0;
1184
1185         lockdep_assert_held(hba->host->host_lock);
1186         __shost_for_each_device(sdev, hba->host)
1187                 pending += sbitmap_weight(&sdev->budget_map);
1188
1189         return pending;
1190 }
1191
1192 /*
1193  * Wait until all pending SCSI commands and TMFs have finished or the timeout
1194  * has expired.
1195  *
1196  * Return: 0 upon success; -EBUSY upon timeout.
1197  */
1198 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1199                                         u64 wait_timeout_us)
1200 {
1201         unsigned long flags;
1202         int ret = 0;
1203         u32 tm_doorbell;
1204         u32 tr_pending;
1205         bool timeout = false, do_last_check = false;
1206         ktime_t start;
1207
1208         ufshcd_hold(hba, false);
1209         spin_lock_irqsave(hba->host->host_lock, flags);
1210         /*
1211          * Wait for all the outstanding tasks/transfer requests.
1212          * Verify by checking the doorbell registers are clear.
1213          */
1214         start = ktime_get();
1215         do {
1216                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1217                         ret = -EBUSY;
1218                         goto out;
1219                 }
1220
1221                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1222                 tr_pending = ufshcd_pending_cmds(hba);
1223                 if (!tm_doorbell && !tr_pending) {
1224                         timeout = false;
1225                         break;
1226                 } else if (do_last_check) {
1227                         break;
1228                 }
1229
1230                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1231                 io_schedule_timeout(msecs_to_jiffies(20));
1232                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1233                     wait_timeout_us) {
1234                         timeout = true;
1235                         /*
1236                          * We might have scheduled out for long time so make
1237                          * sure to check if doorbells are cleared by this time
1238                          * or not.
1239                          */
1240                         do_last_check = true;
1241                 }
1242                 spin_lock_irqsave(hba->host->host_lock, flags);
1243         } while (tm_doorbell || tr_pending);
1244
1245         if (timeout) {
1246                 dev_err(hba->dev,
1247                         "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1248                         __func__, tm_doorbell, tr_pending);
1249                 ret = -EBUSY;
1250         }
1251 out:
1252         spin_unlock_irqrestore(hba->host->host_lock, flags);
1253         ufshcd_release(hba);
1254         return ret;
1255 }
1256
1257 /**
1258  * ufshcd_scale_gear - scale up/down UFS gear
1259  * @hba: per adapter instance
1260  * @scale_up: True for scaling up gear and false for scaling down
1261  *
1262  * Returns 0 for success,
1263  * Returns -EBUSY if scaling can't happen at this time
1264  * Returns non-zero for any other errors
1265  */
1266 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1267 {
1268         int ret = 0;
1269         struct ufs_pa_layer_attr new_pwr_info;
1270
1271         if (scale_up) {
1272                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
1273                        sizeof(struct ufs_pa_layer_attr));
1274         } else {
1275                 memcpy(&new_pwr_info, &hba->pwr_info,
1276                        sizeof(struct ufs_pa_layer_attr));
1277
1278                 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1279                     hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1280                         /* save the current power mode */
1281                         memcpy(&hba->clk_scaling.saved_pwr_info,
1282                                 &hba->pwr_info,
1283                                 sizeof(struct ufs_pa_layer_attr));
1284
1285                         /* scale down gear */
1286                         new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1287                         new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1288                 }
1289         }
1290
1291         /* check if the power mode needs to be changed or not? */
1292         ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1293         if (ret)
1294                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1295                         __func__, ret,
1296                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1297                         new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1298
1299         return ret;
1300 }
1301
1302 /*
1303  * Wait until all pending SCSI commands and TMFs have finished or the timeout
1304  * has expired.
1305  *
1306  * Return: 0 upon success; -EBUSY upon timeout.
1307  */
1308 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
1309 {
1310         int ret = 0;
1311         /*
1312          * make sure that there are no outstanding requests when
1313          * clock scaling is in progress
1314          */
1315         ufshcd_scsi_block_requests(hba);
1316         mutex_lock(&hba->wb_mutex);
1317         down_write(&hba->clk_scaling_lock);
1318
1319         if (!hba->clk_scaling.is_allowed ||
1320             ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
1321                 ret = -EBUSY;
1322                 up_write(&hba->clk_scaling_lock);
1323                 mutex_unlock(&hba->wb_mutex);
1324                 ufshcd_scsi_unblock_requests(hba);
1325                 goto out;
1326         }
1327
1328         /* let's not get into low power until clock scaling is completed */
1329         ufshcd_hold(hba, false);
1330
1331 out:
1332         return ret;
1333 }
1334
1335 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
1336 {
1337         up_write(&hba->clk_scaling_lock);
1338
1339         /* Enable Write Booster if we have scaled up else disable it */
1340         if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
1341                 ufshcd_wb_toggle(hba, scale_up);
1342
1343         mutex_unlock(&hba->wb_mutex);
1344
1345         ufshcd_scsi_unblock_requests(hba);
1346         ufshcd_release(hba);
1347 }
1348
1349 /**
1350  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1351  * @hba: per adapter instance
1352  * @scale_up: True for scaling up and false for scaling down
1353  *
1354  * Returns 0 for success,
1355  * Returns -EBUSY if scaling can't happen at this time
1356  * Returns non-zero for any other errors
1357  */
1358 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1359 {
1360         int ret = 0;
1361
1362         ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
1363         if (ret)
1364                 return ret;
1365
1366         /* scale down the gear before scaling down clocks */
1367         if (!scale_up) {
1368                 ret = ufshcd_scale_gear(hba, false);
1369                 if (ret)
1370                         goto out_unprepare;
1371         }
1372
1373         ret = ufshcd_scale_clks(hba, scale_up);
1374         if (ret) {
1375                 if (!scale_up)
1376                         ufshcd_scale_gear(hba, true);
1377                 goto out_unprepare;
1378         }
1379
1380         /* scale up the gear after scaling up clocks */
1381         if (scale_up) {
1382                 ret = ufshcd_scale_gear(hba, true);
1383                 if (ret) {
1384                         ufshcd_scale_clks(hba, false);
1385                         goto out_unprepare;
1386                 }
1387         }
1388
1389 out_unprepare:
1390         ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
1391         return ret;
1392 }
1393
1394 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1395 {
1396         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1397                                            clk_scaling.suspend_work);
1398         unsigned long irq_flags;
1399
1400         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1401         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1402                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1403                 return;
1404         }
1405         hba->clk_scaling.is_suspended = true;
1406         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1407
1408         __ufshcd_suspend_clkscaling(hba);
1409 }
1410
1411 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1412 {
1413         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1414                                            clk_scaling.resume_work);
1415         unsigned long irq_flags;
1416
1417         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1418         if (!hba->clk_scaling.is_suspended) {
1419                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1420                 return;
1421         }
1422         hba->clk_scaling.is_suspended = false;
1423         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1424
1425         devfreq_resume_device(hba->devfreq);
1426 }
1427
1428 static int ufshcd_devfreq_target(struct device *dev,
1429                                 unsigned long *freq, u32 flags)
1430 {
1431         int ret = 0;
1432         struct ufs_hba *hba = dev_get_drvdata(dev);
1433         ktime_t start;
1434         bool scale_up, sched_clk_scaling_suspend_work = false;
1435         struct list_head *clk_list = &hba->clk_list_head;
1436         struct ufs_clk_info *clki;
1437         unsigned long irq_flags;
1438
1439         if (!ufshcd_is_clkscaling_supported(hba))
1440                 return -EINVAL;
1441
1442         clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1443         /* Override with the closest supported frequency */
1444         *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1445         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1446         if (ufshcd_eh_in_progress(hba)) {
1447                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1448                 return 0;
1449         }
1450
1451         if (!hba->clk_scaling.active_reqs)
1452                 sched_clk_scaling_suspend_work = true;
1453
1454         if (list_empty(clk_list)) {
1455                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1456                 goto out;
1457         }
1458
1459         /* Decide based on the rounded-off frequency and update */
1460         scale_up = *freq == clki->max_freq;
1461         if (!scale_up)
1462                 *freq = clki->min_freq;
1463         /* Update the frequency */
1464         if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1465                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1466                 ret = 0;
1467                 goto out; /* no state change required */
1468         }
1469         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1470
1471         start = ktime_get();
1472         ret = ufshcd_devfreq_scale(hba, scale_up);
1473
1474         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1475                 (scale_up ? "up" : "down"),
1476                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1477
1478 out:
1479         if (sched_clk_scaling_suspend_work)
1480                 queue_work(hba->clk_scaling.workq,
1481                            &hba->clk_scaling.suspend_work);
1482
1483         return ret;
1484 }
1485
1486 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1487                 struct devfreq_dev_status *stat)
1488 {
1489         struct ufs_hba *hba = dev_get_drvdata(dev);
1490         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1491         unsigned long flags;
1492         struct list_head *clk_list = &hba->clk_list_head;
1493         struct ufs_clk_info *clki;
1494         ktime_t curr_t;
1495
1496         if (!ufshcd_is_clkscaling_supported(hba))
1497                 return -EINVAL;
1498
1499         memset(stat, 0, sizeof(*stat));
1500
1501         spin_lock_irqsave(hba->host->host_lock, flags);
1502         curr_t = ktime_get();
1503         if (!scaling->window_start_t)
1504                 goto start_window;
1505
1506         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1507         /*
1508           * If the current frequency is 0, the ondemand governor assumes that
1509           * no initial frequency has been set and will always request the
1510           * maximum frequency.
1511          */
1512         stat->current_frequency = clki->curr_freq;
1513         if (scaling->is_busy_started)
1514                 scaling->tot_busy_t += ktime_us_delta(curr_t,
1515                                 scaling->busy_start_t);
1516
1517         stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1518         stat->busy_time = scaling->tot_busy_t;
1519 start_window:
1520         scaling->window_start_t = curr_t;
1521         scaling->tot_busy_t = 0;
1522
1523         if (scaling->active_reqs) {
1524                 scaling->busy_start_t = curr_t;
1525                 scaling->is_busy_started = true;
1526         } else {
1527                 scaling->busy_start_t = 0;
1528                 scaling->is_busy_started = false;
1529         }
1530         spin_unlock_irqrestore(hba->host->host_lock, flags);
1531         return 0;
1532 }
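/*
 * Editor's illustration (not part of the driver): the statistics reported
 * above are what devfreq's simple_ondemand governor works from. A minimal
 * worked example for one polling window:
 *
 *	stat->busy_time  =  60000 us	(time with at least one request queued)
 *	stat->total_time = 100000 us	(length of the window)
 *
 * gives a 60% load for that window; the governor compares this ratio against
 * its up/down thresholds to decide whether to request clki->max_freq or
 * clki->min_freq on the next ufshcd_devfreq_target() call.
 */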
1533
1534 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1535 {
1536         struct list_head *clk_list = &hba->clk_list_head;
1537         struct ufs_clk_info *clki;
1538         struct devfreq *devfreq;
1539         int ret;
1540
1541         /* Skip devfreq if we don't have any clocks in the list */
1542         if (list_empty(clk_list))
1543                 return 0;
1544
1545         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1546         dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1547         dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1548
1549         ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1550                                          &hba->vps->ondemand_data);
1551         devfreq = devfreq_add_device(hba->dev,
1552                         &hba->vps->devfreq_profile,
1553                         DEVFREQ_GOV_SIMPLE_ONDEMAND,
1554                         &hba->vps->ondemand_data);
1555         if (IS_ERR(devfreq)) {
1556                 ret = PTR_ERR(devfreq);
1557                 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1558
1559                 dev_pm_opp_remove(hba->dev, clki->min_freq);
1560                 dev_pm_opp_remove(hba->dev, clki->max_freq);
1561                 return ret;
1562         }
1563
1564         hba->devfreq = devfreq;
1565
1566         return 0;
1567 }
1568
1569 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1570 {
1571         struct list_head *clk_list = &hba->clk_list_head;
1572         struct ufs_clk_info *clki;
1573
1574         if (!hba->devfreq)
1575                 return;
1576
1577         devfreq_remove_device(hba->devfreq);
1578         hba->devfreq = NULL;
1579
1580         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1581         dev_pm_opp_remove(hba->dev, clki->min_freq);
1582         dev_pm_opp_remove(hba->dev, clki->max_freq);
1583 }
1584
1585 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1586 {
1587         unsigned long flags;
1588
1589         devfreq_suspend_device(hba->devfreq);
1590         spin_lock_irqsave(hba->host->host_lock, flags);
1591         hba->clk_scaling.window_start_t = 0;
1592         spin_unlock_irqrestore(hba->host->host_lock, flags);
1593 }
1594
1595 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1596 {
1597         unsigned long flags;
1598         bool suspend = false;
1599
1600         cancel_work_sync(&hba->clk_scaling.suspend_work);
1601         cancel_work_sync(&hba->clk_scaling.resume_work);
1602
1603         spin_lock_irqsave(hba->host->host_lock, flags);
1604         if (!hba->clk_scaling.is_suspended) {
1605                 suspend = true;
1606                 hba->clk_scaling.is_suspended = true;
1607         }
1608         spin_unlock_irqrestore(hba->host->host_lock, flags);
1609
1610         if (suspend)
1611                 __ufshcd_suspend_clkscaling(hba);
1612 }
1613
1614 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1615 {
1616         unsigned long flags;
1617         bool resume = false;
1618
1619         spin_lock_irqsave(hba->host->host_lock, flags);
1620         if (hba->clk_scaling.is_suspended) {
1621                 resume = true;
1622                 hba->clk_scaling.is_suspended = false;
1623         }
1624         spin_unlock_irqrestore(hba->host->host_lock, flags);
1625
1626         if (resume)
1627                 devfreq_resume_device(hba->devfreq);
1628 }
1629
1630 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1631                 struct device_attribute *attr, char *buf)
1632 {
1633         struct ufs_hba *hba = dev_get_drvdata(dev);
1634
1635         return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1636 }
1637
1638 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1639                 struct device_attribute *attr, const char *buf, size_t count)
1640 {
1641         struct ufs_hba *hba = dev_get_drvdata(dev);
1642         u32 value;
1643         int err = 0;
1644
1645         if (kstrtou32(buf, 0, &value))
1646                 return -EINVAL;
1647
1648         down(&hba->host_sem);
1649         if (!ufshcd_is_user_access_allowed(hba)) {
1650                 err = -EBUSY;
1651                 goto out;
1652         }
1653
1654         value = !!value;
1655         if (value == hba->clk_scaling.is_enabled)
1656                 goto out;
1657
1658         ufshcd_rpm_get_sync(hba);
1659         ufshcd_hold(hba, false);
1660
1661         hba->clk_scaling.is_enabled = value;
1662
1663         if (value) {
1664                 ufshcd_resume_clkscaling(hba);
1665         } else {
1666                 ufshcd_suspend_clkscaling(hba);
1667                 err = ufshcd_devfreq_scale(hba, true);
1668                 if (err)
1669                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1670                                         __func__, err);
1671         }
1672
1673         ufshcd_release(hba);
1674         ufshcd_rpm_put_sync(hba);
1675 out:
1676         up(&hba->host_sem);
1677         return err ? err : count;
1678 }
1679
1680 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1681 {
1682         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1683         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1684         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1685         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1686         hba->clk_scaling.enable_attr.attr.mode = 0644;
1687         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1688                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1689 }
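/*
 * Editor's illustration (not part of the driver): the clkscale_enable
 * attribute created above can be toggled from user space. A minimal
 * user-space sketch, assuming a hypothetical sysfs path (the real path
 * depends on where the UFS host controller sits in the platform topology):
 *
 *	int fd = open("/sys/devices/platform/soc/1d84000.ufshc/clkscale_enable",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0", 1);	(disable scaling; clocks stay at max)
 *		close(fd);
 *	}
 *
 * Writing "0" takes the path in ufshcd_clkscale_enable_store() that suspends
 * devfreq and calls ufshcd_devfreq_scale(hba, true), i.e. it scales the
 * clocks back up before leaving them fixed.
 */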
1690
1691 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1692 {
1693         if (hba->clk_scaling.enable_attr.attr.name)
1694                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1695 }
1696
1697 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1698 {
1699         char wq_name[sizeof("ufs_clkscaling_00")];
1700
1701         if (!ufshcd_is_clkscaling_supported(hba))
1702                 return;
1703
1704         if (!hba->clk_scaling.min_gear)
1705                 hba->clk_scaling.min_gear = UFS_HS_G1;
1706
1707         INIT_WORK(&hba->clk_scaling.suspend_work,
1708                   ufshcd_clk_scaling_suspend_work);
1709         INIT_WORK(&hba->clk_scaling.resume_work,
1710                   ufshcd_clk_scaling_resume_work);
1711
1712         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1713                  hba->host->host_no);
1714         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1715
1716         hba->clk_scaling.is_initialized = true;
1717 }
1718
1719 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1720 {
1721         if (!hba->clk_scaling.is_initialized)
1722                 return;
1723
1724         ufshcd_remove_clk_scaling_sysfs(hba);
1725         destroy_workqueue(hba->clk_scaling.workq);
1726         ufshcd_devfreq_remove(hba);
1727         hba->clk_scaling.is_initialized = false;
1728 }
1729
1730 static void ufshcd_ungate_work(struct work_struct *work)
1731 {
1732         int ret;
1733         unsigned long flags;
1734         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1735                         clk_gating.ungate_work);
1736
1737         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1738
1739         spin_lock_irqsave(hba->host->host_lock, flags);
1740         if (hba->clk_gating.state == CLKS_ON) {
1741                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1742                 goto unblock_reqs;
1743         }
1744
1745         spin_unlock_irqrestore(hba->host->host_lock, flags);
1746         ufshcd_hba_vreg_set_hpm(hba);
1747         ufshcd_setup_clocks(hba, true);
1748
1749         ufshcd_enable_irq(hba);
1750
1751         /* Exit from hibern8 */
1752         if (ufshcd_can_hibern8_during_gating(hba)) {
1753                 /* Prevent gating in this path */
1754                 hba->clk_gating.is_suspended = true;
1755                 if (ufshcd_is_link_hibern8(hba)) {
1756                         ret = ufshcd_uic_hibern8_exit(hba);
1757                         if (ret)
1758                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1759                                         __func__, ret);
1760                         else
1761                                 ufshcd_set_link_active(hba);
1762                 }
1763                 hba->clk_gating.is_suspended = false;
1764         }
1765 unblock_reqs:
1766         ufshcd_scsi_unblock_requests(hba);
1767 }
1768
1769 /**
1770  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1771  * Also, exit from hibern8 mode and set the link as active.
1772  * @hba: per adapter instance
1773  * @async: This indicates whether caller should ungate clocks asynchronously.
1774  */
1775 int ufshcd_hold(struct ufs_hba *hba, bool async)
1776 {
1777         int rc = 0;
1778         bool flush_result;
1779         unsigned long flags;
1780
1781         if (!ufshcd_is_clkgating_allowed(hba) ||
1782             !hba->clk_gating.is_initialized)
1783                 goto out;
1784         spin_lock_irqsave(hba->host->host_lock, flags);
1785         hba->clk_gating.active_reqs++;
1786
1787 start:
1788         switch (hba->clk_gating.state) {
1789         case CLKS_ON:
1790                 /*
1791                  * Wait for the ungate work to complete if in progress.
1792                  * Though the clocks may be in ON state, the link could
1793                  * still be in hibern8 state if hibern8 is allowed
1794                  * during clock gating.
1795                  * Make sure we also exit hibern8, in addition to the
1796                  * clocks being ON.
1797                  */
1798                 if (ufshcd_can_hibern8_during_gating(hba) &&
1799                     ufshcd_is_link_hibern8(hba)) {
1800                         if (async) {
1801                                 rc = -EAGAIN;
1802                                 hba->clk_gating.active_reqs--;
1803                                 break;
1804                         }
1805                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1806                         flush_result = flush_work(&hba->clk_gating.ungate_work);
1807                         if (hba->clk_gating.is_suspended && !flush_result)
1808                                 goto out;
1809                         spin_lock_irqsave(hba->host->host_lock, flags);
1810                         goto start;
1811                 }
1812                 break;
1813         case REQ_CLKS_OFF:
1814                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1815                         hba->clk_gating.state = CLKS_ON;
1816                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1817                                                 hba->clk_gating.state);
1818                         break;
1819                 }
1820                 /*
1821                  * If we are here, it means gating work is either done or
1822                  * currently running. Hence, fall through to cancel gating
1823                  * work and to enable clocks.
1824                  */
1825                 fallthrough;
1826         case CLKS_OFF:
1827                 hba->clk_gating.state = REQ_CLKS_ON;
1828                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1829                                         hba->clk_gating.state);
1830                 if (queue_work(hba->clk_gating.clk_gating_workq,
1831                                &hba->clk_gating.ungate_work))
1832                         ufshcd_scsi_block_requests(hba);
1833                 /*
1834                  * fall through to check if we should wait for this
1835                  * work to be done or not.
1836                  */
1837                 fallthrough;
1838         case REQ_CLKS_ON:
1839                 if (async) {
1840                         rc = -EAGAIN;
1841                         hba->clk_gating.active_reqs--;
1842                         break;
1843                 }
1844
1845                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1846                 flush_work(&hba->clk_gating.ungate_work);
1847                 /* Make sure state is CLKS_ON before returning */
1848                 spin_lock_irqsave(hba->host->host_lock, flags);
1849                 goto start;
1850         default:
1851                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1852                                 __func__, hba->clk_gating.state);
1853                 break;
1854         }
1855         spin_unlock_irqrestore(hba->host->host_lock, flags);
1856 out:
1857         return rc;
1858 }
1859 EXPORT_SYMBOL_GPL(ufshcd_hold);
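/*
 * Editor's illustration (not part of the driver): callers bracket work that
 * needs the controller clocks with ufshcd_hold()/ufshcd_release(). A minimal
 * sketch of the two usage patterns:
 *
 *	synchronous - sleep until the clocks are ungated and the link is active:
 *
 *		ufshcd_hold(hba, false);
 *		... access host registers or issue the request ...
 *		ufshcd_release(hba);
 *
 *	asynchronous - from a fast path, back off instead of sleeping and let
 *	the caller retry, as ufshcd_queuecommand() does:
 *
 *		if (ufshcd_hold(hba, true))
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *
 * ufshcd_release() is defined further below; it only queues the delayed gate
 * work once the last active request drops its reference.
 */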
1860
1861 static void ufshcd_gate_work(struct work_struct *work)
1862 {
1863         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1864                         clk_gating.gate_work.work);
1865         unsigned long flags;
1866         int ret;
1867
1868         spin_lock_irqsave(hba->host->host_lock, flags);
1869         /*
1870          * If we are here while this work is being cancelled, the gating
1871          * state will already be marked REQ_CLKS_ON. In that case save time
1872          * by skipping the gating work and exit after changing the clock
1873          * state to CLKS_ON.
1874          */
1875         if (hba->clk_gating.is_suspended ||
1876                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1877                 hba->clk_gating.state = CLKS_ON;
1878                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1879                                         hba->clk_gating.state);
1880                 goto rel_lock;
1881         }
1882
1883         if (hba->clk_gating.active_reqs
1884                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1885                 || hba->outstanding_reqs || hba->outstanding_tasks
1886                 || hba->active_uic_cmd || hba->uic_async_done)
1887                 goto rel_lock;
1888
1889         spin_unlock_irqrestore(hba->host->host_lock, flags);
1890
1891         /* put the link into hibern8 mode before turning off clocks */
1892         if (ufshcd_can_hibern8_during_gating(hba)) {
1893                 ret = ufshcd_uic_hibern8_enter(hba);
1894                 if (ret) {
1895                         hba->clk_gating.state = CLKS_ON;
1896                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1897                                         __func__, ret);
1898                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1899                                                 hba->clk_gating.state);
1900                         goto out;
1901                 }
1902                 ufshcd_set_link_hibern8(hba);
1903         }
1904
1905         ufshcd_disable_irq(hba);
1906
1907         ufshcd_setup_clocks(hba, false);
1908
1909         /* Put the host controller in low power mode if possible */
1910         ufshcd_hba_vreg_set_lpm(hba);
1911         /*
1912          * If we are here while this work is being cancelled, the gating
1913          * state will already be marked REQ_CLKS_ON. In that case keep the
1914          * state as REQ_CLKS_ON, which anyway implies that the clocks are
1915          * off and a request to turn them on is pending. Doing it this way
1916          * keeps the state machine intact and ultimately prevents the cancel
1917          * work from running multiple times when new requests arrive before
1918          * the current cancel work is done.
1919          */
1920         spin_lock_irqsave(hba->host->host_lock, flags);
1921         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1922                 hba->clk_gating.state = CLKS_OFF;
1923                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1924                                         hba->clk_gating.state);
1925         }
1926 rel_lock:
1927         spin_unlock_irqrestore(hba->host->host_lock, flags);
1928 out:
1929         return;
1930 }
1931
1932 /* host lock must be held before calling this variant */
1933 static void __ufshcd_release(struct ufs_hba *hba)
1934 {
1935         if (!ufshcd_is_clkgating_allowed(hba))
1936                 return;
1937
1938         hba->clk_gating.active_reqs--;
1939
1940         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1941             hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1942             hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
1943             hba->active_uic_cmd || hba->uic_async_done ||
1944             hba->clk_gating.state == CLKS_OFF)
1945                 return;
1946
1947         hba->clk_gating.state = REQ_CLKS_OFF;
1948         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1949         queue_delayed_work(hba->clk_gating.clk_gating_workq,
1950                            &hba->clk_gating.gate_work,
1951                            msecs_to_jiffies(hba->clk_gating.delay_ms));
1952 }
1953
1954 void ufshcd_release(struct ufs_hba *hba)
1955 {
1956         unsigned long flags;
1957
1958         spin_lock_irqsave(hba->host->host_lock, flags);
1959         __ufshcd_release(hba);
1960         spin_unlock_irqrestore(hba->host->host_lock, flags);
1961 }
1962 EXPORT_SYMBOL_GPL(ufshcd_release);
1963
1964 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1965                 struct device_attribute *attr, char *buf)
1966 {
1967         struct ufs_hba *hba = dev_get_drvdata(dev);
1968
1969         return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1970 }
1971
1972 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
1973 {
1974         struct ufs_hba *hba = dev_get_drvdata(dev);
1975         unsigned long flags;
1976
1977         spin_lock_irqsave(hba->host->host_lock, flags);
1978         hba->clk_gating.delay_ms = value;
1979         spin_unlock_irqrestore(hba->host->host_lock, flags);
1980 }
1981 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
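/*
 * Editor's illustration (not part of the driver): a host variant driver that
 * wants a different idle window before gating can use the exported helper
 * above, e.g. at probe time:
 *
 *	ufshcd_clkgate_delay_set(hba->dev, 10);	(gate 10 ms after the last request)
 *
 * The same value is exposed through the clkgate_delay_ms sysfs attribute
 * created in ufshcd_init_clk_gating_sysfs() below; the driver default is
 * 150 ms, set in ufshcd_init_clk_gating().
 */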
1982
1983 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1984                 struct device_attribute *attr, const char *buf, size_t count)
1985 {
1986         unsigned long value;
1987
1988         if (kstrtoul(buf, 0, &value))
1989                 return -EINVAL;
1990
1991         ufshcd_clkgate_delay_set(dev, value);
1992         return count;
1993 }
1994
1995 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1996                 struct device_attribute *attr, char *buf)
1997 {
1998         struct ufs_hba *hba = dev_get_drvdata(dev);
1999
2000         return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
2001 }
2002
2003 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
2004                 struct device_attribute *attr, const char *buf, size_t count)
2005 {
2006         struct ufs_hba *hba = dev_get_drvdata(dev);
2007         unsigned long flags;
2008         u32 value;
2009
2010         if (kstrtou32(buf, 0, &value))
2011                 return -EINVAL;
2012
2013         value = !!value;
2014
2015         spin_lock_irqsave(hba->host->host_lock, flags);
2016         if (value == hba->clk_gating.is_enabled)
2017                 goto out;
2018
2019         if (value)
2020                 __ufshcd_release(hba);
2021         else
2022                 hba->clk_gating.active_reqs++;
2023
2024         hba->clk_gating.is_enabled = value;
2025 out:
2026         spin_unlock_irqrestore(hba->host->host_lock, flags);
2027         return count;
2028 }
2029
2030 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
2031 {
2032         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
2033         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
2034         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
2035         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
2036         hba->clk_gating.delay_attr.attr.mode = 0644;
2037         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
2038                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
2039
2040         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
2041         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
2042         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
2043         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
2044         hba->clk_gating.enable_attr.attr.mode = 0644;
2045         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
2046                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
2047 }
2048
2049 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
2050 {
2051         if (hba->clk_gating.delay_attr.attr.name)
2052                 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
2053         if (hba->clk_gating.enable_attr.attr.name)
2054                 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
2055 }
2056
2057 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
2058 {
2059         char wq_name[sizeof("ufs_clk_gating_00")];
2060
2061         if (!ufshcd_is_clkgating_allowed(hba))
2062                 return;
2063
2064         hba->clk_gating.state = CLKS_ON;
2065
2066         hba->clk_gating.delay_ms = 150;
2067         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
2068         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
2069
2070         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
2071                  hba->host->host_no);
2072         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
2073                                         WQ_MEM_RECLAIM | WQ_HIGHPRI);
2074
2075         ufshcd_init_clk_gating_sysfs(hba);
2076
2077         hba->clk_gating.is_enabled = true;
2078         hba->clk_gating.is_initialized = true;
2079 }
2080
2081 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
2082 {
2083         if (!hba->clk_gating.is_initialized)
2084                 return;
2085
2086         ufshcd_remove_clk_gating_sysfs(hba);
2087
2088         /* Ungate the clock if necessary. */
2089         ufshcd_hold(hba, false);
2090         hba->clk_gating.is_initialized = false;
2091         ufshcd_release(hba);
2092
2093         destroy_workqueue(hba->clk_gating.clk_gating_workq);
2094 }
2095
2096 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2097 {
2098         bool queue_resume_work = false;
2099         ktime_t curr_t = ktime_get();
2100         unsigned long flags;
2101
2102         if (!ufshcd_is_clkscaling_supported(hba))
2103                 return;
2104
2105         spin_lock_irqsave(hba->host->host_lock, flags);
2106         if (!hba->clk_scaling.active_reqs++)
2107                 queue_resume_work = true;
2108
2109         if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2110                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2111                 return;
2112         }
2113
2114         if (queue_resume_work)
2115                 queue_work(hba->clk_scaling.workq,
2116                            &hba->clk_scaling.resume_work);
2117
2118         if (!hba->clk_scaling.window_start_t) {
2119                 hba->clk_scaling.window_start_t = curr_t;
2120                 hba->clk_scaling.tot_busy_t = 0;
2121                 hba->clk_scaling.is_busy_started = false;
2122         }
2123
2124         if (!hba->clk_scaling.is_busy_started) {
2125                 hba->clk_scaling.busy_start_t = curr_t;
2126                 hba->clk_scaling.is_busy_started = true;
2127         }
2128         spin_unlock_irqrestore(hba->host->host_lock, flags);
2129 }
2130
2131 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2132 {
2133         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2134         unsigned long flags;
2135
2136         if (!ufshcd_is_clkscaling_supported(hba))
2137                 return;
2138
2139         spin_lock_irqsave(hba->host->host_lock, flags);
2140         hba->clk_scaling.active_reqs--;
2141         if (!scaling->active_reqs && scaling->is_busy_started) {
2142                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2143                                         scaling->busy_start_t));
2144                 scaling->busy_start_t = 0;
2145                 scaling->is_busy_started = false;
2146         }
2147         spin_unlock_irqrestore(hba->host->host_lock, flags);
2148 }
2149
2150 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2151 {
2152         if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2153                 return READ;
2154         else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2155                 return WRITE;
2156         else
2157                 return -EINVAL;
2158 }
2159
2160 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2161                                                 struct ufshcd_lrb *lrbp)
2162 {
2163         const struct ufs_hba_monitor *m = &hba->monitor;
2164
2165         return (m->enabled && lrbp && lrbp->cmd &&
2166                 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2167                 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2168 }
2169
2170 static void ufshcd_start_monitor(struct ufs_hba *hba,
2171                                  const struct ufshcd_lrb *lrbp)
2172 {
2173         int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2174         unsigned long flags;
2175
2176         spin_lock_irqsave(hba->host->host_lock, flags);
2177         if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2178                 hba->monitor.busy_start_ts[dir] = ktime_get();
2179         spin_unlock_irqrestore(hba->host->host_lock, flags);
2180 }
2181
2182 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
2183 {
2184         int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2185         unsigned long flags;
2186
2187         spin_lock_irqsave(hba->host->host_lock, flags);
2188         if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2189                 const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
2190                 struct ufs_hba_monitor *m = &hba->monitor;
2191                 ktime_t now, inc, lat;
2192
2193                 now = lrbp->compl_time_stamp;
2194                 inc = ktime_sub(now, m->busy_start_ts[dir]);
2195                 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2196                 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2197
2198                 /* Update latencies */
2199                 m->nr_req[dir]++;
2200                 lat = ktime_sub(now, lrbp->issue_time_stamp);
2201                 m->lat_sum[dir] += lat;
2202                 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2203                         m->lat_max[dir] = lat;
2204                 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2205                         m->lat_min[dir] = lat;
2206
2207                 m->nr_queued[dir]--;
2208                 /* Push forward the busy start of monitor */
2209                 m->busy_start_ts[dir] = now;
2210         }
2211         spin_unlock_irqrestore(hba->host->host_lock, flags);
2212 }
2213
2214 /**
2215  * ufshcd_send_command - Send SCSI or device management commands
2216  * @hba: per adapter instance
2217  * @task_tag: Task tag of the command
2218  * @hwq: pointer to hardware queue instance
2219  */
2220 static inline
2221 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
2222                          struct ufs_hw_queue *hwq)
2223 {
2224         struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2225         unsigned long flags;
2226
2227         lrbp->issue_time_stamp = ktime_get();
2228         lrbp->issue_time_stamp_local_clock = local_clock();
2229         lrbp->compl_time_stamp = ktime_set(0, 0);
2230         lrbp->compl_time_stamp_local_clock = 0;
2231         ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2232         ufshcd_clk_scaling_start_busy(hba);
2233         if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2234                 ufshcd_start_monitor(hba, lrbp);
2235
2236         if (is_mcq_enabled(hba)) {
2237                 int utrd_size = sizeof(struct utp_transfer_req_desc);
2238                 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
2239                 struct utp_transfer_req_desc *dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
2240
2241                 spin_lock(&hwq->sq_lock);
2242                 memcpy(dest, src, utrd_size);
2243                 ufshcd_inc_sq_tail(hwq);
2244                 spin_unlock(&hwq->sq_lock);
2245         } else {
2246                 spin_lock_irqsave(&hba->outstanding_lock, flags);
2247                 if (hba->vops && hba->vops->setup_xfer_req)
2248                         hba->vops->setup_xfer_req(hba, lrbp->task_tag,
2249                                                   !!lrbp->cmd);
2250                 __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
2251                 ufshcd_writel(hba, 1 << lrbp->task_tag,
2252                               REG_UTP_TRANSFER_REQ_DOOR_BELL);
2253                 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2254         }
2255 }
2256
2257 /**
2258  * ufshcd_copy_sense_data - Copy sense data in case of check condition
2259  * @lrbp: pointer to local reference block
2260  */
2261 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2262 {
2263         u8 *const sense_buffer = lrbp->cmd->sense_buffer;
2264         int len;
2265
2266         if (sense_buffer &&
2267             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2268                 int len_to_copy;
2269
2270                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2271                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2272
2273                 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2274                        len_to_copy);
2275         }
2276 }
2277
2278 /**
2279  * ufshcd_copy_query_response() - Copy the Query Response and the data
2280  * descriptor
2281  * @hba: per adapter instance
2282  * @lrbp: pointer to local reference block
2283  */
2284 static
2285 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2286 {
2287         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2288
2289         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2290
2291         /* Get the descriptor */
2292         if (hba->dev_cmd.query.descriptor &&
2293             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2294                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2295                                 GENERAL_UPIU_REQUEST_SIZE;
2296                 u16 resp_len;
2297                 u16 buf_len;
2298
2299                 /* data segment length */
2300                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2301                                                 MASK_QUERY_DATA_SEG_LEN;
2302                 buf_len = be16_to_cpu(
2303                                 hba->dev_cmd.query.request.upiu_req.length);
2304                 if (likely(buf_len >= resp_len)) {
2305                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2306                 } else {
2307                         dev_warn(hba->dev,
2308                                  "%s: rsp size %d is bigger than buffer size %d",
2309                                  __func__, resp_len, buf_len);
2310                         return -EINVAL;
2311                 }
2312         }
2313
2314         return 0;
2315 }
2316
2317 /**
2318  * ufshcd_hba_capabilities - Read controller capabilities
2319  * @hba: per adapter instance
2320  *
2321  * Return: 0 on success, negative on error.
2322  */
2323 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2324 {
2325         int err;
2326
2327         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2328         if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
2329                 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
2330
2331         /* nutrs and nutmrs are 0 based values */
2332         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2333         hba->nutmrs =
2334         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2335         hba->reserved_slot = hba->nutrs - 1;
2336
2337         /* Read crypto capabilities */
2338         err = ufshcd_hba_init_crypto_capabilities(hba);
2339         if (err)
2340                 dev_err(hba->dev, "crypto setup failed\n");
2341
2342         hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
2343         if (!hba->mcq_sup)
2344                 return err;
2345
2346         hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
2347         hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
2348                                      hba->mcq_capabilities);
2349
2350         return err;
2351 }
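/*
 * Editor's illustration (not part of the driver): the slot counts read above
 * are zero-based, so a controller advertising raw field values of 31 and 7
 * ends up with
 *
 *	hba->nutrs  = 31 + 1 = 32 transfer request slots
 *	hba->nutmrs =  7 + 1 =  8 task management request slots
 *	hba->reserved_slot = 31, i.e. the last tag is set aside for device
 *	management (query/NOP) commands
 *
 * which is the common 32-slot legacy (non-MCQ) configuration.
 */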
2352
2353 /**
2354  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2355  *                            to accept UIC commands
2356  * @hba: per adapter instance
2357  * Return: true if the controller is ready to accept UIC commands, else false
2358  */
2359 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2360 {
2361         return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
2362 }
2363
2364 /**
2365  * ufshcd_get_upmcrs - Get the power mode change request status
2366  * @hba: Pointer to adapter instance
2367  *
2368  * This function gets the UPMCRS field of the HCS register
2369  * Returns the value of the UPMCRS field
2370  */
2371 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2372 {
2373         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2374 }
2375
2376 /**
2377  * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
2378  * @hba: per adapter instance
2379  * @uic_cmd: UIC command
2380  */
2381 static inline void
2382 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2383 {
2384         lockdep_assert_held(&hba->uic_cmd_mutex);
2385
2386         WARN_ON(hba->active_uic_cmd);
2387
2388         hba->active_uic_cmd = uic_cmd;
2389
2390         /* Write Args */
2391         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2392         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2393         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2394
2395         ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2396
2397         /* Write UIC Cmd */
2398         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2399                       REG_UIC_COMMAND);
2400 }
2401
2402 /**
2403  * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
2404  * @hba: per adapter instance
2405  * @uic_cmd: UIC command
2406  *
2407  * Returns 0 only if successful.
2408  */
2409 static int
2410 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2411 {
2412         int ret;
2413         unsigned long flags;
2414
2415         lockdep_assert_held(&hba->uic_cmd_mutex);
2416
2417         if (wait_for_completion_timeout(&uic_cmd->done,
2418                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2419                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2420         } else {
2421                 ret = -ETIMEDOUT;
2422                 dev_err(hba->dev,
2423                         "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2424                         uic_cmd->command, uic_cmd->argument3);
2425
2426                 if (!uic_cmd->cmd_active) {
2427                         dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2428                                 __func__);
2429                         ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2430                 }
2431         }
2432
2433         spin_lock_irqsave(hba->host->host_lock, flags);
2434         hba->active_uic_cmd = NULL;
2435         spin_unlock_irqrestore(hba->host->host_lock, flags);
2436
2437         return ret;
2438 }
2439
2440 /**
2441  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2442  * @hba: per adapter instance
2443  * @uic_cmd: UIC command
2444  * @completion: initialize the completion only if this is set to true
2445  *
2446  * Returns 0 only if successful.
2447  */
2448 static int
2449 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2450                       bool completion)
2451 {
2452         lockdep_assert_held(&hba->uic_cmd_mutex);
2453         lockdep_assert_held(hba->host->host_lock);
2454
2455         if (!ufshcd_ready_for_uic_cmd(hba)) {
2456                 dev_err(hba->dev,
2457                         "Controller not ready to accept UIC commands\n");
2458                 return -EIO;
2459         }
2460
2461         if (completion)
2462                 init_completion(&uic_cmd->done);
2463
2464         uic_cmd->cmd_active = 1;
2465         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2466
2467         return 0;
2468 }
2469
2470 /**
2471  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2472  * @hba: per adapter instance
2473  * @uic_cmd: UIC command
2474  *
2475  * Returns 0 only if successful.
2476  */
2477 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2478 {
2479         int ret;
2480         unsigned long flags;
2481
2482         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2483                 return 0;
2484
2485         ufshcd_hold(hba, false);
2486         mutex_lock(&hba->uic_cmd_mutex);
2487         ufshcd_add_delay_before_dme_cmd(hba);
2488
2489         spin_lock_irqsave(hba->host->host_lock, flags);
2490         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2491         spin_unlock_irqrestore(hba->host->host_lock, flags);
2492         if (!ret)
2493                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2494
2495         mutex_unlock(&hba->uic_cmd_mutex);
2496
2497         ufshcd_release(hba);
2498         return ret;
2499 }
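/*
 * Editor's illustration (not part of the driver): a minimal sketch of issuing
 * a DME_GET through the helper above (in practice the driver goes through the
 * ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers, which add retries):
 *
 *	struct uic_command uic_cmd = {
 *		.command   = UIC_CMD_DME_GET,
 *		.argument1 = UIC_ARG_MIB(PA_GRANULARITY),
 *	};
 *	u32 granularity = 0;
 *
 *	if (!ufshcd_send_uic_cmd(hba, &uic_cmd))
 *		granularity = uic_cmd.argument3;
 *
 * On completion the UIC result code lands in argument2 and the read attribute
 * value in argument3.
 */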
2500
2501 /**
2502  * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
2503  * @hba:        per-adapter instance
2504  * @lrbp:       pointer to local reference block
2505  * @sg_entries: The number of sg list entries actually used
2506  * @sg_list:    Pointer to SG list
2507  */
2508 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2509                                struct scatterlist *sg_list)
2510 {
2511         struct ufshcd_sg_entry *prd;
2512         struct scatterlist *sg;
2513         int i;
2514
2515         if (sg_entries) {
2516
2517                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2518                         lrbp->utr_descriptor_ptr->prd_table_length =
2519                                 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
2520                 else
2521                         lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
2522
2523                 prd = lrbp->ucd_prdt_ptr;
2524
2525                 for_each_sg(sg_list, sg, sg_entries, i) {
2526                         const unsigned int len = sg_dma_len(sg);
2527
2528                         /*
2529                          * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2530                          * based value that indicates the length, in bytes, of
2531                          * the data block. A maximum of length of 256KB may
2532                          * exist for any entry. Bits 1:0 of this field shall be
2533                          * 11b to indicate Dword granularity. A value of '3'
2534                          * indicates 4 bytes, '7' indicates 8 bytes, etc."
2535                          */
2536                         WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
2537                         prd->size = cpu_to_le32(len - 1);
2538                         prd->addr = cpu_to_le64(sg->dma_address);
2539                         prd->reserved = 0;
2540                         prd = (void *)prd + ufshcd_sg_entry_size(hba);
2541                 }
2542         } else {
2543                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2544         }
2545 }
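/*
 * Editor's illustration (not part of the driver): a worked example of the
 * zero-based Data Byte Count encoding used above. A 4096-byte segment at bus
 * address 0x80400000 becomes
 *
 *	prd->size = cpu_to_le32(4096 - 1);	i.e. 0x00000FFF, bits 1:0 = 11b
 *	prd->addr = cpu_to_le64(0x80400000);
 *
 * so for any Dword-multiple length the low two bits of the stored count are
 * naturally 11b, as the quoted UFSHCI text requires.
 */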
2546
2547 /**
2548  * ufshcd_map_sg - Map scatter-gather list to prdt
2549  * @hba: per adapter instance
2550  * @lrbp: pointer to local reference block
2551  *
2552  * Returns 0 in case of success, non-zero value in case of failure
2553  */
2554 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2555 {
2556         struct scsi_cmnd *cmd = lrbp->cmd;
2557         int sg_segments = scsi_dma_map(cmd);
2558
2559         if (sg_segments < 0)
2560                 return sg_segments;
2561
2562         ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
2563
2564         return 0;
2565 }
2566
2567 /**
2568  * ufshcd_enable_intr - enable interrupts
2569  * @hba: per adapter instance
2570  * @intrs: interrupt bits
2571  */
2572 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2573 {
2574         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2575
2576         if (hba->ufs_version == ufshci_version(1, 0)) {
2577                 u32 rw;
2578                 rw = set & INTERRUPT_MASK_RW_VER_10;
2579                 set = rw | ((set ^ intrs) & intrs);
2580         } else {
2581                 set |= intrs;
2582         }
2583
2584         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2585 }
2586
2587 /**
2588  * ufshcd_disable_intr - disable interrupts
2589  * @hba: per adapter instance
2590  * @intrs: interrupt bits
2591  */
2592 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2593 {
2594         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2595
2596         if (hba->ufs_version == ufshci_version(1, 0)) {
2597                 u32 rw;
2598                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2599                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2600                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2601
2602         } else {
2603                 set &= ~intrs;
2604         }
2605
2606         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2607 }
2608
2609 /**
2610  * ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
2611  * header according to the request
2612  * @lrbp: pointer to local reference block
2613  * @upiu_flags: flags required in the header
2614  * @cmd_dir: request's data direction
2615  * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
2616  */
2617 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
2618                                         enum dma_data_direction cmd_dir, int ehs_length)
2619 {
2620         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2621         u32 data_direction;
2622         u32 dword_0;
2623         u32 dword_1 = 0;
2624         u32 dword_3 = 0;
2625
2626         if (cmd_dir == DMA_FROM_DEVICE) {
2627                 data_direction = UTP_DEVICE_TO_HOST;
2628                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2629         } else if (cmd_dir == DMA_TO_DEVICE) {
2630                 data_direction = UTP_HOST_TO_DEVICE;
2631                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2632         } else {
2633                 data_direction = UTP_NO_DATA_TRANSFER;
2634                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2635         }
2636
2637         dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
2638                 ehs_length << 8;
2639         if (lrbp->intr_cmd)
2640                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2641
2642         /* Prepare crypto related dwords */
2643         ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2644
2645         /* Transfer request descriptor header fields */
2646         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2647         req_desc->header.dword_1 = cpu_to_le32(dword_1);
2648         /*
2649          * Assign an invalid value to the command status. The controller
2650          * updates OCS with the actual command status on command
2651          * completion.
2652          */
2653         req_desc->header.dword_2 =
2654                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2655         req_desc->header.dword_3 = cpu_to_le32(dword_3);
2656
2657         req_desc->prd_table_length = 0;
2658 }
2659
2660 /**
2661  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
2662  * for scsi commands
2663  * @lrbp: local reference block pointer
2664  * @upiu_flags: flags
2665  */
2666 static
2667 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2668 {
2669         struct scsi_cmnd *cmd = lrbp->cmd;
2670         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2671         unsigned short cdb_len;
2672
2673         /* command descriptor fields */
2674         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2675                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2676                                 lrbp->lun, lrbp->task_tag);
2677         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2678                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2679
2680         /* Total EHS length and Data segment length will be zero */
2681         ucd_req_ptr->header.dword_2 = 0;
2682
2683         ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2684
2685         cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2686         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2687         memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2688
2689         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2690 }
2691
2692 /**
2693  * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
2694  * @hba: UFS hba
2695  * @lrbp: local reference block pointer
2696  * @upiu_flags: flags
2697  */
2698 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2699                                 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2700 {
2701         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2702         struct ufs_query *query = &hba->dev_cmd.query;
2703         u16 len = be16_to_cpu(query->request.upiu_req.length);
2704
2705         /* Query request header */
2706         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2707                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2708                         lrbp->lun, lrbp->task_tag);
2709         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2710                         0, query->request.query_func, 0, 0);
2711
2712         /* Data segment length is only needed for WRITE_DESC */
2713         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2714                 ucd_req_ptr->header.dword_2 =
2715                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2716         else
2717                 ucd_req_ptr->header.dword_2 = 0;
2718
2719         /* Copy the Query Request buffer as is */
2720         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2721                         QUERY_OSF_SIZE);
2722
2723         /* Copy the Descriptor */
2724         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2725                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2726
2727         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2728 }
2729
2730 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2731 {
2732         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2733
2734         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2735
2736         /* command descriptor fields */
2737         ucd_req_ptr->header.dword_0 =
2738                 UPIU_HEADER_DWORD(
2739                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2740         /* clear rest of the fields of basic header */
2741         ucd_req_ptr->header.dword_1 = 0;
2742         ucd_req_ptr->header.dword_2 = 0;
2743
2744         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2745 }
2746
2747 /**
2748  * ufshcd_compose_devman_upiu - UFS Protocol Information Unit (UPIU)
2749  *                           for Device Management Purposes
2750  * @hba: per adapter instance
2751  * @lrbp: pointer to local reference block
2752  */
2753 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2754                                       struct ufshcd_lrb *lrbp)
2755 {
2756         u8 upiu_flags;
2757         int ret = 0;
2758
2759         if (hba->ufs_version <= ufshci_version(1, 1))
2760                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2761         else
2762                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2763
2764         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
2765         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2766                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2767         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2768                 ufshcd_prepare_utp_nop_upiu(lrbp);
2769         else
2770                 ret = -EINVAL;
2771
2772         return ret;
2773 }
2774
2775 /**
2776  * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2777  *                         for SCSI Purposes
2778  * @hba: per adapter instance
2779  * @lrbp: pointer to local reference block
2780  */
2781 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2782 {
2783         u8 upiu_flags;
2784         int ret = 0;
2785
2786         if (hba->ufs_version <= ufshci_version(1, 1))
2787                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2788         else
2789                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2790
2791         if (likely(lrbp->cmd)) {
2792                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
2793                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2794         } else {
2795                 ret = -EINVAL;
2796         }
2797
2798         return ret;
2799 }
2800
2801 /**
2802  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2803  * @upiu_wlun_id: UPIU W-LUN id
2804  *
2805  * Returns SCSI W-LUN id
2806  */
2807 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2808 {
2809         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2810 }
2811
2812 static inline bool is_device_wlun(struct scsi_device *sdev)
2813 {
2814         return sdev->lun ==
2815                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2816 }
2817
2818 /*
2819  * Associate the UFS controller queue with the default and poll HCTX types.
2820  * Initialize the mq_map[] arrays.
2821  */
2822 static void ufshcd_map_queues(struct Scsi_Host *shost)
2823 {
2824         struct ufs_hba *hba = shost_priv(shost);
2825         int i, queue_offset = 0;
2826
2827         if (!is_mcq_supported(hba)) {
2828                 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
2829                 hba->nr_queues[HCTX_TYPE_READ] = 0;
2830                 hba->nr_queues[HCTX_TYPE_POLL] = 1;
2831                 hba->nr_hw_queues = 1;
2832         }
2833
2834         for (i = 0; i < shost->nr_maps; i++) {
2835                 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2836
2837                 map->nr_queues = hba->nr_queues[i];
2838                 if (!map->nr_queues)
2839                         continue;
2840                 map->queue_offset = queue_offset;
2841                 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
2842                         map->queue_offset = 0;
2843
2844                 blk_mq_map_queues(map);
2845                 queue_offset += map->nr_queues;
2846         }
2847 }
2848
2849 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2850 {
2851         struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2852                 i * sizeof_utp_transfer_cmd_desc(hba);
2853         struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2854         dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2855                 i * sizeof_utp_transfer_cmd_desc(hba);
2856         u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2857                                        response_upiu);
2858         u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2859
2860         lrb->utr_descriptor_ptr = utrdlp + i;
2861         lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2862                 i * sizeof(struct utp_transfer_req_desc);
2863         lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
2864         lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2865         lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
2866         lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2867         lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
2868         lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2869 }
2870
2871 /**
2872  * ufshcd_queuecommand - main entry point for SCSI requests
2873  * @host: SCSI host pointer
2874  * @cmd: command from SCSI Midlayer
2875  *
2876  * Returns 0 for success, non-zero in case of failure
2877  */
2878 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2879 {
2880         struct ufs_hba *hba = shost_priv(host);
2881         int tag = scsi_cmd_to_rq(cmd)->tag;
2882         struct ufshcd_lrb *lrbp;
2883         int err = 0;
2884         struct ufs_hw_queue *hwq = NULL;
2885
2886         WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
2887
2888         /*
2889          * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
2890          * calls.
2891          */
2892         rcu_read_lock();
2893
2894         switch (hba->ufshcd_state) {
2895         case UFSHCD_STATE_OPERATIONAL:
2896                 break;
2897         case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2898                 /*
2899                  * SCSI error handler can call ->queuecommand() while UFS error
2900                  * handler is in progress. Error interrupts could change the
2901                  * state from UFSHCD_STATE_RESET to
2902                  * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2903                  * from being issued in that case.
2904                  */
2905                 if (ufshcd_eh_in_progress(hba)) {
2906                         err = SCSI_MLQUEUE_HOST_BUSY;
2907                         goto out;
2908                 }
2909                 break;
2910         case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2911                 /*
2912                  * pm_runtime_get_sync() is used at the error handling preparation
2913                  * stage. If a SCSI cmd, e.g. the SSU cmd, is sent from the hba's
2914                  * PM ops, it can never finish if we let the SCSI layer keep
2915                  * retrying it, which leaves the err handler stuck forever. Nor
2916                  * can we let the SCSI cmd pass through: UFS is in a bad state,
2917                  * so the cmd may eventually time out, which would block the err
2918                  * handler for too long. So just fail SCSI cmds sent from PM ops;
2919                  * the err handler can recover from PM errors anyway.
2920                  */
2921                 if (hba->pm_op_in_progress) {
2922                         hba->force_reset = true;
2923                         set_host_byte(cmd, DID_BAD_TARGET);
2924                         scsi_done(cmd);
2925                         goto out;
2926                 }
2927                 fallthrough;
2928         case UFSHCD_STATE_RESET:
2929                 err = SCSI_MLQUEUE_HOST_BUSY;
2930                 goto out;
2931         case UFSHCD_STATE_ERROR:
2932                 set_host_byte(cmd, DID_ERROR);
2933                 scsi_done(cmd);
2934                 goto out;
2935         }
2936
2937         hba->req_abort_count = 0;
2938
2939         err = ufshcd_hold(hba, true);
2940         if (err) {
2941                 err = SCSI_MLQUEUE_HOST_BUSY;
2942                 goto out;
2943         }
2944         WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2945                 (hba->clk_gating.state != CLKS_ON));
2946
2947         lrbp = &hba->lrb[tag];
2948         WARN_ON(lrbp->cmd);
2949         lrbp->cmd = cmd;
2950         lrbp->task_tag = tag;
2951         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2952         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2953
2954         ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
2955
2956         lrbp->req_abort_skip = false;
2957
2958         ufshpb_prep(hba, lrbp);
2959
2960         ufshcd_comp_scsi_upiu(hba, lrbp);
2961
2962         err = ufshcd_map_sg(hba, lrbp);
2963         if (err) {
2964                 lrbp->cmd = NULL;
2965                 ufshcd_release(hba);
2966                 goto out;
2967         }
2968
2969         if (is_mcq_enabled(hba))
2970                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
2971
2972         ufshcd_send_command(hba, tag, hwq);
2973
2974 out:
2975         rcu_read_unlock();
2976
2977         if (ufs_trigger_eh()) {
2978                 unsigned long flags;
2979
2980                 spin_lock_irqsave(hba->host->host_lock, flags);
2981                 ufshcd_schedule_eh_work(hba);
2982                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2983         }
2984
2985         return err;
2986 }
2987
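/**
 * ufshcd_compose_dev_cmd - prepare an LRB for a device management command
 * @hba: per adapter instance
 * @lrbp: local reference block to be filled
 * @cmd_type: device management command type (NOP, Query, ...)
 * @tag: task tag to be used for the command
 *
 * Device management commands are not tied to a SCSI command or a specific
 * LUN and bypass interrupt aggregation.
 *
 * Returns the result of composing the device management UPIU.
 */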
2988 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2989                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2990 {
2991         lrbp->cmd = NULL;
2992         lrbp->task_tag = tag;
2993         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2994         lrbp->intr_cmd = true; /* No interrupt aggregation */
2995         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2996         hba->dev_cmd.type = cmd_type;
2997
2998         return ufshcd_compose_devman_upiu(hba, lrbp);
2999 }
3000
3001 /*
3002  * Clear all the requests from the controller for which a bit has been set in
3003  * @mask and wait until the controller confirms that these requests have been
3004  * cleared.
3005  */
3006 static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
3007 {
3008         unsigned long flags;
3009
3010         /* clear outstanding transaction before retry */
3011         spin_lock_irqsave(hba->host->host_lock, flags);
3012         ufshcd_utrl_clear(hba, mask);
3013         spin_unlock_irqrestore(hba->host->host_lock, flags);
3014
3015         /*
3016          * Wait for the hardware to clear the corresponding doorbell bit.
3017          * The maximum wait is 1 second.
3018          */
3019         return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
3020                                         mask, ~mask, 1000, 1000);
3021 }
3022
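/**
 * ufshcd_check_query_response - extract the query response result code
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Stores the response code from the response UPIU in the device management
 * query response and returns it; 0 indicates success.
 */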
3023 static int
3024 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3025 {
3026         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
3027
3028         /* Get the UPIU response */
3029         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
3030                                 UPIU_RSP_CODE_OFFSET;
3031         return query_res->response;
3032 }
3033
3034 /**
3035  * ufshcd_dev_cmd_completion() - handles device management command responses
3036  * @hba: per adapter instance
3037  * @lrbp: pointer to local reference block
3038  */
3039 static int
3040 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3041 {
3042         int resp;
3043         int err = 0;
3044
3045         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3046         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3047
3048         switch (resp) {
3049         case UPIU_TRANSACTION_NOP_IN:
3050                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3051                         err = -EINVAL;
3052                         dev_err(hba->dev, "%s: unexpected response %x\n",
3053                                         __func__, resp);
3054                 }
3055                 break;
3056         case UPIU_TRANSACTION_QUERY_RSP:
3057                 err = ufshcd_check_query_response(hba, lrbp);
3058                 if (!err)
3059                         err = ufshcd_copy_query_response(hba, lrbp);
3060                 break;
3061         case UPIU_TRANSACTION_REJECT_UPIU:
3062                 /* TODO: handle Reject UPIU Response */
3063                 err = -EPERM;
3064                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3065                                 __func__);
3066                 break;
3067         case UPIU_TRANSACTION_RESPONSE:
3068                 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
3069                         err = -EINVAL;
3070                         dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
3071                 }
3072                 break;
3073         default:
3074                 err = -EINVAL;
3075                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3076                                 __func__, resp);
3077                 break;
3078         }
3079
3080         return err;
3081 }
3082
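/**
 * ufshcd_wait_for_dev_cmd - wait for a device management command to complete
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @max_timeout: maximum wait time in milliseconds
 *
 * On completion, the OCS and the response UPIU are checked. On timeout, the
 * command is cleared from the doorbell: -EAGAIN is returned if the clear
 * succeeded and -ETIMEDOUT if it did not. If the completion handler raced
 * with the timeout handling, the wait is retried.
 */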
3083 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3084                 struct ufshcd_lrb *lrbp, int max_timeout)
3085 {
3086         unsigned long time_left = msecs_to_jiffies(max_timeout);
3087         unsigned long flags;
3088         bool pending;
3089         int err;
3090
3091 retry:
3092         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3093                                                 time_left);
3094
3095         if (likely(time_left)) {
3096                 /*
3097                  * The completion handler called complete() and the caller of
3098                  * this function still owns the @lrbp tag so the code below does
3099                  * not trigger any race conditions.
3100                  */
3101                 hba->dev_cmd.complete = NULL;
3102                 err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
3103                 if (!err)
3104                         err = ufshcd_dev_cmd_completion(hba, lrbp);
3105         } else {
3106                 err = -ETIMEDOUT;
3107                 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
3108                         __func__, lrbp->task_tag);
3109                 if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
3110                         /* successfully cleared the command, retry if needed */
3111                         err = -EAGAIN;
3112                         /*
3113                          * Since clearing the command succeeded we also need to
3114                          * clear the task tag bit from the outstanding_reqs
3115                          * variable.
3116                          */
3117                         spin_lock_irqsave(&hba->outstanding_lock, flags);
3118                         pending = test_bit(lrbp->task_tag,
3119                                            &hba->outstanding_reqs);
3120                         if (pending) {
3121                                 hba->dev_cmd.complete = NULL;
3122                                 __clear_bit(lrbp->task_tag,
3123                                             &hba->outstanding_reqs);
3124                         }
3125                         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3126
3127                         if (!pending) {
3128                                 /*
3129                                  * The completion handler ran while we tried to
3130                                  * clear the command.
3131                                  */
3132                                 time_left = 1;
3133                                 goto retry;
3134                         }
3135                 } else {
3136                         dev_err(hba->dev, "%s: failed to clear tag %d\n",
3137                                 __func__, lrbp->task_tag);
3138
3139                         spin_lock_irqsave(&hba->outstanding_lock, flags);
3140                         pending = test_bit(lrbp->task_tag,
3141                                            &hba->outstanding_reqs);
3142                         if (pending)
3143                                 hba->dev_cmd.complete = NULL;
3144                         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3145
3146                         if (!pending) {
3147                                 /*
3148                                  * The completion handler ran while we tried to
3149                                  * clear the command.
3150                                  */
3151                                 time_left = 1;
3152                                 goto retry;
3153                         }
3154                 }
3155         }
3156
3157         return err;
3158 }
3159
3160 /**
3161  * ufshcd_exec_dev_cmd - API for sending device management requests
3162  * @hba: UFS hba
3163  * @cmd_type: specifies the type (NOP, Query...)
3164  * @timeout: timeout in milliseconds
3165  *
3166  * NOTE: Since there is only one available tag for device management commands,
3167  * the caller is expected to hold the hba->dev_cmd.lock mutex.
3168  */
3169 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3170                 enum dev_cmd_type cmd_type, int timeout)
3171 {
3172         DECLARE_COMPLETION_ONSTACK(wait);
3173         const u32 tag = hba->reserved_slot;
3174         struct ufshcd_lrb *lrbp;
3175         int err;
3176
3177         /* Protects use of hba->reserved_slot. */
3178         lockdep_assert_held(&hba->dev_cmd.lock);
3179
3180         down_read(&hba->clk_scaling_lock);
3181
3182         lrbp = &hba->lrb[tag];
3183         WARN_ON(lrbp->cmd);
3184         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3185         if (unlikely(err))
3186                 goto out;
3187
3188         hba->dev_cmd.complete = &wait;
3189         hba->dev_cmd.cqe = NULL;
3190
3191         ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
3192
3193         ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
3194         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3195         ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
3196                                     (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3197
3198 out:
3199         up_read(&hba->clk_scaling_lock);
3200         return err;
3201 }
3202
3203 /**
3204  * ufshcd_init_query() - init the query response and request parameters
3205  * @hba: per-adapter instance
3206  * @request: address of the request pointer to be initialized
3207  * @response: address of the response pointer to be initialized
3208  * @opcode: operation to perform
3209  * @idn: flag idn to access
3210  * @index: LU number to access
3211  * @selector: query/flag/descriptor further identification
3212  */
3213 static inline void ufshcd_init_query(struct ufs_hba *hba,
3214                 struct ufs_query_req **request, struct ufs_query_res **response,
3215                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3216 {
3217         *request = &hba->dev_cmd.query.request;
3218         *response = &hba->dev_cmd.query.response;
3219         memset(*request, 0, sizeof(struct ufs_query_req));
3220         memset(*response, 0, sizeof(struct ufs_query_res));
3221         (*request)->upiu_req.opcode = opcode;
3222         (*request)->upiu_req.idn = idn;
3223         (*request)->upiu_req.index = index;
3224         (*request)->upiu_req.selector = selector;
3225 }
3226
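/**
 * ufshcd_query_flag_retry - send a flag query request with retries
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @index: flag index to access
 * @flag_res: the flag value after the query request completes
 *
 * Retries ufshcd_query_flag() up to QUERY_REQ_RETRIES times.
 *
 * Returns 0 for success, non-zero in case of failure
 */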
3227 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3228         enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3229 {
3230         int ret;
3231         int retries;
3232
3233         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3234                 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3235                 if (ret)
3236                         dev_dbg(hba->dev,
3237                                 "%s: failed with error %d, retries %d\n",
3238                                 __func__, ret, retries);
3239                 else
3240                         break;
3241         }
3242
3243         if (ret)
3244                 dev_err(hba->dev,
3245                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3246                         __func__, opcode, idn, ret, retries);
3247         return ret;
3248 }
3249
3250 /**
3251  * ufshcd_query_flag() - API function for sending flag query requests
3252  * @hba: per-adapter instance
3253  * @opcode: flag query to perform
3254  * @idn: flag idn to access
3255  * @index: flag index to access
3256  * @flag_res: the flag value after the query request completes
3257  *
3258  * Returns 0 for success, non-zero in case of failure
3259  */
3260 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3261                         enum flag_idn idn, u8 index, bool *flag_res)
3262 {
3263         struct ufs_query_req *request = NULL;
3264         struct ufs_query_res *response = NULL;
3265         int err, selector = 0;
3266         int timeout = QUERY_REQ_TIMEOUT;
3267
3268         BUG_ON(!hba);
3269
3270         ufshcd_hold(hba, false);
3271         mutex_lock(&hba->dev_cmd.lock);
3272         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3273                         selector);
3274
3275         switch (opcode) {
3276         case UPIU_QUERY_OPCODE_SET_FLAG:
3277         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3278         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3279                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3280                 break;
3281         case UPIU_QUERY_OPCODE_READ_FLAG:
3282                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3283                 if (!flag_res) {
3284                         /* No dummy reads */
3285                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
3286                                         __func__);
3287                         err = -EINVAL;
3288                         goto out_unlock;
3289                 }
3290                 break;
3291         default:
3292                 dev_err(hba->dev,
3293                         "%s: Expected query flag opcode but got = %d\n",
3294                         __func__, opcode);
3295                 err = -EINVAL;
3296                 goto out_unlock;
3297         }
3298
3299         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3300
3301         if (err) {
3302                 dev_err(hba->dev,
3303                         "%s: Sending flag query for idn %d failed, err = %d\n",
3304                         __func__, idn, err);
3305                 goto out_unlock;
3306         }
3307
3308         if (flag_res)
3309                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3310                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3311
3312 out_unlock:
3313         mutex_unlock(&hba->dev_cmd.lock);
3314         ufshcd_release(hba);
3315         return err;
3316 }
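
/*
 * Illustrative sketch (not taken from this file) of a flag query: reading a
 * boolean device flag such as fDeviceInit. For read opcodes the flag value
 * is returned through the last argument:
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					  QUERY_FLAG_IDN_FDEVICEINIT, 0,
 *					  &flag_res);
 */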
3317
3318 /**
3319  * ufshcd_query_attr - API function for sending attribute requests
3320  * @hba: per-adapter instance
3321  * @opcode: attribute opcode
3322  * @idn: attribute idn to access
3323  * @index: index field
3324  * @selector: selector field
3325  * @attr_val: the attribute value after the query request completes
3326  *
3327  * Returns 0 for success, non-zero in case of failure
3328  */
3329 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3330                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3331 {
3332         struct ufs_query_req *request = NULL;
3333         struct ufs_query_res *response = NULL;
3334         int err;
3335
3336         BUG_ON(!hba);
3337
3338         if (!attr_val) {
3339                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3340                                 __func__, opcode);
3341                 return -EINVAL;
3342         }
3343
3344         ufshcd_hold(hba, false);
3345
3346         mutex_lock(&hba->dev_cmd.lock);
3347         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3348                         selector);
3349
3350         switch (opcode) {
3351         case UPIU_QUERY_OPCODE_WRITE_ATTR:
3352                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3353                 request->upiu_req.value = cpu_to_be32(*attr_val);
3354                 break;
3355         case UPIU_QUERY_OPCODE_READ_ATTR:
3356                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3357                 break;
3358         default:
3359                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3360                                 __func__, opcode);
3361                 err = -EINVAL;
3362                 goto out_unlock;
3363         }
3364
3365         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3366
3367         if (err) {
3368                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3369                                 __func__, opcode, idn, index, err);
3370                 goto out_unlock;
3371         }
3372
3373         *attr_val = be32_to_cpu(response->upiu_res.value);
3374
3375 out_unlock:
3376         mutex_unlock(&hba->dev_cmd.lock);
3377         ufshcd_release(hba);
3378         return err;
3379 }
3380
3381 /**
3382  * ufshcd_query_attr_retry() - API function for sending query
3383  * attribute with retries
3384  * @hba: per-adapter instance
3385  * @opcode: attribute opcode
3386  * @idn: attribute idn to access
3387  * @index: index field
3388  * @selector: selector field
3389  * @attr_val: the attribute value after the query request
3390  * completes
3391  *
3392  * Returns 0 for success, non-zero in case of failure
3393  */
3394 int ufshcd_query_attr_retry(struct ufs_hba *hba,
3395         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3396         u32 *attr_val)
3397 {
3398         int ret = 0;
3399         u32 retries;
3400
3401         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3402                 ret = ufshcd_query_attr(hba, opcode, idn, index,
3403                                                 selector, attr_val);
3404                 if (ret)
3405                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3406                                 __func__, ret, retries);
3407                 else
3408                         break;
3409         }
3410
3411         if (ret)
3412                 dev_err(hba->dev,
3413                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3414                         __func__, idn, ret, QUERY_REQ_RETRIES);
3415         return ret;
3416 }
3417
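/**
 * __ufshcd_query_descriptor - send a descriptor query request
 * @hba: per-adapter instance
 * @opcode: descriptor opcode (read or write)
 * @idn: descriptor idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: buffer holding (write) or receiving (read) the descriptor
 * @buf_len: on input the buffer length, on output the length reported in
 *           the response
 *
 * Returns 0 for success, non-zero in case of failure
 */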
3418 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3419                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3420                         u8 selector, u8 *desc_buf, int *buf_len)
3421 {
3422         struct ufs_query_req *request = NULL;
3423         struct ufs_query_res *response = NULL;
3424         int err;
3425
3426         BUG_ON(!hba);
3427
3428         if (!desc_buf) {
3429                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3430                                 __func__, opcode);
3431                 return -EINVAL;
3432         }
3433
3434         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3435                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3436                                 __func__, *buf_len);
3437                 return -EINVAL;
3438         }
3439
3440         ufshcd_hold(hba, false);
3441
3442         mutex_lock(&hba->dev_cmd.lock);
3443         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3444                         selector);
3445         hba->dev_cmd.query.descriptor = desc_buf;
3446         request->upiu_req.length = cpu_to_be16(*buf_len);
3447
3448         switch (opcode) {
3449         case UPIU_QUERY_OPCODE_WRITE_DESC:
3450                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3451                 break;
3452         case UPIU_QUERY_OPCODE_READ_DESC:
3453                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3454                 break;
3455         default:
3456                 dev_err(hba->dev,
3457                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3458                                 __func__, opcode);
3459                 err = -EINVAL;
3460                 goto out_unlock;
3461         }
3462
3463         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3464
3465         if (err) {
3466                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3467                                 __func__, opcode, idn, index, err);
3468                 goto out_unlock;
3469         }
3470
3471         *buf_len = be16_to_cpu(response->upiu_res.length);
3472
3473 out_unlock:
3474         hba->dev_cmd.query.descriptor = NULL;
3475         mutex_unlock(&hba->dev_cmd.lock);
3476         ufshcd_release(hba);
3477         return err;
3478 }
3479
3480 /**
3481  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3482  * @hba: per-adapter instance
3483  * @opcode: attribute opcode
3484  * @idn: attribute idn to access
3485  * @index: index field
3486  * @selector: selector field
3487  * @desc_buf: the buffer that contains the descriptor
3488  * @buf_len: length parameter passed to the device
3489  *
3490  * Returns 0 for success, non-zero in case of failure.
3491  * The buf_len parameter will contain, on return, the length parameter
3492  * received on the response.
3493  */
3494 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3495                                   enum query_opcode opcode,
3496                                   enum desc_idn idn, u8 index,
3497                                   u8 selector,
3498                                   u8 *desc_buf, int *buf_len)
3499 {
3500         int err;
3501         int retries;
3502
3503         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3504                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3505                                                 selector, desc_buf, buf_len);
3506                 if (!err || err == -EINVAL)
3507                         break;
3508         }
3509
3510         return err;
3511 }
3512
3513 /**
3514  * ufshcd_read_desc_param - read the specified descriptor parameter
3515  * @hba: Pointer to adapter instance
3516  * @desc_id: descriptor idn value
3517  * @desc_index: descriptor index
3518  * @param_offset: offset of the parameter to read
3519  * @param_read_buf: pointer to buffer where parameter would be read
3520  * @param_size: sizeof(param_read_buf)
3521  *
3522  * Return 0 in case of success, non-zero otherwise
3523  */
3524 int ufshcd_read_desc_param(struct ufs_hba *hba,
3525                            enum desc_idn desc_id,
3526                            int desc_index,
3527                            u8 param_offset,
3528                            u8 *param_read_buf,
3529                            u8 param_size)
3530 {
3531         int ret;
3532         u8 *desc_buf;
3533         int buff_len = QUERY_DESC_MAX_SIZE;
3534         bool is_kmalloc = true;
3535
3536         /* Safety check */
3537         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3538                 return -EINVAL;
3539
3540         /* Check whether we need temp memory */
3541         if (param_offset != 0 || param_size < buff_len) {
3542                 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3543                 if (!desc_buf)
3544                         return -ENOMEM;
3545         } else {
3546                 desc_buf = param_read_buf;
3547                 is_kmalloc = false;
3548         }
3549
3550         /* Request for full descriptor */
3551         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3552                                             desc_id, desc_index, 0,
3553                                             desc_buf, &buff_len);
3554         if (ret) {
3555                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3556                         __func__, desc_id, desc_index, param_offset, ret);
3557                 goto out;
3558         }
3559
3560         /* Update descriptor length */
3561         buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3562
3563         if (param_offset >= buff_len) {
3564                 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3565                         __func__, param_offset, desc_id, buff_len);
3566                 ret = -EINVAL;
3567                 goto out;
3568         }
3569
3570         /* Sanity check */
3571         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3572                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3573                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3574                 ret = -EINVAL;
3575                 goto out;
3576         }
3577
3578         if (is_kmalloc) {
3579                 /* Make sure we don't copy more data than available */
3580                 if (param_offset >= buff_len)
3581                         ret = -EINVAL;
3582                 else
3583                         memcpy(param_read_buf, &desc_buf[param_offset],
3584                                min_t(u32, param_size, buff_len - param_offset));
3585         }
3586 out:
3587         if (is_kmalloc)
3588                 kfree(desc_buf);
3589         return ret;
3590 }
3591
3592 /**
3593  * struct uc_string_id - unicode string
3594  *
3595  * @len: size of this descriptor, header included
3596  * @type: descriptor type
3597  * @uc: unicode string characters
3598  */
3599 struct uc_string_id {
3600         u8 len;
3601         u8 type;
3602         wchar_t uc[];
3603 } __packed;
3604
3605 /* replace non-printable or non-ASCII characters with spaces */
3606 static inline char ufshcd_remove_non_printable(u8 ch)
3607 {
3608         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3609 }
3610
3611 /**
3612  * ufshcd_read_string_desc - read string descriptor
3613  * @hba: pointer to adapter instance
3614  * @desc_index: descriptor index
3615  * @buf: double pointer set to the buffer holding the descriptor;
3616  *       the caller must free the memory.
3617  * @ascii: if true, convert the Unicode string to a NUL-terminated
3618  *         ASCII string.
3619  *
3620  * Return:
3621  * *      string size on success.
3622  * *      -ENOMEM: on allocation failure
3623  * *      -EINVAL: on a wrong parameter
3624  */
3625 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3626                             u8 **buf, bool ascii)
3627 {
3628         struct uc_string_id *uc_str;
3629         u8 *str;
3630         int ret;
3631
3632         if (!buf)
3633                 return -EINVAL;
3634
3635         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3636         if (!uc_str)
3637                 return -ENOMEM;
3638
3639         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3640                                      (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3641         if (ret < 0) {
3642                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3643                         QUERY_REQ_RETRIES, ret);
3644                 str = NULL;
3645                 goto out;
3646         }
3647
3648         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3649                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3650                 str = NULL;
3651                 ret = 0;
3652                 goto out;
3653         }
3654
3655         if (ascii) {
3656                 ssize_t ascii_len;
3657                 int i;
3658                 /* drop the header, halve the UTF-16 byte count, add 1 for '\0' */
3659                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3660                 str = kzalloc(ascii_len, GFP_KERNEL);
3661                 if (!str) {
3662                         ret = -ENOMEM;
3663                         goto out;
3664                 }
3665
3666                 /*
3667                  * The descriptor contains the string in UTF-16 format;
3668                  * convert it to UTF-8 so it can be displayed.
3669                  */
3670                 ret = utf16s_to_utf8s(uc_str->uc,
3671                                       uc_str->len - QUERY_DESC_HDR_SIZE,
3672                                       UTF16_BIG_ENDIAN, str, ascii_len);
3673
3674                 /* replace non-printable or non-ASCII characters with spaces */
3675                 for (i = 0; i < ret; i++)
3676                         str[i] = ufshcd_remove_non_printable(str[i]);
3677
3678                 str[ret++] = '\0';
3679
3680         } else {
3681                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3682                 if (!str) {
3683                         ret = -ENOMEM;
3684                         goto out;
3685                 }
3686                 ret = uc_str->len;
3687         }
3688 out:
3689         *buf = str;
3690         kfree(uc_str);
3691         return ret;
3692 }
3693
3694 /**
3695  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3696  * @hba: Pointer to adapter instance
3697  * @lun: lun id
3698  * @param_offset: offset of the parameter to read
3699  * @param_read_buf: pointer to buffer where parameter would be read
3700  * @param_size: sizeof(param_read_buf)
3701  *
3702  * Return 0 in case of success, non-zero otherwise
3703  */
3704 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3705                                               int lun,
3706                                               enum unit_desc_param param_offset,
3707                                               u8 *param_read_buf,
3708                                               u32 param_size)
3709 {
3710         /*
3711          * Unit descriptors are only available for general purpose LUs (LUN id
3712          * from 0 to 7) and RPMB Well known LU.
3713          */
3714         if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3715                 return -EOPNOTSUPP;
3716
3717         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3718                                       param_offset, param_read_buf, param_size);
3719 }
3720
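/**
 * ufshcd_get_ref_clk_gating_wait - read the device ref clock gating wait time
 * @hba: per adapter instance
 *
 * For UFS 3.0 and later devices, query the wait time required before gating
 * the device reference clock and cache it in hba->dev_info. Falls back to
 * UFSHCD_REF_CLK_GATING_WAIT_US if the attribute cannot be read or is zero.
 *
 * Returns 0 for success, non-zero in case of failure
 */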
3721 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3722 {
3723         int err = 0;
3724         u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3725
3726         if (hba->dev_info.wspecversion >= 0x300) {
3727                 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3728                                 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3729                                 &gating_wait);
3730                 if (err)
3731                         dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3732                                          err, gating_wait);
3733
3734                 if (gating_wait == 0) {
3735                         gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3736                         dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3737                                          gating_wait);
3738                 }
3739
3740                 hba->dev_info.clk_gating_wait_us = gating_wait;
3741         }
3742
3743         return err;
3744 }
3745
3746 /**
3747  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3748  * @hba: per adapter instance
3749  *
3750  * 1. Allocate DMA memory for Command Descriptor array
3751  *      Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3752  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3753  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3754  *      (UTMRDL)
3755  * 4. Allocate memory for local reference block(lrb).
3756  *
3757  * Returns 0 for success, non-zero in case of failure
3758  */
3759 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3760 {
3761         size_t utmrdl_size, utrdl_size, ucdl_size;
3762
3763         /* Allocate memory for UTP command descriptors */
3764         ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
3765         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3766                                                   ucdl_size,
3767                                                   &hba->ucdl_dma_addr,
3768                                                   GFP_KERNEL);
3769
3770         /*
3771          * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
3772          */
3773         if (!hba->ucdl_base_addr ||
3774             WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
3775                 dev_err(hba->dev,
3776                         "Command Descriptor Memory allocation failed\n");
3777                 goto out;
3778         }
3779
3780         /*
3781          * Allocate memory for UTP Transfer descriptors
3782          * UFSHCI requires 1024-byte alignment of the UTRD
3783          */
3784         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3785         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3786                                                    utrdl_size,
3787                                                    &hba->utrdl_dma_addr,
3788                                                    GFP_KERNEL);
3789         if (!hba->utrdl_base_addr ||
3790             WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
3791                 dev_err(hba->dev,
3792                         "Transfer Descriptor Memory allocation failed\n");
3793                 goto out;
3794         }
3795
3796         /*
3797          * Skip utmrdl allocation; it may have been
3798          * allocated during first pass and not released during
3799          * MCQ memory allocation.
3800          * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
3801          */
3802         if (hba->utmrdl_base_addr)
3803                 goto skip_utmrdl;
3804         /*
3805          * Allocate memory for UTP Task Management descriptors
3806          * UFSHCI requires 1024-byte alignment of the UTMRD
3807          */
3808         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3809         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3810                                                     utmrdl_size,
3811                                                     &hba->utmrdl_dma_addr,
3812                                                     GFP_KERNEL);
3813         if (!hba->utmrdl_base_addr ||
3814             WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
3815                 dev_err(hba->dev,
3816                 "Task Management Descriptor Memory allocation failed\n");
3817                 goto out;
3818         }
3819
3820 skip_utmrdl:
3821         /* Allocate memory for local reference block */
3822         hba->lrb = devm_kcalloc(hba->dev,
3823                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3824                                 GFP_KERNEL);
3825         if (!hba->lrb) {
3826                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3827                 goto out;
3828         }
3829         return 0;
3830 out:
3831         return -ENOMEM;
3832 }
3833
3834 /**
3835  * ufshcd_host_memory_configure - configure local reference block with
3836  *                              memory offsets
3837  * @hba: per adapter instance
3838  *
3839  * Configure Host memory space
3840  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3841  * address.
3842  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3843  * and PRDT offset.
3844  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3845  * into local reference block.
3846  */
3847 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3848 {
3849         struct utp_transfer_req_desc *utrdlp;
3850         dma_addr_t cmd_desc_dma_addr;
3851         dma_addr_t cmd_desc_element_addr;
3852         u16 response_offset;
3853         u16 prdt_offset;
3854         int cmd_desc_size;
3855         int i;
3856
3857         utrdlp = hba->utrdl_base_addr;
3858
3859         response_offset =
3860                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3861         prdt_offset =
3862                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3863
3864         cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
3865         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3866
3867         for (i = 0; i < hba->nutrs; i++) {
3868                 /* Configure UTRD with command descriptor base address */
3869                 cmd_desc_element_addr =
3870                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3871                 utrdlp[i].command_desc_base_addr =
3872                                 cpu_to_le64(cmd_desc_element_addr);
3873
3874                 /* Response UPIU and PRDT offsets: bytes if PRDT_BYTE_GRAN, else dwords */
3875                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3876                         utrdlp[i].response_upiu_offset =
3877                                 cpu_to_le16(response_offset);
3878                         utrdlp[i].prd_table_offset =
3879                                 cpu_to_le16(prdt_offset);
3880                         utrdlp[i].response_upiu_length =
3881                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3882                 } else {
3883                         utrdlp[i].response_upiu_offset =
3884                                 cpu_to_le16(response_offset >> 2);
3885                         utrdlp[i].prd_table_offset =
3886                                 cpu_to_le16(prdt_offset >> 2);
3887                         utrdlp[i].response_upiu_length =
3888                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3889                 }
3890
3891                 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3892         }
3893 }
3894
3895 /**
3896  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3897  * @hba: per adapter instance
3898  *
3899  * The UIC_CMD_DME_LINK_STARTUP command must be issued to the UniPro layer
3900  * in order to initiate the UniPro link startup procedure.
3901  * Once the UniPro link is up, the device connected to the controller
3902  * is detected.
3903  *
3904  * Returns 0 on success, non-zero value on failure
3905  */
3906 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3907 {
3908         struct uic_command uic_cmd = {0};
3909         int ret;
3910
3911         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3912
3913         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3914         if (ret)
3915                 dev_dbg(hba->dev,
3916                         "dme-link-startup: error code %d\n", ret);
3917         return ret;
3918 }

3919 /**
3920  * ufshcd_dme_reset - UIC command for DME_RESET
3921  * @hba: per adapter instance
3922  *
3923  * The DME_RESET command is issued in order to reset the UniPro stack.
3924  * This function now deals with cold reset.
3925  *
3926  * Returns 0 on success, non-zero value on failure
3927  */
3928 static int ufshcd_dme_reset(struct ufs_hba *hba)
3929 {
3930         struct uic_command uic_cmd = {0};
3931         int ret;
3932
3933         uic_cmd.command = UIC_CMD_DME_RESET;
3934
3935         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3936         if (ret)
3937                 dev_err(hba->dev,
3938                         "dme-reset: error code %d\n", ret);
3939
3940         return ret;
3941 }
3942
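/**
 * ufshcd_dme_configure_adapt - configure the UniPro HS ADAPT type
 * @hba: per adapter instance
 * @agreed_gear: the gear agreed upon during link negotiation
 * @adapt_val: requested PA_TXHSADAPTTYPE value
 *
 * ADAPT is only applicable to HS Gear 4 and above; for lower gears the value
 * is forced to PA_NO_ADAPT before it is written with DME_SET.
 *
 * Returns 0 on success, non-zero value on failure
 */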
3943 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3944                                int agreed_gear,
3945                                int adapt_val)
3946 {
3947         int ret;
3948
3949         if (agreed_gear < UFS_HS_G4)
3950                 adapt_val = PA_NO_ADAPT;
3951
3952         ret = ufshcd_dme_set(hba,
3953                              UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3954                              adapt_val);
3955         return ret;
3956 }
3957 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
3958
3959 /**
3960  * ufshcd_dme_enable - UIC command for DME_ENABLE
3961  * @hba: per adapter instance
3962  *
3963  * The DME_ENABLE command is issued in order to enable the UniPro stack.
3964  *
3965  * Returns 0 on success, non-zero value on failure
3966  */
3967 static int ufshcd_dme_enable(struct ufs_hba *hba)
3968 {
3969         struct uic_command uic_cmd = {0};
3970         int ret;
3971
3972         uic_cmd.command = UIC_CMD_DME_ENABLE;
3973
3974         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3975         if (ret)
3976                 dev_err(hba->dev,
3977                         "dme-enable: error code %d\n", ret);
3978
3979         return ret;
3980 }
3981
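/**
 * ufshcd_add_delay_before_dme_cmd - space out consecutive DME commands
 * @hba: per adapter instance
 *
 * Hosts with UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS need a minimum gap between
 * DME commands. Sleep for whatever part of MIN_DELAY_BEFORE_DME_CMDS_US has
 * not yet elapsed since the previous DME command was issued.
 */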
3982 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3983 {
3984         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3985         unsigned long min_sleep_time_us;
3986
3987         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3988                 return;
3989
3990         /*
3991          * last_dme_cmd_tstamp will be 0 only for the first call to
3992          * this function.
3993          */
3994         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3995                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3996         } else {
3997                 unsigned long delta =
3998                         (unsigned long) ktime_to_us(
3999                                 ktime_sub(ktime_get(),
4000                                 hba->last_dme_cmd_tstamp));
4001
4002                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
4003                         min_sleep_time_us =
4004                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
4005                 else
4006                         return; /* no more delay required */
4007         }
4008
4009         /* allow sleep for extra 50us if needed */
4010         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
4011 }
4012
4013 /**
4014  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4015  * @hba: per adapter instance
4016  * @attr_sel: uic command argument1
4017  * @attr_set: attribute set type as uic command argument2
4018  * @mib_val: setting value as uic command argument3
4019  * @peer: indicate whether peer or local
4020  *
4021  * Returns 0 on success, non-zero value on failure
4022  */
4023 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4024                         u8 attr_set, u32 mib_val, u8 peer)
4025 {
4026         struct uic_command uic_cmd = {0};
4027         static const char *const action[] = {
4028                 "dme-set",
4029                 "dme-peer-set"
4030         };
4031         const char *set = action[!!peer];
4032         int ret;
4033         int retries = UFS_UIC_COMMAND_RETRIES;
4034
4035         uic_cmd.command = peer ?
4036                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4037         uic_cmd.argument1 = attr_sel;
4038         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4039         uic_cmd.argument3 = mib_val;
4040
4041         do {
4042                 /* for peer attributes we retry upon failure */
4043                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4044                 if (ret)
4045                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4046                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4047         } while (ret && peer && --retries);
4048
4049         if (ret)
4050                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4051                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4052                         UFS_UIC_COMMAND_RETRIES - retries);
4053
4054         return ret;
4055 }
4056 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
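
/*
 * Illustrative note: most callers use the ufshcd_dme_set() and
 * ufshcd_dme_peer_set() wrappers rather than calling ufshcd_dme_set_attr()
 * directly, e.g. as in ufshcd_dme_configure_adapt() above:
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), adapt_val);
 */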
4057
4058 /**
4059  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4060  * @hba: per adapter instance
4061  * @attr_sel: uic command argument1
4062  * @mib_val: the value of the attribute as returned by the UIC command
4063  * @peer: indicate whether peer or local
4064  *
4065  * Returns 0 on success, non-zero value on failure
4066  */
4067 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4068                         u32 *mib_val, u8 peer)
4069 {
4070         struct uic_command uic_cmd = {0};
4071         static const char *const action[] = {
4072                 "dme-get",
4073                 "dme-peer-get"
4074         };
4075         const char *get = action[!!peer];
4076         int ret;
4077         int retries = UFS_UIC_COMMAND_RETRIES;
4078         struct ufs_pa_layer_attr orig_pwr_info;
4079         struct ufs_pa_layer_attr temp_pwr_info;
4080         bool pwr_mode_change = false;
4081
4082         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4083                 orig_pwr_info = hba->pwr_info;
4084                 temp_pwr_info = orig_pwr_info;
4085
4086                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4087                     orig_pwr_info.pwr_rx == FAST_MODE) {
4088                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4089                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4090                         pwr_mode_change = true;
4091                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4092                     orig_pwr_info.pwr_rx == SLOW_MODE) {
4093                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4094                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4095                         pwr_mode_change = true;
4096                 }
4097                 if (pwr_mode_change) {
4098                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4099                         if (ret)
4100                                 goto out;
4101                 }
4102         }
4103
4104         uic_cmd.command = peer ?
4105                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
4106         uic_cmd.argument1 = attr_sel;
4107
4108         do {
4109                 /* for peer attributes we retry upon failure */
4110                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4111                 if (ret)
4112                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4113                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
4114         } while (ret && peer && --retries);
4115
4116         if (ret)
4117                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4118                         get, UIC_GET_ATTR_ID(attr_sel),
4119                         UFS_UIC_COMMAND_RETRIES - retries);
4120
4121         if (mib_val && !ret)
4122                 *mib_val = uic_cmd.argument3;
4123
4124         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4125             && pwr_mode_change)
4126                 ufshcd_change_power_mode(hba, &orig_pwr_info);
4127 out:
4128         return ret;
4129 }
4130 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
4131
4132 /**
4133  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
4134  * state) and waits for it to take effect.
4135  *
4136  * @hba: per adapter instance
4137  * @cmd: UIC command to execute
4138  *
4139  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
4140  * DME_HIBERNATE_EXIT take some time to take effect on both the host and
4141  * device UniPro link, so their final completion is indicated by dedicated
4142  * status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
4143  * addition to the normal UIC command completion status (UCCS). This function
4144  * only returns after the relevant status bits indicate the completion.
4145  *
4146  * Returns 0 on success, non-zero value on failure
4147  */
4148 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4149 {
4150         DECLARE_COMPLETION_ONSTACK(uic_async_done);
4151         unsigned long flags;
4152         u8 status;
4153         int ret;
4154         bool reenable_intr = false;
4155
4156         mutex_lock(&hba->uic_cmd_mutex);
4157         ufshcd_add_delay_before_dme_cmd(hba);
4158
4159         spin_lock_irqsave(hba->host->host_lock, flags);
4160         if (ufshcd_is_link_broken(hba)) {
4161                 ret = -ENOLINK;
4162                 goto out_unlock;
4163         }
4164         hba->uic_async_done = &uic_async_done;
4165         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4166                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4167                 /*
4168                  * Make sure UIC command completion interrupt is disabled before
4169                  * issuing UIC command.
4170                  */
4171                 wmb();
4172                 reenable_intr = true;
4173         }
4174         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4175         spin_unlock_irqrestore(hba->host->host_lock, flags);
4176         if (ret) {
4177                 dev_err(hba->dev,
4178                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4179                         cmd->command, cmd->argument3, ret);
4180                 goto out;
4181         }
4182
4183         if (!wait_for_completion_timeout(hba->uic_async_done,
4184                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4185                 dev_err(hba->dev,
4186                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4187                         cmd->command, cmd->argument3);
4188
4189                 if (!cmd->cmd_active) {
4190                         dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4191                                 __func__);
4192                         goto check_upmcrs;
4193                 }
4194
4195                 ret = -ETIMEDOUT;
4196                 goto out;
4197         }
4198
4199 check_upmcrs:
4200         status = ufshcd_get_upmcrs(hba);
4201         if (status != PWR_LOCAL) {
4202                 dev_err(hba->dev,
4203                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4204                         cmd->command, status);
4205                 ret = (status != PWR_OK) ? status : -1;
4206         }
4207 out:
4208         if (ret) {
4209                 ufshcd_print_host_state(hba);
4210                 ufshcd_print_pwr_info(hba);
4211                 ufshcd_print_evt_hist(hba);
4212         }
4213
4214         spin_lock_irqsave(hba->host->host_lock, flags);
4215         hba->active_uic_cmd = NULL;
4216         hba->uic_async_done = NULL;
4217         if (reenable_intr)
4218                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4219         if (ret) {
4220                 ufshcd_set_link_broken(hba);
4221                 ufshcd_schedule_eh_work(hba);
4222         }
4223 out_unlock:
4224         spin_unlock_irqrestore(hba->host->host_lock, flags);
4225         mutex_unlock(&hba->uic_cmd_mutex);
4226
4227         return ret;
4228 }
4229
4230 /**
4231  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4232  *                              using DME_SET primitives.
4233  * @hba: per adapter instance
4234  * @mode: power mode value
4235  *
4236  * Returns 0 on success, non-zero value on failure
4237  */
4238 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4239 {
4240         struct uic_command uic_cmd = {0};
4241         int ret;
4242
4243         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4244                 ret = ufshcd_dme_set(hba,
4245                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4246                 if (ret) {
4247                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4248                                                 __func__, ret);
4249                         goto out;
4250                 }
4251         }
4252
4253         uic_cmd.command = UIC_CMD_DME_SET;
4254         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4255         uic_cmd.argument3 = mode;
4256         ufshcd_hold(hba, false);
4257         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4258         ufshcd_release(hba);
4259
4260 out:
4261         return ret;
4262 }
4263 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
4264
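/**
 * ufshcd_link_recovery - recover a broken UniPro link
 * @hba: per adapter instance
 *
 * Marks error handling as being in progress, resets the attached device,
 * then resets and restores the host controller.
 *
 * Returns 0 on success, non-zero value on failure
 */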
4265 int ufshcd_link_recovery(struct ufs_hba *hba)
4266 {
4267         int ret;
4268         unsigned long flags;
4269
4270         spin_lock_irqsave(hba->host->host_lock, flags);
4271         hba->ufshcd_state = UFSHCD_STATE_RESET;
4272         ufshcd_set_eh_in_progress(hba);
4273         spin_unlock_irqrestore(hba->host->host_lock, flags);
4274
4275         /* Reset the attached device */
4276         ufshcd_device_reset(hba);
4277
4278         ret = ufshcd_host_reset_and_restore(hba);
4279
4280         spin_lock_irqsave(hba->host->host_lock, flags);
4281         if (ret)
4282                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4283         ufshcd_clear_eh_in_progress(hba);
4284         spin_unlock_irqrestore(hba->host->host_lock, flags);
4285
4286         if (ret)
4287                 dev_err(hba->dev, "%s: link recovery failed, err %d",
4288                         __func__, ret);
4289
4290         return ret;
4291 }
4292 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4293
4294 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4295 {
4296         int ret;
4297         struct uic_command uic_cmd = {0};
4298         ktime_t start = ktime_get();
4299
4300         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4301
4302         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4303         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4304         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4305                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4306
4307         if (ret)
4308                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4309                         __func__, ret);
4310         else
4311                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4312                                                                 POST_CHANGE);
4313
4314         return ret;
4315 }
4316 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4317
4318 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4319 {
4320         struct uic_command uic_cmd = {0};
4321         int ret;
4322         ktime_t start = ktime_get();
4323
4324         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4325
4326         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4327         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4328         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4329                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4330
4331         if (ret) {
4332                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4333                         __func__, ret);
4334         } else {
4335                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4336                                                                 POST_CHANGE);
4337                 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
4338                 hba->ufs_stats.hibern8_exit_cnt++;
4339         }
4340
4341         return ret;
4342 }
4343 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4344
4345 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4346 {
4347         unsigned long flags;
4348         bool update = false;
4349
4350         if (!ufshcd_is_auto_hibern8_supported(hba))
4351                 return;
4352
4353         spin_lock_irqsave(hba->host->host_lock, flags);
4354         if (hba->ahit != ahit) {
4355                 hba->ahit = ahit;
4356                 update = true;
4357         }
4358         spin_unlock_irqrestore(hba->host->host_lock, flags);
4359
4360         if (update &&
4361             !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
4362                 ufshcd_rpm_get_sync(hba);
4363                 ufshcd_hold(hba, false);
4364                 ufshcd_auto_hibern8_enable(hba);
4365                 ufshcd_release(hba);
4366                 ufshcd_rpm_put_sync(hba);
4367         }
4368 }
4369 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4370
4371 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4372 {
4373         if (!ufshcd_is_auto_hibern8_supported(hba))
4374                 return;
4375
4376         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4377 }
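
/*
 * Illustrative sketch (not part of the driver): platform glue that wants to
 * change the auto-hibernate idle timer builds an AHIT register value from a
 * timer count and a scale field and hands it to ufshcd_auto_hibern8_update().
 * The UFSHCI_AHIBERN8_* masks are assumed to come from <ufs/ufshcd.h>; the
 * meaning of the scale encoding is defined by the UFSHCI specification.
 *
 *	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer_val) |
 *		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale_val);
 *
 *	ufshcd_auto_hibern8_update(hba, ahit);
 */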
4378
4379 /**
4380  * ufshcd_init_pwr_info - set the POR (power on reset)
4381  * values in hba power info
4382  * @hba: per-adapter instance
4383  */
4384 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4385 {
4386         hba->pwr_info.gear_rx = UFS_PWM_G1;
4387         hba->pwr_info.gear_tx = UFS_PWM_G1;
4388         hba->pwr_info.lane_rx = 1;
4389         hba->pwr_info.lane_tx = 1;
4390         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4391         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4392         hba->pwr_info.hs_rate = 0;
4393 }
4394
4395 /**
4396  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4397  * @hba: per-adapter instance
4398  */
4399 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4400 {
4401         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4402
4403         if (hba->max_pwr_info.is_valid)
4404                 return 0;
4405
4406         if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
4407                 pwr_info->pwr_tx = FASTAUTO_MODE;
4408                 pwr_info->pwr_rx = FASTAUTO_MODE;
4409         } else {
4410                 pwr_info->pwr_tx = FAST_MODE;
4411                 pwr_info->pwr_rx = FAST_MODE;
4412         }
4413         pwr_info->hs_rate = PA_HS_MODE_B;
4414
4415         /* Get the connected lane count */
4416         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4417                         &pwr_info->lane_rx);
4418         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4419                         &pwr_info->lane_tx);
4420
4421         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4422                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4423                                 __func__,
4424                                 pwr_info->lane_rx,
4425                                 pwr_info->lane_tx);
4426                 return -EINVAL;
4427         }
4428
4429         /*
4430          * First, get the maximum gears of HS speed.
4431          * A zero value means there is no HSGEAR capability.
4432          * Then, get the maximum gears of PWM speed.
4433          */
4434         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4435         if (!pwr_info->gear_rx) {
4436                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4437                                 &pwr_info->gear_rx);
4438                 if (!pwr_info->gear_rx) {
4439                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4440                                 __func__, pwr_info->gear_rx);
4441                         return -EINVAL;
4442                 }
4443                 pwr_info->pwr_rx = SLOW_MODE;
4444         }
4445
4446         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4447                         &pwr_info->gear_tx);
4448         if (!pwr_info->gear_tx) {
4449                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4450                                 &pwr_info->gear_tx);
4451                 if (!pwr_info->gear_tx) {
4452                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4453                                 __func__, pwr_info->gear_tx);
4454                         return -EINVAL;
4455                 }
4456                 pwr_info->pwr_tx = SLOW_MODE;
4457         }
4458
4459         hba->max_pwr_info.is_valid = true;
4460         return 0;
4461 }
4462
4463 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4464                              struct ufs_pa_layer_attr *pwr_mode)
4465 {
4466         int ret;
4467
4468         /* if already configured to the requested pwr_mode */
4469         if (!hba->force_pmc &&
4470             pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4471             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4472             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4473             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4474             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4475             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4476             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4477                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4478                 return 0;
4479         }
4480
4481         /*
4482          * Configure the following attributes for the power mode change:
4483          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4484          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4485          * - PA_HSSERIES
4486          */
4487         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4488         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4489                         pwr_mode->lane_rx);
4490         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4491                         pwr_mode->pwr_rx == FAST_MODE)
4492                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
4493         else
4494                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
4495
4496         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4497         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4498                         pwr_mode->lane_tx);
4499         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4500                         pwr_mode->pwr_tx == FAST_MODE)
4501                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
4502         else
4503                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
4504
4505         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4506             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4507             pwr_mode->pwr_rx == FAST_MODE ||
4508             pwr_mode->pwr_tx == FAST_MODE)
4509                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4510                                                 pwr_mode->hs_rate);
4511
4512         if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4513                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4514                                 DL_FC0ProtectionTimeOutVal_Default);
4515                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4516                                 DL_TC0ReplayTimeOutVal_Default);
4517                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4518                                 DL_AFC0ReqTimeOutVal_Default);
4519                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4520                                 DL_FC1ProtectionTimeOutVal_Default);
4521                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4522                                 DL_TC1ReplayTimeOutVal_Default);
4523                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4524                                 DL_AFC1ReqTimeOutVal_Default);
4525
4526                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4527                                 DL_FC0ProtectionTimeOutVal_Default);
4528                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4529                                 DL_TC0ReplayTimeOutVal_Default);
4530                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4531                                 DL_AFC0ReqTimeOutVal_Default);
4532         }
4533
4534         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4535                         | pwr_mode->pwr_tx);
4536
4537         if (ret) {
4538                 dev_err(hba->dev,
4539                         "%s: power mode change failed %d\n", __func__, ret);
4540         } else {
4541                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4542                                                                 pwr_mode);
4543
4544                 memcpy(&hba->pwr_info, pwr_mode,
4545                         sizeof(struct ufs_pa_layer_attr));
4546         }
4547
4548         return ret;
4549 }
4550
4551 /**
4552  * ufshcd_config_pwr_mode - configure a new power mode
4553  * @hba: per-adapter instance
4554  * @desired_pwr_mode: desired power configuration
4555  */
4556 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4557                 struct ufs_pa_layer_attr *desired_pwr_mode)
4558 {
4559         struct ufs_pa_layer_attr final_params = { 0 };
4560         int ret;
4561
4562         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4563                                         desired_pwr_mode, &final_params);
4564
4565         if (ret)
4566                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4567
4568         ret = ufshcd_change_power_mode(hba, &final_params);
4569
4570         return ret;
4571 }
4572 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
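
/*
 * Illustrative sketch (hypothetical, not part of the driver): a vendor
 * driver's pwr_change_notify() vop may trim the parameters handed to it in
 * the PRE_CHANGE step, before ufshcd_change_power_mode() programs them.
 * The gear cap below is an assumption chosen purely for illustration.
 *
 *	static int example_pwr_change_notify(struct ufs_hba *hba,
 *					enum ufs_notify_change_status status,
 *					struct ufs_pa_layer_attr *dev_max,
 *					struct ufs_pa_layer_attr *dev_req)
 *	{
 *		if (status != PRE_CHANGE)
 *			return 0;
 *
 *		*dev_req = *dev_max;
 *		dev_req->gear_rx = min_t(u32, dev_req->gear_rx, UFS_HS_G3);
 *		dev_req->gear_tx = min_t(u32, dev_req->gear_tx, UFS_HS_G3);
 *		return 0;
 *	}
 */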
4573
4574 /**
4575  * ufshcd_complete_dev_init() - checks device readiness
4576  * @hba: per-adapter instance
4577  *
4578  * Set fDeviceInit flag and poll until device toggles it.
4579  */
4580 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4581 {
4582         int err;
4583         bool flag_res = true;
4584         ktime_t timeout;
4585
4586         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4587                 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4588         if (err) {
4589                 dev_err(hba->dev,
4590                         "%s: setting fDeviceInit flag failed with error %d\n",
4591                         __func__, err);
4592                 goto out;
4593         }
4594
4595         /* Poll fDeviceInit flag to be cleared */
4596         timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4597         do {
4598                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4599                                         QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4600                 if (!flag_res)
4601                         break;
4602                 usleep_range(500, 1000);
4603         } while (ktime_before(ktime_get(), timeout));
4604
4605         if (err) {
4606                 dev_err(hba->dev,
4607                                 "%s: reading fDeviceInit flag failed with error %d\n",
4608                                 __func__, err);
4609         } else if (flag_res) {
4610                 dev_err(hba->dev,
4611                                 "%s: fDeviceInit was not cleared by the device\n",
4612                                 __func__);
4613                 err = -EBUSY;
4614         }
4615 out:
4616         return err;
4617 }
4618
4619 /**
4620  * ufshcd_make_hba_operational - Make UFS controller operational
4621  * @hba: per adapter instance
4622  *
4623  * To bring UFS host controller to operational state,
4624  * 1. Enable required interrupts
4625  * 2. Configure interrupt aggregation
4626  * 3. Program UTRL and UTMRL base address
4627  * 4. Configure run-stop-registers
4628  *
4629  * Returns 0 on success, non-zero value on failure
4630  */
4631 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4632 {
4633         int err = 0;
4634         u32 reg;
4635
4636         /* Enable required interrupts */
4637         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4638
4639         /* Configure interrupt aggregation */
4640         if (ufshcd_is_intr_aggr_allowed(hba))
4641                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4642         else
4643                 ufshcd_disable_intr_aggr(hba);
4644
4645         /* Configure UTRL and UTMRL base address registers */
4646         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4647                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4648         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4649                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4650         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4651                         REG_UTP_TASK_REQ_LIST_BASE_L);
4652         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4653                         REG_UTP_TASK_REQ_LIST_BASE_H);
4654
4655         /*
4656          * Make sure base address and interrupt setup are updated before
4657          * enabling the run/stop registers below.
4658          */
4659         wmb();
4660
4661         /*
4662          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4663          */
4664         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4665         if (!(ufshcd_get_lists_status(reg))) {
4666                 ufshcd_enable_run_stop_reg(hba);
4667         } else {
4668                 dev_err(hba->dev,
4669                         "Host controller not ready to process requests");
4670                 err = -EIO;
4671         }
4672
4673         return err;
4674 }
4675 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4676
4677 /**
4678  * ufshcd_hba_stop - Send controller to reset state
4679  * @hba: per adapter instance
4680  */
4681 void ufshcd_hba_stop(struct ufs_hba *hba)
4682 {
4683         unsigned long flags;
4684         int err;
4685
4686         /*
4687          * Obtain the host lock to prevent the controller from being disabled
4688          * while the UFS interrupt handler is active on another CPU.
4689          */
4690         spin_lock_irqsave(hba->host->host_lock, flags);
4691         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4692         spin_unlock_irqrestore(hba->host->host_lock, flags);
4693
4694         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4695                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4696                                         10, 1);
4697         if (err)
4698                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4699 }
4700 EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4701
4702 /**
4703  * ufshcd_hba_execute_hce - initialize the controller
4704  * @hba: per adapter instance
4705  *
4706  * The controller resets itself and the controller firmware initialization
4707  * sequence kicks off. When the controller is ready it will set
4708  * the Host Controller Enable bit to 1.
4709  *
4710  * Returns 0 on success, non-zero value on failure
4711  */
4712 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4713 {
4714         int retry_outer = 3;
4715         int retry_inner;
4716
4717 start:
4718         if (ufshcd_is_hba_active(hba))
4719                 /* change controller state to "reset state" */
4720                 ufshcd_hba_stop(hba);
4721
4722         /* UniPro link is disabled at this point */
4723         ufshcd_set_link_off(hba);
4724
4725         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4726
4727         /* start controller initialization sequence */
4728         ufshcd_hba_start(hba);
4729
4730         /*
4731          * To initialize a UFS host controller HCE bit must be set to 1.
4732          * During initialization the HCE bit value changes from 1->0->1.
4733          * When the host controller completes initialization sequence
4734          * it sets the value of HCE bit to 1. The same HCE bit is read back
4735          * to check if the controller has completed initialization sequence.
4736          * So without this delay the value HCE = 1, set in the previous
4737          * instruction, might be read back.
4738          * This delay can be changed based on the controller.
4739          */
4740         ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4741
4742         /* wait for the host controller to complete initialization */
4743         retry_inner = 50;
4744         while (!ufshcd_is_hba_active(hba)) {
4745                 if (retry_inner) {
4746                         retry_inner--;
4747                 } else {
4748                         dev_err(hba->dev,
4749                                 "Controller enable failed\n");
4750                         if (retry_outer) {
4751                                 retry_outer--;
4752                                 goto start;
4753                         }
4754                         return -EIO;
4755                 }
4756                 usleep_range(1000, 1100);
4757         }
4758
4759         /* enable UIC related interrupts */
4760         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4761
4762         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4763
4764         return 0;
4765 }
4766
4767 int ufshcd_hba_enable(struct ufs_hba *hba)
4768 {
4769         int ret;
4770
4771         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4772                 ufshcd_set_link_off(hba);
4773                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4774
4775                 /* enable UIC related interrupts */
4776                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4777                 ret = ufshcd_dme_reset(hba);
4778                 if (ret) {
4779                         dev_err(hba->dev, "DME_RESET failed\n");
4780                         return ret;
4781                 }
4782
4783                 ret = ufshcd_dme_enable(hba);
4784                 if (ret) {
4785                         dev_err(hba->dev, "Enabling DME failed\n");
4786                         return ret;
4787                 }
4788
4789                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4790         } else {
4791                 ret = ufshcd_hba_execute_hce(hba);
4792         }
4793
4794         return ret;
4795 }
4796 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4797
4798 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4799 {
4800         int tx_lanes = 0, i, err = 0;
4801
4802         if (!peer)
4803                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4804                                &tx_lanes);
4805         else
4806                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4807                                     &tx_lanes);
4808         for (i = 0; i < tx_lanes; i++) {
4809                 if (!peer)
4810                         err = ufshcd_dme_set(hba,
4811                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4812                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4813                                         0);
4814                 else
4815                         err = ufshcd_dme_peer_set(hba,
4816                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4817                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4818                                         0);
4819                 if (err) {
4820                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4821                                 __func__, peer, i, err);
4822                         break;
4823                 }
4824         }
4825
4826         return err;
4827 }
4828
4829 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4830 {
4831         return ufshcd_disable_tx_lcc(hba, true);
4832 }
4833
4834 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4835 {
4836         struct ufs_event_hist *e;
4837
4838         if (id >= UFS_EVT_CNT)
4839                 return;
4840
4841         e = &hba->ufs_stats.event[id];
4842         e->val[e->pos] = val;
4843         e->tstamp[e->pos] = local_clock();
4844         e->cnt += 1;
4845         e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4846
4847         ufshcd_vops_event_notify(hba, id, &val);
4848 }
4849 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
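
/*
 * Illustrative usage (taken from ufshcd_link_startup() below): callers record
 * an error value in one of the UFS_EVT_* history ring buffers like this:
 *
 *	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)ret);
 */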
4850
4851 /**
4852  * ufshcd_link_startup - Initialize unipro link startup
4853  * @hba: per adapter instance
4854  *
4855  * Returns 0 for success, non-zero in case of failure
4856  */
4857 static int ufshcd_link_startup(struct ufs_hba *hba)
4858 {
4859         int ret;
4860         int retries = DME_LINKSTARTUP_RETRIES;
4861         bool link_startup_again = false;
4862
4863         /*
4864          * If UFS device isn't active then we will have to issue link startup
4865          * 2 times to make sure the device state move to active.
4866          */
4867         if (!ufshcd_is_ufs_dev_active(hba))
4868                 link_startup_again = true;
4869
4870 link_startup:
4871         do {
4872                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4873
4874                 ret = ufshcd_dme_link_startup(hba);
4875
4876                 /* check if device is detected by inter-connect layer */
4877                 if (!ret && !ufshcd_is_device_present(hba)) {
4878                         ufshcd_update_evt_hist(hba,
4879                                                UFS_EVT_LINK_STARTUP_FAIL,
4880                                                0);
4881                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4882                         ret = -ENXIO;
4883                         goto out;
4884                 }
4885
4886                 /*
4887                  * DME link lost indication is only received when link is up,
4888                  * but we can't be sure if the link is up until link startup
4889                  * succeeds. So reset the local Uni-Pro and try again.
4890                  */
4891                 if (ret && retries && ufshcd_hba_enable(hba)) {
4892                         ufshcd_update_evt_hist(hba,
4893                                                UFS_EVT_LINK_STARTUP_FAIL,
4894                                                (u32)ret);
4895                         goto out;
4896                 }
4897         } while (ret && retries--);
4898
4899         if (ret) {
4900                 /* failed to get the link up... give up */
4901                 ufshcd_update_evt_hist(hba,
4902                                        UFS_EVT_LINK_STARTUP_FAIL,
4903                                        (u32)ret);
4904                 goto out;
4905         }
4906
4907         if (link_startup_again) {
4908                 link_startup_again = false;
4909                 retries = DME_LINKSTARTUP_RETRIES;
4910                 goto link_startup;
4911         }
4912
4913         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4914         ufshcd_init_pwr_info(hba);
4915         ufshcd_print_pwr_info(hba);
4916
4917         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4918                 ret = ufshcd_disable_device_tx_lcc(hba);
4919                 if (ret)
4920                         goto out;
4921         }
4922
4923         /* Include any host controller configuration via UIC commands */
4924         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4925         if (ret)
4926                 goto out;
4927
4928         /* Clear UECPA once, in case a LINERESET happened during LINK_STARTUP */
4929         ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4930         ret = ufshcd_make_hba_operational(hba);
4931 out:
4932         if (ret) {
4933                 dev_err(hba->dev, "link startup failed %d\n", ret);
4934                 ufshcd_print_host_state(hba);
4935                 ufshcd_print_pwr_info(hba);
4936                 ufshcd_print_evt_hist(hba);
4937         }
4938         return ret;
4939 }
4940
4941 /**
4942  * ufshcd_verify_dev_init() - Verify device initialization
4943  * @hba: per-adapter instance
4944  *
4945  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4946  * device Transport Protocol (UTP) layer is ready after a reset.
4947  * If the UTP layer at the device side is not initialized, it may
4948  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4949  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4950  */
4951 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4952 {
4953         int err = 0;
4954         int retries;
4955
4956         ufshcd_hold(hba, false);
4957         mutex_lock(&hba->dev_cmd.lock);
4958         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4959                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4960                                           hba->nop_out_timeout);
4961
4962                 if (!err || err == -ETIMEDOUT)
4963                         break;
4964
4965                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4966         }
4967         mutex_unlock(&hba->dev_cmd.lock);
4968         ufshcd_release(hba);
4969
4970         if (err)
4971                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4972         return err;
4973 }
4974
4975 /**
4976  * ufshcd_setup_links - associate link b/w device wlun and other luns
4977  * @sdev: pointer to SCSI device
4978  * @hba: pointer to ufs hba
4979  */
4980 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
4981 {
4982         struct device_link *link;
4983
4984         /*
4985          * Device wlun is the supplier & rest of the luns are consumers.
4986          * This ensures that device wlun suspends after all other luns.
4987          */
4988         if (hba->ufs_device_wlun) {
4989                 link = device_link_add(&sdev->sdev_gendev,
4990                                        &hba->ufs_device_wlun->sdev_gendev,
4991                                        DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
4992                 if (!link) {
4993                         dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
4994                                 dev_name(&hba->ufs_device_wlun->sdev_gendev));
4995                         return;
4996                 }
4997                 hba->luns_avail--;
4998                 /* Ignore REPORT_LUN wlun probing */
4999                 if (hba->luns_avail == 1) {
5000                         ufshcd_rpm_put(hba);
5001                         return;
5002                 }
5003         } else {
5004                 /*
5005                  * Device wlun is probed. The assumption is that WLUNs are
5006                  * scanned before other LUNs.
5007                  */
5008                 hba->luns_avail--;
5009         }
5010 }
5011
5012 /**
5013  * ufshcd_lu_init - Initialize the relevant parameters of the LU
5014  * @hba: per-adapter instance
5015  * @sdev: pointer to SCSI device
5016  */
5017 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
5018 {
5019         int len = QUERY_DESC_MAX_SIZE;
5020         u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
5021         u8 lun_qdepth = hba->nutrs;
5022         u8 *desc_buf;
5023         int ret;
5024
5025         desc_buf = kzalloc(len, GFP_KERNEL);
5026         if (!desc_buf)
5027                 goto set_qdepth;
5028
5029         ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
5030         if (ret < 0) {
5031                 if (ret == -EOPNOTSUPP)
5032                         /* If LU doesn't support unit descriptor, its queue depth is set to 1 */
5033                         lun_qdepth = 1;
5034                 kfree(desc_buf);
5035                 goto set_qdepth;
5036         }
5037
5038         if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
5039                 /*
5040                  * In the per-LU queueing architecture, bLUQueueDepth will not be 0, so we
5041                  * use the smaller of UFSHCI CAP.NUTRS and the UFS LU's bLUQueueDepth
5042                  */
5043                 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
5044         }
5045         /*
5046          * According to UFS device specification, the write protection mode is only supported by
5047          * normal LU, not supported by WLUN.
5048          */
5049         if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
5050             !hba->dev_info.is_lu_power_on_wp &&
5051             desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
5052                 hba->dev_info.is_lu_power_on_wp = true;
5053
5054         /* In case of RPMB LU, check if advanced RPMB mode is enabled */
5055         if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
5056             desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
5057                 hba->dev_info.b_advanced_rpmb_en = true;
5058
5059
5060         kfree(desc_buf);
5061 set_qdepth:
5062         /*
5063          * For WLUNs that don't support the unit descriptor, the queue depth is set to 1. For LUs
5064          * whose bLUQueueDepth == 0, the queue depth is set to the maximum value the host can queue.
5065          */
5066         dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
5067         scsi_change_queue_depth(sdev, lun_qdepth);
5068 }
5069
5070 /**
5071  * ufshcd_slave_alloc - handle initial SCSI device configurations
5072  * @sdev: pointer to SCSI device
5073  *
5074  * Returns success
5075  */
5076 static int ufshcd_slave_alloc(struct scsi_device *sdev)
5077 {
5078         struct ufs_hba *hba;
5079
5080         hba = shost_priv(sdev->host);
5081
5082         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5083         sdev->use_10_for_ms = 1;
5084
5085         /* DBD field should be set to 1 in mode sense(10) */
5086         sdev->set_dbd_for_ms = 1;
5087
5088         /* allow SCSI layer to restart the device in case of errors */
5089         sdev->allow_restart = 1;
5090
5091         /* REPORT SUPPORTED OPERATION CODES is not supported */
5092         sdev->no_report_opcodes = 1;
5093
5094         /* WRITE_SAME command is not supported */
5095         sdev->no_write_same = 1;
5096
5097         ufshcd_lu_init(hba, sdev);
5098
5099         ufshcd_setup_links(hba, sdev);
5100
5101         return 0;
5102 }
5103
5104 /**
5105  * ufshcd_change_queue_depth - change queue depth
5106  * @sdev: pointer to SCSI device
5107  * @depth: required depth to set
5108  *
5109  * Change queue depth and make sure the max. limits are not crossed.
5110  */
5111 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5112 {
5113         return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
5114 }
5115
5116 static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
5117 {
5118         /* skip well-known LU */
5119         if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
5120             !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
5121                 return;
5122
5123         ufshpb_destroy_lu(hba, sdev);
5124 }
5125
5126 static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
5127 {
5128         /* skip well-known LU */
5129         if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
5130             !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
5131                 return;
5132
5133         ufshpb_init_hpb_lu(hba, sdev);
5134 }
5135
5136 /**
5137  * ufshcd_slave_configure - adjust SCSI device configurations
5138  * @sdev: pointer to SCSI device
5139  */
5140 static int ufshcd_slave_configure(struct scsi_device *sdev)
5141 {
5142         struct ufs_hba *hba = shost_priv(sdev->host);
5143         struct request_queue *q = sdev->request_queue;
5144
5145         ufshcd_hpb_configure(hba, sdev);
5146
5147         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5148         if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
5149                 blk_queue_update_dma_alignment(q, 4096 - 1);
5150         /*
5151          * Block runtime-pm until all consumers are added.
5152          * See ufshcd_setup_links().
5153          */
5154         if (is_device_wlun(sdev))
5155                 pm_runtime_get_noresume(&sdev->sdev_gendev);
5156         else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5157                 sdev->rpm_autosuspend = 1;
5158         /*
5159          * Do not print messages during runtime PM to avoid never-ending cycles
5160          * of messages written back to storage by user space causing runtime
5161          * resume, causing more messages and so on.
5162          */
5163         sdev->silence_suspend = 1;
5164
5165         ufshcd_crypto_register(hba, q);
5166
5167         return 0;
5168 }
5169
5170 /**
5171  * ufshcd_slave_destroy - remove SCSI device configurations
5172  * @sdev: pointer to SCSI device
5173  */
5174 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5175 {
5176         struct ufs_hba *hba;
5177         unsigned long flags;
5178
5179         hba = shost_priv(sdev->host);
5180
5181         ufshcd_hpb_destroy(hba, sdev);
5182
5183         /* Drop the reference as it won't be needed anymore */
5184         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5185                 spin_lock_irqsave(hba->host->host_lock, flags);
5186                 hba->ufs_device_wlun = NULL;
5187                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5188         } else if (hba->ufs_device_wlun) {
5189                 struct device *supplier = NULL;
5190
5191                 /* Ensure UFS Device WLUN exists and does not disappear */
5192                 spin_lock_irqsave(hba->host->host_lock, flags);
5193                 if (hba->ufs_device_wlun) {
5194                         supplier = &hba->ufs_device_wlun->sdev_gendev;
5195                         get_device(supplier);
5196                 }
5197                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5198
5199                 if (supplier) {
5200                         /*
5201                          * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5202                          * device will not have been registered but can still
5203                          * have a device link holding a reference to the device.
5204                          */
5205                         device_link_remove(&sdev->sdev_gendev, supplier);
5206                         put_device(supplier);
5207                 }
5208         }
5209 }
5210
5211 /**
5212  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5213  * @lrbp: pointer to local reference block of completed command
5214  * @scsi_status: SCSI command status
5215  *
5216  * Returns value based on SCSI command status
5217  */
5218 static inline int
5219 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5220 {
5221         int result = 0;
5222
5223         switch (scsi_status) {
5224         case SAM_STAT_CHECK_CONDITION:
5225                 ufshcd_copy_sense_data(lrbp);
5226                 fallthrough;
5227         case SAM_STAT_GOOD:
5228                 result |= DID_OK << 16 | scsi_status;
5229                 break;
5230         case SAM_STAT_TASK_SET_FULL:
5231         case SAM_STAT_BUSY:
5232         case SAM_STAT_TASK_ABORTED:
5233                 ufshcd_copy_sense_data(lrbp);
5234                 result |= scsi_status;
5235                 break;
5236         default:
5237                 result |= DID_ERROR << 16;
5238                 break;
5239         } /* end of switch */
5240
5241         return result;
5242 }
5243
5244 /**
5245  * ufshcd_transfer_rsp_status - Get overall status of the response
5246  * @hba: per adapter instance
5247  * @lrbp: pointer to local reference block of completed command
5248  * @cqe: pointer to the completion queue entry
5249  *
5250  * Returns result of the command to notify SCSI midlayer
5251  */
5252 static inline int
5253 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
5254                            struct cq_entry *cqe)
5255 {
5256         int result = 0;
5257         int scsi_status;
5258         enum utp_ocs ocs;
5259
5260         scsi_set_resid(lrbp->cmd,
5261                 be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count));
5262
5263         /* overall command status of utrd */
5264         ocs = ufshcd_get_tr_ocs(lrbp, cqe);
5265
5266         if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5267                 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
5268                                         MASK_RSP_UPIU_RESULT)
5269                         ocs = OCS_SUCCESS;
5270         }
5271
5272         switch (ocs) {
5273         case OCS_SUCCESS:
5274                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
5275                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5276                 switch (result) {
5277                 case UPIU_TRANSACTION_RESPONSE:
5278                         /*
5279                          * get the response UPIU result to extract
5280                          * the SCSI command status
5281                          */
5282                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5283
5284                         /*
5285                          * get the result based on SCSI status response
5286                          * to notify the SCSI midlayer of the command status
5287                          */
5288                         scsi_status = result & MASK_SCSI_STATUS;
5289                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5290
5291                         /*
5292                          * Currently we are only supporting BKOPs exception
5293                          * events hence we can ignore BKOPs exception event
5294                          * during power management callbacks. BKOPs exception
5295                          * event is not expected to be raised in runtime suspend
5296                          * callback as it allows the urgent bkops.
5297                          * During system suspend, we are anyway forcefully
5298                          * disabling the bkops and if urgent bkops is needed
5299                          * it will be enabled on system resume. Long term
5300                          * solution could be to abort the system suspend if
5301                          * UFS device needs urgent BKOPs.
5302                          */
5303                         if (!hba->pm_op_in_progress &&
5304                             !ufshcd_eh_in_progress(hba) &&
5305                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5306                                 /* Flushed in suspend */
5307                                 schedule_work(&hba->eeh_work);
5308
5309                         if (scsi_status == SAM_STAT_GOOD)
5310                                 ufshpb_rsp_upiu(hba, lrbp);
5311                         break;
5312                 case UPIU_TRANSACTION_REJECT_UPIU:
5313                         /* TODO: handle Reject UPIU Response */
5314                         result = DID_ERROR << 16;
5315                         dev_err(hba->dev,
5316                                 "Reject UPIU not fully implemented\n");
5317                         break;
5318                 default:
5319                         dev_err(hba->dev,
5320                                 "Unexpected request response code = %x\n",
5321                                 result);
5322                         result = DID_ERROR << 16;
5323                         break;
5324                 }
5325                 break;
5326         case OCS_ABORTED:
5327                 result |= DID_ABORT << 16;
5328                 break;
5329         case OCS_INVALID_COMMAND_STATUS:
5330                 result |= DID_REQUEUE << 16;
5331                 break;
5332         case OCS_INVALID_CMD_TABLE_ATTR:
5333         case OCS_INVALID_PRDT_ATTR:
5334         case OCS_MISMATCH_DATA_BUF_SIZE:
5335         case OCS_MISMATCH_RESP_UPIU_SIZE:
5336         case OCS_PEER_COMM_FAILURE:
5337         case OCS_FATAL_ERROR:
5338         case OCS_DEVICE_FATAL_ERROR:
5339         case OCS_INVALID_CRYPTO_CONFIG:
5340         case OCS_GENERAL_CRYPTO_ERROR:
5341         default:
5342                 result |= DID_ERROR << 16;
5343                 dev_err(hba->dev,
5344                                 "OCS error from controller = %x for tag %d\n",
5345                                 ocs, lrbp->task_tag);
5346                 ufshcd_print_evt_hist(hba);
5347                 ufshcd_print_host_state(hba);
5348                 break;
5349         } /* end of switch */
5350
5351         if ((host_byte(result) != DID_OK) &&
5352             (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5353                 ufshcd_print_tr(hba, lrbp->task_tag, true);
5354         return result;
5355 }
5356
5357 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5358                                          u32 intr_mask)
5359 {
5360         if (!ufshcd_is_auto_hibern8_supported(hba) ||
5361             !ufshcd_is_auto_hibern8_enabled(hba))
5362                 return false;
5363
5364         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5365                 return false;
5366
5367         if (hba->active_uic_cmd &&
5368             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5369             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5370                 return false;
5371
5372         return true;
5373 }
5374
5375 /**
5376  * ufshcd_uic_cmd_compl - handle completion of uic command
5377  * @hba: per adapter instance
5378  * @intr_status: interrupt status generated by the controller
5379  *
5380  * Returns
5381  *  IRQ_HANDLED - If interrupt is valid
5382  *  IRQ_NONE    - If invalid interrupt
5383  */
5384 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5385 {
5386         irqreturn_t retval = IRQ_NONE;
5387
5388         spin_lock(hba->host->host_lock);
5389         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5390                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5391
5392         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5393                 hba->active_uic_cmd->argument2 |=
5394                         ufshcd_get_uic_cmd_result(hba);
5395                 hba->active_uic_cmd->argument3 =
5396                         ufshcd_get_dme_attr_val(hba);
5397                 if (!hba->uic_async_done)
5398                         hba->active_uic_cmd->cmd_active = 0;
5399                 complete(&hba->active_uic_cmd->done);
5400                 retval = IRQ_HANDLED;
5401         }
5402
5403         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5404                 hba->active_uic_cmd->cmd_active = 0;
5405                 complete(hba->uic_async_done);
5406                 retval = IRQ_HANDLED;
5407         }
5408
5409         if (retval == IRQ_HANDLED)
5410                 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5411                                              UFS_CMD_COMP);
5412         spin_unlock(hba->host->host_lock);
5413         return retval;
5414 }
5415
5416 /* Release the resources allocated for processing a SCSI command. */
5417 static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5418                                     struct ufshcd_lrb *lrbp)
5419 {
5420         struct scsi_cmnd *cmd = lrbp->cmd;
5421
5422         scsi_dma_unmap(cmd);
5423         lrbp->cmd = NULL;       /* Mark the command as completed. */
5424         ufshcd_release(hba);
5425         ufshcd_clk_scaling_update_busy(hba);
5426 }
5427
5428 /**
5429  * ufshcd_compl_one_cqe - handle a completion queue entry
5430  * @hba: per adapter instance
5431  * @task_tag: the task tag of the request to be completed
5432  * @cqe: pointer to the completion queue entry
5433  */
5434 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
5435                           struct cq_entry *cqe)
5436 {
5437         struct ufshcd_lrb *lrbp;
5438         struct scsi_cmnd *cmd;
5439
5440         lrbp = &hba->lrb[task_tag];
5441         lrbp->compl_time_stamp = ktime_get();
5442         cmd = lrbp->cmd;
5443         if (cmd) {
5444                 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5445                         ufshcd_update_monitor(hba, lrbp);
5446                 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
5447                 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
5448                 ufshcd_release_scsi_cmd(hba, lrbp);
5449                 /* Do not touch lrbp after scsi done */
5450                 scsi_done(cmd);
5451         } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5452                    lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5453                 if (hba->dev_cmd.complete) {
5454                         hba->dev_cmd.cqe = cqe;
5455                         ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
5456                         complete(hba->dev_cmd.complete);
5457                         ufshcd_clk_scaling_update_busy(hba);
5458                 }
5459         }
5460 }
5461
5462 /**
5463  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5464  * @hba: per adapter instance
5465  * @completed_reqs: bitmask that indicates which requests to complete
5466  */
5467 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5468                                         unsigned long completed_reqs)
5469 {
5470         int tag;
5471
5472         for_each_set_bit(tag, &completed_reqs, hba->nutrs)
5473                 ufshcd_compl_one_cqe(hba, tag, NULL);
5474 }
5475
5476 /* Any value that is not an existing queue number is fine for this constant. */
5477 enum {
5478         UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
5479 };
5480
5481 static void ufshcd_clear_polled(struct ufs_hba *hba,
5482                                 unsigned long *completed_reqs)
5483 {
5484         int tag;
5485
5486         for_each_set_bit(tag, completed_reqs, hba->nutrs) {
5487                 struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
5488
5489                 if (!cmd)
5490                         continue;
5491                 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
5492                         __clear_bit(tag, completed_reqs);
5493         }
5494 }
5495
5496 /*
5497  * Returns > 0 if one or more commands have been completed or 0 if no
5498  * requests have been completed.
5499  */
5500 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5501 {
5502         struct ufs_hba *hba = shost_priv(shost);
5503         unsigned long completed_reqs, flags;
5504         u32 tr_doorbell;
5505         struct ufs_hw_queue *hwq;
5506
5507         if (is_mcq_enabled(hba)) {
5508                 hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
5509
5510                 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
5511         }
5512
5513         spin_lock_irqsave(&hba->outstanding_lock, flags);
5514         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5515         completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5516         WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5517                   "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5518                   hba->outstanding_reqs);
5519         if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
5520                 /* Do not complete polled requests from interrupt context. */
5521                 ufshcd_clear_polled(hba, &completed_reqs);
5522         }
5523         hba->outstanding_reqs &= ~completed_reqs;
5524         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5525
5526         if (completed_reqs)
5527                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5528
5529         return completed_reqs != 0;
5530 }
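
/*
 * Illustrative note (not part of the driver): ufshcd_poll() is expected to be
 * wired into the SCSI midlayer through the host template's .mq_poll callback,
 * e.g.:
 *
 *	.mq_poll = ufshcd_poll,
 *
 * and is also reused from interrupt context via the
 * UFSHCD_POLL_FROM_INTERRUPT_CONTEXT pseudo queue number (see
 * ufshcd_transfer_req_compl() below).
 */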
5531
5532 /**
5533  * ufshcd_transfer_req_compl - handle SCSI and query command completion
5534  * @hba: per adapter instance
5535  *
5536  * Returns
5537  *  IRQ_HANDLED - If interrupt is valid
5538  *  IRQ_NONE    - If invalid interrupt
5539  */
5540 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5541 {
5542         /* Resetting interrupt aggregation counters first and reading the
5543          * DOOR_BELL afterward allows us to handle all the completed requests.
5544          * To prevent starvation of other interrupts the DB is read once
5545          * after reset. The downside of this solution is the possibility of
5546          * false interrupt if device completes another request after resetting
5547          * aggregation and before reading the DB.
5548          */
5549         if (ufshcd_is_intr_aggr_allowed(hba) &&
5550             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5551                 ufshcd_reset_intr_aggr(hba);
5552
5553         if (ufs_fail_completion())
5554                 return IRQ_HANDLED;
5555
5556         /*
5557          * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
5558          * do not want polling to trigger spurious interrupt complaints.
5559          */
5560         ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
5561
5562         return IRQ_HANDLED;
5563 }
5564
5565 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5566 {
5567         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5568                                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5569                                        &ee_ctrl_mask);
5570 }
5571
5572 int ufshcd_write_ee_control(struct ufs_hba *hba)
5573 {
5574         int err;
5575
5576         mutex_lock(&hba->ee_ctrl_mutex);
5577         err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5578         mutex_unlock(&hba->ee_ctrl_mutex);
5579         if (err)
5580                 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5581                         __func__, err);
5582         return err;
5583 }
5584
5585 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
5586                              const u16 *other_mask, u16 set, u16 clr)
5587 {
5588         u16 new_mask, ee_ctrl_mask;
5589         int err = 0;
5590
5591         mutex_lock(&hba->ee_ctrl_mutex);
5592         new_mask = (*mask & ~clr) | set;
5593         ee_ctrl_mask = new_mask | *other_mask;
5594         if (ee_ctrl_mask != hba->ee_ctrl_mask)
5595                 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5596         /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5597         if (!err) {
5598                 hba->ee_ctrl_mask = ee_ctrl_mask;
5599                 *mask = new_mask;
5600         }
5601         mutex_unlock(&hba->ee_ctrl_mutex);
5602         return err;
5603 }
5604
5605 /**
5606  * ufshcd_disable_ee - disable exception event
5607  * @hba: per-adapter instance
5608  * @mask: exception event to disable
5609  *
5610  * Disables exception event in the device so that the EVENT_ALERT
5611  * bit is not set.
5612  *
5613  * Returns zero on success, non-zero error value on failure.
5614  */
5615 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5616 {
5617         return ufshcd_update_ee_drv_mask(hba, 0, mask);
5618 }
5619
5620 /**
5621  * ufshcd_enable_ee - enable exception event
5622  * @hba: per-adapter instance
5623  * @mask: exception event to enable
5624  *
5625  * Enable corresponding exception event in the device to allow
5626  * device to alert host in critical scenarios.
5627  *
5628  * Returns zero on success, non-zero error value on failure.
5629  */
5630 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5631 {
5632         return ufshcd_update_ee_drv_mask(hba, mask, 0);
5633 }
5634
5635 /**
5636  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5637  * @hba: per-adapter instance
5638  *
5639  * Allow device to manage background operations on its own. Enabling
5640  * this might lead to inconsistent latencies during normal data transfers
5641  * as the device is allowed to manage its own way of handling background
5642  * operations.
5643  *
5644  * Returns zero on success, non-zero on failure.
5645  */
5646 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5647 {
5648         int err = 0;
5649
5650         if (hba->auto_bkops_enabled)
5651                 goto out;
5652
5653         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5654                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5655         if (err) {
5656                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5657                                 __func__, err);
5658                 goto out;
5659         }
5660
5661         hba->auto_bkops_enabled = true;
5662         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5663
5664         /* No need of URGENT_BKOPS exception from the device */
5665         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5666         if (err)
5667                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5668                                 __func__, err);
5669 out:
5670         return err;
5671 }
5672
5673 /**
5674  * ufshcd_disable_auto_bkops - block device in doing background operations
5675  * @hba: per-adapter instance
5676  *
5677  * Disabling background operations improves command response latency but
5678  * has drawback of device moving into critical state where the device is
5679  * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5680  * host is idle so that BKOPS are managed effectively without any negative
5681  * impacts.
5682  *
5683  * Returns zero on success, non-zero on failure.
5684  */
5685 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5686 {
5687         int err = 0;
5688
5689         if (!hba->auto_bkops_enabled)
5690                 goto out;
5691
5692         /*
5693          * If host assisted BKOPs is to be enabled, make sure
5694          * urgent bkops exception is allowed.
5695          */
5696         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5697         if (err) {
5698                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5699                                 __func__, err);
5700                 goto out;
5701         }
5702
5703         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5704                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5705         if (err) {
5706                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5707                                 __func__, err);
5708                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5709                 goto out;
5710         }
5711
5712         hba->auto_bkops_enabled = false;
5713         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5714         hba->is_urgent_bkops_lvl_checked = false;
5715 out:
5716         return err;
5717 }
5718
5719 /**
5720  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5721  * @hba: per adapter instance
5722  *
5723  * After a device reset the device may toggle the BKOPS_EN flag back
5724  * to its default value, so the s/w tracking variables must be updated
5725  * as well. This function changes the auto-bkops state based on
5726  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5727  */
5728 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5729 {
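             /*
              * Force the cached auto_bkops_enabled flag to the opposite of the
              * desired state so that the enable/disable helpers below do not
              * take their early-return path and actually reprogram the device.
              */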
5730         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5731                 hba->auto_bkops_enabled = false;
5732                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5733                 ufshcd_enable_auto_bkops(hba);
5734         } else {
5735                 hba->auto_bkops_enabled = true;
5736                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5737                 ufshcd_disable_auto_bkops(hba);
5738         }
5739         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5740         hba->is_urgent_bkops_lvl_checked = false;
5741 }
5742
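     /* Read the device's bBackgroundOpStatus attribute (QUERY_ATTR_IDN_BKOPS_STATUS) */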
5743 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5744 {
5745         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5746                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5747 }
5748
5749 /**
5750  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5751  * @hba: per-adapter instance
5752  * @status: bkops_status value
5753  *
5754  * Read bkops_status from the UFS device and enable the fBackgroundOpsEn
5755  * flag in the device to permit background operations if the device
5756  * bkops_status is greater than or equal to the "status" argument passed to
5757  * this function; disable it otherwise.
5758  *
5759  * Returns 0 for success, non-zero in case of failure.
5760  *
5761  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5762  * to know whether auto bkops is enabled or disabled after this function
5763  * returns control to it.
5764  */
5765 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5766                              enum bkops_status status)
5767 {
5768         int err;
5769         u32 curr_status = 0;
5770
5771         err = ufshcd_get_bkops_status(hba, &curr_status);
5772         if (err) {
5773                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5774                                 __func__, err);
5775                 goto out;
5776         } else if (curr_status > BKOPS_STATUS_MAX) {
5777                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5778                                 __func__, curr_status);
5779                 err = -EINVAL;
5780                 goto out;
5781         }
5782
5783         if (curr_status >= status)
5784                 err = ufshcd_enable_auto_bkops(hba);
5785         else
5786                 err = ufshcd_disable_auto_bkops(hba);
5787 out:
5788         return err;
5789 }
5790
5791 /**
5792  * ufshcd_urgent_bkops - handle urgent bkops exception event
5793  * @hba: per-adapter instance
5794  *
5795  * Enable fBackgroundOpsEn flag in the device to permit background
5796  * operations.
5797  *
5798  * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is
5799  * not enabled, and a negative error value for any other failure.
5800  */
5801 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5802 {
5803         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5804 }
5805
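     /* Read the device's wExceptionEventStatus attribute (QUERY_ATTR_IDN_EE_STATUS) */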
5806 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5807 {
5808         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5809                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5810 }
5811
5812 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5813 {
5814         int err;
5815         u32 curr_status = 0;
5816
5817         if (hba->is_urgent_bkops_lvl_checked)
5818                 goto enable_auto_bkops;
5819
5820         err = ufshcd_get_bkops_status(hba, &curr_status);
5821         if (err) {
5822                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5823                                 __func__, err);
5824                 goto out;
5825         }
5826
5827         /*
5828          * We are seeing that some devices are raising the urgent bkops
5829          * exception events even when the BKOPS status doesn't indicate performance
5830          * impacted or critical. Handle such devices by determining their urgent
5831          * bkops status at runtime.
5832          */
5833         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5834                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5835                                 __func__, curr_status);
5836                 /* update the current status as the urgent bkops level */
5837                 hba->urgent_bkops_lvl = curr_status;
5838                 hba->is_urgent_bkops_lvl_checked = true;
5839         }
5840
5841 enable_auto_bkops:
5842         err = ufshcd_enable_auto_bkops(hba);
5843 out:
5844         if (err < 0)
5845                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5846                                 __func__, err);
5847 }
5848
5849 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
5850 {
5851         u32 value;
5852
5853         if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5854                                 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
5855                 return;
5856
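             /*
              * Reported temperature attributes are encoded with an offset of 80,
              * so value - 80 is the case temperature in degrees Celsius.
              */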
5857         dev_info(hba->dev, "exception Tcase %d\n", value - 80);
5858
5859         ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
5860
5861         /*
5862          * A placeholder for the platform vendors to add whatever additional
5863          * steps are required.
5864          */
5865 }
5866
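     /*
      * Set or clear a WriteBooster-related flag on the device. The query index
      * selects the WB-dedicated LU when LU-dedicated buffers are used, or 0 in
      * shared-buffer mode.
      */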
5867 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
5868 {
5869         u8 index;
5870         enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5871                                    UPIU_QUERY_OPCODE_CLEAR_FLAG;
5872
5873         index = ufshcd_wb_get_query_index(hba);
5874         return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5875 }
5876
5877 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
5878 {
5879         int ret;
5880
5881         if (!ufshcd_is_wb_allowed(hba) ||
5882             hba->dev_info.wb_enabled == enable)
5883                 return 0;
5884
5885         ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
5886         if (ret) {
5887                 dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
5888                         __func__, enable ? "enabling" : "disabling", ret);
5889                 return ret;
5890         }
5891
5892         hba->dev_info.wb_enabled = enable;
5893         dev_dbg(hba->dev, "%s: Write Booster %s\n",
5894                         __func__, enable ? "enabled" : "disabled");
5895
5896         return ret;
5897 }
5898
5899 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
5900                                                  bool enable)
5901 {
5902         int ret;
5903
5904         ret = __ufshcd_wb_toggle(hba, enable,
5905                         QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
5906         if (ret) {
5907                 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
5908                         __func__, enable ? "enabling" : "disabling", ret);
5909                 return;
5910         }
5911         dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
5912                         __func__, enable ? "enabled" : "disabled");
5913 }
5914
5915 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
5916 {
5917         int ret;
5918
5919         if (!ufshcd_is_wb_allowed(hba) ||
5920             hba->dev_info.wb_buf_flush_enabled == enable)
5921                 return 0;
5922
5923         ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
5924         if (ret) {
5925                 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
5926                         __func__, enable ? "enabling" : "disabling", ret);
5927                 return ret;
5928         }
5929
5930         hba->dev_info.wb_buf_flush_enabled = enable;
5931         dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
5932                         __func__, enable ? "enabled" : "disabled");
5933
5934         return ret;
5935 }
5936
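     /*
      * In "preserve user space" mode, decide whether VCC must stay on so the
      * WriteBooster buffer keeps flushing: true only if the current buffer is
      * non-empty and the available buffer has dropped below the flush threshold.
      */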
5937 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5938                                                 u32 avail_buf)
5939 {
5940         u32 cur_buf;
5941         int ret;
5942         u8 index;
5943
5944         index = ufshcd_wb_get_query_index(hba);
5945         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5946                                               QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5947                                               index, 0, &cur_buf);
5948         if (ret) {
5949                 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
5950                         __func__, ret);
5951                 return false;
5952         }
5953
5954         if (!cur_buf) {
5955                 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5956                          cur_buf);
5957                 return false;
5958         }
5959         /* Keep flushing while the available buffer is below the flush threshold */
5960         return avail_buf < hba->vps->wb_flush_threshold;
5961 }
5962
5963 static void ufshcd_wb_force_disable(struct ufs_hba *hba)
5964 {
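             /*
              * Turn off buffer flushing and WriteBooster itself, then clear the
              * capability bit so ufshcd_is_wb_allowed() no longer reports WB as
              * usable.
              */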
5965         if (ufshcd_is_wb_buf_flush_allowed(hba))
5966                 ufshcd_wb_toggle_buf_flush(hba, false);
5967
5968         ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
5969         ufshcd_wb_toggle(hba, false);
5970         hba->caps &= ~UFSHCD_CAP_WB_EN;
5971
5972         dev_info(hba->dev, "%s: WB force disabled\n", __func__);
5973 }
5974
5975 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
5976 {
5977         u32 lifetime;
5978         int ret;
5979         u8 index;
5980
5981         index = ufshcd_wb_get_query_index(hba);
5982         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5983                                       QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
5984                                       index, 0, &lifetime);
5985         if (ret) {
5986                 dev_err(hba->dev,
5987                         "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
5988                         __func__, ret);
5989                 return false;
5990         }
5991
5992         if (lifetime == UFS_WB_EXCEED_LIFETIME) {
5993                 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
5994                         __func__, lifetime);
5995                 return false;
5996         }
5997
5998         dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
5999                 __func__, lifetime);
6000
6001         return true;
6002 }
6003
6004 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
6005 {
6006         int ret;
6007         u32 avail_buf;
6008         u8 index;
6009
6010         if (!ufshcd_is_wb_allowed(hba))
6011                 return false;
6012
6013         if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
6014                 ufshcd_wb_force_disable(hba);
6015                 return false;
6016         }
6017
6018         /*
6019          * The UFS device needs VCC to be ON to flush.
6020          * With user-space reduction enabled, it's enough to enable flushing
6021          * by checking only the available buffer. The threshold
6022          * defined here is > 90% full.
6023          * With user-space preservation enabled, the current buffer
6024          * should be checked too because the WB buffer size can shrink
6025          * as the disk fills up. This info is provided by the current
6026          * buffer (dCurrentWriteBoosterBufferSize). There's no point in
6027          * keeping VCC on when the current buffer is empty.
6028          */
6029         index = ufshcd_wb_get_query_index(hba);
6030         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6031                                       QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
6032                                       index, 0, &avail_buf);
6033         if (ret) {
6034                 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
6035                          __func__, ret);
6036                 return false;
6037         }
6038
6039         if (!hba->dev_info.b_presrv_uspc_en)
6040                 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
6041
6042         return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
6043 }
6044
6045 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
6046 {
6047         struct ufs_hba *hba = container_of(to_delayed_work(work),
6048                                            struct ufs_hba,
6049                                            rpm_dev_flush_recheck_work);
6050         /*
6051          * To prevent unnecessary VCC power drain after device finishes
6052          * WriteBooster buffer flush or Auto BKOPs, force runtime resume
6053          * after a certain delay to recheck the threshold by next runtime
6054          * suspend.
6055          */
6056         ufshcd_rpm_get_sync(hba);
6057         ufshcd_rpm_put_sync(hba);
6058 }
6059
6060 /**
6061  * ufshcd_exception_event_handler - handle exceptions raised by device
6062  * @work: pointer to work data
6063  *
6064  * Read bExceptionEventStatus attribute from the device and handle the
6065  * exception event accordingly.
6066  */
6067 static void ufshcd_exception_event_handler(struct work_struct *work)
6068 {
6069         struct ufs_hba *hba;
6070         int err;
6071         u32 status = 0;
6072         hba = container_of(work, struct ufs_hba, eeh_work);
6073
6074         ufshcd_scsi_block_requests(hba);
6075         err = ufshcd_get_ee_status(hba, &status);
6076         if (err) {
6077                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
6078                                 __func__, err);
6079                 goto out;
6080         }
6081
6082         trace_ufshcd_exception_event(dev_name(hba->dev), status);
6083
6084         if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
6085                 ufshcd_bkops_exception_event_handler(hba);
6086
6087         if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
6088                 ufshcd_temp_exception_event_handler(hba, status);
6089
6090         ufs_debugfs_exception_event(hba, status);
6091 out:
6092         ufshcd_scsi_unblock_requests(hba);
6093 }
6094
6095 /* Complete requests that have door-bell cleared */
6096 static void ufshcd_complete_requests(struct ufs_hba *hba)
6097 {
6098         ufshcd_transfer_req_compl(hba);
6099         ufshcd_tmc_handler(hba);
6100 }
6101
6102 /**
6103  * ufshcd_quirk_dl_nac_errors - Check whether error handling is needed to
6104  *                              recover from the DL NAC errors.
6105  * @hba: per-adapter instance
6106  *
6107  * Returns true if error handling is required, false otherwise
6108  */
6109 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6110 {
6111         unsigned long flags;
6112         bool err_handling = true;
6113
6114         spin_lock_irqsave(hba->host->host_lock, flags);
6115         /*
6116          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
6117          * device fatal error and/or DL NAC & REPLAY timeout errors.
6118          */
6119         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6120                 goto out;
6121
6122         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6123             ((hba->saved_err & UIC_ERROR) &&
6124              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
6125                 goto out;
6126
6127         if ((hba->saved_err & UIC_ERROR) &&
6128             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6129                 int err;
6130                 /*
6131                  * Wait for 50ms to see if any other errors show up.
6132                  */
6133                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6134                 msleep(50);
6135                 spin_lock_irqsave(hba->host->host_lock, flags);
6136
6137                 /*
6138                  * Now check whether we have got any severe errors other than
6139                  * the DL NAC error.
6140                  */
6141                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6142                     ((hba->saved_err & UIC_ERROR) &&
6143                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
6144                         goto out;
6145
6146                 /*
6147                  * As DL NAC is the only error received so far, send out NOP
6148                  * command to confirm if link is still active or not.
6149                  *   - If we don't get any response then do error recovery.
6150                  *   - If we get response then clear the DL NAC error bit.
6151                  */
6152
6153                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6154                 err = ufshcd_verify_dev_init(hba);
6155                 spin_lock_irqsave(hba->host->host_lock, flags);
6156
6157                 if (err)
6158                         goto out;
6159
6160                 /* Link seems to be alive hence ignore the DL NAC errors */
6161                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6162                         hba->saved_err &= ~UIC_ERROR;
6163                 /* clear NAC error */
6164                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6165                 if (!hba->saved_uic_err)
6166                         err_handling = false;
6167         }
6168 out:
6169         spin_unlock_irqrestore(hba->host->host_lock, flags);
6170         return err_handling;
6171 }
6172
6173 /* host lock must be held before calling this func */
6174 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
6175 {
6176         return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
6177                (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
6178 }
6179
6180 void ufshcd_schedule_eh_work(struct ufs_hba *hba)
6181 {
6182         lockdep_assert_held(hba->host->host_lock);
6183
6184         /* handle fatal errors only when link is not in error state */
6185         if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6186                 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6187                     ufshcd_is_saved_err_fatal(hba))
6188                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6189                 else
6190                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6191                 queue_work(hba->eh_wq, &hba->eh_work);
6192         }
6193 }
6194
6195 static void ufshcd_force_error_recovery(struct ufs_hba *hba)
6196 {
6197         spin_lock_irq(hba->host->host_lock);
6198         hba->force_reset = true;
6199         ufshcd_schedule_eh_work(hba);
6200         spin_unlock_irq(hba->host->host_lock);
6201 }
6202
6203 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6204 {
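             /*
              * Take wb_mutex and the scaling lock in write mode so that no
              * WriteBooster update or clock-scaling operation is in flight
              * while the is_allowed flag changes.
              */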
6205         mutex_lock(&hba->wb_mutex);
6206         down_write(&hba->clk_scaling_lock);
6207         hba->clk_scaling.is_allowed = allow;
6208         up_write(&hba->clk_scaling_lock);
6209         mutex_unlock(&hba->wb_mutex);
6210 }
6211
6212 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6213 {
6214         if (suspend) {
6215                 if (hba->clk_scaling.is_enabled)
6216                         ufshcd_suspend_clkscaling(hba);
6217                 ufshcd_clk_scaling_allow(hba, false);
6218         } else {
6219                 ufshcd_clk_scaling_allow(hba, true);
6220                 if (hba->clk_scaling.is_enabled)
6221                         ufshcd_resume_clkscaling(hba);
6222         }
6223 }
6224
6225 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6226 {
6227         ufshcd_rpm_get_sync(hba);
6228         if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
6229             hba->is_sys_suspended) {
6230                 enum ufs_pm_op pm_op;
6231
6232                 /*
6233                  * Don't assume anything about resume: if
6234                  * resume fails, IRQs and clocks can be OFF, and power supplies
6235                  * can be OFF or in LPM.
6236                  */
6237                 ufshcd_setup_hba_vreg(hba, true);
6238                 ufshcd_enable_irq(hba);
6239                 ufshcd_setup_vreg(hba, true);
6240                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6241                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6242                 ufshcd_hold(hba, false);
6243                 if (!ufshcd_is_clkgating_allowed(hba))
6244                         ufshcd_setup_clocks(hba, true);
6245                 ufshcd_release(hba);
6246                 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6247                 ufshcd_vops_resume(hba, pm_op);
6248         } else {
6249                 ufshcd_hold(hba, false);
6250                 if (ufshcd_is_clkscaling_supported(hba) &&
6251                     hba->clk_scaling.is_enabled)
6252                         ufshcd_suspend_clkscaling(hba);
6253                 ufshcd_clk_scaling_allow(hba, false);
6254         }
6255         ufshcd_scsi_block_requests(hba);
6256         /* Drain ufshcd_queuecommand() */
6257         synchronize_rcu();
6258         cancel_work_sync(&hba->eeh_work);
6259 }
6260
6261 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6262 {
6263         ufshcd_scsi_unblock_requests(hba);
6264         ufshcd_release(hba);
6265         if (ufshcd_is_clkscaling_supported(hba))
6266                 ufshcd_clk_scaling_suspend(hba, false);
6267         ufshcd_rpm_put(hba);
6268 }
6269
6270 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6271 {
6272         return (!hba->is_powered || hba->shutting_down ||
6273                 !hba->ufs_device_wlun ||
6274                 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
6275                 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
6276                    ufshcd_is_link_broken(hba))));
6277 }
6278
6279 #ifdef CONFIG_PM
6280 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6281 {
6282         struct Scsi_Host *shost = hba->host;
6283         struct scsi_device *sdev;
6284         struct request_queue *q;
6285         int ret;
6286
6287         hba->is_sys_suspended = false;
6288         /*
6289          * Set RPM status of wlun device to RPM_ACTIVE,
6290          * this also clears its runtime error.
6291          */
6292         ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
6293
6294         /* hba device might have a runtime error otherwise */
6295         if (ret)
6296                 ret = pm_runtime_set_active(hba->dev);
6297         /*
6298          * If wlun device had runtime error, we also need to resume those
6299          * consumer scsi devices in case any of them has failed to be
6300          * resumed due to supplier runtime resume failure. This is to unblock
6301          * blk_queue_enter in case there are bios waiting inside it.
6302          */
6303         if (!ret) {
6304                 shost_for_each_device(sdev, shost) {
6305                         q = sdev->request_queue;
6306                         if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6307                                        q->rpm_status == RPM_SUSPENDING))
6308                                 pm_request_resume(q->dev);
6309                 }
6310         }
6311 }
6312 #else
6313 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6314 {
6315 }
6316 #endif
6317
6318 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6319 {
6320         struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6321         u32 mode;
6322
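             /*
              * Compare the RX/TX power modes reported by PA_PWRMODE with the
              * cached pwr_info; a mismatch means the link is no longer in the
              * negotiated mode (e.g. it dropped to PWM after a LINERESET) and
              * the power mode must be restored.
              */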
6323         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6324
6325         if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6326                 return true;
6327
6328         if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6329                 return true;
6330
6331         return false;
6332 }
6333
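     /*
      * Abort all outstanding transfer requests and clear all outstanding task
      * management requests. Returns true if anything could not be cleared and
      * a full host reset is therefore required.
      */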
6334 static bool ufshcd_abort_all(struct ufs_hba *hba)
6335 {
6336         bool needs_reset = false;
6337         int tag, ret;
6338
6339         /* Clear pending transfer requests */
6340         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6341                 ret = ufshcd_try_to_abort_task(hba, tag);
6342                 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6343                         hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6344                         ret ? "failed" : "succeeded");
6345                 if (ret) {
6346                         needs_reset = true;
6347                         goto out;
6348                 }
6349         }
6350
6351         /* Clear pending task management requests */
6352         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6353                 if (ufshcd_clear_tm_cmd(hba, tag)) {
6354                         needs_reset = true;
6355                         goto out;
6356                 }
6357         }
6358
6359 out:
6360         /* Complete the requests that are cleared by s/w */
6361         ufshcd_complete_requests(hba);
6362
6363         return needs_reset;
6364 }
6365
6366 /**
6367  * ufshcd_err_handler - handle UFS errors that require s/w attention
6368  * @work: pointer to work structure
6369  */
6370 static void ufshcd_err_handler(struct work_struct *work)
6371 {
6372         int retries = MAX_ERR_HANDLER_RETRIES;
6373         struct ufs_hba *hba;
6374         unsigned long flags;
6375         bool needs_restore;
6376         bool needs_reset;
6377         int pmc_err;
6378
6379         hba = container_of(work, struct ufs_hba, eh_work);
6380
6381         dev_info(hba->dev,
6382                  "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6383                  __func__, ufshcd_state_name[hba->ufshcd_state],
6384                  hba->is_powered, hba->shutting_down, hba->saved_err,
6385                  hba->saved_uic_err, hba->force_reset,
6386                  ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6387
6388         down(&hba->host_sem);
6389         spin_lock_irqsave(hba->host->host_lock, flags);
6390         if (ufshcd_err_handling_should_stop(hba)) {
6391                 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6392                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6393                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6394                 up(&hba->host_sem);
6395                 return;
6396         }
6397         ufshcd_set_eh_in_progress(hba);
6398         spin_unlock_irqrestore(hba->host->host_lock, flags);
6399         ufshcd_err_handling_prepare(hba);
6400         /* Complete requests that have door-bell cleared by h/w */
6401         ufshcd_complete_requests(hba);
6402         spin_lock_irqsave(hba->host->host_lock, flags);
6403 again:
6404         needs_restore = false;
6405         needs_reset = false;
6406
6407         if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6408                 hba->ufshcd_state = UFSHCD_STATE_RESET;
6409         /*
6410          * A full reset and restore might have happened after preparation
6411          * is finished, double check whether we should stop.
6412          */
6413         if (ufshcd_err_handling_should_stop(hba))
6414                 goto skip_err_handling;
6415
6416         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6417                 bool ret;
6418
6419                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6420                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6421                 ret = ufshcd_quirk_dl_nac_errors(hba);
6422                 spin_lock_irqsave(hba->host->host_lock, flags);
6423                 if (!ret && ufshcd_err_handling_should_stop(hba))
6424                         goto skip_err_handling;
6425         }
6426
6427         if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6428             (hba->saved_uic_err &&
6429              (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6430                 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6431
6432                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6433                 ufshcd_print_host_state(hba);
6434                 ufshcd_print_pwr_info(hba);
6435                 ufshcd_print_evt_hist(hba);
6436                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6437                 ufshcd_print_trs_all(hba, pr_prdt);
6438                 spin_lock_irqsave(hba->host->host_lock, flags);
6439         }
6440
6441         /*
6442          * if host reset is required then skip clearing the pending
6443          * transfers forcefully because they will get cleared during
6444          * host reset and restore
6445          */
6446         if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6447             ufshcd_is_saved_err_fatal(hba) ||
6448             ((hba->saved_err & UIC_ERROR) &&
6449              (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6450                                     UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6451                 needs_reset = true;
6452                 goto do_reset;
6453         }
6454
6455         /*
6456          * If LINERESET was caught, UFS might have been put to PWM mode,
6457          * check if power mode restore is needed.
6458          */
6459         if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6460                 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6461                 if (!hba->saved_uic_err)
6462                         hba->saved_err &= ~UIC_ERROR;
6463                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6464                 if (ufshcd_is_pwr_mode_restore_needed(hba))
6465                         needs_restore = true;
6466                 spin_lock_irqsave(hba->host->host_lock, flags);
6467                 if (!hba->saved_err && !needs_restore)
6468                         goto skip_err_handling;
6469         }
6470
6471         hba->silence_err_logs = true;
6472         /* release lock as clear command might sleep */
6473         spin_unlock_irqrestore(hba->host->host_lock, flags);
6474
6475         needs_reset = ufshcd_abort_all(hba);
6476
6477         spin_lock_irqsave(hba->host->host_lock, flags);
6478         hba->silence_err_logs = false;
6479         if (needs_reset)
6480                 goto do_reset;
6481
6482         /*
6483          * After all requests and tasks are cleared from the doorbell,
6484          * it is now safe to restore the power mode.
6485          */
6486         if (needs_restore) {
6487                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6488                 /*
6489                  * Hold the scaling lock just in case dev cmds
6490                  * are sent via bsg and/or sysfs.
6491                  */
6492                 down_write(&hba->clk_scaling_lock);
6493                 hba->force_pmc = true;
6494                 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6495                 if (pmc_err) {
6496                         needs_reset = true;
6497                         dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6498                                         __func__, pmc_err);
6499                 }
6500                 hba->force_pmc = false;
6501                 ufshcd_print_pwr_info(hba);
6502                 up_write(&hba->clk_scaling_lock);
6503                 spin_lock_irqsave(hba->host->host_lock, flags);
6504         }
6505
6506 do_reset:
6507         /* Fatal errors need reset */
6508         if (needs_reset) {
6509                 int err;
6510
6511                 hba->force_reset = false;
6512                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6513                 err = ufshcd_reset_and_restore(hba);
6514                 if (err)
6515                         dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6516                                         __func__, err);
6517                 else
6518                         ufshcd_recover_pm_error(hba);
6519                 spin_lock_irqsave(hba->host->host_lock, flags);
6520         }
6521
6522 skip_err_handling:
6523         if (!needs_reset) {
6524                 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6525                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6526                 if (hba->saved_err || hba->saved_uic_err)
6527                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6528                             __func__, hba->saved_err, hba->saved_uic_err);
6529         }
6530         /* Exit in an operational state or dead */
6531         if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6532             hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6533                 if (--retries)
6534                         goto again;
6535                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6536         }
6537         ufshcd_clear_eh_in_progress(hba);
6538         spin_unlock_irqrestore(hba->host->host_lock, flags);
6539         ufshcd_err_handling_unprepare(hba);
6540         up(&hba->host_sem);
6541
6542         dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6543                  ufshcd_state_name[hba->ufshcd_state]);
6544 }
6545
6546 /**
6547  * ufshcd_update_uic_error - check and set fatal UIC error flags.
6548  * @hba: per-adapter instance
6549  *
6550  * Returns
6551  *  IRQ_HANDLED - If interrupt is valid
6552  *  IRQ_NONE    - If invalid interrupt
6553  */
6554 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6555 {
6556         u32 reg;
6557         irqreturn_t retval = IRQ_NONE;
6558
6559         /* PHY layer error */
6560         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6561         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6562             (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6563                 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6564                 /*
6565                  * To know whether this error is fatal or not, DB timeout
6566                  * must be checked but this error is handled separately.
6567                  */
6568                 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6569                         dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6570                                         __func__);
6571
6572                 /* Got a LINERESET indication. */
6573                 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6574                         struct uic_command *cmd = NULL;
6575
6576                         hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6577                         if (hba->uic_async_done && hba->active_uic_cmd)
6578                                 cmd = hba->active_uic_cmd;
6579                         /*
6580                          * Ignore the LINERESET during power mode change
6581                          * operation via DME_SET command.
6582                          */
6583                         if (cmd && (cmd->command == UIC_CMD_DME_SET))
6584                                 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6585                 }
6586                 retval |= IRQ_HANDLED;
6587         }
6588
6589         /* PA_INIT_ERROR is fatal and needs UIC reset */
6590         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6591         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6592             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6593                 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6594
6595                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6596                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6597                 else if (hba->dev_quirks &
6598                                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6599                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6600                                 hba->uic_error |=
6601                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6602                         else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6603                                 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6604                 }
6605                 retval |= IRQ_HANDLED;
6606         }
6607
6608         /* UIC NL/TL/DME errors need software retry */
6609         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6610         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6611             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6612                 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6613                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6614                 retval |= IRQ_HANDLED;
6615         }
6616
6617         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6618         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6619             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6620                 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6621                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6622                 retval |= IRQ_HANDLED;
6623         }
6624
6625         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6626         if ((reg & UIC_DME_ERROR) &&
6627             (reg & UIC_DME_ERROR_CODE_MASK)) {
6628                 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6629                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6630                 retval |= IRQ_HANDLED;
6631         }
6632
6633         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6634                         __func__, hba->uic_error);
6635         return retval;
6636 }
6637
6638 /**
6639  * ufshcd_check_errors - Check for errors that need s/w attention
6640  * @hba: per-adapter instance
6641  * @intr_status: interrupt status generated by the controller
6642  *
6643  * Returns
6644  *  IRQ_HANDLED - If interrupt is valid
6645  *  IRQ_NONE    - If invalid interrupt
6646  */
6647 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6648 {
6649         bool queue_eh_work = false;
6650         irqreturn_t retval = IRQ_NONE;
6651
6652         spin_lock(hba->host->host_lock);
6653         hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6654
6655         if (hba->errors & INT_FATAL_ERRORS) {
6656                 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6657                                        hba->errors);
6658                 queue_eh_work = true;
6659         }
6660
6661         if (hba->errors & UIC_ERROR) {
6662                 hba->uic_error = 0;
6663                 retval = ufshcd_update_uic_error(hba);
6664                 if (hba->uic_error)
6665                         queue_eh_work = true;
6666         }
6667
6668         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6669                 dev_err(hba->dev,
6670                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6671                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6672                         "Enter" : "Exit",
6673                         hba->errors, ufshcd_get_upmcrs(hba));
6674                 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6675                                        hba->errors);
6676                 ufshcd_set_link_broken(hba);
6677                 queue_eh_work = true;
6678         }
6679
6680         if (queue_eh_work) {
6681                 /*
6682                  * update the transfer error masks to sticky bits, let's do this
6683                  * irrespective of current ufshcd_state.
6684                  */
6685                 hba->saved_err |= hba->errors;
6686                 hba->saved_uic_err |= hba->uic_error;
6687
6688                 /* dump controller state before resetting */
6689                 if ((hba->saved_err &
6690                      (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6691                     (hba->saved_uic_err &&
6692                      (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6693                         dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6694                                         __func__, hba->saved_err,
6695                                         hba->saved_uic_err);
6696                         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6697                                          "host_regs: ");
6698                         ufshcd_print_pwr_info(hba);
6699                 }
6700                 ufshcd_schedule_eh_work(hba);
6701                 retval |= IRQ_HANDLED;
6702         }
6703         /*
6704          * if (!queue_eh_work) -
6705          * Other errors are either non-fatal where host recovers
6706          * itself without s/w intervention or errors that will be
6707          * handled by the SCSI core layer.
6708          */
6709         hba->errors = 0;
6710         hba->uic_error = 0;
6711         spin_unlock(hba->host->host_lock);
6712         return retval;
6713 }
6714
6715 /**
6716  * ufshcd_tmc_handler - handle task management function completion
6717  * @hba: per adapter instance
6718  *
6719  * Returns
6720  *  IRQ_HANDLED - If interrupt is valid
6721  *  IRQ_NONE    - If invalid interrupt
6722  */
6723 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6724 {
6725         unsigned long flags, pending, issued;
6726         irqreturn_t ret = IRQ_NONE;
6727         int tag;
6728
6729         spin_lock_irqsave(hba->host->host_lock, flags);
6730         pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
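             /* A task has completed once its doorbell bit is no longer pending */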
6731         issued = hba->outstanding_tasks & ~pending;
6732         for_each_set_bit(tag, &issued, hba->nutmrs) {
6733                 struct request *req = hba->tmf_rqs[tag];
6734                 struct completion *c = req->end_io_data;
6735
6736                 complete(c);
6737                 ret = IRQ_HANDLED;
6738         }
6739         spin_unlock_irqrestore(hba->host->host_lock, flags);
6740
6741         return ret;
6742 }
6743
6744 /**
6745  * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
6746  * @hba: per adapter instance
6747  *
6748  * Returns IRQ_HANDLED if interrupt is handled
6749  */
6750 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
6751 {
6752         struct ufs_hw_queue *hwq;
6753         unsigned long outstanding_cqs;
6754         unsigned int nr_queues;
6755         int i, ret;
6756         u32 events;
6757
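             /*
              * If the vendor hook is not implemented or fails, conservatively
              * assume that every hardware queue may have pending completions.
              */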
6758         ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
6759         if (ret)
6760                 outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
6761
6762         /* Exclude the poll queues */
6763         nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
6764         for_each_set_bit(i, &outstanding_cqs, nr_queues) {
6765                 hwq = &hba->uhq[i];
6766
6767                 events = ufshcd_mcq_read_cqis(hba, i);
6768                 if (events)
6769                         ufshcd_mcq_write_cqis(hba, events, i);
6770
6771                 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
6772                         ufshcd_mcq_poll_cqe_nolock(hba, hwq);
6773         }
6774
6775         return IRQ_HANDLED;
6776 }
6777
6778 /**
6779  * ufshcd_sl_intr - Interrupt service routine
6780  * @hba: per adapter instance
6781  * @intr_status: contains interrupts generated by the controller
6782  *
6783  * Returns
6784  *  IRQ_HANDLED - If interrupt is valid
6785  *  IRQ_NONE    - If invalid interrupt
6786  */
6787 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6788 {
6789         irqreturn_t retval = IRQ_NONE;
6790
6791         if (intr_status & UFSHCD_UIC_MASK)
6792                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6793
6794         if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6795                 retval |= ufshcd_check_errors(hba, intr_status);
6796
6797         if (intr_status & UTP_TASK_REQ_COMPL)
6798                 retval |= ufshcd_tmc_handler(hba);
6799
6800         if (intr_status & UTP_TRANSFER_REQ_COMPL)
6801                 retval |= ufshcd_transfer_req_compl(hba);
6802
6803         if (intr_status & MCQ_CQ_EVENT_STATUS)
6804                 retval |= ufshcd_handle_mcq_cq_events(hba);
6805
6806         return retval;
6807 }
6808
6809 /**
6810  * ufshcd_intr - Main interrupt service routine
6811  * @irq: irq number
6812  * @__hba: pointer to adapter instance
6813  *
6814  * Returns
6815  *  IRQ_HANDLED - If interrupt is valid
6816  *  IRQ_NONE    - If invalid interrupt
6817  */
6818 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6819 {
6820         u32 intr_status, enabled_intr_status = 0;
6821         irqreturn_t retval = IRQ_NONE;
6822         struct ufs_hba *hba = __hba;
6823         int retries = hba->nutrs;
6824
6825         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6826         hba->ufs_stats.last_intr_status = intr_status;
6827         hba->ufs_stats.last_intr_ts = local_clock();
6828
6829         /*
6830          * There can be at most hba->nutrs reqs in flight. In the worst case,
6831          * if the reqs finish one by one after the interrupt status is
6832          * read, make sure we handle them by checking the interrupt status
6833          * again in a loop until all of the reqs are processed before returning.
6834          */
6835         while (intr_status && retries--) {
6836                 enabled_intr_status =
6837                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6838                 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6839                 if (enabled_intr_status)
6840                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6841
6842                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6843         }
6844
6845         if (enabled_intr_status && retval == IRQ_NONE &&
6846             (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6847              hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
6848                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6849                                         __func__,
6850                                         intr_status,
6851                                         hba->ufs_stats.last_intr_status,
6852                                         enabled_intr_status);
6853                 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6854         }
6855
6856         return retval;
6857 }
6858
6859 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6860 {
6861         int err = 0;
6862         u32 mask = 1 << tag;
6863         unsigned long flags;
6864
6865         if (!test_bit(tag, &hba->outstanding_tasks))
6866                 goto out;
6867
6868         spin_lock_irqsave(hba->host->host_lock, flags);
6869         ufshcd_utmrl_clear(hba, tag);
6870         spin_unlock_irqrestore(hba->host->host_lock, flags);
6871
6872         /* poll for max. 1 sec to clear door bell register by h/w */
6873         err = ufshcd_wait_for_register(hba,
6874                         REG_UTP_TASK_REQ_DOOR_BELL,
6875                         mask, 0, 1000, 1000);
6876
6877         dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
6878                 tag, err ? "failed" : "succeeded");
6879
6880 out:
6881         return err;
6882 }
6883
6884 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6885                 struct utp_task_req_desc *treq, u8 tm_function)
6886 {
6887         struct request_queue *q = hba->tmf_queue;
6888         struct Scsi_Host *host = hba->host;
6889         DECLARE_COMPLETION_ONSTACK(wait);
6890         struct request *req;
6891         unsigned long flags;
6892         int task_tag, err;
6893
6894         /*
6895          * blk_mq_alloc_request() is used here only to get a free tag.
6896          */
6897         req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
6898         if (IS_ERR(req))
6899                 return PTR_ERR(req);
6900
6901         req->end_io_data = &wait;
6902         ufshcd_hold(hba, false);
6903
6904         spin_lock_irqsave(host->host_lock, flags);
6905
6906         task_tag = req->tag;
6907         WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
6908                   task_tag);
6909         hba->tmf_rqs[req->tag] = req;
6910         treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
6911
6912         memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6913         ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
6914
6915         /* send command to the controller */
6916         __set_bit(task_tag, &hba->outstanding_tasks);
6917
6918         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
6919         /* Make sure that doorbell is committed immediately */
6920         wmb();
6921
6922         spin_unlock_irqrestore(host->host_lock, flags);
6923
6924         ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
6925
6926         /* wait until the task management command is completed */
6927         err = wait_for_completion_io_timeout(&wait,
6928                         msecs_to_jiffies(TM_CMD_TIMEOUT));
6929         if (!err) {
6930                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
6931                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6932                                 __func__, tm_function);
6933                 if (ufshcd_clear_tm_cmd(hba, task_tag))
6934                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6935                                         __func__, task_tag);
6936                 err = -ETIMEDOUT;
6937         } else {
6938                 err = 0;
6939                 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
6940
6941                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
6942         }
6943
6944         spin_lock_irqsave(hba->host->host_lock, flags);
6945         hba->tmf_rqs[req->tag] = NULL;
6946         __clear_bit(task_tag, &hba->outstanding_tasks);
6947         spin_unlock_irqrestore(hba->host->host_lock, flags);
6948
6949         ufshcd_release(hba);
6950         blk_mq_free_request(req);
6951
6952         return err;
6953 }
6954
6955 /**
6956  * ufshcd_issue_tm_cmd - issues task management commands to controller
6957  * @hba: per adapter instance
6958  * @lun_id: LUN ID to which TM command is sent
6959  * @task_id: task ID to which the TM command is applicable
6960  * @tm_function: task management function opcode
6961  * @tm_response: task management service response return value
6962  *
6963  * Returns non-zero value on error, zero on success.
6964  */
6965 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6966                 u8 tm_function, u8 *tm_response)
6967 {
6968         struct utp_task_req_desc treq = { { 0 }, };
6969         enum utp_ocs ocs_value;
6970         int err;
6971
6972         /* Configure task request descriptor */
6973         treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6974         treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6975
6976         /* Configure task request UPIU */
6977         treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6978                                   cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6979         treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6980
6981         /*
6982          * The host shall provide the same value for LUN field in the basic
6983          * header and for Input Parameter.
6984          */
6985         treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
6986         treq.upiu_req.input_param2 = cpu_to_be32(task_id);
6987
6988         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6989         if (err == -ETIMEDOUT)
6990                 return err;
6991
6992         ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6993         if (ocs_value != OCS_SUCCESS)
6994                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6995                                 __func__, ocs_value);
6996         else if (tm_response)
6997                 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
6998                                 MASK_TM_SERVICE_RESP;
6999         return err;
7000 }
7001
7002 /**
7003  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
7004  * @hba:        per-adapter instance
7005  * @req_upiu:   upiu request
7006  * @rsp_upiu:   upiu reply
7007  * @desc_buff:  pointer to descriptor buffer, NULL if NA
7008  * @buff_len:   descriptor size, 0 if NA
7009  * @cmd_type:   specifies the type (NOP, Query...)
7010  * @desc_op:    descriptor operation
7011  *
7012  * These types of requests use the UTP Transfer Request Descriptor (UTRD).
7013  * Therefore, they "ride" the device management infrastructure: they use its
7014  * tag and task work queues.
7015  *
7016  * Since there is only one available tag for device management commands,
7017  * the caller is expected to hold the hba->dev_cmd.lock mutex.
7018  */
7019 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
7020                                         struct utp_upiu_req *req_upiu,
7021                                         struct utp_upiu_req *rsp_upiu,
7022                                         u8 *desc_buff, int *buff_len,
7023                                         enum dev_cmd_type cmd_type,
7024                                         enum query_opcode desc_op)
7025 {
7026         DECLARE_COMPLETION_ONSTACK(wait);
7027         const u32 tag = hba->reserved_slot;
7028         struct ufshcd_lrb *lrbp;
7029         int err = 0;
7030         u8 upiu_flags;
7031
7032         /* Protects use of hba->reserved_slot. */
7033         lockdep_assert_held(&hba->dev_cmd.lock);
7034
7035         down_read(&hba->clk_scaling_lock);
7036
7037         lrbp = &hba->lrb[tag];
7038         WARN_ON(lrbp->cmd);
7039         lrbp->cmd = NULL;
7040         lrbp->task_tag = tag;
7041         lrbp->lun = 0;
7042         lrbp->intr_cmd = true;
7043         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7044         hba->dev_cmd.type = cmd_type;
7045
7046         if (hba->ufs_version <= ufshci_version(1, 1))
7047                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
7048         else
7049                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7050
7051         /* update the task tag in the request upiu */
7052         req_upiu->header.dword_0 |= cpu_to_be32(tag);
7053
7054         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
7055
7056         /* just copy the upiu request as it is */
7057         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7058         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
7059                 /* The Data Segment Area is optional depending upon the query
7060                  * function value. For WRITE DESCRIPTOR, the data segment
7061                  * follows right after the TSF.
7062                  */
7063                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
7064                 *buff_len = 0;
7065         }
7066
7067         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7068
7069         hba->dev_cmd.complete = &wait;
7070
7071         ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
7072
7073         ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7074         /*
7075          * Ignore the return value here - ufshcd_check_query_response is
7076          * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
7077          * Read the response directly, ignoring all errors.
7078          */
7079         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
7080
7081         /* just copy the upiu response as it is */
7082         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7083         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
7084                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
7085                 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
7086                                MASK_QUERY_DATA_SEG_LEN;
7087
7088                 if (*buff_len >= resp_len) {
7089                         memcpy(desc_buff, descp, resp_len);
7090                         *buff_len = resp_len;
7091                 } else {
7092                         dev_warn(hba->dev,
7093                                  "%s: rsp size %d is bigger than buffer size %d",
7094                                  __func__, resp_len, *buff_len);
7095                         *buff_len = 0;
7096                         err = -EINVAL;
7097                 }
7098         }
7099         ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
7100                                     (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
7101
7102         up_read(&hba->clk_scaling_lock);
7103         return err;
7104 }
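
/*
 * Illustrative note on the command descriptor layout that
 * ufshcd_issue_devman_upiu_cmd() relies on, assuming sizeof(struct utp_upiu_req)
 * is 32 bytes as implied by the UPIU definitions used in this file:
 *
 *   ucd_req_ptr      -> request UPIU header + Transaction Specific Fields
 *   ucd_req_ptr + 1  -> optional Data Segment (e.g. a WRITE DESCRIPTOR payload),
 *                       i.e. byte offset 32 from the start of the request UPIU
 *   ucd_rsp_ptr      -> response UPIU; for READ DESCRIPTOR the returned data
 *                       segment starts right after the 32-byte response UPIU,
 *                       with its length taken from dword_2 & MASK_QUERY_DATA_SEG_LEN
 */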
7105
7106 /**
7107  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
7108  * @hba:        per-adapter instance
7109  * @req_upiu:   upiu request
7110  * @rsp_upiu:   upiu reply - only 8 DW as we do not support scsi commands
7111  * @msgcode:    message code, one of UPIU Transaction Codes Initiator to Target
7112  * @desc_buff:  pointer to descriptor buffer, NULL if NA
7113  * @buff_len:   descriptor size, 0 if NA
7114  * @desc_op:    descriptor operation
7115  *
7116  * Supports UTP Transfer requests (NOP and Query), and UTP Task
7117  * Management requests.
7118  * It is up to the caller to fill the UPIU content properly, as it will
7119  * be copied without any further input validation.
7120  */
7121 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
7122                              struct utp_upiu_req *req_upiu,
7123                              struct utp_upiu_req *rsp_upiu,
7124                              int msgcode,
7125                              u8 *desc_buff, int *buff_len,
7126                              enum query_opcode desc_op)
7127 {
7128         int err;
7129         enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
7130         struct utp_task_req_desc treq = { { 0 }, };
7131         enum utp_ocs ocs_value;
7132         u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
7133
7134         switch (msgcode) {
7135         case UPIU_TRANSACTION_NOP_OUT:
7136                 cmd_type = DEV_CMD_TYPE_NOP;
7137                 fallthrough;
7138         case UPIU_TRANSACTION_QUERY_REQ:
7139                 ufshcd_hold(hba, false);
7140                 mutex_lock(&hba->dev_cmd.lock);
7141                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
7142                                                    desc_buff, buff_len,
7143                                                    cmd_type, desc_op);
7144                 mutex_unlock(&hba->dev_cmd.lock);
7145                 ufshcd_release(hba);
7146
7147                 break;
7148         case UPIU_TRANSACTION_TASK_REQ:
7149                 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
7150                 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
7151
7152                 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
7153
7154                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
7155                 if (err == -ETIMEDOUT)
7156                         break;
7157
7158                 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
7159                 if (ocs_value != OCS_SUCCESS) {
7160                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
7161                                 ocs_value);
7162                         break;
7163                 }
7164
7165                 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
7166
7167                 break;
7168         default:
7169                 err = -EINVAL;
7170
7171                 break;
7172         }
7173
7174         return err;
7175 }
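
/*
 * Example (illustrative sketch only, not part of the driver): how a caller such
 * as the UFS BSG layer might send a NOP OUT through ufshcd_exec_raw_upiu_cmd().
 * The header encoding assumes the dword_0/1/2 big-endian UPIU header layout
 * used elsewhere in this file; NOP OUT's transaction code is 0x00, so a zeroed
 * header already encodes it, and the type is set explicitly here only for clarity.
 *
 *	struct utp_upiu_req req = { }, rsp = { };
 *	int len = 0;
 *	int ret;
 *
 *	// Transaction type lives in the most significant byte of dword_0;
 *	// the driver ORs the task tag into the least significant byte itself.
 *	req.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);
 *
 *	ret = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
 *				       UPIU_TRANSACTION_NOP_OUT,
 *				       NULL, &len, UPIU_QUERY_OPCODE_NOP);
 *
 * The UPIU content is copied as-is, so the caller is responsible for a
 * well-formed request.
 */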
7176
7177 /**
7178  * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
7179  * @hba:        per adapter instance
7180  * @req_upiu:   upiu request
7181  * @rsp_upiu:   upiu reply
7182  * @req_ehs:    EHS field which contains Advanced RPMB Request Message
7183  * @rsp_ehs:    EHS field which returns Advanced RPMB Response Message
7184  * @sg_cnt:     The number of sg lists actually used
7185  * @sg_list:    Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
7186  * @dir:        DMA direction
7187  *
7188  * Returns zero on success, non-zero on failure
7189  */
7190 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
7191                          struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
7192                          struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
7193                          enum dma_data_direction dir)
7194 {
7195         DECLARE_COMPLETION_ONSTACK(wait);
7196         const u32 tag = hba->reserved_slot;
7197         struct ufshcd_lrb *lrbp;
7198         int err = 0;
7199         int result;
7200         u8 upiu_flags;
7201         u8 *ehs_data;
7202         u16 ehs_len;
7203
7204         /* Protects use of hba->reserved_slot. */
7205         ufshcd_hold(hba, false);
7206         mutex_lock(&hba->dev_cmd.lock);
7207         down_read(&hba->clk_scaling_lock);
7208
7209         lrbp = &hba->lrb[tag];
7210         WARN_ON(lrbp->cmd);
7211         lrbp->cmd = NULL;
7212         lrbp->task_tag = tag;
7213         lrbp->lun = UFS_UPIU_RPMB_WLUN;
7214
7215         lrbp->intr_cmd = true;
7216         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7217         hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
7218
7219         /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
7220         lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7221
7222         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
7223
7224         /* update the task tag and LUN in the request upiu */
7225         req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
7226
7227         /* copy the UPIU (which contains the CDB) request as it is */
7228         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7229         /* Copy the EHS, starting at byte 32, immediately after the CDB package */
7230         memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
7231
7232         if (dir != DMA_NONE && sg_list)
7233                 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
7234
7235         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7236
7237         hba->dev_cmd.complete = &wait;
7238
7239         ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7240
7241         err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
7242
7243         if (!err) {
7244                 /* Just copy the upiu response as it is */
7245                 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7246                 /* Get the response UPIU result */
7247                 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
7248
7249                 ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
7250                 /*
7251                  * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
7252                  * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
7253                  * Message is 02h
7254                  */
7255                 if (ehs_len == 2 && rsp_ehs) {
7256                         /*
7257                          * ucd_rsp_ptr points to a buffer with a length of 512 bytes
7258                          * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
7259                          */
7260                         ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
7261                         memcpy(rsp_ehs, ehs_data, ehs_len * 32);
7262                 }
7263         }
7264
7265         up_read(&hba->clk_scaling_lock);
7266         mutex_unlock(&hba->dev_cmd.lock);
7267         ufshcd_release(hba);
7268         return err ? : result;
7269 }
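
/*
 * Worked example for the EHS handling above: ehs_len is bits 31:24 of the
 * big-endian response header dword_2 and is expressed in 32-byte units, so the
 * ehs_len == 2 check corresponds to copying ehs_len * 32 = 64 bytes of Advanced
 * RPMB response EHS, starting at EHS_OFFSET_IN_RESPONSE (byte 32 of the
 * 512-byte response buffer, per the comments in the function).
 */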
7270
7271 /**
7272  * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
7273  * @cmd: SCSI command pointer
7274  *
7275  * Returns SUCCESS/FAILED
7276  */
7277 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7278 {
7279         unsigned long flags, pending_reqs = 0, not_cleared = 0;
7280         struct Scsi_Host *host;
7281         struct ufs_hba *hba;
7282         u32 pos;
7283         int err;
7284         u8 resp = 0xF, lun;
7285
7286         host = cmd->device->host;
7287         hba = shost_priv(host);
7288
7289         lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
7290         err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
7291         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7292                 if (!err)
7293                         err = resp;
7294                 goto out;
7295         }
7296
7297         /* clear the commands that were pending for corresponding LUN */
7298         spin_lock_irqsave(&hba->outstanding_lock, flags);
7299         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
7300                 if (hba->lrb[pos].lun == lun)
7301                         __set_bit(pos, &pending_reqs);
7302         hba->outstanding_reqs &= ~pending_reqs;
7303         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7304
7305         if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
7306                 spin_lock_irqsave(&hba->outstanding_lock, flags);
7307                 not_cleared = pending_reqs &
7308                         ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7309                 hba->outstanding_reqs |= not_cleared;
7310                 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7311
7312                 dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
7313                         __func__, not_cleared);
7314         }
7315         __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
7316
7317 out:
7318         hba->req_abort_count = 0;
7319         ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
7320         if (!err) {
7321                 err = SUCCESS;
7322         } else {
7323                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7324                 err = FAILED;
7325         }
7326         return err;
7327 }
7328
7329 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
7330 {
7331         struct ufshcd_lrb *lrbp;
7332         int tag;
7333
7334         for_each_set_bit(tag, &bitmap, hba->nutrs) {
7335                 lrbp = &hba->lrb[tag];
7336                 lrbp->req_abort_skip = true;
7337         }
7338 }
7339
7340 /**
7341  * ufshcd_try_to_abort_task - abort a specific task
7342  * @hba: Pointer to adapter instance
7343  * @tag: Task tag/index to be aborted
7344  *
7345  * Abort the pending command in the device by sending the UFS_ABORT_TASK task
7346  * management command, and in the host controller by clearing the door-bell
7347  * register. There can be a race between the controller sending the command to
7348  * the device and the abort being issued. To avoid that, first issue
7349  * UFS_QUERY_TASK to check if the command was really issued, then try to abort it.
7350  *
7351  * Returns zero on success, non-zero on failure
7352  */
7353 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7354 {
7355         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7356         int err = 0;
7357         int poll_cnt;
7358         u8 resp = 0xF;
7359         u32 reg;
7360
7361         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7362                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7363                                 UFS_QUERY_TASK, &resp);
7364                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7365                         /* cmd pending in the device */
7366                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7367                                 __func__, tag);
7368                         break;
7369                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7370                         /*
7371                          * cmd not pending in the device, check if it is
7372                          * in transition.
7373                          */
7374                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
7375                                 __func__, tag);
7376                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7377                         if (reg & (1 << tag)) {
7378                                 /* sleep for max. 200us to stabilize */
7379                                 usleep_range(100, 200);
7380                                 continue;
7381                         }
7382                         /* command completed already */
7383                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
7384                                 __func__, tag);
7385                         goto out;
7386                 } else {
7387                         dev_err(hba->dev,
7388                                 "%s: no response from device. tag = %d, err %d\n",
7389                                 __func__, tag, err);
7390                         if (!err)
7391                                 err = resp; /* service response error */
7392                         goto out;
7393                 }
7394         }
7395
7396         if (!poll_cnt) {
7397                 err = -EBUSY;
7398                 goto out;
7399         }
7400
7401         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7402                         UFS_ABORT_TASK, &resp);
7403         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7404                 if (!err) {
7405                         err = resp; /* service response error */
7406                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7407                                 __func__, tag, err);
7408                 }
7409                 goto out;
7410         }
7411
7412         err = ufshcd_clear_cmds(hba, 1U << tag);
7413         if (err)
7414                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7415                         __func__, tag, err);
7416
7417 out:
7418         return err;
7419 }
7420
7421 /**
7422  * ufshcd_abort - scsi host template eh_abort_handler callback
7423  * @cmd: SCSI command pointer
7424  *
7425  * Returns SUCCESS/FAILED
7426  */
7427 static int ufshcd_abort(struct scsi_cmnd *cmd)
7428 {
7429         struct Scsi_Host *host = cmd->device->host;
7430         struct ufs_hba *hba = shost_priv(host);
7431         int tag = scsi_cmd_to_rq(cmd)->tag;
7432         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7433         unsigned long flags;
7434         int err = FAILED;
7435         bool outstanding;
7436         u32 reg;
7437
7438         WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
7439
7440         ufshcd_hold(hba, false);
7441         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7442         /* If command is already aborted/completed, return FAILED. */
7443         if (!(test_bit(tag, &hba->outstanding_reqs))) {
7444                 dev_err(hba->dev,
7445                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7446                         __func__, tag, hba->outstanding_reqs, reg);
7447                 goto release;
7448         }
7449
7450         /* Print Transfer Request of aborted task */
7451         dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7452
7453         /*
7454          * Print detailed info about aborted request.
7455          * As more than one request might get aborted at the same time,
7456          * print full information only for the first aborted request in order
7457          * to reduce repeated printouts. For other aborted requests only print
7458          * basic details.
7459          */
7460         scsi_print_command(cmd);
7461         if (!hba->req_abort_count) {
7462                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7463                 ufshcd_print_evt_hist(hba);
7464                 ufshcd_print_host_state(hba);
7465                 ufshcd_print_pwr_info(hba);
7466                 ufshcd_print_tr(hba, tag, true);
7467         } else {
7468                 ufshcd_print_tr(hba, tag, false);
7469         }
7470         hba->req_abort_count++;
7471
7472         if (!(reg & (1 << tag))) {
7473                 dev_err(hba->dev,
7474                 "%s: cmd was completed, but without a notifying intr, tag = %d",
7475                 __func__, tag);
7476                 __ufshcd_transfer_req_compl(hba, 1UL << tag);
7477                 goto release;
7478         }
7479
7480         /*
7481          * Task abort to the device W-LUN is illegal. When this command
7482          * fails, due to the spec violation, the next step of SCSI error
7483          * handling will be to send a LU reset which, again, is a spec violation.
7484          * To avoid these unnecessary/illegal steps, first we clean up
7485          * the lrb taken by this cmd and re-set it in outstanding_reqs,
7486          * then queue the eh_work and bail.
7487          */
7488         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7489                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7490
7491                 spin_lock_irqsave(host->host_lock, flags);
7492                 hba->force_reset = true;
7493                 ufshcd_schedule_eh_work(hba);
7494                 spin_unlock_irqrestore(host->host_lock, flags);
7495                 goto release;
7496         }
7497
7498         /* Skip task abort in case previous aborts failed and report failure */
7499         if (lrbp->req_abort_skip) {
7500                 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7501                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7502                 goto release;
7503         }
7504
7505         err = ufshcd_try_to_abort_task(hba, tag);
7506         if (err) {
7507                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7508                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7509                 err = FAILED;
7510                 goto release;
7511         }
7512
7513         /*
7514          * Clear the corresponding bit from outstanding_reqs since the command
7515          * has been aborted successfully.
7516          */
7517         spin_lock_irqsave(&hba->outstanding_lock, flags);
7518         outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7519         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7520
7521         if (outstanding)
7522                 ufshcd_release_scsi_cmd(hba, lrbp);
7523
7524         err = SUCCESS;
7525
7526 release:
7527         /* Matches the ufshcd_hold() call at the start of this function. */
7528         ufshcd_release(hba);
7529         return err;
7530 }
7531
7532 /**
7533  * ufshcd_host_reset_and_restore - reset and restore host controller
7534  * @hba: per-adapter instance
7535  *
7536  * Note that host controller reset may issue DME_RESET to
7537  * the local and remote (device) UniPro stacks, and the attributes
7538  * are reset to their default state.
7539  *
7540  * Returns zero on success, non-zero on failure
7541  */
7542 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7543 {
7544         int err;
7545
7546         /*
7547          * Stop the host controller and complete the requests
7548          * cleared by h/w
7549          */
7550         ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
7551         ufshcd_hba_stop(hba);
7552         hba->silence_err_logs = true;
7553         ufshcd_complete_requests(hba);
7554         hba->silence_err_logs = false;
7555
7556         /* scale up clocks to max frequency before full reinitialization */
7557         ufshcd_scale_clks(hba, true);
7558
7559         err = ufshcd_hba_enable(hba);
7560
7561         /* Establish the link again and restore the device */
7562         if (!err)
7563                 err = ufshcd_probe_hba(hba, false);
7564
7565         if (err)
7566                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7567         ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7568         return err;
7569 }
7570
7571 /**
7572  * ufshcd_reset_and_restore - reset and re-initialize host/device
7573  * @hba: per-adapter instance
7574  *
7575  * Reset and recover device, host and re-establish link. This
7576  * is helpful to recover the communication in fatal error conditions.
7577  *
7578  * Returns zero on success, non-zero on failure
7579  */
7580 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7581 {
7582         u32 saved_err = 0;
7583         u32 saved_uic_err = 0;
7584         int err = 0;
7585         unsigned long flags;
7586         int retries = MAX_HOST_RESET_RETRIES;
7587
7588         spin_lock_irqsave(hba->host->host_lock, flags);
7589         do {
7590                 /*
7591                  * This is a fresh start; cache and clear the saved errors first,
7592                  * in case new errors are generated during reset and restore.
7593                  */
7594                 saved_err |= hba->saved_err;
7595                 saved_uic_err |= hba->saved_uic_err;
7596                 hba->saved_err = 0;
7597                 hba->saved_uic_err = 0;
7598                 hba->force_reset = false;
7599                 hba->ufshcd_state = UFSHCD_STATE_RESET;
7600                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7601
7602                 /* Reset the attached device */
7603                 ufshcd_device_reset(hba);
7604
7605                 err = ufshcd_host_reset_and_restore(hba);
7606
7607                 spin_lock_irqsave(hba->host->host_lock, flags);
7608                 if (err)
7609                         continue;
7610                 /* Do not exit unless operational or dead */
7611                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7612                     hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7613                     hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7614                         err = -EAGAIN;
7615         } while (err && --retries);
7616
7617         /*
7618          * Inform the SCSI mid-layer that we did reset so that it can handle
7619          * Unit Attention properly.
7620          */
7621         scsi_report_bus_reset(hba->host, 0);
7622         if (err) {
7623                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7624                 hba->saved_err |= saved_err;
7625                 hba->saved_uic_err |= saved_uic_err;
7626         }
7627         spin_unlock_irqrestore(hba->host->host_lock, flags);
7628
7629         return err;
7630 }
7631
7632 /**
7633  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7634  * @cmd: SCSI command pointer
7635  *
7636  * Returns SUCCESS/FAILED
7637  */
7638 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7639 {
7640         int err = SUCCESS;
7641         unsigned long flags;
7642         struct ufs_hba *hba;
7643
7644         hba = shost_priv(cmd->device->host);
7645
7646         spin_lock_irqsave(hba->host->host_lock, flags);
7647         hba->force_reset = true;
7648         ufshcd_schedule_eh_work(hba);
7649         dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7650         spin_unlock_irqrestore(hba->host->host_lock, flags);
7651
7652         flush_work(&hba->eh_work);
7653
7654         spin_lock_irqsave(hba->host->host_lock, flags);
7655         if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7656                 err = FAILED;
7657         spin_unlock_irqrestore(hba->host->host_lock, flags);
7658
7659         return err;
7660 }
7661
7662 /**
7663  * ufshcd_get_max_icc_level - calculate the ICC level
7664  * @sup_curr_uA: max. current supported by the regulator
7665  * @start_scan: row at the desc table to start scan from
7666  * @buff: power descriptor buffer
7667  *
7668  * Returns calculated max ICC level for specific regulator
7669  */
7670 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
7671                                     const char *buff)
7672 {
7673         int i;
7674         int curr_uA;
7675         u16 data;
7676         u16 unit;
7677
7678         for (i = start_scan; i >= 0; i--) {
7679                 data = get_unaligned_be16(&buff[2 * i]);
7680                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7681                                                 ATTR_ICC_LVL_UNIT_OFFSET;
7682                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7683                 switch (unit) {
7684                 case UFSHCD_NANO_AMP:
7685                         curr_uA = curr_uA / 1000;
7686                         break;
7687                 case UFSHCD_MILI_AMP:
7688                         curr_uA = curr_uA * 1000;
7689                         break;
7690                 case UFSHCD_AMP:
7691                         curr_uA = curr_uA * 1000 * 1000;
7692                         break;
7693                 case UFSHCD_MICRO_AMP:
7694                 default:
7695                         break;
7696                 }
7697                 if (sup_curr_uA >= curr_uA)
7698                         break;
7699         }
7700         if (i < 0) {
7701                 i = 0;
7702                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7703         }
7704
7705         return (u32)i;
7706 }
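
/*
 * Worked example (values are illustrative only): each 16-bit power descriptor
 * entry encodes a unit field and a value field. If an entry decodes to value
 * 200 with unit UFSHCD_MILI_AMP, the switch above converts it to
 * curr_uA = 200 * 1000 = 200000 uA; the same value with unit UFSHCD_NANO_AMP
 * becomes 200 / 1000 = 0 uA. The scan walks from start_scan downward and stops
 * at the first level whose current fits within sup_curr_uA.
 */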
7707
7708 /**
7709  * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7710  * In case the regulators are not initialized, 0 will be returned.
7711  * @hba: per-adapter instance
7712  * @desc_buf: power descriptor buffer to extract ICC levels from.
7713  *
7714  * Returns calculated ICC level
7715  */
7716 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7717                                                 const u8 *desc_buf)
7718 {
7719         u32 icc_level = 0;
7720
7721         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7722                                                 !hba->vreg_info.vccq2) {
7723                 /*
7724                  * Use dev_dbg to avoid messages during runtime PM; otherwise,
7725                  * messages written back to storage by user space would trigger
7726                  * a runtime resume, which would cause more messages, and so on
7727                  * in a never-ending cycle.
7728                  */
7729                 dev_dbg(hba->dev,
7730                         "%s: Regulator capability was not set, actvIccLevel=%d",
7731                                                         __func__, icc_level);
7732                 goto out;
7733         }
7734
7735         if (hba->vreg_info.vcc->max_uA)
7736                 icc_level = ufshcd_get_max_icc_level(
7737                                 hba->vreg_info.vcc->max_uA,
7738                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7739                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7740
7741         if (hba->vreg_info.vccq->max_uA)
7742                 icc_level = ufshcd_get_max_icc_level(
7743                                 hba->vreg_info.vccq->max_uA,
7744                                 icc_level,
7745                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7746
7747         if (hba->vreg_info.vccq2->max_uA)
7748                 icc_level = ufshcd_get_max_icc_level(
7749                                 hba->vreg_info.vccq2->max_uA,
7750                                 icc_level,
7751                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7752 out:
7753         return icc_level;
7754 }
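
/*
 * Note on the chaining above: each ufshcd_get_max_icc_level() call starts its
 * downward scan from the level returned for the previous regulator, so the
 * final icc_level is effectively the highest level at which the VCC, VCCQ and
 * VCCQ2 currents all fit within their regulator limits (assuming descriptor
 * currents increase with level, as the scan logic expects).
 */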
7755
7756 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7757 {
7758         int ret;
7759         u8 *desc_buf;
7760         u32 icc_level;
7761
7762         desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7763         if (!desc_buf)
7764                 return;
7765
7766         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7767                                      desc_buf, QUERY_DESC_MAX_SIZE);
7768         if (ret) {
7769                 dev_err(hba->dev,
7770                         "%s: Failed reading power descriptor ret = %d",
7771                         __func__, ret);
7772                 goto out;
7773         }
7774
7775         icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
7776         dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7777
7778         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7779                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7780
7781         if (ret)
7782                 dev_err(hba->dev,
7783                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7784                         __func__, icc_level, ret);
7785
7786 out:
7787         kfree(desc_buf);
7788 }
7789
7790 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7791 {
7792         scsi_autopm_get_device(sdev);
7793         blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7794         if (sdev->rpm_autosuspend)
7795                 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7796                                                  RPM_AUTOSUSPEND_DELAY_MS);
7797         scsi_autopm_put_device(sdev);
7798 }
7799
7800 /**
7801  * ufshcd_scsi_add_wlus - Adds required W-LUs
7802  * @hba: per-adapter instance
7803  *
7804  * UFS device specification requires the UFS devices to support 4 well known
7805  * logical units:
7806  *      "REPORT_LUNS" (address: 01h)
7807  *      "UFS Device" (address: 50h)
7808  *      "RPMB" (address: 44h)
7809  *      "BOOT" (address: 30h)
7810  * UFS device's power management needs to be controlled by "POWER CONDITION"
7811  * field of the SSU (START STOP UNIT) command. But this "power condition" field
7812  * will take effect only when it's sent to the "UFS device" well known logical
7813  * unit, hence we require the scsi_device instance to represent this logical
7814  * unit in order for the UFS host driver to send the SSU command for power management.
7815  *
7816  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7817  * Block) LU so that a user space process can control this LU. User space may also
7818  * want to have access to BOOT LU.
7819  *
7820  * This function adds scsi_device instances for each of the well known LUs
7821  * (except the "REPORT LUNS" LU).
7822  *
7823  * Returns zero on success (all required W-LUs are added successfully),
7824  * non-zero error value on failure (if failed to add any of the required W-LU).
7825  */
7826 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7827 {
7828         int ret = 0;
7829         struct scsi_device *sdev_boot, *sdev_rpmb;
7830
7831         hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
7832                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7833         if (IS_ERR(hba->ufs_device_wlun)) {
7834                 ret = PTR_ERR(hba->ufs_device_wlun);
7835                 hba->ufs_device_wlun = NULL;
7836                 goto out;
7837         }
7838         scsi_device_put(hba->ufs_device_wlun);
7839
7840         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7841                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7842         if (IS_ERR(sdev_rpmb)) {
7843                 ret = PTR_ERR(sdev_rpmb);
7844                 goto remove_ufs_device_wlun;
7845         }
7846         ufshcd_blk_pm_runtime_init(sdev_rpmb);
7847         scsi_device_put(sdev_rpmb);
7848
7849         sdev_boot = __scsi_add_device(hba->host, 0, 0,
7850                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7851         if (IS_ERR(sdev_boot)) {
7852                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7853         } else {
7854                 ufshcd_blk_pm_runtime_init(sdev_boot);
7855                 scsi_device_put(sdev_boot);
7856         }
7857         goto out;
7858
7859 remove_ufs_device_wlun:
7860         scsi_remove_device(hba->ufs_device_wlun);
7861 out:
7862         return ret;
7863 }
7864
7865 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
7866 {
7867         struct ufs_dev_info *dev_info = &hba->dev_info;
7868         u8 lun;
7869         u32 d_lu_wb_buf_alloc;
7870         u32 ext_ufs_feature;
7871
7872         if (!ufshcd_is_wb_allowed(hba))
7873                 return;
7874
7875         /*
7876          * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7877          * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7878          * enabled
7879          */
7880         if (!(dev_info->wspecversion >= 0x310 ||
7881               dev_info->wspecversion == 0x220 ||
7882              (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7883                 goto wb_disabled;
7884
7885         ext_ufs_feature = get_unaligned_be32(desc_buf +
7886                                         DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7887
7888         if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
7889                 goto wb_disabled;
7890
7891         /*
7892          * WB may be supported but not configured during provisioning. The spec
7893          * says that, in dedicated WB buffer mode, at most one LUN will have a
7894          * WB buffer configured.
7895          */
7896         dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7897
7898         dev_info->b_presrv_uspc_en =
7899                 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7900
7901         if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
7902                 if (!get_unaligned_be32(desc_buf +
7903                                    DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
7904                         goto wb_disabled;
7905         } else {
7906                 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7907                         d_lu_wb_buf_alloc = 0;
7908                         ufshcd_read_unit_desc_param(hba,
7909                                         lun,
7910                                         UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7911                                         (u8 *)&d_lu_wb_buf_alloc,
7912                                         sizeof(d_lu_wb_buf_alloc));
7913                         if (d_lu_wb_buf_alloc) {
7914                                 dev_info->wb_dedicated_lu = lun;
7915                                 break;
7916                         }
7917                 }
7918
7919                 if (!d_lu_wb_buf_alloc)
7920                         goto wb_disabled;
7921         }
7922
7923         if (!ufshcd_is_wb_buf_lifetime_available(hba))
7924                 goto wb_disabled;
7925
7926         return;
7927
7928 wb_disabled:
7929         hba->caps &= ~UFSHCD_CAP_WB_EN;
7930 }
7931
7932 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
7933 {
7934         struct ufs_dev_info *dev_info = &hba->dev_info;
7935         u32 ext_ufs_feature;
7936         u8 mask = 0;
7937
7938         if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
7939                 return;
7940
7941         ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7942
7943         if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
7944                 mask |= MASK_EE_TOO_LOW_TEMP;
7945
7946         if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
7947                 mask |= MASK_EE_TOO_HIGH_TEMP;
7948
7949         if (mask) {
7950                 ufshcd_enable_ee(hba, mask);
7951                 ufs_hwmon_probe(hba, mask);
7952         }
7953 }
7954
7955 static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
7956 {
7957         struct ufs_dev_info *dev_info = &hba->dev_info;
7958         u32 ext_ufs_feature;
7959         u32 ext_iid_en = 0;
7960         int err;
7961
7962         /* Only UFS-4.0 and above may support EXT_IID */
7963         if (dev_info->wspecversion < 0x400)
7964                 goto out;
7965
7966         ext_ufs_feature = get_unaligned_be32(desc_buf +
7967                                      DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7968         if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
7969                 goto out;
7970
7971         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7972                                       QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
7973         if (err)
7974                 dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
7975
7976 out:
7977         dev_info->b_ext_iid_en = ext_iid_en;
7978 }
7979
7980 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
7981                              const struct ufs_dev_quirk *fixups)
7982 {
7983         const struct ufs_dev_quirk *f;
7984         struct ufs_dev_info *dev_info = &hba->dev_info;
7985
7986         if (!fixups)
7987                 return;
7988
7989         for (f = fixups; f->quirk; f++) {
7990                 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7991                      f->wmanufacturerid == UFS_ANY_VENDOR) &&
7992                      ((dev_info->model &&
7993                        STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7994                       !strcmp(f->model, UFS_ANY_MODEL)))
7995                         hba->dev_quirks |= f->quirk;
7996         }
7997 }
7998 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
7999
8000 static void ufs_fixup_device_setup(struct ufs_hba *hba)
8001 {
8002         /* fix by general quirk table */
8003         ufshcd_fixup_dev_quirks(hba, ufs_fixups);
8004
8005         /* allow vendors to fix quirks */
8006         ufshcd_vops_fixup_dev_quirks(hba);
8007 }
8008
8009 static int ufs_get_device_desc(struct ufs_hba *hba)
8010 {
8011         int err;
8012         u8 model_index;
8013         u8 b_ufs_feature_sup;
8014         u8 *desc_buf;
8015         struct ufs_dev_info *dev_info = &hba->dev_info;
8016
8017         desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8018         if (!desc_buf) {
8019                 err = -ENOMEM;
8020                 goto out;
8021         }
8022
8023         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
8024                                      QUERY_DESC_MAX_SIZE);
8025         if (err) {
8026                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
8027                         __func__, err);
8028                 goto out;
8029         }
8030
8031         /*
8032          * getting vendor (manufacturerID) and Bank Index in big endian
8033          * format
8034          */
8035         dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
8036                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
8037
8038         /* getting Specification Version in big endian format */
8039         dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
8040                                       desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
8041         dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
8042         b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
8043
8044         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
8045
8046         if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
8047             (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
8048                 bool hpb_en = false;
8049
8050                 ufshpb_get_dev_info(hba, desc_buf);
8051
8052                 if (!ufshpb_is_legacy(hba))
8053                         err = ufshcd_query_flag_retry(hba,
8054                                                       UPIU_QUERY_OPCODE_READ_FLAG,
8055                                                       QUERY_FLAG_IDN_HPB_EN, 0,
8056                                                       &hpb_en);
8057
8058                 if (ufshpb_is_legacy(hba) || (!err && hpb_en))
8059                         dev_info->hpb_enabled = true;
8060         }
8061
8062         err = ufshcd_read_string_desc(hba, model_index,
8063                                       &dev_info->model, SD_ASCII_STD);
8064         if (err < 0) {
8065                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
8066                         __func__, err);
8067                 goto out;
8068         }
8069
8070         hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
8071                 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
8072
8073         ufs_fixup_device_setup(hba);
8074
8075         ufshcd_wb_probe(hba, desc_buf);
8076
8077         ufshcd_temp_notif_probe(hba, desc_buf);
8078
8079         if (hba->ext_iid_sup)
8080                 ufshcd_ext_iid_probe(hba, desc_buf);
8081
8082         /*
8083          * ufshcd_read_string_desc returns the size of the string;
8084          * reset the error value.
8085          */
8086         err = 0;
8087
8088 out:
8089         kfree(desc_buf);
8090         return err;
8091 }
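
/*
 * Worked example: wspecversion is assembled above from two descriptor bytes in
 * big-endian order, so bytes 0x03 and 0x10 yield wspecversion = 0x0310, i.e. a
 * UFS 3.1 device. This is the encoding that the WriteBooster probe
 * (>= 0x310 or == 0x220) and the EXT_IID probe (>= 0x400) test against.
 */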
8092
8093 static void ufs_put_device_desc(struct ufs_hba *hba)
8094 {
8095         struct ufs_dev_info *dev_info = &hba->dev_info;
8096
8097         kfree(dev_info->model);
8098         dev_info->model = NULL;
8099 }
8100
8101 /**
8102  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
8103  * @hba: per-adapter instance
8104  *
8105  * The PA_TActivate parameter can be tuned manually if the UniPro version is
8106  * less than 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
8107  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
8108  * the hibern8 exit latency.
8109  *
8110  * Returns zero on success, non-zero error value on failure.
8111  */
8112 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
8113 {
8114         int ret = 0;
8115         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
8116
8117         ret = ufshcd_dme_peer_get(hba,
8118                                   UIC_ARG_MIB_SEL(
8119                                         RX_MIN_ACTIVATETIME_CAPABILITY,
8120                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8121                                   &peer_rx_min_activatetime);
8122         if (ret)
8123                 goto out;
8124
8125         /* make sure proper unit conversion is applied */
8126         tuned_pa_tactivate =
8127                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
8128                  / PA_TACTIVATE_TIME_UNIT_US);
8129         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8130                              tuned_pa_tactivate);
8131
8132 out:
8133         return ret;
8134 }
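
/*
 * Worked example (assuming, purely for illustration, a RX_MIN_ACTIVATETIME_UNIT_US
 * of 100 us and a PA_TACTIVATE_TIME_UNIT_US of 10 us): if the peer reports
 * RX_MIN_ACTIVATETIME_CAPABILITY = 1, the required time is 1 * 100 us, which is
 * programmed above as PA_TACTIVATE = 100 / 10 = 10 units.
 */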
8135
8136 /**
8137  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
8138  * @hba: per-adapter instance
8139  *
8140  * The PA_Hibern8Time parameter can be tuned manually if the UniPro version is
8141  * less than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
8142  * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
8143  * This optimal value can help reduce the hibern8 exit latency.
8144  *
8145  * Returns zero on success, non-zero error value on failure.
8146  */
8147 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
8148 {
8149         int ret = 0;
8150         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
8151         u32 max_hibern8_time, tuned_pa_hibern8time;
8152
8153         ret = ufshcd_dme_get(hba,
8154                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
8155                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
8156                                   &local_tx_hibern8_time_cap);
8157         if (ret)
8158                 goto out;
8159
8160         ret = ufshcd_dme_peer_get(hba,
8161                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
8162                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8163                                   &peer_rx_hibern8_time_cap);
8164         if (ret)
8165                 goto out;
8166
8167         max_hibern8_time = max(local_tx_hibern8_time_cap,
8168                                peer_rx_hibern8_time_cap);
8169         /* make sure proper unit conversion is applied */
8170         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
8171                                 / PA_HIBERN8_TIME_UNIT_US);
8172         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
8173                              tuned_pa_hibern8time);
8174 out:
8175         return ret;
8176 }
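
/*
 * Worked example (assuming, purely for illustration, that HIBERN8TIME_UNIT_US
 * and PA_HIBERN8_TIME_UNIT_US are both 100 us): with a local TX capability of 3
 * and a peer RX capability of 5, max_hibern8_time = 5 and PA_HIBERN8TIME is
 * programmed above as (5 * 100) / 100 = 5 units.
 */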
8177
8178 /**
8179  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
8180  * less than device PA_TACTIVATE time.
8181  * @hba: per-adapter instance
8182  *
8183  * Some UFS devices require host PA_TACTIVATE to be lower than device
8184  * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
8185  * for such devices.
8186  *
8187  * Returns zero on success, non-zero error value on failure.
8188  */
8189 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
8190 {
8191         int ret = 0;
8192         u32 granularity, peer_granularity;
8193         u32 pa_tactivate, peer_pa_tactivate;
8194         u32 pa_tactivate_us, peer_pa_tactivate_us;
8195         static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
8196
8197         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8198                                   &granularity);
8199         if (ret)
8200                 goto out;
8201
8202         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8203                                   &peer_granularity);
8204         if (ret)
8205                 goto out;
8206
8207         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
8208             (granularity > PA_GRANULARITY_MAX_VAL)) {
8209                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
8210                         __func__, granularity);
8211                 return -EINVAL;
8212         }
8213
8214         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
8215             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
8216                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
8217                         __func__, peer_granularity);
8218                 return -EINVAL;
8219         }
8220
8221         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
8222         if (ret)
8223                 goto out;
8224
8225         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
8226                                   &peer_pa_tactivate);
8227         if (ret)
8228                 goto out;
8229
8230         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
8231         peer_pa_tactivate_us = peer_pa_tactivate *
8232                              gran_to_us_table[peer_granularity - 1];
8233
8234         if (pa_tactivate_us >= peer_pa_tactivate_us) {
8235                 u32 new_peer_pa_tactivate;
8236
8237                 new_peer_pa_tactivate = pa_tactivate_us /
8238                                       gran_to_us_table[peer_granularity - 1];
8239                 new_peer_pa_tactivate++;
8240                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8241                                           new_peer_pa_tactivate);
8242         }
8243
8244 out:
8245         return ret;
8246 }
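
/*
 * Worked example from the table above: gran_to_us_table[] maps PA_GRANULARITY
 * values 1..6 to {1, 4, 8, 16, 32, 100} us. With host granularity 4 (16 us) and
 * PA_TACTIVATE = 64, pa_tactivate_us = 1024 us; with peer granularity 3 (8 us)
 * and peer PA_TACTIVATE = 10, peer_pa_tactivate_us = 80 us. Since 1024 >= 80,
 * the peer value is bumped to 1024 / 8 + 1 = 129 units, so the device's
 * PA_TACTIVATE ends up strictly greater than the host's.
 */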
8247
8248 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
8249 {
8250         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
8251                 ufshcd_tune_pa_tactivate(hba);
8252                 ufshcd_tune_pa_hibern8time(hba);
8253         }
8254
8255         ufshcd_vops_apply_dev_quirks(hba);
8256
8257         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
8258                 /* set 1ms timeout for PA_TACTIVATE */
8259                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
8260
8261         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
8262                 ufshcd_quirk_tune_host_pa_tactivate(hba);
8263 }
8264
8265 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
8266 {
8267         hba->ufs_stats.hibern8_exit_cnt = 0;
8268         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
8269         hba->req_abort_count = 0;
8270 }
8271
8272 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
8273 {
8274         int err;
8275         u8 *desc_buf;
8276
8277         desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8278         if (!desc_buf) {
8279                 err = -ENOMEM;
8280                 goto out;
8281         }
8282
8283         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
8284                                      desc_buf, QUERY_DESC_MAX_SIZE);
8285         if (err) {
8286                 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
8287                                 __func__, err);
8288                 goto out;
8289         }
8290
8291         if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
8292                 hba->dev_info.max_lu_supported = 32;
8293         else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
8294                 hba->dev_info.max_lu_supported = 8;
8295
8296         if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
8297                 GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
8298                 ufshpb_get_geo_info(hba, desc_buf);
8299
8300 out:
8301         kfree(desc_buf);
8302         return err;
8303 }
8304
8305 struct ufs_ref_clk {
8306         unsigned long freq_hz;
8307         enum ufs_ref_clk_freq val;
8308 };
8309
8310 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
8311         {19200000, REF_CLK_FREQ_19_2_MHZ},
8312         {26000000, REF_CLK_FREQ_26_MHZ},
8313         {38400000, REF_CLK_FREQ_38_4_MHZ},
8314         {52000000, REF_CLK_FREQ_52_MHZ},
8315         {0, REF_CLK_FREQ_INVAL},
8316 };
8317
8318 static enum ufs_ref_clk_freq
8319 ufs_get_bref_clk_from_hz(unsigned long freq)
8320 {
8321         int i;
8322
8323         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
8324                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
8325                         return ufs_ref_clk_freqs[i].val;
8326
8327         return REF_CLK_FREQ_INVAL;
8328 }
8329
8330 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
8331 {
8332         unsigned long freq;
8333
8334         freq = clk_get_rate(refclk);
8335
8336         hba->dev_ref_clk_freq =
8337                 ufs_get_bref_clk_from_hz(freq);
8338
8339         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8340                 dev_err(hba->dev,
8341                 "invalid ref_clk setting = %ld\n", freq);
8342 }
8343
8344 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
8345 {
8346         int err;
8347         u32 ref_clk;
8348         u32 freq = hba->dev_ref_clk_freq;
8349
8350         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8351                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
8352
8353         if (err) {
8354                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
8355                         err);
8356                 goto out;
8357         }
8358
8359         if (ref_clk == freq)
8360                 goto out; /* nothing to update */
8361
8362         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8363                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
8364
8365         if (err) {
8366                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8367                         ufs_ref_clk_freqs[freq].freq_hz);
8368                 goto out;
8369         }
8370
8371         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8372                         ufs_ref_clk_freqs[freq].freq_hz);
8373
8374 out:
8375         return err;
8376 }
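
/*
 * Note: bRefClkFreq stores the enum ufs_ref_clk_freq value rather than a
 * frequency in Hz. For example, a 26 MHz reference clock parsed by
 * ufshcd_parse_dev_ref_clk_freq() yields REF_CLK_FREQ_26_MHZ, and that enum
 * value is what ufshcd_set_dev_ref_clk() writes to the attribute; the Hz value
 * is only looked up again (ufs_ref_clk_freqs[freq].freq_hz) for logging.
 */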
8377
8378 static int ufshcd_device_params_init(struct ufs_hba *hba)
8379 {
8380         bool flag;
8381         int ret;
8382
8383         /* Init UFS geometry descriptor related parameters */
8384         ret = ufshcd_device_geo_params_init(hba);
8385         if (ret)
8386                 goto out;
8387
8388         /* Check and apply UFS device quirks */
8389         ret = ufs_get_device_desc(hba);
8390         if (ret) {
8391                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8392                         __func__, ret);
8393                 goto out;
8394         }
8395
8396         ufshcd_get_ref_clk_gating_wait(hba);
8397
8398         if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
8399                         QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
8400                 hba->dev_info.f_power_on_wp_en = flag;
8401
8402         /* Probe maximum power mode co-supported by both UFS host and device */
8403         if (ufshcd_get_max_pwr_mode(hba))
8404                 dev_err(hba->dev,
8405                         "%s: Failed getting max supported power mode\n",
8406                         __func__);
8407 out:
8408         return ret;
8409 }
8410
8411 /**
8412  * ufshcd_add_lus - probe and add UFS logical units
8413  * @hba: per-adapter instance
8414  */
8415 static int ufshcd_add_lus(struct ufs_hba *hba)
8416 {
8417         int ret;
8418
8419         /* Add required well known logical units to scsi mid layer */
8420         ret = ufshcd_scsi_add_wlus(hba);
8421         if (ret)
8422                 goto out;
8423
8424         /* Initialize devfreq after UFS device is detected */
8425         if (ufshcd_is_clkscaling_supported(hba)) {
8426                 memcpy(&hba->clk_scaling.saved_pwr_info,
8427                         &hba->pwr_info,
8428                         sizeof(struct ufs_pa_layer_attr));
8429                 hba->clk_scaling.is_allowed = true;
8430
8431                 ret = ufshcd_devfreq_init(hba);
8432                 if (ret)
8433                         goto out;
8434
8435                 hba->clk_scaling.is_enabled = true;
8436                 ufshcd_init_clk_scaling_sysfs(hba);
8437         }
8438
8439         ufs_bsg_probe(hba);
8440         ufshpb_init(hba);
8441         scsi_scan_host(hba->host);
8442         pm_runtime_put_sync(hba->dev);
8443
8444 out:
8445         return ret;
8446 }
8447
8448 /* SDB - Single Doorbell */
8449 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
8450 {
8451         size_t ucdl_size, utrdl_size;
8452
8453         ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
8454         dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
8455                            hba->ucdl_dma_addr);
8456
8457         utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
8458         dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
8459                            hba->utrdl_dma_addr);
8460
8461         devm_kfree(hba->dev, hba->lrb);
8462 }
8463
8464 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
8465 {
8466         int ret;
8467         int old_nutrs = hba->nutrs;
8468
8469         ret = ufshcd_mcq_decide_queue_depth(hba);
8470         if (ret < 0)
8471                 return ret;
8472
8473         hba->nutrs = ret;
8474         ret = ufshcd_mcq_init(hba);
8475         if (ret)
8476                 goto err;
8477
8478         /*
8479          * The memory previously allocated for nutrs may not be enough in MCQ mode.
8480          * The number of supported tags in MCQ mode may be larger than in SDB mode.
8481          */
8482         if (hba->nutrs != old_nutrs) {
8483                 ufshcd_release_sdb_queue(hba, old_nutrs);
8484                 ret = ufshcd_memory_alloc(hba);
8485                 if (ret)
8486                         goto err;
8487                 ufshcd_host_memory_configure(hba);
8488         }
8489
8490         ret = ufshcd_mcq_memory_alloc(hba);
8491         if (ret)
8492                 goto err;
8493
8494         return 0;
8495 err:
8496         hba->nutrs = old_nutrs;
8497         return ret;
8498 }
8499
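/**
 * ufshcd_config_mcq - configure the host controller for MCQ operation
 * @hba: per adapter instance
 *
 * Configure ESI through the vendor ops (if available), enable the MCQ
 * interrupts, make the MCQ queues operational, program the maximum number of
 * active commands, select MCQ mode in REG_UFS_MEM_CFG and update the SCSI
 * host queue depth accordingly.
 */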
8500 static void ufshcd_config_mcq(struct ufs_hba *hba)
8501 {
8502         int ret;
8503
8504         ret = ufshcd_mcq_vops_config_esi(hba);
8505         dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
8506
8507         ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
8508         ufshcd_mcq_make_queues_operational(hba);
8509         ufshcd_mcq_config_mac(hba, hba->nutrs);
8510
8511         hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
8512         hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
8513
8514         /* Select MCQ mode */
8515         ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
8516                       REG_UFS_MEM_CFG);
8517         hba->mcq_enabled = true;
8518
8519         dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
8520                  hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
8521                  hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
8522                  hba->nutrs);
8523 }
8524
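/**
 * ufshcd_device_init - initialize the link and the UFS device
 * @hba: per adapter instance
 * @init_dev_params: whether to (re)initialize the device parameters and, when
 *	MCQ is supported, to allocate and configure the MCQ resources
 *
 * Perform link startup, verify the device with a NOP OUT UPIU, complete the
 * device initialization, optionally initialize the device parameters and MCQ
 * resources, tune the UniPro parameters and switch to the maximum supported
 * power mode.
 *
 * Return: 0 upon success; a negative error code upon failure.
 */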
8525 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
8526 {
8527         int ret;
8528         struct Scsi_Host *host = hba->host;
8529
8530         hba->ufshcd_state = UFSHCD_STATE_RESET;
8531
8532         ret = ufshcd_link_startup(hba);
8533         if (ret)
8534                 return ret;
8535
8536         if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8537                 return ret;
8538
8539         /* Debug counters initialization */
8540         ufshcd_clear_dbg_ufs_stats(hba);
8541
8542         /* UniPro link is active now */
8543         ufshcd_set_link_active(hba);
8544
8545         /* Reconfigure MCQ upon reset */
8546         if (is_mcq_enabled(hba) && !init_dev_params)
8547                 ufshcd_config_mcq(hba);
8548
8549         /* Verify device initialization by sending NOP OUT UPIU */
8550         ret = ufshcd_verify_dev_init(hba);
8551         if (ret)
8552                 return ret;
8553
8554         /* Initiate UFS initialization and wait until it completes */
8555         ret = ufshcd_complete_dev_init(hba);
8556         if (ret)
8557                 return ret;
8558
8559         /*
8560          * Initialize the UFS device parameters used by the driver; these
8561          * parameters are associated with the UFS descriptors.
8562          */
8563         if (init_dev_params) {
8564                 ret = ufshcd_device_params_init(hba);
8565                 if (ret)
8566                         return ret;
8567                 if (is_mcq_supported(hba) && !hba->scsi_host_added) {
8568                         ret = ufshcd_alloc_mcq(hba);
8569                         if (!ret) {
8570                                 ufshcd_config_mcq(hba);
8571                         } else {
8572                                 /* Continue with SDB mode */
8573                                 use_mcq_mode = false;
8574                                 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
8575                                          ret);
8576                         }
8577                         ret = scsi_add_host(host, hba->dev);
8578                         if (ret) {
8579                                 dev_err(hba->dev, "scsi_add_host failed\n");
8580                                 return ret;
8581                         }
8582                         hba->scsi_host_added = true;
8583                 } else if (is_mcq_supported(hba)) {
8584                         /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
8585                         ufshcd_config_mcq(hba);
8586                 }
8587         }
8588
8589         ufshcd_tune_unipro_params(hba);
8590
8591         /* UFS device is also active now */
8592         ufshcd_set_ufs_dev_active(hba);
8593         ufshcd_force_reset_auto_bkops(hba);
8594
8595         /* Gear up to HS gear if supported */
8596         if (hba->max_pwr_info.is_valid) {
8597                 /*
8598                  * Set the right value to bRefClkFreq before attempting to
8599                  * switch to HS gears.
8600                  */
8601                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8602                         ufshcd_set_dev_ref_clk(hba);
8603                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8604                 if (ret) {
8605                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8606                                         __func__, ret);
8607                         return ret;
8608                 }
8609         }
8610
8611         return 0;
8612 }
8613
8614 /**
8615  * ufshcd_probe_hba - probe hba to detect device and initialize it
8616  * @hba: per-adapter instance
8617  * @init_dev_params: whether or not to call ufshcd_device_params_init().
8618  *
8619  * Execute link-startup and verify device initialization
8620  */
8621 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8622 {
8623         ktime_t start = ktime_get();
8624         unsigned long flags;
8625         int ret;
8626
8627         ret = ufshcd_device_init(hba, init_dev_params);
8628         if (ret)
8629                 goto out;
8630
8631         if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
8632                 /* Reset the device and controller before doing reinit */
8633                 ufshcd_device_reset(hba);
8634                 ufshcd_hba_stop(hba);
8635                 ufshcd_vops_reinit_notify(hba);
8636                 ret = ufshcd_hba_enable(hba);
8637                 if (ret) {
8638                         dev_err(hba->dev, "Host controller enable failed\n");
8639                         ufshcd_print_evt_hist(hba);
8640                         ufshcd_print_host_state(hba);
8641                         goto out;
8642                 }
8643
8644                 /* Reinit the device */
8645                 ret = ufshcd_device_init(hba, init_dev_params);
8646                 if (ret)
8647                         goto out;
8648         }
8649
8650         ufshcd_print_pwr_info(hba);
8651
8652         /*
8653          * bActiveICCLevel is volatile for the UFS device (per the UFS 2.1 spec)
8654          * and for removable UFS cards as well, hence always set this parameter.
8655          * Note: the error handler may issue a device reset, which also resets
8656          * bActiveICCLevel, so it is always safe to set it here.
8657          */
8658         ufshcd_set_active_icc_lvl(hba);
8659
8660         /* Enable UFS Write Booster if supported */
8661         ufshcd_configure_wb(hba);
8662
8663         if (hba->ee_usr_mask)
8664                 ufshcd_write_ee_control(hba);
8665         /* Enable Auto-Hibernate if configured */
8666         ufshcd_auto_hibern8_enable(hba);
8667
8668         ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
8669 out:
8670         spin_lock_irqsave(hba->host->host_lock, flags);
8671         if (ret)
8672                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8673         else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8674                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8675         spin_unlock_irqrestore(hba->host->host_lock, flags);
8676
8677         trace_ufshcd_init(dev_name(hba->dev), ret,
8678                 ktime_to_us(ktime_sub(ktime_get(), start)),
8679                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8680         return ret;
8681 }
8682
8683 /**
8684  * ufshcd_async_scan - asynchronous execution for probing hba
8685  * @data: data pointer to pass to this function
8686  * @cookie: cookie data
8687  */
8688 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8689 {
8690         struct ufs_hba *hba = (struct ufs_hba *)data;
8691         int ret;
8692
8693         down(&hba->host_sem);
8694         /* Initialize hba, detect and initialize UFS device */
8695         ret = ufshcd_probe_hba(hba, true);
8696         up(&hba->host_sem);
8697         if (ret)
8698                 goto out;
8699
8700         /* Probe and add UFS logical units  */
8701         ret = ufshcd_add_lus(hba);
8702 out:
8703         /*
8704          * If we failed to initialize the device or the device is not
8705          * present, turn off the power/clocks etc.
8706          */
8707         if (ret) {
8708                 pm_runtime_put_sync(hba->dev);
8709                 ufshcd_hba_exit(hba);
8710         }
8711 }
8712
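/**
 * ufshcd_eh_timed_out - SCSI command timeout handler
 * @scmd: the SCSI command that timed out
 *
 * Outside of system suspend, let the SCSI core error handler deal with the
 * timeout. While the system is suspending, the only pending command is a
 * START STOP UNIT command, so recover the link here to avoid a deadlock
 * between ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
 *
 * Return: SCSI_EH_NOT_HANDLED, SCSI_EH_RESET_TIMER or SCSI_EH_DONE.
 */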
8713 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
8714 {
8715         struct ufs_hba *hba = shost_priv(scmd->device->host);
8716
8717         if (!hba->system_suspending) {
8718                 /* Activate the error handler in the SCSI core. */
8719                 return SCSI_EH_NOT_HANDLED;
8720         }
8721
8722         /*
8723          * If we get here we know that no TMFs are outstanding and also that
8724          * the only pending command is a START STOP UNIT command. Handle the
8725          * timeout of that command directly to prevent a deadlock between
8726          * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
8727          */
8728         ufshcd_link_recovery(hba);
8729         dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
8730                  __func__, hba->outstanding_tasks);
8731
8732         return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
8733 }
8734
8735 static const struct attribute_group *ufshcd_driver_groups[] = {
8736         &ufs_sysfs_unit_descriptor_group,
8737         &ufs_sysfs_lun_attributes_group,
8738 #ifdef CONFIG_SCSI_UFS_HPB
8739         &ufs_sysfs_hpb_stat_group,
8740         &ufs_sysfs_hpb_param_group,
8741 #endif
8742         NULL,
8743 };
8744
8745 static struct ufs_hba_variant_params ufs_hba_vps = {
8746         .hba_enable_delay_us            = 1000,
8747         .wb_flush_threshold             = UFS_WB_BUF_REMAIN_PERCENT(40),
8748         .devfreq_profile.polling_ms     = 100,
8749         .devfreq_profile.target         = ufshcd_devfreq_target,
8750         .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8751         .ondemand_data.upthreshold      = 70,
8752         .ondemand_data.downdifferential = 5,
8753 };
8754
8755 static const struct scsi_host_template ufshcd_driver_template = {
8756         .module                 = THIS_MODULE,
8757         .name                   = UFSHCD,
8758         .proc_name              = UFSHCD,
8759         .map_queues             = ufshcd_map_queues,
8760         .queuecommand           = ufshcd_queuecommand,
8761         .mq_poll                = ufshcd_poll,
8762         .slave_alloc            = ufshcd_slave_alloc,
8763         .slave_configure        = ufshcd_slave_configure,
8764         .slave_destroy          = ufshcd_slave_destroy,
8765         .change_queue_depth     = ufshcd_change_queue_depth,
8766         .eh_abort_handler       = ufshcd_abort,
8767         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
8768         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
8769         .eh_timed_out           = ufshcd_eh_timed_out,
8770         .this_id                = -1,
8771         .sg_tablesize           = SG_ALL,
8772         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
8773         .can_queue              = UFSHCD_CAN_QUEUE,
8774         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
8775         .max_sectors            = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
8776         .max_host_blocked       = 1,
8777         .track_queue_depth      = 1,
8778         .skip_settle_delay      = 1,
8779         .sdev_groups            = ufshcd_driver_groups,
8780         .rpm_autosuspend_delay  = RPM_AUTOSUSPEND_DELAY_MS,
8781 };
8782
8783 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8784                                    int ua)
8785 {
8786         int ret;
8787
8788         if (!vreg)
8789                 return 0;
8790
8791         /*
8792          * The "set_load" operation is only required for regulators that have
8793          * a current limit configured. Otherwise a zero max_uA may cause
8794          * unexpected behavior when the regulator is enabled or put into high
8795          * power mode.
8796          */
8797         if (!vreg->max_uA)
8798                 return 0;
8799
8800         ret = regulator_set_load(vreg->reg, ua);
8801         if (ret < 0) {
8802                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8803                                 __func__, vreg->name, ua, ret);
8804         }
8805
8806         return ret;
8807 }
8808
8809 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8810                                          struct ufs_vreg *vreg)
8811 {
8812         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
8813 }
8814
8815 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8816                                          struct ufs_vreg *vreg)
8817 {
8818         if (!vreg)
8819                 return 0;
8820
8821         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8822 }
8823
8824 static int ufshcd_config_vreg(struct device *dev,
8825                 struct ufs_vreg *vreg, bool on)
8826 {
8827         if (regulator_count_voltages(vreg->reg) <= 0)
8828                 return 0;
8829
8830         return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
8831 }
8832
8833 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8834 {
8835         int ret = 0;
8836
8837         if (!vreg || vreg->enabled)
8838                 goto out;
8839
8840         ret = ufshcd_config_vreg(dev, vreg, true);
8841         if (!ret)
8842                 ret = regulator_enable(vreg->reg);
8843
8844         if (!ret)
8845                 vreg->enabled = true;
8846         else
8847                 dev_err(dev, "%s: %s enable failed, err=%d\n",
8848                                 __func__, vreg->name, ret);
8849 out:
8850         return ret;
8851 }
8852
8853 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8854 {
8855         int ret = 0;
8856
8857         if (!vreg || !vreg->enabled || vreg->always_on)
8858                 goto out;
8859
8860         ret = regulator_disable(vreg->reg);
8861
8862         if (!ret) {
8863                 /* ignore errors on applying disable config */
8864                 ufshcd_config_vreg(dev, vreg, false);
8865                 vreg->enabled = false;
8866         } else {
8867                 dev_err(dev, "%s: %s disable failed, err=%d\n",
8868                                 __func__, vreg->name, ret);
8869         }
8870 out:
8871         return ret;
8872 }
8873
8874 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8875 {
8876         int ret = 0;
8877         struct device *dev = hba->dev;
8878         struct ufs_vreg_info *info = &hba->vreg_info;
8879
8880         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8881         if (ret)
8882                 goto out;
8883
8884         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8885         if (ret)
8886                 goto out;
8887
8888         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8889
8890 out:
8891         if (ret) {
8892                 ufshcd_toggle_vreg(dev, info->vccq2, false);
8893                 ufshcd_toggle_vreg(dev, info->vccq, false);
8894                 ufshcd_toggle_vreg(dev, info->vcc, false);
8895         }
8896         return ret;
8897 }
8898
8899 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8900 {
8901         struct ufs_vreg_info *info = &hba->vreg_info;
8902
8903         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8904 }
8905
8906 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8907 {
8908         int ret = 0;
8909
8910         if (!vreg)
8911                 goto out;
8912
8913         vreg->reg = devm_regulator_get(dev, vreg->name);
8914         if (IS_ERR(vreg->reg)) {
8915                 ret = PTR_ERR(vreg->reg);
8916                 dev_err(dev, "%s: %s get failed, err=%d\n",
8917                                 __func__, vreg->name, ret);
8918         }
8919 out:
8920         return ret;
8921 }
8922 EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
8923
8924 static int ufshcd_init_vreg(struct ufs_hba *hba)
8925 {
8926         int ret = 0;
8927         struct device *dev = hba->dev;
8928         struct ufs_vreg_info *info = &hba->vreg_info;
8929
8930         ret = ufshcd_get_vreg(dev, info->vcc);
8931         if (ret)
8932                 goto out;
8933
8934         ret = ufshcd_get_vreg(dev, info->vccq);
8935         if (!ret)
8936                 ret = ufshcd_get_vreg(dev, info->vccq2);
8937 out:
8938         return ret;
8939 }
8940
8941 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8942 {
8943         struct ufs_vreg_info *info = &hba->vreg_info;
8944
8945         return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8946 }
8947
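/**
 * ufshcd_setup_clocks - enable or disable the host controller clocks
 * @hba: per adapter instance
 * @on: true to enable the clocks, false to disable them
 *
 * Walk hba->clk_list_head and enable or disable each clock, skipping clocks
 * that must stay enabled to keep the link active. The vendor setup_clocks
 * hook is invoked before and after the change, and the clock gating state is
 * updated when the clocks are switched on.
 *
 * Return: 0 upon success; a negative error code upon failure.
 */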
8948 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
8949 {
8950         int ret = 0;
8951         struct ufs_clk_info *clki;
8952         struct list_head *head = &hba->clk_list_head;
8953         unsigned long flags;
8954         ktime_t start = ktime_get();
8955         bool clk_state_changed = false;
8956
8957         if (list_empty(head))
8958                 goto out;
8959
8960         ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8961         if (ret)
8962                 return ret;
8963
8964         list_for_each_entry(clki, head, list) {
8965                 if (!IS_ERR_OR_NULL(clki->clk)) {
8966                         /*
8967                          * Don't disable clocks which are needed
8968                          * to keep the link active.
8969                          */
8970                         if (ufshcd_is_link_active(hba) &&
8971                             clki->keep_link_active)
8972                                 continue;
8973
8974                         clk_state_changed = on ^ clki->enabled;
8975                         if (on && !clki->enabled) {
8976                                 ret = clk_prepare_enable(clki->clk);
8977                                 if (ret) {
8978                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8979                                                 __func__, clki->name, ret);
8980                                         goto out;
8981                                 }
8982                         } else if (!on && clki->enabled) {
8983                                 clk_disable_unprepare(clki->clk);
8984                         }
8985                         clki->enabled = on;
8986                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8987                                         clki->name, on ? "en" : "dis");
8988                 }
8989         }
8990
8991         ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8992         if (ret)
8993                 return ret;
8994
8995 out:
8996         if (ret) {
8997                 list_for_each_entry(clki, head, list) {
8998                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8999                                 clk_disable_unprepare(clki->clk);
9000                 }
9001         } else if (!ret && on) {
9002                 spin_lock_irqsave(hba->host->host_lock, flags);
9003                 hba->clk_gating.state = CLKS_ON;
9004                 trace_ufshcd_clk_gating(dev_name(hba->dev),
9005                                         hba->clk_gating.state);
9006                 spin_unlock_irqrestore(hba->host->host_lock, flags);
9007         }
9008
9009         if (clk_state_changed)
9010                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
9011                         (on ? "on" : "off"),
9012                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9013         return ret;
9014 }
9015
9016 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
9017 {
9018         u32 freq;
9019         int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
9020
9021         if (ret) {
9022                 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
9023                 return REF_CLK_FREQ_INVAL;
9024         }
9025
9026         return ufs_get_bref_clk_from_hz(freq);
9027 }
9028
9029 static int ufshcd_init_clocks(struct ufs_hba *hba)
9030 {
9031         int ret = 0;
9032         struct ufs_clk_info *clki;
9033         struct device *dev = hba->dev;
9034         struct list_head *head = &hba->clk_list_head;
9035
9036         if (list_empty(head))
9037                 goto out;
9038
9039         list_for_each_entry(clki, head, list) {
9040                 if (!clki->name)
9041                         continue;
9042
9043                 clki->clk = devm_clk_get(dev, clki->name);
9044                 if (IS_ERR(clki->clk)) {
9045                         ret = PTR_ERR(clki->clk);
9046                         dev_err(dev, "%s: %s clk get failed, %d\n",
9047                                         __func__, clki->name, ret);
9048                         goto out;
9049                 }
9050
9051                 /*
9052                  * Parse device ref clk freq as per device tree "ref_clk".
9053                  * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
9054                  * in ufshcd_alloc_host().
9055                  */
9056                 if (!strcmp(clki->name, "ref_clk"))
9057                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
9058
9059                 if (clki->max_freq) {
9060                         ret = clk_set_rate(clki->clk, clki->max_freq);
9061                         if (ret) {
9062                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
9063                                         __func__, clki->name,
9064                                         clki->max_freq, ret);
9065                                 goto out;
9066                         }
9067                         clki->curr_freq = clki->max_freq;
9068                 }
9069                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
9070                                 clki->name, clk_get_rate(clki->clk));
9071         }
9072 out:
9073         return ret;
9074 }
9075
9076 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
9077 {
9078         int err = 0;
9079
9080         if (!hba->vops)
9081                 goto out;
9082
9083         err = ufshcd_vops_init(hba);
9084         if (err)
9085                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
9086                         __func__, ufshcd_get_var_name(hba), err);
9087 out:
9088         return err;
9089 }
9090
9091 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
9092 {
9093         if (!hba->vops)
9094                 return;
9095
9096         ufshcd_vops_exit(hba);
9097 }
9098
9099 static int ufshcd_hba_init(struct ufs_hba *hba)
9100 {
9101         int err;
9102
9103         /*
9104          * Handle the host controller power separately from the UFS device power
9105          * rails, since host controller power collapse is controlled independently
9106          * of UFS device power collapse.
9107          * Also, enable the host controller power before going ahead with the rest
9108          * of the initialization here.
9109          */
9110         err = ufshcd_init_hba_vreg(hba);
9111         if (err)
9112                 goto out;
9113
9114         err = ufshcd_setup_hba_vreg(hba, true);
9115         if (err)
9116                 goto out;
9117
9118         err = ufshcd_init_clocks(hba);
9119         if (err)
9120                 goto out_disable_hba_vreg;
9121
9122         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
9123                 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
9124
9125         err = ufshcd_setup_clocks(hba, true);
9126         if (err)
9127                 goto out_disable_hba_vreg;
9128
9129         err = ufshcd_init_vreg(hba);
9130         if (err)
9131                 goto out_disable_clks;
9132
9133         err = ufshcd_setup_vreg(hba, true);
9134         if (err)
9135                 goto out_disable_clks;
9136
9137         err = ufshcd_variant_hba_init(hba);
9138         if (err)
9139                 goto out_disable_vreg;
9140
9141         ufs_debugfs_hba_init(hba);
9142
9143         hba->is_powered = true;
9144         goto out;
9145
9146 out_disable_vreg:
9147         ufshcd_setup_vreg(hba, false);
9148 out_disable_clks:
9149         ufshcd_setup_clocks(hba, false);
9150 out_disable_hba_vreg:
9151         ufshcd_setup_hba_vreg(hba, false);
9152 out:
9153         return err;
9154 }
9155
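/**
 * ufshcd_hba_exit - release the resources acquired by ufshcd_hba_init()
 * @hba: per adapter instance
 *
 * Tear down clock scaling, clock gating, the error handling workqueue, the
 * debugfs entries and the variant (vendor) driver, then switch off the
 * regulators and clocks and mark the HBA as unpowered.
 */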
9156 static void ufshcd_hba_exit(struct ufs_hba *hba)
9157 {
9158         if (hba->is_powered) {
9159                 ufshcd_exit_clk_scaling(hba);
9160                 ufshcd_exit_clk_gating(hba);
9161                 if (hba->eh_wq)
9162                         destroy_workqueue(hba->eh_wq);
9163                 ufs_debugfs_hba_exit(hba);
9164                 ufshcd_variant_hba_exit(hba);
9165                 ufshcd_setup_vreg(hba, false);
9166                 ufshcd_setup_clocks(hba, false);
9167                 ufshcd_setup_hba_vreg(hba, false);
9168                 hba->is_powered = false;
9169                 ufs_put_device_desc(hba);
9170         }
9171 }
9172
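/*
 * Send a START STOP UNIT command to @sdev (the UFS device well-known LUN).
 * The power condition field of the CDB is derived from @pwr_mode, and the
 * request is marked as a PM request so that it can be processed while the
 * request queue is suspended and is failed if error recovery is in progress.
 */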
9173 static int ufshcd_execute_start_stop(struct scsi_device *sdev,
9174                                      enum ufs_dev_pwr_mode pwr_mode,
9175                                      struct scsi_sense_hdr *sshdr)
9176 {
9177         const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
9178         const struct scsi_exec_args args = {
9179                 .sshdr = sshdr,
9180                 .req_flags = BLK_MQ_REQ_PM,
9181                 .scmd_flags = SCMD_FAIL_IF_RECOVERING,
9182         };
9183
9184         return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
9185                         /*bufflen=*/0, /*timeout=*/HZ, /*retries=*/0, &args);
9186 }
9187
9188 /**
9189  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
9190  *                           power mode
9191  * @hba: per adapter instance
9192  * @pwr_mode: device power mode to set
9193  *
9194  * Returns 0 if the requested power mode is set successfully;
9195  * returns < 0 if it fails to set the requested power mode.
9196  */
9197 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9198                                      enum ufs_dev_pwr_mode pwr_mode)
9199 {
9200         struct scsi_sense_hdr sshdr;
9201         struct scsi_device *sdp;
9202         unsigned long flags;
9203         int ret, retries;
9204
9205         spin_lock_irqsave(hba->host->host_lock, flags);
9206         sdp = hba->ufs_device_wlun;
9207         if (sdp && scsi_device_online(sdp))
9208                 ret = scsi_device_get(sdp);
9209         else
9210                 ret = -ENODEV;
9211         spin_unlock_irqrestore(hba->host->host_lock, flags);
9212
9213         if (ret)
9214                 return ret;
9215
9216         /*
9217          * If scsi commands fail, the scsi mid-layer schedules scsi error-
9218          * handling, which would wait for the host to be resumed. Since we know
9219          * we are functional while we are here, skip host resume in error
9220          * handling context.
9221          */
9222         hba->host->eh_noresume = 1;
9223
9224         /*
9225          * This function is generally called from the power management
9226          * callbacks, hence set the RQF_PM flag so that it doesn't resume
9227          * already-suspended children.
9228          */
9229         for (retries = 3; retries > 0; --retries) {
9230                 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
9231                 /*
9232                  * scsi_execute_cmd() only returns a negative value if the request
9233                  * queue is dying.
9234                  */
9235                 if (ret <= 0)
9236                         break;
9237         }
9238         if (ret) {
9239                 sdev_printk(KERN_WARNING, sdp,
9240                             "START_STOP failed for power mode: %d, result %x\n",
9241                             pwr_mode, ret);
9242                 if (ret > 0) {
9243                         if (scsi_sense_valid(&sshdr))
9244                                 scsi_print_sense_hdr(sdp, NULL, &sshdr);
9245                         ret = -EIO;
9246                 }
9247         } else {
9248                 hba->curr_dev_pwr_mode = pwr_mode;
9249         }
9250
9251         scsi_device_put(sdp);
9252         hba->host->eh_noresume = 0;
9253         return ret;
9254 }
9255
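/**
 * ufshcd_link_state_transition - put the UniPro link in the requested state
 * @hba: per adapter instance
 * @req_link_state: the requested link power state
 * @check_for_bkops: whether auto-bkops may prevent turning the link off
 *
 * Enter Hibern8 for UIC_LINK_HIBERN8_STATE. For UIC_LINK_OFF_STATE, put the
 * link in Hibern8 first and then stop the host controller, unless auto-bkops
 * requires the link (and hence the device) to stay powered.
 *
 * Return: 0 upon success; a negative error code upon failure.
 */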
9256 static int ufshcd_link_state_transition(struct ufs_hba *hba,
9257                                         enum uic_link_state req_link_state,
9258                                         bool check_for_bkops)
9259 {
9260         int ret = 0;
9261
9262         if (req_link_state == hba->uic_link_state)
9263                 return 0;
9264
9265         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9266                 ret = ufshcd_uic_hibern8_enter(hba);
9267                 if (!ret) {
9268                         ufshcd_set_link_hibern8(hba);
9269                 } else {
9270                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9271                                         __func__, ret);
9272                         goto out;
9273                 }
9274         }
9275         /*
9276          * If autobkops is enabled, link can't be turned off because
9277          * turning off the link would also turn off the device, except in the
9278          * case of DeepSleep where the device is expected to remain powered.
9279          */
9280         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
9281                  (!check_for_bkops || !hba->auto_bkops_enabled)) {
9282                 /*
9283                  * Make sure that the link is in low power mode; we currently do
9284                  * this by putting the link in Hibern8. Another way to put the
9285                  * link in low power mode is to send a DME end point reset to the
9286                  * device and then send a DME reset command to the local UniPro,
9287                  * but putting the link in Hibern8 is much faster.
9288                  *
9289                  * Note also that putting the link in Hibern8 is a requirement
9290                  * for entering DeepSleep.
9291                  */
9292                 ret = ufshcd_uic_hibern8_enter(hba);
9293                 if (ret) {
9294                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9295                                         __func__, ret);
9296                         goto out;
9297                 }
9298                 /*
9299                  * Change controller state to "reset state" which
9300                  * should also put the link in off/reset state
9301                  */
9302                 ufshcd_hba_stop(hba);
9303                 /*
9304                  * TODO: Check if we need any delay to make sure that
9305                  * controller is reset
9306                  */
9307                 ufshcd_set_link_off(hba);
9308         }
9309
9310 out:
9311         return ret;
9312 }
9313
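/*
 * Put the UFS device power rails in low power mode: switch off VCC (or all
 * rails when both the device and the link are off and power-on write protect
 * is not used) and put VCCQ/VCCQ2 in LPM where allowed, applying the device
 * quirk delays before and after the transition.
 */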
9314 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9315 {
9316         bool vcc_off = false;
9317
9318         /*
9319          * It seems some UFS devices may keep drawing more than sleep current
9320          * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
9321          * To avoid this situation, add a 2ms delay before putting these UFS
9322          * rails in LPM mode.
9323          */
9324         if (!ufshcd_is_link_active(hba) &&
9325             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
9326                 usleep_range(2000, 2100);
9327
9328         /*
9329          * If the UFS device is in the UFS_Sleep state, turn off the VCC rail to
9330          * save some power.
9331          *
9332          * If UFS device and link is in OFF state, all power supplies (VCC,
9333          * VCCQ, VCCQ2) can be turned off if power on write protect is not
9334          * required. If UFS link is inactive (Hibern8 or OFF state) and device
9335          * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
9336          *
9337          * Ignore the error returned by ufshcd_toggle_vreg() as the device is
9338          * anyway in a low power state, which saves some power.
9339          *
9340          * If Write Booster is enabled and the device needs to flush the WB
9341          * buffer OR if bkops status is urgent for WB, keep Vcc on.
9342          */
9343         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9344             !hba->dev_info.is_lu_power_on_wp) {
9345                 ufshcd_setup_vreg(hba, false);
9346                 vcc_off = true;
9347         } else if (!ufshcd_is_ufs_dev_active(hba)) {
9348                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9349                 vcc_off = true;
9350                 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
9351                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9352                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9353                 }
9354         }
9355
9356         /*
9357          * Some UFS devices require a delay after the VCC power rail is turned off.
9358          */
9359         if (vcc_off && hba->vreg_info.vcc &&
9360                 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
9361                 usleep_range(5000, 5100);
9362 }
9363
9364 #ifdef CONFIG_PM
9365 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
9366 {
9367         int ret = 0;
9368
9369         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9370             !hba->dev_info.is_lu_power_on_wp) {
9371                 ret = ufshcd_setup_vreg(hba, true);
9372         } else if (!ufshcd_is_ufs_dev_active(hba)) {
9373                 if (!ufshcd_is_link_active(hba)) {
9374                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
9375                         if (ret)
9376                                 goto vcc_disable;
9377                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
9378                         if (ret)
9379                                 goto vccq_lpm;
9380                 }
9381                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
9382         }
9383         goto out;
9384
9385 vccq_lpm:
9386         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9387 vcc_disable:
9388         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9389 out:
9390         return ret;
9391 }
9392 #endif /* CONFIG_PM */
9393
9394 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
9395 {
9396         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9397                 ufshcd_setup_hba_vreg(hba, false);
9398 }
9399
9400 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
9401 {
9402         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9403                 ufshcd_setup_hba_vreg(hba, true);
9404 }
9405
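/**
 * __ufshcd_wl_suspend - suspend the UFS device well-known logical unit
 * @hba: per adapter instance
 * @pm_op: runtime PM, system PM or shutdown
 *
 * Derive the target device power mode and link state from the PM level,
 * handle bkops and WriteBooster flush requirements, transition the device
 * power mode and the link state, and invoke the vendor suspend hooks. On
 * failure, the device and the link are brought back to the active state.
 *
 * Return: 0 upon success; a negative error code upon failure.
 */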
9406 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9407 {
9408         int ret = 0;
9409         bool check_for_bkops;
9410         enum ufs_pm_level pm_lvl;
9411         enum ufs_dev_pwr_mode req_dev_pwr_mode;
9412         enum uic_link_state req_link_state;
9413
9414         hba->pm_op_in_progress = true;
9415         if (pm_op != UFS_SHUTDOWN_PM) {
9416                 pm_lvl = pm_op == UFS_RUNTIME_PM ?
9417                          hba->rpm_lvl : hba->spm_lvl;
9418                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
9419                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
9420         } else {
9421                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
9422                 req_link_state = UIC_LINK_OFF_STATE;
9423         }
9424
9425         ufshpb_suspend(hba);
9426
9427         /*
9428          * If we can't transition into any of the low power modes
9429          * just gate the clocks.
9430          */
9431         ufshcd_hold(hba, false);
9432         hba->clk_gating.is_suspended = true;
9433
9434         if (ufshcd_is_clkscaling_supported(hba))
9435                 ufshcd_clk_scaling_suspend(hba, true);
9436
9437         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
9438                         req_link_state == UIC_LINK_ACTIVE_STATE) {
9439                 goto vops_suspend;
9440         }
9441
9442         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
9443             (req_link_state == hba->uic_link_state))
9444                 goto enable_scaling;
9445
9446         /* UFS device & link must be active before we enter this function */
9447         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
9448                 ret = -EINVAL;
9449                 goto enable_scaling;
9450         }
9451
9452         if (pm_op == UFS_RUNTIME_PM) {
9453                 if (ufshcd_can_autobkops_during_suspend(hba)) {
9454                         /*
9455                          * The device is idle with no requests in the queue, so
9456                          * allow background operations if bkops status shows
9457                          * that performance might be impacted.
9458                          */
9459                         ret = ufshcd_urgent_bkops(hba);
9460                         if (ret)
9461                                 goto enable_scaling;
9462                 } else {
9463                         /* make sure that auto bkops is disabled */
9464                         ufshcd_disable_auto_bkops(hba);
9465                 }
9466                 /*
9467                  * If the device needs to do BKOPs or a WB buffer flush during
9468                  * Hibern8, keep the device power mode as "active power mode"
9469                  * and keep the VCC supply on.
9470                  */
9471                 hba->dev_info.b_rpm_dev_flush_capable =
9472                         hba->auto_bkops_enabled ||
9473                         (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
9474                         ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
9475                         ufshcd_is_auto_hibern8_enabled(hba))) &&
9476                         ufshcd_wb_need_flush(hba));
9477         }
9478
9479         flush_work(&hba->eeh_work);
9480
9481         ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9482         if (ret)
9483                 goto enable_scaling;
9484
9485         if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
9486                 if (pm_op != UFS_RUNTIME_PM)
9487                         /* ensure that bkops is disabled */
9488                         ufshcd_disable_auto_bkops(hba);
9489
9490                 if (!hba->dev_info.b_rpm_dev_flush_capable) {
9491                         ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9492                         if (ret && pm_op != UFS_SHUTDOWN_PM) {
9493                                 /*
9494                                  * If an error is returned in the suspend flow, I/O
9495                                  * will hang. Trigger the error handler and break
9496                                  * suspend for error recovery.
9497                                  */
9498                                 ufshcd_force_error_recovery(hba);
9499                                 ret = -EBUSY;
9500                         }
9501                         if (ret)
9502                                 goto enable_scaling;
9503                 }
9504         }
9505
9506         /*
9507          * In the case of DeepSleep, the device is expected to remain powered
9508          * with the link off, so do not check for bkops.
9509          */
9510         check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
9511         ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
9512         if (ret && pm_op != UFS_SHUTDOWN_PM) {
9513                 /*
9514                  * If an error is returned in the suspend flow, I/O will hang.
9515                  * Trigger the error handler and break suspend for
9516                  * error recovery.
9517                  */
9518                 ufshcd_force_error_recovery(hba);
9519                 ret = -EBUSY;
9520         }
9521         if (ret)
9522                 goto set_dev_active;
9523
9524 vops_suspend:
9525         /*
9526          * Call the vendor specific suspend callback. As these callbacks may
9527          * access vendor specific host controller register space, call them while
9528          * the host clocks are still ON.
9529          */
9530         ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9531         if (ret)
9532                 goto set_link_active;
9533         goto out;
9534
9535 set_link_active:
9536         /*
9537          * Device hardware reset is required to exit DeepSleep. Also, for
9538          * DeepSleep, the link is off so host reset and restore will be done
9539          * further below.
9540          */
9541         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9542                 ufshcd_device_reset(hba);
9543                 WARN_ON(!ufshcd_is_link_off(hba));
9544         }
9545         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9546                 ufshcd_set_link_active(hba);
9547         else if (ufshcd_is_link_off(hba))
9548                 ufshcd_host_reset_and_restore(hba);
9549 set_dev_active:
9550         /* Can also get here needing to exit DeepSleep */
9551         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9552                 ufshcd_device_reset(hba);
9553                 ufshcd_host_reset_and_restore(hba);
9554         }
9555         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9556                 ufshcd_disable_auto_bkops(hba);
9557 enable_scaling:
9558         if (ufshcd_is_clkscaling_supported(hba))
9559                 ufshcd_clk_scaling_suspend(hba, false);
9560
9561         hba->dev_info.b_rpm_dev_flush_capable = false;
9562 out:
9563         if (hba->dev_info.b_rpm_dev_flush_capable) {
9564                 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9565                         msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9566         }
9567
9568         if (ret) {
9569                 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9570                 hba->clk_gating.is_suspended = false;
9571                 ufshcd_release(hba);
9572                 ufshpb_resume(hba);
9573         }
9574         hba->pm_op_in_progress = false;
9575         return ret;
9576 }
9577
9578 #ifdef CONFIG_PM
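/**
 * __ufshcd_wl_resume - resume the UFS device well-known logical unit
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * Invoke the vendor resume hook, bring the link back up (Hibern8 exit, or a
 * full reset and restore if the link was off), restore the active device
 * power mode, re-evaluate bkops and re-enable auto-hibernate if configured.
 *
 * Return: 0 upon success; a negative error code upon failure.
 */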
9579 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9580 {
9581         int ret;
9582         enum uic_link_state old_link_state = hba->uic_link_state;
9583
9584         hba->pm_op_in_progress = true;
9585
9586         /*
9587          * Call vendor specific resume callback. As these callbacks may access
9588          * vendor specific host controller register space call them when the
9589          * host clocks are ON.
9590          */
9591         ret = ufshcd_vops_resume(hba, pm_op);
9592         if (ret)
9593                 goto out;
9594
9595         /* For DeepSleep, the only supported option is to have the link off */
9596         WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9597
9598         if (ufshcd_is_link_hibern8(hba)) {
9599                 ret = ufshcd_uic_hibern8_exit(hba);
9600                 if (!ret) {
9601                         ufshcd_set_link_active(hba);
9602                 } else {
9603                         dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9604                                         __func__, ret);
9605                         goto vendor_suspend;
9606                 }
9607         } else if (ufshcd_is_link_off(hba)) {
9608                 /*
9609                  * A full initialization of the host and the device is
9610                  * required since the link was turned off during suspend.
9611                  * Note, in the case of DeepSleep, the device will exit
9612                  * DeepSleep due to device reset.
9613                  */
9614                 ret = ufshcd_reset_and_restore(hba);
9615                 /*
9616                  * ufshcd_reset_and_restore() should have already
9617                  * set the link state as active
9618                  */
9619                 if (ret || !ufshcd_is_link_active(hba))
9620                         goto vendor_suspend;
9621         }
9622
9623         if (!ufshcd_is_ufs_dev_active(hba)) {
9624                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9625                 if (ret)
9626                         goto set_old_link_state;
9627         }
9628
9629         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9630                 ufshcd_enable_auto_bkops(hba);
9631         else
9632                 /*
9633                  * If BKOPs are urgently needed at this moment, keep auto-bkops
9634                  * enabled; otherwise disable it.
9635                  */
9636                 ufshcd_urgent_bkops(hba);
9637
9638         if (hba->ee_usr_mask)
9639                 ufshcd_write_ee_control(hba);
9640
9641         if (ufshcd_is_clkscaling_supported(hba))
9642                 ufshcd_clk_scaling_suspend(hba, false);
9643
9644         if (hba->dev_info.b_rpm_dev_flush_capable) {
9645                 hba->dev_info.b_rpm_dev_flush_capable = false;
9646                 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
9647         }
9648
9649         /* Enable Auto-Hibernate if configured */
9650         ufshcd_auto_hibern8_enable(hba);
9651
9652         ufshpb_resume(hba);
9653         goto out;
9654
9655 set_old_link_state:
9656         ufshcd_link_state_transition(hba, old_link_state, 0);
9657 vendor_suspend:
9658         ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9659         ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9660 out:
9661         if (ret)
9662                 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9663         hba->clk_gating.is_suspended = false;
9664         ufshcd_release(hba);
9665         hba->pm_op_in_progress = false;
9666         return ret;
9667 }
9668
9669 static int ufshcd_wl_runtime_suspend(struct device *dev)
9670 {
9671         struct scsi_device *sdev = to_scsi_device(dev);
9672         struct ufs_hba *hba;
9673         int ret;
9674         ktime_t start = ktime_get();
9675
9676         hba = shost_priv(sdev->host);
9677
9678         ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9679         if (ret)
9680                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9681
9682         trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
9683                 ktime_to_us(ktime_sub(ktime_get(), start)),
9684                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9685
9686         return ret;
9687 }
9688
9689 static int ufshcd_wl_runtime_resume(struct device *dev)
9690 {
9691         struct scsi_device *sdev = to_scsi_device(dev);
9692         struct ufs_hba *hba;
9693         int ret = 0;
9694         ktime_t start = ktime_get();
9695
9696         hba = shost_priv(sdev->host);
9697
9698         ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
9699         if (ret)
9700                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9701
9702         trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
9703                 ktime_to_us(ktime_sub(ktime_get(), start)),
9704                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9705
9706         return ret;
9707 }
9708 #endif
9709
9710 #ifdef CONFIG_PM_SLEEP
9711 static int ufshcd_wl_suspend(struct device *dev)
9712 {
9713         struct scsi_device *sdev = to_scsi_device(dev);
9714         struct ufs_hba *hba;
9715         int ret = 0;
9716         ktime_t start = ktime_get();
9717
9718         hba = shost_priv(sdev->host);
9719         down(&hba->host_sem);
9720         hba->system_suspending = true;
9721
9722         if (pm_runtime_suspended(dev))
9723                 goto out;
9724
9725         ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
9726         if (ret) {
9727                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__,  ret);
9728                 up(&hba->host_sem);
9729         }
9730
9731 out:
9732         if (!ret)
9733                 hba->is_sys_suspended = true;
9734         trace_ufshcd_wl_suspend(dev_name(dev), ret,
9735                 ktime_to_us(ktime_sub(ktime_get(), start)),
9736                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9737
9738         return ret;
9739 }
9740
9741 static int ufshcd_wl_resume(struct device *dev)
9742 {
9743         struct scsi_device *sdev = to_scsi_device(dev);
9744         struct ufs_hba *hba;
9745         int ret = 0;
9746         ktime_t start = ktime_get();
9747
9748         hba = shost_priv(sdev->host);
9749
9750         if (pm_runtime_suspended(dev))
9751                 goto out;
9752
9753         ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
9754         if (ret)
9755                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9756 out:
9757         trace_ufshcd_wl_resume(dev_name(dev), ret,
9758                 ktime_to_us(ktime_sub(ktime_get(), start)),
9759                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9760         if (!ret)
9761                 hba->is_sys_suspended = false;
9762         hba->system_suspending = false;
9763         up(&hba->host_sem);
9764         return ret;
9765 }
9766 #endif
9767
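/*
 * Shutdown handler for the UFS device well-known logical unit: block further
 * PM operations, resume the device, quiesce all SCSI devices and then power
 * the device down via __ufshcd_wl_suspend(UFS_SHUTDOWN_PM).
 */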
9768 static void ufshcd_wl_shutdown(struct device *dev)
9769 {
9770         struct scsi_device *sdev = to_scsi_device(dev);
9771         struct ufs_hba *hba;
9772
9773         hba = shost_priv(sdev->host);
9774
9775         down(&hba->host_sem);
9776         hba->shutting_down = true;
9777         up(&hba->host_sem);
9778
9779         /* Turn on everything while shutting down */
9780         ufshcd_rpm_get_sync(hba);
9781         scsi_device_quiesce(sdev);
9782         shost_for_each_device(sdev, hba->host) {
9783                 if (sdev == hba->ufs_device_wlun)
9784                         continue;
9785                 scsi_device_quiesce(sdev);
9786         }
9787         __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9788 }
9789
9790 /**
9791  * ufshcd_suspend - helper function for suspend operations
9792  * @hba: per adapter instance
9793  *
9794  * This function disables irqs, turns off clocks and
9795  * puts the vreg and hba-vreg into LPM mode.
9796  */
9797 static int ufshcd_suspend(struct ufs_hba *hba)
9798 {
9799         int ret;
9800
9801         if (!hba->is_powered)
9802                 return 0;
9803         /*
9804          * Disable the host irq as there won't be any host controller
9805          * transactions expected till resume.
9806          */
9807         ufshcd_disable_irq(hba);
9808         ret = ufshcd_setup_clocks(hba, false);
9809         if (ret) {
9810                 ufshcd_enable_irq(hba);
9811                 return ret;
9812         }
9813         if (ufshcd_is_clkgating_allowed(hba)) {
9814                 hba->clk_gating.state = CLKS_OFF;
9815                 trace_ufshcd_clk_gating(dev_name(hba->dev),
9816                                         hba->clk_gating.state);
9817         }
9818
9819         ufshcd_vreg_set_lpm(hba);
9820         /* Put the host controller in low power mode if possible */
9821         ufshcd_hba_vreg_set_lpm(hba);
9822         return ret;
9823 }
9824
9825 #ifdef CONFIG_PM
9826 /**
9827  * ufshcd_resume - helper function for resume operations
9828  * @hba: per adapter instance
9829  *
9830  * This function basically turns on the regulators, clocks and
9831  * irqs of the hba.
9832  *
9833  * Returns 0 for success and non-zero for failure
9834  */
9835 static int ufshcd_resume(struct ufs_hba *hba)
9836 {
9837         int ret;
9838
9839         if (!hba->is_powered)
9840                 return 0;
9841
9842         ufshcd_hba_vreg_set_hpm(hba);
9843         ret = ufshcd_vreg_set_hpm(hba);
9844         if (ret)
9845                 goto out;
9846
9847         /* Make sure clocks are enabled before accessing controller */
9848         ret = ufshcd_setup_clocks(hba, true);
9849         if (ret)
9850                 goto disable_vreg;
9851
9852         /* enable the host irq as host controller would be active soon */
9853         ufshcd_enable_irq(hba);
9854
9855         goto out;
9856
9857 disable_vreg:
9858         ufshcd_vreg_set_lpm(hba);
9859 out:
9860         if (ret)
9861                 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
9862         return ret;
9863 }
9864 #endif /* CONFIG_PM */
9865
9866 #ifdef CONFIG_PM_SLEEP
9867 /**
9868  * ufshcd_system_suspend - system suspend callback
9869  * @dev: Device associated with the UFS controller.
9870  *
9871  * Executed before putting the system into a sleep state in which the contents
9872  * of main memory are preserved.
9873  *
9874  * Returns 0 for success and non-zero for failure
9875  */
9876 int ufshcd_system_suspend(struct device *dev)
9877 {
9878         struct ufs_hba *hba = dev_get_drvdata(dev);
9879         int ret = 0;
9880         ktime_t start = ktime_get();
9881
9882         if (pm_runtime_suspended(hba->dev))
9883                 goto out;
9884
9885         ret = ufshcd_suspend(hba);
9886 out:
9887         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9888                 ktime_to_us(ktime_sub(ktime_get(), start)),
9889                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9890         return ret;
9891 }
9892 EXPORT_SYMBOL(ufshcd_system_suspend);
9893
9894 /**
9895  * ufshcd_system_resume - system resume callback
9896  * @dev: Device associated with the UFS controller.
9897  *
9898  * Executed after waking the system up from a sleep state in which the contents
9899  * of main memory were preserved.
9900  *
9901  * Returns 0 for success and non-zero for failure
9902  */
9903 int ufshcd_system_resume(struct device *dev)
9904 {
9905         struct ufs_hba *hba = dev_get_drvdata(dev);
9906         ktime_t start = ktime_get();
9907         int ret = 0;
9908
9909         if (pm_runtime_suspended(hba->dev))
9910                 goto out;
9911
9912         ret = ufshcd_resume(hba);
9913
9914 out:
9915         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9916                 ktime_to_us(ktime_sub(ktime_get(), start)),
9917                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9918
9919         return ret;
9920 }
9921 EXPORT_SYMBOL(ufshcd_system_resume);
9922 #endif /* CONFIG_PM_SLEEP */
9923
9924 #ifdef CONFIG_PM
9925 /**
9926  * ufshcd_runtime_suspend - runtime suspend callback
9927  * @dev: Device associated with the UFS controller.
9928  *
9929  * Check the description of ufshcd_suspend() function for more details.
9930  *
9931  * Returns 0 for success and non-zero for failure
9932  */
9933 int ufshcd_runtime_suspend(struct device *dev)
9934 {
9935         struct ufs_hba *hba = dev_get_drvdata(dev);
9936         int ret;
9937         ktime_t start = ktime_get();
9938
9939         ret = ufshcd_suspend(hba);
9940
9941         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9942                 ktime_to_us(ktime_sub(ktime_get(), start)),
9943                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9944         return ret;
9945 }
9946 EXPORT_SYMBOL(ufshcd_runtime_suspend);
9947
9948 /**
9949  * ufshcd_runtime_resume - runtime resume routine
9950  * @dev: Device associated with the UFS controller.
9951  *
9952  * This function basically brings the controller
9953  * to the active state. The following operations are done in this function:
9954  *
9955  * 1. Turn on all the controller-related clocks
9956  * 2. Turn ON the VCC rail
9957  */
9958 int ufshcd_runtime_resume(struct device *dev)
9959 {
9960         struct ufs_hba *hba = dev_get_drvdata(dev);
9961         int ret;
9962         ktime_t start = ktime_get();
9963
9964         ret = ufshcd_resume(hba);
9965
9966         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9967                 ktime_to_us(ktime_sub(ktime_get(), start)),
9968                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9969         return ret;
9970 }
9971 EXPORT_SYMBOL(ufshcd_runtime_resume);
9972 #endif /* CONFIG_PM */
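
/*
 * Example (illustrative sketch only, not part of this driver): a bus-glue
 * driver normally does not call the PM callbacks above directly but exports
 * them through its dev_pm_ops. A minimal sketch, assuming <linux/pm.h> and
 * <ufs/ufshcd.h>; the name ufs_example_pm_ops is hypothetical, and
 * ufshcd_suspend_prepare()/ufshcd_resume_complete() are defined further below
 * in this file.
 */
static const struct dev_pm_ops ufs_example_pm_ops = {
        /* System sleep: defer to the UFS core suspend/resume helpers */
        SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
        /* Runtime PM: likewise handled entirely by the UFS core */
        SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
        /* Keep the runtime PM and system PM transitions consistent */
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
};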
9973
9974 /**
9975  * ufshcd_shutdown - shutdown routine
9976  * @hba: per adapter instance
9977  *
9978  * This function turns off both the UFS device and the UFS HBA
9979  * regulators, and also disables the clocks.
9980  *
9981  * Returns 0 always to allow force shutdown even in case of errors.
9982  */
9983 int ufshcd_shutdown(struct ufs_hba *hba)
9984 {
9985         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9986                 ufshcd_suspend(hba);
9987
9988         hba->is_powered = false;
9989         /* allow force shutdown even in case of errors */
9990         return 0;
9991 }
9992 EXPORT_SYMBOL(ufshcd_shutdown);
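
/*
 * Example (illustrative sketch only): ufshcd_shutdown() is normally invoked
 * from a glue driver's shutdown handler. A minimal platform-device variant,
 * assuming <linux/platform_device.h>; the name ufs_example_shutdown() is
 * hypothetical.
 */
static void ufs_example_shutdown(struct platform_device *pdev)
{
        /* ufshcd_init() stored the hba pointer as driver data */
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        ufshcd_shutdown(hba);
}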
9993
9994 /**
9995  * ufshcd_remove - de-allocate the SCSI host and the host memory space
9996  *              data structures
9997  * @hba: per adapter instance
9998  */
9999 void ufshcd_remove(struct ufs_hba *hba)
10000 {
10001         if (hba->ufs_device_wlun)
10002                 ufshcd_rpm_get_sync(hba);
10003         ufs_hwmon_remove(hba);
10004         ufs_bsg_remove(hba);
10005         ufshpb_remove(hba);
10006         ufs_sysfs_remove_nodes(hba->dev);
10007         blk_mq_destroy_queue(hba->tmf_queue);
10008         blk_put_queue(hba->tmf_queue);
10009         blk_mq_free_tag_set(&hba->tmf_tag_set);
10010         scsi_remove_host(hba->host);
10011         /* disable interrupts */
10012         ufshcd_disable_intr(hba, hba->intr_mask);
10013         ufshcd_hba_stop(hba);
10014         ufshcd_hba_exit(hba);
10015 }
10016 EXPORT_SYMBOL_GPL(ufshcd_remove);
10017
10018 #ifdef CONFIG_PM_SLEEP
10019 int ufshcd_system_freeze(struct device *dev)
10020 {
10021
10022         return ufshcd_system_suspend(dev);
10023
10024 }
10025 EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
10026
10027 int ufshcd_system_restore(struct device *dev)
10028 {
10029
10030         struct ufs_hba *hba = dev_get_drvdata(dev);
10031         int ret;
10032
10033         ret = ufshcd_system_resume(dev);
10034         if (ret)
10035                 return ret;
10036
10037         /* Configure UTRL and UTMRL base address registers */
10038         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
10039                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
10040         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
10041                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
10042         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
10043                         REG_UTP_TASK_REQ_LIST_BASE_L);
10044         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
10045                         REG_UTP_TASK_REQ_LIST_BASE_H);
10046         /*
10047          * Make sure that UTRL and UTMRL base address registers
10048          * are updated with the latest queue addresses. Only after
10049          * updating these addresses, we can queue the new commands.
10050          */
10051         mb();
10052
10053         /* Resuming from hibernate, assume that link was OFF */
10054         ufshcd_set_link_off(hba);
10055
10056         return 0;
10057
10058 }
10059 EXPORT_SYMBOL_GPL(ufshcd_system_restore);
10060
10061 int ufshcd_system_thaw(struct device *dev)
10062 {
10063         return ufshcd_system_resume(dev);
10064 }
10065 EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
10066 #endif /* CONFIG_PM_SLEEP  */
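
/*
 * Example (illustrative sketch only): a glue driver that wants the dedicated
 * hibernation helpers above spells out the sleep callbacks instead of using
 * SET_SYSTEM_SLEEP_PM_OPS(), so that freeze/thaw/restore are handled
 * separately from suspend/resume. The name ufs_example_sleep_pm_ops is
 * hypothetical.
 */
static const struct dev_pm_ops ufs_example_sleep_pm_ops = {
#ifdef CONFIG_PM_SLEEP
        .suspend        = ufshcd_system_suspend,
        .resume         = ufshcd_system_resume,
        .freeze         = ufshcd_system_freeze,
        .thaw           = ufshcd_system_thaw,
        .restore        = ufshcd_system_restore,
#endif
};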
10067
10068 /**
10069  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
10070  * @hba: pointer to Host Bus Adapter (HBA)
10071  */
10072 void ufshcd_dealloc_host(struct ufs_hba *hba)
10073 {
10074         scsi_host_put(hba->host);
10075 }
10076 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
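
/*
 * Example (illustrative sketch only): teardown in a glue driver pairs
 * ufshcd_remove() with ufshcd_dealloc_host(). The platform-device remove
 * callback below is a sketch assuming <linux/platform_device.h> and
 * <linux/pm_runtime.h>; the name ufs_example_remove() is hypothetical.
 */
static int ufs_example_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        /* Make sure the device is resumed before tearing it down */
        pm_runtime_get_sync(&pdev->dev);
        ufshcd_remove(hba);
        ufshcd_dealloc_host(hba);
        return 0;
}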
10077
10078 /**
10079  * ufshcd_set_dma_mask - Set dma mask based on the controller
10080  *                       addressing capability
10081  * @hba: per adapter instance
10082  *
10083  * Returns 0 for success, non-zero for failure
10084  */
10085 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
10086 {
10087         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
10088                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
10089                         return 0;
10090         }
10091         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
10092 }
10093
10094 /**
10095  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
10096  * @dev: pointer to device handle
10097  * @hba_handle: driver private handle
10098  * Returns 0 on success, non-zero value on failure
10099  */
10100 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
10101 {
10102         struct Scsi_Host *host;
10103         struct ufs_hba *hba;
10104         int err = 0;
10105
10106         if (!dev) {
10107                 dev_err(dev,
10108                 "Invalid memory reference for dev is NULL\n");
10109                 err = -ENODEV;
10110                 goto out_error;
10111         }
10112
10113         host = scsi_host_alloc(&ufshcd_driver_template,
10114                                 sizeof(struct ufs_hba));
10115         if (!host) {
10116                 dev_err(dev, "scsi_host_alloc failed\n");
10117                 err = -ENOMEM;
10118                 goto out_error;
10119         }
10120         host->nr_maps = HCTX_TYPE_POLL + 1;
10121         hba = shost_priv(host);
10122         hba->host = host;
10123         hba->dev = dev;
10124         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
10125         hba->nop_out_timeout = NOP_OUT_TIMEOUT;
10126         ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
10127         INIT_LIST_HEAD(&hba->clk_list_head);
10128         spin_lock_init(&hba->outstanding_lock);
10129
10130         *hba_handle = hba;
10131
10132 out_error:
10133         return err;
10134 }
10135 EXPORT_SYMBOL(ufshcd_alloc_host);
10136
10137 /* This function exists only because blk_mq_alloc_tag_set() requires .queue_rq. */
10138 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
10139                                      const struct blk_mq_queue_data *qd)
10140 {
10141         WARN_ON_ONCE(true);
10142         return BLK_STS_NOTSUPP;
10143 }
10144
10145 static const struct blk_mq_ops ufshcd_tmf_ops = {
10146         .queue_rq = ufshcd_queue_tmf,
10147 };
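
/*
 * Example (illustrative sketch only): the tmf_queue set up in ufshcd_init()
 * below is never used to dispatch requests (hence the stub queue_rq above);
 * it only acts as a tag allocator for task management slots. A sketch of that
 * usage pattern, assuming <linux/blk-mq.h>; the helper name is hypothetical.
 */
static int ufs_example_reserve_tmf_slot(struct ufs_hba *hba)
{
        struct request *req;

        /* Allocate a tag; req->tag indexes a free task management slot */
        req = blk_mq_alloc_request(hba->tmf_queue, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* ... issue the task management request here ... */

        blk_mq_free_request(req);
        return 0;
}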
10148
10149 /**
10150  * ufshcd_init - Driver initialization routine
10151  * @hba: per-adapter instance
10152  * @mmio_base: base register address
10153  * @irq: Interrupt line of device
10154  * Returns 0 on success, non-zero value on failure
10155  */
10156 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
10157 {
10158         int err;
10159         struct Scsi_Host *host = hba->host;
10160         struct device *dev = hba->dev;
10161         char eh_wq_name[sizeof("ufs_eh_wq_00")];
10162
10163         /*
10164          * dev_set_drvdata() must be called before any callbacks are registered
10165          * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
10166          * sysfs).
10167          */
10168         dev_set_drvdata(dev, hba);
10169
10170         if (!mmio_base) {
10171                 dev_err(hba->dev,
10172                 "Invalid memory reference for mmio_base is NULL\n");
10173                 err = -ENODEV;
10174                 goto out_error;
10175         }
10176
10177         hba->mmio_base = mmio_base;
10178         hba->irq = irq;
10179         hba->vps = &ufs_hba_vps;
10180
10181         err = ufshcd_hba_init(hba);
10182         if (err)
10183                 goto out_error;
10184
10185         /* Read capabilities registers */
10186         err = ufshcd_hba_capabilities(hba);
10187         if (err)
10188                 goto out_disable;
10189
10190         /* Get UFS version supported by the controller */
10191         hba->ufs_version = ufshcd_get_ufs_version(hba);
10192
10193         /* Get Interrupt bit mask per version */
10194         hba->intr_mask = ufshcd_get_intr_mask(hba);
10195
10196         err = ufshcd_set_dma_mask(hba);
10197         if (err) {
10198                 dev_err(hba->dev, "set dma mask failed\n");
10199                 goto out_disable;
10200         }
10201
10202         /* Allocate memory for host memory space */
10203         err = ufshcd_memory_alloc(hba);
10204         if (err) {
10205                 dev_err(hba->dev, "Memory allocation failed\n");
10206                 goto out_disable;
10207         }
10208
10209         /* Configure LRB */
10210         ufshcd_host_memory_configure(hba);
10211
10212         host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
10213         host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
10214         host->max_id = UFSHCD_MAX_ID;
10215         host->max_lun = UFS_MAX_LUNS;
10216         host->max_channel = UFSHCD_MAX_CHANNEL;
10217         host->unique_id = host->host_no;
10218         host->max_cmd_len = UFS_CDB_SIZE;
10219
10220         hba->max_pwr_info.is_valid = false;
10221
10222         /* Initialize work queues */
10223         snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
10224                  hba->host->host_no);
10225         hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
10226         if (!hba->eh_wq) {
10227                 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
10228                         __func__);
10229                 err = -ENOMEM;
10230                 goto out_disable;
10231         }
10232         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
10233         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
10234
10235         sema_init(&hba->host_sem, 1);
10236
10237         /* Initialize UIC command mutex */
10238         mutex_init(&hba->uic_cmd_mutex);
10239
10240         /* Initialize mutex for device management commands */
10241         mutex_init(&hba->dev_cmd.lock);
10242
10243         /* Initialize mutex for exception event control */
10244         mutex_init(&hba->ee_ctrl_mutex);
10245
10246         mutex_init(&hba->wb_mutex);
10247         init_rwsem(&hba->clk_scaling_lock);
10248
10249         ufshcd_init_clk_gating(hba);
10250
10251         ufshcd_init_clk_scaling(hba);
10252
10253         /*
10254          * In order to avoid any spurious interrupt immediately after
10255          * registering UFS controller interrupt handler, clear any pending UFS
10256          * interrupt status and disable all the UFS interrupts.
10257          */
10258         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10259                       REG_INTERRUPT_STATUS);
10260         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10261         /*
10262          * Make sure that UFS interrupts are disabled and any pending interrupt
10263          * status is cleared before registering UFS interrupt handler.
10264          */
10265         mb();
10266
10267         /* IRQ registration */
10268         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10269         if (err) {
10270                 dev_err(hba->dev, "request irq failed\n");
10271                 goto out_disable;
10272         } else {
10273                 hba->is_irq_enabled = true;
10274         }
10275
10276         if (!is_mcq_supported(hba)) {
10277                 err = scsi_add_host(host, hba->dev);
10278                 if (err) {
10279                         dev_err(hba->dev, "scsi_add_host failed\n");
10280                         goto out_disable;
10281                 }
10282         }
10283
10284         hba->tmf_tag_set = (struct blk_mq_tag_set) {
10285                 .nr_hw_queues   = 1,
10286                 .queue_depth    = hba->nutmrs,
10287                 .ops            = &ufshcd_tmf_ops,
10288                 .flags          = BLK_MQ_F_NO_SCHED,
10289         };
10290         err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
10291         if (err < 0)
10292                 goto out_remove_scsi_host;
10293         hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
10294         if (IS_ERR(hba->tmf_queue)) {
10295                 err = PTR_ERR(hba->tmf_queue);
10296                 goto free_tmf_tag_set;
10297         }
10298         hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
10299                                     sizeof(*hba->tmf_rqs), GFP_KERNEL);
10300         if (!hba->tmf_rqs) {
10301                 err = -ENOMEM;
10302                 goto free_tmf_queue;
10303         }
10304
10305         /* Reset the attached device */
10306         ufshcd_device_reset(hba);
10307
10308         ufshcd_init_crypto(hba);
10309
10310         /* Host controller enable */
10311         err = ufshcd_hba_enable(hba);
10312         if (err) {
10313                 dev_err(hba->dev, "Host controller enable failed\n");
10314                 ufshcd_print_evt_hist(hba);
10315                 ufshcd_print_host_state(hba);
10316                 goto free_tmf_queue;
10317         }
10318
10319         /*
10320          * Set the default power management level for runtime and system PM.
10321          * Default power saving mode is to keep UFS link in Hibern8 state
10322          * and UFS device in sleep state.
10323          */
10324         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10325                                                 UFS_SLEEP_PWR_MODE,
10326                                                 UIC_LINK_HIBERN8_STATE);
10327         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10328                                                 UFS_SLEEP_PWR_MODE,
10329                                                 UIC_LINK_HIBERN8_STATE);
10330
10331         INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
10332                           ufshcd_rpm_dev_flush_recheck_work);
10333
10334         /* Set the default auto-hibernate idle timer value to 150 ms */
10335         if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
10336                 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
10337                             FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
10338         }
10339
10340         /* Hold auto suspend until async scan completes */
10341         pm_runtime_get_sync(dev);
10342         atomic_set(&hba->scsi_block_reqs_cnt, 0);
10343         /*
10344          * Assume that the device was not put into the sleep/power-down
10345          * state by the boot stage that runs before the kernel. This
10346          * assumption avoids having to do link startup twice during
10347          * ufshcd_probe_hba().
10348          */
10349         ufshcd_set_ufs_dev_active(hba);
10350
10351         async_schedule(ufshcd_async_scan, hba);
10352         ufs_sysfs_add_nodes(hba->dev);
10353
10354         device_enable_async_suspend(dev);
10355         return 0;
10356
10357 free_tmf_queue:
10358         blk_mq_destroy_queue(hba->tmf_queue);
10359         blk_put_queue(hba->tmf_queue);
10360 free_tmf_tag_set:
10361         blk_mq_free_tag_set(&hba->tmf_tag_set);
10362 out_remove_scsi_host:
10363         scsi_remove_host(hba->host);
10364 out_disable:
10365         hba->is_irq_enabled = false;
10366         ufshcd_hba_exit(hba);
10367 out_error:
10368         return err;
10369 }
10370 EXPORT_SYMBOL_GPL(ufshcd_init);
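
/*
 * Example (illustrative sketch only): a minimal platform glue probe that
 * allocates the HBA with ufshcd_alloc_host() and then initializes it with
 * ufshcd_init(). Assumes <linux/platform_device.h> and <ufs/ufshcd.h>;
 * the function name ufs_example_probe() is hypothetical.
 */
static int ufs_example_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        void __iomem *mmio_base;
        struct ufs_hba *hba;
        int irq, err;

        mmio_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(mmio_base))
                return PTR_ERR(mmio_base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        err = ufshcd_alloc_host(dev, &hba);
        if (err)
                return err;

        err = ufshcd_init(hba, mmio_base, irq);
        if (err) {
                ufshcd_dealloc_host(hba);
                return err;
        }

        platform_set_drvdata(pdev, hba);
        return 0;
}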
10371
10372 void ufshcd_resume_complete(struct device *dev)
10373 {
10374         struct ufs_hba *hba = dev_get_drvdata(dev);
10375
10376         if (hba->complete_put) {
10377                 ufshcd_rpm_put(hba);
10378                 hba->complete_put = false;
10379         }
10380 }
10381 EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
10382
10383 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
10384 {
10385         struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
10386         enum ufs_dev_pwr_mode dev_pwr_mode;
10387         enum uic_link_state link_state;
10388         unsigned long flags;
10389         bool res;
10390
10391         spin_lock_irqsave(&dev->power.lock, flags);
10392         dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
10393         link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
10394         res = pm_runtime_suspended(dev) &&
10395               hba->curr_dev_pwr_mode == dev_pwr_mode &&
10396               hba->uic_link_state == link_state &&
10397               !hba->dev_info.b_rpm_dev_flush_capable;
10398         spin_unlock_irqrestore(&dev->power.lock, flags);
10399
10400         return res;
10401 }
10402
10403 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
10404 {
10405         struct ufs_hba *hba = dev_get_drvdata(dev);
10406         int ret;
10407
10408         /*
10409          * The SCSI core assumes that the runtime PM and system PM states
10410          * of a SCSI driver are the same, so it does not wake up a runtime
10411          * suspended device for system suspend. UFS does not follow that
10412          * assumption; see ufshcd_resume_complete().
10413          */
10414         if (hba->ufs_device_wlun) {
10415                 /* Prevent runtime suspend */
10416                 ufshcd_rpm_get_noresume(hba);
10417                 /*
10418                  * Check if already runtime suspended in same state as system
10419                  * suspend would be.
10420                  */
10421                 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
10422                         /* RPM state is not ok for SPM, so runtime resume */
10423                         ret = ufshcd_rpm_resume(hba);
10424                         if (ret < 0 && ret != -EACCES) {
10425                                 ufshcd_rpm_put(hba);
10426                                 return ret;
10427                         }
10428                 }
10429                 hba->complete_put = true;
10430         }
10431         return 0;
10432 }
10433 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
10434
10435 int ufshcd_suspend_prepare(struct device *dev)
10436 {
10437         return __ufshcd_suspend_prepare(dev, true);
10438 }
10439 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
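
/*
 * Example (illustrative sketch only): a glue driver whose runtime-suspend
 * state can never satisfy the system-suspend target may pass
 * rpm_ok_for_spm = false so the device is always runtime resumed first.
 * The wrapper name below is hypothetical.
 */
static int ufs_example_suspend_prepare(struct device *dev)
{
        /* Do not try to reuse the runtime PM state for system suspend */
        return __ufshcd_suspend_prepare(dev, false);
}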
10440
10441 #ifdef CONFIG_PM_SLEEP
10442 static int ufshcd_wl_poweroff(struct device *dev)
10443 {
10444         struct scsi_device *sdev = to_scsi_device(dev);
10445         struct ufs_hba *hba = shost_priv(sdev->host);
10446
10447         __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10448         return 0;
10449 }
10450 #endif
10451
10452 static int ufshcd_wl_probe(struct device *dev)
10453 {
10454         struct scsi_device *sdev = to_scsi_device(dev);
10455
10456         if (!is_device_wlun(sdev))
10457                 return -ENODEV;
10458
10459         blk_pm_runtime_init(sdev->request_queue, dev);
10460         pm_runtime_set_autosuspend_delay(dev, 0);
10461         pm_runtime_allow(dev);
10462
10463         return  0;
10464 }
10465
10466 static int ufshcd_wl_remove(struct device *dev)
10467 {
10468         pm_runtime_forbid(dev);
10469         return 0;
10470 }
10471
10472 static const struct dev_pm_ops ufshcd_wl_pm_ops = {
10473 #ifdef CONFIG_PM_SLEEP
10474         .suspend = ufshcd_wl_suspend,
10475         .resume = ufshcd_wl_resume,
10476         .freeze = ufshcd_wl_suspend,
10477         .thaw = ufshcd_wl_resume,
10478         .poweroff = ufshcd_wl_poweroff,
10479         .restore = ufshcd_wl_resume,
10480 #endif
10481         SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
10482 };
10483
10484 /*
10485  * ufs_dev_wlun_template - describes the UFS device W-LUN
10486  * The UFS device W-LUN is used to send PM commands to the device.
10487  * All other LUNs are consumers of the UFS device W-LUN.
10488  *
10489  * Currently no sd driver is bound to W-LUNs, so no W-LUN-specific PM
10490  * operations are performed. Per the UFS design, SSU (START STOP UNIT)
10491  * must be sent to the UFS device W-LUN, hence a SCSI driver is
10492  * registered for UFS W-LUNs only.
10493  */
10494 static struct scsi_driver ufs_dev_wlun_template = {
10495         .gendrv = {
10496                 .name = "ufs_device_wlun",
10497                 .owner = THIS_MODULE,
10498                 .probe = ufshcd_wl_probe,
10499                 .remove = ufshcd_wl_remove,
10500                 .pm = &ufshcd_wl_pm_ops,
10501                 .shutdown = ufshcd_wl_shutdown,
10502         },
10503 };
10504
10505 static int __init ufshcd_core_init(void)
10506 {
10507         int ret;
10508
10509         ufs_debugfs_init();
10510
10511         ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
10512         if (ret)
10513                 ufs_debugfs_exit();
10514         return ret;
10515 }
10516
10517 static void __exit ufshcd_core_exit(void)
10518 {
10519         ufs_debugfs_exit();
10520         scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
10521 }
10522
10523 module_init(ufshcd_core_init);
10524 module_exit(ufshcd_core_exit);
10525
10526 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
10527 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10528 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10529 MODULE_SOFTDEP("pre: governor_simpleondemand");
10530 MODULE_LICENSE("GPL");