/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */
#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include <asm/unaligned.h>
#include <linux/blkdev.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})
#define ufshcd_hex_dump(prefix_str, buf, len) do {			\
	size_t __len = (len);						\
	print_hex_dump(KERN_ERR, prefix_str,				\
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);			\
} while (0)
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4)
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);
	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
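/*
 * Illustrative use (sketch): dump the whole UFSHCI register space, exactly
 * as ufshcd_print_host_regs() does further down in this file.
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */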
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};
/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};
/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};
#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match is found, return level 0 */
	return UFS_PM_LVL_0;
}
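/*
 * Example (illustrative): with the ufs_pm_lvl_states[] table above,
 * requesting (UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE) matches the
 * fourth entry, i.e. UFS_PM_LVL_3:
 *
 *	enum ufs_pm_level lvl =
 *		ufs_get_desired_pm_lvl_for_dev_link_state(
 *			UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE);
 */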
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
		UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),

	END_FIX
};
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}
static inline void ufshcd_wb_config(struct ufs_hba *hba)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba))
		return;

	ret = ufshcd_wb_ctrl(hba, true);
	if (ret)
		dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
	else
		dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
	ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
	if (ret)
		dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
			__func__, ret);
	ufshcd_wb_toggle_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
					const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     const char *str)
{
	int off = (int)tag - hba->nutrs;
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];

	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
			  &descp->input_param1);
}
static void ufshcd_add_command_trace(struct ufs_hba *hba,
		unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled()) {
		/* trace UPIU W/O tracing command */
		if (cmd)
			ufshcd_add_cmd_upiu_trace(hba, tag, str);
		return;
	}

	if (cmd) { /* data phase exists */
		/* trace UPIU also */
		ufshcd_add_cmd_upiu_trace(hba, tag, str);
		opcode = cmd->cmnd[0];
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (cmd->request && cmd->request->bio)
				lba = cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
			     doorbell, transfer_len, intr, lba, opcode);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}
static void ufshcd_print_err_hist(struct ufs_hba *hba,
				  struct ufs_err_reg_hist *err_hist,
				  char *err_name)
{
	int i;
	bool found = false;

	for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;

		if (err_hist->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
}
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
			      "auto_hibern8_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
			      "link_startup_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
			      "suspend_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");

	ufshcd_print_clk_freqs(hba);

	ufshcd_vops_dbg_register_dump(hba);
}
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, ktime_to_us(lrbp->compl_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		__func__,
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
/**
 * ufshcd_wait_for_register - wait for a register value to match the desired value
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return:
 * -ETIMEDOUT on error, zero on success.
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		usleep_range(interval_us, interval_us + 50);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
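/*
 * Illustrative use (sketch; the register and bit names are the ones this
 * driver already uses): poll the host controller status register until the
 * device-present bit is set, checking every 100 us for up to 1 ms.
 *
 *	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_STATUS,
 *				       DEVICE_PRESENT, DEVICE_PRESENT,
 *				       100, 1);
 */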
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
	}

	return intr_mask;
}
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}
/**
 * ufshcd_is_device_present - Check if a device is connected to
 * the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if a device is present, false if no device is detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? true : false;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns 0 on success, a positive value on failure
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/**
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}
/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
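/*
 * Illustrative use (sketch; this mirrors how the driver typically programs
 * aggregation when making the HBA operational): raise a completion
 * interrupt only after nutrs - 1 requests complete or after
 * INT_AGGR_DEF_TO * 40 us, whichever comes first.
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */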
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers
 * @hba: per adapter instance
 *
 * When the run-stop registers are set to 1, the host controller
 * can start processing requests.
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver 1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep the manual
	 * tuning logic simple, we only do manual tuning if the local UniPro
	 * version doesn't support ver 1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}
/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);

				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}
/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret)
		ufshcd_set_clk_freq(hba, !scale_up);

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for a long time so
			 * check if the doorbells have been cleared by now.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
		}
	}

	/* check if the power mode needs to be changed */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		ufshcd_scsi_unblock_requests(hba);
	}

	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	ufshcd_scsi_unblock_requests(hba);
}
/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		goto out;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out_unprepare;
		}
	}

	/* Enable Write Booster if we have scaled up else disable it */
	up_write(&hba->clk_scaling_lock);
	ufshcd_wb_ctrl(hba, scale_up);
	down_write(&hba->clk_scaling_lock);

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba);
out:
	ufshcd_release(hba);
	return ret;
}
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	/* Override with the closest supported frequency */
	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	/* Decide based on the rounded-off frequency and update */
	scale_up = (*freq == clki->max_freq) ? true : false;
	if (!scale_up)
		*freq = clki->min_freq;
	/* Update the frequency */
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}
static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
{
	int *busy = priv;

	WARN_ON_ONCE(reserved);
	(*busy)++;
	return false;
}

/* Whether or not any tag is in use by a request that is in progress. */
static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
{
	struct request_queue *q = hba->cmd_queue;
	int busy = 0;

	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
	return busy;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	/*
	 * If the current frequency is 0, the ondemand governor assumes
	 * no initial frequency has been set and always requests the
	 * max frequency.
	 */
	stat->current_frequency = clki->curr_freq;
	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
					 &hba->vps->ondemand_data);
	devfreq = devfreq_add_device(hba->dev,
			&hba->vps->devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			&hba->vps->ondemand_data);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}
static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	hba->clk_scaling.is_allowed = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}
static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	ufshcd_enable_irq(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	ufshcd_scsi_unblock_requests(hba);
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			if (async) {
				rc = -EAGAIN;
				hba->clk_gating.active_reqs--;
				break;
			}
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		/* fallthrough */
	case CLKS_OFF:
		ufshcd_scsi_block_requests(hba);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		/* fallthrough */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
				__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state != REQ_CLKS_OFF)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	ufshcd_disable_irq(hba);

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. This way we keep
	 * the state machine intact and ultimately prevent the cancel
	 * work from running multiple times when new requests arrive
	 * before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	queue_delayed_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.gate_work,
			   msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
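/*
 * Typical hold/release pairing (illustrative sketch; this is exactly how
 * ufshcd_send_uic_cmd() below uses the pair): keep the clocks ungated for
 * the duration of a register-level operation, then drop the reference so
 * the delayed gate work can be scheduled again.
 *
 *	ufshcd_hold(hba, false);
 *	... access controller registers ...
 *	ufshcd_release(hba);
 */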
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value) {
		ufshcd_release(hba);
	} else {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.active_reqs++;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->clk_gating.is_enabled = value;
out:
	return count;
}
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clkscaling_00")];

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
		 hba->host->host_no);
	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

	ufshcd_clkscaling_init_sysfs(hba);
}
static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clk_gating_00")];

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
							   WQ_MEM_RECLAIM);

	hba->clk_gating.is_enabled = true;

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
		return;

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = jiffies;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	hba->lrb[task_tag].issue_time_stamp = ktime_get();
	hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
	ufshcd_add_command_trace(hba, task_tag, "send");
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();
}
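/*
 * Editorial note on ordering in ufshcd_send_command() above: the tag is
 * set in outstanding_reqs before the doorbell register is written, since
 * the completion path consults outstanding_reqs to decide which slots to
 * reap; the wmb() keeps the doorbell write from being reordered with
 * later writes.
 */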
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);

		memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
		       len_to_copy);
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, buf_len);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}
/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
 * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the UniPro layer
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except for the locking: it must be
 * called with the uic_cmd_mutex held and the host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}
2116 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2117 * @hba: per adapter instance
2118 * @uic_cmd: UIC command
2120 * Returns 0 only on success.
2122 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2125 unsigned long flags;
2127 ufshcd_hold(hba, false);
2128 mutex_lock(&hba->uic_cmd_mutex);
2129 ufshcd_add_delay_before_dme_cmd(hba);
2131 spin_lock_irqsave(hba->host->host_lock, flags);
2132 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2133 spin_unlock_irqrestore(hba->host->host_lock, flags);
2135 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2137 mutex_unlock(&hba->uic_cmd_mutex);
2139 ufshcd_release(hba);
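/*
 * Illustrative sketch, not part of the driver: issuing a raw UIC command
 * through the helper above. UIC_CMD_DME_ENABLE comes from ufshci.h and
 * takes no arguments. Kept under "#if 0" so it is never compiled.
 */
#if 0
static int example_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	/* blocks until the command completes or UIC_CMD_TIMEOUT expires */
	return ufshcd_send_uic_cmd(hba, &uic_cmd);
}
#endif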
2144 * ufshcd_map_sg - Map scatter-gather list to prdt
2145 * @hba: per adapter instance
2146 * @lrbp: pointer to local reference block
2148 * Returns 0 in case of success, non-zero value in case of failure
2150 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2152 struct ufshcd_sg_entry *prd_table;
2153 struct scatterlist *sg;
2154 struct scsi_cmnd *cmd;
2159 sg_segments = scsi_dma_map(cmd);
2160 if (sg_segments < 0)
2164 lrbp->utr_descriptor_ptr->prd_table_length =
2165 cpu_to_le16((u16)sg_segments);
2167 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2169 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2171 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2172 prd_table[i].base_addr =
2173 cpu_to_le32(lower_32_bits(sg->dma_address));
2174 prd_table[i].upper_addr =
2175 cpu_to_le32(upper_32_bits(sg->dma_address));
2176 prd_table[i].reserved = 0;
2179 lrbp->utr_descriptor_ptr->prd_table_length = 0;
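/*
 * Illustrative sketch, not part of the driver: the PRD "size" field holds
 * a zero-based byte count, so a 4 KiB segment is encoded as 0xFFF. The
 * helper below mirrors one iteration of the loop above for a single
 * segment. Compiled out via "#if 0".
 */
#if 0
static void example_fill_prd_entry(struct ufshcd_sg_entry *prd,
				   dma_addr_t addr, u32 len)
{
	prd->size = cpu_to_le32(len - 1);	/* zero-based byte count */
	prd->base_addr = cpu_to_le32(lower_32_bits(addr));
	prd->upper_addr = cpu_to_le32(upper_32_bits(addr));
	prd->reserved = 0;
}
#endif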
2186 * ufshcd_enable_intr - enable interrupts
2187 * @hba: per adapter instance
2188 * @intrs: interrupt bits
2190 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2192 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2194 if (hba->ufs_version == UFSHCI_VERSION_10) {
2196 rw = set & INTERRUPT_MASK_RW_VER_10;
2197 set = rw | ((set ^ intrs) & intrs);
2202 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2206 * ufshcd_disable_intr - disable interrupts
2207 * @hba: per adapter instance
2208 * @intrs: interrupt bits
2210 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2212 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2214 if (hba->ufs_version == UFSHCI_VERSION_10) {
2216 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2217 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2218 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2224 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2228 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
2229 * descriptor according to request
2230 * @lrbp: pointer to local reference block
2231 * @upiu_flags: flags required in the header
2232 * @cmd_dir: requests data direction
2234 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2235 u32 *upiu_flags, enum dma_data_direction cmd_dir)
2237 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2241 if (cmd_dir == DMA_FROM_DEVICE) {
2242 data_direction = UTP_DEVICE_TO_HOST;
2243 *upiu_flags = UPIU_CMD_FLAGS_READ;
2244 } else if (cmd_dir == DMA_TO_DEVICE) {
2245 data_direction = UTP_HOST_TO_DEVICE;
2246 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2248 data_direction = UTP_NO_DATA_TRANSFER;
2249 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2252 dword_0 = data_direction | (lrbp->command_type
2253 << UPIU_COMMAND_TYPE_OFFSET);
2255 dword_0 |= UTP_REQ_DESC_INT_CMD;
2257 /* Transfer request descriptor header fields */
2258 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2259 /* dword_1 is reserved, hence it is set to 0 */
2260 req_desc->header.dword_1 = 0;
2262 * assigning invalid value for command status. Controller
2263 * updates OCS on command completion with the actual command status.
2266 req_desc->header.dword_2 =
2267 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2268 /* dword_3 is reserved, hence it is set to 0 */
2269 req_desc->header.dword_3 = 0;
2271 req_desc->prd_table_length = 0;
2275 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc for SCSI commands
2277 * @lrbp: local reference block pointer
2278 * @upiu_flags: flags
2281 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2283 struct scsi_cmnd *cmd = lrbp->cmd;
2284 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2285 unsigned short cdb_len;
2287 /* command descriptor fields */
2288 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2289 UPIU_TRANSACTION_COMMAND, upiu_flags,
2290 lrbp->lun, lrbp->task_tag);
2291 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2292 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2294 /* Total EHS length and Data segment length will be zero */
2295 ucd_req_ptr->header.dword_2 = 0;
2297 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2299 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2300 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2301 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2303 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2307 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc for query requests
2310 * @lrbp: local reference block pointer
2311 * @upiu_flags: flags
2313 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2314 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2316 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2317 struct ufs_query *query = &hba->dev_cmd.query;
2318 u16 len = be16_to_cpu(query->request.upiu_req.length);
2320 /* Query request header */
2321 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2322 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2323 lrbp->lun, lrbp->task_tag);
2324 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2325 0, query->request.query_func, 0, 0);
2327 /* Data segment length is only needed for WRITE_DESC */
2328 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2329 ucd_req_ptr->header.dword_2 =
2330 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2332 ucd_req_ptr->header.dword_2 = 0;
2334 /* Copy the Query Request buffer as is */
2335 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2338 /* Copy the Descriptor */
2339 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2340 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2342 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2345 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2347 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2349 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2351 /* command descriptor fields */
2352 ucd_req_ptr->header.dword_0 =
2354 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2355 /* clear rest of the fields of basic header */
2356 ucd_req_ptr->header.dword_1 = 0;
2357 ucd_req_ptr->header.dword_2 = 0;
2359 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2363 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
2364 * for Device Management Purposes
2365 * @hba: per adapter instance
2366 * @lrbp: pointer to local reference block
2368 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2373 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2374 (hba->ufs_version == UFSHCI_VERSION_11))
2375 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2377 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2379 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2380 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2381 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2382 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2383 ufshcd_prepare_utp_nop_upiu(lrbp);
2391 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2393 * @hba: per adapter instance
2394 * @lrbp: pointer to local reference block
2396 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2401 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2402 (hba->ufs_version == UFSHCI_VERSION_11))
2403 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2405 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2407 if (likely(lrbp->cmd)) {
2408 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2409 lrbp->cmd->sc_data_direction);
2410 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2419 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2420 * @upiu_wlun_id: UPIU W-LUN id
2422 * Returns SCSI W-LUN id
2424 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2426 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2429 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2431 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2432 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2433 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2434 i * sizeof(struct utp_transfer_cmd_desc);
2435 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2437 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2439 lrb->utr_descriptor_ptr = utrdlp + i;
2440 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2441 i * sizeof(struct utp_transfer_req_desc);
2442 lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2443 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2444 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2445 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2446 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2447 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
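/*
 * Illustrative sketch, not part of the driver: every tag owns one slot in
 * the command descriptor array, so the DMA address of any sub-structure
 * is array base + slot offset + field offset, exactly as cached above.
 * Compiled out via "#if 0".
 */
#if 0
static dma_addr_t example_rsp_upiu_dma_addr(struct ufs_hba *hba, int tag)
{
	return hba->ucdl_dma_addr +
	       tag * sizeof(struct utp_transfer_cmd_desc) +
	       offsetof(struct utp_transfer_cmd_desc, response_upiu);
}
#endif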
2451 * ufshcd_queuecommand - main entry point for SCSI requests
2452 * @host: SCSI host pointer
2453 * @cmd: command from SCSI Midlayer
2455 * Returns 0 for success, non-zero in case of failure
2457 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2459 struct ufshcd_lrb *lrbp;
2460 struct ufs_hba *hba;
2461 unsigned long flags;
2465 hba = shost_priv(host);
2467 tag = cmd->request->tag;
2468 if (!ufshcd_valid_tag(hba, tag)) {
2470 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2471 __func__, tag, cmd, cmd->request);
2475 if (!down_read_trylock(&hba->clk_scaling_lock))
2476 return SCSI_MLQUEUE_HOST_BUSY;
2478 spin_lock_irqsave(hba->host->host_lock, flags);
2479 switch (hba->ufshcd_state) {
2480 case UFSHCD_STATE_OPERATIONAL:
2482 case UFSHCD_STATE_EH_SCHEDULED:
2483 case UFSHCD_STATE_RESET:
2484 err = SCSI_MLQUEUE_HOST_BUSY;
2486 case UFSHCD_STATE_ERROR:
2487 set_host_byte(cmd, DID_ERROR);
2488 cmd->scsi_done(cmd);
2491 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2492 __func__, hba->ufshcd_state);
2493 set_host_byte(cmd, DID_BAD_TARGET);
2494 cmd->scsi_done(cmd);
2498 /* if error handling is in progress, don't issue commands */
2499 if (ufshcd_eh_in_progress(hba)) {
2500 set_host_byte(cmd, DID_ERROR);
2501 cmd->scsi_done(cmd);
2504 spin_unlock_irqrestore(hba->host->host_lock, flags);
2506 hba->req_abort_count = 0;
2508 err = ufshcd_hold(hba, true);
2510 err = SCSI_MLQUEUE_HOST_BUSY;
2513 WARN_ON(hba->clk_gating.state != CLKS_ON);
2515 lrbp = &hba->lrb[tag];
2519 lrbp->sense_bufflen = UFS_SENSE_SIZE;
2520 lrbp->sense_buffer = cmd->sense_buffer;
2521 lrbp->task_tag = tag;
2522 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2523 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2524 lrbp->req_abort_skip = false;
2526 ufshcd_comp_scsi_upiu(hba, lrbp);
2528 err = ufshcd_map_sg(hba, lrbp);
2531 ufshcd_release(hba);
2534 /* Make sure descriptors are ready before ringing the doorbell */
2537 /* issue command to the controller */
2538 spin_lock_irqsave(hba->host->host_lock, flags);
2539 ufshcd_vops_setup_xfer_req(hba, tag, true);
2540 ufshcd_send_command(hba, tag);
2542 spin_unlock_irqrestore(hba->host->host_lock, flags);
2544 up_read(&hba->clk_scaling_lock);
2548 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2549 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2552 lrbp->sense_bufflen = 0;
2553 lrbp->sense_buffer = NULL;
2554 lrbp->task_tag = tag;
2555 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2556 lrbp->intr_cmd = true; /* No interrupt aggregation */
2557 hba->dev_cmd.type = cmd_type;
2559 return ufshcd_comp_devman_upiu(hba, lrbp);
2563 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2566 unsigned long flags;
2567 u32 mask = 1 << tag;
2569 /* clear outstanding transaction before retry */
2570 spin_lock_irqsave(hba->host->host_lock, flags);
2571 ufshcd_utrl_clear(hba, tag);
2572 spin_unlock_irqrestore(hba->host->host_lock, flags);
2575 * wait for h/w to clear the corresponding bit in the doorbell.
2576 * max. wait is 1 sec.
2578 err = ufshcd_wait_for_register(hba,
2579 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2580 mask, ~mask, 1000, 1000);
2586 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2588 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2590 /* Get the UPIU response */
2591 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2592 UPIU_RSP_CODE_OFFSET;
2593 return query_res->response;
2597 * ufshcd_dev_cmd_completion() - handles device management command responses
2598 * @hba: per adapter instance
2599 * @lrbp: pointer to local reference block
2602 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2607 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2608 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2611 case UPIU_TRANSACTION_NOP_IN:
2612 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2614 dev_err(hba->dev, "%s: unexpected response %x\n",
2618 case UPIU_TRANSACTION_QUERY_RSP:
2619 err = ufshcd_check_query_response(hba, lrbp);
2621 err = ufshcd_copy_query_response(hba, lrbp);
2623 case UPIU_TRANSACTION_REJECT_UPIU:
2624 /* TODO: handle Reject UPIU Response */
2626 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2631 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2639 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2640 struct ufshcd_lrb *lrbp, int max_timeout)
2643 unsigned long time_left;
2644 unsigned long flags;
2646 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2647 msecs_to_jiffies(max_timeout));
2649 /* Make sure descriptors are ready before ringing the doorbell */
2651 spin_lock_irqsave(hba->host->host_lock, flags);
2652 hba->dev_cmd.complete = NULL;
2653 if (likely(time_left)) {
2654 err = ufshcd_get_tr_ocs(lrbp);
2656 err = ufshcd_dev_cmd_completion(hba, lrbp);
2658 spin_unlock_irqrestore(hba->host->host_lock, flags);
2662 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
2663 __func__, lrbp->task_tag);
2664 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2665 /* successfully cleared the command, retry if needed */
2668 * in case of an error, after clearing the doorbell,
2669 * we also need to clear the outstanding_request
2672 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2679 * ufshcd_exec_dev_cmd - API for sending device management requests
2681 * @cmd_type: specifies the type (NOP, Query...)
2682 * @timeout: timeout in milliseconds
2684 * NOTE: Since there is only one available tag for device management commands,
2685 * it is expected that the caller holds the hba->dev_cmd.lock mutex.
2687 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2688 enum dev_cmd_type cmd_type, int timeout)
2690 struct request_queue *q = hba->cmd_queue;
2691 struct request *req;
2692 struct ufshcd_lrb *lrbp;
2695 struct completion wait;
2696 unsigned long flags;
2698 down_read(&hba->clk_scaling_lock);
2701 * Get a free slot; blk_get_request() sleeps if none is available.
2702 * Even though it can sleep indefinitely, the maximum wait time
2703 * is bounded by the SCSI request timeout.
2705 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2711 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2713 init_completion(&wait);
2714 lrbp = &hba->lrb[tag];
2716 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2720 hba->dev_cmd.complete = &wait;
2722 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2723 /* Make sure descriptors are ready before ringing the doorbell */
2725 spin_lock_irqsave(hba->host->host_lock, flags);
2726 ufshcd_vops_setup_xfer_req(hba, tag, false);
2727 ufshcd_send_command(hba, tag);
2728 spin_unlock_irqrestore(hba->host->host_lock, flags);
2730 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2732 ufshcd_add_query_upiu_trace(hba, tag,
2733 err ? "query_complete_err" : "query_complete");
2736 blk_put_request(req);
2738 up_read(&hba->clk_scaling_lock);
2743 * ufshcd_init_query() - init the query response and request parameters
2744 * @hba: per-adapter instance
2745 * @request: address of the request pointer to be initialized
2746 * @response: address of the response pointer to be initialized
2747 * @opcode: operation to perform
2748 * @idn: flag idn to access
2749 * @index: LU number to access
2750 * @selector: query/flag/descriptor further identification
2752 static inline void ufshcd_init_query(struct ufs_hba *hba,
2753 struct ufs_query_req **request, struct ufs_query_res **response,
2754 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2756 *request = &hba->dev_cmd.query.request;
2757 *response = &hba->dev_cmd.query.response;
2758 memset(*request, 0, sizeof(struct ufs_query_req));
2759 memset(*response, 0, sizeof(struct ufs_query_res));
2760 (*request)->upiu_req.opcode = opcode;
2761 (*request)->upiu_req.idn = idn;
2762 (*request)->upiu_req.index = index;
2763 (*request)->upiu_req.selector = selector;
2766 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2767 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
2772 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2773 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
2776 "%s: failed with error %d, retries %d\n",
2777 __func__, ret, retries);
2784 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2785 __func__, opcode, idn, ret, retries);
2790 * ufshcd_query_flag() - API function for sending flag query requests
2791 * @hba: per-adapter instance
2792 * @opcode: flag query to perform
2793 * @idn: flag idn to access
2794 * @index: flag index to access
2795 * @flag_res: the flag value after the query request completes
2797 * Returns 0 for success, non-zero in case of failure
2799 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2800 enum flag_idn idn, u8 index, bool *flag_res)
2802 struct ufs_query_req *request = NULL;
2803 struct ufs_query_res *response = NULL;
2804 int err, selector = 0;
2805 int timeout = QUERY_REQ_TIMEOUT;
2809 ufshcd_hold(hba, false);
2810 mutex_lock(&hba->dev_cmd.lock);
2811 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2815 case UPIU_QUERY_OPCODE_SET_FLAG:
2816 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2817 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2818 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2820 case UPIU_QUERY_OPCODE_READ_FLAG:
2821 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2823 /* No dummy reads */
2824 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2832 "%s: Expected query flag opcode but got = %d\n",
2838 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2842 "%s: Sending flag query for idn %d failed, err = %d\n",
2843 __func__, idn, err);
2848 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2849 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2852 mutex_unlock(&hba->dev_cmd.lock);
2853 ufshcd_release(hba);
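/*
 * Illustrative sketch, not part of the driver: setting a device flag with
 * the API above, here fBackgroundOpsEn (QUERY_FLAG_IDN_BKOPS_EN from
 * ufs.h). Index is 0 and no read-back is needed for a set. Compiled out
 * via "#if 0".
 */
#if 0
static int example_enable_bkops(struct ufs_hba *hba)
{
	return ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
}
#endif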
2858 * ufshcd_query_attr - API function for sending attribute requests
2859 * @hba: per-adapter instance
2860 * @opcode: attribute opcode
2861 * @idn: attribute idn to access
2862 * @index: index field
2863 * @selector: selector field
2864 * @attr_val: the attribute value after the query request completes
2866 * Returns 0 for success, non-zero in case of failure
2868 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2869 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2871 struct ufs_query_req *request = NULL;
2872 struct ufs_query_res *response = NULL;
2877 ufshcd_hold(hba, false);
2879 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2885 mutex_lock(&hba->dev_cmd.lock);
2886 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2890 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2891 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2892 request->upiu_req.value = cpu_to_be32(*attr_val);
2894 case UPIU_QUERY_OPCODE_READ_ATTR:
2895 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2898 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2904 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2907 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2908 __func__, opcode, idn, index, err);
2912 *attr_val = be32_to_cpu(response->upiu_res.value);
2915 mutex_unlock(&hba->dev_cmd.lock);
2917 ufshcd_release(hba);
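/*
 * Illustrative sketch, not part of the driver: reading a device-wide
 * attribute with the API above, here bBackgroundOpStatus
 * (QUERY_ATTR_IDN_BKOPS_STATUS from ufs.h); @index and @selector are 0.
 * Compiled out via "#if 0".
 */
#if 0
static int example_read_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
#endif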
2922 * ufshcd_query_attr_retry() - API function for sending query
2923 * attribute with retries
2924 * @hba: per-adapter instance
2925 * @opcode: attribute opcode
2926 * @idn: attribute idn to access
2927 * @index: index field
2928 * @selector: selector field
2929 * @attr_val: the attribute value after the query request
2932 * Returns 0 for success, non-zero in case of failure
2934 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2935 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2941 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2942 ret = ufshcd_query_attr(hba, opcode, idn, index,
2943 selector, attr_val);
2945 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2946 __func__, ret, retries);
2953 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2954 __func__, idn, ret, QUERY_REQ_RETRIES);
2958 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2959 enum query_opcode opcode, enum desc_idn idn, u8 index,
2960 u8 selector, u8 *desc_buf, int *buf_len)
2962 struct ufs_query_req *request = NULL;
2963 struct ufs_query_res *response = NULL;
2968 ufshcd_hold(hba, false);
2970 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2976 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2977 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2978 __func__, *buf_len);
2983 mutex_lock(&hba->dev_cmd.lock);
2984 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2986 hba->dev_cmd.query.descriptor = desc_buf;
2987 request->upiu_req.length = cpu_to_be16(*buf_len);
2990 case UPIU_QUERY_OPCODE_WRITE_DESC:
2991 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2993 case UPIU_QUERY_OPCODE_READ_DESC:
2994 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2998 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3004 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3007 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3008 __func__, opcode, idn, index, err);
3012 *buf_len = be16_to_cpu(response->upiu_res.length);
3015 hba->dev_cmd.query.descriptor = NULL;
3016 mutex_unlock(&hba->dev_cmd.lock);
3018 ufshcd_release(hba);
3023 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3024 * @hba: per-adapter instance
3025 * @opcode: attribute opcode
3026 * @idn: attribute idn to access
3027 * @index: index field
3028 * @selector: selector field
3029 * @desc_buf: the buffer that contains the descriptor
3030 * @buf_len: length parameter passed to the device
3032 * Returns 0 for success, non-zero in case of failure.
3033 * The buf_len parameter will contain, on return, the length parameter
3034 * received on the response.
3036 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3037 enum query_opcode opcode,
3038 enum desc_idn idn, u8 index,
3040 u8 *desc_buf, int *buf_len)
3045 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3046 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3047 selector, desc_buf, buf_len);
3048 if (!err || err == -EINVAL)
3056 * ufshcd_read_desc_length - read the specified descriptor length from header
3057 * @hba: Pointer to adapter instance
3058 * @desc_id: descriptor idn value
3059 * @desc_index: descriptor index
3060 * @desc_length: pointer to variable to read the length of descriptor
3062 * Return 0 in case of success, non-zero otherwise
3064 static int ufshcd_read_desc_length(struct ufs_hba *hba,
3065 enum desc_idn desc_id,
3070 u8 header[QUERY_DESC_HDR_SIZE];
3071 int header_len = QUERY_DESC_HDR_SIZE;
3073 if (desc_id >= QUERY_DESC_IDN_MAX)
3076 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3077 desc_id, desc_index, 0, header,
3081 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3084 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3085 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3086 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3091 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3097 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3098 * @hba: Pointer to adapter instance
3099 * @desc_id: descriptor idn value
3100 * @desc_len: mapped desc length (out)
3102 * Return 0 in case of success, non-zero otherwise
3104 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3105 enum desc_idn desc_id, int *desc_len)
3108 case QUERY_DESC_IDN_DEVICE:
3109 *desc_len = hba->desc_size.dev_desc;
3111 case QUERY_DESC_IDN_POWER:
3112 *desc_len = hba->desc_size.pwr_desc;
3114 case QUERY_DESC_IDN_GEOMETRY:
3115 *desc_len = hba->desc_size.geom_desc;
3117 case QUERY_DESC_IDN_CONFIGURATION:
3118 *desc_len = hba->desc_size.conf_desc;
3120 case QUERY_DESC_IDN_UNIT:
3121 *desc_len = hba->desc_size.unit_desc;
3123 case QUERY_DESC_IDN_INTERCONNECT:
3124 *desc_len = hba->desc_size.interc_desc;
3126 case QUERY_DESC_IDN_STRING:
3127 *desc_len = QUERY_DESC_MAX_SIZE;
3129 case QUERY_DESC_IDN_HEALTH:
3130 *desc_len = hba->desc_size.hlth_desc;
3132 case QUERY_DESC_IDN_RFU_0:
3133 case QUERY_DESC_IDN_RFU_1:
3142 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
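/*
 * Illustrative sketch, not part of the driver: size the buffer from the
 * mapped length before requesting the full descriptor, as other callers
 * in this driver do. Compiled out via "#if 0".
 */
#if 0
static int example_read_device_desc(struct ufs_hba *hba, u8 *buf,
				    int buf_size)
{
	int len = 0;
	int err;

	err = ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_DEVICE, &len);
	if (err)
		return err;
	if (len > buf_size)
		return -EINVAL;

	return ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					     QUERY_DESC_IDN_DEVICE, 0, 0,
					     buf, &len);
}
#endif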
3145 * ufshcd_read_desc_param - read the specified descriptor parameter
3146 * @hba: Pointer to adapter instance
3147 * @desc_id: descriptor idn value
3148 * @desc_index: descriptor index
3149 * @param_offset: offset of the parameter to read
3150 * @param_read_buf: pointer to buffer where parameter would be read
3151 * @param_size: sizeof(param_read_buf)
3153 * Return 0 in case of success, non-zero otherwise
3155 int ufshcd_read_desc_param(struct ufs_hba *hba,
3156 enum desc_idn desc_id,
3165 bool is_kmalloc = true;
3168 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3171 /* Get the max length of descriptor from structure filled up at probe time */
3174 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3177 if (ret || !buff_len) {
3178 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3183 /* Check whether we need temp memory */
3184 if (param_offset != 0 || param_size < buff_len) {
3185 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3189 desc_buf = param_read_buf;
3193 /* Request for full descriptor */
3194 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3195 desc_id, desc_index, 0,
3196 desc_buf, &buff_len);
3199 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3200 __func__, desc_id, desc_index, param_offset, ret);
3205 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3206 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3207 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3212 /* Make sure we do not copy more data than is available */
3213 if (is_kmalloc && param_size > buff_len)
3214 param_size = buff_len;
3217 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3224 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3225 enum desc_idn desc_id,
3230 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3235 * struct uc_string_id - unicode string
3237 * @len: size of this descriptor, header included
3238 * @type: descriptor type
3239 * @uc: unicode string character
3241 struct uc_string_id {
3247 /* replace non-printable or non-ASCII characters with spaces */
3248 static inline char ufshcd_remove_non_printable(u8 ch)
3250 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3254 * ufshcd_read_string_desc - read string descriptor
3255 * @hba: pointer to adapter instance
3256 * @desc_index: descriptor index
3257 * @buf: pointer to buffer where descriptor would be read,
3258 * the caller should free the memory.
3259 * @ascii: if true convert from unicode to ascii characters
3260 * null terminated string.
3263 * * string size on success.
3264 * * -ENOMEM: on allocation failure
3265 * * -EINVAL: on a wrong parameter
3267 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3268 u8 **buf, bool ascii)
3270 struct uc_string_id *uc_str;
3277 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3281 ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
3283 QUERY_DESC_MAX_SIZE);
3285 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3286 QUERY_REQ_RETRIES, ret);
3291 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3292 dev_dbg(hba->dev, "String Desc is of zero length\n");
3301 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3302 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3303 str = kzalloc(ascii_len, GFP_KERNEL);
3310 * The descriptor contains the string in UTF-16 format;
3311 * convert it to UTF-8 so it can be displayed.
3313 ret = utf16s_to_utf8s(uc_str->uc,
3314 uc_str->len - QUERY_DESC_HDR_SIZE,
3315 UTF16_BIG_ENDIAN, str, ascii_len);
3317 /* replace non-printable or non-ASCII characters with spaces */
3318 for (i = 0; i < ret; i++)
3319 str[i] = ufshcd_remove_non_printable(str[i]);
3324 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3338 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3339 * @hba: Pointer to adapter instance
3341 * @param_offset: offset of the parameter to read
3342 * @param_read_buf: pointer to buffer where parameter would be read
3343 * @param_size: sizeof(param_read_buf)
3345 * Return 0 in case of success, non-zero otherwise
3347 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3349 enum unit_desc_param param_offset,
3354 * Unit descriptors are only available for general purpose LUs (LUN id
3355 * from 0 to 7) and RPMB Well known LU.
3357 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3360 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3361 param_offset, param_read_buf, param_size);
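/*
 * Illustrative sketch, not part of the driver: reading one unit
 * descriptor parameter, here bLUQueueDepth (UNIT_DESC_PARAM_LU_Q_DEPTH
 * from ufs.h), the same way ufshcd_set_queue_depth() below does.
 * Compiled out via "#if 0".
 */
#if 0
static int example_read_lu_queue_depth(struct ufs_hba *hba, int lun,
				       u8 *lun_qdepth)
{
	return ufshcd_read_unit_desc_param(hba, lun,
					   UNIT_DESC_PARAM_LU_Q_DEPTH,
					   lun_qdepth, sizeof(*lun_qdepth));
}
#endif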
3364 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3367 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3369 if (hba->dev_info.wspecversion >= 0x300) {
3370 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3371 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3374 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3377 if (gating_wait == 0) {
3378 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3379 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3383 hba->dev_info.clk_gating_wait_us = gating_wait;
3390 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3391 * @hba: per adapter instance
3393 * 1. Allocate DMA memory for Command Descriptor array
3394 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3395 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3396 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3398 * 4. Allocate memory for local reference block(lrb).
3400 * Returns 0 for success, non-zero in case of failure
3402 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3404 size_t utmrdl_size, utrdl_size, ucdl_size;
3406 /* Allocate memory for UTP command descriptors */
3407 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3408 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3410 &hba->ucdl_dma_addr,
3414 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3415 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE:
3416 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3417 * be aligned to 128 bytes as well.
3419 if (!hba->ucdl_base_addr ||
3420 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3422 "Command Descriptor Memory allocation failed\n");
3427 * Allocate memory for UTP Transfer descriptors
3428 * UFSHCI requires 1024 byte alignment of UTRD
3430 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3431 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3433 &hba->utrdl_dma_addr,
3435 if (!hba->utrdl_base_addr ||
3436 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3438 "Transfer Descriptor Memory allocation failed\n");
3443 * Allocate memory for UTP Task Management descriptors
3444 * UFSHCI requires 1024 byte alignment of UTMRD
3446 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3447 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3449 &hba->utmrdl_dma_addr,
3451 if (!hba->utmrdl_base_addr ||
3452 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3454 "Task Management Descriptor Memory allocation failed\n");
3458 /* Allocate memory for local reference block */
3459 hba->lrb = devm_kcalloc(hba->dev,
3460 hba->nutrs, sizeof(struct ufshcd_lrb),
3463 dev_err(hba->dev, "LRB Memory allocation failed\n");
3472 * ufshcd_host_memory_configure - configure local reference block with
3474 * @hba: per adapter instance
3476 * Configure Host memory space
3477 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3479 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3481 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3482 * into local reference block.
3484 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3486 struct utp_transfer_req_desc *utrdlp;
3487 dma_addr_t cmd_desc_dma_addr;
3488 dma_addr_t cmd_desc_element_addr;
3489 u16 response_offset;
3494 utrdlp = hba->utrdl_base_addr;
3497 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3499 offsetof(struct utp_transfer_cmd_desc, prd_table);
3501 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3502 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3504 for (i = 0; i < hba->nutrs; i++) {
3505 /* Configure UTRD with command descriptor base address */
3506 cmd_desc_element_addr =
3507 (cmd_desc_dma_addr + (cmd_desc_size * i));
3508 utrdlp[i].command_desc_base_addr_lo =
3509 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3510 utrdlp[i].command_desc_base_addr_hi =
3511 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3513 /* Response UPIU and PRDT offsets are expressed in double words */
3514 utrdlp[i].response_upiu_offset =
3515 cpu_to_le16(response_offset >> 2);
3516 utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2);
3517 utrdlp[i].response_upiu_length =
3518 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3520 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3525 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3526 * @hba: per adapter instance
3528 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3529 * in order to initialize the Unipro link startup procedure.
3530 * Once the Unipro links are up, the device connected to the controller
3533 * Returns 0 on success, non-zero value on failure
3535 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3537 struct uic_command uic_cmd = {0};
3540 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3542 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3545 "dme-link-startup: error code %d\n", ret);
3549 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3551 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3552 unsigned long min_sleep_time_us;
3554 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3558 * last_dme_cmd_tstamp will be 0 only for the first call to this function.
3561 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3562 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3564 unsigned long delta =
3565 (unsigned long) ktime_to_us(
3566 ktime_sub(ktime_get(),
3567 hba->last_dme_cmd_tstamp));
3569 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3571 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3573 return; /* no more delay required */
3576 /* allow sleep for extra 50us if needed */
3577 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3581 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3582 * @hba: per adapter instance
3583 * @attr_sel: uic command argument1
3584 * @attr_set: attribute set type as uic command argument2
3585 * @mib_val: setting value as uic command argument3
3586 * @peer: indicate whether peer or local
3588 * Returns 0 on success, non-zero value on failure
3590 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3591 u8 attr_set, u32 mib_val, u8 peer)
3593 struct uic_command uic_cmd = {0};
3594 static const char *const action[] = {
3598 const char *set = action[!!peer];
3600 int retries = UFS_UIC_COMMAND_RETRIES;
3602 uic_cmd.command = peer ?
3603 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3604 uic_cmd.argument1 = attr_sel;
3605 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3606 uic_cmd.argument3 = mib_val;
3609 /* for peer attributes we retry upon failure */
3610 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3612 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3613 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3614 } while (ret && peer && --retries);
3617 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3618 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3619 UFS_UIC_COMMAND_RETRIES - retries);
3623 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3626 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3627 * @hba: per adapter instance
3628 * @attr_sel: uic command argument1
3629 * @mib_val: the value of the attribute as returned by the UIC command
3630 * @peer: indicate whether peer or local
3632 * Returns 0 on success, non-zero value on failure
3634 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3635 u32 *mib_val, u8 peer)
3637 struct uic_command uic_cmd = {0};
3638 static const char *const action[] = {
3642 const char *get = action[!!peer];
3644 int retries = UFS_UIC_COMMAND_RETRIES;
3645 struct ufs_pa_layer_attr orig_pwr_info;
3646 struct ufs_pa_layer_attr temp_pwr_info;
3647 bool pwr_mode_change = false;
3649 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3650 orig_pwr_info = hba->pwr_info;
3651 temp_pwr_info = orig_pwr_info;
3653 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3654 orig_pwr_info.pwr_rx == FAST_MODE) {
3655 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3656 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3657 pwr_mode_change = true;
3658 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3659 orig_pwr_info.pwr_rx == SLOW_MODE) {
3660 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3661 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3662 pwr_mode_change = true;
3664 if (pwr_mode_change) {
3665 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3671 uic_cmd.command = peer ?
3672 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3673 uic_cmd.argument1 = attr_sel;
3676 /* for peer attributes we retry upon failure */
3677 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3679 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3680 get, UIC_GET_ATTR_ID(attr_sel), ret);
3681 } while (ret && peer && --retries);
3684 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3685 get, UIC_GET_ATTR_ID(attr_sel),
3686 UFS_UIC_COMMAND_RETRIES - retries);
3688 if (mib_val && !ret)
3689 *mib_val = uic_cmd.argument3;
3691 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3693 ufshcd_change_power_mode(hba, &orig_pwr_info);
3697 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
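/*
 * Note: most callers use the thin wrappers from ufshcd.h rather than the
 * _attr functions above directly. A sketch of that mapping (assumed from
 * the header, compiled out here):
 */
#if 0
static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_get(struct ufs_hba *hba, u32 attr_sel,
				 u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}
#endif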
3700 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3701 * state) and waits for it to take effect.
3703 * @hba: per adapter instance
3704 * @cmd: UIC command to execute
3706 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3707 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3708 * and the device UniPro link, hence their final completion is indicated by
3709 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3710 * addition to normal UIC command completion Status (UCCS). This function only
3711 * returns after the relevant status bits indicate the completion.
3713 * Returns 0 on success, non-zero value on failure
3715 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3717 struct completion uic_async_done;
3718 unsigned long flags;
3721 bool reenable_intr = false;
3723 mutex_lock(&hba->uic_cmd_mutex);
3724 init_completion(&uic_async_done);
3725 ufshcd_add_delay_before_dme_cmd(hba);
3727 spin_lock_irqsave(hba->host->host_lock, flags);
3728 hba->uic_async_done = &uic_async_done;
3729 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3730 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3732 * Make sure UIC command completion interrupt is disabled before
3733 * issuing UIC command.
3736 reenable_intr = true;
3738 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3739 spin_unlock_irqrestore(hba->host->host_lock, flags);
3742 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3743 cmd->command, cmd->argument3, ret);
3747 if (!wait_for_completion_timeout(hba->uic_async_done,
3748 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3750 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3751 cmd->command, cmd->argument3);
3756 status = ufshcd_get_upmcrs(hba);
3757 if (status != PWR_LOCAL) {
3759 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3760 cmd->command, status);
3761 ret = (status != PWR_OK) ? status : -1;
3765 ufshcd_print_host_state(hba);
3766 ufshcd_print_pwr_info(hba);
3767 ufshcd_print_host_regs(hba);
3770 spin_lock_irqsave(hba->host->host_lock, flags);
3771 hba->active_uic_cmd = NULL;
3772 hba->uic_async_done = NULL;
3774 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3775 spin_unlock_irqrestore(hba->host->host_lock, flags);
3776 mutex_unlock(&hba->uic_cmd_mutex);
3782 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3783 * using DME_SET primitives.
3784 * @hba: per adapter instance
3785 * @mode: power mode value
3787 * Returns 0 on success, non-zero value on failure
3789 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3791 struct uic_command uic_cmd = {0};
3794 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3795 ret = ufshcd_dme_set(hba,
3796 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3798 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3804 uic_cmd.command = UIC_CMD_DME_SET;
3805 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3806 uic_cmd.argument3 = mode;
3807 ufshcd_hold(hba, false);
3808 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3809 ufshcd_release(hba);
3815 int ufshcd_link_recovery(struct ufs_hba *hba)
3818 unsigned long flags;
3820 spin_lock_irqsave(hba->host->host_lock, flags);
3821 hba->ufshcd_state = UFSHCD_STATE_RESET;
3822 ufshcd_set_eh_in_progress(hba);
3823 spin_unlock_irqrestore(hba->host->host_lock, flags);
3825 /* Reset the attached device */
3826 ufshcd_vops_device_reset(hba);
3828 ret = ufshcd_host_reset_and_restore(hba);
3830 spin_lock_irqsave(hba->host->host_lock, flags);
3832 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3833 ufshcd_clear_eh_in_progress(hba);
3834 spin_unlock_irqrestore(hba->host->host_lock, flags);
3837 dev_err(hba->dev, "%s: link recovery failed, err %d",
3842 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
3844 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3847 struct uic_command uic_cmd = {0};
3848 ktime_t start = ktime_get();
3850 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3852 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3853 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3854 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3855 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3860 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3864 * If link recovery fails then return error code returned from
3865 * ufshcd_link_recovery().
3866 * If link recovery succeeds then return -EAGAIN so that the hibern8
3867 * enter can be retried.
3869 err = ufshcd_link_recovery(hba);
3871 dev_err(hba->dev, "%s: link recovery failed", __func__);
3877 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3883 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3885 int ret = 0, retries;
3887 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3888 ret = __ufshcd_uic_hibern8_enter(hba);
3896 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3898 struct uic_command uic_cmd = {0};
3900 ktime_t start = ktime_get();
3902 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3904 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3905 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3906 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3907 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3910 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3912 ret = ufshcd_link_recovery(hba);
3914 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3916 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3917 hba->ufs_stats.hibern8_exit_cnt++;
3922 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
3924 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
3926 unsigned long flags;
3927 bool update = false;
3929 if (!ufshcd_is_auto_hibern8_supported(hba))
3932 spin_lock_irqsave(hba->host->host_lock, flags);
3933 if (hba->ahit != ahit) {
3937 spin_unlock_irqrestore(hba->host->host_lock, flags);
3939 if (update && !pm_runtime_suspended(hba->dev)) {
3940 pm_runtime_get_sync(hba->dev);
3941 ufshcd_hold(hba, false);
3942 ufshcd_auto_hibern8_enable(hba);
3943 ufshcd_release(hba);
3944 pm_runtime_put(hba->dev);
3947 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
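/*
 * Illustrative sketch, not part of the driver: composing an AHIT value
 * for the update helper above. The field masks are assumed from
 * ufshci.h; a scale of 3 selects 1 ms units, so this requests a 150 ms
 * idle timer. Compiled out via "#if 0".
 */
#if 0
static void example_set_ahit(struct ufs_hba *hba)
{
	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufshcd_auto_hibern8_update(hba, ahit);
}
#endif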
3949 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
3951 unsigned long flags;
3953 if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
3956 spin_lock_irqsave(hba->host->host_lock, flags);
3957 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3958 spin_unlock_irqrestore(hba->host->host_lock, flags);
3962 * ufshcd_init_pwr_info - setting the POR (power on reset)
3963 * values in hba power info
3964 * @hba: per-adapter instance
3966 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3968 hba->pwr_info.gear_rx = UFS_PWM_G1;
3969 hba->pwr_info.gear_tx = UFS_PWM_G1;
3970 hba->pwr_info.lane_rx = 1;
3971 hba->pwr_info.lane_tx = 1;
3972 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3973 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3974 hba->pwr_info.hs_rate = 0;
3978 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3979 * @hba: per-adapter instance
3981 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3983 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3985 if (hba->max_pwr_info.is_valid)
3988 pwr_info->pwr_tx = FAST_MODE;
3989 pwr_info->pwr_rx = FAST_MODE;
3990 pwr_info->hs_rate = PA_HS_MODE_B;
3992 /* Get the connected lane count */
3993 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3994 &pwr_info->lane_rx);
3995 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3996 &pwr_info->lane_tx);
3998 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3999 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4007 * First, get the maximum gears of HS speed.
4008 * A zero value means there is no HSGEAR capability.
4009 * Then, get the maximum gears of PWM speed.
4011 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4012 if (!pwr_info->gear_rx) {
4013 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4014 &pwr_info->gear_rx);
4015 if (!pwr_info->gear_rx) {
4016 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4017 __func__, pwr_info->gear_rx);
4020 pwr_info->pwr_rx = SLOW_MODE;
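	/*
	 * The peer's RX capability is this host's TX capability, hence
	 * PA_MAXRXHSGEAR is read from the peer to determine gear_tx.
	 */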
4023 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4024 &pwr_info->gear_tx);
4025 if (!pwr_info->gear_tx) {
4026 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4027 &pwr_info->gear_tx);
4028 if (!pwr_info->gear_tx) {
4029 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4030 __func__, pwr_info->gear_tx);
4033 pwr_info->pwr_tx = SLOW_MODE;
4036 hba->max_pwr_info.is_valid = true;
4040 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4041 struct ufs_pa_layer_attr *pwr_mode)
4045 /* if already configured to the requested pwr_mode */
4046 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4047 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4048 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4049 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4050 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4051 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4052 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4053 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4058 * Configure the attributes required for the power mode change:
4059 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4060 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4063 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4064 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4066 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4067 pwr_mode->pwr_rx == FAST_MODE)
4068 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4070 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4072 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4073 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4075 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4076 pwr_mode->pwr_tx == FAST_MODE)
4077 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4079 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4081 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4082 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4083 pwr_mode->pwr_rx == FAST_MODE ||
4084 pwr_mode->pwr_tx == FAST_MODE)
4085 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4088 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4089 DL_FC0ProtectionTimeOutVal_Default);
4090 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4091 DL_TC0ReplayTimeOutVal_Default);
4092 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4093 DL_AFC0ReqTimeOutVal_Default);
4094 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4095 DL_FC1ProtectionTimeOutVal_Default);
4096 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4097 DL_TC1ReplayTimeOutVal_Default);
4098 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4099 DL_AFC1ReqTimeOutVal_Default);
4101 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4102 DL_FC0ProtectionTimeOutVal_Default);
4103 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4104 DL_TC0ReplayTimeOutVal_Default);
4105 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4106 DL_AFC0ReqTimeOutVal_Default);
4108 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4109 | pwr_mode->pwr_tx);
4113 "%s: power mode change failed %d\n", __func__, ret);
4115 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4118 memcpy(&hba->pwr_info, pwr_mode,
4119 sizeof(struct ufs_pa_layer_attr));
4126 * ufshcd_config_pwr_mode - configure a new power mode
4127 * @hba: per-adapter instance
4128 * @desired_pwr_mode: desired power configuration
4130 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4131 struct ufs_pa_layer_attr *desired_pwr_mode)
4133 struct ufs_pa_layer_attr final_params = { 0 };
4136 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4137 desired_pwr_mode, &final_params);
4140 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4142 ret = ufshcd_change_power_mode(hba, &final_params);
4146 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
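/*
 * Illustrative sketch, not part of the driver: a typical caller scales
 * the link to the fastest negotiated settings by feeding the cached
 * maximum back in, as the probe path does after
 * ufshcd_get_max_pwr_mode(). Compiled out via "#if 0".
 */
#if 0
static int example_scale_to_max_gear(struct ufs_hba *hba)
{
	if (ufshcd_get_max_pwr_mode(hba))
		return -EINVAL;

	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}
#endif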
4149 * ufshcd_complete_dev_init() - checks device readiness
4150 * @hba: per-adapter instance
4152 * Set fDeviceInit flag and poll until device toggles it.
4154 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4158 bool flag_res = true;
4160 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4161 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4164 "%s setting fDeviceInit flag failed with error %d\n",
4169 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4170 for (i = 0; i < 1000 && !err && flag_res; i++)
4171 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4172 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4176 "%s reading fDeviceInit flag failed with error %d\n",
4180 "%s fDeviceInit was not cleared by the device\n",
4188 * ufshcd_make_hba_operational - Make UFS controller operational
4189 * @hba: per adapter instance
4191 * To bring UFS host controller to operational state,
4192 * 1. Enable required interrupts
4193 * 2. Configure interrupt aggregation
4194 * 3. Program UTRL and UTMRL base address
4195 * 4. Configure run-stop-registers
4197 * Returns 0 on success, non-zero value on failure
4199 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4204 /* Enable required interrupts */
4205 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4207 /* Configure interrupt aggregation */
4208 if (ufshcd_is_intr_aggr_allowed(hba))
4209 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4211 ufshcd_disable_intr_aggr(hba);
4213 /* Configure UTRL and UTMRL base address registers */
4214 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4215 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4216 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4217 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4218 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4219 REG_UTP_TASK_REQ_LIST_BASE_L);
4220 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4221 REG_UTP_TASK_REQ_LIST_BASE_H);
4224 * Make sure base address and interrupt setup are updated before
4225 * enabling the run/stop registers below.
4230 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4232 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4233 if (!(ufshcd_get_lists_status(reg))) {
4234 ufshcd_enable_run_stop_reg(hba);
4237 "Host controller not ready to process requests");
4245 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4248 * ufshcd_hba_stop - Send controller to reset state
4249 * @hba: per adapter instance
4251 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
4253 unsigned long flags;
4257 * Obtain the host lock to prevent the controller from being disabled
4258 * while the UFS interrupt handler is active on another CPU.
4260 spin_lock_irqsave(hba->host->host_lock, flags);
4261 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4262 spin_unlock_irqrestore(hba->host->host_lock, flags);
4264 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4265 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4268 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4272 * ufshcd_hba_enable - initialize the controller
4273 * @hba: per adapter instance
4275 * The controller resets itself and controller firmware initialization
4276 * sequence kicks off. When the controller is ready it will set
4277 * the Host Controller Enable bit to 1.
4279 * Returns 0 on success, non-zero value on failure
4281 int ufshcd_hba_enable(struct ufs_hba *hba)
4285 if (!ufshcd_is_hba_active(hba))
4286 /* change controller state to "reset state" */
4287 ufshcd_hba_stop(hba);
4289 /* UniPro link is disabled at this point */
4290 ufshcd_set_link_off(hba);
4292 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4294 /* start controller initialization sequence */
4295 ufshcd_hba_start(hba);
4298 * To initialize a UFS host controller, the HCE bit must be set to 1.
4299 * During initialization the HCE bit value changes from 1->0->1.
4300 * When the host controller completes initialization sequence
4301 * it sets the value of HCE bit to 1. The same HCE bit is read back
4302 * to check if the controller has completed initialization sequence.
4303 * So without this delay, the HCE = 1 value set by the previous
4304 * instruction might be read back prematurely.
4305 * This delay can be changed based on the controller.
4307 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4309 /* wait for the host controller to complete initialization */
4311 while (ufshcd_is_hba_active(hba)) {
4316 "Controller enable failed\n");
4319 usleep_range(1000, 1100);
4322 /* enable UIC related interrupts */
4323 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4325 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4329 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4331 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4333 int tx_lanes = 0, i, err = 0;
4336 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4339 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4341 for (i = 0; i < tx_lanes; i++) {
4343 err = ufshcd_dme_set(hba,
4344 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4345 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4348 err = ufshcd_dme_peer_set(hba,
4349 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4350 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4353 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4354 __func__, peer, i, err);
4362 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4364 return ufshcd_disable_tx_lcc(hba, true);
4367 void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4370 reg_hist->reg[reg_hist->pos] = reg;
4371 reg_hist->tstamp[reg_hist->pos] = ktime_get();
4372 reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4374 EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
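/*
 * For illustration: ufshcd_update_reg_hist() above is a fixed-size ring --
 * it overwrites the slot at @pos and advances @pos modulo the history
 * length, so the most recent UFS_ERR_REG_HIST_LENGTH events are always
 * retained. Standalone sketch (HIST_LEN and the struct layout are
 * stand-ins, not the driver's types; timestamps omitted):
 */
#include <stdint.h>
#include <stdio.h>

#define HIST_LEN 8	/* stands in for UFS_ERR_REG_HIST_LENGTH */

struct err_hist {
	uint32_t reg[HIST_LEN];
	int pos;		/* next slot to overwrite */
};

static void hist_update(struct err_hist *h, uint32_t reg)
{
	h->reg[h->pos] = reg;
	h->pos = (h->pos + 1) % HIST_LEN;	/* wrap: oldest entry evicted */
}

int main(void)
{
	struct err_hist h = { { 0 }, 0 };

	for (uint32_t i = 1; i <= 10; i++)
		hist_update(&h, i);
	/* only events 3..10 remain; h.pos points at the oldest (3) */
	for (int i = 0; i < HIST_LEN; i++)
		printf("%u ", h.reg[(h.pos + i) % HIST_LEN]);
	printf("\n");	/* prints: 3 4 5 6 7 8 9 10 */
	return 0;
}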
4377 * ufshcd_link_startup - Initialize unipro link startup
4378 * @hba: per adapter instance
4380 * Returns 0 for success, non-zero in case of failure
4382 static int ufshcd_link_startup(struct ufs_hba *hba)
4385 int retries = DME_LINKSTARTUP_RETRIES;
4386 bool link_startup_again = false;
4389	 * If the UFS device isn't active then we will have to issue link startup
4390	 * 2 times to make sure the device state moves to active.
4392 if (!ufshcd_is_ufs_dev_active(hba))
4393 link_startup_again = true;
4397 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4399 ret = ufshcd_dme_link_startup(hba);
4401 /* check if device is detected by inter-connect layer */
4402 if (!ret && !ufshcd_is_device_present(hba)) {
4403 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4405 dev_err(hba->dev, "%s: Device not present\n", __func__);
4411 * DME link lost indication is only received when link is up,
4412 * but we can't be sure if the link is up until link startup
4413 * succeeds. So reset the local Uni-Pro and try again.
4415 if (ret && ufshcd_hba_enable(hba)) {
4416 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4420 } while (ret && retries--);
4423		/* failed to get the link up... give up */
4424 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4429 if (link_startup_again) {
4430 link_startup_again = false;
4431 retries = DME_LINKSTARTUP_RETRIES;
4435 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4436 ufshcd_init_pwr_info(hba);
4437 ufshcd_print_pwr_info(hba);
4439 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4440 ret = ufshcd_disable_device_tx_lcc(hba);
4445 /* Include any host controller configuration via UIC commands */
4446 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4450 ret = ufshcd_make_hba_operational(hba);
4453 dev_err(hba->dev, "link startup failed %d\n", ret);
4454 ufshcd_print_host_state(hba);
4455 ufshcd_print_pwr_info(hba);
4456 ufshcd_print_host_regs(hba);
4462 * ufshcd_verify_dev_init() - Verify device initialization
4463 * @hba: per-adapter instance
4465 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4466 * device Transport Protocol (UTP) layer is ready after a reset.
4467 * If the UTP layer at the device side is not initialized, it may
4468 * not respond with a NOP IN UPIU within the timeout of %NOP_OUT_TIMEOUT,
4469 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4471 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4476 ufshcd_hold(hba, false);
4477 mutex_lock(&hba->dev_cmd.lock);
4478 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4479 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4482 if (!err || err == -ETIMEDOUT)
4485 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4487 mutex_unlock(&hba->dev_cmd.lock);
4488 ufshcd_release(hba);
4491 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4496 * ufshcd_set_queue_depth - set lun queue depth
4497 * @sdev: pointer to SCSI device
4499 * Read the bLUQueueDepth value and activate SCSI tagged command
4500 * queueing. For WLUNs, the queue depth is set to 1. For best-effort
4501 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4502 * number of requests the host can queue.
4504 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4508 struct ufs_hba *hba;
4510 hba = shost_priv(sdev->host);
4512 lun_qdepth = hba->nutrs;
4513 ret = ufshcd_read_unit_desc_param(hba,
4514 ufshcd_scsi_to_upiu_lun(sdev->lun),
4515 UNIT_DESC_PARAM_LU_Q_DEPTH,
4517 sizeof(lun_qdepth));
4519	/* Some WLUNs don't support the unit descriptor */
4520 if (ret == -EOPNOTSUPP)
4522 else if (!lun_qdepth)
4523 /* eventually, we can figure out the real queue depth */
4524 lun_qdepth = hba->nutrs;
4526 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4528 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4529 __func__, lun_qdepth);
4530 scsi_change_queue_depth(sdev, lun_qdepth);
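/*
 * For illustration: the queue-depth fallbacks described above, condensed
 * into one standalone decision function. The "read_err" path stands in for
 * a WLUN whose unit descriptor read fails (per the kernel-doc, such LUs
 * get a depth of 1); all names here are illustrative, not driver code.
 */
#include <stdio.h>

static int pick_queue_depth(int read_err, unsigned char bLUQueueDepth,
			    int host_max /* hba->nutrs */)
{
	if (read_err)			/* e.g. WLUN w/o unit descriptor */
		return 1;
	if (bLUQueueDepth == 0)		/* best effort: use host maximum */
		return host_max;
	return bLUQueueDepth < host_max ? bLUQueueDepth : host_max;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_queue_depth(1, 0, 32),	/* -> 1             */
	       pick_queue_depth(0, 0, 32),	/* -> 32            */
	       pick_queue_depth(0, 64, 32));	/* -> 32 (clamped)  */
	return 0;
}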
4534 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4535 * @hba: per-adapter instance
4536 * @lun: UFS device lun id
4537 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4539 * Returns 0 in case of success; the b_lu_write_protect status is returned
4540 * in the @b_lu_write_protect parameter.
4541 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4542 * Returns -EINVAL in case of invalid parameters passed to this function.
4544 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4546 u8 *b_lu_write_protect)
4550 if (!b_lu_write_protect)
4553	 * According to the UFS device spec, the RPMB LU can't be write
4554	 * protected, so skip reading the bLUWriteProtect parameter for
4555	 * it. For other W-LUs, the UNIT DESCRIPTOR is not available.
4557 else if (lun >= hba->dev_info.max_lu_supported)
4560 ret = ufshcd_read_unit_desc_param(hba,
4562 UNIT_DESC_PARAM_LU_WR_PROTECT,
4564 sizeof(*b_lu_write_protect));
4569 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4571 * @hba: per-adapter instance
4572 * @sdev: pointer to SCSI device
4575 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4576 struct scsi_device *sdev)
4578 if (hba->dev_info.f_power_on_wp_en &&
4579 !hba->dev_info.is_lu_power_on_wp) {
4580 u8 b_lu_write_protect;
4582 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4583 &b_lu_write_protect) &&
4584 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4585 hba->dev_info.is_lu_power_on_wp = true;
4590 * ufshcd_slave_alloc - handle initial SCSI device configurations
4591 * @sdev: pointer to SCSI device
4595 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4597 struct ufs_hba *hba;
4599 hba = shost_priv(sdev->host);
4601 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4602 sdev->use_10_for_ms = 1;
4604 /* DBD field should be set to 1 in mode sense(10) */
4605 sdev->set_dbd_for_ms = 1;
4607 /* allow SCSI layer to restart the device in case of errors */
4608 sdev->allow_restart = 1;
4610 /* REPORT SUPPORTED OPERATION CODES is not supported */
4611 sdev->no_report_opcodes = 1;
4613 /* WRITE_SAME command is not supported */
4614 sdev->no_write_same = 1;
4616 ufshcd_set_queue_depth(sdev);
4618 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4624 * ufshcd_change_queue_depth - change queue depth
4625 * @sdev: pointer to SCSI device
4626 * @depth: required depth to set
4628 * Change the queue depth and make sure the maximum limits are not exceeded.
4630 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4632 struct ufs_hba *hba = shost_priv(sdev->host);
4634 if (depth > hba->nutrs)
4636 return scsi_change_queue_depth(sdev, depth);
4640 * ufshcd_slave_configure - adjust SCSI device configurations
4641 * @sdev: pointer to SCSI device
4643 static int ufshcd_slave_configure(struct scsi_device *sdev)
4645 struct ufs_hba *hba = shost_priv(sdev->host);
4646 struct request_queue *q = sdev->request_queue;
4648 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4650 if (ufshcd_is_rpm_autosuspend_allowed(hba))
4651 sdev->rpm_autosuspend = 1;
4657 * ufshcd_slave_destroy - remove SCSI device configurations
4658 * @sdev: pointer to SCSI device
4660 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4662 struct ufs_hba *hba;
4664 hba = shost_priv(sdev->host);
4665 /* Drop the reference as it won't be needed anymore */
4666 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4667 unsigned long flags;
4669 spin_lock_irqsave(hba->host->host_lock, flags);
4670 hba->sdev_ufs_device = NULL;
4671 spin_unlock_irqrestore(hba->host->host_lock, flags);
4676 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4677 * @lrbp: pointer to local reference block of completed command
4678 * @scsi_status: SCSI command status
4680 * Returns a value based on the SCSI command status
4683 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4687 switch (scsi_status) {
4688 case SAM_STAT_CHECK_CONDITION:
4689 ufshcd_copy_sense_data(lrbp);
4692 result |= DID_OK << 16 |
4693 COMMAND_COMPLETE << 8 |
4696 case SAM_STAT_TASK_SET_FULL:
4698 case SAM_STAT_TASK_ABORTED:
4699 ufshcd_copy_sense_data(lrbp);
4700 result |= scsi_status;
4703 result |= DID_ERROR << 16;
4705 } /* end of switch */
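/*
 * For illustration: the result word built above packs three fields into one
 * 32-bit value -- the host byte in bits 23:16 (e.g. DID_OK), the message
 * byte in bits 15:8 (e.g. COMMAND_COMPLETE) and the SCSI status byte in
 * bits 7:0, exactly as the shifts in the switch show. Standalone sketch;
 * the MY_* constants are example values, not the midlayer's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define MY_DID_OK	0x00	/* host byte */
#define MY_DID_ERROR	0x07
#define MY_CMD_COMPLETE	0x00	/* message byte */
#define MY_SAM_GOOD	0x00	/* status byte */

static uint32_t pack_result(uint8_t host, uint8_t msg, uint8_t status)
{
	return (uint32_t)host << 16 | (uint32_t)msg << 8 | status;
}

static uint8_t host_byte(uint32_t result)
{
	return (result >> 16) & 0xff;	/* what host_byte(result) extracts */
}

int main(void)
{
	uint32_t ok = pack_result(MY_DID_OK, MY_CMD_COMPLETE, MY_SAM_GOOD);
	uint32_t bad = pack_result(MY_DID_ERROR, 0, 0);

	printf("0x%06x host=%u\n", ok, host_byte(ok));	/* 0x000000 host=0 */
	printf("0x%06x host=%u\n", bad, host_byte(bad));/* 0x070000 host=7 */
	return 0;
}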
4711 * ufshcd_transfer_rsp_status - Get overall status of the response
4712 * @hba: per adapter instance
4713 * @lrbp: pointer to local reference block of completed command
4715 * Returns result of the command to notify SCSI midlayer
4718 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4724 /* overall command status of utrd */
4725 ocs = ufshcd_get_tr_ocs(lrbp);
4729 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4730 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4732 case UPIU_TRANSACTION_RESPONSE:
4734 * get the response UPIU result to extract
4735 * the SCSI command status
4737 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4740 * get the result based on SCSI status response
4741 * to notify the SCSI midlayer of the command status
4743 scsi_status = result & MASK_SCSI_STATUS;
4744 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4747		 * Currently we only support BKOPs exception
4748		 * events, hence we can ignore BKOPs exception events
4749		 * during power management callbacks. A BKOPs exception
4750		 * event is not expected to be raised in the runtime suspend
4751		 * callback as it allows urgent bkops.
4752		 * During system suspend we forcefully disable
4753		 * bkops anyway, and if urgent bkops is needed
4754		 * it will be enabled on system resume. A long-term
4755		 * solution could be to abort the system suspend if the
4756		 * UFS device needs urgent BKOPs.
4758 if (!hba->pm_op_in_progress &&
4759 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4760 schedule_work(&hba->eeh_work)) {
4762 * Prevent suspend once eeh_work is scheduled
4763 * to avoid deadlock between ufshcd_suspend
4764 * and exception event handler.
4766 pm_runtime_get_noresume(hba->dev);
4769 case UPIU_TRANSACTION_REJECT_UPIU:
4770 /* TODO: handle Reject UPIU Response */
4771 result = DID_ERROR << 16;
4773 "Reject UPIU not fully implemented\n");
4777 "Unexpected request response code = %x\n",
4779 result = DID_ERROR << 16;
4784 result |= DID_ABORT << 16;
4786 case OCS_INVALID_COMMAND_STATUS:
4787 result |= DID_REQUEUE << 16;
4789 case OCS_INVALID_CMD_TABLE_ATTR:
4790 case OCS_INVALID_PRDT_ATTR:
4791 case OCS_MISMATCH_DATA_BUF_SIZE:
4792 case OCS_MISMATCH_RESP_UPIU_SIZE:
4793 case OCS_PEER_COMM_FAILURE:
4794 case OCS_FATAL_ERROR:
4796 result |= DID_ERROR << 16;
4798 "OCS error from controller = %x for tag %d\n",
4799 ocs, lrbp->task_tag);
4800 ufshcd_print_host_regs(hba);
4801 ufshcd_print_host_state(hba);
4803 } /* end of switch */
4805 if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
4806 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4811 * ufshcd_uic_cmd_compl - handle completion of uic command
4812 * @hba: per adapter instance
4813 * @intr_status: interrupt status generated by the controller
4816 * IRQ_HANDLED - If interrupt is valid
4817 * IRQ_NONE - If invalid interrupt
4819 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4821 irqreturn_t retval = IRQ_NONE;
4823 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4824 hba->active_uic_cmd->argument2 |=
4825 ufshcd_get_uic_cmd_result(hba);
4826 hba->active_uic_cmd->argument3 =
4827 ufshcd_get_dme_attr_val(hba);
4828 complete(&hba->active_uic_cmd->done);
4829 retval = IRQ_HANDLED;
4832 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
4833 complete(hba->uic_async_done);
4834 retval = IRQ_HANDLED;
4840 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4841 * @hba: per adapter instance
4842 * @completed_reqs: requests to complete
4844 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4845 unsigned long completed_reqs)
4847 struct ufshcd_lrb *lrbp;
4848 struct scsi_cmnd *cmd;
4852 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4853 lrbp = &hba->lrb[index];
4856 ufshcd_add_command_trace(hba, index, "complete");
4857 result = ufshcd_transfer_rsp_status(hba, lrbp);
4858 scsi_dma_unmap(cmd);
4859 cmd->result = result;
4860 /* Mark completed command as NULL in LRB */
4862 lrbp->compl_time_stamp = ktime_get();
4863 /* Do not touch lrbp after scsi done */
4864 cmd->scsi_done(cmd);
4865 __ufshcd_release(hba);
4866 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4867 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4868 lrbp->compl_time_stamp = ktime_get();
4869 if (hba->dev_cmd.complete) {
4870 ufshcd_add_command_trace(hba, index,
4872 complete(hba->dev_cmd.complete);
4875 if (ufshcd_is_clkscaling_supported(hba))
4876 hba->clk_scaling.active_reqs--;
4879 /* clear corresponding bits of completed commands */
4880 hba->outstanding_reqs ^= completed_reqs;
4882 ufshcd_clk_scaling_update_busy(hba);
4886 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4887 * @hba: per adapter instance
4890 * IRQ_HANDLED - If interrupt is valid
4891 * IRQ_NONE - If invalid interrupt
4893 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
4895 unsigned long completed_reqs;
4898	/* Resetting interrupt aggregation counters first and reading the
4899	 * DOOR_BELL afterward allows us to handle all the completed requests.
4900	 * In order to prevent starvation of other interrupts the DB is read once
4901	 * after reset. The downside of this solution is the possibility of a
4902	 * false interrupt if the device completes another request after resetting
4903	 * aggregation and before reading the DB.
4905 if (ufshcd_is_intr_aggr_allowed(hba))
4906 ufshcd_reset_intr_aggr(hba);
4908 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4909 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4911 if (completed_reqs) {
4912 __ufshcd_transfer_req_compl(hba, completed_reqs);
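/*
 * For illustration: the XOR above works because the driver sets a bit in
 * outstanding_reqs for every issued request, while the hardware clears the
 * matching doorbell bit on completion -- so doorbell ^ outstanding is
 * exactly the set of freshly completed tags. Worked example with made-up
 * tag values:
 */
#include <stdio.h>

int main(void)
{
	unsigned long outstanding = 0x2e;	/* tags 1,2,3,5 issued      */
	unsigned long doorbell    = 0x22;	/* tags 1,5 still executing */
	unsigned long completed   = doorbell ^ outstanding;

	printf("completed   = 0x%08lx\n", completed);	/* 0x0c: tags 2,3 */
	/* the driver then clears them: outstanding ^= completed */
	outstanding ^= completed;
	printf("outstanding = 0x%08lx\n", outstanding);	/* 0x22 */
	return 0;
}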
4920 * ufshcd_disable_ee - disable exception event
4921 * @hba: per-adapter instance
4922 * @mask: exception event to disable
4924 * Disables the exception event in the device so that the EVENT_ALERT
4925 * bit is not set.
4927 * Returns zero on success, non-zero error value on failure.
4929 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4934 if (!(hba->ee_ctrl_mask & mask))
4937 val = hba->ee_ctrl_mask & ~mask;
4938 val &= MASK_EE_STATUS;
4939 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4940 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4942 hba->ee_ctrl_mask &= ~mask;
4948 * ufshcd_enable_ee - enable exception event
4949 * @hba: per-adapter instance
4950 * @mask: exception event to enable
4952 * Enable the corresponding exception event in the device to allow the
4953 * device to alert the host in critical scenarios.
4955 * Returns zero on success, non-zero error value on failure.
4957 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4962 if (hba->ee_ctrl_mask & mask)
4965 val = hba->ee_ctrl_mask | mask;
4966 val &= MASK_EE_STATUS;
4967 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4968 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4970 hba->ee_ctrl_mask |= mask;
4976 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4977 * @hba: per-adapter instance
4979 * Allow the device to manage background operations on its own. Enabling
4980 * this might lead to inconsistent latencies during normal data transfers
4981 * as the device is allowed to manage its own way of handling background
4982 * operations.
4984 * Returns zero on success, non-zero on failure.
4986 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4990 if (hba->auto_bkops_enabled)
4993 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4994 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
4996 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5001 hba->auto_bkops_enabled = true;
5002 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5004 /* No need of URGENT_BKOPS exception from the device */
5005 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5007 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5014 * ufshcd_disable_auto_bkops - block device in doing background operations
5015 * @hba: per-adapter instance
5017 * Disabling background operations improves command response latency but
5018 * has the drawback of moving the device into a critical state in which it
5019 * is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever
5020 * the host is idle so that BKOPS are managed effectively without any
5021 * negative impacts.
5023 * Returns zero on success, non-zero on failure.
5025 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5029 if (!hba->auto_bkops_enabled)
5033 * If host assisted BKOPs is to be enabled, make sure
5034 * urgent bkops exception is allowed.
5036 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5038 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5043 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5044 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5046 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5048 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5052 hba->auto_bkops_enabled = false;
5053 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5054 hba->is_urgent_bkops_lvl_checked = false;
5060 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5061 * @hba: per adapter instance
5063 * After a device reset the device may toggle the BKOPS_EN flag
5064 * to its default value. The s/w tracking variables should be updated
5065 * as well. This function changes the auto-bkops state based on
5066 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5068 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5070 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5071 hba->auto_bkops_enabled = false;
5072 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5073 ufshcd_enable_auto_bkops(hba);
5075 hba->auto_bkops_enabled = true;
5076 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5077 ufshcd_disable_auto_bkops(hba);
5079 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5080 hba->is_urgent_bkops_lvl_checked = false;
5083 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5085 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5086 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5090 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5091 * @hba: per-adapter instance
5092 * @status: bkops_status value
5094 * Read the bkops_status from the UFS device and enable the
5095 * fBackgroundOpsEn flag in the device to permit background operations if
5096 * the device's bkops_status is greater than or equal to the "status"
5097 * argument passed to this function; disable it otherwise.
5099 * Returns 0 for success, non-zero in case of failure.
5101 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5102 * to know whether auto bkops is enabled or disabled after this function
5103 * returns control to it.
5105 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5106 enum bkops_status status)
5109 u32 curr_status = 0;
5111 err = ufshcd_get_bkops_status(hba, &curr_status);
5113 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5116 } else if (curr_status > BKOPS_STATUS_MAX) {
5117 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5118 __func__, curr_status);
5123 if (curr_status >= status)
5124 err = ufshcd_enable_auto_bkops(hba);
5126 err = ufshcd_disable_auto_bkops(hba);
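/*
 * For illustration: the decision above is a plain threshold compare --
 * auto-bkops is enabled when the device-reported status is at least the
 * requested level, and disabled otherwise. The enum values below are
 * illustrative stand-ins for the bBackgroundOpStatus encoding, not the
 * driver's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

enum bkops_level {
	BKOPS_NOT_REQUIRED = 0,
	BKOPS_NON_CRITICAL = 1,
	BKOPS_PERF_IMPACT  = 2,
	BKOPS_CRITICAL     = 3,
};

static bool should_enable_auto_bkops(enum bkops_level curr,
				     enum bkops_level threshold)
{
	return curr >= threshold;
}

int main(void)
{
	/* with a PERF_IMPACT threshold: non-critical -> off, critical -> on */
	printf("%d %d\n",
	       should_enable_auto_bkops(BKOPS_NON_CRITICAL, BKOPS_PERF_IMPACT),
	       should_enable_auto_bkops(BKOPS_CRITICAL, BKOPS_PERF_IMPACT));
	return 0;	/* prints: 0 1 */
}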
5132 * ufshcd_urgent_bkops - handle urgent bkops exception event
5133 * @hba: per-adapter instance
5135 * Enable fBackgroundOpsEn flag in the device to permit background
5138 * Returns 0 if BKOPS is enabled, 1 if it is not enabled,
5139 * and a negative error value for any other failure.
5141 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5143 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5146 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5148 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5149 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5152 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5155 u32 curr_status = 0;
5157 if (hba->is_urgent_bkops_lvl_checked)
5158 goto enable_auto_bkops;
5160 err = ufshcd_get_bkops_status(hba, &curr_status);
5162 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5168	 * We are seeing that some devices raise the urgent bkops
5169	 * exception events even when the BKOPS status doesn't indicate performance
5170	 * impacted or critical. Handle such devices by determining their urgent
5171	 * bkops status at runtime.
5173 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5174 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5175 __func__, curr_status);
5176 /* update the current status as the urgent bkops level */
5177 hba->urgent_bkops_lvl = curr_status;
5178 hba->is_urgent_bkops_lvl_checked = true;
5182 err = ufshcd_enable_auto_bkops(hba);
5185 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5189 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5193 enum query_opcode opcode;
5195 if (!ufshcd_is_wb_allowed(hba))
5198 if (!(enable ^ hba->wb_enabled))
5201 opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5203 opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5205 index = ufshcd_wb_get_query_index(hba);
5206 ret = ufshcd_query_flag_retry(hba, opcode,
5207 QUERY_FLAG_IDN_WB_EN, index, NULL);
5209 dev_err(hba->dev, "%s write booster %s failed %d\n",
5210 __func__, enable ? "enable" : "disable", ret);
5214 hba->wb_enabled = enable;
5215 dev_dbg(hba->dev, "%s write booster %s %d\n",
5216 __func__, enable ? "enable" : "disable", ret);
5221 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5227 val = UPIU_QUERY_OPCODE_SET_FLAG;
5229 val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5231 index = ufshcd_wb_get_query_index(hba);
5232 return ufshcd_query_flag_retry(hba, val,
5233 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5237 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5240 ufshcd_wb_buf_flush_enable(hba);
5242 ufshcd_wb_buf_flush_disable(hba);
5246 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5251 if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
5254 index = ufshcd_wb_get_query_index(hba);
5255 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5256 QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5259 dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5262 hba->wb_buf_flush_enabled = true;
5264 dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5268 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5273 if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
5276 index = ufshcd_wb_get_query_index(hba);
5277 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5278 QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5281 dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5284 hba->wb_buf_flush_enabled = false;
5285 dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5291 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5298 index = ufshcd_wb_get_query_index(hba);
5299 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5300 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5301 index, 0, &cur_buf);
5303 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5309 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5313 /* Let it continue to flush when available buffer exceeds threshold */
5314 if (avail_buf < hba->vps->wb_flush_threshold)
5320 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5326 if (!ufshcd_is_wb_allowed(hba))
5329	 * The UFS device needs the VCC supply to be ON to flush.
5330	 * With user-space reduction enabled, it's enough to enable flush
5331	 * by checking only the available buffer. The threshold
5332	 * defined here is > 90% full.
5333	 * With user-space preservation enabled, the current buffer
5334	 * should be checked too because the WB buffer size can shrink
5335	 * as the disk tends to fill up. This info is provided by the current
5336	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5337	 * keeping VCC on when the current buffer is empty.
5339 index = ufshcd_wb_get_query_index(hba);
5340 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5341 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5342 index, 0, &avail_buf);
5344 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5349 if (!hba->dev_info.b_presrv_uspc_en) {
5350 if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5355 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
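/*
 * For illustration: the flush policy above as one standalone decision
 * function. The units, the "10% left" threshold and the
 * preserved-user-space branch threshold are abstractions assumed for the
 * example; only the shape of the decision tree mirrors
 * ufshcd_wb_need_flush() and ufshcd_wb_presrv_usrspc_keep_vcc_on().
 */
#include <stdbool.h>
#include <stdio.h>

static bool wb_need_flush(bool wb_allowed, bool preserve_user_space,
			  unsigned int avail, unsigned int cur)
{
	if (!wb_allowed)
		return false;
	if (!preserve_user_space)
		return avail <= 10;	/* >90% full: flush needed */
	if (cur == 0)
		return false;		/* nothing buffered: keep VCC off */
	return avail < 40;		/* illustrative flush threshold */
}

int main(void)
{
	printf("%d\n", wb_need_flush(true, false, 8, 0));	/* 1 */
	printf("%d\n", wb_need_flush(true, true, 30, 0));	/* 0 */
	printf("%d\n", wb_need_flush(true, true, 30, 5));	/* 1 */
	return 0;
}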
5358 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5360 struct ufs_hba *hba = container_of(to_delayed_work(work),
5362 rpm_dev_flush_recheck_work);
5364	 * To prevent unnecessary VCC power drain after the device finishes a
5365	 * WriteBooster buffer flush or Auto BKOPs, force a runtime resume after
5366	 * a certain delay so the threshold is rechecked on the next runtime
5367	 * suspend.
5369 pm_runtime_get_sync(hba->dev);
5370 pm_runtime_put_sync(hba->dev);
5374 * ufshcd_exception_event_handler - handle exceptions raised by device
5375 * @work: pointer to work data
5377 * Read bExceptionEventStatus attribute from the device and handle the
5378 * exception event accordingly.
5380 static void ufshcd_exception_event_handler(struct work_struct *work)
5382 struct ufs_hba *hba;
5385 hba = container_of(work, struct ufs_hba, eeh_work);
5387 pm_runtime_get_sync(hba->dev);
5388 ufshcd_scsi_block_requests(hba);
5389 err = ufshcd_get_ee_status(hba, &status);
5391 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5396 status &= hba->ee_ctrl_mask;
5398 if (status & MASK_EE_URGENT_BKOPS)
5399 ufshcd_bkops_exception_event_handler(hba);
5402 ufshcd_scsi_unblock_requests(hba);
5404	 * pm_runtime_get_noresume is called while scheduling
5405	 * eeh_work to avoid suspend racing with exception work.
5406	 * Hence decrement the usage counter using pm_runtime_put_noidle
5407	 * to allow suspend once the exception event handler completes.
5409 pm_runtime_put_noidle(hba->dev);
5410 pm_runtime_put(hba->dev);
5414 /* Complete requests that have door-bell cleared */
5415 static void ufshcd_complete_requests(struct ufs_hba *hba)
5417 ufshcd_transfer_req_compl(hba);
5418 ufshcd_tmc_handler(hba);
5422 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
5423 * to recover from the DL NAC errors.
5424 * @hba: per-adapter instance
5426 * Returns true if error handling is required, false otherwise
5428 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5430 unsigned long flags;
5431 bool err_handling = true;
5433 spin_lock_irqsave(hba->host->host_lock, flags);
5435	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
5436	 * device fatal errors and/or DL NAC & REPLAY timeout errors.
5438 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5441 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5442 ((hba->saved_err & UIC_ERROR) &&
5443 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5446 if ((hba->saved_err & UIC_ERROR) &&
5447 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5450 * wait for 50ms to see if we can get any other errors or not.
5452 spin_unlock_irqrestore(hba->host->host_lock, flags);
5454 spin_lock_irqsave(hba->host->host_lock, flags);
5457		 * now check if we have got any other severe errors other than
5458		 * the NAC error.
5460 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5461 ((hba->saved_err & UIC_ERROR) &&
5462 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5466 * As DL NAC is the only error received so far, send out NOP
5467 * command to confirm if link is still active or not.
5468 * - If we don't get any response then do error recovery.
5469 * - If we get response then clear the DL NAC error bit.
5472 spin_unlock_irqrestore(hba->host->host_lock, flags);
5473 err = ufshcd_verify_dev_init(hba);
5474 spin_lock_irqsave(hba->host->host_lock, flags);
5479 /* Link seems to be alive hence ignore the DL NAC errors */
5480 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5481 hba->saved_err &= ~UIC_ERROR;
5482 /* clear NAC error */
5483 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5484 if (!hba->saved_uic_err) {
5485 err_handling = false;
5490 spin_unlock_irqrestore(hba->host->host_lock, flags);
5491 return err_handling;
5495 * ufshcd_err_handler - handle UFS errors that require s/w attention
5496 * @work: pointer to work structure
5498 static void ufshcd_err_handler(struct work_struct *work)
5500 struct ufs_hba *hba;
5501 unsigned long flags;
5506 bool needs_reset = false;
5508 hba = container_of(work, struct ufs_hba, eh_work);
5510 pm_runtime_get_sync(hba->dev);
5511 ufshcd_hold(hba, false);
5513 spin_lock_irqsave(hba->host->host_lock, flags);
5514 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5517 hba->ufshcd_state = UFSHCD_STATE_RESET;
5518 ufshcd_set_eh_in_progress(hba);
5520 /* Complete requests that have door-bell cleared by h/w */
5521 ufshcd_complete_requests(hba);
5523 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5526 spin_unlock_irqrestore(hba->host->host_lock, flags);
5527 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5528 ret = ufshcd_quirk_dl_nac_errors(hba);
5529 spin_lock_irqsave(hba->host->host_lock, flags);
5531 goto skip_err_handling;
5533 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5534 (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
5535 ((hba->saved_err & UIC_ERROR) &&
5536 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5537 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5538 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5542 * if host reset is required then skip clearing the pending
5543 * transfers forcefully because they will get cleared during
5544 * host reset and restore
5547 goto skip_pending_xfer_clear;
5549 /* release lock as clear command might sleep */
5550 spin_unlock_irqrestore(hba->host->host_lock, flags);
5551 /* Clear pending transfer requests */
5552 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5553 if (ufshcd_clear_cmd(hba, tag)) {
5555 goto lock_skip_pending_xfer_clear;
5559 /* Clear pending task management requests */
5560 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5561 if (ufshcd_clear_tm_cmd(hba, tag)) {
5563 goto lock_skip_pending_xfer_clear;
5567 lock_skip_pending_xfer_clear:
5568 spin_lock_irqsave(hba->host->host_lock, flags);
5570 /* Complete the requests that are cleared by s/w */
5571 ufshcd_complete_requests(hba);
5573 if (err_xfer || err_tm)
5576 skip_pending_xfer_clear:
5577 /* Fatal errors need reset */
5579 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5582 * ufshcd_reset_and_restore() does the link reinitialization
5583		 * which will need at least one empty doorbell slot to send the
5584		 * device management commands (NOP and query commands).
5585		 * If there is no empty slot at this moment then forcefully free up
5586		 * the last taken slot.
5588 if (hba->outstanding_reqs == max_doorbells)
5589 __ufshcd_transfer_req_compl(hba,
5590 (1UL << (hba->nutrs - 1)));
5592 spin_unlock_irqrestore(hba->host->host_lock, flags);
5593 err = ufshcd_reset_and_restore(hba);
5594 spin_lock_irqsave(hba->host->host_lock, flags);
5596 dev_err(hba->dev, "%s: reset and restore failed\n",
5598 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5601		 * Inform the SCSI mid-layer that we did a reset and allow it to
5602		 * handle Unit Attention properly.
5604 scsi_report_bus_reset(hba->host, 0);
5606 hba->saved_uic_err = 0;
5611 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5612 if (hba->saved_err || hba->saved_uic_err)
5613 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5614 __func__, hba->saved_err, hba->saved_uic_err);
5617 ufshcd_clear_eh_in_progress(hba);
5620 spin_unlock_irqrestore(hba->host->host_lock, flags);
5621 ufshcd_scsi_unblock_requests(hba);
5622 ufshcd_release(hba);
5623 pm_runtime_put_sync(hba->dev);
5627 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5628 * @hba: per-adapter instance
5631 * IRQ_HANDLED - If interrupt is valid
5632 * IRQ_NONE - If invalid interrupt
5634 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
5637 irqreturn_t retval = IRQ_NONE;
5639 /* PHY layer lane error */
5640 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5641 /* Ignore LINERESET indication, as this is not an error */
5642 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5643 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5645 * To know whether this error is fatal or not, DB timeout
5646 * must be checked but this error is handled separately.
5648 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5649 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
5650 retval |= IRQ_HANDLED;
5653 /* PA_INIT_ERROR is fatal and needs UIC reset */
5654 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5655 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
5656 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
5657 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
5659 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5660 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5661 else if (hba->dev_quirks &
5662 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5663 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5665 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5666 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5667 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5669 retval |= IRQ_HANDLED;
5672	/* UIC NL/TL/DME errors need software retry */
5673 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5674 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
5675 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
5676 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
5677 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5678 retval |= IRQ_HANDLED;
5681 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5682 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
5683 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
5684 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
5685 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5686 retval |= IRQ_HANDLED;
5689 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5690 if ((reg & UIC_DME_ERROR) &&
5691 (reg & UIC_DME_ERROR_CODE_MASK)) {
5692 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
5693 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5694 retval |= IRQ_HANDLED;
5697 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5698 __func__, hba->uic_error);
5702 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5705 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5706 !ufshcd_is_auto_hibern8_enabled(hba))
5709 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5712 if (hba->active_uic_cmd &&
5713 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5714 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5721 * ufshcd_check_errors - Check for errors that need s/w attention
5722 * @hba: per-adapter instance
5725 * IRQ_HANDLED - If interrupt is valid
5726 * IRQ_NONE - If invalid interrupt
5728 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
5730 bool queue_eh_work = false;
5731 irqreturn_t retval = IRQ_NONE;
5733 if (hba->errors & INT_FATAL_ERRORS) {
5734 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
5735 queue_eh_work = true;
5738 if (hba->errors & UIC_ERROR) {
5740 retval = ufshcd_update_uic_error(hba);
5742 queue_eh_work = true;
5745 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
5747 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5748 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5750 hba->errors, ufshcd_get_upmcrs(hba));
5751 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
5753 queue_eh_work = true;
5756 if (queue_eh_work) {
5758		 * Update the transfer error masks to sticky bits; do this
5759		 * irrespective of the current ufshcd_state.
5761 hba->saved_err |= hba->errors;
5762 hba->saved_uic_err |= hba->uic_error;
5764 /* handle fatal errors only when link is functional */
5765 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5766 /* block commands from scsi mid-layer */
5767 ufshcd_scsi_block_requests(hba);
5769 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5771 /* dump controller state before resetting */
5772 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5773 bool pr_prdt = !!(hba->saved_err &
5774 SYSTEM_BUS_FATAL_ERROR);
5776 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5777 __func__, hba->saved_err,
5778 hba->saved_uic_err);
5780 ufshcd_print_host_regs(hba);
5781 ufshcd_print_pwr_info(hba);
5782 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5783 ufshcd_print_trs(hba, hba->outstanding_reqs,
5786 schedule_work(&hba->eh_work);
5788 retval |= IRQ_HANDLED;
5791 * if (!queue_eh_work) -
5792	 * Other errors are either non-fatal, where the host recovers by
5793	 * itself without s/w intervention, or errors that will be
5794	 * handled by the SCSI core layer.
5800 struct ufs_hba *hba;
5801 unsigned long pending;
5805 static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
5807 struct ctm_info *const ci = priv;
5808 struct completion *c;
5810 WARN_ON_ONCE(reserved);
5811 if (test_bit(req->tag, &ci->pending))
5814 c = req->end_io_data;
5821 * ufshcd_tmc_handler - handle task management function completion
5822 * @hba: per adapter instance
5825 * IRQ_HANDLED - If interrupt is valid
5826 * IRQ_NONE - If invalid interrupt
5828 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
5830 struct request_queue *q = hba->tmf_queue;
5831 struct ctm_info ci = {
5833 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
5836 blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
5837 return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
5841 * ufshcd_sl_intr - Interrupt service routine
5842 * @hba: per adapter instance
5843 * @intr_status: contains interrupts generated by the controller
5846 * IRQ_HANDLED - If interrupt is valid
5847 * IRQ_NONE - If invalid interrupt
5849 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5851 irqreturn_t retval = IRQ_NONE;
5853 hba->errors = UFSHCD_ERROR_MASK & intr_status;
5855 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5856 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5859 retval |= ufshcd_check_errors(hba);
5861 if (intr_status & UFSHCD_UIC_MASK)
5862 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
5864 if (intr_status & UTP_TASK_REQ_COMPL)
5865 retval |= ufshcd_tmc_handler(hba);
5867 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5868 retval |= ufshcd_transfer_req_compl(hba);
5874 * ufshcd_intr - Main interrupt service routine
5876 * @__hba: pointer to adapter instance
5879 * IRQ_HANDLED - If interrupt is valid
5880 * IRQ_NONE - If invalid interrupt
5882 static irqreturn_t ufshcd_intr(int irq, void *__hba)
5884 u32 intr_status, enabled_intr_status;
5885 irqreturn_t retval = IRQ_NONE;
5886 struct ufs_hba *hba = __hba;
5887 int retries = hba->nutrs;
5889 spin_lock(hba->host->host_lock);
5890 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5893	 * There can be at most hba->nutrs reqs in flight and, in the worst case,
5894	 * they may finish one by one after the interrupt status is
5895	 * read; make sure we handle them by checking the interrupt status
5896	 * again in a loop until all of the reqs are processed before returning.
5899 enabled_intr_status =
5900 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5902 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5903 if (enabled_intr_status)
5904 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
5906 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5907 } while (intr_status && --retries);
5909 if (retval == IRQ_NONE) {
5910 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
5911 __func__, intr_status);
5912 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
5915 spin_unlock(hba->host->host_lock);
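/*
 * For illustration: the handler above re-reads the status register after
 * acknowledging it, bounded by hba->nutrs retries, so completions that race
 * in while the handler runs are not lost. Standalone simulation of that
 * shape; the simulated register, the injected "new event" and the helper
 * names are fabrications for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pending = 0x3;	/* simulated interrupt-status register */
static int injected;

static uint32_t read_status(void)
{
	return pending;
}

static void ack_status(uint32_t bits)
{
	pending &= ~bits;		/* write-1-to-clear acknowledge */
	if (!injected++)
		pending |= 0x4;		/* a new completion races in */
}

int main(void)
{
	int retries = 4;		/* stands in for hba->nutrs */
	uint32_t status = read_status();
	int handled = 0;

	do {
		ack_status(status);
		if (status)
			handled++;	/* service this batch of events */
		status = read_status();	/* re-read: anything new? */
	} while (status && --retries);

	printf("handled %d batch(es)\n", handled);	/* handled 2 batch(es) */
	return 0;
}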
5919 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5922 u32 mask = 1 << tag;
5923 unsigned long flags;
5925 if (!test_bit(tag, &hba->outstanding_tasks))
5928 spin_lock_irqsave(hba->host->host_lock, flags);
5929 ufshcd_utmrl_clear(hba, tag);
5930 spin_unlock_irqrestore(hba->host->host_lock, flags);
5932 /* poll for max. 1 sec to clear door bell register by h/w */
5933 err = ufshcd_wait_for_register(hba,
5934 REG_UTP_TASK_REQ_DOOR_BELL,
5935 mask, 0, 1000, 1000);
5940 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5941 struct utp_task_req_desc *treq, u8 tm_function)
5943 struct request_queue *q = hba->tmf_queue;
5944 struct Scsi_Host *host = hba->host;
5945 DECLARE_COMPLETION_ONSTACK(wait);
5946 struct request *req;
5947 unsigned long flags;
5948 int free_slot, task_tag, err;
5951 * Get free slot, sleep if slots are unavailable.
5952 * Even though we use wait_event() which sleeps indefinitely,
5953 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5955 req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
5956 req->end_io_data = &wait;
5957 free_slot = req->tag;
5958 WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
5959 ufshcd_hold(hba, false);
5961 spin_lock_irqsave(host->host_lock, flags);
5962 task_tag = hba->nutrs + free_slot;
5964 treq->req_header.dword_0 |= cpu_to_be32(task_tag);
5966 memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
5967 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5969 /* send command to the controller */
5970 __set_bit(free_slot, &hba->outstanding_tasks);
5972 /* Make sure descriptors are ready before ringing the task doorbell */
5975 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5976 /* Make sure that doorbell is committed immediately */
5979 spin_unlock_irqrestore(host->host_lock, flags);
5981 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5983 /* wait until the task management command is completed */
5984 err = wait_for_completion_io_timeout(&wait,
5985 msecs_to_jiffies(TM_CMD_TIMEOUT));
5988		 * Make sure that ufshcd_compl_tm() does not trigger a
5989		 * use-after-free.
5991 req->end_io_data = NULL;
5992 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5993 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5994 __func__, tm_function);
5995 if (ufshcd_clear_tm_cmd(hba, free_slot))
5996			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5997 __func__, free_slot);
6001 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
6003 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6006 spin_lock_irqsave(hba->host->host_lock, flags);
6007 __clear_bit(free_slot, &hba->outstanding_tasks);
6008 spin_unlock_irqrestore(hba->host->host_lock, flags);
6010 blk_put_request(req);
6012 ufshcd_release(hba);
6017 * ufshcd_issue_tm_cmd - issues task management commands to controller
6018 * @hba: per adapter instance
6019 * @lun_id: LUN ID to which TM command is sent
6020 * @task_id: task ID to which the TM command is applicable
6021 * @tm_function: task management function opcode
6022 * @tm_response: task management service response return value
6024 * Returns non-zero value on error, zero on success.
6026 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6027 u8 tm_function, u8 *tm_response)
6029 struct utp_task_req_desc treq = { { 0 }, };
6032 /* Configure task request descriptor */
6033 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6034 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6036 /* Configure task request UPIU */
6037 treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6038 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6039 treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6042 * The host shall provide the same value for LUN field in the basic
6043 * header and for Input Parameter.
6045 treq.input_param1 = cpu_to_be32(lun_id);
6046 treq.input_param2 = cpu_to_be32(task_id);
6048 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6049 if (err == -ETIMEDOUT)
6052 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6053 if (ocs_value != OCS_SUCCESS)
6054 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6055 __func__, ocs_value);
6056 else if (tm_response)
6057 *tm_response = be32_to_cpu(treq.output_param1) &
6058 MASK_TM_SERVICE_RESP;
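/*
 * For illustration: dword_0 of the task request UPIU header is big-endian,
 * so the shifts above place the transaction code in byte 0, the LUN in
 * byte 2 and (OR'ed in later by __ufshcd_issue_tm_cmd) the task tag in
 * byte 3. Worked example with illustrative values:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xaction = 0x04;	/* illustrative task-request code */
	uint32_t lun = 0x02, task_tag = 0x21;
	uint32_t dw0 = (xaction << 24) | (lun << 8) | task_tag;
	uint8_t be[4] = {
		dw0 >> 24, (dw0 >> 16) & 0xff, (dw0 >> 8) & 0xff, dw0 & 0xff,
	};

	/* byte0=transaction, byte1=flags(0), byte2=LUN, byte3=task tag */
	printf("%02x %02x %02x %02x\n", be[0], be[1], be[2], be[3]);
	return 0;	/* prints: 04 00 02 21 */
}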
6063 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6064 * @hba: per-adapter instance
6065 * @req_upiu: upiu request
6066 * @rsp_upiu: upiu reply
6067 * @desc_buff: pointer to descriptor buffer, NULL if NA
6068 * @buff_len: descriptor size, 0 if NA
6069 * @cmd_type: specifies the type (NOP, Query...)
6070 * @desc_op: descriptor operation
6072 * These types of request use the UTP Transfer Request Descriptor (utrd).
6073 * Therefore, they "ride" the device management infrastructure: they use
6074 * its tag and tasks work queues.
6076 * Since there is only one available tag for device management commands,
6077 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6079 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6080 struct utp_upiu_req *req_upiu,
6081 struct utp_upiu_req *rsp_upiu,
6082 u8 *desc_buff, int *buff_len,
6083 enum dev_cmd_type cmd_type,
6084 enum query_opcode desc_op)
6086 struct request_queue *q = hba->cmd_queue;
6087 struct request *req;
6088 struct ufshcd_lrb *lrbp;
6091 struct completion wait;
6092 unsigned long flags;
6095 down_read(&hba->clk_scaling_lock);
6097 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6103 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
6105 init_completion(&wait);
6106 lrbp = &hba->lrb[tag];
6110 lrbp->sense_bufflen = 0;
6111 lrbp->sense_buffer = NULL;
6112 lrbp->task_tag = tag;
6114 lrbp->intr_cmd = true;
6115 hba->dev_cmd.type = cmd_type;
6117 switch (hba->ufs_version) {
6118 case UFSHCI_VERSION_10:
6119 case UFSHCI_VERSION_11:
6120 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6123 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6127 /* update the task tag in the request upiu */
6128 req_upiu->header.dword_0 |= cpu_to_be32(tag);
6130 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6132 /* just copy the upiu request as it is */
6133 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6134 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6135		/* The Data Segment Area is optional depending upon the query
6136		 * function value. For WRITE DESCRIPTOR, the data segment
6137		 * follows right after the tsf.
6139 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6143 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6145 hba->dev_cmd.complete = &wait;
6147 /* Make sure descriptors are ready before ringing the doorbell */
6149 spin_lock_irqsave(hba->host->host_lock, flags);
6150 ufshcd_send_command(hba, tag);
6151 spin_unlock_irqrestore(hba->host->host_lock, flags);
6154	 * ignore the return value here - ufshcd_check_query_response is
6155	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6156	 * Read the response directly, ignoring all errors.
6158 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6160 /* just copy the upiu response as it is */
6161 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6162 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6163 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6164 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6165 MASK_QUERY_DATA_SEG_LEN;
6167 if (*buff_len >= resp_len) {
6168 memcpy(desc_buff, descp, resp_len);
6169 *buff_len = resp_len;
6172 "%s: rsp size %d is bigger than buffer size %d",
6173 __func__, resp_len, *buff_len);
6179 blk_put_request(req);
6181 up_read(&hba->clk_scaling_lock);
6186 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6187 * @hba: per-adapter instance
6188 * @req_upiu: upiu request
6189 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
6190 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
6191 * @desc_buff: pointer to descriptor buffer, NULL if NA
6192 * @buff_len: descriptor size, 0 if NA
6193 * @desc_op: descriptor operation
6195 * Supports UTP Transfer requests (nop and query), and UTP Task
6196 * Management requests.
6197 * It is up to the caller to fill the upiu content properly, as it will
6198 * be copied without any further input validation.
6200 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6201 struct utp_upiu_req *req_upiu,
6202 struct utp_upiu_req *rsp_upiu,
6204 u8 *desc_buff, int *buff_len,
6205 enum query_opcode desc_op)
6208 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6209 struct utp_task_req_desc treq = { { 0 }, };
6211 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6214 case UPIU_TRANSACTION_NOP_OUT:
6215 cmd_type = DEV_CMD_TYPE_NOP;
6217 case UPIU_TRANSACTION_QUERY_REQ:
6218 ufshcd_hold(hba, false);
6219 mutex_lock(&hba->dev_cmd.lock);
6220 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6221 desc_buff, buff_len,
6223 mutex_unlock(&hba->dev_cmd.lock);
6224 ufshcd_release(hba);
6227 case UPIU_TRANSACTION_TASK_REQ:
6228 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6229 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6231 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6233 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6234 if (err == -ETIMEDOUT)
6237 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6238 if (ocs_value != OCS_SUCCESS) {
6239 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6244 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6257 * ufshcd_eh_device_reset_handler - device reset handler registered to
6258 * the SCSI layer
6259 * @cmd: SCSI command pointer
6261 * Returns SUCCESS/FAILED
6263 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6265 struct Scsi_Host *host;
6266 struct ufs_hba *hba;
6271 struct ufshcd_lrb *lrbp;
6272 unsigned long flags;
6274 host = cmd->device->host;
6275 hba = shost_priv(host);
6276 tag = cmd->request->tag;
6278 lrbp = &hba->lrb[tag];
6279 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6280 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6286	/* clear the commands that were pending for the corresponding LUN */
6287 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6288 if (hba->lrb[pos].lun == lrbp->lun) {
6289 err = ufshcd_clear_cmd(hba, pos);
6294 spin_lock_irqsave(host->host_lock, flags);
6295 ufshcd_transfer_req_compl(hba);
6296 spin_unlock_irqrestore(host->host_lock, flags);
6299 hba->req_abort_count = 0;
6300 ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
6304 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6310 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6312 struct ufshcd_lrb *lrbp;
6315 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6316 lrbp = &hba->lrb[tag];
6317 lrbp->req_abort_skip = true;
6322 * ufshcd_abort - abort a specific command
6323 * @cmd: SCSI command pointer
6325 * Abort the pending command in the device by sending the UFS_ABORT_TASK
6326 * task management command, and in the host controller by clearing the
6327 * door-bell register. There can be a race between the controller sending
6328 * the command to the device and the abort being issued. To avoid that,
6329 * first issue UFS_QUERY_TASK to check that the command was issued, then abort it.
6331 * Returns SUCCESS/FAILED
6333 static int ufshcd_abort(struct scsi_cmnd *cmd)
6335 struct Scsi_Host *host;
6336 struct ufs_hba *hba;
6337 unsigned long flags;
6342 struct ufshcd_lrb *lrbp;
6345 host = cmd->device->host;
6346 hba = shost_priv(host);
6347 tag = cmd->request->tag;
6348 lrbp = &hba->lrb[tag];
6349 if (!ufshcd_valid_tag(hba, tag)) {
6351 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6352 __func__, tag, cmd, cmd->request);
6357	 * Task abort to the device W-LUN is illegal. When this command
6358	 * fails, due to the spec violation, the next SCSI error handling
6359	 * step will be to send a LU reset which, again, is a spec violation.
6360	 * To avoid these unnecessary/illegal steps we skip to the last error
6361	 * handling stage: reset and restore.
6363 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6364 return ufshcd_eh_host_reset_handler(cmd);
6366 ufshcd_hold(hba, false);
6367 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6368 /* If command is already aborted/completed, return SUCCESS */
6369 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6371 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6372 __func__, tag, hba->outstanding_reqs, reg);
6376 if (!(reg & (1 << tag))) {
6378 "%s: cmd was completed, but without a notifying intr, tag = %d",
6382 /* Print Transfer Request of aborted task */
6383 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6386	 * Print detailed info about the aborted request.
6387	 * As more than one request might get aborted at the same time,
6388	 * print full information only for the first aborted request in order
6389	 * to reduce repeated printouts. For other aborted requests only print
6390	 * basic details.
6392 scsi_print_command(hba->lrb[tag].cmd);
6393 if (!hba->req_abort_count) {
6394 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
6395 ufshcd_print_host_regs(hba);
6396 ufshcd_print_host_state(hba);
6397 ufshcd_print_pwr_info(hba);
6398 ufshcd_print_trs(hba, 1 << tag, true);
6400 ufshcd_print_trs(hba, 1 << tag, false);
6402 hba->req_abort_count++;
6404 /* Skip task abort in case previous aborts failed and report failure */
6405 if (lrbp->req_abort_skip) {
6410 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6411 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6412 UFS_QUERY_TASK, &resp);
6413 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6414 /* cmd pending in the device */
6415 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6418 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6420			 * cmd not pending in the device, check if it is
6421			 * in transition.
6423 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6425 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6426 if (reg & (1 << tag)) {
6427 /* sleep for max. 200us to stabilize */
6428 usleep_range(100, 200);
6431 /* command completed already */
6432 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6437 "%s: no response from device. tag = %d, err %d\n",
6438 __func__, tag, err);
6440 err = resp; /* service response error */
6450 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6451 UFS_ABORT_TASK, &resp);
6452 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6454 err = resp; /* service response error */
6455 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6456 __func__, tag, err);
6461 err = ufshcd_clear_cmd(hba, tag);
6463 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6464 __func__, tag, err);
6468 scsi_dma_unmap(cmd);
6470 spin_lock_irqsave(host->host_lock, flags);
6471 ufshcd_outstanding_req_clear(hba, tag);
6472 hba->lrb[tag].cmd = NULL;
6473 spin_unlock_irqrestore(host->host_lock, flags);
6479 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6480 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6485 * This ufshcd_release() corresponds to the original scsi cmd that got
6486 * aborted here (as we won't get any IRQ for it).
6488 ufshcd_release(hba);
6493 * ufshcd_host_reset_and_restore - reset and restore host controller
6494 * @hba: per-adapter instance
6496 * Note that the host controller reset may issue DME_RESET to the
6497 * local and remote (device) Uni-Pro stacks, and the attributes
6498 * are reset to their default state.
6500 * Returns zero on success, non-zero on failure
6502 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6505 unsigned long flags;
6508	 * Stop the host controller and complete the requests
6509	 * cleared by h/w.
6511 ufshcd_hba_stop(hba);
6513 spin_lock_irqsave(hba->host->host_lock, flags);
6514 hba->silence_err_logs = true;
6515 ufshcd_complete_requests(hba);
6516 hba->silence_err_logs = false;
6517 spin_unlock_irqrestore(hba->host->host_lock, flags);
6519 /* scale up clocks to max frequency before full reinitialization */
6520 ufshcd_set_clk_freq(hba, true);
6522 err = ufshcd_hba_enable(hba);
6526 /* Establish the link again and restore the device */
6527 err = ufshcd_probe_hba(hba, false);
6529 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
6533 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6534 ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
6539 * ufshcd_reset_and_restore - reset and re-initialize host/device
6540 * @hba: per-adapter instance
6542 * Reset and recover the device and host, and re-establish the link. This
6543 * is helpful in recovering communication in fatal error conditions.
6545 * Returns zero on success, non-zero on failure
6547 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6550 int retries = MAX_HOST_RESET_RETRIES;
6553 /* Reset the attached device */
6554 ufshcd_vops_device_reset(hba);
6556 err = ufshcd_host_reset_and_restore(hba);
6557 } while (err && --retries);
6563 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6564 * @cmd: SCSI command pointer
6566 * Returns SUCCESS/FAILED
6568 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6571 unsigned long flags;
6572 struct ufs_hba *hba;
6574 hba = shost_priv(cmd->device->host);
6576 ufshcd_hold(hba, false);
6578 * Check if there is any race with fatal error handling.
6579 * If so, wait for it to complete. Even though fatal error
6580 * handling does reset and restore in some cases, don't assume
6581 * anything from it. We are just avoiding the race here.
6584 spin_lock_irqsave(hba->host->host_lock, flags);
6585 if (!(work_pending(&hba->eh_work) ||
6586 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6587 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6589 spin_unlock_irqrestore(hba->host->host_lock, flags);
6590 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6591 flush_work(&hba->eh_work);
6594 hba->ufshcd_state = UFSHCD_STATE_RESET;
6595 ufshcd_set_eh_in_progress(hba);
6596 spin_unlock_irqrestore(hba->host->host_lock, flags);
6598 err = ufshcd_reset_and_restore(hba);
6600 spin_lock_irqsave(hba->host->host_lock, flags);
6603 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6606 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6608 ufshcd_clear_eh_in_progress(hba);
6609 spin_unlock_irqrestore(hba->host->host_lock, flags);
6611 ufshcd_release(hba);
6616 * ufshcd_get_max_icc_level - calculate the ICC level
6617 * @sup_curr_uA: max. current supported by the regulator
6618 * @start_scan: row in the descriptor table to start the scan from
6619 * @buff: power descriptor buffer
6621 * Returns the calculated max ICC level for the specified regulator
6623 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6630 for (i = start_scan; i >= 0; i--) {
6631 data = be16_to_cpup((__be16 *)&buff[2 * i]);
6632 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6633 ATTR_ICC_LVL_UNIT_OFFSET;
6634 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6636 case UFSHCD_NANO_AMP:
6637 curr_uA = curr_uA / 1000;
6639 case UFSHCD_MILI_AMP:
6640 curr_uA = curr_uA * 1000;
6643 curr_uA = curr_uA * 1000 * 1000;
6645 case UFSHCD_MICRO_AMP:
6649 if (sup_curr_uA >= curr_uA)
6654 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
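/*
 * Worked example for the scan above (descriptor values are hypothetical):
 * if row i decodes to unit UFSHCD_MILI_AMP with a value field of 25, then
 * curr_uA = 25 * 1000 = 25000. A regulator whose max_uA is >= 25000 uA
 * accepts that row, and i is returned as the max supported ICC level.
 */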
6661 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
6662 * In case regulators are not initialized we'll return 0
6663 * @hba: per-adapter instance
6664 * @desc_buf: power descriptor buffer to extract ICC levels from.
6665 * @len: length of desc_buf
6667 * Returns the calculated ICC level
6669 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6670 u8 *desc_buf, int len)
6674 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6675 !hba->vreg_info.vccq2) {
6677 "%s: Regulator capability was not set, actvIccLevel=%d",
6678 __func__, icc_level);
6682 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
6683 icc_level = ufshcd_get_max_icc_level(
6684 hba->vreg_info.vcc->max_uA,
6685 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6686 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6688 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
6689 icc_level = ufshcd_get_max_icc_level(
6690 hba->vreg_info.vccq->max_uA,
6692 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6694 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
6695 icc_level = ufshcd_get_max_icc_level(
6696 hba->vreg_info.vccq2->max_uA,
6698 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6703 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
6706 int buff_len = hba->desc_size.pwr_desc;
6710 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6714 ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0,
6715 desc_buf, buff_len);
6718 "%s: Failed reading power descriptor.len = %d ret = %d",
6719 __func__, buff_len, ret);
6723 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
6725 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
6727 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6728 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
6732 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6733 __func__, icc_level, ret);
6739 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
6741 scsi_autopm_get_device(sdev);
6742 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
6743 if (sdev->rpm_autosuspend)
6744 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
6745 RPM_AUTOSUSPEND_DELAY_MS);
6746 scsi_autopm_put_device(sdev);
6750 * ufshcd_scsi_add_wlus - Adds required W-LUs
6751 * @hba: per-adapter instance
6753 * UFS device specification requires the UFS devices to support 4 well known logical units:
6755 * "REPORT_LUNS" (address: 01h)
6756 * "UFS Device" (address: 50h)
6757 * "RPMB" (address: 44h)
6758 * "BOOT" (address: 30h)
6759 * UFS device's power management needs to be controlled by "POWER CONDITION"
6760 * field of SSU (START STOP UNIT) command. But this "power condition" field
6761 * will take effect only when it's sent to the "UFS device" well known logical unit,
6762 * hence we require the scsi_device instance to represent this logical unit in
6763 * order for the UFS host driver to send the SSU command for power management.
6765 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6766 * Block) LU so user space process can control this LU. User space may also
6767 * want to have access to BOOT LU.
6769 * This function adds scsi device instances for each of the well known LUs
6770 * (except "REPORT LUNS" LU).
6772 * Returns zero on success (all required W-LUs are added successfully),
6773 * non-zero error value on failure (if failed to add any of the required W-LU).
6775 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6778 struct scsi_device *sdev_rpmb;
6779 struct scsi_device *sdev_boot;
6781 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6782 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6783 if (IS_ERR(hba->sdev_ufs_device)) {
6784 ret = PTR_ERR(hba->sdev_ufs_device);
6785 hba->sdev_ufs_device = NULL;
6788 ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
6789 scsi_device_put(hba->sdev_ufs_device);
6791 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6792 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6793 if (IS_ERR(sdev_rpmb)) {
6794 ret = PTR_ERR(sdev_rpmb);
6795 goto remove_sdev_ufs_device;
6797 ufshcd_blk_pm_runtime_init(sdev_rpmb);
6798 scsi_device_put(sdev_rpmb);
6800 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6801 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6802 if (IS_ERR(sdev_boot)) {
6803 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6805 ufshcd_blk_pm_runtime_init(sdev_boot);
6806 scsi_device_put(sdev_boot);
6810 remove_sdev_ufs_device:
6811 scsi_remove_device(hba->sdev_ufs_device);
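/*
 * For reference, ufshcd_upiu_wlun_to_scsi_wlun() used above maps a UPIU
 * W-LUN to the SCSI well known LUN range (assuming the standard
 * SCSI_W_LUN_BASE of 0xc100): e.g. the "UFS Device" W-LU at address 50h
 * is addressed as SCSI LUN 0xc150 by the mid layer.
 */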
6816 static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
6819 u32 d_lu_wb_buf_alloc;
6821 if (!ufshcd_is_wb_allowed(hba))
6824 if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
6827 hba->dev_info.d_ext_ufs_feature_sup =
6828 get_unaligned_be32(desc_buf +
6829 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
6831 if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
6835 * WB may be supported but not configured while provisioning.
6836 * The spec says, in dedicated wb buffer mode,
6837 * a max of 1 lun would have wb buffer configured.
6838 * Now only shared buffer mode is supported.
6840 hba->dev_info.b_wb_buffer_type =
6841 desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
6843 hba->dev_info.b_presrv_uspc_en =
6844 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
6846 if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_SHARED) {
6847 hba->dev_info.d_wb_alloc_units =
6848 get_unaligned_be32(desc_buf +
6849 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
6850 if (!hba->dev_info.d_wb_alloc_units)
6853 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
6854 d_lu_wb_buf_alloc = 0;
6855 ufshcd_read_unit_desc_param(hba,
6857 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
6858 (u8 *)&d_lu_wb_buf_alloc,
6859 sizeof(d_lu_wb_buf_alloc));
6860 if (d_lu_wb_buf_alloc) {
6861 hba->dev_info.wb_dedicated_lu = lun;
6866 if (!d_lu_wb_buf_alloc)
6872 hba->caps &= ~UFSHCD_CAP_WB_EN;
6875 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
6877 struct ufs_dev_fix *f;
6878 struct ufs_dev_info *dev_info = &hba->dev_info;
6883 for (f = fixups; f->quirk; f++) {
6884 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
6885 f->wmanufacturerid == UFS_ANY_VENDOR) &&
6886 ((dev_info->model &&
6887 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
6888 !strcmp(f->model, UFS_ANY_MODEL)))
6889 hba->dev_quirks |= f->quirk;
6892 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
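/*
 * Minimal usage sketch for the export above: a vendor driver passes its
 * own fixup table, built from the UFS_FIX()/END_FIX macros in
 * ufs_quirks.h (the table below is hypothetical):
 *
 *	static struct ufs_dev_fix my_fixups[] = {
 *		UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 *			UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
 *		END_FIX
 *	};
 *
 *	ufshcd_fixup_dev_quirks(hba, my_fixups);
 */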
6894 static void ufs_fixup_device_setup(struct ufs_hba *hba)
6896 /* fix by general quirk table */
6897 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
6899 /* allow vendors to fix quirks */
6900 ufshcd_vops_fixup_dev_quirks(hba);
6903 static int ufs_get_device_desc(struct ufs_hba *hba)
6909 struct ufs_dev_info *dev_info = &hba->dev_info;
6911 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6912 QUERY_DESC_MAX_SIZE + 1);
6913 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6919 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf,
6920 hba->desc_size.dev_desc);
6922 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6928 * getting vendor (manufacturerID) and Bank Index in big endian format
6931 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6932 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6934 /* getting Specification Version in big endian format */
6935 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
6936 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
6938 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6940 err = ufshcd_read_string_desc(hba, model_index,
6941 &dev_info->model, SD_ASCII_STD);
6943 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6948 ufs_fixup_device_setup(hba);
6951 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
6952 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES enabled
6954 if (dev_info->wspecversion >= 0x310 ||
6955 dev_info->wspecversion == 0x220 ||
6956 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))
6957 ufshcd_wb_probe(hba, desc_buf);
6960 * ufshcd_read_string_desc returns the size of the string;
6961 * reset the error value
6970 static void ufs_put_device_desc(struct ufs_hba *hba)
6972 struct ufs_dev_info *dev_info = &hba->dev_info;
6974 kfree(dev_info->model);
6975 dev_info->model = NULL;
6979 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6980 * @hba: per-adapter instance
6982 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6983 * 1.61. PA_TActivate needs to be greater than or equal to peer M-PHY's
6984 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6985 * the hibern8 exit latency.
6987 * Returns zero on success, non-zero error value on failure.
6989 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6992 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6994 ret = ufshcd_dme_peer_get(hba,
6996 RX_MIN_ACTIVATETIME_CAPABILITY,
6997 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6998 &peer_rx_min_activatetime);
7002 /* make sure proper unit conversion is applied */
7003 tuned_pa_tactivate =
7004 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7005 / PA_TACTIVATE_TIME_UNIT_US);
7006 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7007 tuned_pa_tactivate);
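/*
 * Worked example for the conversion above, assuming the usual unit
 * defines (RX_MIN_ACTIVATETIME_UNIT_US = 100, PA_TACTIVATE_TIME_UNIT_US
 * = 10): a peer RX_MIN_ACTIVATETIME_CAPABILITY of 3 means 300 us, so
 * PA_TACTIVATE is programmed to 300 / 10 = 30.
 */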
7014 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7015 * @hba: per-adapter instance
7017 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7018 * 1.61. PA_Hibern8Time needs to be the maximum of local M-PHY's
7019 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7020 * This optimal value can help reduce the hibern8 exit latency.
7022 * Returns zero on success, non-zero error value on failure.
7024 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7027 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7028 u32 max_hibern8_time, tuned_pa_hibern8time;
7030 ret = ufshcd_dme_get(hba,
7031 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7032 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7033 &local_tx_hibern8_time_cap);
7037 ret = ufshcd_dme_peer_get(hba,
7038 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7039 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7040 &peer_rx_hibern8_time_cap);
7044 max_hibern8_time = max(local_tx_hibern8_time_cap,
7045 peer_rx_hibern8_time_cap);
7046 /* make sure proper unit conversion is applied */
7047 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7048 / PA_HIBERN8_TIME_UNIT_US);
7049 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7050 tuned_pa_hibern8time);
7056 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7057 * less than device PA_TACTIVATE time.
7058 * @hba: per-adapter instance
7060 * Some UFS devices require host PA_TACTIVATE to be lower than device
7061 * PA_TACTIVATE; enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk for such devices.
7064 * Returns zero on success, non-zero error value on failure.
7066 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7069 u32 granularity, peer_granularity;
7070 u32 pa_tactivate, peer_pa_tactivate;
7071 u32 pa_tactivate_us, peer_pa_tactivate_us;
7072 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7074 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7079 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7084 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7085 (granularity > PA_GRANULARITY_MAX_VAL)) {
7086 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7087 __func__, granularity);
7091 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7092 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7093 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7094 __func__, peer_granularity);
7098 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7102 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7103 &peer_pa_tactivate);
7107 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7108 peer_pa_tactivate_us = peer_pa_tactivate *
7109 gran_to_us_table[peer_granularity - 1];
7111 if (pa_tactivate_us > peer_pa_tactivate_us) {
7112 u32 new_peer_pa_tactivate;
7114 new_peer_pa_tactivate = pa_tactivate_us /
7115 gran_to_us_table[peer_granularity - 1];
7116 new_peer_pa_tactivate++;
7117 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7118 new_peer_pa_tactivate);
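/*
 * Worked example for the granularity math above (values are made up):
 * host PA_GRANULARITY = 1 (1 us steps) with PA_TACTIVATE = 64 gives
 * 64 us; peer PA_GRANULARITY = 4 (16 us steps) with PA_TACTIVATE = 3
 * gives 48 us. Since 64 > 48, the peer is bumped to 64 / 16 + 1 = 5,
 * i.e. 80 us, keeping host PA_TACTIVATE strictly lower than the peer's.
 */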
7125 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7127 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7128 ufshcd_tune_pa_tactivate(hba);
7129 ufshcd_tune_pa_hibern8time(hba);
7132 ufshcd_vops_apply_dev_quirks(hba);
7134 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7135 /* set 1ms timeout for PA_TACTIVATE */
7136 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7138 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7139 ufshcd_quirk_tune_host_pa_tactivate(hba);
7142 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7144 hba->ufs_stats.hibern8_exit_cnt = 0;
7145 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7146 hba->req_abort_count = 0;
7149 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
7153 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
7154 &hba->desc_size.dev_desc);
7156 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
7158 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
7159 &hba->desc_size.pwr_desc);
7161 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
7163 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
7164 &hba->desc_size.interc_desc);
7166 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
7168 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
7169 &hba->desc_size.conf_desc);
7171 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
7173 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
7174 &hba->desc_size.unit_desc);
7176 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
7178 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
7179 &hba->desc_size.geom_desc);
7181 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
7183 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
7184 &hba->desc_size.hlth_desc);
7186 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
7189 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7195 buff_len = hba->desc_size.geom_desc;
7196 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7202 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
7203 desc_buf, buff_len);
7205 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7210 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7211 hba->dev_info.max_lu_supported = 32;
7212 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7213 hba->dev_info.max_lu_supported = 8;
7220 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7221 {19200000, REF_CLK_FREQ_19_2_MHZ},
7222 {26000000, REF_CLK_FREQ_26_MHZ},
7223 {38400000, REF_CLK_FREQ_38_4_MHZ},
7224 {52000000, REF_CLK_FREQ_52_MHZ},
7225 {0, REF_CLK_FREQ_INVAL},
7228 static enum ufs_ref_clk_freq
7229 ufs_get_bref_clk_from_hz(unsigned long freq)
7233 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7234 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7235 return ufs_ref_clk_freqs[i].val;
7237 return REF_CLK_FREQ_INVAL;
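/*
 * Example: a "ref_clk" running at 26000000 Hz resolves to
 * REF_CLK_FREQ_26_MHZ via the table above, while any rate not listed
 * (say 24000000 Hz) yields REF_CLK_FREQ_INVAL and is flagged by
 * ufshcd_parse_dev_ref_clk_freq() below.
 */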
7240 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7244 freq = clk_get_rate(refclk);
7246 hba->dev_ref_clk_freq =
7247 ufs_get_bref_clk_from_hz(freq);
7249 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7251 "invalid ref_clk setting = %ld\n", freq);
7254 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7258 u32 freq = hba->dev_ref_clk_freq;
7260 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7261 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7264 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7269 if (ref_clk == freq)
7270 goto out; /* nothing to update */
7272 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7273 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7276 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7277 ufs_ref_clk_freqs[freq].freq_hz);
7281 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7282 ufs_ref_clk_freqs[freq].freq_hz);
7288 static int ufshcd_device_params_init(struct ufs_hba *hba)
7293 /* Init check for device descriptor sizes */
7294 ufshcd_init_desc_sizes(hba);
7296 /* Init UFS geometry descriptor related parameters */
7297 ret = ufshcd_device_geo_params_init(hba);
7301 /* Check and apply UFS device quirks */
7302 ret = ufs_get_device_desc(hba);
7304 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7309 ufshcd_get_ref_clk_gating_wait(hba);
7311 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7312 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7313 hba->dev_info.f_power_on_wp_en = flag;
7315 /* Probe maximum power mode co-supported by both UFS host and device */
7316 if (ufshcd_get_max_pwr_mode(hba))
7318 "%s: Failed getting max supported power mode\n",
7325 * ufshcd_add_lus - probe and add UFS logical units
7326 * @hba: per-adapter instance
7328 static int ufshcd_add_lus(struct ufs_hba *hba)
7332 /* Add required well known logical units to scsi mid layer */
7333 ret = ufshcd_scsi_add_wlus(hba);
7337 /* Initialize devfreq after UFS device is detected */
7338 if (ufshcd_is_clkscaling_supported(hba)) {
7339 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7341 sizeof(struct ufs_pa_layer_attr));
7342 hba->clk_scaling.saved_pwr_info.is_valid = true;
7343 if (!hba->devfreq) {
7344 ret = ufshcd_devfreq_init(hba);
7349 hba->clk_scaling.is_allowed = true;
7353 scsi_scan_host(hba->host);
7354 pm_runtime_put_sync(hba->dev);
7361 * ufshcd_probe_hba - probe hba to detect device and initialize
7362 * @hba: per-adapter instance
7363 * @async: asynchronous execution or not
7365 * Execute link-startup and verify device initialization
7367 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
7370 ktime_t start = ktime_get();
7372 ret = ufshcd_link_startup(hba);
7376 /* Debug counters initialization */
7377 ufshcd_clear_dbg_ufs_stats(hba);
7379 /* UniPro link is active now */
7380 ufshcd_set_link_active(hba);
7382 /* Verify device initialization by sending NOP OUT UPIU */
7383 ret = ufshcd_verify_dev_init(hba);
7387 /* Initiate UFS initialization, and wait until completion */
7388 ret = ufshcd_complete_dev_init(hba);
7393 * Initialize UFS device parameters used by the driver; these
7394 * parameters are associated with UFS descriptors.
7397 ret = ufshcd_device_params_init(hba);
7402 ufshcd_tune_unipro_params(hba);
7404 /* UFS device is also active now */
7405 ufshcd_set_ufs_dev_active(hba);
7406 ufshcd_force_reset_auto_bkops(hba);
7407 hba->wlun_dev_clr_ua = true;
7409 /* Gear up to HS gear if supported */
7410 if (hba->max_pwr_info.is_valid) {
7412 * Set the right value to bRefClkFreq before attempting to
7413 * switch to HS gears.
7415 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7416 ufshcd_set_dev_ref_clk(hba);
7417 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
7419 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7423 ufshcd_print_pwr_info(hba);
7427 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
7428 * and for removable UFS card as well, hence always set the parameter.
7429 * Note: Error handler may issue the device reset hence resetting
7430 * bActiveICCLevel as well so it is always safe to set this here.
7432 ufshcd_set_active_icc_lvl(hba);
7434 /* set the state as operational after switching to desired gear */
7435 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7437 ufshcd_wb_config(hba);
7438 /* Enable Auto-Hibernate if configured */
7439 ufshcd_auto_hibern8_enable(hba);
7443 trace_ufshcd_init(dev_name(hba->dev), ret,
7444 ktime_to_us(ktime_sub(ktime_get(), start)),
7445 hba->curr_dev_pwr_mode, hba->uic_link_state);
7450 * ufshcd_async_scan - asynchronous execution for probing hba
7451 * @data: data pointer to pass to this function
7452 * @cookie: cookie data
7454 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7456 struct ufs_hba *hba = (struct ufs_hba *)data;
7459 /* Initialize hba, detect and initialize UFS device */
7460 ret = ufshcd_probe_hba(hba, true);
7464 /* Probe and add UFS logical units */
7465 ret = ufshcd_add_lus(hba);
7468 * If we failed to initialize the device or the device is not
7469 * present, turn off the power/clocks etc.
7472 pm_runtime_put_sync(hba->dev);
7473 ufshcd_exit_clk_scaling(hba);
7474 ufshcd_hba_exit(hba);
7478 static const struct attribute_group *ufshcd_driver_groups[] = {
7479 &ufs_sysfs_unit_descriptor_group,
7480 &ufs_sysfs_lun_attributes_group,
7484 static struct ufs_hba_variant_params ufs_hba_vps = {
7485 .hba_enable_delay_us = 1000,
7486 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
7487 .devfreq_profile.polling_ms = 100,
7488 .devfreq_profile.target = ufshcd_devfreq_target,
7489 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
7490 .ondemand_data.upthreshold = 70,
7491 .ondemand_data.downdifferential = 5,
7494 static struct scsi_host_template ufshcd_driver_template = {
7495 .module = THIS_MODULE,
7497 .proc_name = UFSHCD,
7498 .queuecommand = ufshcd_queuecommand,
7499 .slave_alloc = ufshcd_slave_alloc,
7500 .slave_configure = ufshcd_slave_configure,
7501 .slave_destroy = ufshcd_slave_destroy,
7502 .change_queue_depth = ufshcd_change_queue_depth,
7503 .eh_abort_handler = ufshcd_abort,
7504 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7505 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
7507 .sg_tablesize = SG_ALL,
7508 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
7509 .can_queue = UFSHCD_CAN_QUEUE,
7510 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
7511 .max_host_blocked = 1,
7512 .track_queue_depth = 1,
7513 .sdev_groups = ufshcd_driver_groups,
7514 .dma_boundary = PAGE_SIZE - 1,
7515 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
7518 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7527 * "set_load" operation shall be required on those regulators
7528 * which have a specifically configured current limitation. Otherwise
7529 * zero max_uA may cause unexpected behavior when the regulator is
7530 * enabled or set to high power mode.
7535 ret = regulator_set_load(vreg->reg, ua);
7537 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7538 __func__, vreg->name, ua, ret);
7544 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7545 struct ufs_vreg *vreg)
7547 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
7550 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7551 struct ufs_vreg *vreg)
7556 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7559 static int ufshcd_config_vreg(struct device *dev,
7560 struct ufs_vreg *vreg, bool on)
7563 struct regulator *reg;
7565 int min_uV, uA_load;
7572 if (regulator_count_voltages(reg) > 0) {
7573 uA_load = on ? vreg->max_uA : 0;
7574 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7578 if (vreg->min_uV && vreg->max_uV) {
7579 min_uV = on ? vreg->min_uV : 0;
7580 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7583 "%s: %s set voltage failed, err=%d\n",
7584 __func__, name, ret);
7593 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7597 if (!vreg || vreg->enabled)
7600 ret = ufshcd_config_vreg(dev, vreg, true);
7602 ret = regulator_enable(vreg->reg);
7605 vreg->enabled = true;
7607 dev_err(dev, "%s: %s enable failed, err=%d\n",
7608 __func__, vreg->name, ret);
7613 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7617 if (!vreg || !vreg->enabled)
7620 ret = regulator_disable(vreg->reg);
7623 /* ignore errors on applying disable config */
7624 ufshcd_config_vreg(dev, vreg, false);
7625 vreg->enabled = false;
7627 dev_err(dev, "%s: %s disable failed, err=%d\n",
7628 __func__, vreg->name, ret);
7634 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7637 struct device *dev = hba->dev;
7638 struct ufs_vreg_info *info = &hba->vreg_info;
7640 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7644 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7648 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7654 ufshcd_toggle_vreg(dev, info->vccq2, false);
7655 ufshcd_toggle_vreg(dev, info->vccq, false);
7656 ufshcd_toggle_vreg(dev, info->vcc, false);
7661 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7663 struct ufs_vreg_info *info = &hba->vreg_info;
7665 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7668 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7675 vreg->reg = devm_regulator_get(dev, vreg->name);
7676 if (IS_ERR(vreg->reg)) {
7677 ret = PTR_ERR(vreg->reg);
7678 dev_err(dev, "%s: %s get failed, err=%d\n",
7679 __func__, vreg->name, ret);
7685 static int ufshcd_init_vreg(struct ufs_hba *hba)
7688 struct device *dev = hba->dev;
7689 struct ufs_vreg_info *info = &hba->vreg_info;
7691 ret = ufshcd_get_vreg(dev, info->vcc);
7695 ret = ufshcd_get_vreg(dev, info->vccq);
7699 ret = ufshcd_get_vreg(dev, info->vccq2);
7704 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7706 struct ufs_vreg_info *info = &hba->vreg_info;
7709 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
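/*
 * For reference, devm_regulator_get() resolves supplies by name, so a
 * platform typically wires up the regulators used above in device tree
 * along these lines (hypothetical snippet):
 *
 *	vcc-supply = <&ufs_vcc>;
 *	vccq-supply = <&ufs_vccq>;
 *	vccq2-supply = <&ufs_vccq2>;
 *	vdd-hba-supply = <&ufs_phy_ldo>;
 */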
7714 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7718 struct ufs_clk_info *clki;
7719 struct list_head *head = &hba->clk_list_head;
7720 unsigned long flags;
7721 ktime_t start = ktime_get();
7722 bool clk_state_changed = false;
7724 if (list_empty(head))
7727 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7731 list_for_each_entry(clki, head, list) {
7732 if (!IS_ERR_OR_NULL(clki->clk)) {
7733 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7736 clk_state_changed = on ^ clki->enabled;
7737 if (on && !clki->enabled) {
7738 ret = clk_prepare_enable(clki->clk);
7740 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7741 __func__, clki->name, ret);
7744 } else if (!on && clki->enabled) {
7745 clk_disable_unprepare(clki->clk);
7748 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7749 clki->name, on ? "en" : "dis");
7753 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7759 list_for_each_entry(clki, head, list) {
7760 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7761 clk_disable_unprepare(clki->clk);
7763 } else if (!ret && on) {
7764 spin_lock_irqsave(hba->host->host_lock, flags);
7765 hba->clk_gating.state = CLKS_ON;
7766 trace_ufshcd_clk_gating(dev_name(hba->dev),
7767 hba->clk_gating.state);
7768 spin_unlock_irqrestore(hba->host->host_lock, flags);
7771 if (clk_state_changed)
7772 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7773 (on ? "on" : "off"),
7774 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7778 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7780 return __ufshcd_setup_clocks(hba, on, false);
7783 static int ufshcd_init_clocks(struct ufs_hba *hba)
7786 struct ufs_clk_info *clki;
7787 struct device *dev = hba->dev;
7788 struct list_head *head = &hba->clk_list_head;
7790 if (list_empty(head))
7793 list_for_each_entry(clki, head, list) {
7797 clki->clk = devm_clk_get(dev, clki->name);
7798 if (IS_ERR(clki->clk)) {
7799 ret = PTR_ERR(clki->clk);
7800 dev_err(dev, "%s: %s clk get failed, %d\n",
7801 __func__, clki->name, ret);
7806 * Parse device ref clk freq as per device tree "ref_clk".
7807 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
7808 * in ufshcd_alloc_host().
7810 if (!strcmp(clki->name, "ref_clk"))
7811 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7813 if (clki->max_freq) {
7814 ret = clk_set_rate(clki->clk, clki->max_freq);
7816 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7817 __func__, clki->name,
7818 clki->max_freq, ret);
7821 clki->curr_freq = clki->max_freq;
7823 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7824 clki->name, clk_get_rate(clki->clk));
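/*
 * The clock list walked above is typically populated from device tree,
 * e.g. (hypothetical snippet, following the ufshcd-pltfrm binding):
 *
 *	clock-names = "core_clk", "ref_clk";
 *	freq-table-hz = <100000000 200000000>, <19200000 19200000>;
 *
 * Each freq-table-hz pair provides clki->min_freq/max_freq, and the
 * "ref_clk" entry additionally feeds ufshcd_parse_dev_ref_clk_freq().
 */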
7830 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7837 err = ufshcd_vops_init(hba);
7841 err = ufshcd_vops_setup_regulators(hba, true);
7848 ufshcd_vops_exit(hba);
7851 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7852 __func__, ufshcd_get_var_name(hba), err);
7856 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7861 ufshcd_vops_setup_regulators(hba, false);
7863 ufshcd_vops_exit(hba);
7866 static int ufshcd_hba_init(struct ufs_hba *hba)
7871 * Handle host controller power separately from the UFS device power
7872 * rails as it helps control the UFS host controller power
7873 * collapse easily, which is different from UFS device power collapse.
7874 * Also, enable the host controller power before we go ahead with the rest
7875 * of the initialization here.
7877 err = ufshcd_init_hba_vreg(hba);
7881 err = ufshcd_setup_hba_vreg(hba, true);
7885 err = ufshcd_init_clocks(hba);
7887 goto out_disable_hba_vreg;
7889 err = ufshcd_setup_clocks(hba, true);
7891 goto out_disable_hba_vreg;
7893 err = ufshcd_init_vreg(hba);
7895 goto out_disable_clks;
7897 err = ufshcd_setup_vreg(hba, true);
7899 goto out_disable_clks;
7901 err = ufshcd_variant_hba_init(hba);
7903 goto out_disable_vreg;
7905 hba->is_powered = true;
7909 ufshcd_setup_vreg(hba, false);
7911 ufshcd_setup_clocks(hba, false);
7912 out_disable_hba_vreg:
7913 ufshcd_setup_hba_vreg(hba, false);
7918 static void ufshcd_hba_exit(struct ufs_hba *hba)
7920 if (hba->is_powered) {
7921 ufshcd_variant_hba_exit(hba);
7922 ufshcd_setup_vreg(hba, false);
7923 ufshcd_suspend_clkscaling(hba);
7924 if (ufshcd_is_clkscaling_supported(hba))
7925 if (hba->devfreq)
7926 ufshcd_suspend_clkscaling(hba);
7927 ufshcd_setup_clocks(hba, false);
7928 ufshcd_setup_hba_vreg(hba, false);
7929 hba->is_powered = false;
7930 ufs_put_device_desc(hba);
7935 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7937 unsigned char cmd[6] = {REQUEST_SENSE,
7946 buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
7952 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7953 UFS_SENSE_SIZE, NULL, NULL,
7954 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7956 pr_err("%s: failed with err %d\n", __func__, ret);
7964 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7966 * @hba: per adapter instance
7967 * @pwr_mode: device power mode to set
7969 * Returns 0 if requested power mode is set successfully
7970 * Returns non-zero if failed to set the requested power mode
7972 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7973 enum ufs_dev_pwr_mode pwr_mode)
7975 unsigned char cmd[6] = { START_STOP };
7976 struct scsi_sense_hdr sshdr;
7977 struct scsi_device *sdp;
7978 unsigned long flags;
7981 spin_lock_irqsave(hba->host->host_lock, flags);
7982 sdp = hba->sdev_ufs_device;
7984 ret = scsi_device_get(sdp);
7985 if (!ret && !scsi_device_online(sdp)) {
7987 scsi_device_put(sdp);
7992 spin_unlock_irqrestore(hba->host->host_lock, flags);
7998 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7999 * handling, which would wait for host to be resumed. Since we know
8000 * we are functional while we are here, skip host resume in error
8003 hba->host->eh_noresume = 1;
8004 if (hba->wlun_dev_clr_ua) {
8005 ret = ufshcd_send_request_sense(hba, sdp);
8008 /* Unit attention condition is cleared now */
8009 hba->wlun_dev_clr_ua = false;
8012 cmd[4] = pwr_mode << 4;
8015 * Current function would be generally called from the power management
8016 * callbacks hence set the RQF_PM flag so that it doesn't resume the
8017 * already suspended children.
8019 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8020 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8022 sdev_printk(KERN_WARNING, sdp,
8023 "START_STOP failed for power mode: %d, result %x\n",
8025 if (driver_byte(ret) == DRIVER_SENSE)
8026 scsi_print_sense_hdr(sdp, NULL, &sshdr);
8030 hba->curr_dev_pwr_mode = pwr_mode;
8032 scsi_device_put(sdp);
8033 hba->host->eh_noresume = 0;
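/*
 * Example of the cmd[4] encoding above: the START STOP UNIT "POWER
 * CONDITION" field sits in bits 7:4 of CDB byte 4, so requesting
 * UFS_SLEEP_PWR_MODE (2) yields cmd[4] = 0x20 and
 * UFS_POWERDOWN_PWR_MODE (3) yields cmd[4] = 0x30.
 */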
8037 static int ufshcd_link_state_transition(struct ufs_hba *hba,
8038 enum uic_link_state req_link_state,
8039 int check_for_bkops)
8043 if (req_link_state == hba->uic_link_state)
8046 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8047 ret = ufshcd_uic_hibern8_enter(hba);
8049 ufshcd_set_link_hibern8(hba);
8054 * If autobkops is enabled, link can't be turned off because
8055 * turning off the link would also turn off the device.
8057 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8058 (!check_for_bkops || !hba->auto_bkops_enabled)) {
8060 * Let's make sure that the link is in low power mode; we are doing
8061 * this currently by putting the link in Hibern8. Another way to
8062 * put the link in low power mode is to send the DME end point
8063 * reset to the device and then send the DME reset command to local
8064 * UniPro. But putting the link in Hibern8 is much faster.
8066 ret = ufshcd_uic_hibern8_enter(hba);
8070 * Change controller state to "reset state" which
8071 * should also put the link in off/reset state
8073 ufshcd_hba_stop(hba);
8075 * TODO: Check if we need any delay to make sure that
8076 * controller is reset
8078 ufshcd_set_link_off(hba);
8085 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8088 * It seems some UFS devices may keep drawing more than sleep current
8089 * (at least for 500us) from UFS rails (especially from VCCQ rail).
8090 * To avoid this situation, add 2ms delay before putting these UFS
8091 * rails in LPM mode.
8093 if (!ufshcd_is_link_active(hba) &&
8094 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8095 usleep_range(2000, 2100);
8098 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save some power.
8101 * If UFS device and link is in OFF state, all power supplies (VCC,
8102 * VCCQ, VCCQ2) can be turned off if power on write protect is not
8103 * required. If UFS link is inactive (Hibern8 or OFF state) and device
8104 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8106 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8107 * in low power state which would save some power.
8109 * If Write Booster is enabled and the device needs to flush the WB
8110 * buffer OR if bkops status is urgent for WB, keep Vcc on.
8112 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8113 !hba->dev_info.is_lu_power_on_wp) {
8114 ufshcd_setup_vreg(hba, false);
8115 } else if (!ufshcd_is_ufs_dev_active(hba)) {
8116 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8117 if (!ufshcd_is_link_active(hba)) {
8118 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8119 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8124 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8128 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8129 !hba->dev_info.is_lu_power_on_wp) {
8130 ret = ufshcd_setup_vreg(hba, true);
8131 } else if (!ufshcd_is_ufs_dev_active(hba)) {
8132 if (!ret && !ufshcd_is_link_active(hba)) {
8133 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8136 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8140 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8145 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8147 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8152 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8154 if (ufshcd_is_link_off(hba))
8155 ufshcd_setup_hba_vreg(hba, false);
8158 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8160 if (ufshcd_is_link_off(hba))
8161 ufshcd_setup_hba_vreg(hba, true);
8165 * ufshcd_suspend - helper function for suspend operations
8166 * @hba: per adapter instance
8167 * @pm_op: desired low power operation type
8169 * This function will try to put the UFS device and link into low power
8170 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8171 * (System PM level).
8173 * If this function is called during shutdown, it will make sure that
8174 * both UFS device and UFS link are powered off.
8176 * NOTE: UFS device & link must be active before we enter this function.
8178 * Returns 0 for success and non-zero for failure
8180 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8183 enum ufs_pm_level pm_lvl;
8184 enum ufs_dev_pwr_mode req_dev_pwr_mode;
8185 enum uic_link_state req_link_state;
8187 hba->pm_op_in_progress = 1;
8188 if (!ufshcd_is_shutdown_pm(pm_op)) {
8189 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8190 hba->rpm_lvl : hba->spm_lvl;
8191 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8192 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8194 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8195 req_link_state = UIC_LINK_OFF_STATE;
8199 * If we can't transition into any of the low power modes
8200 * just gate the clocks.
8202 ufshcd_hold(hba, false);
8203 hba->clk_gating.is_suspended = true;
8205 if (hba->clk_scaling.is_allowed) {
8206 cancel_work_sync(&hba->clk_scaling.suspend_work);
8207 cancel_work_sync(&hba->clk_scaling.resume_work);
8208 ufshcd_suspend_clkscaling(hba);
8211 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8212 req_link_state == UIC_LINK_ACTIVE_STATE) {
8216 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8217 (req_link_state == hba->uic_link_state))
8220 /* UFS device & link must be active before we enter in this function */
8221 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8226 if (ufshcd_is_runtime_pm(pm_op)) {
8227 if (ufshcd_can_autobkops_during_suspend(hba)) {
8229 * The device is idle with no requests in the queue,
8230 * allow background operations if bkops status shows
8231 * that performance might be impacted.
8233 ret = ufshcd_urgent_bkops(hba);
8237 /* make sure that auto bkops is disabled */
8238 ufshcd_disable_auto_bkops(hba);
8241 * If device needs to do BKOP or WB buffer flush during
8242 * Hibern8, keep device power mode as "active power mode" and VCC supply.
8245 hba->dev_info.b_rpm_dev_flush_capable =
8246 hba->auto_bkops_enabled ||
8247 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8248 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8249 ufshcd_is_auto_hibern8_enabled(hba))) &&
8250 ufshcd_wb_need_flush(hba));
8253 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8254 if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8255 !ufshcd_is_runtime_pm(pm_op)) {
8256 /* ensure that bkops is disabled */
8257 ufshcd_disable_auto_bkops(hba);
8260 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8261 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8267 flush_work(&hba->eeh_work);
8268 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8270 goto set_dev_active;
8272 ufshcd_vreg_set_lpm(hba);
8276 * Call vendor specific suspend callback. As these callbacks may access
8277 * vendor specific host controller register space call them before the
8278 * host clocks are ON.
8280 ret = ufshcd_vops_suspend(hba, pm_op);
8282 goto set_link_active;
8284 * Disable the host IRQ as there won't be any
8285 * host controller transaction expected till resume.
8287 ufshcd_disable_irq(hba);
8289 if (!ufshcd_is_link_active(hba))
8290 ufshcd_setup_clocks(hba, false);
8292 /* If link is active, device ref_clk can't be switched off */
8293 __ufshcd_setup_clocks(hba, false, true);
8295 hba->clk_gating.state = CLKS_OFF;
8296 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
8298 /* Put the host controller in low power mode if possible */
8299 ufshcd_hba_vreg_set_lpm(hba);
8303 if (hba->clk_scaling.is_allowed)
8304 ufshcd_resume_clkscaling(hba);
8305 ufshcd_vreg_set_hpm(hba);
8306 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8307 ufshcd_set_link_active(hba);
8308 else if (ufshcd_is_link_off(hba))
8309 ufshcd_host_reset_and_restore(hba);
8311 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8312 ufshcd_disable_auto_bkops(hba);
8314 if (hba->clk_scaling.is_allowed)
8315 ufshcd_resume_clkscaling(hba);
8316 hba->clk_gating.is_suspended = false;
8317 hba->dev_info.b_rpm_dev_flush_capable = false;
8318 ufshcd_release(hba);
8320 if (hba->dev_info.b_rpm_dev_flush_capable) {
8321 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8322 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8325 hba->pm_op_in_progress = 0;
8328 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
8333 * ufshcd_resume - helper function for resume operations
8334 * @hba: per adapter instance
8335 * @pm_op: runtime PM or system PM
8337 * This function basically brings the UFS device, UniPro link and controller to active state.
8340 * Returns 0 for success and non-zero for failure
8342 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8345 enum uic_link_state old_link_state;
8347 hba->pm_op_in_progress = 1;
8348 old_link_state = hba->uic_link_state;
8350 ufshcd_hba_vreg_set_hpm(hba);
8351 /* Make sure clocks are enabled before accessing controller */
8352 ret = ufshcd_setup_clocks(hba, true);
8356 /* enable the host irq as host controller would be active soon */
8357 ufshcd_enable_irq(hba);
8359 ret = ufshcd_vreg_set_hpm(hba);
8361 goto disable_irq_and_vops_clks;
8364 * Call vendor specific resume callback. As these callbacks may access
8365 * vendor specific host controller register space call them when the
8366 * host clocks are ON.
8368 ret = ufshcd_vops_resume(hba, pm_op);
8372 if (ufshcd_is_link_hibern8(hba)) {
8373 ret = ufshcd_uic_hibern8_exit(hba);
8375 ufshcd_set_link_active(hba);
8377 goto vendor_suspend;
8378 } else if (ufshcd_is_link_off(hba)) {
8380 * A full initialization of the host and the device is
8381 * required since the link was put to off during suspend.
8383 ret = ufshcd_reset_and_restore(hba);
8385 * ufshcd_reset_and_restore() should have already
8386 * set the link state as active
8388 if (ret || !ufshcd_is_link_active(hba))
8389 goto vendor_suspend;
8392 if (!ufshcd_is_ufs_dev_active(hba)) {
8393 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8395 goto set_old_link_state;
8398 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8399 ufshcd_enable_auto_bkops(hba);
8402 * If BKOPs operations are urgently needed at this moment then
8403 * keep auto-bkops enabled or else disable it.
8405 ufshcd_urgent_bkops(hba);
8407 hba->clk_gating.is_suspended = false;
8409 if (hba->clk_scaling.is_allowed)
8410 ufshcd_resume_clkscaling(hba);
8412 /* Enable Auto-Hibernate if configured */
8413 ufshcd_auto_hibern8_enable(hba);
8415 if (hba->dev_info.b_rpm_dev_flush_capable) {
8416 hba->dev_info.b_rpm_dev_flush_capable = false;
8417 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8420 /* Schedule clock gating in case of no access to UFS device yet */
8421 ufshcd_release(hba);
8426 ufshcd_link_state_transition(hba, old_link_state, 0);
8428 ufshcd_vops_suspend(hba, pm_op);
8430 ufshcd_vreg_set_lpm(hba);
8431 disable_irq_and_vops_clks:
8432 ufshcd_disable_irq(hba);
8433 if (hba->clk_scaling.is_allowed)
8434 ufshcd_suspend_clkscaling(hba);
8435 ufshcd_setup_clocks(hba, false);
8437 hba->pm_op_in_progress = 0;
8439 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
8444 * ufshcd_system_suspend - system suspend routine
8445 * @hba: per adapter instance
8447 * Check the description of ufshcd_suspend() function for more details.
8449 * Returns 0 for success and non-zero for failure
8451 int ufshcd_system_suspend(struct ufs_hba *hba)
8454 ktime_t start = ktime_get();
8456 if (!hba || !hba->is_powered)
8459 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8460 hba->curr_dev_pwr_mode) &&
8461 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8462 hba->uic_link_state))
8465 if (pm_runtime_suspended(hba->dev)) {
8467 * UFS device and/or UFS link low power states during runtime
8468 * suspend seem to be different from what is expected during
8469 * system suspend. Hence runtime resume the device & link and
8470 * let the system suspend low power states take effect.
8471 * TODO: If resume takes a long time, we might optimize
8472 * it in the future by not resuming everything if possible.
8474 ret = ufshcd_runtime_resume(hba);
8479 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8481 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8482 ktime_to_us(ktime_sub(ktime_get(), start)),
8483 hba->curr_dev_pwr_mode, hba->uic_link_state);
8485 hba->is_sys_suspended = true;
8488 EXPORT_SYMBOL(ufshcd_system_suspend);
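/*
 * For reference, the spm_lvl checked above encodes a (device power mode,
 * link state) pair; e.g. UFS_PM_LVL_3 maps to UFS_SLEEP_PWR_MODE plus
 * UIC_LINK_HIBERN8_STATE, which is what
 * ufs_get_pm_lvl_to_dev_pwr_mode()/ufs_get_pm_lvl_to_link_pwr_state()
 * return for it.
 */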
8491 * ufshcd_system_resume - system resume routine
8492 * @hba: per adapter instance
8494 * Returns 0 for success and non-zero for failure
8497 int ufshcd_system_resume(struct ufs_hba *hba)
8500 ktime_t start = ktime_get();
8505 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8507 /* Let the runtime resume take care of resuming if runtime suspended. */
8512 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8514 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8515 ktime_to_us(ktime_sub(ktime_get(), start)),
8516 hba->curr_dev_pwr_mode, hba->uic_link_state);
8518 hba->is_sys_suspended = false;
8521 EXPORT_SYMBOL(ufshcd_system_resume);
8524 * ufshcd_runtime_suspend - runtime suspend routine
8525 * @hba: per adapter instance
8527 * Check the description of ufshcd_suspend() function for more details.
8529 * Returns 0 for success and non-zero for failure
8531 int ufshcd_runtime_suspend(struct ufs_hba *hba)
8534 ktime_t start = ktime_get();
8539 if (!hba->is_powered)
8542 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8544 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8545 ktime_to_us(ktime_sub(ktime_get(), start)),
8546 hba->curr_dev_pwr_mode, hba->uic_link_state);
8549 EXPORT_SYMBOL(ufshcd_runtime_suspend);
8552 * ufshcd_runtime_resume - runtime resume routine
8553 * @hba: per adapter instance
8555 * This function basically brings the UFS device, UniPro link and controller
8556 * to active state. The following operations are done in this function:
8558 * 1. Turn on all the controller related clocks
8559 * 2. Bring the UniPro link out of Hibernate state
8560 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device to active state.
8562 * 4. If auto-bkops is enabled on the device, disable it.
8564 * So the following would be the possible power state after this function returns:
8566 * S1: UFS device in Active state with VCC rail ON
8567 * UniPro link in Active state
8568 * All the UFS/UniPro controller clocks are ON
8570 * Returns 0 for success and non-zero for failure
8572 int ufshcd_runtime_resume(struct ufs_hba *hba)
8575 ktime_t start = ktime_get();
8580 if (!hba->is_powered)
8583 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8585 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8586 ktime_to_us(ktime_sub(ktime_get(), start)),
8587 hba->curr_dev_pwr_mode, hba->uic_link_state);
8590 EXPORT_SYMBOL(ufshcd_runtime_resume);
8592 int ufshcd_runtime_idle(struct ufs_hba *hba)
8596 EXPORT_SYMBOL(ufshcd_runtime_idle);
8599 * ufshcd_shutdown - shutdown routine
8600 * @hba: per adapter instance
8602 * This function would power off both UFS device and UFS link.
8604 * Returns 0 always to allow force shutdown even in case of errors.
8606 int ufshcd_shutdown(struct ufs_hba *hba)
8610 if (!hba->is_powered)
8613 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8614 goto out;
8616 if (pm_runtime_suspended(hba->dev)) {
8617 ret = ufshcd_runtime_resume(hba);
8622 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8625 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8626 /* allow force shutdown even in case of errors */
8629 EXPORT_SYMBOL(ufshcd_shutdown);
8632 * ufshcd_remove - de-allocate SCSI host and host memory space
8633 * data structure memory
8634 * @hba: per adapter instance
8636 void ufshcd_remove(struct ufs_hba *hba)
8638 ufs_bsg_remove(hba);
8639 ufs_sysfs_remove_nodes(hba->dev);
8640 blk_cleanup_queue(hba->tmf_queue);
8641 blk_mq_free_tag_set(&hba->tmf_tag_set);
8642 blk_cleanup_queue(hba->cmd_queue);
8643 scsi_remove_host(hba->host);
8644 /* disable interrupts */
8645 ufshcd_disable_intr(hba, hba->intr_mask);
8646 ufshcd_hba_stop(hba);
8648 ufshcd_exit_clk_scaling(hba);
8649 ufshcd_exit_clk_gating(hba);
8650 if (ufshcd_is_clkscaling_supported(hba))
8651 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8652 ufshcd_hba_exit(hba);
8654 EXPORT_SYMBOL_GPL(ufshcd_remove);
8657 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8658 * @hba: pointer to Host Bus Adapter (HBA)
8660 void ufshcd_dealloc_host(struct ufs_hba *hba)
8662 scsi_host_put(hba->host);
8664 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8667 * ufshcd_set_dma_mask - Set dma mask based on the controller
8668 * addressing capability
8669 * @hba: per adapter instance
8671 * Returns 0 for success, non-zero for failure
8673 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8675 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8676 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8679 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8683 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
8684 * @dev: pointer to device handle
8685 * @hba_handle: driver private handle
8686 * Returns 0 on success, non-zero value on failure
8688 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
8690 struct Scsi_Host *host;
8691 struct ufs_hba *hba;
8696 "Invalid memory reference for dev is NULL\n");
8701 host = scsi_host_alloc(&ufshcd_driver_template,
8702 sizeof(struct ufs_hba));
8704 dev_err(dev, "scsi_host_alloc failed\n");
8708 hba = shost_priv(host);
8712 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
8714 INIT_LIST_HEAD(&hba->clk_list_head);
8719 EXPORT_SYMBOL(ufshcd_alloc_host);
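/*
 * Minimal glue-driver usage sketch for ufshcd_alloc_host() above and
 * ufshcd_init() below (error handling trimmed; see ufshcd-pltfrm.c or
 * ufshcd-pci.c for the real sequence):
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *	err = ufshcd_init(hba, mmio_base, irq);
 *	if (err)
 *		ufshcd_dealloc_host(hba);
 *	return err;
 */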
8721 /* This function exists because blk_mq_alloc_tag_set() requires this. */
8722 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
8723 const struct blk_mq_queue_data *qd)
8726 return BLK_STS_NOTSUPP;
8729 static const struct blk_mq_ops ufshcd_tmf_ops = {
8730 .queue_rq = ufshcd_queue_tmf,
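/*
 * For reference, the tmf_queue built on this tag set in ufshcd_init()
 * below never issues real requests; the task management paths only
 * allocate requests from it so that the block layer hands out a unique
 * tag in [0, nutmrs) for each concurrent task management function.
 */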
8734 * ufshcd_init - Driver initialization routine
8735 * @hba: per-adapter instance
8736 * @mmio_base: base register address
8737 * @irq: Interrupt line of device
8738 * Returns 0 on success, non-zero value on failure
8740 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8743 struct Scsi_Host *host = hba->host;
8744 struct device *dev = hba->dev;
8748 "Invalid memory reference for mmio_base is NULL\n");
8753 hba->mmio_base = mmio_base;
8755 hba->vps = &ufs_hba_vps;
8757 err = ufshcd_hba_init(hba);
8761 /* Read capabilities registers */
8762 ufshcd_hba_capabilities(hba);
8764 /* Get UFS version supported by the controller */
8765 hba->ufs_version = ufshcd_get_ufs_version(hba);
8767 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8768 (hba->ufs_version != UFSHCI_VERSION_11) &&
8769 (hba->ufs_version != UFSHCI_VERSION_20) &&
8770 (hba->ufs_version != UFSHCI_VERSION_21))
8771 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8774 /* Get Interrupt bit mask per version */
8775 hba->intr_mask = ufshcd_get_intr_mask(hba);
8777 err = ufshcd_set_dma_mask(hba);
8779 dev_err(hba->dev, "set dma mask failed\n");
8783 /* Allocate memory for host memory space */
8784 err = ufshcd_memory_alloc(hba);
8786 dev_err(hba->dev, "Memory allocation failed\n");
8791 ufshcd_host_memory_configure(hba);
8793 host->can_queue = hba->nutrs;
8794 host->cmd_per_lun = hba->nutrs;
8795 host->max_id = UFSHCD_MAX_ID;
8796 host->max_lun = UFS_MAX_LUNS;
8797 host->max_channel = UFSHCD_MAX_CHANNEL;
8798 host->unique_id = host->host_no;
8799 host->max_cmd_len = UFS_CDB_SIZE;
8801 hba->max_pwr_info.is_valid = false;
8803 /* Initialize work queues */
8804 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8805 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8807 /* Initialize UIC command mutex */
8808 mutex_init(&hba->uic_cmd_mutex);
8810 /* Initialize mutex for device management commands */
8811 mutex_init(&hba->dev_cmd.lock);
8813 init_rwsem(&hba->clk_scaling_lock);
8815 ufshcd_init_clk_gating(hba);
8817 ufshcd_init_clk_scaling(hba);
8820 * In order to avoid any spurious interrupt immediately after
8821 * registering UFS controller interrupt handler, clear any pending UFS
8822 * interrupt status and disable all the UFS interrupts.
8824 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8825 REG_INTERRUPT_STATUS);
8826 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8828 * Make sure that UFS interrupts are disabled and any pending interrupt
8829 * status is cleared before registering UFS interrupt handler.
8833 /* IRQ registration */
8834 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8836 dev_err(hba->dev, "request irq failed\n");
8839 hba->is_irq_enabled = true;
8842 err = scsi_add_host(host, hba->dev);
8844 dev_err(hba->dev, "scsi_add_host failed\n");
8848 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
8849 if (IS_ERR(hba->cmd_queue)) {
8850 err = PTR_ERR(hba->cmd_queue);
8851 goto out_remove_scsi_host;
8854 hba->tmf_tag_set = (struct blk_mq_tag_set) {
8856 .queue_depth = hba->nutmrs,
8857 .ops = &ufshcd_tmf_ops,
8858 .flags = BLK_MQ_F_NO_SCHED,
8860 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
8862 goto free_cmd_queue;
8863 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
8864 if (IS_ERR(hba->tmf_queue)) {
8865 err = PTR_ERR(hba->tmf_queue);
8866 goto free_tmf_tag_set;
8869 /* Reset the attached device */
8870 ufshcd_vops_device_reset(hba);
8872 /* Host controller enable */
8873 err = ufshcd_hba_enable(hba);
8875 dev_err(hba->dev, "Host controller enable failed\n");
8876 ufshcd_print_host_regs(hba);
8877 ufshcd_print_host_state(hba);
8878 goto free_tmf_queue;
8882 * Set the default power management level for runtime and system PM.
8883 * Default power saving mode is to keep UFS link in Hibern8 state
8884 * and UFS device in sleep state.
8886 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8888 UIC_LINK_HIBERN8_STATE);
8889 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8891 UIC_LINK_HIBERN8_STATE);
8893 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
8894 ufshcd_rpm_dev_flush_recheck_work);
8896 /* Set the default auto-hibernate idle timer value to 150 ms */
8897 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
8898 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8899 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8902 /* Hold auto suspend until async scan completes */
8903 pm_runtime_get_sync(dev);
8904 atomic_set(&hba->scsi_block_reqs_cnt, 0);
8906 * We are assuming that the device wasn't put in sleep/power-down
8907 * state exclusively during the boot stage before the kernel starts.
8908 * This assumption helps avoid doing link startup twice during
8909 * ufshcd_probe_hba().
8911 ufshcd_set_ufs_dev_active(hba);
8913 async_schedule(ufshcd_async_scan, hba);
8914 ufs_sysfs_add_nodes(hba->dev);
8919 blk_cleanup_queue(hba->tmf_queue);
8921 blk_mq_free_tag_set(&hba->tmf_tag_set);
8923 blk_cleanup_queue(hba->cmd_queue);
8924 out_remove_scsi_host:
8925 scsi_remove_host(hba->host);
8927 ufshcd_exit_clk_scaling(hba);
8928 ufshcd_exit_clk_gating(hba);
8930 hba->is_irq_enabled = false;
8931 ufshcd_hba_exit(hba);
8935 EXPORT_SYMBOL_GPL(ufshcd_init);
8937 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8938 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8939 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8940 MODULE_LICENSE("GPL");
8941 MODULE_VERSION(UFSHCD_DRIVER_VERSION);