Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2d70569..e04e8b8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -42,6 +42,7 @@
 #include <linux/nls.h>
 #include <linux/of.h>
 #include <linux/bitfield.h>
+#include <linux/blk-pm.h>
 #include "ufshcd.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
@@ -91,6 +92,9 @@
 /* default delay of autosuspend: 2000 ms */
 #define RPM_AUTOSUSPEND_DELAY_MS 2000
 
+/* Default value of wait time before gating device ref clock */
+#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
+
 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                           \
        ({                                                              \
                int _ret;                                               \
@@ -532,6 +536,18 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
                 hba->pwr_info.hs_rate);
 }
 
+void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
+{
+       if (!us)
+               return;
+
+       if (us < 10)
+               udelay(us);
+       else
+               usleep_range(us, us + tolerance);
+}
+EXPORT_SYMBOL_GPL(ufshcd_delay_us);
+
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
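
The new ufshcd_delay_us() helper exists because usleep_range() is a poor fit for very short waits (the hrtimer overhead can exceed the delay itself), so anything under 10 us falls back to udelay(). A minimal usage sketch, with a hypothetical board-specific delay value:

	/* hypothetical example: PHY needs a short, board-specific settle time */
	unsigned long settle_us = 5;		/* < 10 us, so udelay() is used */

	ufshcd_delay_us(settle_us, 50);		/* tolerance is ignored for udelay */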
@@ -642,11 +658,7 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
  */
 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 {
-       if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
-               ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
-       else
-               ufshcd_writel(hba, ~(1 << pos),
-                               REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+       ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 }
 
 /**
@@ -656,10 +668,7 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
  */
 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
 {
-       if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
-               ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
-       else
-               ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+       ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
 }
 
 /**
@@ -1191,6 +1200,9 @@ static int ufshcd_devfreq_target(struct device *dev,
        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
 
+       clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+       /* Override with the closest supported frequency */
+       *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (ufshcd_eh_in_progress(hba)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
@@ -1205,8 +1217,11 @@ static int ufshcd_devfreq_target(struct device *dev,
                goto out;
        }
 
-       clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+       /* Decide based on the rounded-off frequency and update */
        scale_up = (*freq == clki->max_freq) ? true : false;
+       if (!scale_up)
+               *freq = clki->min_freq;
+       /* Update the frequency */
        if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                ret = 0;
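
devfreq hands ufshcd_devfreq_target() an OPP frequency that the clock may not support exactly, so clk_round_rate() first snaps the request to the closest achievable rate; the scale-up decision and the min_freq fallback then operate on that rounded value. A worked example, assuming hypothetical rates:

	/* assume min_freq = 50000000 and max_freq = 200000000 */
	*freq = clk_round_rate(clki->clk, 199999999);	/* e.g. -> 200000000 */
	scale_up = (*freq == clki->max_freq);		/* true: scale up */
	if (!scale_up)
		*freq = clki->min_freq;			/* otherwise clamp down */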
@@ -1254,6 +1269,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
        unsigned long flags;
+       struct list_head *clk_list = &hba->clk_list_head;
+       struct ufs_clk_info *clki;
 
        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
@@ -1264,6 +1281,13 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
        if (!scaling->window_start_t)
                goto start_window;
 
+       clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+       /*
+        * If the current frequency is 0, the ondemand governor assumes that
+        * no initial frequency has been set and always requests the maximum
+        * frequency.
+        */
+       stat->current_frequency = clki->curr_freq;
        if (scaling->is_busy_started)
                scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
                                        scaling->busy_start_t));
@@ -1292,6 +1316,17 @@ static struct devfreq_dev_profile ufs_devfreq_profile = {
        .get_dev_status = ufshcd_devfreq_get_dev_status,
 };
 
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static struct devfreq_simple_ondemand_data ufs_ondemand_data = {
+       .upthreshold = 70,
+       .downdifferential = 5,
+};
+
+static void *gov_data = &ufs_ondemand_data;
+#else
+static void *gov_data; /* NULL */
+#endif
+
 static int ufshcd_devfreq_init(struct ufs_hba *hba)
 {
        struct list_head *clk_list = &hba->clk_list_head;
@@ -1307,10 +1342,12 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
        dev_pm_opp_add(hba->dev, clki->min_freq, 0);
        dev_pm_opp_add(hba->dev, clki->max_freq, 0);
 
+       ufshcd_vops_config_scaling_param(hba, &ufs_devfreq_profile,
+                                        gov_data);
        devfreq = devfreq_add_device(hba->dev,
                        &ufs_devfreq_profile,
                        DEVFREQ_GOV_SIMPLE_ONDEMAND,
-                       NULL);
+                       gov_data);
        if (IS_ERR(devfreq)) {
                ret = PTR_ERR(devfreq);
                dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
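
upthreshold/downdifferential tune the simple_ondemand governor: it requests the maximum frequency when the measured load exceeds 70% and drops back toward the minimum only once load falls below 65%, so the 5% gap acts as hysteresis. A rough paraphrase of the governor's decision (not the exact kernel code):

	/* load in percent over the last polling window */
	unsigned long load = stat->busy_time * 100 / stat->total_time;

	if (load > 70)			/* upthreshold */
		target = max_freq;
	else if (load < 70 - 5)		/* upthreshold - downdifferential */
		target = min_freq;
	/* otherwise keep the current frequency */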
@@ -1518,6 +1555,11 @@ start:
                 */
                if (ufshcd_can_hibern8_during_gating(hba) &&
                    ufshcd_is_link_hibern8(hba)) {
+                       if (async) {
+                               rc = -EAGAIN;
+                               hba->clk_gating.active_reqs--;
+                               break;
+                       }
                        spin_unlock_irqrestore(hba->host->host_lock, flags);
                        flush_work(&hba->clk_gating.ungate_work);
                        spin_lock_irqsave(hba->host->host_lock, flags);
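
With async set, a held-off ungate no longer blocks in flush_work(); the caller gets -EAGAIN and is expected to retry. The queuecommand path handles this roughly as follows (a sketch of the existing ufshcd_queuecommand() error handling):

	err = ufshcd_hold(hba, true);		/* async: may return -EAGAIN */
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;	/* let the SCSI midlayer requeue */
		goto out;
	}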
@@ -2093,13 +2135,8 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                return sg_segments;
 
        if (sg_segments) {
-               if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
-                       lrbp->utr_descriptor_ptr->prd_table_length =
-                               cpu_to_le16((u16)(sg_segments *
-                                       sizeof(struct ufshcd_sg_entry)));
-               else
-                       lrbp->utr_descriptor_ptr->prd_table_length =
-                               cpu_to_le16((u16) (sg_segments));
+               lrbp->utr_descriptor_ptr->prd_table_length =
+                       cpu_to_le16((u16)sg_segments);
 
                prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
 
@@ -2363,6 +2400,27 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
        return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
 }
 
+static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
+{
+       struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+       struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
+       dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
+               i * sizeof(struct utp_transfer_cmd_desc);
+       u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
+                                      response_upiu);
+       u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+       lrb->utr_descriptor_ptr = utrdlp + i;
+       lrb->utrd_dma_addr = hba->utrdl_dma_addr +
+               i * sizeof(struct utp_transfer_req_desc);
+       lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+       lrb->ucd_req_dma_addr = cmd_desc_element_addr;
+       lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+       lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
+       lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+       lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
+}
+
 /**
  * ufshcd_queuecommand - main entry point for SCSI requests
  * @host: SCSI host pointer
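
ufshcd_init_lrb() folds the per-slot pointer setup into one place: slot i of the UTRD array and slot i of the command-descriptor array belong to LRB i, and each DMA address is derived with the same offset as its CPU pointer. A worked example for i = 1, using illustrative sizes only:

	/* if sizeof(struct utp_transfer_cmd_desc) were 1024 bytes and
	 * response_upiu sat 512 bytes into it (illustrative values only): */
	cmd_desc_element_addr = hba->ucdl_dma_addr + 1 * 1024;
	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + 512;	/* response UPIU */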
@@ -2452,7 +2510,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
        /* issue command to the controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+       ufshcd_vops_setup_xfer_req(hba, tag, true);
        ufshcd_send_command(hba, tag);
 out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -2639,7 +2697,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
        /* Make sure descriptors are ready before ringing the doorbell */
        wmb();
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+       ufshcd_vops_setup_xfer_req(hba, tag, false);
        ufshcd_send_command(hba, tag);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -3276,6 +3334,31 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
                                      param_offset, param_read_buf, param_size);
 }
 
+static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
+{
+       int err = 0;
+       u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
+
+       if (hba->dev_info.wspecversion >= 0x300) {
+               err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+                               QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
+                               &gating_wait);
+               if (err)
+                       dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
+                                        err, gating_wait);
+
+               if (gating_wait == 0) {
+                       gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
+                       dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
+                                        gating_wait);
+               }
+
+               hba->dev_info.clk_gating_wait_us = gating_wait;
+       }
+
+       return err;
+}
+
 /**
  * ufshcd_memory_alloc - allocate memory for host memory space data structures
  * @hba: per adapter instance
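
Two fallbacks apply in ufshcd_get_ref_clk_gating_wait(): the query is only attempted on devices reporting spec version 3.0 or later (bRefClkGatingWait was introduced with UFS 3.0), and UFSHCD_REF_CLK_GATING_WAIT_US (0xFF, i.e. 255 us) is used both when the read fails and when the device reports 0. For illustration:

	/* wspecversion 0x0310 (UFS 3.1) >= 0x300 -> attribute is queried;
	 * wspecversion 0x0210 (UFS 2.1) -> clk_gating_wait_us is left unset */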
@@ -3373,7 +3456,6 @@ out:
  */
 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
 {
-       struct utp_transfer_cmd_desc *cmd_descp;
        struct utp_transfer_req_desc *utrdlp;
        dma_addr_t cmd_desc_dma_addr;
        dma_addr_t cmd_desc_element_addr;
@@ -3383,7 +3465,6 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
        int i;
 
        utrdlp = hba->utrdl_base_addr;
-       cmd_descp = hba->ucdl_base_addr;
 
        response_offset =
                offsetof(struct utp_transfer_cmd_desc, response_upiu);
@@ -3403,36 +3484,13 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
                                cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
 
                /* Response upiu and prdt offset should be in double words */
-               if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
-                       utrdlp[i].response_upiu_offset =
-                               cpu_to_le16(response_offset);
-                       utrdlp[i].prd_table_offset =
-                               cpu_to_le16(prdt_offset);
-                       utrdlp[i].response_upiu_length =
-                               cpu_to_le16(ALIGNED_UPIU_SIZE);
-               } else {
-                       utrdlp[i].response_upiu_offset =
-                               cpu_to_le16((response_offset >> 2));
-                       utrdlp[i].prd_table_offset =
-                               cpu_to_le16((prdt_offset >> 2));
-                       utrdlp[i].response_upiu_length =
-                               cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
-               }
+               utrdlp[i].response_upiu_offset =
+                       cpu_to_le16(response_offset >> 2);
+               utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2);
+               utrdlp[i].response_upiu_length =
+                       cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
 
-               hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
-               hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
-                               (i * sizeof(struct utp_transfer_req_desc));
-               hba->lrb[i].ucd_req_ptr =
-                       (struct utp_upiu_req *)(cmd_descp + i);
-               hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
-               hba->lrb[i].ucd_rsp_ptr =
-                       (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
-               hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
-                               response_offset;
-               hba->lrb[i].ucd_prdt_ptr =
-                       (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
-               hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
-                               prdt_offset;
+               ufshcd_init_lrb(hba, &hba->lrb[i], i);
        }
 }
 
@@ -3460,52 +3518,6 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
                        "dme-link-startup: error code %d\n", ret);
        return ret;
 }
-/**
- * ufshcd_dme_reset - UIC command for DME_RESET
- * @hba: per adapter instance
- *
- * DME_RESET command is issued in order to reset UniPro stack.
- * This function now deal with cold reset.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_reset(struct ufs_hba *hba)
-{
-       struct uic_command uic_cmd = {0};
-       int ret;
-
-       uic_cmd.command = UIC_CMD_DME_RESET;
-
-       ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
-       if (ret)
-               dev_err(hba->dev,
-                       "dme-reset: error code %d\n", ret);
-
-       return ret;
-}
-
-/**
- * ufshcd_dme_enable - UIC command for DME_ENABLE
- * @hba: per adapter instance
- *
- * DME_ENABLE command is issued in order to enable UniPro stack.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_enable(struct ufs_hba *hba)
-{
-       struct uic_command uic_cmd = {0};
-       int ret;
-
-       uic_cmd.command = UIC_CMD_DME_ENABLE;
-
-       ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
-       if (ret)
-               dev_err(hba->dev,
-                       "dme-reset: error code %d\n", ret);
-
-       return ret;
-}
 
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
 {
@@ -4224,7 +4236,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
 }
 
 /**
- * ufshcd_hba_execute_hce - initialize the controller
+ * ufshcd_hba_enable - initialize the controller
  * @hba: per adapter instance
  *
  * The controller resets itself and controller firmware initialization
@@ -4233,7 +4245,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
  *
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
+int ufshcd_hba_enable(struct ufs_hba *hba)
 {
        int retry;
 
@@ -4259,10 +4271,10 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
         * instruction might be read back.
         * This delay can be changed based on the controller.
         */
-       usleep_range(1000, 1100);
+       ufshcd_delay_us(hba->hba_enable_delay_us, 100);
 
        /* wait for the host controller to complete initialization */
-       retry = 10;
+       retry = 50;
        while (ufshcd_is_hba_active(hba)) {
                if (retry) {
                        retry--;
@@ -4271,7 +4283,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
                                "Controller enable failed\n");
                        return -EIO;
                }
-               usleep_range(5000, 5100);
+               usleep_range(1000, 1100);
        }
 
        /* enable UIC related interrupts */
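
The worst-case enable wait is unchanged by the retry rework: the old loop allowed 10 retries of ~5 ms (50 ms total) and the new one 50 retries of ~1 ms (also 50 ms); the finer polling simply lets controllers that come up quickly proceed sooner, and the fixed post-reset delay becomes the tunable hba->hba_enable_delay_us.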
@@ -4281,37 +4293,11 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
 
        return 0;
 }
-
-int ufshcd_hba_enable(struct ufs_hba *hba)
-{
-       int ret;
-
-       if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
-               ufshcd_set_link_off(hba);
-               ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
-
-               /* enable UIC related interrupts */
-               ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
-               ret = ufshcd_dme_reset(hba);
-               if (!ret) {
-                       ret = ufshcd_dme_enable(hba);
-                       if (!ret)
-                               ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
-                       if (ret)
-                               dev_err(hba->dev,
-                                       "Host controller enable failed with non-hce\n");
-               }
-       } else {
-               ret = ufshcd_hba_execute_hce(hba);
-       }
-
-       return ret;
-}
 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
 
 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
 {
-       int tx_lanes, i, err = 0;
+       int tx_lanes = 0, i, err = 0;
 
        if (!peer)
                ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
@@ -4737,8 +4723,15 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                         * UFS device needs urgent BKOPs.
                         */
                        if (!hba->pm_op_in_progress &&
-                           ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
-                               schedule_work(&hba->eeh_work);
+                           ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
+                           schedule_work(&hba->eeh_work)) {
+                               /*
+                                * Prevent suspend once eeh_work is scheduled
+                                * to avoid a deadlock between ufshcd_suspend()
+                                * and the exception event handler.
+                                */
+                               pm_runtime_get_noresume(hba->dev);
+                       }
                        break;
                case UPIU_TRANSACTION_REJECT_UPIU:
                        /* TODO: handle Reject UPIU Response */
@@ -4876,8 +4869,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
         * false interrupt if device completes another request after resetting
         * aggregation and before reading the DB.
         */
-       if (ufshcd_is_intr_aggr_allowed(hba) &&
-           !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
+       if (ufshcd_is_intr_aggr_allowed(hba))
                ufshcd_reset_intr_aggr(hba);
 
        tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -5191,7 +5183,14 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 
 out:
        ufshcd_scsi_unblock_requests(hba);
-       pm_runtime_put_sync(hba->dev);
+       /*
+        * pm_runtime_get_noresume() was called when eeh_work was scheduled
+        * to keep suspend from racing with the exception work. Decrement
+        * the usage counter with pm_runtime_put_noidle() so suspend is
+        * allowed again once the exception event handler completes.
+        */
+       pm_runtime_put_noidle(hba->dev);
+       pm_runtime_put(hba->dev);
        return;
 }
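
The put_noidle()/put() pair mirrors the two references the exception path now holds: the pm_runtime_get_noresume() taken when eeh_work was successfully scheduled (earlier hunk) and the pm_runtime_get_sync() the handler takes on entry. A sketch of the pairing (not verbatim code):

	if (schedule_work(&hba->eeh_work))	/* true only if newly queued */
		pm_runtime_get_noresume(hba->dev);
	/* ... later, in ufshcd_exception_event_handler(): ... */
	pm_runtime_get_sync(hba->dev);		/* on entry */
	/* ... handle the exception events ... */
	pm_runtime_put_noidle(hba->dev);	/* balances get_noresume */
	pm_runtime_put(hba->dev);		/* balances get_sync */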
 
@@ -5486,7 +5485,8 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
                                         u32 intr_mask)
 {
-       if (!ufshcd_is_auto_hibern8_supported(hba))
+       if (!ufshcd_is_auto_hibern8_supported(hba) ||
+           !ufshcd_is_auto_hibern8_enabled(hba))
                return false;
 
        if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
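
ufshcd_is_auto_hibern8_enabled() distinguishes "controller supports auto-hibern8" from "auto-hibern8 is actually armed"; it amounts to checking that the AHIT idle-timer field is nonzero, roughly (a sketch of the ufshcd.h helper):

	/* AH8 is enabled iff the programmed idle timer is nonzero */
	static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
	{
		return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) != 0;
	}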
@@ -6482,11 +6482,12 @@ out:
        return icc_level;
 }
 
-static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
 {
        int ret;
        int buff_len = hba->desc_size.pwr_desc;
        u8 *desc_buf;
+       u32 icc_level;
 
        desc_buf = kmalloc(buff_len, GFP_KERNEL);
        if (!desc_buf)
@@ -6501,25 +6502,32 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
                goto out;
        }
 
-       hba->init_prefetch_data.icc_level =
-                       ufshcd_find_max_sup_active_icc_level(hba,
-                       desc_buf, buff_len);
-       dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
-                       __func__, hba->init_prefetch_data.icc_level);
+       icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
+                                                        buff_len);
+       dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
 
        ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
-               QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
-               &hba->init_prefetch_data.icc_level);
+               QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
 
        if (ret)
                dev_err(hba->dev,
                        "%s: Failed configuring bActiveICCLevel = %d ret = %d",
-                       __func__, hba->init_prefetch_data.icc_level , ret);
+                       __func__, icc_level, ret);
 
 out:
        kfree(desc_buf);
 }
 
+static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
+{
+       scsi_autopm_get_device(sdev);
+       blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
+       if (sdev->rpm_autosuspend)
+               pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
+                                                RPM_AUTOSUSPEND_DELAY_MS);
+       scsi_autopm_put_device(sdev);
+}
+
 /**
  * ufshcd_scsi_add_wlus - Adds required W-LUs
  * @hba: per-adapter instance
@@ -6559,6 +6567,7 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
                hba->sdev_ufs_device = NULL;
                goto out;
        }
+       ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
        scsi_device_put(hba->sdev_ufs_device);
 
        sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
@@ -6567,14 +6576,17 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
                ret = PTR_ERR(sdev_rpmb);
                goto remove_sdev_ufs_device;
        }
+       ufshcd_blk_pm_runtime_init(sdev_rpmb);
        scsi_device_put(sdev_rpmb);
 
        sdev_boot = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
-       if (IS_ERR(sdev_boot))
+       if (IS_ERR(sdev_boot)) {
                dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
-       else
+       } else {
+               ufshcd_blk_pm_runtime_init(sdev_boot);
                scsi_device_put(sdev_boot);
+       }
        goto out;
 
 remove_sdev_ufs_device:
@@ -6614,6 +6626,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
        dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
                                     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
 
+       /* wSpecVersion is stored big-endian in the descriptor */
+       dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
+                                     desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+
        model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
        err = ufshcd_read_string_desc(hba, model_index,
                                      &dev_info->model, SD_ASCII_STD);
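
Since the descriptor stores wSpecVersion big-endian, the two bytes are assembled MSB-first: e.g. bytes {0x02, 0x10} yield 0x0210 (UFS 2.1), which fails the >= 0x300 check in ufshcd_get_ref_clk_gating_wait() above, while {0x03, 0x00} yields 0x0300 (UFS 3.0) and triggers the query.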
@@ -6811,14 +6827,14 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
                ufshcd_tune_pa_hibern8time(hba);
        }
 
+       ufshcd_vops_apply_dev_quirks(hba);
+
        if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
                /* set 1ms timeout for PA_TACTIVATE */
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
 
        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
                ufshcd_quirk_tune_host_pa_tactivate(hba);
-
-       ufshcd_vops_apply_dev_quirks(hba);
 }
 
 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -6991,6 +7007,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
                goto out;
        }
 
+       ufshcd_get_ref_clk_gating_wait(hba);
+
        ufs_fixup_device_setup(hba);
 
        if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
@@ -7014,8 +7032,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
 {
        int ret;
 
-       ufshcd_init_icc_levels(hba);
-
        /* Add required well known logical units to scsi mid layer */
        ret = ufshcd_scsi_add_wlus(hba);
        if (ret)
@@ -7113,6 +7129,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
                }
        }
 
+       /*
+        * bActiveICCLevel is volatile for the UFS device (per the latest
+        * v2.1 spec) and for removable UFS cards as well, hence always set
+        * the parameter. Note: the error handler may issue a device reset,
+        * which also resets bActiveICCLevel, so setting it here is always safe.
+        */
+       ufshcd_set_active_icc_lvl(hba);
+
        /* set the state as operational after switching to desired gear */
        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 
@@ -7241,6 +7265,11 @@ static int ufshcd_config_vreg(struct device *dev,
        name = vreg->name;
 
        if (regulator_count_voltages(reg) > 0) {
+               uA_load = on ? vreg->max_uA : 0;
+               ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+               if (ret)
+                       goto out;
+
                if (vreg->min_uV && vreg->max_uV) {
                        min_uV = on ? vreg->min_uV : 0;
                        ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
@@ -7251,11 +7280,6 @@ static int ufshcd_config_vreg(struct device *dev,
                                goto out;
                        }
                }
-
-               uA_load = on ? vreg->max_uA : 0;
-               ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
-               if (ret)
-                       goto out;
        }
 out:
        return ret;
@@ -7395,16 +7419,9 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
        if (list_empty(head))
                goto out;
 
-       /*
-        * vendor specific setup_clocks ops may depend on clocks managed by
-        * this standard driver hence call the vendor specific setup_clocks
-        * before disabling the clocks managed here.
-        */
-       if (!on) {
-               ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
-               if (ret)
-                       return ret;
-       }
+       ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+       if (ret)
+               return ret;
 
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
@@ -7428,16 +7445,9 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
                }
        }
 
-       /*
-        * vendor specific setup_clocks ops may depend on clocks managed by
-        * this standard driver hence call the vendor specific setup_clocks
-        * after enabling the clocks managed here.
-        */
-       if (on) {
-               ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
-               if (ret)
-                       return ret;
-       }
+       ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+       if (ret)
+               return ret;
 
 out:
        if (ret) {
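
Vendor setup_clocks hooks now receive both PRE_CHANGE and POST_CHANGE notifications in both directions, so a vendor clock can be sequenced on either side of the core clocks. A minimal sketch of such a hook, assuming a hypothetical vendor-private phy_clk that must only run while the core clocks do:

	static int xyz_setup_clocks(struct ufs_hba *hba, bool on,
				    enum ufs_notify_change_status status)
	{
		struct xyz_host *host = ufshcd_get_variant(hba); /* hypothetical */

		if (on && status == POST_CHANGE)	/* core clocks now on */
			return clk_prepare_enable(host->phy_clk);
		if (!on && status == PRE_CHANGE)	/* core clocks still on */
			clk_disable_unprepare(host->phy_clk);
		return 0;
	}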
@@ -7931,6 +7941,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                        goto enable_gating;
        }
 
+       flush_work(&hba->eeh_work);
        ret = ufshcd_link_state_transition(hba, req_link_state, 1);
        if (ret)
                goto set_dev_active;
@@ -8402,6 +8413,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
        hba->mmio_base = mmio_base;
        hba->irq = irq;
+       hba->hba_enable_delay_us = 1000;
 
        err = ufshcd_hba_init(hba);
        if (err)
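
The 1000 us default preserves the old usleep_range(1000, 1100) behaviour in ufshcd_hba_enable(); a vendor driver that knows its controller needs a different post-reset delay can override the field before enabling the host, e.g. (hypothetical value):

	hba->hba_enable_delay_us = 600;	/* board-specific, illustrative only */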