}
if (ufshcd_is_runtime_pm(pm_op)) {
- /*
- * The device is idle with no requests in the queue,
- * allow background operations if needed.
- */
- ret = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL);
- if (ret)
- goto enable_gating;
+ if (ufshcd_can_autobkops_during_suspend(hba)) {
+ /*
+ * The device is idle with no requests in the queue,
+ * allow background operations if bkops status shows
+ * that performance might be impacted.
+ */
+ ret = ufshcd_urgent_bkops(hba);
+ if (ret)
+ goto enable_gating;
+ } else {
+ /* make sure that auto bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
}
if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
goto set_old_link_state;
}
- ufshcd_disable_auto_bkops(hba);
+ /*
+	 * If BKOPs are urgently needed at this moment, keep
+	 * auto-bkops enabled; otherwise disable it.
+ */
+ ufshcd_urgent_bkops(hba);
hba->clk_gating.is_suspended = false;
if (ufshcd_is_clkscaling_enabled(hba))
#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
/* Allow dynamic clk scaling */
#define UFSHCD_CAP_CLK_SCALING (1 << 2)
+ /* Allow auto bkops to be enabled during runtime suspend */
+#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
{
return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
+static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+}
+
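A host controller driver has to opt in before this behavior takes effect. The following is a minimal sketch of how a vendor driver might set the capability from its init callback; example_hba_init and its registration are hypothetical and not part of this patch, only hba->caps and UFSHCD_CAP_AUTO_BKOPS_SUSPEND come from the change above:

static int example_hba_init(struct ufs_hba *hba)
{
	/* Opt in: keep auto bkops usable across runtime suspend */
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
	return 0;
}

With the capability set, ufshcd_can_autobkops_during_suspend() returns true and the runtime-suspend path above calls ufshcd_urgent_bkops() instead of unconditionally disabling auto bkops.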
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \