scsi: ufs: Add clock ungating to a separate workqueue
author     Vijay Viswanath <vviswana@codeaurora.org>
           Thu, 3 May 2018 11:07:22 +0000 (16:37 +0530)
committer  Martin K. Petersen <martin.petersen@oracle.com>
           Fri, 18 May 2018 16:22:47 +0000 (12:22 -0400)
The UFS driver can receive a request during memory reclaim by kswapd. If
there are no idle workers when the driver queues the ungate work,
kthreadd is invoked to create a new kworker. Since the kswapd task holds
a mutex which kthreadd also needs, this can deadlock. The ungate work
must therefore be queued on a separate workqueue created with the
WQ_MEM_RECLAIM flag. Such a workqueue has a rescuer thread which takes
over when the above deadlock condition is possible.
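For context, a minimal sketch of the reclaim-safe workqueue pattern the
patch adopts is shown below. The identifiers (example_wq,
example_ungate_fn, example_init) are illustrative only and do not appear
in the driver, which uses hba->clk_gating.clk_gating_workq and
ufshcd_ungate_work().

/*
 * Sketch only: a dedicated ordered workqueue with WQ_MEM_RECLAIM has a
 * rescuer thread, so queued work can make forward progress even when
 * kthreadd is blocked during memory reclaim.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_ungate_work;

static void example_ungate_fn(struct work_struct *work)
{
	/* ungate clocks here */
}

static int example_init(void)
{
	example_wq = alloc_ordered_workqueue("example_wq", WQ_MEM_RECLAIM);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_ungate_work, example_ungate_fn);
	return 0;
}

static void example_queue(void)
{
	/* queue_work() on the dedicated reclaim-safe queue instead of
	 * schedule_work() on the shared system workqueue. */
	queue_work(example_wq, &example_ungate_work);
}

static void example_exit(void)
{
	cancel_work_sync(&example_ungate_work);
	destroy_workqueue(example_wq);
}

Using an ordered workqueue also keeps ungate work serialized per host,
matching the previous schedule_work() behaviour for this single work item.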

Signed-off-by: Vijay Viswanath <vviswana@codeaurora.org>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Asutosh Das <asutoshd@codeaurora.org>
Reviewed-by: Subhash Jadavani <subhashj@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0284921..77e2b3e 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1532,7 +1532,8 @@ start:
                hba->clk_gating.state = REQ_CLKS_ON;
                trace_ufshcd_clk_gating(dev_name(hba->dev),
                                        hba->clk_gating.state);
-               schedule_work(&hba->clk_gating.ungate_work);
+               queue_work(hba->clk_gating.clk_gating_workq,
+                          &hba->clk_gating.ungate_work);
                /*
                 * fall through to check if we should wait for this
                 * work to be done or not.
@@ -1718,6 +1719,8 @@ out:
 
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
+       char wq_name[sizeof("ufs_clk_gating_00")];
+
        if (!ufshcd_is_clkgating_allowed(hba))
                return;
 
@@ -1725,6 +1728,11 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
        INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
        INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+       snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+                hba->host->host_no);
+       hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
+                                                          WQ_MEM_RECLAIM);
+
        hba->clk_gating.is_enabled = true;
 
        hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
@@ -1752,6 +1760,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
        device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
        cancel_work_sync(&hba->clk_gating.ungate_work);
        cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+       destroy_workqueue(hba->clk_gating.clk_gating_workq);
 }
 
 /* Must be called with host lock acquired */
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index a44b9f4..f51758f 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -362,6 +362,7 @@ struct ufs_clk_gating {
        struct device_attribute enable_attr;
        bool is_enabled;
        int active_reqs;
+       struct workqueue_struct *clk_gating_workq;
 };
 
 struct ufs_saved_pwr_info {