scsi: ufs: core: mcq: Add completion support in poll
author	Asutosh Das <quic_asutoshd@quicinc.com>
	Fri, 13 Jan 2023 20:48:51 +0000 (12:48 -0800)
committer	Martin K. Petersen <martin.petersen@oracle.com>
	Sat, 14 Jan 2023 02:03:38 +0000 (21:03 -0500)
Complete CQE requests in poll. The assumption is that multiple poll
completions may occur on different CPUs for the same completion queue,
so spinlock protection is added.
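
To illustrate the race being guarded against, below is a minimal
user-space sketch (a pthreads analogy, not part of this patch; all
names in it are hypothetical): two pollers drain one shared completion
queue, and without the lock both could observe the same head slot and
complete the same entry twice.

  #include <pthread.h>
  #include <stdio.h>

  #define CQ_ENTRIES 32

  static struct {
          int head;                /* plays the role of cq_head_slot */
          int tail;                /* plays the role of cq_tail_slot */
          pthread_mutex_t cq_lock; /* plays the role of hwq->cq_lock */
  } cq = { .tail = CQ_ENTRIES, .cq_lock = PTHREAD_MUTEX_INITIALIZER };

  /* Mirrors the shape of ufshcd_mcq_poll_cqe_lock(): take the lock,
   * then run the "nolock" consumer loop. */
  static unsigned long poll_cqe_lock(void)
  {
          unsigned long completed = 0;

          pthread_mutex_lock(&cq.cq_lock);
          while (cq.head != cq.tail) {    /* the "nolock" body */
                  cq.head++;
                  completed++;
          }
          pthread_mutex_unlock(&cq.cq_lock);
          return completed;
  }

  static void *poller(void *arg)
  {
          printf("poller %ld completed %lu entries\n",
                 (long)arg, poll_cqe_lock());
          return NULL;
  }

  int main(void) /* build: cc sketch.c -lpthread */
  {
          pthread_t t[2];
          long i;

          for (i = 0; i < 2; i++)
                  pthread_create(&t[i], NULL, poller, (void *)i);
          for (i = 0; i < 2; i++)
                  pthread_join(t[i], NULL);
          /* completions across both pollers always sum to CQ_ENTRIES */
          return 0;
  }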

Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/ufs/core/ufs-mcq.c
drivers/ufs/core/ufshcd-priv.h
drivers/ufs/core/ufshcd.c
include/ufs/ufshcd.h

drivers/ufs/core/ufs-mcq.c
index cd10d59..e710d19 100644
@@ -294,6 +294,18 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
        return completed_reqs;
 }
 
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+                                      struct ufs_hw_queue *hwq)
+{
+       unsigned long completed_reqs;
+
+       spin_lock(&hwq->cq_lock);
+       completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+       spin_unlock(&hwq->cq_lock);
+
+       return completed_reqs;
+}
+
 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 {
        struct ufs_hw_queue *hwq;
@@ -390,6 +402,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
                hwq = &hba->uhq[i];
                hwq->max_entries = hba->nutrs;
                spin_lock_init(&hwq->sq_lock);
+               spin_lock_init(&hwq->cq_lock);
        }
 
        /* The very first HW queue serves device commands */
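
A design note on the split above: ufshcd_mcq_poll_cqe_lock() is a thin
wrapper, so ufshcd_mcq_poll_cqe_nolock() remains usable from call sites
that are already serialized, while concurrent pollers take
hwq->cq_lock. The plain spin_lock() (rather than spin_lock_irqsave())
relies on the assumption that this queue's completions are not also
consumed from hard-IRQ context.
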
drivers/ufs/core/ufshcd-priv.h
index 583fb86..9b63090 100644
@@ -75,6 +75,8 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
                                         struct ufs_hw_queue *hwq);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
                                           struct request *req);
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+                                      struct ufs_hw_queue *hwq);
 
 #define UFSHCD_MCQ_IO_QUEUE_OFFSET     1
 #define SD_ASCII_STD true
drivers/ufs/core/ufshcd.c
index 3afa076..cb1bca4 100644
@@ -5461,6 +5461,13 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
        struct ufs_hba *hba = shost_priv(shost);
        unsigned long completed_reqs, flags;
        u32 tr_doorbell;
+       struct ufs_hw_queue *hwq;
+
+       if (is_mcq_enabled(hba)) {
+               hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+
+               return ufshcd_mcq_poll_cqe_lock(hba, hwq);
+       }
 
        spin_lock_irqsave(&hba->outstanding_lock, flags);
        tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
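
In the MCQ branch above, blk-mq poll queue queue_num maps to
hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET]; the offset (defined
as 1 in ufshcd-priv.h) skips the first hardware queue, which is
reserved for device commands, so poll queue 0 lands on the first I/O
queue.
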
include/ufs/ufshcd.h
index 0dcb104..33973e9 100644
@@ -1086,6 +1086,7 @@ struct ufs_hba {
  * @sq_lock: serialize submission queue access
  * @cq_tail_slot: current slot to which CQ tail pointer is pointing
  * @cq_head_slot: current slot to which CQ head pointer is pointing
+ * @cq_lock: Synchronize between multiple polling instances
  */
 struct ufs_hw_queue {
        void __iomem *mcq_sq_head;
@@ -1103,6 +1104,7 @@ struct ufs_hw_queue {
        spinlock_t sq_lock;
        u32 cq_tail_slot;
        u32 cq_head_slot;
+       spinlock_t cq_lock;
 };
 
 static inline bool is_mcq_enabled(struct ufs_hba *hba)