RDMA/pvrdma: Replace spin_lock_irqsave with spin_lock in hard IRQ
author Weihang Li <liweihang@huawei.com>
Fri, 5 Feb 2021 08:37:58 +0000 (16:37 +0800)
committer Jason Gunthorpe <jgg@nvidia.com>
Fri, 5 Feb 2021 16:03:07 +0000 (12:03 -0400)
There is no need to use the irqsave/irqrestore spinlock variants in hard IRQ context: interrupts are already disabled on the local CPU while the handler runs, so the plain spin_lock()/spin_unlock() pair is sufficient.

Link: https://lore.kernel.org/r/1612514278-49220-1-git-send-email-liweihang@huawei.com
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c

index 00a3309..4b6019e 100644
@@ -474,7 +474,6 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
        int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
                         sizeof(struct pvrdma_cqne);
        unsigned int head;
-       unsigned long flags;
 
        dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");
 
@@ -483,11 +482,11 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
                struct pvrdma_cq *cq;
 
                cqne = get_cqne(dev, head);
-               spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+               spin_lock(&dev->cq_tbl_lock);
                cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
                if (cq)
                        refcount_inc(&cq->refcnt);
-               spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+               spin_unlock(&dev->cq_tbl_lock);
 
                if (cq && cq->ibcq.comp_handler)
                        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
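
For context, a minimal sketch of the locking rule the diff above relies on. All names here (example_dev, example_irq_handler, example_update, tbl_lock, obj_tbl) are hypothetical and not part of the pvrdma driver: a spinlock shared with a hard IRQ handler must be taken with the irqsave variant from process context, but inside the handler itself interrupts are already off, so the plain variant is enough.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t tbl_lock;	/* protects obj_tbl */
	void *obj_tbl[16];
};

/* Hard IRQ context: interrupts are already disabled on this CPU,
 * so saving and restoring the interrupt state would be redundant.
 */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;
	void *obj;

	spin_lock(&dev->tbl_lock);
	obj = dev->obj_tbl[0];
	spin_unlock(&dev->tbl_lock);

	return obj ? IRQ_HANDLED : IRQ_NONE;
}

/* Process context: the IRQ handler above could preempt us while we
 * hold the lock, deadlocking on this CPU, so interrupts must be
 * disabled for the duration of the critical section.
 */
static void example_update(struct example_dev *dev, void *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->tbl_lock, flags);
	dev->obj_tbl[0] = obj;
	spin_unlock_irqrestore(&dev->tbl_lock, flags);
}

The patch applies the first half of this rule: pvrdma_intrx_handler() only ever runs in hard IRQ context, so dropping the flags bookkeeping changes no behavior and removes a local variable.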