iw_cxgb4: Atomically flush per QP HW CQEs
Author:     Bharat Potnuri <bharat@chelsio.com>
AuthorDate: Fri, 27 Apr 2018 11:11:16 +0000 (16:41 +0530)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 9 May 2018 07:51:53 +0000 (09:51 +0200)
commit 2df19e19ae90d94fd8724083f161f368a2797537 upstream.

When a CQ is shared by multiple QPs, c4iw_flush_hw_cq() needs to acquire
the corresponding QP's lock before moving that QP's CQEs into its SW
queue and accessing the SQ contents to complete a WR.
CQEs are ignored if the corresponding QP has already been flushed.

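For context (not part of the patch): below is a minimal userspace sketch
of the locking rule the change enforces in the per-CQE loop. All types
and helper names here (next_hw_cqe, consume, move_to_sw_queue) are
illustrative stand-ins for the driver's t4_* helpers, and pthread
mutexes stand in for spin_lock() on qhp->lock; the caller is assumed to
already hold flush_qhp's lock, as __flush_qp() does.

	#include <pthread.h>
	#include <stddef.h>

	struct qp  { pthread_mutex_t lock; int flushed; }; /* flushed mirrors qhp->wq.flushed */
	struct cqe { struct qp *owner; struct cqe *next; };
	struct cq  { struct cqe *hw_head; };

	/* Illustrative stand-ins for t4_next_hw_cqe()/t4_hwcq_consume(). */
	static struct cqe *next_hw_cqe(struct cq *cq) { return cq->hw_head; }
	static void consume(struct cq *cq) { cq->hw_head = cq->hw_head->next; }
	static void move_to_sw_queue(struct cqe *cqe) { (void)cqe; /* elided */ }

	/* Caller already holds flush_qhp->lock. */
	static void flush_hw_cq_sketch(struct cq *cq, struct qp *flush_qhp)
	{
		struct cqe *cqe;

		while ((cqe = next_hw_cqe(cq)) != NULL) {
			struct qp *qhp = cqe->owner;
			int locked = 0;

			if (qhp && qhp != flush_qhp) {
				/* Serialize with a concurrent flush of that QP. */
				pthread_mutex_lock(&qhp->lock);
				locked = 1;
			}
			/* Skip CQEs whose QP was already flushed; move the rest. */
			if (qhp && !(locked && qhp->flushed))
				move_to_sw_queue(cqe);

			consume(cq);
			if (locked)
				pthread_mutex_unlock(&qhp->lock);
		}
	}
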
Cc: stable@vger.kernel.org
Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 73feeee..6b15508 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -330,7 +330,7 @@ static void advance_oldest_read(struct t4_wq *wq)
  * Deal with out-of-order and/or completions that complete
  * prior unsignalled WRs.
  */
-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
 {
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
@@ -354,6 +354,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
                if (qhp == NULL)
                        goto next_cqe;
 
+               if (flush_qhp != qhp) {
+                       spin_lock(&qhp->lock);
+
+                       if (qhp->wq.flushed == 1)
+                               goto next_cqe;
+               }
+
                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;
 
@@ -405,6 +412,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+               if (qhp && flush_qhp != qhp)
+                       spin_unlock(&qhp->lock);
        }
 }
 
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9bb2e0a..f527798 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -993,7 +993,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
-void c4iw_flush_hw_cq(struct c4iw_cq *chp);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index f311ea7..a8a8f65 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1349,12 +1349,12 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
        qhp->wq.flushed = 1;
        t4_set_wq_in_error(&qhp->wq);
 
-       c4iw_flush_hw_cq(rchp);
+       c4iw_flush_hw_cq(rchp, qhp);
        c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
        rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 
        if (schp != rchp)
-               c4iw_flush_hw_cq(schp);
+               c4iw_flush_hw_cq(schp, qhp);
        sq_flushed = c4iw_flush_sq(qhp);
 
        spin_unlock(&qhp->lock);