RDMA/bnxt_re: Add HW workaround for avoiding stall for UD QPs
Author:     Somnath Kotur <somnath.kotur@broadcom.com>
AuthorDate: Mon, 22 May 2017 10:15:36 +0000 (03:15 -0700)
Commit:     Doug Ledford <dledford@redhat.com>
CommitDate: Wed, 14 Jun 2017 17:01:58 +0000 (13:01 -0400)
The HW stalls out after 0x800000 WQEs have been posted on UD QPs.
To work around this problem, the driver sends a modify_qp command
to the HW at the halfway mark (0x400000) so that the FW can modify
the QP context in the HW and prevent the stall.
This workaround is needed for UD, QP1 (GSI) and Raw Ethertype QPs.
A counter is added to track the number of WQEs posted during post_send.
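The scheme reduces to a per-QP counter and a threshold check. The sketch
below is a minimal, self-contained C model of that scheme, not driver code:
model_qp, qp_post_one_wqe() and qp_refresh_hw_context() are hypothetical
stand-ins for bnxt_qplib_qp, the post_send path and the bnxt_re_modify_qp()
call in the diff that follows.

#include <stdint.h>
#include <stdio.h>

/* Halfway mark of the 0x800000-WQE HW stall limit, as in the patch. */
#define UD_QP_HW_STALL_THRESHOLD        0x400000

struct model_qp {
        uint64_t wqe_cnt;       /* WQEs posted since the last HW context refresh */
};

/* Hypothetical stand-in for the modify_qp (RTS -> RTS) call that lets the
 * FW rewrite the QP context in HW before the stall point is reached. */
static void qp_refresh_hw_context(struct model_qp *qp)
{
        printf("refreshing HW QP context after %llu WQEs\n",
               (unsigned long long)qp->wqe_cnt);
        qp->wqe_cnt = 0;
}

/* Hypothetical stand-in for posting one send WQE on a UD/GSI/Raw Ethertype QP. */
static void qp_post_one_wqe(struct model_qp *qp)
{
        qp->wqe_cnt++;
        if (qp->wqe_cnt == UD_QP_HW_STALL_THRESHOLD)
                qp_refresh_hw_context(qp);
}

int main(void)
{
        struct model_qp qp = { .wqe_cnt = 0 };
        uint64_t i;

        /* Post enough WQEs to cross the threshold twice; the refresh fires at
         * 0x400000 and again at 0x800000 total, so the HW limit is never hit. */
        for (i = 0; i < 2ULL * UD_QP_HW_STALL_THRESHOLD; i++)
                qp_post_one_wqe(&qp);
        return 0;
}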

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/bnxt_re/bnxt_re.h
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/bnxt_re/qplib_fp.h

diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ebf7be8..d5e457e 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -56,6 +56,8 @@
 #define BNXT_RE_MAX_SRQC_COUNT         (64 * 1024)
 #define BNXT_RE_MAX_CQ_COUNT           (64 * 1024)
 
+#define BNXT_RE_UD_QP_HW_STALL         0x400000
+
 struct bnxt_re_work {
        struct work_struct      work;
        unsigned long           event;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index d94b1b3..08e7e59 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -2075,6 +2075,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
        return payload_sz;
 }
 
+static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
+{
+       if ((qp->ib_qp.qp_type == IB_QPT_UD ||
+            qp->ib_qp.qp_type == IB_QPT_GSI ||
+            qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
+            qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
+               int qp_attr_mask;
+               struct ib_qp_attr qp_attr;
+
+               qp_attr_mask = IB_QP_STATE;
+               qp_attr.qp_state = IB_QPS_RTS;
+               bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
+               qp->qplib_qp.wqe_cnt = 0;
+       }
+}
+
 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
                                       struct bnxt_re_qp *qp,
                                struct ib_send_wr *wr)
@@ -2120,6 +2136,7 @@ bad:
                wr = wr->next;
        }
        bnxt_qplib_post_send_db(&qp->qplib_qp);
+       bnxt_ud_qp_hw_stall_workaround(qp);
        spin_unlock_irqrestore(&qp->sq_lock, flags);
        return rc;
 }
@@ -2216,6 +2233,7 @@ bad:
                wr = wr->next;
        }
        bnxt_qplib_post_send_db(&qp->qplib_qp);
+       bnxt_ud_qp_hw_stall_workaround(qp);
        spin_unlock_irqrestore(&qp->sq_lock, flags);
 
        return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 31593fd..f05500b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1298,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
        }
 
        sq->hwq.prod++;
+
+       qp->wqe_cnt++;
+
 done:
        return rc;
 }
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 71539ea..36b7b7d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -250,6 +250,7 @@ struct bnxt_qplib_qp {
        u8                              timeout;
        u8                              retry_cnt;
        u8                              rnr_retry;
+       u64                             wqe_cnt;
        u32                             min_rnr_timer;
        u32                             max_rd_atomic;
        u32                             max_dest_rd_atomic;