IB/rdmavt, IB/hfi1, IB/qib: Make wc opcode translation driver dependent
authorMike Marciniszyn <mike.marciniszyn@intel.com>
Tue, 21 Mar 2017 00:25:04 +0000 (17:25 -0700)
committerDoug Ledford <dledford@redhat.com>
Wed, 5 Apr 2017 18:45:09 +0000 (14:45 -0400)
The work to create a completion helper moved the translation of send
wqe operations to completion opcodes to rdmavt.

This precludes having driver-dependent operations.  Make the translation
driver-dependent by doing the translation in the driver prior to the
rvt_qp_swqe_complete() call using restored translation tables.

Fixes: f2dc9cdce83c ("IB/rdmavt: Add a send completion helper")
Fixes: 0771da5a6e9d ("IB/hfi1,IB/qib: Use new send completion helper")
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/ruc.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/hw/qib/qib_ruc.c
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/sw/rdmavt/qp.c
include/rdma/rdmavt_qp.h

index 7382be1..4649530 100644 (file)
@@ -1034,7 +1034,10 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
                /* see post_send() */
                barrier();
                rvt_put_swqe(wqe);
-               rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+               rvt_qp_swqe_complete(qp,
+                                    wqe,
+                                    ib_hfi1_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
        }
        /*
         * If we were waiting for sends to complete before re-sending,
@@ -1081,7 +1084,10 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
                qp->s_last = s_last;
                /* see post_send() */
                barrier();
-               rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+               rvt_qp_swqe_complete(qp,
+                                    wqe,
+                                    ib_hfi1_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
        } else {
                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 
index aa15bcb..d2eb793 100644 (file)
@@ -920,7 +920,10 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
            qp->ibqp.qp_type == IB_QPT_GSI)
                atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-       rvt_qp_swqe_complete(qp, wqe, status);
+       rvt_qp_swqe_complete(qp,
+                            wqe,
+                            ib_hfi1_wc_opcode[wqe->wr.opcode],
+                            status);
 
        if (qp->s_acked == old_last)
                qp->s_acked = last;
index 222315f..815cb44 100644 (file)
@@ -297,6 +297,22 @@ static inline bool wss_exceeds_threshold(void)
 }
 
 /*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
+       [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+       [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+       [IB_WR_SEND] = IB_WC_SEND,
+       [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+       [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+       [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+       [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+       [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+       [IB_WR_REG_MR] = IB_WC_REG_MR
+};
+
+/*
  * Length of header by opcode, 0 --> not supported
  */
 const u8 hdr_len_by_opcode[256] = {
index 12658e3..0234987 100644 (file)
@@ -938,7 +938,10 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
                /* see post_send() */
                barrier();
                rvt_put_swqe(wqe);
-               rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+               rvt_qp_swqe_complete(qp,
+                                    wqe,
+                                    ib_qib_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
        }
        /*
         * If we were waiting for sends to complete before resending,
@@ -983,7 +986,10 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
                qp->s_last = s_last;
                /* see post_send() */
                barrier();
-               rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+               rvt_qp_swqe_complete(qp,
+                                    wqe,
+                                    ib_qib_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
        } else
                this_cpu_inc(*ibp->rvp.rc_delayed_comp);
 
index 17655cc..6e1adf7 100644 (file)
@@ -769,7 +769,10 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
            qp->ibqp.qp_type == IB_QPT_GSI)
                atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-       rvt_qp_swqe_complete(qp, wqe, status);
+       rvt_qp_swqe_complete(qp,
+                            wqe,
+                            ib_qib_wc_opcode[wqe->wr.opcode],
+                            status);
 
        if (qp->s_acked == old_last)
                qp->s_acked = last;
index 83f8b5f..e120efe 100644 (file)
@@ -114,6 +114,19 @@ module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
 
 /*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_qib_wc_opcode[] = {
+       [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+       [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+       [IB_WR_SEND] = IB_WC_SEND,
+       [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+       [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+       [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
+/*
  * System image GUID.
  */
 __be64 ib_qib_sys_image_guid;
index f5ad8d4..28fb724 100644 (file)
@@ -117,23 +117,6 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
 };
 EXPORT_SYMBOL(ib_rvt_state_ops);
 
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_rvt_wc_opcode[] = {
-       [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-       [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-       [IB_WR_SEND] = IB_WC_SEND,
-       [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-       [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-       [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
-       [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
-       [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
-       [IB_WR_REG_MR] = IB_WC_REG_MR
-};
-EXPORT_SYMBOL(ib_rvt_wc_opcode);
-
 static void get_map_page(struct rvt_qpn_table *qpt,
                         struct rvt_qpn_map *map,
                         gfp_t gfp)
index f381639..3cdd9e2 100644 (file)
@@ -574,6 +574,7 @@ extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
 static inline void rvt_qp_swqe_complete(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe,
+       enum ib_wc_opcode opcode,
        enum ib_wc_status status)
 {
        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
@@ -586,7 +587,7 @@ static inline void rvt_qp_swqe_complete(
                memset(&wc, 0, sizeof(wc));
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
-               wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
+               wc.opcode = opcode;
                wc.qp = &qp->ibqp;
                wc.byte_len = wqe->length;
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,