staging/rdma/hfi1: pre-compute sc and sde for RC/UC QPs
authorMike Marciniszyn <mike.marciniszyn@intel.com>
Tue, 10 Nov 2015 00:13:59 +0000 (19:13 -0500)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 20 Nov 2015 00:55:37 +0000 (16:55 -0800)
Now that we have a multi-threaded work queue, we precompute and store the SC
and SDE on RC and UC QPs for faster access in the send path.
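
Illustrative sketch (not part of the patch): the caching pattern in
miniature, as a standalone userspace C program.  All names below (fake_qp,
fake_sde, lookup_sc, lookup_sde, engines) are invented for illustration;
only the idea mirrors the driver change -- derive the SC and SDMA engine
once in the slow path (QP modify) and reuse the cached values in the
per-packet send path instead of recomputing them each time.

#include <stdio.h>

struct fake_sde { int engine_id; };

struct fake_qp {
	int sl;                  /* service level, as carried in the AH */
	int s_sc;                /* cached SC, computed at modify time */
	struct fake_sde *s_sde;  /* cached SDMA engine, computed at modify time */
};

static struct fake_sde engines[4] = { {0}, {1}, {2}, {3} };

/* stand-ins for the sl->sc and sc->engine lookups done by the driver */
static int lookup_sc(int sl) { return sl & 0xf; }
static struct fake_sde *lookup_sde(int sc) { return &engines[sc % 4]; }

/* slow path: compute and cache, analogous to hfi1_modify_qp() */
static void modify_qp(struct fake_qp *qp, int new_sl)
{
	qp->sl = new_sl;
	qp->s_sc = lookup_sc(new_sl);
	qp->s_sde = lookup_sde(qp->s_sc);
}

/* hot path: only reads the cached fields, analogous to the send side */
static void send_packet(const struct fake_qp *qp)
{
	printf("send on sc %d via engine %d\n", qp->s_sc, qp->s_sde->engine_id);
}

int main(void)
{
	struct fake_qp qp = { 0 };

	modify_qp(&qp, 5);   /* derive SC/SDE once */
	send_packet(&qp);    /* reuse on every packet without re-deriving */
	send_packet(&qp);
	return 0;
}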

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/rdma/hfi1/qp.c
drivers/staging/rdma/hfi1/qp.h
drivers/staging/rdma/hfi1/ruc.c
drivers/staging/rdma/hfi1/ud.c
drivers/staging/rdma/hfi1/verbs.c
drivers/staging/rdma/hfi1/verbs.h

diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index a2bbf0b..d37c4a7 100644
@@ -617,7 +617,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        int mig = 0;
        int ret;
        u32 pmtu = 0; /* for gcc warning only */
-       struct hfi1_devdata *dd;
+       struct hfi1_devdata *dd = dd_from_dev(dev);
 
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);
@@ -631,23 +631,35 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto inval;
 
        if (attr_mask & IB_QP_AV) {
+               u8 sc;
+
                if (attr->ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
                        goto inval;
                if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
+               sc = ah_to_sc(ibqp->device, &attr->ah_attr);
+               if (!qp_to_sdma_engine(qp, sc) &&
+                   dd->flags & HFI1_HAS_SEND_DMA)
+                       goto inval;
        }
 
        if (attr_mask & IB_QP_ALT_PATH) {
+               u8 sc;
+
                if (attr->alt_ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
                        goto inval;
                if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
-               if (attr->alt_pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
+               if (attr->alt_pkey_index >= hfi1_get_npkeys(dd))
+                       goto inval;
+               sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
+               if (!qp_to_sdma_engine(qp, sc) &&
+                   dd->flags & HFI1_HAS_SEND_DMA)
                        goto inval;
        }
 
        if (attr_mask & IB_QP_PKEY_INDEX)
-               if (attr->pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
+               if (attr->pkey_index >= hfi1_get_npkeys(dd))
                        goto inval;
 
        if (attr_mask & IB_QP_MIN_RNR_TIMER)
@@ -792,6 +804,8 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_srate = attr->ah_attr.static_rate;
                qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
+               qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+               qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
        }
 
        if (attr_mask & IB_QP_ALT_PATH) {
@@ -806,6 +820,8 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                        qp->port_num = qp->alt_ah_attr.port_num;
                        qp->s_pkey_index = qp->s_alt_pkey_index;
                        qp->s_flags |= HFI1_S_AHG_CLEAR;
+                       qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+                       qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
                }
        }
 
@@ -1528,9 +1544,6 @@ struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5)
        if (!(dd->flags & HFI1_HAS_SEND_DMA))
                return NULL;
        switch (qp->ibqp.qp_type) {
-       case IB_QPT_UC:
-       case IB_QPT_RC:
-               break;
        case IB_QPT_SMI:
                return NULL;
        default:
@@ -1699,6 +1712,8 @@ void hfi1_migrate_qp(struct hfi1_qp *qp)
        qp->port_num = qp->alt_ah_attr.port_num;
        qp->s_pkey_index = qp->s_alt_pkey_index;
        qp->s_flags |= HFI1_S_AHG_CLEAR;
+       qp->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
+       qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
 
        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h
index e49cfa6..94fd748 100644
@@ -128,7 +128,6 @@ static inline void clear_ahg(struct hfi1_qp *qp)
        if (qp->s_sde && qp->s_ahgidx >= 0)
                sdma_ahg_free(qp->s_sde, qp->s_ahgidx);
        qp->s_ahgidx = -1;
-       qp->s_sde = NULL;
 }
 
 /**
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c
index cf8ac61..863092b 100644
@@ -694,11 +694,8 @@ static inline void build_ahg(struct hfi1_qp *qp, u32 npsn)
                clear_ahg(qp);
        if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
                /* first middle that needs copy  */
-               if (qp->s_ahgidx < 0) {
-                       if (!qp->s_sde)
-                               qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
+               if (qp->s_ahgidx < 0)
                        qp->s_ahgidx = sdma_ahg_alloc(qp->s_sde);
-               }
                if (qp->s_ahgidx >= 0) {
                        qp->s_ahgpsn = npsn;
                        qp->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
@@ -741,7 +738,6 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
        u16 lrh0;
        u32 nwords;
        u32 extra_bytes;
-       u8 sc5;
        u32 bth1;
 
        /* Construct the header. */
@@ -755,9 +751,7 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
                lrh0 = HFI1_LRH_GRH;
                middle = 0;
        }
-       sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
-       lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
-       qp->s_sc = sc5;
+       lrh0 |= (qp->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
        /*
         * reset s_hdr/AHG fields
         *
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c
index 5a9c784..54ff1f5 100644
@@ -383,6 +383,7 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
                lrh0 |= (sc5 & 0xf) << 12;
                qp->s_sc = sc5;
        }
+       qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
        qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
        qp->s_hdr->ibh.lrh[2] =
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 38d2cd3..296b7cb 100644
@@ -1011,7 +1011,6 @@ int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
        struct verbs_txreq *tx;
        struct sdma_txreq *stx;
        u64 pbc_flags = 0;
-       struct sdma_engine *sde;
        u8 sc5 = qp->s_sc;
        int ret;
 
@@ -1032,12 +1031,7 @@ int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
        if (IS_ERR(tx))
                goto bail_tx;
 
-       if (!qp->s_hdr->sde) {
-               tx->sde = sde = qp_to_sdma_engine(qp, sc5);
-               if (!sde)
-                       goto bail_no_sde;
-       } else
-               tx->sde = sde = qp->s_hdr->sde;
+       tx->sde = qp->s_sde;
 
        if (likely(pbc == 0)) {
                u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
@@ -1052,17 +1046,15 @@ int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
        if (qp->s_rdma_mr)
                qp->s_rdma_mr = NULL;
        tx->hdr_dwords = hdrwords + 2;
-       ret = build_verbs_tx_desc(sde, ss, len, tx, ahdr, pbc);
+       ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
        if (unlikely(ret))
                goto bail_build;
        trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
-       ret =  sdma_send_txreq(sde, &qp->s_iowait, &tx->txreq);
+       ret =  sdma_send_txreq(tx->sde, &qp->s_iowait, &tx->txreq);
        if (unlikely(ret == -ECOMM))
                goto bail_ecomm;
        return ret;
 
-bail_no_sde:
-       hfi1_put_txreq(tx);
 bail_ecomm:
        /* The current one got "sent" */
        return 0;
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
index b5013f8..fdbe0f9 100644
@@ -441,7 +441,8 @@ struct hfi1_qp {
        struct hfi1_swqe *s_wq;  /* send work queue */
        struct hfi1_mmap_info *ip;
        struct ahg_ib_header *s_hdr;     /* next packet header to send */
-       u8 s_sc;                        /* SC[0..4] for next packet */
+       /* sc for UC/RC QPs - based on ah for UD */
+       u8 s_sc;
        unsigned long timeout_jiffies;  /* computed from timeout */
 
        enum ib_mtu path_mtu;