RDMA/rxe: Add link_downed, rdma_sends, rdma_recvs stats counters
authorAndrew Boyer <andrew.boyer@dell.com>
Thu, 1 Nov 2018 13:18:46 +0000 (09:18 -0400)
committerJason Gunthorpe <jgg@mellanox.com>
Thu, 8 Nov 2018 21:22:54 +0000 (14:22 -0700)
link_downed counts the number of times the port has transitioned to the
down state.

rdma_sends and rdma_recvs count the number of RDMA Send and RDMA Receive
operations completed successfully. This differs from the existing
sent_pkts and rcvd_pkts counters, which measure packets rather than RDMA
operations.
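
For reference only (not part of this patch): these per-port counters are
exposed through the device's hw_counters directory in sysfs. A minimal
userspace sketch for reading them, assuming a device named rxe0 and port
1 (both local assumptions), might look like this:

  /* Sketch: read an rxe per-port hw counter from sysfs; returns 0 on error. */
  #include <stdio.h>

  static unsigned long long read_counter(const char *name)
  {
          char path[256];
          unsigned long long val = 0;
          FILE *f;

          snprintf(path, sizeof(path),
                   "/sys/class/infiniband/rxe0/ports/1/hw_counters/%s", name);
          f = fopen(path, "r");
          if (!f)
                  return 0;
          if (fscanf(f, "%llu", &val) != 1)
                  val = 0;
          fclose(f);
          return val;
  }

  int main(void)
  {
          printf("rdma_sends: %llu\n", read_counter("rdma_sends"));
          printf("rdma_recvs: %llu\n", read_counter("rdma_recvs"));
          return 0;
  }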

ack_deffered is renamed to ack_deferred to fix the spelling.

out_of_sequence is renamed to out_of_seq_request to make clear that it is
counting only requests and not other packets which can be out of sequence.
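
Since the renames change user-visible sysfs names, a monitoring tool that
matches on the old strings will no longer find them. A hedged sketch
(again assuming device rxe0 and port 1) that tries the new name and falls
back to the old one:

  /* Sketch: read a counter by its new name, falling back to the old name. */
  #include <stdio.h>

  static void counter_path(char *buf, size_t len, const char *name)
  {
          snprintf(buf, len,
                   "/sys/class/infiniband/rxe0/ports/1/hw_counters/%s", name);
  }

  static long long read_with_fallback(const char *new_name, const char *old_name)
  {
          char path[256];
          long long val = -1;
          FILE *f;

          counter_path(path, sizeof(path), new_name);
          f = fopen(path, "r");
          if (!f) {
                  counter_path(path, sizeof(path), old_name);
                  f = fopen(path, "r");
          }
          if (!f)
                  return -1;
          if (fscanf(f, "%lld", &val) != 1)
                  val = -1;
          fclose(f);
          return val;
  }

  int main(void)
  {
          printf("ack_deferred: %lld\n",
                 read_with_fallback("ack_deferred", "ack_deffered"));
          printf("out_of_seq_request: %lld\n",
                 read_with_fallback("out_of_seq_request", "out_of_sequence"));
          return 0;
  }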

Signed-off-by: Andrew Boyer <andrew.boyer@dell.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_hw_counters.c
drivers/infiniband/sw/rxe/rxe_hw_counters.h
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_resp.c

index ea089cb..e996da6 100644 (file)
@@ -439,6 +439,7 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
  */
 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 {
+       struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct rxe_cqe cqe;
 
        if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
@@ -451,6 +452,11 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
                advance_consumer(qp->sq.queue);
        }
 
+       if (wqe->wr.opcode == IB_WR_SEND ||
+           wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
+           wqe->wr.opcode == IB_WR_SEND_WITH_INV)
+               rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);
+
        /*
         * we completed something so let req run again
         * if it is trying to fence
index 6aeb7a1..4a24895 100644 (file)
@@ -37,15 +37,18 @@ static const char * const rxe_counter_name[] = {
        [RXE_CNT_SENT_PKTS]           =  "sent_pkts",
        [RXE_CNT_RCVD_PKTS]           =  "rcvd_pkts",
        [RXE_CNT_DUP_REQ]             =  "duplicate_request",
-       [RXE_CNT_OUT_OF_SEQ_REQ]      =  "out_of_sequence",
+       [RXE_CNT_OUT_OF_SEQ_REQ]      =  "out_of_seq_request",
        [RXE_CNT_RCV_RNR]             =  "rcvd_rnr_err",
        [RXE_CNT_SND_RNR]             =  "send_rnr_err",
        [RXE_CNT_RCV_SEQ_ERR]         =  "rcvd_seq_err",
-       [RXE_CNT_COMPLETER_SCHED]     =  "ack_deffered",
+       [RXE_CNT_COMPLETER_SCHED]     =  "ack_deferred",
        [RXE_CNT_RETRY_EXCEEDED]      =  "retry_exceeded_err",
        [RXE_CNT_RNR_RETRY_EXCEEDED]  =  "retry_rnr_exceeded_err",
        [RXE_CNT_COMP_RETRY]          =  "completer_retry_err",
        [RXE_CNT_SEND_ERR]            =  "send_err",
+       [RXE_CNT_LINK_DOWNED]         =  "link_downed",
+       [RXE_CNT_RDMA_SEND]           =  "rdma_sends",
+       [RXE_CNT_RDMA_RECV]           =  "rdma_recvs",
 };
 
 int rxe_ib_get_hw_stats(struct ib_device *ibdev,
index f44df1b..72c0d63 100644 (file)
@@ -50,6 +50,9 @@ enum rxe_counters {
        RXE_CNT_RNR_RETRY_EXCEEDED,
        RXE_CNT_COMP_RETRY,
        RXE_CNT_SEND_ERR,
+       RXE_CNT_LINK_DOWNED,
+       RXE_CNT_RDMA_SEND,
+       RXE_CNT_RDMA_RECV,
        RXE_NUM_OF_COUNTERS
 };
 
index cb028a3..b26a814 100644 (file)
@@ -621,6 +621,7 @@ void rxe_port_down(struct rxe_dev *rxe)
        port->attr.state = IB_PORT_DOWN;
 
        rxe_port_event(rxe, IB_EVENT_PORT_ERR);
+       rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
        dev_info(&rxe->ib_dev.dev, "set down\n");
 }
 
index 4ed54c9..59e5c18 100644 (file)
@@ -835,6 +835,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
        struct ib_wc *wc = &cqe.ibwc;
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;
+       struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 
        if (unlikely(!wqe))
                return RESPST_CLEANUP;
@@ -852,6 +853,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
        }
 
        if (wc->status == IB_WC_SUCCESS) {
+               rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
                wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
@@ -900,7 +902,6 @@ static enum resp_states do_complete(struct rxe_qp *qp,
                        }
 
                        if (pkt->mask & RXE_IETH_MASK) {
-                               struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
                                struct rxe_mem *rmr;
 
                                wc->wc_flags |= IB_WC_WITH_INVALIDATE;