net/xprtrdma: Simplify ib_post_(send|recv|srq_recv)() calls
author: Bart Van Assche <bart.vanassche@wdc.com>
        Wed, 18 Jul 2018 16:25:31 +0000 (09:25 -0700)
committer: Jason Gunthorpe <jgg@mellanox.com>
        Tue, 24 Jul 2018 22:06:37 +0000 (16:06 -0600)
Instead of declaring and passing a dummy 'bad_wr' pointer, pass NULL
as third argument to ib_post_(send|recv|srq_recv)().

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Acked-by: Anna Schumaker <Anna.Schumaker@netapp.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
net/sunrpc/xprtrdma/fmr_ops.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_rw.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c

index 17fb1e0..0f7c465 100644 (file)
@@ -279,9 +279,7 @@ out_maperr:
 static int
 fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-       struct ib_send_wr *bad_wr;
-
-       return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr);
+       return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, NULL);
 }
 
 /* Invalidate all memory regions that were registered for "req".
index c040de1..a167eeb 100644 (file)
@@ -464,7 +464,7 @@ out_mapmr_err:
 static int
 frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-       struct ib_send_wr *post_wr, *bad_wr;
+       struct ib_send_wr *post_wr;
        struct rpcrdma_mr *mr;
 
        post_wr = &req->rl_sendctx->sc_wr;
@@ -486,7 +486,7 @@ frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
        /* If ib_post_send fails, the next ->send_request for
         * @req will queue these MWs for recovery.
         */
-       return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
+       return ib_post_send(ia->ri_id->qp, post_wr, NULL);
 }
 
 /* Handle a remotely invalidated mr on the @mrs list
index 841fca1..2ef75e8 100644 (file)
@@ -229,11 +229,10 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
                                struct svc_rdma_recv_ctxt *ctxt)
 {
-       struct ib_recv_wr *bad_recv_wr;
        int ret;
 
        svc_xprt_get(&rdma->sc_xprt);
-       ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr);
+       ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
        trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
        if (ret)
                goto err_post;
index ce3ea84..8097542 100644 (file)
@@ -329,7 +329,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
        do {
                if (atomic_sub_return(cc->cc_sqecount,
                                      &rdma->sc_sq_avail) > 0) {
-                       ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
+                       ret = ib_post_send(rdma->sc_qp, first_wr, NULL);
                        trace_svcrdma_post_rw(&cc->cc_cqe,
                                              cc->cc_sqecount, ret);
                        if (ret)
index 4a3efae..ffef0c5 100644 (file)
@@ -291,7 +291,6 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
  */
 int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
 {
-       struct ib_send_wr *bad_wr;
        int ret;
 
        might_sleep();
@@ -311,7 +310,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
                }
 
                svc_xprt_get(&rdma->sc_xprt);
-               ret = ib_post_send(rdma->sc_qp, wr, &bad_wr);
+               ret = ib_post_send(rdma->sc_qp, wr, NULL);
                trace_svcrdma_post_send(wr, ret);
                if (ret) {
                        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);