RDMA/core: Simplify ib_post_(send|recv|srq_recv)() calls
Author: Bart Van Assche <bart.vanassche@wdc.com>
Wed, 18 Jul 2018 16:25:17 +0000 (09:25 -0700)
Committer: Jason Gunthorpe <jgg@mellanox.com>
Tue, 24 Jul 2018 22:06:36 +0000 (16:06 -0600)
Since commit f696bf6d64b1 ("RDMA: Constify the argument of the work request conversion functions") made the third argument optional, stop declaring and passing a dummy 'bad_wr' pointer and instead pass NULL as the third argument to ib_post_(send|recv|srq_recv)().

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/core/mad.c
drivers/infiniband/core/rw.c
drivers/infiniband/core/verbs.c

index 34e9b27..ef459f2 100644 (file)
@@ -1181,7 +1181,6 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 {
        struct ib_mad_qp_info *qp_info;
        struct list_head *list;
-       struct ib_send_wr *bad_send_wr;
        struct ib_mad_agent *mad_agent;
        struct ib_sge *sge;
        unsigned long flags;
@@ -1219,7 +1218,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
                ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
-                                  &bad_send_wr);
+                                  NULL);
                list = &qp_info->send_queue.list;
        } else {
                ret = 0;
@@ -2476,7 +2475,6 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
        struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
        struct ib_mad_qp_info           *qp_info;
        struct ib_mad_queue             *send_queue;
-       struct ib_send_wr               *bad_send_wr;
        struct ib_mad_send_wc           mad_send_wc;
        unsigned long flags;
        int ret;
@@ -2526,7 +2524,7 @@ retry:
 
        if (queued_send_wr) {
                ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
-                                  &bad_send_wr);
+                                  NULL);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "ib_post_send failed: %d\n", ret);
@@ -2571,11 +2569,9 @@ static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
        if (wc->status == IB_WC_WR_FLUSH_ERR) {
                if (mad_send_wr->retry) {
                        /* Repost send */
-                       struct ib_send_wr *bad_send_wr;
-
                        mad_send_wr->retry = 0;
                        ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
-                                       &bad_send_wr);
+                                          NULL);
                        if (!ret)
                                return false;
                }
@@ -2891,7 +2887,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
        int post, ret;
        struct ib_mad_private *mad_priv;
        struct ib_sge sg_list;
-       struct ib_recv_wr recv_wr, *bad_recv_wr;
+       struct ib_recv_wr recv_wr;
        struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
 
        /* Initialize common scatter list fields */
@@ -2935,7 +2931,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                post = (++recv_queue->count < recv_queue->max_active);
                list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
                spin_unlock_irqrestore(&recv_queue->lock, flags);
-               ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
+               ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
                if (ret) {
                        spin_lock_irqsave(&recv_queue->lock, flags);
                        list_del(&mad_priv->header.mad_list.list);
index 474d652..683e6d1 100644 (file)
@@ -564,10 +564,10 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs);
 int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
 {
-       struct ib_send_wr *first_wr, *bad_wr;
+       struct ib_send_wr *first_wr;
 
        first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
-       return ib_post_send(qp, first_wr, &bad_wr);
+       return ib_post_send(qp, first_wr, NULL);
 }
 EXPORT_SYMBOL(rdma_rw_ctx_post);
 
index b6ceb6f..cde359d 100644 (file)
@@ -2473,7 +2473,6 @@ static void __ib_drain_sq(struct ib_qp *qp)
        struct ib_cq *cq = qp->send_cq;
        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        struct ib_drain_cqe sdrain;
-       struct ib_send_wr *bad_swr;
        struct ib_rdma_wr swr = {
                .wr = {
                        .next = NULL,
@@ -2492,7 +2491,7 @@ static void __ib_drain_sq(struct ib_qp *qp)
        sdrain.cqe.done = ib_drain_qp_done;
        init_completion(&sdrain.done);
 
-       ret = ib_post_send(qp, &swr.wr, &bad_swr);
+       ret = ib_post_send(qp, &swr.wr, NULL);
        if (ret) {
                WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
                return;
@@ -2513,7 +2512,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
        struct ib_cq *cq = qp->recv_cq;
        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        struct ib_drain_cqe rdrain;
-       struct ib_recv_wr rwr = {}, *bad_rwr;
+       struct ib_recv_wr rwr = {};
        int ret;
 
        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
@@ -2526,7 +2525,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
        rdrain.cqe.done = ib_drain_qp_done;
        init_completion(&rdrain.done);
 
-       ret = ib_post_recv(qp, &rwr, &bad_rwr);
+       ret = ib_post_recv(qp, &rwr, NULL);
        if (ret) {
                WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
                return;