IB/mlx5: Break up wqe handling into begin & finish routines
author		Sagi Grimberg <sagig@mellanox.com>
		Sun, 23 Feb 2014 12:19:08 +0000 (14:19 +0200)
committer	Roland Dreier <roland@purestorage.com>
		Fri, 7 Mar 2014 19:26:49 +0000 (11:26 -0800)
As a preliminary step for the signature feature, which will require posting
multiple (3) WQEs for a single WR, break the post_send routine's WQE
indexing into begin and finish routines.

This patch does not change any functionality.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
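
For orientation, here is a condensed sketch of the post-send loop shape this
patch produces, pieced together from the hunks below (the opcode bounds check,
the num_sge check, and all per-opcode segment building are elided):

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		fence = qp->fm_cache;

		/* begin_wqe(): check for send queue overflow, pick the WQE
		 * slot and initialize the control segment. */
		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (err) {
			*bad_wr = wr;
			goto out;
		}

		/* ... opcode-specific segments are built into seg here,
		 * growing size, exactly as before this patch ... */

		/* finish_wqe(): fill in opcode/index/fence, record wr_id
		 * and advance cur_post past the WQE just built. */
		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
			   get_fence(fence, wr), next_fence,
			   mlx5_ib_opcode[wr->opcode]);
	}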
drivers/infiniband/hw/mlx5/qp.c

index 01999f3..a029009 100644
@@ -2047,6 +2047,59 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
        }
 }
 
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+                    struct mlx5_wqe_ctrl_seg **ctrl,
+                    struct ib_send_wr *wr, int *idx,
+                    int *size, int nreq)
+{
+       int err = 0;
+
+       if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
+               err = -ENOMEM;
+               return err;
+       }
+
+       *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+       *seg = mlx5_get_send_wqe(qp, *idx);
+       *ctrl = *seg;
+       *(uint32_t *)(*seg + 8) = 0;
+       (*ctrl)->imm = send_ieth(wr);
+       (*ctrl)->fm_ce_se = qp->sq_signal_bits |
+               (wr->send_flags & IB_SEND_SIGNALED ?
+                MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+               (wr->send_flags & IB_SEND_SOLICITED ?
+                MLX5_WQE_CTRL_SOLICITED : 0);
+
+       *seg += sizeof(**ctrl);
+       *size = sizeof(**ctrl) / 16;
+
+       return err;
+}
+
+static void finish_wqe(struct mlx5_ib_qp *qp,
+                      struct mlx5_wqe_ctrl_seg *ctrl,
+                      u8 size, unsigned idx, u64 wr_id,
+                      int nreq, u8 fence, u8 next_fence,
+                      u32 mlx5_opcode)
+{
+       u8 opmod = 0;
+
+       ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
+                                            mlx5_opcode | ((u32)opmod << 24));
+       ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
+       ctrl->fm_ce_se |= fence;
+       qp->fm_cache = next_fence;
+       if (unlikely(qp->wq_sig))
+               ctrl->signature = wq_sig(ctrl);
+
+       qp->sq.wrid[idx] = wr_id;
+       qp->sq.w_list[idx].opcode = mlx5_opcode;
+       qp->sq.wqe_head[idx] = qp->sq.head + nreq;
+       qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+       qp->sq.w_list[idx].next = qp->sq.cur_post;
+}
+
+
 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr)
 {
@@ -2060,7 +2113,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        int uninitialized_var(size);
        void *qend = qp->sq.qend;
        unsigned long flags;
-       u32 mlx5_opcode;
        unsigned idx;
        int err = 0;
        int inl = 0;
@@ -2069,7 +2121,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        int nreq;
        int i;
        u8 next_fence = 0;
-       u8 opmod = 0;
        u8 fence;
 
        spin_lock_irqsave(&qp->sq.lock, flags);
@@ -2082,36 +2133,23 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        goto out;
                }
 
-               if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
+               fence = qp->fm_cache;
+               num_sge = wr->num_sge;
+               if (unlikely(num_sge > qp->sq.max_gs)) {
                        mlx5_ib_warn(dev, "\n");
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }
 
-               fence = qp->fm_cache;
-               num_sge = wr->num_sge;
-               if (unlikely(num_sge > qp->sq.max_gs)) {
+               err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
+               if (err) {
                        mlx5_ib_warn(dev, "\n");
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }
 
-               idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
-               seg = mlx5_get_send_wqe(qp, idx);
-               ctrl = seg;
-               *(uint32_t *)(seg + 8) = 0;
-               ctrl->imm = send_ieth(wr);
-               ctrl->fm_ce_se = qp->sq_signal_bits |
-                       (wr->send_flags & IB_SEND_SIGNALED ?
-                        MLX5_WQE_CTRL_CQ_UPDATE : 0) |
-                       (wr->send_flags & IB_SEND_SOLICITED ?
-                        MLX5_WQE_CTRL_SOLICITED : 0);
-
-               seg += sizeof(*ctrl);
-               size = sizeof(*ctrl) / 16;
-
                switch (ibqp->qp_type) {
                case IB_QPT_XRC_INI:
                        xrc = seg;
@@ -2244,22 +2282,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                }
 
-               mlx5_opcode = mlx5_ib_opcode[wr->opcode];
-               ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8)      |
-                                                    mlx5_opcode                        |
-                                                    ((u32)opmod << 24));
-               ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
-               ctrl->fm_ce_se |= get_fence(fence, wr);
-               qp->fm_cache = next_fence;
-               if (unlikely(qp->wq_sig))
-                       ctrl->signature = wq_sig(ctrl);
-
-               qp->sq.wrid[idx] = wr->wr_id;
-               qp->sq.w_list[idx].opcode = mlx5_opcode;
-               qp->sq.wqe_head[idx] = qp->sq.head + nreq;
-               qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
-               qp->sq.w_list[idx].next = qp->sq.cur_post;
-
+               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                          get_fence(fence, wr), next_fence,
+                          mlx5_ib_opcode[wr->opcode]);
                if (0)
                        dump_wqe(qp, idx, size);
        }
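
The value of the split shows up once a single WR has to emit more than one
WQE, as the signature work mentioned in the commit message will. A hedged
sketch of that anticipated usage follows; the segment-building steps and the
opcodes shown are illustrative placeholders, not code from this or any later
patch:

	/* First WQE of the work request. */
	err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
	if (err)
		goto out;
	/* ... write the first WQE's segments into seg, growing size ... */
	finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
		   fence, next_fence, MLX5_OPCODE_UMR);

	/* Next WQE for the same wr: repeat the pair; each begin_wqe()
	 * call picks the next slot via qp->sq.cur_post, which the
	 * previous finish_wqe() advanced. */
	err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
	if (err)
		goto out;
	/* ... write the second WQE's segments ... */
	finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
		   fence, next_fence, MLX5_OPCODE_SET_PSV);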