RDMA/mlx5: Introduce mlx5r_umr_post_send_wait()
author Aharon Landau <aharonl@nvidia.com>
Tue, 12 Apr 2022 07:24:01 +0000 (10:24 +0300)
committer Jason Gunthorpe <jgg@nvidia.com>
Mon, 25 Apr 2022 14:53:00 +0000 (11:53 -0300)
Introduce mlx5r_umr_post_send_wait(), which uses a UMR-adjusted flow for
posting WQEs. The next patches will gradually move UMR operations over to
this flow. Once that is done, mlx5_ib_post_send_wait() will be removed.

mlx5r_umr_post_send_wait() receives already-written WQE segments and only
memcpys them into the SQ. This way, we avoid packing all the data into a
WR just to unpack it again into the WQE.
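For illustration, a converted caller could look roughly like the sketch
below. This is hypothetical (example_enable_mkey() is not part of this
patch; the real conversions follow in the next patches), but it shows the
intended shape: the segments are written once, directly into a struct
mlx5r_umr_wqe, using helpers such as get_umr_enable_mr_mask() from this
file and the MLX5_UMR_INLINE flag from the mlx5 headers:

  /* Hypothetical caller, for illustration only */
  static int example_enable_mkey(struct mlx5_ib_dev *dev, u32 mkey)
  {
          struct mlx5r_umr_wqe wqe = {};

          /* Write the WQE segments once, in place */
          wqe.ctrl_seg.mkey_mask = get_umr_enable_mr_mask();
          wqe.ctrl_seg.flags = MLX5_UMR_INLINE;

          /* No trailing data segment, so post without data */
          return mlx5r_umr_post_send_wait(dev, mkey, &wqe, false);
  }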

Link: https://lore.kernel.org/r/f027dd592fde62402b2d49efded8d1d22229d22b.1649747695.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/mlx5/umr.c
drivers/infiniband/hw/mlx5/umr.h

diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 8131501..f17f64c 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -3,6 +3,7 @@
 
 #include "mlx5_ib.h"
 #include "umr.h"
+#include "wr.h"
 
 static __be64 get_umr_enable_mr_mask(void)
 {
@@ -228,3 +229,96 @@ void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
        ib_free_cq(dev->umrc.cq);
        ib_dealloc_pd(dev->umrc.pd);
 }
+
+static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
+                              struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+       /* Copy the trailing data segment only when the WQE carries data */
+       unsigned int wqe_size =
+               with_data ? sizeof(struct mlx5r_umr_wqe) :
+                           sizeof(struct mlx5r_umr_wqe) -
+                                   sizeof(struct mlx5_wqe_data_seg);
+       struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       struct mlx5_ib_qp *qp = to_mqp(ibqp);
+       struct mlx5_wqe_ctrl_seg *ctrl;
+       /* Carry the cqe pointer in the WQE's wr_id field */
+       union {
+               struct ib_cqe *ib_cqe;
+               u64 wr_id;
+       } id;
+       void *cur_edge, *seg;
+       unsigned long flags;
+       unsigned int idx;
+       int size, err;
+
+       if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+               return -EIO;
+
+       spin_lock_irqsave(&qp->sq.lock, flags);
+
+       err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge, 0,
+                             cpu_to_be32(mkey), false, false);
+       if (WARN_ON(err))
+               goto out;
+
+       qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
+
+       mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size);
+
+       id.ib_cqe = cqe;
+       mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
+                        MLX5_FENCE_MODE_NONE, MLX5_OPCODE_UMR);
+
+       mlx5r_ring_db(qp, 1, ctrl);
+
+out:
+       spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+       return err;
+}
+
+static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       struct mlx5r_umr_context *context =
+               container_of(wc->wr_cqe, struct mlx5r_umr_context, cqe);
+
+       context->status = wc->status;
+       complete(&context->done);
+}
+
+static inline void mlx5r_umr_init_context(struct mlx5r_umr_context *context)
+{
+       context->cqe.done = mlx5r_umr_done;
+       init_completion(&context->done);
+}
+
+static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
+                                  struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+       struct umr_common *umrc = &dev->umrc;
+       struct mlx5r_umr_context umr_context;
+       int err;
+
+       err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask));
+       if (WARN_ON(err))
+               return err;
+
+       mlx5r_umr_init_context(&umr_context);
+
+       down(&umrc->sem);
+       err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
+                                 with_data);
+       if (err) {
+               mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
+       } else {
+               wait_for_completion(&umr_context.done);
+               if (umr_context.status != IB_WC_SUCCESS) {
+                       mlx5_ib_warn(dev, "reg umr failed (%u)\n",
+                                    umr_context.status);
+                       err = -EFAULT;
+               }
+       }
+       up(&umrc->sem);
+       return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
index 0fe6cdd..d984213 100644
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -79,4 +79,16 @@ int mlx5r_umr_set_umr_ctrl_seg(struct mlx5_ib_dev *dev,
                               struct mlx5_wqe_umr_ctrl_seg *umr,
                               const struct ib_send_wr *wr);
 
+struct mlx5r_umr_context {
+       struct ib_cqe cqe;
+       enum ib_wc_status status;
+       struct completion done;
+};
+
+struct mlx5r_umr_wqe {
+       struct mlx5_wqe_umr_ctrl_seg ctrl_seg;
+       struct mlx5_mkey_seg mkey_seg;
+       struct mlx5_wqe_data_seg data_seg;
+};
+
 #endif /* _MLX5_IB_UMR_H */
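
For reference, a minimal sketch of a posting that carries the trailing
data segment (with_data = true). This is hypothetical and not part of the
patch: example_post_with_data() and its addr/len/lkey parameters are
placeholders, and the mkey_mask value is only illustrative:

  /* Hypothetical with_data caller: the trailing mlx5_wqe_data_seg is
   * copied into the SQ only when with_data is true (see wqe_size in
   * mlx5r_umr_post_send()).
   */
  static int example_post_with_data(struct mlx5_ib_dev *dev, u32 mkey,
                                    dma_addr_t addr, u32 len, u32 lkey)
  {
          struct mlx5r_umr_wqe wqe = {};

          wqe.ctrl_seg.mkey_mask = get_umr_enable_mr_mask(); /* placeholder */

          /* Describe the buffer the UMR fetches its data from */
          wqe.data_seg.addr = cpu_to_be64(addr);
          wqe.data_seg.byte_count = cpu_to_be32(len);
          wqe.data_seg.lkey = cpu_to_be32(lkey);

          return mlx5r_umr_post_send_wait(dev, mkey, &wqe, true);
  }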