RDMA/mlx5: Use mlx5r_umr_post_send_wait() to rereg pd access
author Aharon Landau <aharonl@nvidia.com>
Tue, 12 Apr 2022 07:24:03 +0000 (10:24 +0300)
committer Jason Gunthorpe <jgg@nvidia.com>
Mon, 25 Apr 2022 14:53:00 +0000 (11:53 -0300)
Move the rereg_pd_access logic to umr.c, and use mlx5r_umr_post_send_wait()
instead of mlx5_ib_post_send_wait().
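
The removed helper encoded its intent in ib_send_wr send_flags and left
the translation into a UMR WQE to the post path; the new helper builds
the WQE directly. A minimal sketch of the new encoding, taken from the
umr.c hunk below (it assumes a valid struct mlx5_ib_dev *dev):

	struct mlx5r_umr_wqe wqe = {};

	/* Abort the UMR if the mkey is in the free state, and carry the
	 * mkey context inline in the WQE.
	 */
	wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE | MLX5_UMR_INLINE;
	/* Declare which mkey-context fields this WQE may change. */
	wqe.ctrl_seg.mkey_mask = get_umr_update_access_mask(dev) |
				 get_umr_update_pd_mask();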

Link: https://lore.kernel.org/r/18da4f47edbc2561f652b7ee4e7a5269e866af77.1649747695.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/umr.c
drivers/infiniband/hw/mlx5/umr.h

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 32ad93e..50b4ccd 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1646,30 +1646,6 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
                                      target_access_flags);
 }
 
-static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
-                              int access_flags)
-{
-       struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
-       struct mlx5_umr_wr umrwr = {
-               .wr = {
-                       .send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
-                                     MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS,
-                       .opcode = MLX5_IB_WR_UMR,
-               },
-               .mkey = mr->mmkey.key,
-               .pd = pd,
-               .access_flags = access_flags,
-       };
-       int err;
-
-       err = mlx5_ib_post_send_wait(dev, &umrwr);
-       if (err)
-               return err;
-
-       mr->access_flags = access_flags;
-       return 0;
-}
-
 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
                                  struct ib_umem *new_umem,
                                  int new_access_flags, u64 iova,
@@ -1770,7 +1746,8 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                /* Fast path for PD/access change */
                if (can_use_umr_rereg_access(dev, mr->access_flags,
                                             new_access_flags)) {
-                       err = umr_rereg_pd_access(mr, new_pd, new_access_flags);
+                       err = mlx5r_umr_rereg_pd_access(mr, new_pd,
+                                                       new_access_flags);
                        if (err)
                                return ERR_PTR(err);
                        return NULL;
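
On this fast path, the NULL return from mlx5_ib_rereg_user_mr() tells
the RDMA core that the existing ib_mr was updated in place rather than
replaced. A condensed sketch of the flow (function body abbreviated by
us; names match the driver):

	/* Inside mlx5_ib_rereg_user_mr(): PD/access-only rereg. */
	if (can_use_umr_rereg_access(dev, mr->access_flags,
				     new_access_flags)) {
		err = mlx5r_umr_rereg_pd_access(mr, new_pd, new_access_flags);
		if (err)
			return ERR_PTR(err);
		return NULL;	/* keep the original MR */
	}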
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 2f14f6c..716c352 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -349,3 +349,44 @@ int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
 
        return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
 }
+
+static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
+                                      struct mlx5_mkey_seg *seg,
+                                      unsigned int access_flags)
+{
+       MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+       MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+       MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+       MLX5_SET(mkc, seg, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+       MLX5_SET(mkc, seg, lr, 1);
+       MLX5_SET(mkc, seg, relaxed_ordering_write,
+                !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+       MLX5_SET(mkc, seg, relaxed_ordering_read,
+                !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+}
+
+int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+                             int access_flags)
+{
+       struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+       struct mlx5r_umr_wqe wqe = {};
+       int err;
+
+       wqe.ctrl_seg.mkey_mask = get_umr_update_access_mask(dev);
+       wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
+       wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE;
+       wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
+
+       mlx5r_umr_set_access_flags(dev, &wqe.mkey_seg, access_flags);
+       MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(pd)->pdn);
+       MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+       MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+                mlx5_mkey_variant(mr->mmkey.key));
+
+       err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+       if (err)
+               return err;
+
+       mr->access_flags = access_flags;
+       return 0;
+}
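
MLX5_SET() stores a named field of a firmware-format structure (here
"mkc", the mkey context) into the big-endian buffer backing the
segment, using layouts generated from mlx5_ifc.h. The mkey_7_0 write
is worth a note: the low byte of a 32-bit mkey is its variant, and the
UMR WQE must carry it so the device applies the update to the current
incarnation of the mkey. The in-tree helper is a one-liner:

	/* From include/linux/mlx5/driver.h: the variant is the low byte. */
	static inline u8 mlx5_mkey_variant(u32 mkey)
	{
		return mkey & 0xff;
	}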
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
index c14072b..5381631 100644
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -92,5 +92,7 @@ struct mlx5r_umr_wqe {
 };
 
 int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
+int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+                             int access_flags);
 
 #endif /* _MLX5_IB_UMR_H */
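
As a usage illustration (hypothetical caller, not part of the patch):
given an existing mlx5_ib_mr whose flag combination already passed
can_use_umr_rereg_access(), the PD and access rights can be swapped
without re-registering the MR:

	/* Hypothetical: "mr", "new_pd" and "dev" are assumed to exist. */
	err = mlx5r_umr_rereg_pd_access(mr, new_pd,
					IB_ACCESS_LOCAL_WRITE |
					IB_ACCESS_REMOTE_READ);
	if (err)
		mlx5_ib_warn(dev, "PD/access rereg via UMR failed %d\n", err);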