RDMA/hns: Use new interface to write FRMR fields
author    Yixing Liu <liuyixing1@huawei.com>
          Mon, 21 Jun 2021 08:00:41 +0000 (16:00 +0800)
committer Jason Gunthorpe <jgg@nvidia.com>
          Mon, 21 Jun 2021 18:03:42 +0000 (15:03 -0300)
Use "hr_reg_write" to replace "roce_set_filed".

Link: https://lore.kernel.org/r/1624262443-24528-8-git-send-email-liweihang@huawei.com
Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.h

diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 2266f9a..868a902 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -105,16 +105,12 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
        u64 pbl_ba;
 
        /* use ib_access_flags */
-       roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BIND_EN_S,
-                    !!(wr->access & IB_ACCESS_MW_BIND));
-       roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_ATOMIC_S,
-                    !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
-       roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RR_S,
-                    !!(wr->access & IB_ACCESS_REMOTE_READ));
-       roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RW_S,
-                    !!(wr->access & IB_ACCESS_REMOTE_WRITE));
-       roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_LW_S,
-                    !!(wr->access & IB_ACCESS_LOCAL_WRITE));
+       hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
+       hr_reg_write_bool(fseg, FRMR_ATOMIC,
+                         wr->access & IB_ACCESS_REMOTE_ATOMIC);
+       hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
+       hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
+       hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);
 
        /* Data structure reuse may lead to confusion */
        pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
@@ -126,11 +122,10 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
        rc_sq_wqe->rkey = cpu_to_le32(wr->key);
        rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
 
-       fseg->pbl_size = cpu_to_le32(mr->npages);
-       roce_set_field(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
-                      V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
-                      to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
-       roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
+       hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
+       hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
+                    to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+       hr_reg_clear(fseg, FRMR_BLK_MODE);
 }
 
 static void set_atomic_seg(const struct ib_send_wr *wr,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index d398cf0..e6880a9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1064,16 +1064,6 @@ struct hns_roce_v2_rc_send_wqe {
 
 #define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
 
-#define V2_RC_FRMR_WQE_BYTE_40_BIND_EN_S 10
-
-#define V2_RC_FRMR_WQE_BYTE_40_ATOMIC_S 11
-
-#define V2_RC_FRMR_WQE_BYTE_40_RR_S 12
-
-#define V2_RC_FRMR_WQE_BYTE_40_RW_S 13
-
-#define V2_RC_FRMR_WQE_BYTE_40_LW_S 14
-
 #define V2_RC_SEND_WQE_BYTE_4_FLAG_S 31
 
 #define        V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
@@ -1092,10 +1082,18 @@ struct hns_roce_wqe_frmr_seg {
        __le32  byte_40;
 };
 
-#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4
-#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M GENMASK(7, 4)
-
-#define V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S 8
+#define FRMR_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_wqe_frmr_seg, h, l)
+
+#define FRMR_PBL_SIZE FRMR_WQE_FIELD_LOC(31, 0)
+#define FRMR_BLOCK_SIZE FRMR_WQE_FIELD_LOC(35, 32)
+#define FRMR_PBL_BUF_PG_SZ FRMR_WQE_FIELD_LOC(39, 36)
+#define FRMR_BLK_MODE FRMR_WQE_FIELD_LOC(40, 40)
+#define FRMR_ZBVA FRMR_WQE_FIELD_LOC(41, 41)
+#define FRMR_BIND_EN FRMR_WQE_FIELD_LOC(42, 42)
+#define FRMR_ATOMIC FRMR_WQE_FIELD_LOC(43, 43)
+#define FRMR_RR FRMR_WQE_FIELD_LOC(44, 44)
+#define FRMR_RW FRMR_WQE_FIELD_LOC(45, 45)
+#define FRMR_LW FRMR_WQE_FIELD_LOC(46, 46)
 
 struct hns_roce_v2_wqe_data_seg {
        __le32    len;