RDMA/hns: Support QP's restrack raw ops for hns driver
authorWenpeng Liang <liangwenpeng@huawei.com>
Mon, 22 Aug 2022 10:44:53 +0000 (18:44 +0800)
committerLeon Romanovsky <leonro@nvidia.com>
Tue, 23 Aug 2022 08:35:13 +0000 (11:35 +0300)
The QP raw restrack attributes are read from the QP context (QPC) maintained by
the ROCEE (RoCE Engine) hardware.

For example:

$ rdma res show qp link hns_0 -jp -dd -r
[ {
        "ifindex": 4,
        "ifname": "hns_0",
        "data": [ 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,
  5,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,255,156,0,0,63,156,0,0,
  7,0,0,0,1,0,0,0,9,0,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,63,156,0,
  0,0,0,0,0 ]
    } ]

Link: https://lore.kernel.org/r/20220822104455.2311053-6-liangwenpeng@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_restrack.c

index 7578c0c..e039587 100644 (file)
@@ -895,6 +895,7 @@ struct hns_roce_hw {
        void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
        int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
        int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
+       int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
        const struct ib_device_ops *hns_roce_dev_ops;
        const struct ib_device_ops *hns_roce_dev_srq_ops;
 };
@@ -1226,6 +1227,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev);
 int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
 int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
 int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
 struct hns_user_mmap_entry *
 hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
                                size_t length,
index 979cd57..319de9a 100644 (file)
@@ -5307,9 +5307,8 @@ static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
        return (state < ARRAY_SIZE(map)) ? map[state] : -1;
 }
 
-static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
-                                struct hns_roce_qp *hr_qp,
-                                struct hns_roce_v2_qp_context *hr_context)
+static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
+                                void *buffer)
 {
        struct hns_roce_cmd_mailbox *mailbox;
        int ret;
@@ -5319,11 +5318,11 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
                return PTR_ERR(mailbox);
 
        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
-                               hr_qp->qpn);
+                               qpn);
        if (ret)
                goto out;
 
-       memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
+       memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz);
 
 out:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -5353,7 +5352,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                goto done;
        }
 
-       ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
+       ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context);
        if (ret) {
                ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
                ret = -EINVAL;
@@ -6645,6 +6644,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
        .cleanup_eq = hns_roce_v2_cleanup_eq_table,
        .write_srqc = hns_roce_v2_write_srqc,
        .query_cqc = hns_roce_v2_query_cqc,
+       .query_qpc = hns_roce_v2_query_qpc,
        .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
        .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
 };
index 8744202..17bc73c 100644 (file)
@@ -569,6 +569,7 @@ static const struct ib_device_ops hns_roce_dev_restrack_ops = {
        .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
        .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
        .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
+       .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
 };
 
 static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
index e8fef37..9bafc62 100644 (file)
@@ -112,3 +112,59 @@ err:
 
        return -EMSGSIZE;
 }
+
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
+       struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+       struct hns_roce_v2_qp_context context;
+       u32 data[MAX_ENTRY_NUM] = {};
+       int offset = 0;
+       int ret;
+
+       if (!hr_dev->hw->query_qpc)
+               return -EINVAL;
+
+       ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
+       if (ret)
+               return -EINVAL;
+
+       data[offset++] = hr_reg_read(&context, QPC_QP_ST);
+       data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE);
+       data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG);
+       data[offset++] = hr_reg_read(&context, QPC_SRQ_EN);
+       data[offset++] = hr_reg_read(&context, QPC_SRQN);
+       data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD);
+       data[offset++] = hr_reg_read(&context, QPC_TX_CQN);
+       data[offset++] = hr_reg_read(&context, QPC_RX_CQN);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX);
+       data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN);
+       data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX);
+       data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT);
+       data[offset++] = hr_reg_read(&context, QPC_RQWS);
+       data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT);
+       data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM);
+       data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM);
+       data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM);
+       data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ);
+       data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ);
+       data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
+       data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR);
+       data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR);
+       data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR);
+       data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR);
+       data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX);
+       data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR);
+
+       ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
+       return ret;
+}