RDMA/erdma: Implement the lifecycle of reflushing work for each QP
authorCheng Xu <chengyou@linux.alibaba.com>
Wed, 16 Nov 2022 02:31:06 +0000 (10:31 +0800)
committerJason Gunthorpe <jgg@nvidia.com>
Thu, 24 Nov 2022 18:58:52 +0000 (14:58 -0400)
Each QP has a delayed work for the reflushing purpose. In this work, the
driver reports the latest producer index (PI) of the QP to the hardware.

Link: https://lore.kernel.org/r/20221116023107.82835-3-chengyou@linux.alibaba.com
Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/erdma/erdma_hw.h
drivers/infiniband/hw/erdma/erdma_verbs.c
drivers/infiniband/hw/erdma/erdma_verbs.h

index 1b2e2b7..ab371fe 100644 (file)
@@ -145,6 +145,7 @@ enum CMDQ_RDMA_OPCODE {
        CMDQ_OPCODE_MODIFY_QP = 3,
        CMDQ_OPCODE_CREATE_CQ = 4,
        CMDQ_OPCODE_DESTROY_CQ = 5,
+       CMDQ_OPCODE_REFLUSH = 6,
        CMDQ_OPCODE_REG_MR = 8,
        CMDQ_OPCODE_DEREG_MR = 9
 };
@@ -301,6 +302,13 @@ struct erdma_cmdq_destroy_qp_req {
        u32 qpn;
 };
 
+struct erdma_cmdq_reflush_req {
+       u64 hdr;
+       u32 qpn;
+       u32 sq_pi;
+       u32 rq_pi;
+};
+
 /* cap qword 0 definition */
 #define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
 #define ERDMA_CMD_DEV_CAP_FLAGS_MASK GENMASK_ULL(31, 24)
index d843ce1..5dab1e8 100644 (file)
@@ -379,6 +379,21 @@ int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
        return 0;
 }
 
+static void erdma_flush_worker(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct erdma_qp *qp =
+               container_of(dwork, struct erdma_qp, reflush_dwork);
+       struct erdma_cmdq_reflush_req req;
+
+       erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+                               CMDQ_OPCODE_REFLUSH);
+       req.qpn = QP_ID(qp);
+       req.sq_pi = qp->kern_qp.sq_pi;
+       req.rq_pi = qp->kern_qp.rq_pi;
+       erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL);
+}
+
 static int erdma_qp_validate_cap(struct erdma_dev *dev,
                                 struct ib_qp_init_attr *attrs)
 {
@@ -735,6 +750,7 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
        qp->attrs.max_send_sge = attrs->cap.max_send_sge;
        qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
        qp->attrs.state = ERDMA_QP_STATE_IDLE;
+       INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);
 
        ret = create_qp_cmd(dev, qp);
        if (ret)
@@ -1028,6 +1044,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
        erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
        up_write(&qp->state_lock);
 
+       cancel_delayed_work_sync(&qp->reflush_dwork);
+
        erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
                                CMDQ_OPCODE_DESTROY_QP);
        req.qpn = QP_ID(qp);
index a5574f0..9f341d0 100644 (file)
@@ -197,6 +197,8 @@ struct erdma_qp {
        struct erdma_cep *cep;
        struct rw_semaphore state_lock;
 
+       struct delayed_work reflush_dwork;
+
        union {
                struct erdma_kqp kern_qp;
                struct erdma_uqp user_qp;