RDMA/erdma: Use fixed hardware page size
authorCheng Xu <chengyou@linux.alibaba.com>
Tue, 7 Mar 2023 10:29:23 +0000 (18:29 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 May 2023 14:03:33 +0000 (23:03 +0900)
[ Upstream commit d649c638dc26f3501da510cf7fceb5c15ca54258 ]

The hardware's page size is 4096 bytes, but the kernel's page size may vary. The driver
should use the hardware's page size when communicating with the hardware.

Fixes: 155055771704 ("RDMA/erdma: Add verbs implementation")
Link: https://lore.kernel.org/r/20230307102924.70577-2-chengyou@linux.alibaba.com
Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/infiniband/hw/erdma/erdma_hw.h
drivers/infiniband/hw/erdma/erdma_verbs.c

index c533c69..2eb41e6 100644 (file)
 
 #define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
 
+/* Hardware page size definition */
+#define ERDMA_HW_PAGE_SHIFT 12
+#define ERDMA_HW_PAGE_SIZE 4096
+
 /* WQE related. */
 #define EQE_SIZE 16
 #define EQE_SHIFT 4
index 19c69ea..654d851 100644 (file)
@@ -38,7 +38,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
                   FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
 
        if (rdma_is_kernel_res(&qp->ibqp.res)) {
-               u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;
+               u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
 
                req.sq_cqn_mtt_cfg =
                        FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
@@ -66,13 +66,13 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
                user_qp = &qp->user_qp;
                req.sq_cqn_mtt_cfg = FIELD_PREP(
                        ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
-                       ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
+                       ilog2(user_qp->sq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
                req.sq_cqn_mtt_cfg |=
                        FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
 
                req.rq_cqn_mtt_cfg = FIELD_PREP(
                        ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
-                       ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
+                       ilog2(user_qp->rq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
                req.rq_cqn_mtt_cfg |=
                        FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
 
@@ -163,7 +163,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
        if (rdma_is_kernel_res(&cq->ibcq.res)) {
                page_size = SZ_32M;
                req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
-                                      ilog2(page_size) - PAGE_SHIFT);
+                                      ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
                req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
                req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
 
@@ -176,8 +176,9 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
                        cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
        } else {
                mtt = &cq->user_cq.qbuf_mtt;
-               req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
-                                      ilog2(mtt->page_size) - PAGE_SHIFT);
+               req.cfg0 |=
+                       FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+                                  ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT);
                if (mtt->mtt_nents == 1) {
                        req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
                        req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
@@ -618,7 +619,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
        u32 rq_offset;
        int ret;
 
-       if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
+       if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
                   qp->attrs.rq_size * RQE_SIZE))
                return -EINVAL;
 
@@ -628,7 +629,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
        if (ret)
                return ret;
 
-       rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
+       rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
        qp->user_qp.rq_offset = rq_offset;
 
        ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,