RDMA/pvrdma: Use ib_umem_num_dma_blocks() instead of ib_umem_page_count()
authorJason Gunthorpe <jgg@nvidia.com>
Fri, 4 Sep 2020 22:41:55 +0000 (19:41 -0300)
committerJason Gunthorpe <jgg@nvidia.com>
Fri, 11 Sep 2020 13:24:54 +0000 (10:24 -0300)
This driver always uses PAGE_SIZE as its HW page size, so
ib_umem_num_dma_blocks(umem, PAGE_SIZE) computes the same count that
ib_umem_page_count() did; replace the deprecated helper accordingly.

Link: https://lore.kernel.org/r/14-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c

index 32aede5..319546a 100644 (file)
@@ -142,7 +142,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                        goto err_cq;
                }
 
-               npages = ib_umem_page_count(cq->umem);
+               npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
        } else {
                /* One extra page for shared ring state */
                npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
index 9a8f2a9..8a385ac 100644 (file)
@@ -298,9 +298,11 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                                goto err_qp;
                        }
 
-                       qp->npages_send = ib_umem_page_count(qp->sumem);
+                       qp->npages_send =
+                               ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
                        if (!is_srq)
-                               qp->npages_recv = ib_umem_page_count(qp->rumem);
+                               qp->npages_recv = ib_umem_num_dma_blocks(
+                                       qp->rumem, PAGE_SIZE);
                        else
                                qp->npages_recv = 0;
                        qp->npages = qp->npages_send + qp->npages_recv;
index f680227..082208f 100644 (file)
@@ -152,7 +152,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
                goto err_srq;
        }
 
-       srq->npages = ib_umem_page_count(srq->umem);
+       srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);
 
        if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
                dev_warn(&dev->pdev->dev,