RDMA/mlx5: Remove mlx5_ib_mr->npages
author: Jason Gunthorpe <jgg@nvidia.com>
Mon, 26 Oct 2020 13:19:32 +0000 (15:19 +0200)
committer: Jason Gunthorpe <jgg@nvidia.com>
Mon, 2 Nov 2020 18:52:26 +0000 (14:52 -0400)
This is the same value as ib_umem_num_pages(mr->umem); use that instead.

Link: https://lore.kernel.org/r/20201026131936.1335664-4-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c

index 93310dd..0eaf992 100644 (file)
@@ -602,7 +602,6 @@ struct mlx5_ib_mr {
        struct mlx5_shared_mr_info      *smr_info;
        struct list_head        list;
        struct mlx5_cache_ent  *cache_ent;
-       int                     npages;
        struct mlx5_ib_dev     *dev;
        u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
        struct mlx5_core_sig_ctx    *sig;
index f3a2811..b6d9419 100644 (file)
@@ -1417,8 +1417,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
 
        mr->umem = umem;
-       mr->npages = npages;
-       atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
+       atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
        set_mr_fields(dev, mr, length, access_flags);
 
        if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
@@ -1551,8 +1550,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                 * used.
                 */
                flags |= IB_MR_REREG_TRANS;
-               atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
-               mr->npages = 0;
+               atomic_sub(ib_umem_num_pages(mr->umem),
+                          &dev->mdev->priv.reg_pages);
                ib_umem_release(mr->umem);
                mr->umem = NULL;
 
@@ -1560,8 +1559,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                                  &npages, &page_shift, &ncont, &order);
                if (err)
                        goto err;
-               mr->npages = ncont;
-               atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
+               atomic_add(ib_umem_num_pages(mr->umem),
+                          &dev->mdev->priv.reg_pages);
        }
 
        if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
@@ -1694,7 +1693,6 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
-       int npages = mr->npages;
        struct ib_umem *umem = mr->umem;
 
        /* Stop all DMA */
@@ -1703,14 +1701,17 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
        else
                clean_mr(dev, mr);
 
+       if (umem) {
+               if (!is_odp_mr(mr))
+                       atomic_sub(ib_umem_num_pages(umem),
+                                  &dev->mdev->priv.reg_pages);
+               ib_umem_release(umem);
+       }
+
        if (mr->cache_ent)
                mlx5_mr_cache_free(dev, mr);
        else
                kfree(mr);
-
-       ib_umem_release(umem);
-       atomic_sub(npages, &dev->mdev->priv.reg_pages);
-
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)