RDMA/mlx5: Introduce ODP prefetch counter
author: Maor Gottlieb <maorg@mellanox.com>
Sun, 21 Jun 2020 10:41:47 +0000 (13:41 +0300)
committer: Jason Gunthorpe <jgg@nvidia.com>
Fri, 3 Jul 2020 12:16:25 +0000 (09:16 -0300)
For debugging purposes it will be easier to tell whether prefetch is
working correctly if it has its own counter. Introduce an ODP prefetch
counter and count, per MR, the total number of prefetched pages.

In addition, remove a comment which is no longer relevant and was not in
the correct place anyway.

Link: https://lore.kernel.org/r/20200621104147.53795-1-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/restrack.c
include/rdma/ib_verbs.h

index 7d2ec9e..ee88b32 100644 (file)
@@ -913,11 +913,6 @@ next_mr:
                if (ret < 0)
                        goto srcu_unlock;
 
-               /*
-                * When prefetching a page, page fault is generated
-                * in order to bring the page to the main memory.
-                * In the current flow, page faults are being counted.
-                */
                mlx5_update_odp_stats(mr, faults, ret);
 
                npages += ret;
@@ -1755,12 +1750,17 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
        struct prefetch_mr_work *work =
                container_of(w, struct prefetch_mr_work, work);
        u32 bytes_mapped = 0;
+       int ret;
        u32 i;
 
-       for (i = 0; i < work->num_sge; ++i)
-               pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
-                            work->frags[i].length, &bytes_mapped,
-                            work->pf_flags);
+       for (i = 0; i < work->num_sge; ++i) {
+               ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+                                  work->frags[i].length, &bytes_mapped,
+                                  work->pf_flags);
+               if (ret <= 0)
+                       continue;
+               mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
+       }
 
        destroy_prefetch_work(work);
 }
@@ -1818,6 +1818,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
                                   &bytes_mapped, pf_flags);
                if (ret < 0)
                        goto out;
+               mlx5_update_odp_stats(mr, prefetch, ret);
        }
        ret = 0;
 
index 224a639..32c6d03 100644 (file)
@@ -99,6 +99,9 @@ int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg,
                    msg, "page_invalidations",
                    atomic64_read(&mr->odp_stats.invalidations)))
                goto err_table;
+       if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
+                                        atomic64_read(&mr->odp_stats.prefetch)))
+               goto err_table;
 
        nla_nest_end(msg, table_attr);
        return 0;
index 1e902a8..f6b51a7 100644 (file)
@@ -2271,6 +2271,7 @@ struct rdma_netdev_alloc_params {
 struct ib_odp_counters {
        atomic64_t faults;
        atomic64_t invalidations;
+       atomic64_t prefetch;
 };
 
 struct ib_counters {