net/mlx5e: Don't recycle page if moved to far NUMA
author    Tariq Toukan <tariqt@mellanox.com>
          Thu, 13 Jul 2017 15:26:40 +0000 (18:26 +0300)
committer Saeed Mahameed <saeedm@mellanox.com>
          Sun, 3 Sep 2017 03:34:09 +0000 (06:34 +0300)
Avoid recycling an RX page if it moved to another NUMA node.
Add an ethtool counter to count such events.
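
For reference, the recycling decision after this patch reduces to the check
below. The switch from numa_node_id() to numa_mem_id() also makes the
comparison correct on memoryless nodes, where numa_mem_id() returns the
nearest node that actually has memory. A minimal userspace sketch of the
logic (the stubbed struct page and helpers are illustrative stand-ins for
the kernel definitions, not the real ones):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's struct page and helpers. */
    struct page { bool pfmemalloc; int nid; };

    static int numa_mem_id(void) { return 0; } /* pretend the local memory node is 0 */
    static bool page_is_pfmemalloc(const struct page *p) { return p->pfmemalloc; }
    static int page_to_nid(const struct page *p) { return p->nid; }

    /* Mirrors mlx5e_page_is_reserved() after this patch: waive caching if
     * the page came from emergency reserves or sits on a remote NUMA node. */
    static bool mlx5e_page_is_reserved(const struct page *page)
    {
            return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
    }

    int main(void)
    {
            struct page local  = { .pfmemalloc = false, .nid = 0 };
            struct page remote = { .pfmemalloc = false, .nid = 1 };

            printf("local:  waive=%d\n", mlx5e_page_is_reserved(&local));  /* 0: recycle */
            printf("remote: waive=%d\n", mlx5e_page_is_reserved(&remote)); /* 1: counted as cache_waive */
            return 0;
    }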

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a4c9a0a..2da2ea2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -208,6 +208,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                s->rx_cache_full  += rq_stats->cache_full;
                s->rx_cache_empty += rq_stats->cache_empty;
                s->rx_cache_busy  += rq_stats->cache_busy;
+               s->rx_cache_waive += rq_stats->cache_waive;
 
                for (j = 0; j < priv->channels.params.num_tc; j++) {
                        sq_stats = &c->sq[j].stats;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 88a8749..f1dd638 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -163,7 +163,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 
 static inline bool mlx5e_page_is_reserved(struct page *page)
 {
-       return page_is_pfmemalloc(page) || page_to_nid(page) != numa_node_id();
+       return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
 }
 
 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
@@ -177,8 +177,10 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                return false;
        }
 
-       if (unlikely(page_is_pfmemalloc(dma_info->page)))
+       if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
+               rq->stats.cache_waive++;
                return false;
+       }
 
        cache->page_cache[cache->tail] = *dma_info;
        cache->tail = tail_next;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 6761796..6d199ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
--- b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -84,6 +84,7 @@ struct mlx5e_sw_stats {
        u64 rx_cache_full;
        u64 rx_cache_empty;
        u64 rx_cache_busy;
+       u64 rx_cache_waive;
 
        /* Special handling counters */
        u64 link_down_events_phy;
@@ -123,6 +124,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
 };
 
@@ -354,6 +356,7 @@ struct mlx5e_rq_stats {
        u64 cache_full;
        u64 cache_empty;
        u64 cache_busy;
+       u64 cache_waive;
 };
 
 static const struct counter_desc rq_stats_desc[] = {
@@ -377,6 +380,7 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
 };
 
 struct mlx5e_sq_stats {
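
With this patch applied, the new counter is exported through ethtool: the
aggregate value appears as rx_cache_waive and the per-ring values as
rx<n>_cache_waive in "ethtool -S <iface>" output (interface name and ring
index are placeholders). A steadily rising rx_cache_waive indicates RX
pages are being freed rather than recycled because they landed on a remote
NUMA node (or came from pfmemalloc reserves).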