i40e: Add a stat tracking new RX page allocations
Author:     Joe Damato <jdamato@fastly.com>
AuthorDate: Fri, 17 Dec 2021 19:35:17 +0000 (11:35 -0800)
Commit:     Tony Nguyen <anthony.l.nguyen@intel.com>
CommitDate: Tue, 8 Feb 2022 16:21:52 +0000 (08:21 -0800)
Add a counter for new page allocations in the i40e RX path. The stat is
exported via ethtool as rx_cache_alloc.
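
As a quick usage sketch (the interface name eth0 below is only an assumed
example), the new counter can be read alongside the existing RX page stats
with:

    ethtool -S eth0 | grep rx_cache_alloc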

Signed-off-by: Joe Damato <jdamato@fastly.com>
Tested-by: Dave Switzer <david.switzer@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h

diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ee85165..ea8021d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -855,6 +855,7 @@ struct i40e_vsi {
        u64 rx_buf_failed;
        u64 rx_page_failed;
        u64 rx_page_reuse;
+       u64 rx_page_alloc;
 
        /* These are containers of ring pointers, allocated at run-time */
        struct i40e_ring **rx_rings;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9317b2d..17a16b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -296,6 +296,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
        I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
        I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
        I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
+       I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
 };
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c22e7b1..9d62f58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -773,8 +773,8 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
  **/
 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 {
+       u64 rx_page, rx_buf, rx_reuse, rx_alloc;
        struct i40e_pf *pf = vsi->back;
-       u64 rx_page, rx_buf, rx_reuse;
        struct rtnl_link_stats64 *ons;
        struct rtnl_link_stats64 *ns;   /* netdev stats */
        struct i40e_eth_stats *oes;
@@ -807,6 +807,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        rx_page = 0;
        rx_buf = 0;
        rx_reuse = 0;
+       rx_alloc = 0;
        rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
@@ -841,6 +842,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                rx_buf += p->rx_stats.alloc_buff_failed;
                rx_page += p->rx_stats.alloc_page_failed;
                rx_reuse += p->rx_stats.page_reuse_count;
+               rx_alloc += p->rx_stats.page_alloc_count;
 
                if (i40e_enabled_xdp_vsi(vsi)) {
                        /* locate XDP ring */
@@ -869,6 +871,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;
        vsi->rx_page_reuse = rx_reuse;
+       vsi->rx_page_alloc = rx_alloc;
 
        ns->rx_packets = rx_p;
        ns->rx_bytes = rx_b;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index da4929e..54fd497 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1673,6 +1673,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
                return false;
        }
 
+       rx_ring->rx_stats.page_alloc_count++;
+
        /* map page for use */
        dma = dma_map_page_attrs(rx_ring->dev, page, 0,
                                 i40e_rx_pg_size(rx_ring),
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 88387a6..13188dc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -298,6 +298,7 @@ struct i40e_rx_queue_stats {
        u64 alloc_page_failed;
        u64 alloc_buff_failed;
        u64 page_reuse_count;
+       u64 page_alloc_count;
 };
 
 enum i40e_ring_state_t {