net: atlantic: additional per-queue stats
author: Dmitry Bogdanov <dbogdanov@marvell.com>
Mon, 20 Jul 2020 18:32:37 +0000 (21:32 +0300)
committer: David S. Miller <davem@davemloft.net>
Tue, 21 Jul 2020 01:07:38 +0000 (18:07 -0700)
This patch adds additional per-queue stats, which could
be useful for debugging and diagnostics.

Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.h
drivers/net/ethernet/aquantia/atlantic/aq_vec.c

index 98ba835..9e18d30 100644 (file)
@@ -94,6 +94,9 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
        "%sQueue[%d] InJumboPackets",
        "%sQueue[%d] InLroPackets",
        "%sQueue[%d] InErrors",
+       "%sQueue[%d] AllocFails",
+       "%sQueue[%d] SkbAllocFails",
+       "%sQueue[%d] Polls",
 };
 
 static const char * const aq_ethtool_queue_tx_stat_names[] = {
index b51ab2d..4f91365 100644 (file)
@@ -94,6 +94,11 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
        if (!rxbuf->rxdata.page) {
                ret = aq_get_rxpage(&rxbuf->rxdata, order,
                                    aq_nic_get_dev(self->aq_nic));
+               if (ret) {
+                       u64_stats_update_begin(&self->stats.rx.syncp);
+                       self->stats.rx.alloc_fails++;
+                       u64_stats_update_end(&self->stats.rx.syncp);
+               }
                return ret;
        }
 
@@ -414,6 +419,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                        skb = build_skb(aq_buf_vaddr(&buff->rxdata),
                                        AQ_CFG_RX_FRAME_MAX);
                        if (unlikely(!skb)) {
+                               u64_stats_update_begin(&self->stats.rx.syncp);
+                               self->stats.rx.skb_alloc_fails++;
+                               u64_stats_update_end(&self->stats.rx.syncp);
                                err = -ENOMEM;
                                goto err_exit;
                        }
@@ -427,6 +435,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                } else {
                        skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
                        if (unlikely(!skb)) {
+                               u64_stats_update_begin(&self->stats.rx.syncp);
+                               self->stats.rx.skb_alloc_fails++;
+                               u64_stats_update_end(&self->stats.rx.syncp);
                                err = -ENOMEM;
                                goto err_exit;
                        }
@@ -599,6 +610,9 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
                        data[++count] = self->stats.rx.jumbo_packets;
                        data[++count] = self->stats.rx.lro_packets;
                        data[++count] = self->stats.rx.errors;
+                       data[++count] = self->stats.rx.alloc_fails;
+                       data[++count] = self->stats.rx.skb_alloc_fails;
+                       data[++count] = self->stats.rx.polls;
                } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
        } else {
                /* This data should mimic aq_ethtool_queue_tx_stat_names structure */
index c92c3a0..93659e5 100644 (file)
@@ -95,6 +95,9 @@ struct aq_ring_stats_rx_s {
        u64 bytes;
        u64 lro_packets;
        u64 jumbo_packets;
+       u64 alloc_fails;
+       u64 skb_alloc_fails;
+       u64 polls;
        u64 pg_losts;
        u64 pg_flips;
        u64 pg_reuses;
index b008d12..d281322 100644 (file)
@@ -45,6 +45,9 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
        } else {
                for (i = 0U, ring = self->ring[0];
                        self->tx_rings > i; ++i, ring = self->ring[i]) {
+                       u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
+                       ring[AQ_VEC_RX_ID].stats.rx.polls++;
+                       u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
                        if (self->aq_hw_ops->hw_ring_tx_head_update) {
                                err = self->aq_hw_ops->hw_ring_tx_head_update(
                                                        self->aq_hw,