if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++tx_ring->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
}
}
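
These hunks assume the per-queue counters have moved out of struct ixgbe_adapter and into the ring itself, with the Tx-only and Rx-only counters overlapping in a union since a ring is only ever one or the other. A minimal sketch of the layout the patch presumes (reconstructed; only the fields visible in the hunks are certain, the grouping and any other members are assumptions):

struct ixgbe_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
};

struct ixgbe_rx_queue_stats {
	u64 rsc_count;
	u64 rsc_flush;
	u64 non_eop_descs;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
};

/* assumed members of struct ixgbe_ring */
struct ixgbe_queue_stats stats;	/* packets/bytes, common to Tx and Rx */
struct u64_stats_sync syncp;	/* protects stats for 32-bit readers */
union {
	struct ixgbe_tx_queue_stats tx_stats;
	struct ixgbe_rx_queue_stats rx_stats;
};
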
skb = netdev_alloc_skb_ip_align(adapter->netdev,
rx_ring->rx_buf_len);
if (!skb) {
- adapter->alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
skb_record_rx_queue(skb, rx_ring->queue_index);
bi->skb = skb;
bi->dma = dma_map_single(rx_ring->dev, bi->skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- adapter->alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
bi->dma = 0;
goto no_buffers;
}
if (!bi->page) {
bi->page = netdev_alloc_page(adapter->netdev);
if (!bi->page) {
- adapter->alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_rx_page_failed++;
goto no_buffers;
}
}
bi->page_dma = dma_map_page(rx_ring->dev, bi->page, bi->page_offset,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev,
bi->page_dma)) {
- adapter->alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_rx_page_failed++;
bi->page_dma = 0;
goto no_buffers;
}
if (staterr & IXGBE_RXD_STAT_EOP) {
if (skb->prev)
skb = ixgbe_transform_rsc_queue(skb,
- &(rx_ring->rsc_count));
+ &(rx_ring->rx_stats.rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
if (IXGBE_RSC_CB(skb)->delay_unmap) {
dma_unmap_single(rx_ring->dev, IXGBE_RSC_CB(skb)->dma,
rx_ring->rx_buf_len, DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
IXGBE_RSC_CB(skb)->delay_unmap = false;
}
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
- rx_ring->rsc_count +=
- skb_shinfo(skb)->nr_frags;
+ rx_ring->rx_stats.rsc_count +=
+ skb_shinfo(skb)->nr_frags;
else
- rx_ring->rsc_count++;
- rx_ring->rsc_flush++;
+ rx_ring->rx_stats.rsc_count++;
+ rx_ring->rx_stats.rsc_flush++;
}
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets++;
rx_ring->stats.bytes += skb->len;
u64_stats_update_end(&rx_ring->syncp);
skb->next = next_buffer->skb;
skb->next->prev = skb;
}
- rx_ring->non_eop_descs++;
+ rx_ring->rx_stats.non_eop_descs++;
goto next_desc;
}
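
The u64_stats_update_begin()/u64_stats_update_end() pair above makes the 64-bit packet and byte counters safe to read on 32-bit machines, where a u64 update is not atomic. A reader (for example a dev_get_stats-style path) would pair with it roughly as follows; the helper name here is hypothetical, but the fetch/retry loop is the standard pattern from linux/u64_stats_sync.h:

/* hypothetical reader for the syncp-protected ring counters */
static void example_read_ring_stats(struct ixgbe_ring *ring,
				    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}
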
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
- u64 non_eop_descs = 0, restart_queue = 0;
- struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 bytes = 0, packets = 0;
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i]->rsc_count;
- rsc_flush += adapter->rx_ring[i]->rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
}
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ bytes += rx_ring->stats.bytes;
+ packets += rx_ring->stats.packets;
+ }
+ adapter->non_eop_descs = non_eop_descs;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ netdev->stats.rx_bytes = bytes;
+ netdev->stats.rx_packets = packets;
+
+ bytes = 0;
+ packets = 0;
/* gather some stats to the adapter struct that are per queue */
- for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i]->restart_queue;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ restart_queue += tx_ring->tx_stats.restart_queue;
+ tx_busy += tx_ring->tx_stats.tx_busy;
+ bytes += tx_ring->stats.bytes;
+ packets += tx_ring->stats.packets;
+ }
adapter->restart_queue = restart_queue;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
- adapter->non_eop_descs = non_eop_descs;
+ adapter->tx_busy = tx_busy;
+ netdev->stats.tx_bytes = bytes;
+ netdev->stats.tx_packets = packets;
hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(netdev, tx_ring->queue_index);
- ++tx_ring->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
return 0;
}
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
- adapter->tx_busy++;
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
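
For context on the two Tx counters: tx_busy counts the times the transmit path had to return NETDEV_TX_BUSY because the ring lacked descriptors, while restart_queue counts the "reprieve" case above, where the queue was stopped but space reappeared before the stop took effect. The check that routes between the two is presumably a thin wrapper over the slow path shown earlier, along these lines (a sketch reconstructed from the visible call sites; the exact body may differ):

/* sketch, assuming the driver's IXGBE_DESC_UNUSED() ring-space macro */
static int ixgbe_maybe_stop_tx(struct net_device *netdev,
			       struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	/* slow path: stop the queue, then re-check under a barrier */
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}
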