unsigned int mss = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+ bool xdp_xmit = false;
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
@@ ... @@
}
if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBE_XDP_TX)
+ if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
+ xdp_xmit = true;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
- else
+ } else {
rx_buffer->pagecnt_bias++;
+ }
total_rx_packets++;
total_rx_bytes += size;
} else if (skb) {
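The hunk above leans on an ERR_PTR convention: when the XDP verdict is XDP_TX, the frame has already been queued on the XDP Tx ring and the Rx page is flipped for reuse (ixgbe_rx_buffer_flip), so there is no skb to hand back; the run path reports the outcome as ERR_PTR(-IXGBE_XDP_TX), and the new xdp_xmit flag only records that at least one Tx descriptor is pending so the tail write can wait until the end of the poll (see the flush hunk below). A minimal sketch of decoding that marker, with the constant's value assumed for illustration rather than taken from the driver headers:

/* Sketch only: decode the "consumed by XDP_TX" marker checked above. */
#include <linux/err.h>
#include <linux/skbuff.h>

#define IXGBE_XDP_TX	2	/* value assumed for this sketch */

static bool frame_was_xdp_tx(const struct sk_buff *skb)
{
	return IS_ERR(skb) && PTR_ERR(skb) == -IXGBE_XDP_TX;
}

The producer side simply returns ERR_PTR(-IXGBE_XDP_TX) in place of an skb; the value still satisfies IS_ERR() because it falls inside the errno pointer range.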
@@ ... @@
total_rx_packets++;
}
+ if (xdp_xmit) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+ }
+
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
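The block added above is the payoff of the new flag: instead of one wmb() plus one MMIO tail write for every frame forwarded with XDP_TX, the doorbell of the current CPU's XDP Tx ring (adapter->xdp_ring[smp_processor_id()]) is rung at most once per NAPI poll, after every descriptor written during that poll. The per-CPU ring is also what lets this run without a lock. A rough sketch of the fill-many, ring-once split, with illustrative types and helpers rather than the driver's own:

/* Sketch only: descriptor writes never touch the doorbell; the single
 * flush at the end of the poll does.
 */
struct sketch_ring {
	void __iomem	*tail;		/* MMIO doorbell register */
	u16		next_to_use;	/* next free descriptor slot */
};

static void queue_one(struct sketch_ring *ring)
{
	/* write one Tx descriptor and advance the software index only */
	ring->next_to_use++;
}

static void flush_once(struct sketch_ring *ring)
{
	wmb();					/* descriptors before doorbell */
	writel(ring->next_to_use, ring->tail);	/* one MMIO write per poll */
}

The next hunk, in the per-frame xmit path, drops its own barrier and tail write accordingly.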
@@ ... @@
tx_desc->read.olinfo_status =
cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
- /* Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch. (Only applicable for weak-ordered
- * memory model archs, such as IA-64).
- *
- * We also need this memory barrier to make certain all of the
- * status bits have been updated before next_to_watch is written.
- */
- wmb();
+ /* Avoid any potential race with xdp_xmit and cleanup */
+ smp_wmb();
/* set next_to_watch value indicating a packet is present */
i++;
tx_buffer->next_to_watch = tx_desc;
ring->next_to_use = i;
- writel(i, ring->tail);
return IXGBE_XDP_TX;
}
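With the doorbell moved out of the per-frame xmit path (ixgbe_xmit_xdp_ring), the heavyweight wmb() is no longer needed here: ordering the descriptor writes ahead of the MMIO tail write now happens once, at the wmb() before writel() in the flush above. What remains is the CPU-to-CPU requirement the old comment also mentioned: Tx cleanup, possibly running on another CPU, must not observe next_to_watch before the descriptor it points at is fully written, and the cheaper smp_wmb() is enough for that. A sketch of the pairing with simplified, made-up types (the matching read-side barrier lives in the driver's Tx cleanup path, outside this excerpt):

/* Sketch only: publish/observe ordering between the xmit and cleanup paths. */
#include <linux/compiler.h>
#include <linux/types.h>

struct sketch_desc { u32 status; };
struct sketch_buf  { struct sketch_desc *next_to_watch; };

static void publish(struct sketch_buf *buf, struct sketch_desc *desc)
{
	/* caller has just filled *desc */
	smp_wmb();			/* descriptor words before the publish */
	buf->next_to_watch = desc;
}

static bool seen_complete(struct sketch_buf *buf)
{
	struct sketch_desc *eop = READ_ONCE(buf->next_to_watch);

	if (!eop)
		return false;
	smp_rmb();			/* pairs with smp_wmb() in publish() */
	return eop->status & 1;		/* e.g. a descriptor-done bit */
}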