igb: Add support for padding packet
Author:    Alexander Duyck <alexander.h.duyck@intel.com>
           Tue, 7 Feb 2017 02:27:14 +0000 (18:27 -0800)
Committer: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
           Fri, 17 Mar 2017 19:11:44 +0000 (12:11 -0700)
With the size of the frame limited we can now write to an offset within the
buffer instead of having to write at the very start of the buffer. The
advantage to this is that it allows us to leave padding room for things
like supporting XDP in the future.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c

index eb91c87..dc6e298 100644 (file)
@@ -314,6 +314,7 @@ struct igb_q_vector {
 
 enum e1000_ring_flags_t {
        IGB_RING_FLAG_RX_3K_BUFFER,
+       IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
        IGB_RING_FLAG_RX_SCTP_CSUM,
        IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
        IGB_RING_FLAG_TX_CTX_IDX,
@@ -327,11 +328,21 @@ enum e1000_ring_flags_t {
 #define clear_ring_uses_large_buffer(ring) \
        clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
 
+#define ring_uses_build_skb(ring) \
+       test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+       set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+       clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
 static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
 {
 #if (PAGE_SIZE < 8192)
        if (ring_uses_large_buffer(ring))
                return IGB_RXBUFFER_3072;
+
+       if (ring_uses_build_skb(ring))
+               return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
 #endif
        return IGB_RXBUFFER_2048;
 }
index 24c20d4..3ef6657 100644 (file)
@@ -3783,11 +3783,14 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
                                  struct igb_ring *rx_ring)
 {
        /* set build_skb and buffer size flags */
+       clear_ring_build_skb_enabled(rx_ring);
        clear_ring_uses_large_buffer(rx_ring);
 
        if (adapter->flags & IGB_FLAG_RX_LEGACY)
                return;
 
+       set_ring_build_skb_enabled(rx_ring);
+
 #if (PAGE_SIZE < 8192)
        if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
                return;
@@ -6957,7 +6960,9 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = SKB_DATA_ALIGN(size);
+       unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+                               SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
+                               SKB_DATA_ALIGN(size);
 #endif
        unsigned int pull_len;
 
@@ -7293,6 +7298,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        return total_packets;
 }
 
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+       return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
                                  struct igb_rx_buffer *bi)
 {
@@ -7328,7 +7338,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 
        bi->dma = dma;
        bi->page = page;
-       bi->page_offset = 0;
+       bi->page_offset = igb_rx_offset(rx_ring);
        bi->pagecnt_bias = 1;
 
        return true;