From: Josh Hunt
Date: Fri, 11 Oct 2019 16:53:39 +0000 (-0400)
Subject: ixgbe: Add UDP segmentation offload support
X-Git-Tag: v5.10.7~3838^2~267^2~3
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c74d4bdbae4f00575362c2ba147ffcb31e724f04;p=platform%2Fkernel%2Flinux-rpi.git

ixgbe: Add UDP segmentation offload support

Repost from a series by Alexander Duyck to add UDP segmentation offload
support to the igb driver:
https://lore.kernel.org/netdev/20180504003916.4769.66271.stgit@localhost.localdomain/

CC: Alexander Duyck
CC: Willem de Bruijn
Suggested-by: Alexander Duyck
Signed-off-by: Josh Hunt
Tested-by: Andrew Bowers
Signed-off-by: Jeff Kirsher
---

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1ce2397..6c9edd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7946,6 +7946,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	} ip;
 	union {
 		struct tcphdr *tcp;
+		struct udphdr *udp;
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
@@ -7969,7 +7970,8 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	l4.hdr = skb_checksum_start(skb);
 
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+		IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
@@ -7999,12 +8001,20 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	/* determine offset of inner transport header */
 	l4_offset = l4.hdr - skb->data;
 
-	/* compute length of segmentation header */
-	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
 	/* remove payload length from inner checksum */
 	paylen = skb->len - l4_offset;
-	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+	if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+	} else {
+		/* compute length of segmentation header */
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+	}
 
 	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -10190,6 +10200,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 	if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_HW_VLAN_CTAG_TX |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
@@ -10198,6 +10209,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 	if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
 
@@ -10907,7 +10919,7 @@ skip_sriov:
 			    IXGBE_GSO_PARTIAL_FEATURES;
 
 	if (hw->mac.type >= ixgbe_mac_82599EB)
-		netdev->features |= NETIF_F_SCTP_CRC;
+		netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
 
 #ifdef CONFIG_IXGBE_IPSEC
 #define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
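
For context, the offload added above applies to skbs marked SKB_GSO_UDP_L4, which
userspace typically produces via the UDP_SEGMENT socket option. The sketch below is
an illustrative sender, not part of this series: the destination address, port,
segment size, and buffer size are made-up values, and the UDP_SEGMENT fallback
define mirrors the uapi constant in case the libc headers are older.

/*
 * Minimal sketch (assumption: not from this patch) of a sender that asks the
 * stack to segment one large UDP payload; with NETIF_F_GSO_UDP_L4 advertised
 * by ixgbe, that segmentation can be handed to the NIC.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103	/* mirrors include/uapi/linux/udp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Ask the stack to cut the payload into 1400-byte segments. */
	int gso_size = 1400;
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
		       &gso_size, sizeof(gso_size)) < 0) {
		perror("setsockopt(UDP_SEGMENT)");
		return 1;
	}

	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(9000),	/* hypothetical destination */
	};
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* example address */

	/* One sendto() carrying eight segments' worth of data; the GSO layer
	 * (or, with this patch, the ixgbe hardware) splits it into datagrams. */
	char buf[1400 * 8];
	memset(buf, 0, sizeof(buf));
	if (sendto(fd, buf, sizeof(buf), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}

Whether the segmentation actually reaches the hardware depends on the netdev
features: this patch sets NETIF_F_GSO_UDP_L4 for 82599 and later MACs and clears
it in ixgbe_features_check() for oversized MAC or network headers, in which case
the stack should fall back to software GSO.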