sfc: advertise encapsulated offloads on EF10
Author:     Edward Cree <ecree@solarflare.com>
Date:       Fri, 11 Sep 2020 22:40:41 +0000 (23:40 +0100)
Committer:  David S. Miller <davem@davemloft.net>
CommitDate: Sat, 12 Sep 2020 00:15:22 +0000 (17:15 -0700)
This necessitates adding an .ndo_features_check callback, as the EF10
datapath has several limitations on what it can handle.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
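
As background for the hook this patch adds: .ndo_features_check lets a driver
veto offload features on a per-packet basis; the stack proposes a feature mask
for each skb and the driver clears any bits it cannot honour for that packet.
The stand-alone sketch below illustrates only that clamping pattern; the
struct, constant and helper names (fake_skb, MAX_ENCAP_HDRLEN, FEAT_*) are
invented for the example and are not kernel APIs.

/* Self-contained, user-space illustration of the .ndo_features_check
 * pattern: the caller proposes a per-packet feature mask and the
 * "driver" clears any offload bits it cannot handle for that packet.
 * All names below are invented for the example, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

#define FEAT_CSUM        (1u << 0)  /* stand-in for NETIF_F_CSUM_MASK */
#define FEAT_TSO         (1u << 1)  /* stand-in for NETIF_F_GSO_MASK  */
#define MAX_ENCAP_HDRLEN 208        /* mirrors EFX_TSO2_MAX_HDRLEN    */

struct fake_skb {
	bool     encapsulated;       /* packet carries a tunnel header? */
	unsigned inner_hdr_offset;   /* bytes of headers before payload */
	bool     tunnel_recognised;  /* does the NIC know this tunnel?  */
};

/* Analogous to efx_features_check(): return the subset of 'features'
 * that the (imaginary) hardware can apply to this particular packet.
 */
static unsigned features_check(const struct fake_skb *skb, unsigned features)
{
	if (!skb->encapsulated)
		return features;
	if (skb->inner_hdr_offset > MAX_ENCAP_HDRLEN)
		features &= ~FEAT_TSO;               /* headers too long for TSO */
	if (!skb->tunnel_recognised)
		features &= ~(FEAT_TSO | FEAT_CSUM); /* fall back to software    */
	return features;
}

int main(void)
{
	struct fake_skb skb = {
		.encapsulated = true,
		.inner_hdr_offset = 250,    /* exceeds the 208-byte limit */
		.tunnel_recognised = true,
	};

	/* Prints 0x1: TSO is stripped, checksum offload is kept. */
	printf("granted features: %#x\n",
	       features_check(&skb, FEAT_CSUM | FEAT_TSO));
	return 0;
}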
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx_common.c
drivers/net/ethernet/sfc/efx_common.h

diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 4775b82..c9df2e9 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1304,6 +1304,7 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
 static int efx_ef10_init_nic(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       netdev_features_t hw_enc_features = 0;
        int rc;
 
        if (nic_data->must_check_datapath_caps) {
@@ -1348,6 +1349,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
                nic_data->must_restore_piobufs = false;
        }
 
+       /* add encapsulated checksum offload features */
+       if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
+               hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       /* add encapsulated TSO features */
+       if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
+               netdev_features_t encap_tso_features;
+
+               encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+                       NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+
+               hw_enc_features |= encap_tso_features | NETIF_F_TSO;
+               efx->net_dev->features |= encap_tso_features;
+       }
+       efx->net_dev->hw_enc_features = hw_enc_features;
+
        /* don't fail init if RSS setup doesn't work */
        rc = efx->type->rx_push_rss_config(efx, false,
                                           efx->rss_context.rx_indir_table, NULL);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 58b043f..7183080 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -596,6 +596,7 @@ static const struct net_device_ops efx_netdev_ops = {
        .ndo_set_mac_address    = efx_set_mac_address,
        .ndo_set_rx_mode        = efx_set_rx_mode,
        .ndo_set_features       = efx_set_features,
+       .ndo_features_check     = efx_features_check,
        .ndo_vlan_rx_add_vid    = efx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = efx_vlan_rx_kill_vid,
 #ifdef CONFIG_SFC_SRIOV
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 80a23de..c256db2 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -11,6 +11,7 @@
 #include "net_driver.h"
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <net/gre.h>
 #include "efx_common.h"
 #include "efx_channels.h"
 #include "efx.h"
@@ -1287,6 +1288,89 @@ const struct pci_error_handlers efx_err_handlers = {
        .resume         = efx_io_resume,
 };
 
+/* Determine whether the NIC will be able to handle TX offloads for a given
+ * encapsulated packet.
+ */
+static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
+{
+       struct gre_base_hdr *greh;
+       __be16 dst_port;
+       u8 ipproto;
+
+       /* Does the NIC support encap offloads?
+        * If not, we should never get here, because we shouldn't have
+        * advertised encap offload feature flags in the first place.
+        */
+       if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
+               return false;
+
+       /* Determine encapsulation protocol in use */
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               ipproto = ip_hdr(skb)->protocol;
+               break;
+       case htons(ETH_P_IPV6):
+               /* If there are extension headers, this will cause us to
+                * think we can't offload something that we maybe could have.
+                */
+               ipproto = ipv6_hdr(skb)->nexthdr;
+               break;
+       default:
+               /* Not IP, so can't offload it */
+               return false;
+       }
+       switch (ipproto) {
+       case IPPROTO_GRE:
+               /* We support NVGRE but not IP over GRE or random gretaps.
+                * Specifically, the NIC will accept GRE as encapsulated if
+                * the inner protocol is Ethernet, but only handle it
+                * correctly if the GRE header is 8 bytes long.  Moreover,
+                * it will not update the Checksum or Sequence Number fields
+                * if they are present.  (The Routing Present flag,
+                * GRE_ROUTING, cannot be set else the header would be more
+                * than 8 bytes long; so we don't have to worry about it.)
+                */
+               if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+                       return false;
+               if (ntohs(skb->inner_protocol) != ETH_P_TEB)
+                       return false;
+               if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
+                       return false;
+               greh = (struct gre_base_hdr *)skb_transport_header(skb);
+               return !(greh->flags & (GRE_CSUM | GRE_SEQ));
+       case IPPROTO_UDP:
+               /* If the port is registered for a UDP tunnel, we assume the
+                * packet is for that tunnel, and the NIC will handle it as
+                * such.  If not, the NIC won't know what to do with it.
+                */
+               dst_port = udp_hdr(skb)->dest;
+               return efx->type->udp_tnl_has_port(efx, dst_port);
+       default:
+               return false;
+       }
+}
+
+netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
+                                    netdev_features_t features)
+{
+       struct efx_nic *efx = netdev_priv(dev);
+
+       if (skb->encapsulation) {
+               if (features & NETIF_F_GSO_MASK)
+                       /* Hardware can only do TSO with at most 208 bytes
+                        * of headers.
+                        */
+                       if (skb_inner_transport_offset(skb) >
+                           EFX_TSO2_MAX_HDRLEN)
+                               features &= ~(NETIF_F_GSO_MASK);
+               if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK))
+                       if (!efx_can_encap_offloads(efx, skb))
+                               features &= ~(NETIF_F_GSO_MASK |
+                                             NETIF_F_CSUM_MASK);
+       }
+       return features;
+}
+
 int efx_get_phys_port_id(struct net_device *net_dev,
                         struct netdev_phys_item_id *ppid)
 {
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 4056f68..65513fd 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -105,6 +105,9 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu);
 
 extern const struct pci_error_handlers efx_err_handlers;
 
+netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
+                                    netdev_features_t features);
+
 int efx_get_phys_port_id(struct net_device *net_dev,
                         struct netdev_phys_item_id *ppid);
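
For context (not part of this patch): the hook registered above is invoked by
the core transmit path from netif_skb_features() in net/core/dev.c. Abridged
and paraphrased from the upstream source around this kernel version, the
relevant portion looks roughly like this:

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;
	...
	/* Encapsulated packets are checked against hw_enc_features,
	 * which this patch now populates on EF10.
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;
	...
	/* Per-packet clamp: efx_features_check() hooks in here */
	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	...
	return harmonize_features(skb, features);
}

When efx_features_check() clears the GSO or checksum bits for a given packet,
the stack falls back to software segmentation and checksumming before handing
the resulting skbs to the driver.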