ipv4: tunnels: use DEV_STATS_INC()
author	Eric Dumazet <edumazet@google.com>
	Tue, 15 Nov 2022 08:53:58 +0000
committer	David S. Miller <davem@davemloft.net>
	Wed, 16 Nov 2022 12:48:44 +0000
Most code paths in tunnels are lockless (e.g. NETIF_F_LLTX in tx).

Adopt the SMP-safe DEV_STATS_INC() to update dev->stats fields.
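
For context, a rough sketch of what these helpers expand to (the
authoritative definitions live in include/linux/netdevice.h; the
__FIELD names below follow the atomic_long_t-backed stats union added
by the parent series):

  /*
   * Sketch only: each dev->stats field is a union of "unsigned long FIELD"
   * and "atomic_long_t __FIELD", so lockless paths can use an atomic RMW
   * instead of a plain "field++", which can lose increments when two CPUs
   * update the same counter concurrently.
   */
  #define DEV_STATS_INC(DEV, FIELD)	atomic_long_inc(&(DEV)->stats.__##FIELD)
  #define DEV_STATS_ADD(DEV, FIELD, VAL) \
  		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)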

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/ipmr.c

diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d8ee523..a4ccef3 100644
@@ -510,7 +510,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 
 err_free_skb:
        kfree_skb(skb);
-       dev->stats.tx_dropped++;
+       DEV_STATS_INC(dev, tx_dropped);
 }
 
 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -592,7 +592,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 
 err_free_skb:
        kfree_skb(skb);
-       dev->stats.tx_dropped++;
+       DEV_STATS_INC(dev, tx_dropped);
 }
 
 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
@@ -663,7 +663,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 
 free_skb:
        kfree_skb(skb);
-       dev->stats.tx_dropped++;
+       DEV_STATS_INC(dev, tx_dropped);
        return NETDEV_TX_OK;
 }
 
@@ -717,7 +717,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 
 free_skb:
        kfree_skb(skb);
-       dev->stats.tx_dropped++;
+       DEV_STATS_INC(dev, tx_dropped);
        return NETDEV_TX_OK;
 }
 
@@ -745,7 +745,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 
 free_skb:
        kfree_skb(skb);
-       dev->stats.tx_dropped++;
+       DEV_STATS_INC(dev, tx_dropped);
        return NETDEV_TX_OK;
 }
 
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 019f3b0..de90b09 100644
@@ -368,23 +368,23 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 
 #ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(iph->daddr)) {
-               tunnel->dev->stats.multicast++;
+               DEV_STATS_INC(tunnel->dev, multicast);
                skb->pkt_type = PACKET_BROADCAST;
        }
 #endif
 
        if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
             ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
-               tunnel->dev->stats.rx_crc_errors++;
-               tunnel->dev->stats.rx_errors++;
+               DEV_STATS_INC(tunnel->dev, rx_crc_errors);
+               DEV_STATS_INC(tunnel->dev, rx_errors);
                goto drop;
        }
 
        if (tunnel->parms.i_flags&TUNNEL_SEQ) {
                if (!(tpi->flags&TUNNEL_SEQ) ||
                    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
-                       tunnel->dev->stats.rx_fifo_errors++;
-                       tunnel->dev->stats.rx_errors++;
+                       DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
+                       DEV_STATS_INC(tunnel->dev, rx_errors);
                        goto drop;
                }
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
@@ -398,8 +398,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                        &iph->saddr, iph->tos);
                if (err > 1) {
-                       ++tunnel->dev->stats.rx_frame_errors;
-                       ++tunnel->dev->stats.rx_errors;
+                       DEV_STATS_INC(tunnel->dev, rx_frame_errors);
+                       DEV_STATS_INC(tunnel->dev, rx_errors);
                        goto drop;
                }
        }
@@ -581,7 +581,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        if (!rt) {
                rt = ip_route_output_key(tunnel->net, &fl4);
                if (IS_ERR(rt)) {
-                       dev->stats.tx_carrier_errors++;
+                       DEV_STATS_INC(dev, tx_carrier_errors);
                        goto tx_error;
                }
                if (use_cache)
@@ -590,7 +590,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        }
        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
-               dev->stats.collisions++;
+               DEV_STATS_INC(dev, collisions);
                goto tx_error;
        }
 
@@ -625,10 +625,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                      df, !net_eq(tunnel->net, dev_net(dev)));
        return;
 tx_error:
-       dev->stats.tx_errors++;
+       DEV_STATS_INC(dev, tx_errors);
        goto kfree;
 tx_dropped:
-       dev->stats.tx_dropped++;
+       DEV_STATS_INC(dev, tx_dropped);
 kfree:
        kfree_skb(skb);
 }
@@ -662,7 +662,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                /* NBMA tunnel */
 
                if (!skb_dst(skb)) {
-                       dev->stats.tx_fifo_errors++;
+                       DEV_STATS_INC(dev, tx_fifo_errors);
                        goto tx_error;
                }
 
@@ -749,7 +749,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                rt = ip_route_output_key(tunnel->net, &fl4);
 
                if (IS_ERR(rt)) {
-                       dev->stats.tx_carrier_errors++;
+                       DEV_STATS_INC(dev, tx_carrier_errors);
                        goto tx_error;
                }
                if (use_cache)
@@ -762,7 +762,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
-               dev->stats.collisions++;
+               DEV_STATS_INC(dev, collisions);
                goto tx_error;
        }
 
@@ -805,7 +805,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
        if (skb_cow_head(skb, dev->needed_headroom)) {
                ip_rt_put(rt);
-               dev->stats.tx_dropped++;
+               DEV_STATS_INC(dev, tx_dropped);
                kfree_skb(skb);
                return;
        }
@@ -819,7 +819,7 @@ tx_error_icmp:
        dst_link_failure(skb);
 #endif
 tx_error:
-       dev->stats.tx_errors++;
+       DEV_STATS_INC(dev, tx_errors);
        kfree_skb(skb);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 8c2bd1d..53bfd8a 100644
@@ -107,8 +107,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        dev = tunnel->dev;
 
        if (err) {
-               dev->stats.rx_errors++;
-               dev->stats.rx_dropped++;
+               DEV_STATS_INC(dev, rx_errors);
+               DEV_STATS_INC(dev, rx_dropped);
 
                return 0;
        }
@@ -183,7 +183,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                        fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
                        rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
                        if (IS_ERR(rt)) {
-                               dev->stats.tx_carrier_errors++;
+                               DEV_STATS_INC(dev, tx_carrier_errors);
                                goto tx_error_icmp;
                        }
                        dst = &rt->dst;
@@ -198,14 +198,14 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                        if (dst->error) {
                                dst_release(dst);
                                dst = NULL;
-                               dev->stats.tx_carrier_errors++;
+                               DEV_STATS_INC(dev, tx_carrier_errors);
                                goto tx_error_icmp;
                        }
                        skb_dst_set(skb, dst);
                        break;
 #endif
                default:
-                       dev->stats.tx_carrier_errors++;
+                       DEV_STATS_INC(dev, tx_carrier_errors);
                        goto tx_error_icmp;
                }
        }
@@ -213,7 +213,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        dst_hold(dst);
        dst = xfrm_lookup_route(tunnel->net, dst, fl, NULL, 0);
        if (IS_ERR(dst)) {
-               dev->stats.tx_carrier_errors++;
+               DEV_STATS_INC(dev, tx_carrier_errors);
                goto tx_error_icmp;
        }
 
@@ -221,7 +221,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                goto xmit;
 
        if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
-               dev->stats.tx_carrier_errors++;
+               DEV_STATS_INC(dev, tx_carrier_errors);
                dst_release(dst);
                goto tx_error_icmp;
        }
@@ -230,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 
        if (tdev == dev) {
                dst_release(dst);
-               dev->stats.collisions++;
+               DEV_STATS_INC(dev, collisions);
                goto tx_error;
        }
 
@@ -267,7 +267,7 @@ xmit:
 tx_error_icmp:
        dst_link_failure(skb);
 tx_error:
-       dev->stats.tx_errors++;
+       DEV_STATS_INC(dev, tx_errors);
        kfree_skb(skb);
        return NETDEV_TX_OK;
 }
@@ -304,7 +304,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        return vti_xmit(skb, dev, &fl);
 
 tx_err:
-       dev->stats.tx_errors++;
+       DEV_STATS_INC(dev, tx_errors);
        kfree_skb(skb);
        return NETDEV_TX_OK;
 }
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 180f9da..abea777 100644
@@ -310,7 +310,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
 tx_error:
        kfree_skb(skb);
 
-       dev->stats.tx_errors++;
+       DEV_STATS_INC(dev, tx_errors);
        return NETDEV_TX_OK;
 }
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index e04544a..b58df3c 100644
@@ -506,8 +506,8 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
                return err;
        }
 
-       dev->stats.tx_bytes += skb->len;
-       dev->stats.tx_packets++;
+       DEV_STATS_ADD(dev, tx_bytes, skb->len);
+       DEV_STATS_INC(dev, tx_packets);
        rcu_read_lock();
 
        /* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
@@ -1839,8 +1839,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
        if (vif->flags & VIFF_REGISTER) {
                WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
                WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
-               vif_dev->stats.tx_bytes += skb->len;
-               vif_dev->stats.tx_packets++;
+               DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
+               DEV_STATS_INC(vif_dev, tx_packets);
                ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
                goto out_free;
        }
@@ -1898,8 +1898,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
        if (vif->flags & VIFF_TUNNEL) {
                ip_encap(net, skb, vif->local, vif->remote);
                /* FIXME: extra output firewall step used to be here. --RR */
-               vif_dev->stats.tx_packets++;
-               vif_dev->stats.tx_bytes += skb->len;
+               DEV_STATS_INC(vif_dev, tx_packets);
+               DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
        }
 
        IPCB(skb)->flags |= IPSKB_FORWARDED;