ipv4: add (struct uncached_list)->quarantine list
author: Eric Dumazet <edumazet@google.com>
Thu, 10 Feb 2022 21:42:31 +0000 (13:42 -0800)
committer: David S. Miller <davem@davemloft.net>
Fri, 11 Feb 2022 11:44:27 +0000 (11:44 +0000)
This is an optimization to keep the per-cpu lists as short as possible:

Whenever rt_flush_dev() changes one rtable dst.dev
matching the disappearing device, it can transfer the object
to a quarantine list, waiting for a final rt_del_uncached_list().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/route.c

index 634766e..202d6b1 100644 (file)
@@ -1485,6 +1485,7 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
 struct uncached_list {
        spinlock_t              lock;
        struct list_head        head;
+       struct list_head        quarantine;
 };
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
@@ -1506,7 +1507,7 @@ void rt_del_uncached_list(struct rtable *rt)
                struct uncached_list *ul = rt->rt_uncached_list;
 
                spin_lock_bh(&ul->lock);
-               list_del(&rt->rt_uncached);
+               list_del_init(&rt->rt_uncached);
                spin_unlock_bh(&ul->lock);
        }
 }
@@ -1521,20 +1522,24 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 
 void rt_flush_dev(struct net_device *dev)
 {
-       struct rtable *rt;
+       struct rtable *rt, *safe;
        int cpu;
 
        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
+               if (list_empty(&ul->head))
+                       continue;
+
                spin_lock_bh(&ul->lock);
-               list_for_each_entry(rt, &ul->head, rt_uncached) {
+               list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) {
                        if (rt->dst.dev != dev)
                                continue;
                        rt->dst.dev = blackhole_netdev;
                        dev_replace_track(dev, blackhole_netdev,
                                          &rt->dst.dev_tracker,
                                          GFP_ATOMIC);
+                       list_move(&rt->rt_uncached, &ul->quarantine);
                }
                spin_unlock_bh(&ul->lock);
        }
@@ -3706,6 +3711,7 @@ int __init ip_rt_init(void)
                struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
                INIT_LIST_HEAD(&ul->head);
+               INIT_LIST_HEAD(&ul->quarantine);
                spin_lock_init(&ul->lock);
        }
 #ifdef CONFIG_IP_ROUTE_CLASSID