neighbour: fix various data-races
[platform/kernel/linux-starfive.git] net/core/neighbour.c
index 9c09f09..df81c1f 100644
@@ -251,7 +251,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
 
 static int neigh_forced_gc(struct neigh_table *tbl)
 {
-       int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
+       int max_clean = atomic_read(&tbl->gc_entries) -
+                       READ_ONCE(tbl->gc_thresh2);
        unsigned long tref = jiffies - 5 * HZ;
        struct neighbour *n, *tmp;
        int shrunk = 0;
@@ -280,7 +281,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
                }
        }
 
-       tbl->last_flush = jiffies;
+       WRITE_ONCE(tbl->last_flush, jiffies);
 
        write_unlock_bh(&tbl->lock);
 
@@ -464,17 +465,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
 {
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
-       int entries;
+       int entries, gc_thresh3;
 
        if (exempt_from_gc)
                goto do_alloc;
 
        entries = atomic_inc_return(&tbl->gc_entries) - 1;
-       if (entries >= tbl->gc_thresh3 ||
-           (entries >= tbl->gc_thresh2 &&
-            time_after(now, tbl->last_flush + 5 * HZ))) {
-               if (!neigh_forced_gc(tbl) &&
-                   entries >= tbl->gc_thresh3) {
+       gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
+       if (entries >= gc_thresh3 ||
+           (entries >= READ_ONCE(tbl->gc_thresh2) &&
+            time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
+               if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
                        net_info_ratelimited("%s: neighbor table overflow!\n",
                                             tbl->id);
                        NEIGH_CACHE_STAT_INC(tbl, table_fulls);
@@ -955,13 +956,14 @@ static void neigh_periodic_work(struct work_struct *work)
 
        if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;
-               tbl->last_rand = jiffies;
+
+               WRITE_ONCE(tbl->last_rand, jiffies);
                list_for_each_entry(p, &tbl->parms_list, list)
                        p->reachable_time =
                                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
        }
 
-       if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+       if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
                goto out;
 
        for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@ -2167,15 +2169,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
        ndtmsg->ndtm_pad2   = 0;
 
        if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
-           nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
-           nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
-           nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
-           nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+           nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
+                         NDTA_PAD) ||
+           nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
+           nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
+           nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
                goto nla_put_failure;
        {
                unsigned long now = jiffies;
-               long flush_delta = now - tbl->last_flush;
-               long rand_delta = now - tbl->last_rand;
+               long flush_delta = now - READ_ONCE(tbl->last_flush);
+               long rand_delta = now - READ_ONCE(tbl->last_rand);
                struct neigh_hash_table *nht;
                struct ndt_config ndc = {
                        .ndtc_key_len           = tbl->key_len,
@@ -2183,7 +2186,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                        .ndtc_entries           = atomic_read(&tbl->entries),
                        .ndtc_last_flush        = jiffies_to_msecs(flush_delta),
                        .ndtc_last_rand         = jiffies_to_msecs(rand_delta),
-                       .ndtc_proxy_qlen        = tbl->proxy_queue.qlen,
+                       .ndtc_proxy_qlen        = READ_ONCE(tbl->proxy_queue.qlen),
                };
 
                rcu_read_lock();
@@ -2206,17 +2209,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                        struct neigh_statistics *st;
 
                        st = per_cpu_ptr(tbl->stats, cpu);
-                       ndst.ndts_allocs                += st->allocs;
-                       ndst.ndts_destroys              += st->destroys;
-                       ndst.ndts_hash_grows            += st->hash_grows;
-                       ndst.ndts_res_failed            += st->res_failed;
-                       ndst.ndts_lookups               += st->lookups;
-                       ndst.ndts_hits                  += st->hits;
-                       ndst.ndts_rcv_probes_mcast      += st->rcv_probes_mcast;
-                       ndst.ndts_rcv_probes_ucast      += st->rcv_probes_ucast;
-                       ndst.ndts_periodic_gc_runs      += st->periodic_gc_runs;
-                       ndst.ndts_forced_gc_runs        += st->forced_gc_runs;
-                       ndst.ndts_table_fulls           += st->table_fulls;
+                       ndst.ndts_allocs                += READ_ONCE(st->allocs);
+                       ndst.ndts_destroys              += READ_ONCE(st->destroys);
+                       ndst.ndts_hash_grows            += READ_ONCE(st->hash_grows);
+                       ndst.ndts_res_failed            += READ_ONCE(st->res_failed);
+                       ndst.ndts_lookups               += READ_ONCE(st->lookups);
+                       ndst.ndts_hits                  += READ_ONCE(st->hits);
+                       ndst.ndts_rcv_probes_mcast      += READ_ONCE(st->rcv_probes_mcast);
+                       ndst.ndts_rcv_probes_ucast      += READ_ONCE(st->rcv_probes_ucast);
+                       ndst.ndts_periodic_gc_runs      += READ_ONCE(st->periodic_gc_runs);
+                       ndst.ndts_forced_gc_runs        += READ_ONCE(st->forced_gc_runs);
+                       ndst.ndts_table_fulls           += READ_ONCE(st->table_fulls);
                }
 
                if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
@@ -2445,16 +2448,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto errout_tbl_lock;
 
        if (tb[NDTA_THRESH1])
-               tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
+               WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
 
        if (tb[NDTA_THRESH2])
-               tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
+               WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
 
        if (tb[NDTA_THRESH3])
-               tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
+               WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
 
        if (tb[NDTA_GC_INTERVAL])
-               tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
+               WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
 
        err = 0;
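
Note on the pattern applied throughout the hunks above: fields such as gc_thresh1/2/3, gc_interval, last_flush and last_rand can be rewritten via netlink (neightbl_set(), last hunk) while paths like neigh_alloc() and neightbl_fill_info() read them without serializing on the same lock, so plain loads and stores on them are data races. READ_ONCE()/WRITE_ONCE() mark those accesses as intentionally lockless, keep the compiler from tearing or re-reading them, and let tools such as KCSAN tell annotated accesses from real bugs. The userspace sketch below illustrates the idea only; the fake_table struct, its field names, the thread bodies and the simplified macro definitions are illustrative stand-ins, not the kernel's own code.

/*
 * Minimal userspace sketch of the READ_ONCE()/WRITE_ONCE() pattern used
 * in this patch.  Everything here is a hypothetical analogue: fake_table
 * stands in for neigh_table, and the macros are simplified versions of
 * the kernel helpers (the real ones do more type checking).
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define READ_ONCE(x)        (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))

struct fake_table {             /* hypothetical analogue of neigh_table */
        int gc_thresh2;         /* tuned by one thread, read by another */
};

static struct fake_table tbl = { .gc_thresh2 = 512 };

/* Reader: consumes the threshold locklessly, as neigh_alloc() does. */
static void *reader(void *arg)
{
        (void)arg;
        for (int i = 0; i < 5; i++) {
                int thresh = READ_ONCE(tbl.gc_thresh2);

                printf("reader sees gc_thresh2=%d\n", thresh);
                usleep(1000);
        }
        return NULL;
}

/* Writer: updates the threshold, as neightbl_set() does via netlink. */
static void *writer(void *arg)
{
        (void)arg;
        for (int i = 0; i < 5; i++) {
                WRITE_ONCE(tbl.gc_thresh2, 512 + i);
                usleep(1000);
        }
        return NULL;
}

int main(void)
{
        pthread_t r, w;

        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(r, NULL);
        pthread_join(w, NULL);
        return 0;
}

Built with "cc -pthread", both threads touch tbl.gc_thresh2 concurrently without a lock; the volatile accesses keep each aligned int load and store a single memory operation on common platforms, which is the property the READ_ONCE()/WRITE_ONCE() annotations above provide for the neighbour table fields.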