devlink: Fix use-after-free after a failed reload
[platform/kernel/linux-rpi.git] / net / core / neighbour.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      Generic address resolution entity
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
8  *
9  *      Fixes:
10  *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
11  *      Harald Welte            Add neighbour cache statistics like rtstat
12  */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/slab.h>
17 #include <linux/kmemleak.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/arp.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <net/netevent.h>
34 #include <net/netlink.h>
35 #include <linux/rtnetlink.h>
36 #include <linux/random.h>
37 #include <linux/string.h>
38 #include <linux/log2.h>
39 #include <linux/inetdevice.h>
40 #include <net/addrconf.h>
41
42 #include <trace/events/neigh.h>
43
#define NEIGH_DEBUG 1

/* Emit a debug message when @level does not exceed the compile-time
 * NEIGH_DEBUG threshold.  Output still goes through pr_debug(), so
 * dynamic debug must also be enabled for the message to appear.
 */
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

/* Proxy-entry hash table has PNEIGH_HASHMASK + 1 (16) buckets. */
#define PNEIGH_HASHMASK		0xF
52
53 static void neigh_timer_handler(struct timer_list *t);
54 static void __neigh_notify(struct neighbour *n, int type, int flags,
55                            u32 pid);
56 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
57 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
58                                     struct net_device *dev);
59
60 #ifdef CONFIG_PROC_FS
61 static const struct seq_operations neigh_stat_seq_ops;
62 #endif
63
64 /*
65    Neighbour hash table buckets are protected with rwlock tbl->lock.
66
67    - All the scans/updates to hash buckets MUST be made under this lock.
68    - NOTHING clever should be made under this lock: no callbacks
69      to protocol backends, no attempts to send something to network.
70      It will result in deadlocks, if backend/driver wants to use neighbour
71      cache.
72    - If the entry requires some non-trivial actions, increase
73      its reference count and release table lock.
74
75    Neighbour entries are protected:
76    - with reference count.
77    - with rwlock neigh->lock
78
79    Reference count prevents destruction.
80
81    neigh->lock mainly serializes ll address data and its validity state.
82    However, the same lock is used to protect other entry fields:
83     - timer
84     - resolution queue
85
86    Again, nothing clever shall be made under neigh->lock,
87    the most complicated procedure we allow is dev->hard_header.
88    It is assumed that dev->hard_header is simple and does
89    not make callbacks to neighbour tables.
90  */
91
/* Output handler installed on unusable entries: drop the skb and tell
 * the caller the link is effectively down.
 */
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
97
/* Notify userspace (RTM_DELNEIGH) and in-kernel netevent listeners that
 * @neigh is going away, then drop the reference that the table held.
 */
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}
105
106 /*
107  * It is random distribution in the interval (1/2)*base...(3/2)*base.
108  * It corresponds to default IPv6 settings and is not overridable,
109  * because it is really reasonable choice.
110  */
111
/* Pick a reachable-time value uniformly at random from the interval
 * (base/2, 3*base/2); returns 0 when @base is 0.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	if (!base)
		return 0;

	return (base >> 1) + (prandom_u32() % base);
}
EXPORT_SYMBOL(neigh_rand_reach_time);
117
/* Mark @n dead and drop it from the table's GC candidate list.
 * Callers in this file hold n->lock (and tbl->lock, which the
 * gc_list/gc_entries update relies on).
 */
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}
126
/* Re-evaluate whether @n should be a GC candidate after a state or
 * flags change, and add it to / remove it from tbl->gc_list to match.
 * Takes tbl->lock then n->lock, the ordering used throughout this file.
 */
static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	/* A dead entry was already unlinked by neigh_mark_dead(). */
	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
157
158 static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
159                                      int *notify)
160 {
161         bool rc = false;
162         u8 ndm_flags;
163
164         if (!(flags & NEIGH_UPDATE_F_ADMIN))
165                 return rc;
166
167         ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
168         if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
169                 if (ndm_flags & NTF_EXT_LEARNED)
170                         neigh->flags |= NTF_EXT_LEARNED;
171                 else
172                         neigh->flags &= ~NTF_EXT_LEARNED;
173                 rc = true;
174                 *notify = 1;
175         }
176
177         return rc;
178 }
179
/* Try to unlink @n from its hash chain; @np is the link that points at
 * @n.  Succeeds only when the table holds the sole reference
 * (refcnt == 1).  Called with tbl->lock held.
 * Returns true when @n was removed (and its notification sent).
 */
static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	/* Notification and final release happen outside n->lock. */
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}
200
/* Find @ndel in its bucket and remove it via neigh_del().
 * Called with tbl->lock held.  Returns false when the entry is no
 * longer hashed or is still referenced elsewhere.
 */
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}
223
/* Synchronous GC, invoked from neigh_alloc() when the table is over its
 * thresholds.  Walks the GC candidate list (oldest at the front) and
 * removes unreferenced entries that are NUD_FAILED, NUD_NOARP,
 * multicast-keyed, or not updated within the last 5 seconds, stopping
 * once gc_entries would be back under gc_thresh2.
 * Returns the number of entries freed.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
261
262 static void neigh_add_timer(struct neighbour *n, unsigned long when)
263 {
264         neigh_hold(n);
265         if (unlikely(mod_timer(&n->timer, when))) {
266                 printk("NEIGH: BUG, double timer add, state is %x\n",
267                        n->nud_state);
268                 dump_stack();
269         }
270 }
271
272 static int neigh_del_timer(struct neighbour *n)
273 {
274         if ((n->nud_state & NUD_IN_TIMER) &&
275             del_timer(&n->timer)) {
276                 neigh_release(n);
277                 return 1;
278         }
279         return 0;
280 }
281
/* Drop every skb queued on @list (the table's proxy_queue), releasing
 * each skb's device reference (presumably taken when the skb was
 * queued by the proxy delay path — confirm against the enqueue site).
 * NOTE(review): purges skbs for all devices, not a specific one.
 */
static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
291
/* Unlink from @tbl every hashed entry belonging to @dev (all devices
 * when @dev is NULL), skipping NUD_PERMANENT entries when @skip_perm.
 * Called with tbl->lock held.  Entries still referenced elsewhere are
 * neutered (queue purged, output blackholed) and die when the last
 * reference is dropped.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			/* Unlink; concurrent RCU readers may still see n. */
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
345
/* The device's link-layer address changed: flush every cached entry for
 * @dev, including NUD_PERMANENT ones (skip_perm == false).
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
353
/* Common teardown for neigh_ifdown()/neigh_carrier_down(): flush @dev's
 * hashed entries (keeping NUD_PERMANENT ones when @skip_perm), drop
 * matching proxy entries, then stop the proxy timer and drain its queue.
 * neigh_flush_dev() runs under tbl->lock; pneigh_ifdown_and_unlock()
 * releases that lock before destroying the proxy entries.
 * NOTE(review): pneigh_queue_purge() drops queued skbs for every device,
 * not just @dev — verify this is acceptable on single-device teardown.
 */
static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
365
/* Carrier lost on @dev: flush its entries but preserve NUD_PERMANENT
 * ones (skip_perm == true).  Always returns 0.
 */
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);
372
/* @dev is going down: flush all of its entries, permanent ones
 * included (skip_perm == false).  Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
379
/* Allocate and minimally initialize a neighbour entry for @tbl/@dev.
 * Unless @exempt_from_gc, the prospective entry is counted in
 * tbl->gc_entries up front and may trigger a synchronous
 * neigh_forced_gc() when the table crosses gc_thresh2/gc_thresh3.
 * Returns NULL on failure (the accounting is undone).  The entry starts
 * dead (n->dead = 1); ___neigh_create() clears that once it is hashed.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u8 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	/* Reserve a GC-accounted slot; released again on failure. */
	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		/* Still at/over the hard limit after forced GC: give up. */
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	/* entry_size covers the protocol's struct; neigh_priv_len is extra
	 * room the device requested.
	 */
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->flags	  = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
435
/* Pick a random hash seed; OR-ing in the low bit makes it odd and
 * therefore never zero.
 */
static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}
440
/* Allocate a hash table with 2^@shift buckets plus its descriptor.
 * Bucket arrays up to a page come from kzalloc; larger ones use whole
 * pages and are registered with kmemleak by hand, since page
 * allocations are not tracked automatically.  GFP_ATOMIC because this
 * can run under tbl->lock (see neigh_hash_grow()).
 * Returns NULL on allocation failure.
 */
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
469
/* RCU callback freeing a replaced hash table once no reader can still
 * see it; mirrors the kzalloc/__get_free_pages split (and manual
 * kmemleak registration) in neigh_hash_alloc().
 */
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}
486
/* Replace tbl->nht with a table of 2^@new_shift buckets, rehashing
 * every entry with the new table's seeds.  Called with tbl->lock held.
 * On allocation failure the old table is kept and returned.  Lockless
 * readers stay consistent via RCU: the new table is published with
 * rcu_assign_pointer() and the old one freed only after a grace period.
 */
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			/* Push onto the head of the new bucket's chain. */
			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
527
/* Look up the entry for @pkey/@dev in @tbl and return it with a
 * reference held, or NULL.  Lockless: runs under rcu_read_lock_bh()
 * and uses refcount_inc_not_zero() so a dying entry (refcnt already 0)
 * is not resurrected.
 */
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
547
/* Device-independent lookup: find the entry for @pkey in @tbl whose
 * device belongs to @net, comparing the full key with a NULL-dev hash.
 * Returns the entry with a reference held, or NULL.  Lockless (RCU),
 * with the same refcount_inc_not_zero() guard as neigh_lookup().
 */
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
578
/* Allocate, construct, and hash a new entry for @pkey on @dev.
 * Constructors run before tbl->lock is taken; if a concurrent creator
 * hashed the same key first, the existing entry is returned instead
 * (referenced when @want_ref) and the new allocation is released.
 * Returns ERR_PTR() on failure.
 */
static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u8 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/* Grow the hash table once the load factor exceeds 1. */
	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	/* The parms block is being torn down; refuse to insert. */
	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* Lost a race with a concurrent creator?  Reuse its entry. */
	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	/* Publish at the head of the bucket; lockless readers use RCU. */
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	/* Undo the gc_entries accounting done in neigh_alloc(). */
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}
673
/* Public create/find entry point: no extra flags, subject to normal GC
 * accounting (exempt_from_gc == false).
 */
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
680
681 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
682 {
683         u32 hash_val = *(u32 *)(pkey + key_len - 4);
684         hash_val ^= (hash_val >> 16);
685         hash_val ^= hash_val >> 8;
686         hash_val ^= hash_val >> 4;
687         hash_val &= PNEIGH_HASHMASK;
688         return hash_val;
689 }
690
691 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
692                                               struct net *net,
693                                               const void *pkey,
694                                               unsigned int key_len,
695                                               struct net_device *dev)
696 {
697         while (n) {
698                 if (!memcmp(n->key, pkey, key_len) &&
699                     net_eq(pneigh_net(n), net) &&
700                     (n->dev == dev || !n->dev))
701                         return n;
702                 n = n->next;
703         }
704         return NULL;
705 }
706
/* Lockless proxy-entry lookup helper: hashes @pkey and scans the bucket.
 * Unlike pneigh_lookup(), takes no lock itself — presumably the caller
 * must hold tbl->lock or otherwise exclude writers; verify at call sites.
 */
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
717
/* Find a proxy entry for @pkey in @net, optionally (@creat) creating
 * one.  Lookup takes tbl->lock briefly; creation may sleep (GFP_KERNEL)
 * and requires RTNL, which serializes creators — hence the bucket is
 * not re-checked before insertion.  Returns the entry or NULL.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol constructor failure aborts creation entirely. */
	if (tbl->pconstructor && tbl->pconstructor(n)) {
		dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
760
761
/* Remove and destroy the proxy entry matching @pkey/@dev/@net.
 * The entry is unlinked under tbl->lock, but pdestructor and kfree run
 * after the lock is dropped.  Returns 0 on success, -ENOENT otherwise.
 */
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
786
/* Unlink all proxy entries for @dev (every device when @dev is NULL)
 * from @tbl, release tbl->lock — which the caller (__neigh_ifdown())
 * acquired — and then destroy the unlinked entries, so pdestructor runs
 * without the lock held.
 * NOTE(review): always returns -ENOENT; the caller ignores the value.
 */
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				/* Move onto a private free list. */
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}
816
817 static void neigh_parms_destroy(struct neigh_parms *parms);
818
/* Drop a reference on @parms; the final put destroys them. */
static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
824
/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	/* Refuse to free a live entry: leaking it is safer than a
	 * use-after-free through the hash table.
	 */
	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	/* A dead entry should have no pending timer. */
	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	/* RCU readers may still hold a pointer; defer the actual free. */
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
861
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	/* Route transmits through the full (slow) output path. */
	neigh->output = neigh->ops->output;
}
873
/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	/* Transmit via the connected (fast) output path. */
	neigh->output = neigh->ops->connected_output;
}
885
/* Periodic garbage-collection worker for a neighbour table: refreshes the
 * randomized reachable_time and reclaims unreferenced stale/failed entries.
 * Re-queues itself at the end of every run.
 */
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	/* Below gc_thresh1 there is nothing worth collecting. */
	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			/* Never collect permanent, timer-armed or externally
			 * learned entries.
			 */
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			/* Unreferenced entries that failed, or idled past
			 * GC_STALETIME, are unlinked and released.
			 */
			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		/* The hash table may have been resized while unlocked. */
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}
967
968 static __inline__ int neigh_max_probes(struct neighbour *n)
969 {
970         struct neigh_parms *p = n->parms;
971         return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
972                (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
973                 NEIGH_VAR(p, MCAST_PROBES));
974 }
975
/* Resolution failed: report every queued packet as unreachable and purge
 * the queue.  Called with neigh->lock write-held; the lock is dropped
 * around each error_report() callback and re-taken afterwards.
 */
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* It is very thin place. report_unreachable is very complicated
	   routine. Particularly, it can hit the same neighbour entry!

	   So that, we try to be accurate and avoid dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}
1000
/* Send one solicitation for @neigh, using a clone of the most recently
 * queued packet (if any) as context.  Called with neigh->lock write-held;
 * the lock is dropped before soliciting and NOT re-taken.
 */
static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}
1014
/* Called when a timer expires for a neighbour entry.  Drives the NUD state
 * machine: REACHABLE -> DELAY/STALE, DELAY -> REACHABLE/PROBE, and
 * INCOMPLETE/PROBE -> FAILED once neigh_max_probes() is exhausted.
 */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	/* Timer raced with a state change that no longer needs it. */
	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			/* Confirmation arrived during the delay window. */
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	/* All probes used up without an answer: give up on this entry. */
	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		/* Never re-arm closer than HZ/100 from now. */
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		/* neigh_probe() drops neigh->lock for us. */
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	/* Drop the reference the timer held. */
	neigh_release(neigh);
}
1105
/* Kick off (or continue) resolution for @neigh, optionally queueing @skb
 * until the address is known.  Returns 0 when the caller may transmit
 * immediately, 1 when the packet was queued or dropped.  On return != 0
 * this function has consumed @skb (queued or freed).
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	/* Connected, delayed or probing entries need no action here. */
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			/* Start resolution: arm the retransmit timer and
			 * solicit once before returning.
			 */
			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			/* No probing configured at all: fail immediately. */
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* Make room under QUEUE_LEN_BYTES by discarding the
			 * oldest queued packets first.
			 */
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	/* neigh_probe() releases the write lock; we re-enable BHs here
	 * either way (pairing with write_lock_bh above).
	 */
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
1187
/* Propagate a changed hardware address into the cached hard header,
 * via the device's header_ops->cache_update callback (if provided).
 */
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		/* Only rewrite a header that was actually cached. */
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
1206
1207
1208
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */
1227
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	/* Dead entries are about to be freed; keep the state unchanged. */
	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}
	/* Only administrative updates may touch NOARP/PERMANENT entries. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
	if (flags & NEIGH_UPDATE_F_USE) {
		/* "use" only refreshes the entry; just drop PERMANENT. */
		new = old & ~NUD_PERMANENT;
		neigh->nud_state = new;
		err = 0;
		goto out;
	}

	if (!(new & NUD_VALID)) {
		/* Transition to an invalid state: stop the timer and
		 * report/purge queued packets when resolution failed.
		 */
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update confirmed timestamp for neighbour entry after we
	 * received ARP packet even if it doesn't change IP to MAC binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Keep the old address but mark it suspect. */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk to move the locktime window with
	 * noop updates and ignore relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		/* Install the new address and refresh cached hard headers. */
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	/* PERMANENT transitions and ext-learned changes affect GC lists. */
	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
		neigh_update_gc_list(neigh);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}
1418
/* Public wrapper around __neigh_update() without extended-ack reporting. */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
1425
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	/* Start at the probe limit so only this one window is listened on. */
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
1443
/* Look up the entry for @saddr on @dev — creating it when a link-layer
 * address was supplied or the device needs none — and mark it STALE with
 * the given lladdr.  Returns the entry (with a reference) or NULL.
 */
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);
1456
/* Populate the entry's cached hard header via header_ops->cache().
 * Takes the neigh write lock so that only one thread initializes it.
 */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache *hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
1474
/* Slow and careful. */

/* Output path for unresolved entries: trigger/continue resolution via
 * neigh_event_send(), then build the link-layer header under the ha_lock
 * seqlock and transmit.  Frees @skb on header-build failure.
 */
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		/* Retry until the hardware address was read consistently. */
		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
1509
/* As fast as possible without hh cache */

/* Output path for connected entries: build the link-layer header under
 * the ha_lock seqlock and transmit.  Frees @skb on header-build failure.
 */
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	/* Retry until the hardware address was read consistently. */
	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);
1534
/* Output path needing no link-layer resolution: transmit directly. */
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
1540
/* Proxy timer callback: replay (or drop) every queued proxy packet whose
 * scheduled time has arrived, and re-arm the timer for the earliest
 * remaining packet.
 */
static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			/* Drop the reference taken in pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1574
/* Queue @skb for delayed proxy processing, scheduled a random interval up
 * to PROXY_DELAY from now.  Drops the packet when the proxy queue already
 * exceeds PROXY_QLEN.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long sched_next = jiffies +
			prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	/* Keep the earliest deadline among queued packets. */
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	/* Hold the device until neigh_proxy_process() releases it. */
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
1601
/* Find the neigh_parms for @ifindex in @net; ifindex 0 matches the
 * table's device-less default parms in init_net.  Returns NULL if absent.
 */
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}
1615
/* Allocate per-device neigh parameters for @dev as a copy of the table's
 * defaults, let the driver adjust them via ndo_neigh_setup, and link them
 * into the table's parms list.  Returns NULL on allocation or setup
 * failure.  The returned parms hold a reference on @dev.
 */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		/* Driver veto: undo the allocation and device reference. */
		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
1649
/* RCU callback: drop the reference held across the grace period in
 * neigh_parms_release().
 */
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
1657
/* Unlink @parms from @tbl and release them after an RCU grace period.
 * The table's built-in default parms are never released this way.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	dev_put(parms->dev);
	/* Defer the final put so RCU readers can finish with parms. */
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
1670
/* Final teardown, called once the last reference is gone (via
 * neigh_parms_put()).  The parms were already unlinked in
 * neigh_parms_release(), so a plain kfree() suffices.
 */
static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1675
/* Lockdep class key for each table's proxy_queue skb list head. */
static struct lock_class_key neigh_table_proxy_queue_class;

/* Registered neighbour tables, indexed by NEIGH_*_TABLE; written only by
 * neigh_table_init()/neigh_table_clear().
 */
static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1679
/* Initialize neighbour table @tbl and register it in neigh_tables[@index].
 *
 * Sets up the default parms, per-CPU statistics, /proc stats entry, the
 * RCU-managed neighbour hash, the proxy-neighbour hash, periodic GC work
 * and the proxy timer.  Called once at protocol init time; allocation
 * failures here panic() since the stack cannot run without its tables.
 */
void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	/* Randomized so entries don't all expire in lock-step. */
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	/* Start with a 2^3-bucket neighbour hash; grows on demand. */
	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	/* Fixed-size proxy-neighbour hash (PNEIGH_HASHMASK + 1 buckets). */
	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	/* Entry size covers struct neighbour + the protocol key, unless the
	 * protocol pre-set a (properly aligned) larger size.
	 */
	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
1731
/* Unregister and tear down neighbour table @tbl (reverse of
 * neigh_table_init()).  The table is unhooked from neigh_tables[] first so
 * new lookups stop, then pending work/timers are cancelled and all entries
 * flushed before the hashes and stats are freed.  Always returns 0.
 */
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	/* Hash freed after a grace period; readers may still hold it. */
	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
1758
1759 static struct neigh_table *neigh_find_table(int family)
1760 {
1761         struct neigh_table *tbl = NULL;
1762
1763         switch (family) {
1764         case AF_INET:
1765                 tbl = neigh_tables[NEIGH_ARP_TABLE];
1766                 break;
1767         case AF_INET6:
1768                 tbl = neigh_tables[NEIGH_ND_TABLE];
1769                 break;
1770         case AF_DECnet:
1771                 tbl = neigh_tables[NEIGH_DN_TABLE];
1772                 break;
1773         }
1774
1775         return tbl;
1776 }
1777
/* Netlink attribute policy for RTM_{NEW,DEL,GET}NEIGH messages; strict
 * validation starts at NDA_NH_ID for newer attributes.
 */
const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};
1793
/* RTM_DELNEIGH handler: delete a (proxy) neighbour entry.
 *
 * For NTF_PROXY requests the pneigh entry is removed directly; otherwise
 * the neighbour is forced to NUD_FAILED via __neigh_update() and then
 * unlinked from the hash.  Runs under RTNL.
 */
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	/* Address attribute must be at least the table key length. */
	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	/* Non-proxy deletes require a device (returns -EINVAL). */
	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Admin override pushes the entry to NUD_FAILED before removal. */
	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	/* NOTE(review): the lookup ref is dropped before neigh_remove_one();
	 * presumably the hash table's own reference keeps @neigh alive until
	 * it is unlinked under tbl->lock — confirm against neigh_remove_one().
	 */
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}
1858
/* RTM_NEWNEIGH handler: create or update a (proxy) neighbour entry.
 *
 * NTF_PROXY requests create/update a pneigh entry instead.  For regular
 * neighbours the entry is created when NLM_F_CREATE is set (existing ones
 * honour NLM_F_EXCL / NLM_F_REPLACE) and then updated via __neigh_update()
 * with flags derived from ndm_flags.  Runs under RTNL.
 */
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		/* Link-layer address must cover the device's addr_len. */
		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);

	/* Proxy entries live in the pneigh hash, not the neighbour cache. */
	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	/* Per-table hook to reject entries on unsuitable devices. */
	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool exempt_from_gc;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		/* Permanent and externally-learned entries are not GC'd. */
		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
				 ndm->ndm_flags & NTF_EXT_LEARNED;
		neigh = ___neigh_create(tbl, dst, dev,
					ndm->ndm_flags & NTF_EXT_LEARNED,
					exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		/* Without NLM_F_REPLACE, don't override existing state. */
		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;
	if (ndm->ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
	if (ndm->ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;
	if (ndm->ndm_flags & NTF_USE)
		flags |= NEIGH_UPDATE_F_USE;

	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
			     NETLINK_CB(skb).portid, extack);
	/* NTF_USE additionally kicks off resolution for the entry. */
	if (!err && ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	}
	neigh_release(neigh);
out:
	return err;
}
1988
/* Emit one NDTA_PARMS nested attribute describing @parms.
 *
 * Returns the nest size on success, -ENOBUFS if the nest cannot be
 * started, or -EMSGSIZE if any attribute does not fit (the nest is then
 * cancelled).  The whole put-chain short-circuits on the first failure.
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
2036
/* Fill one RTM_NEWNEIGHTBL message describing table @tbl: name, GC
 * thresholds, runtime config (NDTA_CONFIG), aggregated per-CPU stats
 * (NDTA_STATS) and the default parms nest.
 *
 * tbl->lock is read-held across the whole fill for a consistent snapshot.
 * Returns 0 on success or -EMSGSIZE (message cancelled) if out of room.
 */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		/* Snapshot of the table's runtime configuration. */
		unsigned long now = jiffies;
		long flush_delta = now - tbl->last_flush;
		long rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		/* Hash parameters are read under RCU; the hash may be
		 * resized concurrently.
		 */
		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		/* Sum the per-CPU statistics into one ndt_stats blob. */
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	/* The table's default parms are never bound to a device. */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2125
/* Fill one RTM_NEWNEIGHTBL message carrying only the table name and a
 * single per-device parms nest (used when dumping non-default parms).
 * tbl->lock is read-held for the duration; returns 0 or -EMSGSIZE.
 */
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2158
/* Netlink policy for top-level RTM_SETNEIGHTBL attributes. */
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};
2167
/* Netlink policy for attributes nested inside NDTA_PARMS. */
static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
2184
/* RTM_SETNEIGHTBL handler: update a neighbour table's parameters.
 *
 * The target table is selected by NDTA_NAME (and optional family filter);
 * NDTA_PARMS selects which parms instance (default or per-device via
 * NDTPA_IFINDEX) to modify.  GC thresholds/interval may only be changed
 * from the initial network namespace.
 */
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	/* Find the table matching the requested name (and family, if set). */
	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		/* ifindex 0 selects the table's default parms. */
		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		/* Apply every present NDTPA_* attribute to the parms. */
		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				/* Deprecated packet count; converted to an
				 * approximate byte limit.
				 */
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				/* Switchdev et al. track this value. */
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	/* Table-wide GC knobs are restricted to the initial netns. */
	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
2346
/* Strict-mode validation for an RTM_GETNEIGHTBL dump request: the header
 * must be a full ndtmsg with zero padding and carry no trailing
 * attributes.  Returns 0 if valid, -EINVAL (with extack message) if not.
 */
static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack)
{
	struct ndtmsg *ndtm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
		return -EINVAL;
	}

	ndtm = nlmsg_data(nlh);
	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
		return -EINVAL;
	}

	return 0;
}
2370
/* RTM_GETNEIGHTBL dump handler: emit every table (filtered by family)
 * followed by its per-device parms entries belonging to the caller's
 * netns.
 *
 * cb->args[0]/args[1] persist the (table index, parms index) cursor
 * between dump invocations so a partially-filled skb resumes correctly.
 */
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		/* Skip tbl->parms itself; it was covered by fill_info above. */
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		/* Later tables start their parms dump from the beginning. */
		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
2431
/* Build one RTM_NEWNEIGH message describing @neigh into @skb.
 *
 * Returns 0 on success, or -EMSGSIZE when @skb has no room, in which
 * case the partially built message is cancelled.  Fields that can
 * change concurrently (nud_state, link-layer address, cache timestamps,
 * refcount) are sampled under neigh->lock so the reported snapshot is
 * self-consistent.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		/* Copy the hardware address while holding the lock so we
		 * never emit a half-updated address.
		 */
		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	/* Cache timestamps are reported as clock_t deltas from "now". */
	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	/* Minus one — presumably to hide an internal reference from
	 * userspace; TODO(review): confirm which reference is excluded.
	 */
	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	/* NDA_PROTOCOL is only emitted when the entry carries a tag. */
	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2487
/* Build one RTM_NEWNEIGH message for proxy entry @pn into @skb.
 *
 * Proxy entries carry no dynamic neighbour state, so they are reported
 * with NTF_PROXY set and state NUD_NONE.  Returns 0 or -EMSGSIZE (the
 * partial message is cancelled on failure).
 */
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	/* A NULL dev denotes a device-independent proxy entry. */
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2521
/* Propagate a neighbour change: in-kernel netevent listeners first,
 * then an RTM_NEWNEIGH netlink notification to userspace.
 */
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}
2527
2528 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2529 {
2530         struct net_device *master;
2531
2532         if (!master_idx)
2533                 return false;
2534
2535         master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2536
2537         /* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another
2538          * invalid value for ifindex to denote "no master".
2539          */
2540         if (master_idx == -1)
2541                 return !!master;
2542
2543         if (!master || master->ifindex != master_idx)
2544                 return true;
2545
2546         return false;
2547 }
2548
2549 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2550 {
2551         if (filter_idx && (!dev || dev->ifindex != filter_idx))
2552                 return true;
2553
2554         return false;
2555 }
2556
/* Filters parsed from a RTM_GETNEIGH dump request's NDA_MASTER and
 * NDA_IFINDEX attributes; 0 means the attribute was not supplied.
 */
struct neigh_dump_filter {
	int master_idx;
	int dev_idx;
};
2561
/* Dump the neighbour entries of @tbl into @skb for RTM_GETNEIGH.
 *
 * Resume state is kept in cb->args[1] (hash bucket) and cb->args[2]
 * (index within bucket) and written back before returning, so a
 * follow-up call continues where this one stopped.  Returns skb->len
 * on completion of this pass, or -1 when the skb filled up mid-dump.
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	/* Tell userspace the results were filtered by the kernel. */
	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;	/* resume index applies to the first bucket only */
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			/* Skip already-dumped entries and foreign netns. */
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	/* Save the resume point for the next invocation. */
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
2608
/* Dump the proxy-neighbour entries of @tbl into @skb for RTM_GETNEIGH.
 *
 * Mirrors neigh_dump_table(), but walks the pneigh hash under
 * tbl->lock (reader, BH off) instead of RCU, and keeps its resume
 * state in cb->args[3]/cb->args[4].  Returns skb->len or -1 when the
 * skb filled up.
 */
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	/* Tell userspace the results were filtered by the kernel. */
	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;	/* resume index applies to the first bucket only */
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH, flags, tbl) < 0) {
				/* Drop the lock before bailing out. */
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	/* Save the resume point for the next invocation. */
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;

}
2653
/* Validate a RTM_GETNEIGH dump request and extract the optional
 * NDA_IFINDEX / NDA_MASTER filters into @filter.
 *
 * With @strict_check the ndmsg header must be fully present, all
 * unused header fields must be zero, only NTF_PROXY may appear in
 * ndm_flags, and any attribute other than the two filters is rejected.
 * Legacy (non-strict) requests parse leniently and silently ignore
 * unknown attributes.  Returns 0 or a negative errno.
 */
static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
				bool strict_check,
				struct neigh_dump_filter *filter,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	int err, i;

	if (strict_check) {
		struct ndmsg *ndm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
			return -EINVAL;
		}

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
						    tb, NDA_MAX, nda_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
					     NDA_MAX, nda_policy, extack);
	}
	if (err < 0)
		return err;

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* all new attributes should require strict_check */
		switch (i) {
		case NDA_IFINDEX:
			filter->dev_idx = nla_get_u32(tb[i]);
			break;
		case NDA_MASTER:
			filter->master_idx = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
				return -EINVAL;
			}
		}
	}

	return 0;
}
2714
/* RTM_GETNEIGH dump entry point.
 *
 * Walks every neighbour table matching the requested address family
 * and dumps either the proxy table (header has exactly NTF_PROXY set)
 * or the regular neighbour cache.  cb->args[0] stores the table index
 * to resume from; the per-table resume slots (args[1..]) are cleared
 * when moving on to a new table.
 */
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	/* Legacy requests proceed even on parse errors, keeping
	 * whatever filters were successfully extracted.
	 */
	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		/* New table: clear the per-table resume state. */
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
2760
/* Validate a non-dump RTM_GETNEIGH request and extract its parameters.
 *
 * On success fills @tbl (looked up from ndm_family), @dst (pointer into
 * the request's NDA_DST attribute), @dev_idx and @ndm_flags, and
 * returns 0.  Strictly rejects non-zero pad/state/type header fields,
 * flags other than NTF_PROXY, and any attribute other than NDA_DST.
 */
static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			/* The address length must match the table's key. */
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}
2820
/* Worst-case attribute payload for a single neighbour message; used to
 * size the unicast reply skb in neigh_get_reply().
 */
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
2830
2831 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2832                            u32 pid, u32 seq)
2833 {
2834         struct sk_buff *skb;
2835         int err = 0;
2836
2837         skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2838         if (!skb)
2839                 return -ENOBUFS;
2840
2841         err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2842         if (err) {
2843                 kfree_skb(skb);
2844                 goto errout;
2845         }
2846
2847         err = rtnl_unicast(skb, net, pid);
2848 errout:
2849         return err;
2850 }
2851
/* Worst-case attribute payload for a single proxy-neighbour message;
 * used to size the unicast reply skb in pneigh_get_reply().
 */
static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
2858
2859 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2860                             u32 pid, u32 seq, struct neigh_table *tbl)
2861 {
2862         struct sk_buff *skb;
2863         int err = 0;
2864
2865         skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2866         if (!skb)
2867                 return -ENOBUFS;
2868
2869         err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2870         if (err) {
2871                 kfree_skb(skb);
2872                 goto errout;
2873         }
2874
2875         err = rtnl_unicast(skb, net, pid);
2876 errout:
2877         return err;
2878 }
2879
/* Non-dump RTM_GETNEIGH handler: look up a single (proxy) neighbour
 * entry and unicast it back to the requester.
 *
 * NTF_PROXY in the request selects the proxy table, where the device
 * is optional; a regular lookup requires both NDA_DST and a device.
 */
static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		/* creat == 0: lookup only, never create an entry. */
		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	/* neigh_lookup() takes a reference; drop it after replying. */
	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	neigh_release(neigh);

	return err;
}
2940
/* Invoke @cb(entry, @cookie) on every neighbour in @tbl.
 *
 * The walk runs under rcu_read_lock_bh() plus tbl->lock taken for
 * reading (to keep the hash table from being resized underneath us),
 * so @cb must not sleep.
 */
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
2962
/* The tbl->lock must be held as a writer and BH disabled. */
/* Walk every hash chain of @tbl, unlinking each entry for which @cb
 * returns nonzero.  Unlinked entries are marked dead under their own
 * lock and then released via neigh_cleanup_and_release() outside it.
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				/* Unlink n; np keeps pointing at the slot so
				 * the loop re-examines its successor next.
				 */
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				neigh_mark_dead(n);
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2997
2998 int neigh_xmit(int index, struct net_device *dev,
2999                const void *addr, struct sk_buff *skb)
3000 {
3001         int err = -EAFNOSUPPORT;
3002         if (likely(index < NEIGH_NR_TABLES)) {
3003                 struct neigh_table *tbl;
3004                 struct neighbour *neigh;
3005
3006                 tbl = neigh_tables[index];
3007                 if (!tbl)
3008                         goto out;
3009                 rcu_read_lock_bh();
3010                 if (index == NEIGH_ARP_TABLE) {
3011                         u32 key = *((u32 *)addr);
3012
3013                         neigh = __ipv4_neigh_lookup_noref(dev, key);
3014                 } else {
3015                         neigh = __neigh_lookup_noref(tbl, addr, dev);
3016                 }
3017                 if (!neigh)
3018                         neigh = __neigh_create(tbl, addr, dev, false);
3019                 err = PTR_ERR(neigh);
3020                 if (IS_ERR(neigh)) {
3021                         rcu_read_unlock_bh();
3022                         goto out_kfree_skb;
3023                 }
3024                 err = neigh->output(neigh, skb);
3025                 rcu_read_unlock_bh();
3026         }
3027         else if (index == NEIGH_LINK_TABLE) {
3028                 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3029                                       addr, NULL, skb->len);
3030                 if (err < 0)
3031                         goto out_kfree_skb;
3032                 err = dev_queue_xmit(skb);
3033         }
3034 out:
3035         return err;
3036 out_kfree_skb:
3037         kfree_skb(skb);
3038         goto out;
3039 }
3040 EXPORT_SYMBOL(neigh_xmit);
3041
3042 #ifdef CONFIG_PROC_FS
3043
/* Return the first neighbour visible to this /proc iteration: the
 * first entry, scanning buckets from 0, that belongs to @seq's netns,
 * passes the NEIGH_SEQ_SKIP_NOARP filter and (when set) the protocol's
 * neigh_sub_iter sub-filter.  Leaves the found bucket in state->bucket.
 * Caller holds rcu_read_lock_bh() (taken in neigh_seq_start()).
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket;

	/* We are back on the plain neighbour table. */
	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
3082
/* Advance the /proc iteration to the neighbour after @n, moving to
 * subsequent buckets as chains are exhausted, applying the same
 * netns / SKIP_NOARP / sub_iter filters as neigh_get_first().
 * @pos, when non-NULL, is decremented for each entry returned so
 * callers can seek to an absolute position.
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	/* Give the protocol's sub-iterator first chance to stay on @n. */
	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		/* Chain exhausted: move on to the next bucket. */
		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}
3130
3131 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3132 {
3133         struct neighbour *n = neigh_get_first(seq);
3134
3135         if (n) {
3136                 --(*pos);
3137                 while (*pos) {
3138                         n = neigh_get_next(seq, n, pos);
3139                         if (!n)
3140                                 break;
3141                 }
3142         }
3143         return *pos ? NULL : n;
3144 }
3145
/* Return the first proxy-neighbour entry of this iteration's netns,
 * scanning buckets from 0, and record the bucket in state->bucket.
 * Also flags the iteration as being in the pneigh phase.
 */
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		/* Skip entries belonging to other network namespaces. */
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
3166
/* Advance to the proxy entry after @pn in this iteration's netns,
 * moving across buckets as needed.  @pos, when non-NULL, is
 * decremented for each entry returned (absolute-position seeking).
 */
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
3194
3195 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3196 {
3197         struct pneigh_entry *pn = pneigh_get_first(seq);
3198
3199         if (pn) {
3200                 --(*pos);
3201                 while (*pos) {
3202                         pn = pneigh_get_next(seq, pn, pos);
3203                         if (!pn)
3204                                 break;
3205                 }
3206         }
3207         return *pos ? NULL : pn;
3208 }
3209
3210 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3211 {
3212         struct neigh_seq_state *state = seq->private;
3213         void *rc;
3214         loff_t idxpos = *pos;
3215
3216         rc = neigh_get_idx(seq, &idxpos);
3217         if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3218                 rc = pneigh_get_idx(seq, &idxpos);
3219
3220         return rc;
3221 }
3222
/* seq_file ->start for neighbour /proc files.  Initializes the
 * iteration state and takes rcu_read_lock_bh() plus tbl->lock (read);
 * both are held across the whole iteration and released by
 * neigh_seq_stop().  Returns SEQ_START_TOKEN at position 0 so the
 * caller can print a header line.
 */
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	/* IS_PNEIGH is iterator-internal; never accept it from callers. */
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);
	read_lock(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
3240
/* seq_file ->next for neighbour /proc files.  Steps from the header
 * token to the first neighbour, through the neighbour phase, and then
 * (unless NEIGH_SEQ_NEIGH_ONLY) into the proxy phase.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		/* Neighbour phase done: fall through to proxy entries. */
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
3267
/* seq_file ->stop: release tbl->lock and rcu_read_lock_bh() taken in
 * neigh_seq_start(), in reverse acquisition order.
 */
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
	__releases(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
3279
3280 /* statistics via seq_file */
3281
3282 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3283 {
3284         struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3285         int cpu;
3286
3287         if (*pos == 0)
3288                 return SEQ_START_TOKEN;
3289
3290         for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3291                 if (!cpu_possible(cpu))
3292                         continue;
3293                 *pos = cpu+1;
3294                 return per_cpu_ptr(tbl->stats, cpu);
3295         }
3296         return NULL;
3297 }
3298
3299 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3300 {
3301         struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3302         int cpu;
3303
3304         for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3305                 if (!cpu_possible(cpu))
3306                         continue;
3307                 *pos = cpu+1;
3308                 return per_cpu_ptr(tbl->stats, cpu);
3309         }
3310         (*pos)++;
3311         return NULL;
3312 }
3313
/* Nothing to release: ->start takes no locks for the stats walk. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
3318
3319 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3320 {
3321         struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3322         struct neigh_statistics *st = v;
3323
3324         if (v == SEQ_START_TOKEN) {
3325                 seq_puts(seq, "entries  allocs   destroys hash_grows lookups  hits     res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3326                 return 0;
3327         }
3328
3329         seq_printf(seq, "%08x %08lx %08lx %08lx   %08lx %08lx %08lx   "
3330                         "%08lx         %08lx         %08lx         "
3331                         "%08lx       %08lx            %08lx\n",
3332                    atomic_read(&tbl->entries),
3333
3334                    st->allocs,
3335                    st->destroys,
3336                    st->hash_grows,
3337
3338                    st->lookups,
3339                    st->hits,
3340
3341                    st->res_failed,
3342
3343                    st->rcv_probes_mcast,
3344                    st->rcv_probes_ucast,
3345
3346                    st->periodic_gc_runs,
3347                    st->forced_gc_runs,
3348                    st->unres_discards,
3349                    st->table_fulls
3350                    );
3351
3352         return 0;
3353 }
3354
/* seq_operations backing the per-table statistics file in /proc */
static const struct seq_operations neigh_stat_seq_ops = {
        .start  = neigh_stat_seq_start,
        .next   = neigh_stat_seq_next,
        .stop   = neigh_stat_seq_stop,
        .show   = neigh_stat_seq_show,
};
3361 #endif /* CONFIG_PROC_FS */
3362
3363 static void __neigh_notify(struct neighbour *n, int type, int flags,
3364                            u32 pid)
3365 {
3366         struct net *net = dev_net(n->dev);
3367         struct sk_buff *skb;
3368         int err = -ENOBUFS;
3369
3370         skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3371         if (skb == NULL)
3372                 goto errout;
3373
3374         err = neigh_fill_info(skb, n, pid, 0, type, flags);
3375         if (err < 0) {
3376                 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3377                 WARN_ON(err == -EMSGSIZE);
3378                 kfree_skb(skb);
3379                 goto errout;
3380         }
3381         rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3382         return;
3383 errout:
3384         if (err < 0)
3385                 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3386 }
3387
/* Notify userspace of an address-resolution request for @n by sending
 * an RTM_GETNEIGH request message to the RTNLGRP_NEIGH group.
 */
void neigh_app_ns(struct neighbour *n)
{
        __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);
3393
3394 #ifdef CONFIG_SYSCTL
/* Upper bound for the packet-based "unres_qlen" sysctl: chosen so the
 * packets -> bytes conversion in proc_unres_qlen() cannot overflow int.
 */
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3396
3397 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3398                            void *buffer, size_t *lenp, loff_t *ppos)
3399 {
3400         int size, ret;
3401         struct ctl_table tmp = *ctl;
3402
3403         tmp.extra1 = SYSCTL_ZERO;
3404         tmp.extra2 = &unres_qlen_max;
3405         tmp.data = &size;
3406
3407         size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3408         ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3409
3410         if (write && !ret)
3411                 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3412         return ret;
3413 }
3414
3415 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3416                                                    int family)
3417 {
3418         switch (family) {
3419         case AF_INET:
3420                 return __in_dev_arp_parms_get_rcu(dev);
3421         case AF_INET6:
3422                 return __in6_dev_nd_parms_get_rcu(dev);
3423         }
3424         return NULL;
3425 }
3426
3427 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3428                                   int index)
3429 {
3430         struct net_device *dev;
3431         int family = neigh_parms_family(p);
3432
3433         rcu_read_lock();
3434         for_each_netdev_rcu(net, dev) {
3435                 struct neigh_parms *dst_p =
3436                                 neigh_get_dev_parms_rcu(dev, family);
3437
3438                 if (dst_p && !test_bit(index, dst_p->data_state))
3439                         dst_p->data[index] = p->data[index];
3440         }
3441         rcu_read_unlock();
3442 }
3443
3444 static void neigh_proc_update(struct ctl_table *ctl, int write)
3445 {
3446         struct net_device *dev = ctl->extra1;
3447         struct neigh_parms *p = ctl->extra2;
3448         struct net *net = neigh_parms_net(p);
3449         int index = (int *) ctl->data - p->data;
3450
3451         if (!write)
3452                 return;
3453
3454         set_bit(index, p->data_state);
3455         if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3456                 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3457         if (!dev) /* NULL dev means this is default value */
3458                 neigh_copy_dflt_parms(net, p, index);
3459 }
3460
3461 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3462                                            void *buffer, size_t *lenp,
3463                                            loff_t *ppos)
3464 {
3465         struct ctl_table tmp = *ctl;
3466         int ret;
3467
3468         tmp.extra1 = SYSCTL_ZERO;
3469         tmp.extra2 = SYSCTL_INT_MAX;
3470
3471         ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3472         neigh_proc_update(ctl, write);
3473         return ret;
3474 }
3475
3476 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3477                         size_t *lenp, loff_t *ppos)
3478 {
3479         int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3480
3481         neigh_proc_update(ctl, write);
3482         return ret;
3483 }
3484 EXPORT_SYMBOL(neigh_proc_dointvec);
3485
3486 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3487                                 size_t *lenp, loff_t *ppos)
3488 {
3489         int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3490
3491         neigh_proc_update(ctl, write);
3492         return ret;
3493 }
3494 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3495
3496 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3497                                               void *buffer, size_t *lenp,
3498                                               loff_t *ppos)
3499 {
3500         int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3501
3502         neigh_proc_update(ctl, write);
3503         return ret;
3504 }
3505
3506 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3507                                    void *buffer, size_t *lenp, loff_t *ppos)
3508 {
3509         int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3510
3511         neigh_proc_update(ctl, write);
3512         return ret;
3513 }
3514 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3515
3516 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3517                                           void *buffer, size_t *lenp,
3518                                           loff_t *ppos)
3519 {
3520         int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3521
3522         neigh_proc_update(ctl, write);
3523         return ret;
3524 }
3525
3526 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3527                                           void *buffer, size_t *lenp,
3528                                           loff_t *ppos)
3529 {
3530         struct neigh_parms *p = ctl->extra2;
3531         int ret;
3532
3533         if (strcmp(ctl->procname, "base_reachable_time") == 0)
3534                 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3535         else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3536                 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3537         else
3538                 ret = -1;
3539
3540         if (write && ret == 0) {
3541                 /* update reachable_time as well, otherwise, the change will
3542                  * only be effective after the next time neigh_periodic_work
3543                  * decides to recompute it
3544                  */
3545                 p->reachable_time =
3546                         neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3547         }
3548         return ret;
3549 }
3550
/* Offset-of trick: address of data[index] relative to a NULL parms
 * pointer; neigh_sysctl_register() later adds the real parms pointer
 * to turn this into a usable .data address.
 */
#define NEIGH_PARMS_DATA_OFFSET(index)  \
        (&((struct neigh_parms *) 0)->data[index])

/* Build one template ctl_table entry for NEIGH_VAR_<attr>, backed by
 * the data slot of NEIGH_VAR_<data_attr> (usually the same attr).
 */
#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
        [NEIGH_VAR_ ## attr] = { \
                .procname       = name, \
                .data           = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
                .maxlen         = sizeof(int), \
                .mode           = mval, \
                .proc_handler   = proc, \
        }

/* Integer entry clamped to [0, INT_MAX] */
#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
        NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

/* Entry stored in jiffies, shown in seconds */
#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
        NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

/* Entry stored in jiffies, shown in USER_HZ ticks */
#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
        NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

/* Millisecond view onto another attr's jiffies-based data slot */
#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
        NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

/* Legacy packet-count view onto the byte-based queue-length slot */
#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
        NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3577
/* Template cloned by neigh_sysctl_register() for each registration.
 * The per-parms entries carry NEIGH_PARMS_DATA_OFFSET placeholders in
 * .data that get relocated at registration time; the GC entries at the
 * tail are only kept for the "default" (no-device) table and are
 * pointed at the neigh_table's own fields.
 */
static struct neigh_sysctl_table {
        struct ctl_table_header *sysctl_header;
        struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
        .neigh_vars = {
                NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
                NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
                NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
                NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
                NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
                NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
                NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
                NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
                NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
                NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
                NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
                NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
                NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
                NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
                NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
                NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
                [NEIGH_VAR_GC_INTERVAL] = {
                        .procname       = "gc_interval",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_jiffies,
                },
                [NEIGH_VAR_GC_THRESH1] = {
                        .procname       = "gc_thresh1",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .extra1         = SYSCTL_ZERO,
                        .extra2         = SYSCTL_INT_MAX,
                        .proc_handler   = proc_dointvec_minmax,
                },
                [NEIGH_VAR_GC_THRESH2] = {
                        .procname       = "gc_thresh2",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .extra1         = SYSCTL_ZERO,
                        .extra2         = SYSCTL_INT_MAX,
                        .proc_handler   = proc_dointvec_minmax,
                },
                [NEIGH_VAR_GC_THRESH3] = {
                        .procname       = "gc_thresh3",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .extra1         = SYSCTL_ZERO,
                        .extra2         = SYSCTL_INT_MAX,
                        .proc_handler   = proc_dointvec_minmax,
                },
                {},
        },
};
3632
/* Register the neigh sysctl tree for @p under
 * net/<ipv4|ipv6>/neigh/<dev|default>/.
 *
 * Clones neigh_sysctl_template, relocates the per-parms .data offsets,
 * and wires the table up: per-device tables are terminated before the
 * GC entries (those are global), while the default table points them at
 * the neigh_table's gc_* fields.  A caller-supplied @handler overrides
 * the retrans/reachable-time handlers; otherwise the base_reachable_time
 * entries get neigh_proc_base_reachable_time so p->reachable_time is
 * refreshed immediately on write.
 *
 * Returns 0 on success, -ENOBUFS on allocation or registration failure.
 * The allocated table is stored in p->sysctl_table and freed by
 * neigh_sysctl_unregister().
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                          proc_handler *handler)
{
        int i;
        struct neigh_sysctl_table *t;
        const char *dev_name_source;
        char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
        char *p_name;

        t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
        if (!t)
                goto err;

        /* relocate the template's offsets against the real parms */
        for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
                t->neigh_vars[i].data += (long) p;
                t->neigh_vars[i].extra1 = dev;
                t->neigh_vars[i].extra2 = p;
        }

        if (dev) {
                dev_name_source = dev->name;
                /* Terminate the table early */
                memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
                       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
        } else {
                struct neigh_table *tbl = p->tbl;
                dev_name_source = "default";
                t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
                t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
                t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
                t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
        }

        if (handler) {
                /* RetransTime */
                t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
                /* ReachableTime */
                t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
                /* RetransTime (in milliseconds)*/
                t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
                /* ReachableTime (in milliseconds) */
                t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
        } else {
                /* Those handlers will update p->reachable_time after
                 * base_reachable_time(_ms) is set to ensure the new timer starts being
                 * applied after the next neighbour update instead of waiting for
                 * neigh_periodic_work to update its value (can be multiple minutes)
                 * So any handler that replaces them should do this as well
                 */
                /* ReachableTime */
                t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
                        neigh_proc_base_reachable_time;
                /* ReachableTime (in milliseconds) */
                t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
                        neigh_proc_base_reachable_time;
        }

        /* Don't export sysctls to unprivileged users */
        if (neigh_parms_net(p)->user_ns != &init_user_ns)
                t->neigh_vars[0].procname = NULL;

        switch (neigh_parms_family(p)) {
        case AF_INET:
              p_name = "ipv4";
              break;
        case AF_INET6:
              p_name = "ipv6";
              break;
        default:
              BUG();
        }

        snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
                p_name, dev_name_source);
        t->sysctl_header =
                register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
        if (!t->sysctl_header)
                goto free;

        p->sysctl_table = t;
        return 0;

free:
        kfree(t);
err:
        return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
3721
3722 void neigh_sysctl_unregister(struct neigh_parms *p)
3723 {
3724         if (p->sysctl_table) {
3725                 struct neigh_sysctl_table *t = p->sysctl_table;
3726                 p->sysctl_table = NULL;
3727                 unregister_net_sysctl_table(t->sysctl_header);
3728                 kfree(t);
3729         }
3730 }
3731 EXPORT_SYMBOL(neigh_sysctl_unregister);
3732
3733 #endif  /* CONFIG_SYSCTL */
3734
/* Register the rtnetlink handlers for neighbour entries (NEW/DEL/GET)
 * and neighbour tables (GET dump, SET).  PF_UNSPEC makes these the
 * handlers for every protocol family.
 */
static int __init neigh_init(void)
{
        rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

        rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
                      0);
        rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

        return 0;
}

subsys_initcall(neigh_init);