// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */
#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>
static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);
#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE  (1U << NH_DEV_HASHBITS)
static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
};
static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};
static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
};
static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}
static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}
static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}
static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}
static int nh_notifier_mp_info_init(struct nh_notifier_info *info,
				    struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].id = nhge->nh->id;
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}
static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}
static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->mpath)
		return nh_notifier_mp_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	WARN_ON_ONCE(1);
	return -EINVAL;
}
static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->mpath)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}
static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}
static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}
static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}
static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}
static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}
static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}
static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}
/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))
static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}
static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mp_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}
static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}
static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}
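
/* Worked example (editor's note, not from the original source): for
 * ifindex 0x010203, the fold above computes
 * (0x03 ^ 0x02 ^ 0x01) & 0xff = 0x00, so the device lands in bucket 0 of
 * the 256-entry (NH_DEV_HASHSIZE) device hash. Bits above the third byte
 * of the ifindex do not influence the bucket choice.
 */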
static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}
static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}
static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}
void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);
static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
	}
	return nh;
}
static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}
static void nh_res_table_upkeep_dw(struct work_struct *work);
static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}
static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}
/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);
/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}
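
/* Illustrative walk-through (editor's note, not from the original source):
 * if last_id_allocated is 41, the loop tries 42, 43, ... and returns the
 * first id with no rb-tree entry. Only after wrapping through the whole
 * 32-bit space back to 41 does it give up and return 0, which the caller
 * (nexthop_add()) rejects with "No unused id".
 */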
static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}
static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->mpath)
		group_type = NEXTHOP_GRP_TYPE_MPATH;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
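
/* Editor's note on the "- 1" above: group weights are stored 1-based in
 * the kernel (nexthop_create_group() adds 1 to the value from user space),
 * while struct nexthop_grp carries the 0-based wire encoding. A nexthop
 * configured with wire weight 3 is weight 4 internally and is dumped back
 * as 3 here.
 */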
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nhg))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;

	return nla_total_size(sz) +
	       nla_total_size(2); /* NHA_GROUP_TYPE */
}
static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}
static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}
static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}
static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}
static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}
static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}
static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}
static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}
static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->mpath) {
			NL_SET_ERR_MSG(extack,
				       "Multipath group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}
static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);
	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}
static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int i, j;
	u8 nhg_fdb = 0;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd1 || nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nhg[i].weight > 254) {
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	if (tb[NHA_FDB])
		nhg_fdb = 1;
	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		if (i == NHA_FDB)
			continue;
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}
static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}
static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}
static struct nexthop *nexthop_select_path_mp(struct nh_group *nhg, int hash)
{
	struct nexthop *rc = NULL;
	int i;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		if (hash > atomic_read(&nhge->mpath.upper_bound))
			continue;

		nhi = rcu_dereference(nhge->nh->nh_info);
		if (nhi->fdb_nh)
			return nhge->nh;

		/* a nexthop group always checks that its nexthops are good;
		 * it does not rely on a sysctl for this behavior
		 */
		switch (nhi->family) {
		case AF_INET:
			if (ipv4_good_nh(&nhi->fib_nh))
				return nhge->nh;
			break;
		case AF_INET6:
			if (ipv6_good_nh(&nhi->fib6_nh))
				return nhge->nh;
			break;
		}

		if (!rc)
			rc = nhge->nh;
	}

	return rc;
}
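
/* Worked example of the hash-threshold walk above (editor's sketch, not
 * from the original source): with two entries of weight 1 and 3,
 * nh_mp_group_rebalance() sets upper bounds 0x1fffffff and 0x7fffffff. A
 * flow hash of 0x20000000 skips entry 0 (hash > bound) and selects entry 1
 * if its neighbour is NUD_VALID; otherwise 'rc' falls back to the first
 * entry whose bound covers the hash.
 */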
static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	return nhge->nh;
}
struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->mpath)
		return nexthop_select_path_mp(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);
int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);
static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}
int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);
/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}
static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}
/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}
static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}
static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}
static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}
static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}
static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or unbalanced timer expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);

	return false;
}
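
/* Decision timeline (editor's illustration, not from the original source):
 * with idle_timer = 10s and unbalanced_timer = 60s, an overweight bucket
 * last hit at t=0 becomes eligible for a non-forced (atomic) migration at
 * t=10s, and for a forced migration 60s after the table first became
 * unbalanced, even if traffic keeps refreshing its used_time.
 */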
static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}
#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL	(HZ / 2)
static void nh_res_table_upkeep(struct nh_res_table *res_table, bool notify)
{
	unsigned long now = jiffies;
	unsigned long deadline;
	u16 i;

	/* Deadline is the next time that upkeep should be run. It is the
	 * earliest time at which one of the buckets might be migrated.
	 * Start at the most pessimistic estimate: either unbalanced_timer
	 * from now, or if there is none, idle_timer from now. For each
	 * encountered time point, call nh_res_time_set_deadline() to
	 * refine the estimate.
	 */
	if (res_table->unbalanced_timer)
		deadline = now + res_table->unbalanced_timer;
	else
		deadline = now + res_table->idle_timer;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		bool force;

		if (nh_res_bucket_should_migrate(res_table, bucket,
						 &deadline, &force)) {
			if (!nh_res_bucket_migrate(res_table, i, notify,
						   force)) {
				unsigned long idle_point;

				/* A driver can override the migration
				 * decision if the HW reports that the
				 * bucket is actually not idle. Therefore
				 * remark the bucket as busy again and
				 * update the deadline.
				 */
				nh_res_bucket_set_busy(bucket);
				idle_point = nh_res_bucket_idle_point(res_table,
								      bucket,
								      now);
				nh_res_time_set_deadline(idle_point, &deadline);
			}
		}
	}

	/* If the group is still unbalanced, schedule the next upkeep to
	 * either the deadline computed above, or the minimum deadline,
	 * whichever comes later.
	 */
	if (!nh_res_table_is_balanced(res_table)) {
		unsigned long now = jiffies;
		unsigned long min_deadline;

		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
		if (time_before(deadline, min_deadline))
			deadline = min_deadline;

		queue_delayed_work(system_power_efficient_wq,
				   &res_table->upkeep_dw, deadline - now);
	}
}
static void nh_res_table_upkeep_dw(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct nh_res_table *res_table;

	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
	nh_res_table_upkeep(res_table, true);
}
static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
	cancel_delayed_work_sync(&res_table->upkeep_dw);
}
static void nh_res_group_rebalance(struct nh_group *nhg,
				   struct nh_res_table *res_table)
{
	int prev_upper_bound = 0;
	int total = 0;
	int w = 0;
	int i;

	INIT_LIST_HEAD(&res_table->uw_nh_entries);

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
						total);
		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
		prev_upper_bound = upper_bound;

		if (nh_res_nhge_is_uw(nhge)) {
			if (list_empty(&res_table->uw_nh_entries))
				res_table->unbalanced_since = jiffies;
			list_add(&nhge->res.uw_nh_entry,
				 &res_table->uw_nh_entries);
		}
	}
}
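
/* Example of the apportionment above (editor's sketch, not from the
 * original source): an 8-bucket table over weights 1 and 3 (total = 4)
 * yields running upper bounds DIV_ROUND_CLOSEST(8 * 1, 4) = 2 and
 * DIV_ROUND_CLOSEST(8 * 4, 4) = 8, so the entries want 2 and 6 buckets
 * respectively. An entry holding fewer buckets than it wants is put on
 * uw_nh_entries and is fed buckets by the upkeep work.
 */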
/* Migrate buckets in res_table so that they reference NHGE's from NHG with
 * the right NH ID. Set those buckets that do not have a corresponding NHGE
 * entry in NHG as not occupied.
 */
static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
					 struct nh_group *nhg)
{
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
		bool found = false;
		int j;

		for (j = 0; j < nhg->num_nh; j++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[j];

			if (nhge->nh->id == id) {
				nh_res_bucket_set_nh(bucket, nhge);
				found = true;
				break;
			}
		}

		if (!found)
			nh_res_bucket_unset_nh(bucket);
	}
}
static void replace_nexthop_grp_res(struct nh_group *oldg,
				    struct nh_group *newg)
{
	/* For NH group replacement, the new NHG might only have a stub
	 * hash table with 0 buckets, because the number of buckets was not
	 * specified. For NH removal, oldg and newg both reference the same
	 * res_table. So in any case, in the following, we want to work
	 * with oldg->res_table.
	 */
	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);

	nh_res_table_cancel_upkeep(old_res_table);
	nh_res_table_migrate_buckets(old_res_table, newg);
	nh_res_group_rebalance(newg, old_res_table);
	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
		old_res_table->unbalanced_since = prev_unbalanced_since;
	nh_res_table_upkeep(old_res_table, true);
}
static void nh_mp_group_rebalance(struct nh_group *nhg)
{
	int total = 0;
	int w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->mpath.upper_bound, upper_bound);
	}
}
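
/* Worked example (editor's note, not from the original source): for
 * weights 1 and 3, total = 4, and the cumulative upper bounds are
 * DIV_ROUND_CLOSEST_ULL(1ULL << 31, 4) - 1 = 0x1fffffff and
 * DIV_ROUND_CLOSEST_ULL(4ULL << 31, 4) - 1 = 0x7fffffff, so a uniform
 * 31-bit flow hash picks the two entries with probability 1/4 and 3/4.
 */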
static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
				struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhges, *new_nhges;
	struct nexthop *nhp = nhge->nh_parent;
	struct netlink_ext_ack extack;
	struct nexthop *nh = nhge->nh;
	struct nh_group *nhg, *newg;
	int i, j, err;

	WARN_ON(!nh);

	nhg = rtnl_dereference(nhp->nh_grp);
	newg = nhg->spare;

	/* last entry, keep it visible and remove the parent */
	if (nhg->num_nh == 1) {
		remove_nexthop(net, nhp, nlinfo);
		return;
	}

	newg->has_v4 = false;
	newg->is_multipath = nhg->is_multipath;
	newg->mpath = nhg->mpath;
	newg->resilient = nhg->resilient;
	newg->fdb_nh = nhg->fdb_nh;
	newg->num_nh = nhg->num_nh;

	/* copy old entries to new except the one getting removed */
	nhges = nhg->nh_entries;
	new_nhges = newg->nh_entries;
	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
		struct nh_info *nhi;

		/* current nexthop getting removed */
		if (nhg->nh_entries[i].nh == nh) {
			newg->num_nh--;
			continue;
		}

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			newg->has_v4 = true;

		list_del(&nhges[i].nh_list);
		new_nhges[j].nh_parent = nhges[i].nh_parent;
		new_nhges[j].nh = nhges[i].nh;
		new_nhges[j].weight = nhges[i].weight;
		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
		j++;
	}

	if (newg->mpath)
		nh_mp_group_rebalance(newg);
	else if (newg->resilient)
		replace_nexthop_grp_res(nhg, newg);

	rcu_assign_pointer(nhp->nh_grp, newg);

	list_del(&nhge->nh_list);
	nexthop_put(nhge->nh);

	/* Removal of a NH from a resilient group is notified through
	 * bucket notifications.
	 */
	if (newg->mpath) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
					     &extack);
		if (err)
			pr_err("%s\n", extack._msg);
	}

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
}
static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
		remove_nh_grp_entry(net, nhge, nlinfo);

	/* make sure all see the newly published array before releasing rtnl */
	synchronize_net();
}
static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	struct nh_res_table *res_table;
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del_init(&nhge->nh_list);
	}

	if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		nh_res_table_cancel_upkeep(res_table);
	}
}
/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i,
				      !net->ipv4.sysctl_nexthop_compat_mode);
	}
}
static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}
static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);

	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}
/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);
}
static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new, const struct nh_config *cfg,
			       struct netlink_ext_ack *extack)
{
	struct nh_res_table *tmp_table = NULL;
	struct nh_res_table *new_res_table;
	struct nh_res_table *old_res_table;
	struct nh_group *oldg, *newg;
	int i, err;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	if (newg->mpath != oldg->mpath) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
		return -EINVAL;
	}

	if (newg->mpath) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
					     extack);
		if (err)
			return err;
	} else if (newg->resilient) {
		new_res_table = rtnl_dereference(newg->res_table);
		old_res_table = rtnl_dereference(oldg->res_table);

		/* Accept if num_nh_buckets was not given, but if it was
		 * given, demand that the value be correct.
		 */
		if (cfg->nh_grp_res_has_num_buckets &&
		    cfg->nh_grp_res_num_buckets !=
		    old_res_table->num_nh_buckets) {
			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
			return -EINVAL;
		}

		/* Emit a pre-replace notification so that listeners could veto
		 * a potentially unsupported configuration. Otherwise,
		 * individual bucket replacement notifications would need to be
		 * vetoed, which is something that should only happen if the
		 * bucket is currently active.
		 */
		err = call_nexthop_res_table_notifiers(net, new, extack);
		if (err)
			return err;

		if (cfg->nh_grp_res_has_idle_timer)
			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
		if (cfg->nh_grp_res_has_unbalanced_timer)
			old_res_table->unbalanced_timer =
				cfg->nh_grp_res_unbalanced_timer;

		replace_nexthop_grp_res(oldg, newg);

		tmp_table = new_res_table;
		rcu_assign_pointer(newg->res_table, old_res_table);
		rcu_assign_pointer(newg->spare->res_table, old_res_table);
	}

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	if (newg->resilient) {
		rcu_assign_pointer(oldg->res_table, tmp_table);
		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
	}

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}
static void nh_group_v4_update(struct nh_group *nhg)
{
	struct nh_grp_entry *nhges;
	bool has_v4 = false;
	int i;

	nhges = nhg->nh_entries;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			has_v4 = true;
	}
	nhg->has_v4 = has_v4;
}
static int replace_nexthop_single_notify_res(struct net *net,
					     struct nh_res_table *res_table,
					     struct nexthop *old,
					     struct nh_info *oldi,
					     struct nh_info *newi,
					     struct netlink_ext_ack *extack)
{
	u32 nhg_id = res_table->nhg_id;
	int err;
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old) {
			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
								  i, true,
								  oldi, newi,
								  extack);
			if (err)
				goto err_notify;
		}
	}

	return 0;

err_notify:
	while (i-- > 0) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old)
			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
							    true, newi, oldi,
							    extack);
	}
	return err;
}
static int replace_nexthop_single_notify(struct net *net,
					 struct nexthop *group_nh,
					 struct nexthop *old,
					 struct nh_info *oldi,
					 struct nh_info *newi,
					 struct netlink_ext_ack *extack)
{
	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
	struct nh_res_table *res_table;

	if (nhg->mpath) {
		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
					      group_nh, extack);
	} else if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		return replace_nexthop_single_notify_res(net, res_table,
							 old, oldi, newi,
							 extack);
	}

	return -EINVAL;
}
static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	u8 old_protocol, old_nh_flags;
	struct nh_info *oldi, *newi;
	struct nh_grp_entry *nhge;
	int err;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
	if (err)
		return err;

	/* Hardware flags were set on 'old' as 'new' is not in the red-black
	 * tree. Therefore, inherit the flags from 'old' to 'new'.
	 */
	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old_protocol = old->protocol;
	old_nh_flags = old->nh_flags;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	/* Send a replace notification for all the groups using the nexthop. */
	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
						    extack);
		if (err)
			goto err_notify;
	}

	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
	 * update IPv4 indication in all the groups using the nexthop.
	 */
	if (oldi->family == AF_INET && newi->family == AF_INET6) {
		list_for_each_entry(nhge, &old->grp_list, nh_list) {
			struct nexthop *nhp = nhge->nh_parent;
			struct nh_group *nhg;

			nhg = rtnl_dereference(nhp->nh_grp);
			nh_group_v4_update(nhg);
		}
	}

	return 0;

err_notify:
	rcu_assign_pointer(new->nh_info, newi);
	rcu_assign_pointer(old->nh_info, oldi);
	old->nh_flags = old_nh_flags;
	old->protocol = old_protocol;
	oldi->nh_parent = old;
	newi->nh_parent = new;
	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
	}
	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
	return err;
}
static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}
/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}
static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, const struct nh_config *cfg,
			   struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, cfg, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}
/* called with rtnl_lock held */
static int insert_nexthop(struct net *net, struct nexthop *new_nh,
			  struct nh_config *cfg, struct netlink_ext_ack *extack)
{
	struct rb_node **pp, *parent = NULL, *next;
	struct rb_root *root = &net->nexthop.rb_root;
	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
	bool create = !!(cfg->nlflags & NLM_F_CREATE);
	u32 new_id = new_nh->id;
	int replace_notify = 0;
	int rc = -EEXIST;

	pp = &root->rb_node;
	while (1) {
		struct nexthop *nh;

		next = *pp;
		if (!next)
			break;

		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (new_id < nh->id) {
			pp = &next->rb_left;
		} else if (new_id > nh->id) {
			pp = &next->rb_right;
		} else if (replace) {
			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
			if (!rc) {
				new_nh = nh; /* send notification with old nh */
				replace_notify = 1;
			}
			goto out;
		} else {
			/* id already exists and not a replace */
			goto out;
		}
	}

	if (replace && !create) {
		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
		rc = -ENOENT;
		goto out;
	}

	if (new_nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
		struct nh_res_table *res_table;

		if (nhg->resilient) {
			res_table = rtnl_dereference(nhg->res_table);

			/* Not passing the number of buckets is OK when
			 * replacing, but not when creating a new group.
			 */
			if (!cfg->nh_grp_res_has_num_buckets) {
				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
				rc = -EINVAL;
				goto out;
			}

			nh_res_group_rebalance(nhg, res_table);

			/* Do not send bucket notifications, we do full
			 * notification below.
			 */
			nh_res_table_upkeep(res_table, false);
		}
	}

	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
	rb_insert_color(&new_nh->rb_node, root);

	/* The initial insertion is a full notification for mpath as well
	 * as resilient groups.
	 */
	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
	if (rc) {
		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
		goto out;
	}

	nh_base_seq_inc(net);
	nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
	if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode)
		nexthop_replace_notify(net, new_nh, &cfg->nlinfo);

out:
	return rc;
}
/* remove all nexthops tied to a device being deleted */
static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev != dev)
			continue;

		if (nhi->reject_nh &&
		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
			continue;

		remove_nexthop(net, nhi->nh_parent, NULL);
	}
}
/* rtnl; called when net namespace is deleted */
static void flush_all_nexthops(struct net *net)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	struct nexthop *nh;

	while ((node = rb_first(root))) {
		nh = rb_entry(node, struct nexthop, rb_node);
		remove_nexthop(net, nh, NULL);
		cond_resched();
	}
}
static struct nexthop *nexthop_create_group(struct net *net,
					    struct nh_config *cfg)
{
	struct nlattr *grps_attr = cfg->nh_grp;
	struct nexthop_grp *entry = nla_data(grps_attr);
	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
	struct nh_group *nhg;
	struct nexthop *nh;
	int err;
	int i;

	if (WARN_ON(!num_nh))
		return ERR_PTR(-EINVAL);

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nh->is_group = 1;

	nhg = nexthop_grp_alloc(num_nh);
	if (!nhg) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	/* spare group used for removals */
	nhg->spare = nexthop_grp_alloc(num_nh);
	if (!nhg->spare) {
		kfree(nhg);
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}
	nhg->spare->spare = nhg;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nexthop *nhe;
		struct nh_info *nhi;

		nhe = nexthop_find_by_id(net, entry[i].id);
		if (!nexthop_get(nhe)) {
			err = -ENOENT;
			goto out_no_nh;
		}

		nhi = rtnl_dereference(nhe->nh_info);
		if (nhi->family == AF_INET)
			nhg->has_v4 = true;

		nhg->nh_entries[i].nh = nhe;
		nhg->nh_entries[i].weight = entry[i].weight + 1;
		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
		nhg->nh_entries[i].nh_parent = nh;
	}

	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
		nhg->mpath = 1;
		nhg->is_multipath = true;
	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
		struct nh_res_table *res_table;

		/* Bounce resilient groups for now. */
		err = -EINVAL;
		goto out_no_nh;

		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
		if (!res_table) {
			err = -ENOMEM;
			goto out_no_nh;
		}

		rcu_assign_pointer(nhg->spare->res_table, res_table);
		rcu_assign_pointer(nhg->res_table, res_table);
		nhg->resilient = true;
		nhg->is_multipath = true;
	}

	WARN_ON_ONCE(nhg->mpath + nhg->resilient != 1);

	if (nhg->mpath)
		nh_mp_group_rebalance(nhg);

	if (cfg->nh_fdb)
		nhg->fdb_nh = 1;

	rcu_assign_pointer(nh->nh_grp, nhg);

	return nh;

out_no_nh:
	for (i--; i >= 0; --i) {
		list_del(&nhg->nh_entries[i].nh_list);
		nexthop_put(nhg->nh_entries[i].nh);
	}

	kfree(nhg->spare);
	kfree(nhg);
	kfree(nh);

	return ERR_PTR(err);
}
static int nh_create_ipv4(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib_nh *fib_nh = &nhi->fib_nh;
	struct fib_config fib_cfg = {
		.fc_oif = cfg->nh_ifindex,
		.fc_gw4 = cfg->gw.ipv4,
		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
		.fc_flags = cfg->nh_flags,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
	int err;

	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
	if (err) {
		fib_nh_release(net, fib_nh);
		goto out;
	}

	if (nhi->fdb_nh)
		goto out;

	/* sets nh_dev if successful */
	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  fib_nh->fib_nh_scope);
	} else {
		fib_nh_release(net, fib_nh);
	}
out:
	return err;
}
static int nh_create_ipv6(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
	struct fib6_config fib6_cfg = {
		.fc_table = l3mdev_fib_table(cfg->dev),
		.fc_ifindex = cfg->nh_ifindex,
		.fc_gateway = cfg->gw.ipv6,
		.fc_flags = cfg->nh_flags,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
		.fc_is_fdb = cfg->nh_fdb,
	};
	int err;

	if (!ipv6_addr_any(&cfg->gw.ipv6))
		fib6_cfg.fc_flags |= RTF_GATEWAY;

	/* sets nh_dev if successful */
	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
				      extack);
	if (err)
		ipv6_stub->fib6_nh_release(fib6_nh);
	else
		nh->nh_flags = fib6_nh->fib_nh_flags;

	return err;
}
static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
				      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	struct nexthop *nh;
	int err = 0;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
	if (!nhi) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	nh->nh_flags = cfg->nh_flags;
	nh->net = net;

	nhi->nh_parent = nh;
	nhi->family = cfg->nh_family;
	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;

	if (cfg->nh_fdb)
		nhi->fdb_nh = 1;

	if (cfg->nh_blackhole) {
		nhi->reject_nh = 1;
		cfg->nh_ifindex = net->loopback_dev->ifindex;
	}

	switch (cfg->nh_family) {
	case AF_INET:
		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
		break;
	case AF_INET6:
		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
		break;
	}

	if (err) {
		kfree(nhi);
		kfree(nh);
		return ERR_PTR(err);
	}

	/* add the entry to the device based hash */
	if (!nhi->fdb_nh)
		nexthop_devhash_add(net, nhi);

	rcu_assign_pointer(nh->nh_info, nhi);

	return nh;
}
/* called with rtnl lock held */
static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct nexthop *nh;
	int err;

	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
		return ERR_PTR(-EINVAL);
	}

	if (!cfg->nh_id) {
		cfg->nh_id = nh_find_unused_id(net);
		if (!cfg->nh_id) {
			NL_SET_ERR_MSG(extack, "No unused id");
			return ERR_PTR(-EINVAL);
		}
	}

	if (cfg->nh_grp)
		nh = nexthop_create_group(net, cfg);
	else
		nh = nexthop_create(net, cfg, extack);

	if (IS_ERR(nh))
		return nh;

	refcount_set(&nh->refcnt, 1);
	nh->id = cfg->nh_id;
	nh->protocol = cfg->nh_protocol;
	nh->net = net;

	err = insert_nexthop(net, nh, cfg, extack);
	if (err) {
		__remove_nexthop(net, nh, NULL);
		nexthop_put(nh);
		nh = ERR_PTR(err);
	}

	return nh;
}
static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
			    struct nlmsghdr *nlh, struct nh_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
	int err;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
			  rtm_nh_policy_new, extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (nhm->resvd || nhm->nh_scope) {
		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
		goto out;
	}
	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
		goto out;
	}

	switch (nhm->nh_family) {
	case AF_INET:
	case AF_INET6:
		break;
	case AF_UNSPEC:
		if (tb[NHA_GROUP])
			break;
		fallthrough;
	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	memset(cfg, 0, sizeof(*cfg));
	cfg->nlflags = nlh->nlmsg_flags;
	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->nlinfo.nlh = nlh;
	cfg->nlinfo.nl_net = net;

	cfg->nh_family = nhm->nh_family;
	cfg->nh_protocol = nhm->nh_protocol;
	cfg->nh_flags = nhm->nh_flags;

	if (tb[NHA_ID])
		cfg->nh_id = nla_get_u32(tb[NHA_ID]);

	if (tb[NHA_FDB]) {
		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
			goto out;
		}
		if (nhm->nh_flags) {
			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
			goto out;
		}
		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
	}

	if (tb[NHA_GROUP]) {
		if (nhm->nh_family != AF_UNSPEC) {
			NL_SET_ERR_MSG(extack, "Invalid family for group");
			goto out;
		}
		cfg->nh_grp = tb[NHA_GROUP];

		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
		if (tb[NHA_GROUP_TYPE])
			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);

		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid group type");
			goto out;
		}
		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb), extack);

		/* no other attributes should be set */
		goto out;
	}

	if (tb[NHA_BLACKHOLE]) {
		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
			goto out;
		}

		cfg->nh_blackhole = 1;
		err = 0;
		goto out;
	}

	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
		goto out;
	}

	if (!cfg->nh_fdb && tb[NHA_OIF]) {
		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
		if (cfg->nh_ifindex)
			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);

		if (!cfg->dev) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			goto out;
		} else if (!(cfg->dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		} else if (!netif_carrier_ok(cfg->dev)) {
			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
			err = -ENETDOWN;
			goto out;
		}
	}

	err = -EINVAL;
	if (tb[NHA_GATEWAY]) {
		struct nlattr *gwa = tb[NHA_GATEWAY];

		switch (cfg->nh_family) {
		case AF_INET:
			if (nla_len(gwa) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv4 = nla_get_be32(gwa);
			break;
		case AF_INET6:
			if (nla_len(gwa) != sizeof(struct in6_addr)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
			break;
		default:
			NL_SET_ERR_MSG(extack,
				       "Unknown address family for gateway");
			goto out;
		}
	} else {
		/* device only nexthop (no gateway) */
		if (cfg->nh_flags & RTNH_F_ONLINK) {
			NL_SET_ERR_MSG(extack,
				       "ONLINK flag can not be set for nexthop without a gateway");
			goto out;
		}
	}

	if (tb[NHA_ENCAP]) {
		cfg->nh_encap = tb[NHA_ENCAP];

		if (!tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
			goto out;
		}

		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
		if (err < 0)
			goto out;

	} else if (tb[NHA_ENCAP_TYPE]) {
		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
		goto out;
	}

	err = 0;
out:
	return err;
}
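/* For reference, the attribute combinations validated above correspond
 * roughly to the following iproute2 invocations (illustrative only;
 * the ids are arbitrary):
 *
 *	ip nexthop add id 1 blackhole              // NHA_BLACKHOLE
 *	ip nexthop add id 2 dev eth0               // NHA_OIF only
 *	ip nexthop add id 3 via 10.0.0.1 dev eth0  // NHA_GATEWAY + NHA_OIF
 *	ip nexthop add id 4 via 10.0.0.1 fdb       // NHA_FDB, no NHA_OIF
 *	ip nexthop add id 5 group 2/3              // NHA_GROUP
 */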
/* rtnl */
static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nh_config cfg;
	struct nexthop *nh;
	int err;

	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
	if (!err) {
		nh = nexthop_add(net, &cfg, extack);
		if (IS_ERR(nh))
			err = PTR_ERR(nh);
	}

	return err;
}
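/* The request handled above is plain rtnetlink. A minimal userspace
 * sketch using libmnl (assumed available; includes and error handling
 * elided) that builds the equivalent of
 * "ip nexthop add id 10 via 10.0.0.1 dev eth0":
 *
 *	char buf[MNL_SOCKET_BUFFER_SIZE];
 *	struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct nhmsg *nhm;
 *
 *	nlh->nlmsg_type = RTM_NEWNEXTHOP;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
 *	nhm = mnl_nlmsg_put_extra_header(nlh, sizeof(*nhm));
 *	nhm->nh_family = AF_INET;
 *	mnl_attr_put_u32(nlh, NHA_ID, 10);
 *	mnl_attr_put_u32(nlh, NHA_OIF, if_nametoindex("eth0"));
 *	mnl_attr_put_u32(nlh, NHA_GATEWAY, inet_addr("10.0.0.1"));
 *	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
 *	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
 */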
static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
				  struct nlattr **tb, u32 *id,
				  struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);

	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header");
		return -EINVAL;
	}

	if (!tb[NHA_ID]) {
		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
		return -EINVAL;
	}

	*id = nla_get_u32(tb[NHA_ID]);
	if (!(*id)) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
		return -EINVAL;
	}

	return 0;
}
static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get) - 1,
			  rtm_nh_policy_get, extack);
	if (err < 0)
		return err;

	return __nh_valid_get_del_req(nlh, tb, id, extack);
}
/* rtnl */
static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nl_info nlinfo = {
		.nlh = nlh,
		.nl_net = net,
		.portid = NETLINK_CB(skb).portid,
	};
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return -ENOENT;

	remove_nexthop(net, nh, &nlinfo);

	return 0;
}
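/* "ip nexthop del id 10" lands here; the nl_info built above carries the
 * requesting socket's portid, so the RTM_DELNEXTHOP notification that
 * remove_nexthop() emits is not echoed back to the requester.
 */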
/* rtnl */
static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *skb = NULL;
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	err = -ENOBUFS;
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto out;

	err = -ENOENT;
	nh = nexthop_find_by_id(net, id);
	if (!nh)
		goto errout_free;

	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	return err;
errout_free:
	kfree_skb(skb);
	goto out;
}
struct nh_dump_filter {
	int dev_idx;
	int master_idx;
	bool group_filter;
	bool fdb_filter;
};
static bool nh_dump_filtered(struct nexthop *nh,
			     struct nh_dump_filter *filter, u8 family)
{
	const struct net_device *dev;
	const struct nh_info *nhi;

	if (filter->group_filter && !nh->is_group)
		return true;

	if (!filter->dev_idx && !filter->master_idx && !family)
		return false;

	if (nh->is_group)
		return true;

	nhi = rtnl_dereference(nh->nh_info);
	if (family && nhi->family != family)
		return true;

	dev = nhi->fib_nhc.nhc_dev;
	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
		return true;

	if (filter->master_idx) {
		struct net_device *master;

		if (!dev)
			return true;

		master = netdev_master_upper_dev_get((struct net_device *)dev);
		if (!master || master->ifindex != filter->master_idx)
			return true;
	}

	return false;
}
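/* These filters are driven by the dump request attributes, e.g. the
 * iproute2 commands (illustrative):
 *
 *	ip nexthop list dev eth0    // NHA_OIF    -> filter->dev_idx
 *	ip nexthop list master br0  // NHA_MASTER -> filter->master_idx
 *	ip nexthop list groups      // NHA_GROUPS -> filter->group_filter
 */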
static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
			       struct nh_dump_filter *filter,
			       struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm;
	u32 idx;

	if (tb[NHA_OIF]) {
		idx = nla_get_u32(tb[NHA_OIF]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			return -EINVAL;
		}
		filter->dev_idx = idx;
	}
	if (tb[NHA_MASTER]) {
		idx = nla_get_u32(tb[NHA_MASTER]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid master device index");
			return -EINVAL;
		}
		filter->master_idx = idx;
	}
	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);

	nhm = nlmsg_data(nlh);
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
		return -EINVAL;
	}

	return 0;
}
static int nh_valid_dump_req(const struct nlmsghdr *nlh,
			     struct nh_dump_filter *filter,
			     struct netlink_callback *cb)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
			  rtm_nh_policy_dump, cb->extack);
	if (err < 0)
		return err;

	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}
struct rtm_dump_nh_ctx {
	u32 idx;
};

static struct rtm_dump_nh_ctx *
rtm_dump_nh_ctx(struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;

	/* the resume index must fit in the netlink callback scratch area */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
	return ctx;
}
static int rtm_dump_walk_nexthops(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct rb_root *root,
				  struct rtm_dump_nh_ctx *ctx,
				  int (*nh_cb)(struct sk_buff *skb,
					       struct netlink_callback *cb,
					       struct nexthop *nh, void *data),
				  void *data)
{
	struct rb_node *node;
	int idx = 0, s_idx;
	int err;

	s_idx = ctx->idx;
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		/* skip entries already dumped in a previous invocation */
		if (idx < s_idx)
			goto cont;

		nh = rb_entry(node, struct nexthop, rb_node);
		ctx->idx = idx;
		err = nh_cb(skb, cb, nh, data);
		if (err)
			return err;

cont:
		idx++;
	}

	ctx->idx = idx;
	return 0;
}
static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
			       struct nexthop *nh, void *data)
{
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_dump_filter *filter = data;

	if (nh_dump_filtered(nh, filter, nhm->nh_family))
		return 0;

	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
			    NETLINK_CB(cb->skb).portid,
			    cb->nlh->nlmsg_seq, NLM_F_MULTI);
}
/* rtnl */
static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct rb_root *root = &net->nexthop.rb_root;
	struct nh_dump_filter filter = {};
	int err;

	err = nh_valid_dump_req(cb->nlh, &filter, cb);
	if (err < 0)
		return err;

	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
				     &rtm_dump_nexthop_cb, &filter);
	if (err < 0) {
		if (likely(skb->len))
			goto out;
		goto out_err;
	}

out:
	err = skb->len;
out_err:
	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}
static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}
static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGE:
		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};
static int nexthops_dump(struct net *net, struct notifier_block *nb,
			 struct netlink_ext_ack *extack)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	int err = 0;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
					    extack);
		if (err)
			break;
	}

	return err;
}
int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
			      struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	/* replay existing nexthops so a late registrant sees current state */
	err = nexthops_dump(net, nb, extack);
	if (err)
		goto unlock;
	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
					       nb);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);

int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
						  nb);
}
EXPORT_SYMBOL(unregister_nexthop_notifier);
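/* A minimal sketch of a driver-side consumer of this notifier API,
 * assuming only what this file exports; my_program_nexthop() and
 * my_del_nexthop() are hypothetical driver helpers:
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		switch (event) {
 *		case NEXTHOP_EVENT_REPLACE:
 *			if (info->type == NH_NOTIFIER_INFO_TYPE_SINGLE)
 *				my_program_nexthop(info->id, info->nh);
 *			break;
 *		case NEXTHOP_EVENT_DEL:
 *			my_del_nexthop(info->id);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_nh_event,
 *	};
 *
 *	err = register_nexthop_notifier(net, &my_nb, extack);
 */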
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
{
	struct nexthop *nexthop;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop)
		goto out;

	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		nexthop->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		nexthop->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_set_hw_flags);
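/* E.g., once a driver notifier such as my_nh_event() above has
 * successfully programmed nexthop 'id' into hardware, it would
 * typically report that back with:
 *
 *	nexthop_set_hw_flags(net, id, true, false);
 *
 * so that the "offload" indication shows up in RTM_GETNEXTHOP dumps.
 */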
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap)
{
	struct nh_res_table *res_table;
	struct nh_res_bucket *bucket;
	struct nexthop *nexthop;
	struct nh_group *nhg;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* dereference the table once and bounds-check through that copy */
	res_table = rcu_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets)
		goto out;

	bucket = &res_table->nh_buckets[bucket_index];
	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		bucket->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		bucket->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nexthop *nexthop;
	struct nh_group *nhg;
	u16 i;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Instead of silently ignoring some buckets, demand that the sizes
	 * be the same.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto out;

	for (i = 0; i < num_buckets; i++) {
		if (test_bit(i, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);
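/* Sketch of the expected driver call, run periodically for a resilient
 * group 'id' with 'num_buckets' buckets; my_hw_bucket_was_hit() is a
 * hypothetical query of the device's activity counters:
 *
 *	unsigned long *activity;
 *	u16 i;
 *
 *	activity = bitmap_zalloc(num_buckets, GFP_KERNEL);
 *	if (!activity)
 *		return;
 *	for (i = 0; i < num_buckets; i++)
 *		if (my_hw_bucket_was_hit(id, i))
 *			set_bit(i, activity);
 *	nexthop_res_grp_activity_update(net, id, num_buckets, activity);
 *	bitmap_free(activity);
 */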
static void __net_exit nexthop_net_exit(struct net *net)
{
	rtnl_lock();
	flush_all_nexthops(net);
	rtnl_unlock();
	kfree(net->nexthop.devhash);
}
static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;
	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

	return 0;
}
static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
};
static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	return 0;
}
subsys_initcall(nexthop_init);