1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * Routing netlink socket interface: protocol independent part.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12 * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
15 #include <linux/bitops.h>
16 #include <linux/errno.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/socket.h>
20 #include <linux/kernel.h>
21 #include <linux/timer.h>
22 #include <linux/string.h>
23 #include <linux/sockios.h>
24 #include <linux/net.h>
25 #include <linux/fcntl.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/capability.h>
30 #include <linux/skbuff.h>
31 #include <linux/init.h>
32 #include <linux/security.h>
33 #include <linux/mutex.h>
34 #include <linux/if_addr.h>
35 #include <linux/if_bridge.h>
36 #include <linux/if_vlan.h>
37 #include <linux/pci.h>
38 #include <linux/etherdevice.h>
39 #include <linux/bpf.h>
41 #include <linux/uaccess.h>
43 #include <linux/inet.h>
44 #include <linux/netdevice.h>
46 #include <net/protocol.h>
48 #include <net/route.h>
52 #include <net/pkt_sched.h>
53 #include <net/fib_rules.h>
54 #include <net/rtnetlink.h>
55 #include <net/net_namespace.h>
56 #include <net/devlink.h>
60 #define RTNL_MAX_TYPE 50
61 #define RTNL_SLAVE_MAX_TYPE 40
65 rtnl_dumpit_func dumpit;
71 static DEFINE_MUTEX(rtnl_mutex);
75 mutex_lock(&rtnl_mutex);
77 EXPORT_SYMBOL(rtnl_lock);
79 int rtnl_lock_killable(void)
81 return mutex_lock_killable(&rtnl_mutex);
83 EXPORT_SYMBOL(rtnl_lock_killable);
85 static struct sk_buff *defer_kfree_skb_list;
86 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
89 tail->next = defer_kfree_skb_list;
90 defer_kfree_skb_list = head;
93 EXPORT_SYMBOL(rtnl_kfree_skbs);
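/* Editor's note: a minimal usage sketch, not part of the original file.
 * Code running with the RTNL held can queue skbs for freeing via
 * rtnl_kfree_skbs() instead of calling kfree_skb() directly; the chain is
 * then released in rtnl_unlock() after the mutex has been dropped.  For a
 * single skb, head == tail.  "my_dequeue_locked" is a hypothetical helper
 * assumed to return an skb while the caller holds the RTNL:
 *
 *	ASSERT_RTNL();
 *	skb = my_dequeue_locked(dev);
 *	if (skb)
 *		rtnl_kfree_skbs(skb, skb);
 */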
95 void __rtnl_unlock(void)
97 struct sk_buff *head = defer_kfree_skb_list;
99 defer_kfree_skb_list = NULL;
101 /* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
102 * is used. In some places, e.g. in cfg80211, we have code that will do
109 * and because netdev_run_todo() acquires the RTNL for items on the list
110 * we could cause a situation such as this:
113 * unregister_netdevice()
121 * // list not empty now
122 * // because of thread 2
124 * while (!list_empty(...))
129 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
130 * it's not used in cases where something is added to the todo list.
132 WARN_ON(!list_empty(&net_todo_list));
134 mutex_unlock(&rtnl_mutex);
137 struct sk_buff *next = head->next;
145 void rtnl_unlock(void)
147 /* This fellow will unlock it for us. */
150 EXPORT_SYMBOL(rtnl_unlock);
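/* Editor's note: an illustrative sketch of the usual locking pattern, not
 * part of the original file.  RTNL-protected configuration is done between
 * rtnl_lock() and rtnl_unlock(); "net" and "ifindex" are assumed to be in
 * scope:
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_index(net, ifindex);
 *	if (dev)
 *		err = dev_set_mtu(dev, 1400);
 *	rtnl_unlock();
 */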
152 int rtnl_trylock(void)
154 return mutex_trylock(&rtnl_mutex);
156 EXPORT_SYMBOL(rtnl_trylock);
158 int rtnl_is_locked(void)
160 return mutex_is_locked(&rtnl_mutex);
162 EXPORT_SYMBOL(rtnl_is_locked);
164 bool refcount_dec_and_rtnl_lock(refcount_t *r)
166 return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
168 EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
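/* Editor's note: illustrative sketch only.  refcount_dec_and_rtnl_lock()
 * drops a reference and returns true with the RTNL held when the count
 * reached zero, so teardown can run under the lock; "obj" and its fields
 * are hypothetical:
 *
 *	if (refcount_dec_and_rtnl_lock(&obj->refcnt)) {
 *		list_del(&obj->list);
 *		rtnl_unlock();
 *		kfree(obj);
 *	}
 */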
170 #ifdef CONFIG_PROVE_LOCKING
171 bool lockdep_rtnl_is_held(void)
173 return lockdep_is_held(&rtnl_mutex);
175 EXPORT_SYMBOL(lockdep_rtnl_is_held);
176 #endif /* #ifdef CONFIG_PROVE_LOCKING */
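/* Editor's note: lockdep_rtnl_is_held() backs the RTNL-aware RCU accessors
 * in <linux/rtnetlink.h>; rtnl_dereference() is defined as
 * rcu_dereference_protected(p, lockdep_rtnl_is_held()).  A sketch of a
 * typical open-coded check ("dev->foo_ptr" is a hypothetical field):
 *
 *	p = rcu_dereference_protected(dev->foo_ptr, lockdep_rtnl_is_held());
 */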
178 static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
180 static inline int rtm_msgindex(int msgtype)
182 int msgindex = msgtype - RTM_BASE;
185 * msgindex < 0 implies someone tried to register a netlink
186 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
187 * the message type has not been added to linux/rtnetlink.h
189 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
194 static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
196 struct rtnl_link __rcu **tab;
198 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
199 protocol = PF_UNSPEC;
201 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
203 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
205 return rcu_dereference_rtnl(tab[msgtype]);
208 static int rtnl_register_internal(struct module *owner,
209 int protocol, int msgtype,
210 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
213 struct rtnl_link *link, *old;
214 struct rtnl_link __rcu **tab;
218 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
219 msgindex = rtm_msgindex(msgtype);
222 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
224 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
228 /* ensures we see the 0 stores */
229 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
232 old = rtnl_dereference(tab[msgindex]);
234 link = kmemdup(old, sizeof(*old), GFP_KERNEL);
238 link = kzalloc(sizeof(*link), GFP_KERNEL);
243 WARN_ON(link->owner && link->owner != owner);
246 WARN_ON(doit && link->doit && link->doit != doit);
249 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
251 link->dumpit = dumpit;
253 WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
254 (flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
255 link->flags |= flags;
257 /* publish protocol:msgtype */
258 rcu_assign_pointer(tab[msgindex], link);
268 * rtnl_register_module - Register a rtnetlink message type
270 * @owner: module registering the hook (THIS_MODULE)
271 * @protocol: Protocol family or PF_UNSPEC
272 * @msgtype: rtnetlink message type
273 * @doit: Function pointer called for each request message
274 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
275 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
277 * Like rtnl_register, but for use by removable modules.
279 int rtnl_register_module(struct module *owner,
280 int protocol, int msgtype,
281 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
284 return rtnl_register_internal(owner, protocol, msgtype,
285 doit, dumpit, flags);
287 EXPORT_SYMBOL_GPL(rtnl_register_module);
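/* Editor's note: a hedged registration sketch, not taken from any real
 * module.  PF_FOO, RTM_NEWFOO and the foo_* handlers are hypothetical names
 * standing in for a module's own protocol family, message type and
 * doit/dumpit implementations:
 *
 *	err = rtnl_register_module(THIS_MODULE, PF_FOO, RTM_NEWFOO,
 *				   foo_newfoo_doit, NULL, 0);
 *	if (err)
 *		goto out_unregister;
 *
 * The matching cleanup on module exit would be
 *	rtnl_unregister(PF_FOO, RTM_NEWFOO);
 * or rtnl_unregister_all(PF_FOO) to drop every handler of that family.
 */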
290 * rtnl_register - Register a rtnetlink message type
291 * @protocol: Protocol family or PF_UNSPEC
292 * @msgtype: rtnetlink message type
293 * @doit: Function pointer called for each request message
294 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
295 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
297 * Registers the specified function pointers (at least one of them has
298 * to be non-NULL) to be called whenever a request message for the
299 * specified protocol family and message type is received.
301 * The special protocol family PF_UNSPEC may be used to define fallback
302 * function pointers for the case when no entry for the specific protocol
305 void rtnl_register(int protocol, int msgtype,
306 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
311 err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
314 pr_err("Unable to register rtnetlink message handler, "
315 "protocol = %d, message type = %d\n", protocol, msgtype);
319 * rtnl_unregister - Unregister a rtnetlink message type
320 * @protocol: Protocol family or PF_UNSPEC
321 * @msgtype: rtnetlink message type
323 * Returns 0 on success or a negative error code.
325 int rtnl_unregister(int protocol, int msgtype)
327 struct rtnl_link __rcu **tab;
328 struct rtnl_link *link;
331 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
332 msgindex = rtm_msgindex(msgtype);
335 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
341 link = rtnl_dereference(tab[msgindex]);
342 RCU_INIT_POINTER(tab[msgindex], NULL);
345 kfree_rcu(link, rcu);
349 EXPORT_SYMBOL_GPL(rtnl_unregister);
352 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
353 * @protocol : Protocol family or PF_UNSPEC
355 * Identical to calling rtnl_unregister() for all registered message types
356 * of a certain protocol family.
358 void rtnl_unregister_all(int protocol)
360 struct rtnl_link __rcu **tab;
361 struct rtnl_link *link;
364 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
367 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
372 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
373 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
374 link = rtnl_dereference(tab[msgindex]);
378 RCU_INIT_POINTER(tab[msgindex], NULL);
379 kfree_rcu(link, rcu);
387 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
389 static LIST_HEAD(link_ops);
391 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
393 const struct rtnl_link_ops *ops;
395 list_for_each_entry(ops, &link_ops, list) {
396 if (!strcmp(ops->kind, kind))
403 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
404 * @ops: struct rtnl_link_ops * to register
406 * The caller must hold the rtnl_mutex. This function should be used
407 * by drivers that create devices during module initialization. It
408 * must be called before registering the devices.
410 * Returns 0 on success or a negative error code.
412 int __rtnl_link_register(struct rtnl_link_ops *ops)
414 if (rtnl_link_ops_get(ops->kind))
417 /* The check for alloc/setup is here because if ops
418 * does not have those filled in, it is not possible
419 * to use the ops for creating a device. In that case do not
420 * fill in dellink either; that leaves rtnl_dellink disabled.
422 if ((ops->alloc || ops->setup) && !ops->dellink)
423 ops->dellink = unregister_netdevice_queue;
425 list_add_tail(&ops->list, &link_ops);
428 EXPORT_SYMBOL_GPL(__rtnl_link_register);
431 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
432 * @ops: struct rtnl_link_ops * to register
434 * Returns 0 on success or a negative error code.
436 int rtnl_link_register(struct rtnl_link_ops *ops)
440 /* Sanity-check max sizes to avoid stack buffer overflow. */
441 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
442 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
446 err = __rtnl_link_register(ops);
450 EXPORT_SYMBOL_GPL(rtnl_link_register);
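/* Editor's note: a minimal sketch of an rtnl_link_ops registration, using
 * hypothetical foo_* names; .kind, .priv_size, .setup and .validate are
 * real struct rtnl_link_ops members:
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind		= "foo",
 *		.priv_size	= sizeof(struct foo_priv),
 *		.setup		= foo_setup,
 *		.validate	= foo_validate,
 *	};
 *
 *	err = rtnl_link_register(&foo_link_ops);	// module init
 *	...
 *	rtnl_link_unregister(&foo_link_ops);		// module exit
 */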
452 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
454 struct net_device *dev;
455 LIST_HEAD(list_kill);
457 for_each_netdev(net, dev) {
458 if (dev->rtnl_link_ops == ops)
459 ops->dellink(dev, &list_kill);
461 unregister_netdevice_many(&list_kill);
465 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
466 * @ops: struct rtnl_link_ops * to unregister
468 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
469 * integrity (hold pernet_ops_rwsem for writing to close the race
470 * with setup_net() and cleanup_net()).
472 void __rtnl_link_unregister(struct rtnl_link_ops *ops)
477 __rtnl_kill_links(net, ops);
479 list_del(&ops->list);
481 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
483 /* Return with the rtnl_lock held when there are no network
484 * devices unregistering in any network namespace.
486 static void rtnl_lock_unregistering_all(void)
490 DEFINE_WAIT_FUNC(wait, woken_wake_function);
492 add_wait_queue(&netdev_unregistering_wq, &wait);
494 unregistering = false;
496 /* We hold pernet_ops_rwsem write-locked, so parallel
497 * setup_net() and cleanup_net() are not possible.
500 if (atomic_read(&net->dev_unreg_count) > 0) {
501 unregistering = true;
509 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
511 remove_wait_queue(&netdev_unregistering_wq, &wait);
515 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
516 * @ops: struct rtnl_link_ops * to unregister
518 void rtnl_link_unregister(struct rtnl_link_ops *ops)
520 /* Close the race with setup_net() and cleanup_net() */
521 down_write(&pernet_ops_rwsem);
522 rtnl_lock_unregistering_all();
523 __rtnl_link_unregister(ops);
525 up_write(&pernet_ops_rwsem);
527 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
529 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
531 struct net_device *master_dev;
532 const struct rtnl_link_ops *ops;
537 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
541 ops = master_dev->rtnl_link_ops;
542 if (!ops || !ops->get_slave_size)
544 /* IFLA_INFO_SLAVE_DATA + nested data */
545 size = nla_total_size(sizeof(struct nlattr)) +
546 ops->get_slave_size(master_dev, dev);
553 static size_t rtnl_link_get_size(const struct net_device *dev)
555 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
561 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
562 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
565 /* IFLA_INFO_DATA + nested data */
566 size += nla_total_size(sizeof(struct nlattr)) +
569 if (ops->get_xstats_size)
570 /* IFLA_INFO_XSTATS */
571 size += nla_total_size(ops->get_xstats_size(dev));
573 size += rtnl_link_get_slave_info_data_size(dev);
578 static LIST_HEAD(rtnl_af_ops);
580 static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
582 const struct rtnl_af_ops *ops;
586 list_for_each_entry(ops, &rtnl_af_ops, list) {
587 if (ops->family == family)
595 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
596 * @ops: struct rtnl_af_ops * to register
598 * Returns 0 on success or a negative error code.
600 void rtnl_af_register(struct rtnl_af_ops *ops)
603 list_add_tail_rcu(&ops->list, &rtnl_af_ops);
606 EXPORT_SYMBOL_GPL(rtnl_af_register);
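/* Editor's note: sketch of an address-family ops registration, assuming a
 * hypothetical AF_FOO family and foo_* callbacks; the named members exist
 * in struct rtnl_af_ops:
 *
 *	static struct rtnl_af_ops foo_af_ops __read_mostly = {
 *		.family		  = AF_FOO,
 *		.fill_link_af	  = foo_fill_link_af,
 *		.get_link_af_size = foo_get_link_af_size,
 *		.validate_link_af = foo_validate_link_af,
 *		.set_link_af	  = foo_set_link_af,
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);
 */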
609 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
610 * @ops: struct rtnl_af_ops * to unregister
612 void rtnl_af_unregister(struct rtnl_af_ops *ops)
615 list_del_rcu(&ops->list);
620 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
622 static size_t rtnl_link_get_af_size(const struct net_device *dev,
625 struct rtnl_af_ops *af_ops;
629 size = nla_total_size(sizeof(struct nlattr));
632 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
633 if (af_ops->get_link_af_size) {
634 /* AF_* + nested data */
635 size += nla_total_size(sizeof(struct nlattr)) +
636 af_ops->get_link_af_size(dev, ext_filter_mask);
644 static bool rtnl_have_link_slave_info(const struct net_device *dev)
646 struct net_device *master_dev;
651 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
652 if (master_dev && master_dev->rtnl_link_ops)
658 static int rtnl_link_slave_info_fill(struct sk_buff *skb,
659 const struct net_device *dev)
661 struct net_device *master_dev;
662 const struct rtnl_link_ops *ops;
663 struct nlattr *slave_data;
666 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
669 ops = master_dev->rtnl_link_ops;
672 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
674 if (ops->fill_slave_info) {
675 slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
678 err = ops->fill_slave_info(skb, master_dev, dev);
680 goto err_cancel_slave_data;
681 nla_nest_end(skb, slave_data);
685 err_cancel_slave_data:
686 nla_nest_cancel(skb, slave_data);
690 static int rtnl_link_info_fill(struct sk_buff *skb,
691 const struct net_device *dev)
693 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
699 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
701 if (ops->fill_xstats) {
702 err = ops->fill_xstats(skb, dev);
706 if (ops->fill_info) {
707 data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
710 err = ops->fill_info(skb, dev);
712 goto err_cancel_data;
713 nla_nest_end(skb, data);
718 nla_nest_cancel(skb, data);
722 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
724 struct nlattr *linkinfo;
727 linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
728 if (linkinfo == NULL)
731 err = rtnl_link_info_fill(skb, dev);
733 goto err_cancel_link;
735 err = rtnl_link_slave_info_fill(skb, dev);
737 goto err_cancel_link;
739 nla_nest_end(skb, linkinfo);
743 nla_nest_cancel(skb, linkinfo);
748 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
750 struct sock *rtnl = net->rtnl;
752 return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
755 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
757 struct sock *rtnl = net->rtnl;
759 return nlmsg_unicast(rtnl, skb, pid);
761 EXPORT_SYMBOL(rtnl_unicast);
763 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
764 const struct nlmsghdr *nlh, gfp_t flags)
766 struct sock *rtnl = net->rtnl;
768 nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
770 EXPORT_SYMBOL(rtnl_notify);
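/* Editor's note: illustrative notification sketch.  A sender allocates a
 * message, fills it, and either broadcasts it with rtnl_notify() or reports
 * allocation failure to the group with rtnl_set_sk_err(); "fill_foo_msg" is
 * a hypothetical fill helper, and portid/nlh/net are assumed to be in scope:
 *
 *	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!skb) {
 *		rtnl_set_sk_err(net, RTNLGRP_LINK, -ENOBUFS);
 *		return;
 *	}
 *	fill_foo_msg(skb, dev);
 *	rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, GFP_KERNEL);
 */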
772 void rtnl_set_sk_err(struct net *net, u32 group, int error)
774 struct sock *rtnl = net->rtnl;
776 netlink_set_err(rtnl, 0, group, error);
778 EXPORT_SYMBOL(rtnl_set_sk_err);
780 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
785 /* nothing is dumped for dst_default_metrics, so just skip the loop */
786 if (metrics == dst_default_metrics.metrics)
789 mx = nla_nest_start_noflag(skb, RTA_METRICS);
793 for (i = 0; i < RTAX_MAX; i++) {
795 if (i == RTAX_CC_ALGO - 1) {
796 char tmp[TCP_CA_NAME_MAX], *name;
798 name = tcp_ca_get_name_by_key(metrics[i], tmp);
801 if (nla_put_string(skb, i + 1, name))
802 goto nla_put_failure;
803 } else if (i == RTAX_FEATURES - 1) {
804 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
808 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
809 if (nla_put_u32(skb, i + 1, user_features))
810 goto nla_put_failure;
812 if (nla_put_u32(skb, i + 1, metrics[i]))
813 goto nla_put_failure;
820 nla_nest_cancel(skb, mx);
824 return nla_nest_end(skb, mx);
827 nla_nest_cancel(skb, mx);
830 EXPORT_SYMBOL(rtnetlink_put_metrics);
832 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
833 long expires, u32 error)
835 struct rta_cacheinfo ci = {
841 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
842 ci.rta_used = dst->__use;
843 ci.rta_clntref = atomic_read(&dst->__refcnt);
848 clock = jiffies_to_clock_t(abs(expires));
849 clock = min_t(unsigned long, clock, INT_MAX);
850 ci.rta_expires = (expires > 0) ? clock : -clock;
852 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
854 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
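/* Editor's note: typical caller sketch.  A route dump fill function appends
 * RTA_CACHEINFO for the dst it is describing; "rt", "expires" and "error"
 * are assumed to be in that caller's scope:
 *
 *	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
 *		goto nla_put_failure;
 */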
856 static void set_operstate(struct net_device *dev, unsigned char transition)
858 unsigned char operstate = dev->operstate;
860 switch (transition) {
862 if ((operstate == IF_OPER_DORMANT ||
863 operstate == IF_OPER_TESTING ||
864 operstate == IF_OPER_UNKNOWN) &&
865 !netif_dormant(dev) && !netif_testing(dev))
866 operstate = IF_OPER_UP;
869 case IF_OPER_TESTING:
870 if (netif_oper_up(dev))
871 operstate = IF_OPER_TESTING;
874 case IF_OPER_DORMANT:
875 if (netif_oper_up(dev))
876 operstate = IF_OPER_DORMANT;
880 if (dev->operstate != operstate) {
881 write_lock(&dev_base_lock);
882 dev->operstate = operstate;
883 write_unlock(&dev_base_lock);
884 netdev_state_change(dev);
888 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
890 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
891 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
894 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
895 const struct ifinfomsg *ifm)
897 unsigned int flags = ifm->ifi_flags;
899 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
901 flags = (flags & ifm->ifi_change) |
902 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
907 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
908 const struct rtnl_link_stats64 *b)
910 a->rx_packets = b->rx_packets;
911 a->tx_packets = b->tx_packets;
912 a->rx_bytes = b->rx_bytes;
913 a->tx_bytes = b->tx_bytes;
914 a->rx_errors = b->rx_errors;
915 a->tx_errors = b->tx_errors;
916 a->rx_dropped = b->rx_dropped;
917 a->tx_dropped = b->tx_dropped;
919 a->multicast = b->multicast;
920 a->collisions = b->collisions;
922 a->rx_length_errors = b->rx_length_errors;
923 a->rx_over_errors = b->rx_over_errors;
924 a->rx_crc_errors = b->rx_crc_errors;
925 a->rx_frame_errors = b->rx_frame_errors;
926 a->rx_fifo_errors = b->rx_fifo_errors;
927 a->rx_missed_errors = b->rx_missed_errors;
929 a->tx_aborted_errors = b->tx_aborted_errors;
930 a->tx_carrier_errors = b->tx_carrier_errors;
931 a->tx_fifo_errors = b->tx_fifo_errors;
932 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
933 a->tx_window_errors = b->tx_window_errors;
935 a->rx_compressed = b->rx_compressed;
936 a->tx_compressed = b->tx_compressed;
938 a->rx_nohandler = b->rx_nohandler;
942 static inline int rtnl_vfinfo_size(const struct net_device *dev,
945 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
946 int num_vfs = dev_num_vf(dev->dev.parent);
947 size_t size = nla_total_size(0);
950 nla_total_size(sizeof(struct ifla_vf_mac)) +
951 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
952 nla_total_size(sizeof(struct ifla_vf_vlan)) +
953 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
954 nla_total_size(MAX_VLAN_LIST_LEN *
955 sizeof(struct ifla_vf_vlan_info)) +
956 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
957 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
958 nla_total_size(sizeof(struct ifla_vf_rate)) +
959 nla_total_size(sizeof(struct ifla_vf_link_state)) +
960 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
961 nla_total_size(0) + /* nest IFLA_VF_STATS */
962 /* IFLA_VF_STATS_RX_PACKETS */
963 nla_total_size_64bit(sizeof(__u64)) +
964 /* IFLA_VF_STATS_TX_PACKETS */
965 nla_total_size_64bit(sizeof(__u64)) +
966 /* IFLA_VF_STATS_RX_BYTES */
967 nla_total_size_64bit(sizeof(__u64)) +
968 /* IFLA_VF_STATS_TX_BYTES */
969 nla_total_size_64bit(sizeof(__u64)) +
970 /* IFLA_VF_STATS_BROADCAST */
971 nla_total_size_64bit(sizeof(__u64)) +
972 /* IFLA_VF_STATS_MULTICAST */
973 nla_total_size_64bit(sizeof(__u64)) +
974 /* IFLA_VF_STATS_RX_DROPPED */
975 nla_total_size_64bit(sizeof(__u64)) +
976 /* IFLA_VF_STATS_TX_DROPPED */
977 nla_total_size_64bit(sizeof(__u64)) +
978 nla_total_size(sizeof(struct ifla_vf_trust)));
984 static size_t rtnl_port_size(const struct net_device *dev,
987 size_t port_size = nla_total_size(4) /* PORT_VF */
988 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
989 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
990 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
991 + nla_total_size(1) /* PROT_VDP_REQUEST */
992 + nla_total_size(2); /* PORT_VDP_RESPONSE */
993 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
994 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
996 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
999 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1000 !(ext_filter_mask & RTEXT_FILTER_VF))
1002 if (dev_num_vf(dev->dev.parent))
1003 return port_self_size + vf_ports_size +
1004 vf_port_size * dev_num_vf(dev->dev.parent);
1006 return port_self_size;
1009 static size_t rtnl_xdp_size(void)
1011 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
1012 nla_total_size(1) + /* XDP_ATTACHED */
1013 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
1014 nla_total_size(4); /* XDP_<mode>_PROG_ID */
1019 static size_t rtnl_prop_list_size(const struct net_device *dev)
1021 struct netdev_name_node *name_node;
1024 if (list_empty(&dev->name_node->list))
1026 size = nla_total_size(0);
1027 list_for_each_entry(name_node, &dev->name_node->list, list)
1028 size += nla_total_size(ALTIFNAMSIZ);
1032 static size_t rtnl_proto_down_size(const struct net_device *dev)
1034 size_t size = nla_total_size(1);
1036 if (dev->proto_down_reason)
1037 size += nla_total_size(0) + nla_total_size(4);
1042 static size_t rtnl_devlink_port_size(const struct net_device *dev)
1044 size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
1046 if (dev->devlink_port)
1047 size += devlink_nl_port_handle_size(dev->devlink_port);
1052 static noinline size_t if_nlmsg_size(const struct net_device *dev,
1053 u32 ext_filter_mask)
1055 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1056 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
1057 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
1058 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
1059 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
1060 + nla_total_size(sizeof(struct rtnl_link_stats))
1061 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
1062 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1063 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1064 + nla_total_size(4) /* IFLA_TXQLEN */
1065 + nla_total_size(4) /* IFLA_WEIGHT */
1066 + nla_total_size(4) /* IFLA_MTU */
1067 + nla_total_size(4) /* IFLA_LINK */
1068 + nla_total_size(4) /* IFLA_MASTER */
1069 + nla_total_size(1) /* IFLA_CARRIER */
1070 + nla_total_size(4) /* IFLA_PROMISCUITY */
1071 + nla_total_size(4) /* IFLA_ALLMULTI */
1072 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1073 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1074 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1075 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1076 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
1077 + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
1078 + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
1079 + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1080 + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
1081 + nla_total_size(1) /* IFLA_OPERSTATE */
1082 + nla_total_size(1) /* IFLA_LINKMODE */
1083 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1084 + nla_total_size(4) /* IFLA_LINK_NETNSID */
1085 + nla_total_size(4) /* IFLA_GROUP */
1086 + nla_total_size(ext_filter_mask
1087 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1088 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1089 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1090 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1091 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1092 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1093 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1094 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1095 + rtnl_xdp_size() /* IFLA_XDP */
1096 + nla_total_size(4) /* IFLA_EVENT */
1097 + nla_total_size(4) /* IFLA_NEW_NETNSID */
1098 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
1099 + rtnl_proto_down_size(dev) /* proto down */
1100 + nla_total_size(4) /* IFLA_TARGET_NETNSID */
1101 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1102 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
1103 + nla_total_size(4) /* IFLA_MIN_MTU */
1104 + nla_total_size(4) /* IFLA_MAX_MTU */
1105 + rtnl_prop_list_size(dev)
1106 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
1107 + rtnl_devlink_port_size(dev)
1111 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1113 struct nlattr *vf_ports;
1114 struct nlattr *vf_port;
1118 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
1122 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1123 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
1125 goto nla_put_failure;
1126 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1127 goto nla_put_failure;
1128 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1129 if (err == -EMSGSIZE)
1130 goto nla_put_failure;
1132 nla_nest_cancel(skb, vf_port);
1135 nla_nest_end(skb, vf_port);
1138 nla_nest_end(skb, vf_ports);
1143 nla_nest_cancel(skb, vf_ports);
1147 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1149 struct nlattr *port_self;
1152 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
1156 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1158 nla_nest_cancel(skb, port_self);
1159 return (err == -EMSGSIZE) ? err : 0;
1162 nla_nest_end(skb, port_self);
1167 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1168 u32 ext_filter_mask)
1172 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1173 !(ext_filter_mask & RTEXT_FILTER_VF))
1176 err = rtnl_port_self_fill(skb, dev);
1180 if (dev_num_vf(dev->dev.parent)) {
1181 err = rtnl_vf_ports_fill(skb, dev);
1189 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1192 struct netdev_phys_item_id ppid;
1194 err = dev_get_phys_port_id(dev, &ppid);
1196 if (err == -EOPNOTSUPP)
1201 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1207 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1209 char name[IFNAMSIZ];
1212 err = dev_get_phys_port_name(dev, name, sizeof(name));
1214 if (err == -EOPNOTSUPP)
1219 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1225 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1227 struct netdev_phys_item_id ppid = { };
1230 err = dev_get_port_parent_id(dev, &ppid, false);
1232 if (err == -EOPNOTSUPP)
1237 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1243 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1244 struct net_device *dev)
1246 struct rtnl_link_stats64 *sp;
1247 struct nlattr *attr;
1249 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1250 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1254 sp = nla_data(attr);
1255 dev_get_stats(dev, sp);
1257 attr = nla_reserve(skb, IFLA_STATS,
1258 sizeof(struct rtnl_link_stats));
1262 copy_rtnl_link_stats(nla_data(attr), sp);
1267 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1268 struct net_device *dev,
1270 struct nlattr *vfinfo)
1272 struct ifla_vf_rss_query_en vf_rss_query_en;
1273 struct nlattr *vf, *vfstats, *vfvlanlist;
1274 struct ifla_vf_link_state vf_linkstate;
1275 struct ifla_vf_vlan_info vf_vlan_info;
1276 struct ifla_vf_spoofchk vf_spoofchk;
1277 struct ifla_vf_tx_rate vf_tx_rate;
1278 struct ifla_vf_stats vf_stats;
1279 struct ifla_vf_trust vf_trust;
1280 struct ifla_vf_vlan vf_vlan;
1281 struct ifla_vf_rate vf_rate;
1282 struct ifla_vf_mac vf_mac;
1283 struct ifla_vf_broadcast vf_broadcast;
1284 struct ifla_vf_info ivi;
1285 struct ifla_vf_guid node_guid;
1286 struct ifla_vf_guid port_guid;
1288 memset(&ivi, 0, sizeof(ivi));
1290 /* Not all SR-IOV capable drivers support the
1291 * spoofcheck and "RSS query enable" query. Preset to
1292 * -1 so the user space tool can detect that the driver
1293 * didn't report anything.
1296 ivi.rss_query_en = -1;
1298 /* The default value for VF link state is "auto"
1299 * IFLA_VF_LINK_STATE_AUTO which equals zero
1302 /* VLAN Protocol by default is 802.1Q */
1303 ivi.vlan_proto = htons(ETH_P_8021Q);
1304 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1307 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1308 memset(&node_guid, 0, sizeof(node_guid));
1309 memset(&port_guid, 0, sizeof(port_guid));
1318 vf_rss_query_en.vf =
1321 port_guid.vf = ivi.vf;
1323 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1324 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1325 vf_vlan.vlan = ivi.vlan;
1326 vf_vlan.qos = ivi.qos;
1327 vf_vlan_info.vlan = ivi.vlan;
1328 vf_vlan_info.qos = ivi.qos;
1329 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1330 vf_tx_rate.rate = ivi.max_tx_rate;
1331 vf_rate.min_tx_rate = ivi.min_tx_rate;
1332 vf_rate.max_tx_rate = ivi.max_tx_rate;
1333 vf_spoofchk.setting = ivi.spoofchk;
1334 vf_linkstate.link_state = ivi.linkstate;
1335 vf_rss_query_en.setting = ivi.rss_query_en;
1336 vf_trust.setting = ivi.trusted;
1337 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
1339 goto nla_put_vfinfo_failure;
1340 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1341 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
1342 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1343 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1345 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1347 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1349 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1351 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1352 sizeof(vf_rss_query_en),
1353 &vf_rss_query_en) ||
1354 nla_put(skb, IFLA_VF_TRUST,
1355 sizeof(vf_trust), &vf_trust))
1356 goto nla_put_vf_failure;
1358 if (dev->netdev_ops->ndo_get_vf_guid &&
1359 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1361 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1363 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1365 goto nla_put_vf_failure;
1367 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
1369 goto nla_put_vf_failure;
1370 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1372 nla_nest_cancel(skb, vfvlanlist);
1373 goto nla_put_vf_failure;
1375 nla_nest_end(skb, vfvlanlist);
1376 memset(&vf_stats, 0, sizeof(vf_stats));
1377 if (dev->netdev_ops->ndo_get_vf_stats)
1378 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1380 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1382 goto nla_put_vf_failure;
1383 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1384 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1385 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1386 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1387 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1388 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1389 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1390 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1391 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1392 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1393 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1394 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1395 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1396 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1397 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1398 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1399 nla_nest_cancel(skb, vfstats);
1400 goto nla_put_vf_failure;
1402 nla_nest_end(skb, vfstats);
1403 nla_nest_end(skb, vf);
1407 nla_nest_cancel(skb, vf);
1408 nla_put_vfinfo_failure:
1409 nla_nest_cancel(skb, vfinfo);
1413 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1414 struct net_device *dev,
1415 u32 ext_filter_mask)
1417 struct nlattr *vfinfo;
1420 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1423 num_vfs = dev_num_vf(dev->dev.parent);
1424 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1427 if (!dev->netdev_ops->ndo_get_vf_config)
1430 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
1434 for (i = 0; i < num_vfs; i++) {
1435 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1439 nla_nest_end(skb, vfinfo);
1443 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1445 struct rtnl_link_ifmap map;
1447 memset(&map, 0, sizeof(map));
1448 map.mem_start = dev->mem_start;
1449 map.mem_end = dev->mem_end;
1450 map.base_addr = dev->base_addr;
1453 map.port = dev->if_port;
1455 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1461 static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1463 const struct bpf_prog *generic_xdp_prog;
1467 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1468 if (!generic_xdp_prog)
1470 return generic_xdp_prog->aux->id;
1473 static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1475 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1478 static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1480 return dev_xdp_prog_id(dev, XDP_MODE_HW);
1483 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1484 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1485 u32 (*get_prog_id)(struct net_device *dev))
1490 curr_id = get_prog_id(dev);
1495 err = nla_put_u32(skb, attr, curr_id);
1499 if (*mode != XDP_ATTACHED_NONE)
1500 *mode = XDP_ATTACHED_MULTI;
1507 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1514 xdp = nla_nest_start_noflag(skb, IFLA_XDP);
1519 mode = XDP_ATTACHED_NONE;
1520 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1521 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1524 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1525 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1528 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1529 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1533 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1537 if (prog_id && mode != XDP_ATTACHED_MULTI) {
1538 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1543 nla_nest_end(skb, xdp);
1547 nla_nest_cancel(skb, xdp);
1551 static u32 rtnl_get_event(unsigned long event)
1553 u32 rtnl_event_type = IFLA_EVENT_NONE;
1557 rtnl_event_type = IFLA_EVENT_REBOOT;
1559 case NETDEV_FEAT_CHANGE:
1560 rtnl_event_type = IFLA_EVENT_FEATURES;
1562 case NETDEV_BONDING_FAILOVER:
1563 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1565 case NETDEV_NOTIFY_PEERS:
1566 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1568 case NETDEV_RESEND_IGMP:
1569 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1571 case NETDEV_CHANGEINFODATA:
1572 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1578 return rtnl_event_type;
1581 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1583 const struct net_device *upper_dev;
1588 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1590 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1596 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1599 int ifindex = dev_get_iflink(dev);
1601 if (force || dev->ifindex != ifindex)
1602 return nla_put_u32(skb, IFLA_LINK, ifindex);
1607 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1608 struct net_device *dev)
1613 ret = dev_get_alias(dev, buf, sizeof(buf));
1614 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1617 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1618 const struct net_device *dev,
1619 struct net *src_net, gfp_t gfp)
1621 bool put_iflink = false;
1623 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1624 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1626 if (!net_eq(dev_net(dev), link_net)) {
1627 int id = peernet2id_alloc(src_net, link_net, gfp);
1629 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1636 return nla_put_iflink(skb, dev, put_iflink);
1639 static int rtnl_fill_link_af(struct sk_buff *skb,
1640 const struct net_device *dev,
1641 u32 ext_filter_mask)
1643 const struct rtnl_af_ops *af_ops;
1644 struct nlattr *af_spec;
1646 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
1650 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1654 if (!af_ops->fill_link_af)
1657 af = nla_nest_start_noflag(skb, af_ops->family);
1661 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1663 * Caller may return ENODATA to indicate that there
1664 * was no data to be dumped. This is not an error, it
1665 * means we should trim the attribute header and
1668 if (err == -ENODATA)
1669 nla_nest_cancel(skb, af);
1673 nla_nest_end(skb, af);
1676 nla_nest_end(skb, af_spec);
1680 static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1681 const struct net_device *dev)
1683 struct netdev_name_node *name_node;
1686 list_for_each_entry(name_node, &dev->name_node->list, list) {
1687 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1694 static int rtnl_fill_prop_list(struct sk_buff *skb,
1695 const struct net_device *dev)
1697 struct nlattr *prop_list;
1700 prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1704 ret = rtnl_fill_alt_ifnames(skb, dev);
1708 nla_nest_end(skb, prop_list);
1712 nla_nest_cancel(skb, prop_list);
1716 static int rtnl_fill_proto_down(struct sk_buff *skb,
1717 const struct net_device *dev)
1722 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1723 goto nla_put_failure;
1725 preason = dev->proto_down_reason;
1729 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1733 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1734 nla_nest_cancel(skb, pr);
1735 goto nla_put_failure;
1738 nla_nest_end(skb, pr);
1745 static int rtnl_fill_devlink_port(struct sk_buff *skb,
1746 const struct net_device *dev)
1748 struct nlattr *devlink_port_nest;
1751 devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
1752 if (!devlink_port_nest)
1755 if (dev->devlink_port) {
1756 ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
1761 nla_nest_end(skb, devlink_port_nest);
1765 nla_nest_cancel(skb, devlink_port_nest);
1769 static int rtnl_fill_ifinfo(struct sk_buff *skb,
1770 struct net_device *dev, struct net *src_net,
1771 int type, u32 pid, u32 seq, u32 change,
1772 unsigned int flags, u32 ext_filter_mask,
1773 u32 event, int *new_nsid, int new_ifindex,
1774 int tgt_netnsid, gfp_t gfp)
1776 struct ifinfomsg *ifm;
1777 struct nlmsghdr *nlh;
1778 struct Qdisc *qdisc;
1781 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1785 ifm = nlmsg_data(nlh);
1786 ifm->ifi_family = AF_UNSPEC;
1788 ifm->ifi_type = dev->type;
1789 ifm->ifi_index = dev->ifindex;
1790 ifm->ifi_flags = dev_get_flags(dev);
1791 ifm->ifi_change = change;
1793 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
1794 goto nla_put_failure;
1796 qdisc = rtnl_dereference(dev->qdisc);
1797 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1798 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1799 nla_put_u8(skb, IFLA_OPERSTATE,
1800 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1801 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1802 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1803 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1804 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1805 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1806 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1807 nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
1808 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1809 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1810 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1811 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
1812 nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
1813 nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
1814 nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
1815 nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
1817 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1819 put_master_ifindex(skb, dev) ||
1820 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1822 nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
1823 nla_put_ifalias(skb, dev) ||
1824 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1825 atomic_read(&dev->carrier_up_count) +
1826 atomic_read(&dev->carrier_down_count)) ||
1827 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1828 atomic_read(&dev->carrier_up_count)) ||
1829 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1830 atomic_read(&dev->carrier_down_count)))
1831 goto nla_put_failure;
1833 if (rtnl_fill_proto_down(skb, dev))
1834 goto nla_put_failure;
1836 if (event != IFLA_EVENT_NONE) {
1837 if (nla_put_u32(skb, IFLA_EVENT, event))
1838 goto nla_put_failure;
1841 if (rtnl_fill_link_ifmap(skb, dev))
1842 goto nla_put_failure;
1844 if (dev->addr_len) {
1845 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1846 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1847 goto nla_put_failure;
1850 if (rtnl_phys_port_id_fill(skb, dev))
1851 goto nla_put_failure;
1853 if (rtnl_phys_port_name_fill(skb, dev))
1854 goto nla_put_failure;
1856 if (rtnl_phys_switch_id_fill(skb, dev))
1857 goto nla_put_failure;
1859 if (rtnl_fill_stats(skb, dev))
1860 goto nla_put_failure;
1862 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1863 goto nla_put_failure;
1865 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1866 goto nla_put_failure;
1868 if (rtnl_xdp_fill(skb, dev))
1869 goto nla_put_failure;
1871 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1872 if (rtnl_link_fill(skb, dev) < 0)
1873 goto nla_put_failure;
1876 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1877 goto nla_put_failure;
1880 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1881 goto nla_put_failure;
1883 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1884 goto nla_put_failure;
1886 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1887 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1888 goto nla_put_failure;
1891 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1892 goto nla_put_failure_rcu;
1895 if (rtnl_fill_prop_list(skb, dev))
1896 goto nla_put_failure;
1898 if (dev->dev.parent &&
1899 nla_put_string(skb, IFLA_PARENT_DEV_NAME,
1900 dev_name(dev->dev.parent)))
1901 goto nla_put_failure;
1903 if (dev->dev.parent && dev->dev.parent->bus &&
1904 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
1905 dev->dev.parent->bus->name))
1906 goto nla_put_failure;
1908 if (rtnl_fill_devlink_port(skb, dev))
1909 goto nla_put_failure;
1911 nlmsg_end(skb, nlh);
1914 nla_put_failure_rcu:
1917 nlmsg_cancel(skb, nlh);
1921 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1922 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1923 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1924 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1925 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1926 [IFLA_MTU] = { .type = NLA_U32 },
1927 [IFLA_LINK] = { .type = NLA_U32 },
1928 [IFLA_MASTER] = { .type = NLA_U32 },
1929 [IFLA_CARRIER] = { .type = NLA_U8 },
1930 [IFLA_TXQLEN] = { .type = NLA_U32 },
1931 [IFLA_WEIGHT] = { .type = NLA_U32 },
1932 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1933 [IFLA_LINKMODE] = { .type = NLA_U8 },
1934 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1935 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1936 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1937 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1938 * allow 0-length string (needed to remove an alias).
1940 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1941 [IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
1942 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1943 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1944 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1945 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1946 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1947 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1948 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1949 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
1950 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
1951 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1952 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1953 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1954 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1955 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1956 [IFLA_XDP] = { .type = NLA_NESTED },
1957 [IFLA_EVENT] = { .type = NLA_U32 },
1958 [IFLA_GROUP] = { .type = NLA_U32 },
1959 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
1960 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
1961 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
1962 [IFLA_MIN_MTU] = { .type = NLA_U32 },
1963 [IFLA_MAX_MTU] = { .type = NLA_U32 },
1964 [IFLA_PROP_LIST] = { .type = NLA_NESTED },
1965 [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
1966 .len = ALTIFNAMSIZ - 1 },
1967 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
1968 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
1969 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
1970 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
1971 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
1972 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
1973 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
1974 [IFLA_ALLMULTI] = { .type = NLA_REJECT },
1975 [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
1976 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
1979 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1980 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1981 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1982 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1983 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1986 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1987 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1988 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
1989 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
1990 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
1991 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
1992 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
1993 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
1994 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
1995 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
1996 [IFLA_VF_STATS] = { .type = NLA_NESTED },
1997 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
1998 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1999 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
2002 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
2003 [IFLA_PORT_VF] = { .type = NLA_U32 },
2004 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
2005 .len = PORT_PROFILE_MAX },
2006 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2007 .len = PORT_UUID_MAX },
2008 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
2009 .len = PORT_UUID_MAX },
2010 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
2011 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
2013 /* Unused, but we need to keep it here since user space could
2014 * fill it. It's also broken with regard to NLA_BINARY use in
2015 * combination with structs.
2017 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
2018 .len = sizeof(struct ifla_port_vsi) },
2021 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
2022 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
2023 [IFLA_XDP_FD] = { .type = NLA_S32 },
2024 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
2025 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
2026 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
2027 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
2030 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
2032 const struct rtnl_link_ops *ops = NULL;
2033 struct nlattr *linfo[IFLA_INFO_MAX + 1];
2035 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
2038 if (linfo[IFLA_INFO_KIND]) {
2039 char kind[MODULE_NAME_LEN];
2041 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2042 ops = rtnl_link_ops_get(kind);
2048 static bool link_master_filtered(struct net_device *dev, int master_idx)
2050 struct net_device *master;
2055 master = netdev_master_upper_dev_get(dev);
2057 /* 0 is already used to denote that IFLA_MASTER wasn't passed, so we need
2058 * another invalid ifindex value to denote "no master".
2060 if (master_idx == -1)
2063 if (!master || master->ifindex != master_idx)
2069 static bool link_kind_filtered(const struct net_device *dev,
2070 const struct rtnl_link_ops *kind_ops)
2072 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2078 static bool link_dump_filtered(struct net_device *dev,
2080 const struct rtnl_link_ops *kind_ops)
2082 if (link_master_filtered(dev, master_idx) ||
2083 link_kind_filtered(dev, kind_ops))
2090 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2091 * @sk: netlink socket
2092 * @netnsid: network namespace identifier
2094 * Returns the network namespace identified by netnsid on success or an error
2095 * pointer on failure.
2097 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2101 net = get_net_ns_by_id(sock_net(sk), netnsid);
2103 return ERR_PTR(-EINVAL);
2105 /* For now, the caller is required to have CAP_NET_ADMIN in
2106 * the user namespace owning the target net ns.
2108 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2110 return ERR_PTR(-EACCES);
2114 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
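/* Editor's note: usage sketch mirroring the pattern used by the dump code
 * below; "tb" and "netnsid" are assumed to be in the caller's scope, and the
 * reference must be dropped with put_net() once the target netns has been
 * used:
 *
 *	if (tb[IFLA_TARGET_NETNSID]) {
 *		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
 *		tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
 *		if (IS_ERR(tgt_net))
 *			return PTR_ERR(tgt_net);
 *	}
 *	...
 *	if (netnsid >= 0)
 *		put_net(tgt_net);
 */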
2116 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2117 bool strict_check, struct nlattr **tb,
2118 struct netlink_ext_ack *extack)
2123 struct ifinfomsg *ifm;
2125 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2126 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2130 ifm = nlmsg_data(nlh);
2131 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2133 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2136 if (ifm->ifi_index) {
2137 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2141 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2142 IFLA_MAX, ifla_policy,
2146 /* A hack to preserve the kernel<->userspace interface.
2147 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2148 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
2149 * what iproute2 < v3.9.0 used.
2150 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
2151 * attribute, its netlink message is shorter than struct ifinfomsg.
2153 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2154 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2156 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2160 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2162 struct netlink_ext_ack *extack = cb->extack;
2163 const struct nlmsghdr *nlh = cb->nlh;
2164 struct net *net = sock_net(skb->sk);
2165 struct net *tgt_net = net;
2168 struct net_device *dev;
2169 struct hlist_head *head;
2170 struct nlattr *tb[IFLA_MAX+1];
2171 u32 ext_filter_mask = 0;
2172 const struct rtnl_link_ops *kind_ops = NULL;
2173 unsigned int flags = NLM_F_MULTI;
2179 s_idx = cb->args[1];
2181 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2183 if (cb->strict_check)
2189 for (i = 0; i <= IFLA_MAX; ++i) {
2193 /* new attributes should only be added with strict checking */
2195 case IFLA_TARGET_NETNSID:
2196 netnsid = nla_get_s32(tb[i]);
2197 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2198 if (IS_ERR(tgt_net)) {
2199 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2200 return PTR_ERR(tgt_net);
2204 ext_filter_mask = nla_get_u32(tb[i]);
2207 master_idx = nla_get_u32(tb[i]);
2210 kind_ops = linkinfo_to_kind_ops(tb[i]);
2213 if (cb->strict_check) {
2214 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2220 if (master_idx || kind_ops)
2221 flags |= NLM_F_DUMP_FILTERED;
2224 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2226 head = &tgt_net->dev_index_head[h];
2227 hlist_for_each_entry(dev, head, index_hlist) {
2228 if (link_dump_filtered(dev, master_idx, kind_ops))
2232 err = rtnl_fill_ifinfo(skb, dev, net,
2234 NETLINK_CB(cb->skb).portid,
2235 nlh->nlmsg_seq, 0, flags,
2236 ext_filter_mask, 0, NULL, 0,
2237 netnsid, GFP_KERNEL);
2240 if (likely(skb->len))
2254 cb->seq = tgt_net->dev_base_seq;
2255 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2262 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
2263 struct netlink_ext_ack *exterr)
2265 return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
2268 EXPORT_SYMBOL(rtnl_nla_parse_ifla);
2270 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2273 /* Examine the link attributes and figure out which
2274 * network namespace we are talking about.
2276 if (tb[IFLA_NET_NS_PID])
2277 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2278 else if (tb[IFLA_NET_NS_FD])
2279 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2281 net = get_net(src_net);
2284 EXPORT_SYMBOL(rtnl_link_get_net);
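/* Editor's note: illustrative caller sketch.  A newlink-style handler
 * resolves the destination namespace from the request attributes and drops
 * the reference when done; "src_net" and "tb" are assumed to be in scope:
 *
 *	net = rtnl_link_get_net(src_net, tb);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	put_net(net);
 */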
2286 /* Figure out which network namespace we are talking about by
2287 * examining the link attributes in the following order:
2289 * 1. IFLA_NET_NS_PID
2290 * 2. IFLA_NET_NS_FD
2291 * 3. IFLA_TARGET_NETNSID
2293 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2294 struct nlattr *tb[])
2298 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2299 return rtnl_link_get_net(src_net, tb);
2301 if (!tb[IFLA_TARGET_NETNSID])
2302 return get_net(src_net);
2304 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2306 return ERR_PTR(-EINVAL);
2311 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2312 struct net *src_net,
2313 struct nlattr *tb[], int cap)
2317 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2321 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2323 return ERR_PTR(-EPERM);
2329 /* Verify that rtnetlink requests do not pass additional properties
2330 * potentially referring to different network namespaces.
2332 static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2333 struct netlink_ext_ack *extack,
2337 if (netns_id_only) {
2338 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2341 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2345 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2348 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2351 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2357 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2361 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2364 const struct net_device_ops *ops = dev->netdev_ops;
2366 if (!ops->ndo_set_vf_rate)
2368 if (max_tx_rate && max_tx_rate < min_tx_rate)
2371 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2374 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2375 struct netlink_ext_ack *extack)
2378 if (tb[IFLA_ADDRESS] &&
2379 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2382 if (tb[IFLA_BROADCAST] &&
2383 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2387 if (tb[IFLA_AF_SPEC]) {
2391 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2392 const struct rtnl_af_ops *af_ops;
2394 af_ops = rtnl_af_lookup(nla_type(af));
2396 return -EAFNOSUPPORT;
2398 if (!af_ops->set_link_af)
2401 if (af_ops->validate_link_af) {
2402 err = af_ops->validate_link_af(dev, af, extack);
2412 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2415 const struct net_device_ops *ops = dev->netdev_ops;
2417 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2420 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2422 if (dev->type != ARPHRD_INFINIBAND)
2425 return handle_infiniband_guid(dev, ivt, guid_type);
2428 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2430 const struct net_device_ops *ops = dev->netdev_ops;
2433 if (tb[IFLA_VF_MAC]) {
2434 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2436 if (ivm->vf >= INT_MAX)
2439 if (ops->ndo_set_vf_mac)
2440 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2446 if (tb[IFLA_VF_VLAN]) {
2447 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2449 if (ivv->vf >= INT_MAX)
2452 if (ops->ndo_set_vf_vlan)
2453 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2455 htons(ETH_P_8021Q));
2460 if (tb[IFLA_VF_VLAN_LIST]) {
2461 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2462 struct nlattr *attr;
2466 if (!ops->ndo_set_vf_vlan)
2469 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2470 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2471 nla_len(attr) < NLA_HDRLEN) {
2474 if (len >= MAX_VLAN_LIST_LEN)
2476 ivvl[len] = nla_data(attr);
2483 if (ivvl[0]->vf >= INT_MAX)
2485 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2486 ivvl[0]->qos, ivvl[0]->vlan_proto);
2491 if (tb[IFLA_VF_TX_RATE]) {
2492 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2493 struct ifla_vf_info ivf;
2495 if (ivt->vf >= INT_MAX)
2498 if (ops->ndo_get_vf_config)
2499 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2503 err = rtnl_set_vf_rate(dev, ivt->vf,
2504 ivf.min_tx_rate, ivt->rate);
2509 if (tb[IFLA_VF_RATE]) {
2510 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2512 if (ivt->vf >= INT_MAX)
2515 err = rtnl_set_vf_rate(dev, ivt->vf,
2516 ivt->min_tx_rate, ivt->max_tx_rate);
2521 if (tb[IFLA_VF_SPOOFCHK]) {
2522 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2524 if (ivs->vf >= INT_MAX)
2527 if (ops->ndo_set_vf_spoofchk)
2528 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2534 if (tb[IFLA_VF_LINK_STATE]) {
2535 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2537 if (ivl->vf >= INT_MAX)
2540 if (ops->ndo_set_vf_link_state)
2541 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2547 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2548 struct ifla_vf_rss_query_en *ivrssq_en;
2551 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2552 if (ivrssq_en->vf >= INT_MAX)
2554 if (ops->ndo_set_vf_rss_query_en)
2555 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2556 ivrssq_en->setting);
2561 if (tb[IFLA_VF_TRUST]) {
2562 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2564 if (ivt->vf >= INT_MAX)
2567 if (ops->ndo_set_vf_trust)
2568 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2573 if (tb[IFLA_VF_IB_NODE_GUID]) {
2574 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2576 if (ivt->vf >= INT_MAX)
2578 if (!ops->ndo_set_vf_guid)
2580 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2583 if (tb[IFLA_VF_IB_PORT_GUID]) {
2584 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2586 if (ivt->vf >= INT_MAX)
2588 if (!ops->ndo_set_vf_guid)
2591 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
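
/* Illustrative sketch (not part of rtnetlink.c): the nested attribute layout
 * consumed by do_setvfinfo() above. An RTM_SETLINK request carries
 * IFLA_VFINFO_LIST, which nests one IFLA_VF_INFO per VF, which in turn nests
 * typed entries such as IFLA_VF_MAC. It assumes the includes and the
 * add_rta()/rtnl_send() helpers from the sketch after rtnl_link_get_net();
 * everything else is illustrative only.
 */

/* begin a nested attribute; its length is patched once the children exist */
static struct rtattr *nest_start(struct nlmsghdr *nh, unsigned short type)
{
	struct rtattr *nest =
		(struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));

	add_rta(nh, type, NULL, 0);
	return nest;
}

static void nest_end(struct nlmsghdr *nh, struct rtattr *nest)
{
	nest->rta_len = (char *)nh + NLMSG_ALIGN(nh->nlmsg_len) - (char *)nest;
}

static int set_vf_mac(const char *ifname, unsigned int vf,
		      const unsigned char mac[6])
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char attrs[256];
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type  = RTM_SETLINK,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
	};
	struct ifla_vf_mac ivm = { .vf = vf };
	struct rtattr *list, *info;

	memcpy(ivm.mac, mac, 6);
	req.ifm.ifi_index = if_nametoindex(ifname);

	list = nest_start(&req.nh, IFLA_VFINFO_LIST);
	info = nest_start(&req.nh, IFLA_VF_INFO);
	add_rta(&req.nh, IFLA_VF_MAC, &ivm, sizeof(ivm));
	nest_end(&req.nh, info);
	nest_end(&req.nh, list);

	return rtnl_send(&req.nh);
}
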
2597 static int do_set_master(struct net_device *dev, int ifindex,
2598 struct netlink_ext_ack *extack)
2600 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2601 const struct net_device_ops *ops;
2605 if (upper_dev->ifindex == ifindex)
2607 ops = upper_dev->netdev_ops;
2608 if (ops->ndo_del_slave) {
2609 err = ops->ndo_del_slave(upper_dev, dev);
2618 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2621 ops = upper_dev->netdev_ops;
2622 if (ops->ndo_add_slave) {
2623 err = ops->ndo_add_slave(upper_dev, dev, extack);
2633 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2634 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2635 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2638 static int do_set_proto_down(struct net_device *dev,
2639 struct nlattr *nl_proto_down,
2640 struct nlattr *nl_proto_down_reason,
2641 struct netlink_ext_ack *extack)
2643 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2644 unsigned long mask = 0;
2649 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
2650 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2654 if (nl_proto_down_reason) {
2655 err = nla_parse_nested_deprecated(pdreason,
2656 IFLA_PROTO_DOWN_REASON_MAX,
2657 nl_proto_down_reason,
2658 ifla_proto_down_reason_policy,
2663 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2664 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2668 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2670 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2671 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2673 dev_change_proto_down_reason(dev, mask, value);
2676 if (nl_proto_down) {
2677 proto_down = nla_get_u8(nl_proto_down);
2679 /* Don't turn off protodown if there are active reasons */
2680 if (!proto_down && dev->proto_down_reason) {
2681 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2684 err = dev_change_proto_down(dev,
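
/* Illustrative sketch (not part of rtnetlink.c): a request that reaches
 * do_set_proto_down() above, for devices that advertise
 * IFF_CHANGE_PROTO_DOWN. IFLA_PROTO_DOWN is a u8 on/off flag;
 * IFLA_PROTO_DOWN_REASON nests a u32 mask/value pair so independent users
 * can each own one reason bit. The reason bit chosen here is arbitrary, and
 * the add_rta()/nest_start()/nest_end()/rtnl_send() helpers come from the
 * earlier sketches.
 */
static int set_proto_down(unsigned int ifindex, int down)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char attrs[128];
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type  = RTM_SETLINK,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
	};
	unsigned char pd = down ? 1 : 0;
	unsigned int mask = 1U << 0;		/* which reason bit we own */
	unsigned int value = down ? mask : 0;	/* set or clear that bit */
	struct rtattr *nest;

	req.ifm.ifi_index = ifindex;

	add_rta(&req.nh, IFLA_PROTO_DOWN, &pd, sizeof(pd));

	nest = nest_start(&req.nh, IFLA_PROTO_DOWN_REASON);
	add_rta(&req.nh, IFLA_PROTO_DOWN_REASON_MASK, &mask, sizeof(mask));
	add_rta(&req.nh, IFLA_PROTO_DOWN_REASON_VALUE, &value, sizeof(value));
	nest_end(&req.nh, nest);

	return rtnl_send(&req.nh);
}
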
2693 #define DO_SETLINK_MODIFIED 0x01
2694 /* notify flag means notify + modified. */
2695 #define DO_SETLINK_NOTIFY 0x03
2696 static int do_setlink(const struct sk_buff *skb,
2697 struct net_device *dev, struct ifinfomsg *ifm,
2698 struct netlink_ext_ack *extack,
2699 struct nlattr **tb, int status)
2701 const struct net_device_ops *ops = dev->netdev_ops;
2702 char ifname[IFNAMSIZ];
2705 err = validate_linkmsg(dev, tb, extack);
2709 if (tb[IFLA_IFNAME])
2710 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2714 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2715 const char *pat = ifname[0] ? ifname : NULL;
2719 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2726 if (tb[IFLA_NEW_IFINDEX])
2727 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2731 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2735 status |= DO_SETLINK_MODIFIED;
2739 struct rtnl_link_ifmap *u_map;
2742 if (!ops->ndo_set_config) {
2747 if (!netif_device_present(dev)) {
2752 u_map = nla_data(tb[IFLA_MAP]);
2753 k_map.mem_start = (unsigned long) u_map->mem_start;
2754 k_map.mem_end = (unsigned long) u_map->mem_end;
2755 k_map.base_addr = (unsigned short) u_map->base_addr;
2756 k_map.irq = (unsigned char) u_map->irq;
2757 k_map.dma = (unsigned char) u_map->dma;
2758 k_map.port = (unsigned char) u_map->port;
2760 err = ops->ndo_set_config(dev, &k_map);
2764 status |= DO_SETLINK_NOTIFY;
2767 if (tb[IFLA_ADDRESS]) {
2768 struct sockaddr *sa;
2771 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2773 sa = kmalloc(len, GFP_KERNEL);
2778 sa->sa_family = dev->type;
2779 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2781 err = dev_set_mac_address_user(dev, sa, extack);
2785 status |= DO_SETLINK_MODIFIED;
2789 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2792 status |= DO_SETLINK_MODIFIED;
2795 if (tb[IFLA_GROUP]) {
2796 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2797 status |= DO_SETLINK_NOTIFY;
2801 * Interface selected by interface index but interface
 2802  * name provided implies that a name change has been requested.
2805 if (ifm->ifi_index > 0 && ifname[0]) {
2806 err = dev_change_name(dev, ifname);
2809 status |= DO_SETLINK_MODIFIED;
2812 if (tb[IFLA_IFALIAS]) {
2813 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2814 nla_len(tb[IFLA_IFALIAS]));
2817 status |= DO_SETLINK_NOTIFY;
2820 if (tb[IFLA_BROADCAST]) {
2821 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2822 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2825 if (tb[IFLA_MASTER]) {
2826 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2829 status |= DO_SETLINK_MODIFIED;
2832 if (ifm->ifi_flags || ifm->ifi_change) {
2833 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2839 if (tb[IFLA_CARRIER]) {
2840 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2843 status |= DO_SETLINK_MODIFIED;
2846 if (tb[IFLA_TXQLEN]) {
2847 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2849 err = dev_change_tx_queue_len(dev, value);
2852 status |= DO_SETLINK_MODIFIED;
2855 if (tb[IFLA_GSO_MAX_SIZE]) {
2856 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2858 if (max_size > dev->tso_max_size) {
2863 if (dev->gso_max_size ^ max_size) {
2864 netif_set_gso_max_size(dev, max_size);
2865 status |= DO_SETLINK_MODIFIED;
2869 if (tb[IFLA_GSO_MAX_SEGS]) {
2870 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2872 if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) {
2877 if (dev->gso_max_segs ^ max_segs) {
2878 netif_set_gso_max_segs(dev, max_segs);
2879 status |= DO_SETLINK_MODIFIED;
2883 if (tb[IFLA_GRO_MAX_SIZE]) {
2884 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2886 if (dev->gro_max_size ^ gro_max_size) {
2887 netif_set_gro_max_size(dev, gro_max_size);
2888 status |= DO_SETLINK_MODIFIED;
2892 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
2893 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
2895 if (max_size > dev->tso_max_size) {
2900 if (dev->gso_ipv4_max_size ^ max_size) {
2901 netif_set_gso_ipv4_max_size(dev, max_size);
2902 status |= DO_SETLINK_MODIFIED;
2906 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
2907 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
2909 if (dev->gro_ipv4_max_size ^ gro_max_size) {
2910 netif_set_gro_ipv4_max_size(dev, gro_max_size);
2911 status |= DO_SETLINK_MODIFIED;
2915 if (tb[IFLA_OPERSTATE])
2916 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2918 if (tb[IFLA_LINKMODE]) {
2919 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2921 write_lock(&dev_base_lock);
2922 if (dev->link_mode ^ value)
2923 status |= DO_SETLINK_NOTIFY;
2924 dev->link_mode = value;
2925 write_unlock(&dev_base_lock);
2928 if (tb[IFLA_VFINFO_LIST]) {
2929 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2930 struct nlattr *attr;
2933 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2934 if (nla_type(attr) != IFLA_VF_INFO ||
2935 nla_len(attr) < NLA_HDRLEN) {
2939 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2945 err = do_setvfinfo(dev, vfinfo);
2948 status |= DO_SETLINK_NOTIFY;
2953 if (tb[IFLA_VF_PORTS]) {
2954 struct nlattr *port[IFLA_PORT_MAX+1];
2955 struct nlattr *attr;
2960 if (!ops->ndo_set_vf_port)
2963 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2964 if (nla_type(attr) != IFLA_VF_PORT ||
2965 nla_len(attr) < NLA_HDRLEN) {
2969 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2975 if (!port[IFLA_PORT_VF]) {
2979 vf = nla_get_u32(port[IFLA_PORT_VF]);
2980 err = ops->ndo_set_vf_port(dev, vf, port);
2983 status |= DO_SETLINK_NOTIFY;
2988 if (tb[IFLA_PORT_SELF]) {
2989 struct nlattr *port[IFLA_PORT_MAX+1];
2991 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2993 ifla_port_policy, NULL);
2998 if (ops->ndo_set_vf_port)
2999 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
3002 status |= DO_SETLINK_NOTIFY;
3005 if (tb[IFLA_AF_SPEC]) {
3009 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
3010 const struct rtnl_af_ops *af_ops;
3012 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
3014 err = af_ops->set_link_af(dev, af, extack);
3018 status |= DO_SETLINK_NOTIFY;
3023 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
3024 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
3025 tb[IFLA_PROTO_DOWN_REASON], extack);
3028 status |= DO_SETLINK_NOTIFY;
3032 struct nlattr *xdp[IFLA_XDP_MAX + 1];
3035 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3037 ifla_xdp_policy, NULL);
3041 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
3046 if (xdp[IFLA_XDP_FLAGS]) {
3047 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3048 if (xdp_flags & ~XDP_FLAGS_MASK) {
3052 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
3058 if (xdp[IFLA_XDP_FD]) {
3059 int expected_fd = -1;
3061 if (xdp_flags & XDP_FLAGS_REPLACE) {
3062 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3067 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3070 err = dev_change_xdp_fd(dev, extack,
3071 nla_get_s32(xdp[IFLA_XDP_FD]),
3076 status |= DO_SETLINK_NOTIFY;
3081 if (status & DO_SETLINK_MODIFIED) {
3082 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3083 netdev_state_change(dev);
3086 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
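
/* Illustrative sketch (not part of rtnetlink.c): about the smallest request
 * that lands in do_setlink() above, changing a single property (IFLA_MTU).
 * It assumes the add_rta()/rtnl_send() helpers from the earlier sketches.
 */
static int set_mtu(unsigned int ifindex, unsigned int mtu)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char attrs[32];
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type  = RTM_SETLINK,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
	};

	req.ifm.ifi_index = ifindex;
	add_rta(&req.nh, IFLA_MTU, &mtu, sizeof(mtu));

	return rtnl_send(&req.nh);
}
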
3093 static struct net_device *rtnl_dev_get(struct net *net,
3094 struct nlattr *tb[])
3096 char ifname[ALTIFNAMSIZ];
3098 if (tb[IFLA_IFNAME])
3099 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3100 else if (tb[IFLA_ALT_IFNAME])
3101 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3105 return __dev_get_by_name(net, ifname);
3108 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3109 struct netlink_ext_ack *extack)
3111 struct net *net = sock_net(skb->sk);
3112 struct ifinfomsg *ifm;
3113 struct net_device *dev;
3115 struct nlattr *tb[IFLA_MAX+1];
3117 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3118 ifla_policy, extack);
3122 err = rtnl_ensure_unique_netns(tb, extack, false);
3127 ifm = nlmsg_data(nlh);
3128 if (ifm->ifi_index > 0)
3129 dev = __dev_get_by_index(net, ifm->ifi_index);
3130 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3131 dev = rtnl_dev_get(net, tb);
3140 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3145 static int rtnl_group_dellink(const struct net *net, int group)
3147 struct net_device *dev, *aux;
3148 LIST_HEAD(list_kill);
3154 for_each_netdev(net, dev) {
3155 if (dev->group == group) {
3156 const struct rtnl_link_ops *ops;
3159 ops = dev->rtnl_link_ops;
3160 if (!ops || !ops->dellink)
3168 for_each_netdev_safe(net, dev, aux) {
3169 if (dev->group == group) {
3170 const struct rtnl_link_ops *ops;
3172 ops = dev->rtnl_link_ops;
3173 ops->dellink(dev, &list_kill);
3176 unregister_netdevice_many(&list_kill);
3181 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
3183 const struct rtnl_link_ops *ops;
3184 LIST_HEAD(list_kill);
3186 ops = dev->rtnl_link_ops;
3187 if (!ops || !ops->dellink)
3190 ops->dellink(dev, &list_kill);
3191 unregister_netdevice_many_notify(&list_kill, portid, nlh);
3195 EXPORT_SYMBOL_GPL(rtnl_delete_link);
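
/* Illustrative sketch (not part of rtnetlink.c): deleting a link by name,
 * which rtnl_dellink() below resolves through rtnl_dev_get() and hands to
 * rtnl_delete_link() above. Assumes the add_rta()/rtnl_send() helpers from
 * the earlier sketches.
 */
static int del_link_by_name(const char *ifname)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char attrs[64];
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type  = RTM_DELLINK,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
	};

	add_rta(&req.nh, IFLA_IFNAME, ifname, strlen(ifname) + 1);

	return rtnl_send(&req.nh);
}
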
3197 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3198 struct netlink_ext_ack *extack)
3200 struct net *net = sock_net(skb->sk);
3201 u32 portid = NETLINK_CB(skb).portid;
3202 struct net *tgt_net = net;
3203 struct net_device *dev = NULL;
3204 struct ifinfomsg *ifm;
3205 struct nlattr *tb[IFLA_MAX+1];
3209 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3210 ifla_policy, extack);
3214 err = rtnl_ensure_unique_netns(tb, extack, true);
3218 if (tb[IFLA_TARGET_NETNSID]) {
3219 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3220 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3221 if (IS_ERR(tgt_net))
3222 return PTR_ERR(tgt_net);
3226 ifm = nlmsg_data(nlh);
3227 if (ifm->ifi_index > 0)
3228 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3229 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3230 dev = rtnl_dev_get(net, tb);
3231 else if (tb[IFLA_GROUP])
3232 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3237 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
3243 err = rtnl_delete_link(dev, portid, nlh);
3252 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3253 u32 portid, const struct nlmsghdr *nlh)
3255 unsigned int old_flags;
3258 old_flags = dev->flags;
3259 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3260 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3266 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3267 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
3269 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3270 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
3274 EXPORT_SYMBOL(rtnl_configure_link);
3276 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3277 unsigned char name_assign_type,
3278 const struct rtnl_link_ops *ops,
3279 struct nlattr *tb[],
3280 struct netlink_ext_ack *extack)
3282 struct net_device *dev;
3283 unsigned int num_tx_queues = 1;
3284 unsigned int num_rx_queues = 1;
3286 if (tb[IFLA_NUM_TX_QUEUES])
3287 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3288 else if (ops->get_num_tx_queues)
3289 num_tx_queues = ops->get_num_tx_queues();
3291 if (tb[IFLA_NUM_RX_QUEUES])
3292 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3293 else if (ops->get_num_rx_queues)
3294 num_rx_queues = ops->get_num_rx_queues();
3296 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3297 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3298 return ERR_PTR(-EINVAL);
3301 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3302 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3303 return ERR_PTR(-EINVAL);
3307 dev = ops->alloc(tb, ifname, name_assign_type,
3308 num_tx_queues, num_rx_queues);
3312 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3313 name_assign_type, ops->setup,
3314 num_tx_queues, num_rx_queues);
3318 return ERR_PTR(-ENOMEM);
3320 dev_net_set(dev, net);
3321 dev->rtnl_link_ops = ops;
3322 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3325 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3328 err = dev_validate_mtu(dev, mtu, extack);
3331 return ERR_PTR(err);
3335 if (tb[IFLA_ADDRESS]) {
3336 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3337 nla_len(tb[IFLA_ADDRESS]));
3338 dev->addr_assign_type = NET_ADDR_SET;
3340 if (tb[IFLA_BROADCAST])
3341 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3342 nla_len(tb[IFLA_BROADCAST]));
3343 if (tb[IFLA_TXQLEN])
3344 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3345 if (tb[IFLA_OPERSTATE])
3346 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3347 if (tb[IFLA_LINKMODE])
3348 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3350 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3351 if (tb[IFLA_GSO_MAX_SIZE])
3352 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3353 if (tb[IFLA_GSO_MAX_SEGS])
3354 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3355 if (tb[IFLA_GRO_MAX_SIZE])
3356 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3357 if (tb[IFLA_GSO_IPV4_MAX_SIZE])
3358 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
3359 if (tb[IFLA_GRO_IPV4_MAX_SIZE])
3360 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
3364 EXPORT_SYMBOL(rtnl_create_link);
3366 static int rtnl_group_changelink(const struct sk_buff *skb,
3367 struct net *net, int group,
3368 struct ifinfomsg *ifm,
3369 struct netlink_ext_ack *extack,
3372 struct net_device *dev, *aux;
3375 for_each_netdev_safe(net, dev, aux) {
3376 if (dev->group == group) {
3377 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3386 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3387 const struct rtnl_link_ops *ops,
3388 const struct nlmsghdr *nlh,
3389 struct nlattr **tb, struct nlattr **data,
3390 struct netlink_ext_ack *extack)
3392 unsigned char name_assign_type = NET_NAME_USER;
3393 struct net *net = sock_net(skb->sk);
3394 u32 portid = NETLINK_CB(skb).portid;
3395 struct net *dest_net, *link_net;
3396 struct net_device *dev;
3397 char ifname[IFNAMSIZ];
3400 if (!ops->alloc && !ops->setup)
3403 if (tb[IFLA_IFNAME]) {
3404 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3406 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3407 name_assign_type = NET_NAME_ENUM;
3410 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3411 if (IS_ERR(dest_net))
3412 return PTR_ERR(dest_net);
3414 if (tb[IFLA_LINK_NETNSID]) {
3415 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3417 link_net = get_net_ns_by_id(dest_net, id);
3419 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3424 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3430 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3431 name_assign_type, ops, tb, extack);
3437 dev->ifindex = ifm->ifi_index;
3440 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3442 err = register_netdevice(dev);
3448 err = rtnl_configure_link(dev, ifm, portid, nlh);
3450 goto out_unregister;
3452 err = dev_change_net_namespace(dev, dest_net, ifname);
3454 goto out_unregister;
3456 if (tb[IFLA_MASTER]) {
3457 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3459 goto out_unregister;
3468 LIST_HEAD(list_kill);
3470 ops->dellink(dev, &list_kill);
3471 unregister_netdevice_many(&list_kill);
3473 unregister_netdevice(dev);
3478 struct rtnl_newlink_tbs {
3479 struct nlattr *tb[IFLA_MAX + 1];
3480 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3481 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3484 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3485 struct rtnl_newlink_tbs *tbs,
3486 struct netlink_ext_ack *extack)
3488 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3489 struct nlattr ** const tb = tbs->tb;
3490 const struct rtnl_link_ops *m_ops;
3491 struct net_device *master_dev;
3492 struct net *net = sock_net(skb->sk);
3493 const struct rtnl_link_ops *ops;
3494 struct nlattr **slave_data;
3495 char kind[MODULE_NAME_LEN];
3496 struct net_device *dev;
3497 struct ifinfomsg *ifm;
3498 struct nlattr **data;
3499 bool link_specified;
3502 #ifdef CONFIG_MODULES
3505 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3506 ifla_policy, extack);
3510 err = rtnl_ensure_unique_netns(tb, extack, false);
3514 ifm = nlmsg_data(nlh);
3515 if (ifm->ifi_index > 0) {
3516 link_specified = true;
3517 dev = __dev_get_by_index(net, ifm->ifi_index);
3518 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3519 link_specified = true;
3520 dev = rtnl_dev_get(net, tb);
3522 link_specified = false;
3529 master_dev = netdev_master_upper_dev_get(dev);
3531 m_ops = master_dev->rtnl_link_ops;
3534 err = validate_linkmsg(dev, tb, extack);
3538 if (tb[IFLA_LINKINFO]) {
3539 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3541 ifla_info_policy, NULL);
3545 memset(linkinfo, 0, sizeof(linkinfo));
3547 if (linkinfo[IFLA_INFO_KIND]) {
3548 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3549 ops = rtnl_link_ops_get(kind);
3557 if (ops->maxtype > RTNL_MAX_TYPE)
3560 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3561 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3562 linkinfo[IFLA_INFO_DATA],
3563 ops->policy, extack);
3568 if (ops->validate) {
3569 err = ops->validate(tb, data, extack);
3577 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3580 if (m_ops->slave_maxtype &&
3581 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3582 err = nla_parse_nested_deprecated(tbs->slave_attr,
3583 m_ops->slave_maxtype,
3584 linkinfo[IFLA_INFO_SLAVE_DATA],
3585 m_ops->slave_policy,
3589 slave_data = tbs->slave_attr;
3596 if (nlh->nlmsg_flags & NLM_F_EXCL)
3598 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3601 if (linkinfo[IFLA_INFO_DATA]) {
3602 if (!ops || ops != dev->rtnl_link_ops ||
3606 err = ops->changelink(dev, tb, data, extack);
3609 status |= DO_SETLINK_NOTIFY;
3612 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3613 if (!m_ops || !m_ops->slave_changelink)
3616 err = m_ops->slave_changelink(master_dev, dev, tb,
3617 slave_data, extack);
3620 status |= DO_SETLINK_NOTIFY;
3623 return do_setlink(skb, dev, ifm, extack, tb, status);
3626 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3627 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3628 * or it's for a group
3633 return rtnl_group_changelink(skb, net,
3634 nla_get_u32(tb[IFLA_GROUP]),
3639 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3643 #ifdef CONFIG_MODULES
3646 request_module("rtnl-link-%s", kind);
3648 ops = rtnl_link_ops_get(kind);
3653 NL_SET_ERR_MSG(extack, "Unknown device type");
3657 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
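
/* Illustrative sketch (not part of rtnetlink.c): the RTM_NEWLINK request
 * that __rtnl_newlink() above routes to rtnl_newlink_create(), creating a
 * new "dummy" device. IFLA_LINKINFO nests IFLA_INFO_KIND, which selects the
 * rtnl_link_ops (triggering request_module("rtnl-link-dummy") if needed).
 * Assumes the add_rta()/nest_start()/nest_end()/rtnl_send() helpers from the
 * earlier sketches.
 */
static int create_dummy(const char *ifname)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char attrs[128];
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type  = RTM_NEWLINK,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK |
				  NLM_F_CREATE | NLM_F_EXCL,
	};
	struct rtattr *linkinfo;

	add_rta(&req.nh, IFLA_IFNAME, ifname, strlen(ifname) + 1);

	linkinfo = nest_start(&req.nh, IFLA_LINKINFO);
	add_rta(&req.nh, IFLA_INFO_KIND, "dummy", strlen("dummy") + 1);
	nest_end(&req.nh, linkinfo);

	return rtnl_send(&req.nh);
}
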
3660 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3661 struct netlink_ext_ack *extack)
3663 struct rtnl_newlink_tbs *tbs;
3666 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3670 ret = __rtnl_newlink(skb, nlh, tbs, extack);
3675 static int rtnl_valid_getlink_req(struct sk_buff *skb,
3676 const struct nlmsghdr *nlh,
3678 struct netlink_ext_ack *extack)
3680 struct ifinfomsg *ifm;
3683 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3684 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3688 if (!netlink_strict_get_check(skb))
3689 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3690 ifla_policy, extack);
3692 ifm = nlmsg_data(nlh);
3693 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3695 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3699 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3700 ifla_policy, extack);
3704 for (i = 0; i <= IFLA_MAX; i++) {
3710 case IFLA_ALT_IFNAME:
3712 case IFLA_TARGET_NETNSID:
3715 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3723 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3724 struct netlink_ext_ack *extack)
3726 struct net *net = sock_net(skb->sk);
3727 struct net *tgt_net = net;
3728 struct ifinfomsg *ifm;
3729 struct nlattr *tb[IFLA_MAX+1];
3730 struct net_device *dev = NULL;
3731 struct sk_buff *nskb;
3734 u32 ext_filter_mask = 0;
3736 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3740 err = rtnl_ensure_unique_netns(tb, extack, true);
3744 if (tb[IFLA_TARGET_NETNSID]) {
3745 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3746 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3747 if (IS_ERR(tgt_net))
3748 return PTR_ERR(tgt_net);
3751 if (tb[IFLA_EXT_MASK])
3752 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3755 ifm = nlmsg_data(nlh);
3756 if (ifm->ifi_index > 0)
3757 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3758 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3759 dev = rtnl_dev_get(tgt_net, tb);
3768 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3772 err = rtnl_fill_ifinfo(nskb, dev, net,
3773 RTM_NEWLINK, NETLINK_CB(skb).portid,
3774 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3775 0, NULL, 0, netnsid, GFP_KERNEL);
3777 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3778 WARN_ON(err == -EMSGSIZE);
3781 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3789 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3790 bool *changed, struct netlink_ext_ack *extack)
3796 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3800 if (cmd == RTM_NEWLINKPROP) {
3801 size = rtnl_prop_list_size(dev);
3802 size += nla_total_size(ALTIFNAMSIZ);
3803 if (size >= U16_MAX) {
3804 NL_SET_ERR_MSG(extack,
3805 "effective property list too long");
3810 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3814 if (cmd == RTM_NEWLINKPROP) {
3815 err = netdev_name_node_alt_create(dev, alt_ifname);
3818 } else if (cmd == RTM_DELLINKPROP) {
3819 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3831 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3832 struct netlink_ext_ack *extack)
3834 struct net *net = sock_net(skb->sk);
3835 struct nlattr *tb[IFLA_MAX + 1];
3836 struct net_device *dev;
3837 struct ifinfomsg *ifm;
3838 bool changed = false;
3839 struct nlattr *attr;
3842 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3846 err = rtnl_ensure_unique_netns(tb, extack, true);
3850 ifm = nlmsg_data(nlh);
3851 if (ifm->ifi_index > 0)
3852 dev = __dev_get_by_index(net, ifm->ifi_index);
3853 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3854 dev = rtnl_dev_get(net, tb);
3861 if (!tb[IFLA_PROP_LIST])
3864 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3865 switch (nla_type(attr)) {
3866 case IFLA_ALT_IFNAME:
3867 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3875 netdev_state_change(dev);
3879 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3880 struct netlink_ext_ack *extack)
3882 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3885 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3886 struct netlink_ext_ack *extack)
3888 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
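
/* Illustrative sketch (not part of rtnetlink.c): adding an alternative
 * interface name, the RTM_NEWLINKPROP path handled by rtnl_linkprop() above.
 * The property goes inside an IFLA_PROP_LIST nest as an IFLA_ALT_IFNAME
 * string; because this request is parsed with the strict nlmsg_parse(), the
 * nest is marked with NLA_F_NESTED. Assumes the helpers from the earlier
 * sketches.
 */
static int add_altname(unsigned int ifindex, const char *altname)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char attrs[192];
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type  = RTM_NEWLINKPROP,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
	};
	struct rtattr *props;

	req.ifm.ifi_index = ifindex;

	props = nest_start(&req.nh, IFLA_PROP_LIST | NLA_F_NESTED);
	add_rta(&req.nh, IFLA_ALT_IFNAME, altname, strlen(altname) + 1);
	nest_end(&req.nh, props);

	return rtnl_send(&req.nh);
}
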
3891 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3893 struct net *net = sock_net(skb->sk);
3894 size_t min_ifinfo_dump_size = 0;
3895 struct nlattr *tb[IFLA_MAX+1];
3896 u32 ext_filter_mask = 0;
3897 struct net_device *dev;
3900 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3901 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3902 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3904 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3905 if (tb[IFLA_EXT_MASK])
3906 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3909 if (!ext_filter_mask)
3910 return NLMSG_GOODSIZE;
 3912 	/* traverse the list of net devices and compute the minimum
3913 * buffer size based upon the filter mask.
3916 for_each_netdev_rcu(net, dev) {
3917 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3918 if_nlmsg_size(dev, ext_filter_mask));
3922 return nlmsg_total_size(min_ifinfo_dump_size);
3925 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3928 int s_idx = cb->family;
3929 int type = cb->nlh->nlmsg_type - RTM_BASE;
3935 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3936 struct rtnl_link __rcu **tab;
3937 struct rtnl_link *link;
3938 rtnl_dumpit_func dumpit;
3940 if (idx < s_idx || idx == PF_PACKET)
3943 if (type < 0 || type >= RTM_NR_MSGTYPES)
3946 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3950 link = rcu_dereference_rtnl(tab[type]);
3954 dumpit = link->dumpit;
3959 memset(&cb->args[0], 0, sizeof(cb->args));
3963 ret = dumpit(skb, cb);
3969 return skb->len ? : ret;
3972 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3973 unsigned int change,
3974 u32 event, gfp_t flags, int *new_nsid,
3975 int new_ifindex, u32 portid, u32 seq)
3977 struct net *net = dev_net(dev);
3978 struct sk_buff *skb;
3981 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
3985 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3986 type, portid, seq, change, 0, 0, event,
3987 new_nsid, new_ifindex, -1, flags);
3989 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3990 WARN_ON(err == -EMSGSIZE);
3997 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4001 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
4002 u32 portid, const struct nlmsghdr *nlh)
4004 struct net *net = dev_net(dev);
4006 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
4009 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
4010 unsigned int change, u32 event,
4011 gfp_t flags, int *new_nsid, int new_ifindex,
4012 u32 portid, const struct nlmsghdr *nlh)
4014 struct sk_buff *skb;
4016 if (dev->reg_state != NETREG_REGISTERED)
4019 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
4020 new_ifindex, portid, nlmsg_seq(nlh));
4022 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
4025 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
4026 gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
4028 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4029 NULL, 0, portid, nlh);
4032 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
4033 gfp_t flags, int *new_nsid, int new_ifindex)
4035 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4036 new_nsid, new_ifindex, 0, NULL);
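
/* Illustrative sketch (not part of rtnetlink.c): the consumer side of the
 * rtmsg_ifinfo*() notifications above. A netlink socket bound to the
 * RTNLGRP_LINK multicast group (RTMGRP_LINK in the legacy bitmask) receives
 * the RTM_NEWLINK/RTM_DELLINK messages built by rtmsg_ifinfo_build_skb().
 * Error handling is trimmed; the loop runs until interrupted.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct sockaddr_nl sa = {
		.nl_family = AF_NETLINK,
		.nl_groups = RTMGRP_LINK,	/* link change notifications */
	};
	char buf[8192];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));

	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh;

		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			struct ifinfomsg *ifm = NLMSG_DATA(nh);

			printf("%s ifindex %d flags %#x\n",
			       nh->nlmsg_type == RTM_NEWLINK ? "NEWLINK" :
			       nh->nlmsg_type == RTM_DELLINK ? "DELLINK" :
			       "other", ifm->ifi_index, ifm->ifi_flags);
		}
	}
	return 0;
}
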
4039 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4040 struct net_device *dev,
4041 u8 *addr, u16 vid, u32 pid, u32 seq,
4042 int type, unsigned int flags,
4043 int nlflags, u16 ndm_state)
4045 struct nlmsghdr *nlh;
4048 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4052 ndm = nlmsg_data(nlh);
4053 ndm->ndm_family = AF_BRIDGE;
4056 ndm->ndm_flags = flags;
4058 ndm->ndm_ifindex = dev->ifindex;
4059 ndm->ndm_state = ndm_state;
4061 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
4062 goto nla_put_failure;
4064 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4065 goto nla_put_failure;
4067 nlmsg_end(skb, nlh);
4071 nlmsg_cancel(skb, nlh);
4075 static inline size_t rtnl_fdb_nlmsg_size(void)
4077 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4078 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
4079 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4083 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4086 struct net *net = dev_net(dev);
4087 struct sk_buff *skb;
4090 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
4094 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4095 0, 0, type, NTF_SELF, 0, ndm_state);
4101 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4104 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4108 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4110 int ndo_dflt_fdb_add(struct ndmsg *ndm,
4111 struct nlattr *tb[],
4112 struct net_device *dev,
4113 const unsigned char *addr, u16 vid,
 4118 	/* If aging addresses are supported, the device will need to
4119 * implement its own handler for this.
4121 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4122 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4126 if (tb[NDA_FLAGS_EXT]) {
4127 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4132 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4136 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4137 err = dev_uc_add_excl(dev, addr);
4138 else if (is_multicast_ether_addr(addr))
4139 err = dev_mc_add_excl(dev, addr);
4141 /* Only return duplicate errors if NLM_F_EXCL is set */
4142 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4147 EXPORT_SYMBOL(ndo_dflt_fdb_add);
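
/* Illustrative sketch (not part of rtnetlink.c): the RTM_NEWNEIGH request
 * dispatched to rtnl_fdb_add() below, roughly what "bridge fdb add <mac>
 * dev <ifname> self permanent" sends. ndm_family = AF_BRIDGE selects the
 * PF_BRIDGE handler, and with NTF_SELF plus NUD_PERMANENT it falls back to
 * ndo_dflt_fdb_add() above for devices without their own ndo_fdb_add().
 * Assumes the add_rta()/rtnl_send() helpers from the earlier sketches.
 */
#include <net/if.h>
#include <linux/neighbour.h>

static int fdb_add(const char *ifname, const unsigned char mac[6])
{
	struct {
		struct nlmsghdr nh;
		struct ndmsg ndm;
		char attrs[64];
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ndmsg)),
		.nh.nlmsg_type  = RTM_NEWNEIGH,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE,
	};

	req.ndm.ndm_family  = AF_BRIDGE;
	req.ndm.ndm_ifindex = if_nametoindex(ifname);
	req.ndm.ndm_state   = NUD_PERMANENT;
	req.ndm.ndm_flags   = NTF_SELF;

	add_rta(&req.nh, NDA_LLADDR, mac, 6);

	return rtnl_send(&req.nh);
}
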
4149 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4150 struct netlink_ext_ack *extack)
4155 if (nla_len(vlan_attr) != sizeof(u16)) {
4156 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4160 vid = nla_get_u16(vlan_attr);
4162 if (!vid || vid >= VLAN_VID_MASK) {
4163 NL_SET_ERR_MSG(extack, "invalid vlan id");
4171 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4172 struct netlink_ext_ack *extack)
4174 struct net *net = sock_net(skb->sk);
4176 struct nlattr *tb[NDA_MAX+1];
4177 struct net_device *dev;
4182 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4187 ndm = nlmsg_data(nlh);
4188 if (ndm->ndm_ifindex == 0) {
4189 NL_SET_ERR_MSG(extack, "invalid ifindex");
4193 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4195 NL_SET_ERR_MSG(extack, "unknown ifindex");
4199 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4200 NL_SET_ERR_MSG(extack, "invalid address");
4204 if (dev->type != ARPHRD_ETHER) {
4205 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4209 addr = nla_data(tb[NDA_LLADDR]);
4211 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
 4217 	/* Support fdb on the master device; this is the net/bridge default case */
4218 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4219 netif_is_bridge_port(dev)) {
4220 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4221 const struct net_device_ops *ops = br_dev->netdev_ops;
4223 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4224 nlh->nlmsg_flags, extack);
4228 ndm->ndm_flags &= ~NTF_MASTER;
4231 /* Embedded bridge, macvlan, and any other device support */
4232 if ((ndm->ndm_flags & NTF_SELF)) {
4233 if (dev->netdev_ops->ndo_fdb_add)
4234 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4239 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4243 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4245 ndm->ndm_flags &= ~NTF_SELF;
4253 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4255 int ndo_dflt_fdb_del(struct ndmsg *ndm,
4256 struct nlattr *tb[],
4257 struct net_device *dev,
4258 const unsigned char *addr, u16 vid)
 4262 	/* If aging addresses are supported, the device will need to
4263 * implement its own handler for this.
4265 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4266 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4270 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4271 err = dev_uc_del(dev, addr);
4272 else if (is_multicast_ether_addr(addr))
4273 err = dev_mc_del(dev, addr);
4277 EXPORT_SYMBOL(ndo_dflt_fdb_del);
4279 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = {
4280 [NDA_VLAN] = { .type = NLA_U16 },
4281 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
4282 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
4283 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
4286 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4287 struct netlink_ext_ack *extack)
4289 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4290 struct net *net = sock_net(skb->sk);
4291 const struct net_device_ops *ops;
4293 struct nlattr *tb[NDA_MAX+1];
4294 struct net_device *dev;
4299 if (!netlink_capable(skb, CAP_NET_ADMIN))
4303 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4306 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
4307 fdb_del_bulk_policy, extack);
4312 ndm = nlmsg_data(nlh);
4313 if (ndm->ndm_ifindex == 0) {
4314 NL_SET_ERR_MSG(extack, "invalid ifindex");
4318 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4320 NL_SET_ERR_MSG(extack, "unknown ifindex");
4325 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4326 NL_SET_ERR_MSG(extack, "invalid address");
4329 addr = nla_data(tb[NDA_LLADDR]);
4332 if (dev->type != ARPHRD_ETHER) {
4333 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4337 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
 4343 	/* Support fdb on the master device; this is the net/bridge default case */
4344 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4345 netif_is_bridge_port(dev)) {
4346 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4348 ops = br_dev->netdev_ops;
4350 if (ops->ndo_fdb_del)
4351 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4353 if (ops->ndo_fdb_del_bulk)
4354 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4361 ndm->ndm_flags &= ~NTF_MASTER;
4364 /* Embedded bridge, macvlan, and any other device support */
4365 if (ndm->ndm_flags & NTF_SELF) {
4366 ops = dev->netdev_ops;
4368 if (ops->ndo_fdb_del)
4369 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4371 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4373 /* in case err was cleared by NTF_MASTER call */
4375 if (ops->ndo_fdb_del_bulk)
4376 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4382 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4384 ndm->ndm_flags &= ~NTF_SELF;
4391 static int nlmsg_populate_fdb(struct sk_buff *skb,
4392 struct netlink_callback *cb,
4393 struct net_device *dev,
4395 struct netdev_hw_addr_list *list)
4397 struct netdev_hw_addr *ha;
4401 portid = NETLINK_CB(cb->skb).portid;
4402 seq = cb->nlh->nlmsg_seq;
4404 list_for_each_entry(ha, &list->list, list) {
4405 if (*idx < cb->args[2])
4408 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4410 RTM_NEWNEIGH, NTF_SELF,
4411 NLM_F_MULTI, NUD_PERMANENT);
4421 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4422 * @skb: socket buffer to store message in
4423 * @cb: netlink callback
 4424  * @dev: netdevice
 4425  * @filter_dev: ignored
4426 * @idx: the number of FDB table entries dumped is added to *@idx
4428 * Default netdevice operation to dump the existing unicast address list.
4429 * Returns number of addresses from list put in skb.
4431 int ndo_dflt_fdb_dump(struct sk_buff *skb,
4432 struct netlink_callback *cb,
4433 struct net_device *dev,
4434 struct net_device *filter_dev,
4439 if (dev->type != ARPHRD_ETHER)
4442 netif_addr_lock_bh(dev);
4443 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4446 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4448 netif_addr_unlock_bh(dev);
4451 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4453 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4454 int *br_idx, int *brport_idx,
4455 struct netlink_ext_ack *extack)
4457 struct nlattr *tb[NDA_MAX + 1];
4461 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4462 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4466 ndm = nlmsg_data(nlh);
4467 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4468 ndm->ndm_flags || ndm->ndm_type) {
4469 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4473 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4474 NDA_MAX, NULL, extack);
4478 *brport_idx = ndm->ndm_ifindex;
4479 for (i = 0; i <= NDA_MAX; ++i) {
4485 if (nla_len(tb[i]) != sizeof(u32)) {
4486 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4489 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4492 if (nla_len(tb[i]) != sizeof(u32)) {
4493 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4496 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4499 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4507 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4508 int *br_idx, int *brport_idx,
4509 struct netlink_ext_ack *extack)
4511 struct nlattr *tb[IFLA_MAX+1];
4514 /* A hack to preserve kernel<->userspace interface.
4515 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4516 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
4517 * So, check for ndmsg with an optional u32 attribute (not used here).
4518 * Fortunately these sizes don't conflict with the size of ifinfomsg
4519 * with an optional attribute.
4521 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4522 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4523 nla_attr_size(sizeof(u32)))) {
4524 struct ifinfomsg *ifm;
4526 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4527 tb, IFLA_MAX, ifla_policy,
4531 } else if (err == 0) {
4532 if (tb[IFLA_MASTER])
4533 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
4536 ifm = nlmsg_data(nlh);
4537 *brport_idx = ifm->ifi_index;
4542 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4544 struct net_device *dev;
4545 struct net_device *br_dev = NULL;
4546 const struct net_device_ops *ops = NULL;
4547 const struct net_device_ops *cops = NULL;
4548 struct net *net = sock_net(skb->sk);
4549 struct hlist_head *head;
4557 if (cb->strict_check)
4558 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4561 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4567 br_dev = __dev_get_by_index(net, br_idx);
4571 ops = br_dev->netdev_ops;
4575 s_idx = cb->args[1];
4577 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4579 head = &net->dev_index_head[h];
4580 hlist_for_each_entry(dev, head, index_hlist) {
4582 if (brport_idx && (dev->ifindex != brport_idx))
4585 if (!br_idx) { /* user did not specify a specific bridge */
4586 if (netif_is_bridge_port(dev)) {
4587 br_dev = netdev_master_upper_dev_get(dev);
4588 cops = br_dev->netdev_ops;
4591 if (dev != br_dev &&
4592 !netif_is_bridge_port(dev))
4595 if (br_dev != netdev_master_upper_dev_get(dev) &&
4596 !netif_is_bridge_master(dev))
4604 if (netif_is_bridge_port(dev)) {
4605 if (cops && cops->ndo_fdb_dump) {
4606 err = cops->ndo_fdb_dump(skb, cb,
4609 if (err == -EMSGSIZE)
4614 if (dev->netdev_ops->ndo_fdb_dump)
4615 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4619 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4621 if (err == -EMSGSIZE)
4626 /* reset fdb offset to 0 for rest of the interfaces */
4642 static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4643 struct nlattr **tb, u8 *ndm_flags,
4644 int *br_idx, int *brport_idx, u8 **addr,
4645 u16 *vid, struct netlink_ext_ack *extack)
4650 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4651 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4655 ndm = nlmsg_data(nlh);
4656 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4658 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4662 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4663 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4667 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4668 NDA_MAX, nda_policy, extack);
4672 *ndm_flags = ndm->ndm_flags;
4673 *brport_idx = ndm->ndm_ifindex;
4674 for (i = 0; i <= NDA_MAX; ++i) {
4680 *br_idx = nla_get_u32(tb[i]);
4683 if (nla_len(tb[i]) != ETH_ALEN) {
4684 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4687 *addr = nla_data(tb[i]);
4690 err = fdb_vid_parse(tb[i], vid, extack);
4697 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4705 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4706 struct netlink_ext_ack *extack)
4708 struct net_device *dev = NULL, *br_dev = NULL;
4709 const struct net_device_ops *ops = NULL;
4710 struct net *net = sock_net(in_skb->sk);
4711 struct nlattr *tb[NDA_MAX + 1];
4712 struct sk_buff *skb;
4720 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4721 &brport_idx, &addr, &vid, extack);
4726 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4731 dev = __dev_get_by_index(net, brport_idx);
4733 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4740 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4744 br_dev = __dev_get_by_index(net, br_idx);
4746 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4749 ops = br_dev->netdev_ops;
4753 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4754 if (!netif_is_bridge_port(dev)) {
4755 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4758 br_dev = netdev_master_upper_dev_get(dev);
4760 NL_SET_ERR_MSG(extack, "Master of device not found");
4763 ops = br_dev->netdev_ops;
4765 if (!(ndm_flags & NTF_SELF)) {
4766 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4769 ops = dev->netdev_ops;
4773 if (!br_dev && !dev) {
4774 NL_SET_ERR_MSG(extack, "No device specified");
4778 if (!ops || !ops->ndo_fdb_get) {
4779 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4783 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4789 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4790 NETLINK_CB(in_skb).portid,
4791 nlh->nlmsg_seq, extack);
4795 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4801 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4802 unsigned int attrnum, unsigned int flag)
4805 return nla_put_u8(skb, attrnum, !!(flags & flag));
4809 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4810 struct net_device *dev, u16 mode,
4811 u32 flags, u32 mask, int nlflags,
4813 int (*vlan_fill)(struct sk_buff *skb,
4814 struct net_device *dev,
4817 struct nlmsghdr *nlh;
4818 struct ifinfomsg *ifm;
4819 struct nlattr *br_afspec;
4820 struct nlattr *protinfo;
4821 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4822 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4825 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4829 ifm = nlmsg_data(nlh);
4830 ifm->ifi_family = AF_BRIDGE;
4832 ifm->ifi_type = dev->type;
4833 ifm->ifi_index = dev->ifindex;
4834 ifm->ifi_flags = dev_get_flags(dev);
4835 ifm->ifi_change = 0;
4838 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4839 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4840 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4842 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4844 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4845 (dev->ifindex != dev_get_iflink(dev) &&
4846 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4847 goto nla_put_failure;
4849 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4851 goto nla_put_failure;
4853 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4854 nla_nest_cancel(skb, br_afspec);
4855 goto nla_put_failure;
4858 if (mode != BRIDGE_MODE_UNDEF) {
4859 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4860 nla_nest_cancel(skb, br_afspec);
4861 goto nla_put_failure;
4865 err = vlan_fill(skb, dev, filter_mask);
4867 nla_nest_cancel(skb, br_afspec);
4868 goto nla_put_failure;
4871 nla_nest_end(skb, br_afspec);
4873 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4875 goto nla_put_failure;
4877 if (brport_nla_put_flag(skb, flags, mask,
4878 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4879 brport_nla_put_flag(skb, flags, mask,
4880 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4881 brport_nla_put_flag(skb, flags, mask,
4882 IFLA_BRPORT_FAST_LEAVE,
4883 BR_MULTICAST_FAST_LEAVE) ||
4884 brport_nla_put_flag(skb, flags, mask,
4885 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4886 brport_nla_put_flag(skb, flags, mask,
4887 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4888 brport_nla_put_flag(skb, flags, mask,
4889 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4890 brport_nla_put_flag(skb, flags, mask,
4891 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4892 brport_nla_put_flag(skb, flags, mask,
4893 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4894 brport_nla_put_flag(skb, flags, mask,
4895 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4896 brport_nla_put_flag(skb, flags, mask,
4897 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4898 nla_nest_cancel(skb, protinfo);
4899 goto nla_put_failure;
4902 nla_nest_end(skb, protinfo);
4904 nlmsg_end(skb, nlh);
4907 nlmsg_cancel(skb, nlh);
4908 return err ? err : -EMSGSIZE;
4910 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
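
/* Illustrative sketch (not part of rtnetlink.c): the dump request answered
 * by rtnl_bridge_getlink() below through ndo_bridge_getlink(), which for
 * most drivers ends up in ndo_dflt_bridge_getlink() above. Setting
 * ifi_family to AF_BRIDGE selects the PF_BRIDGE handlers; IFLA_EXT_MASK with
 * RTEXT_FILTER_BRVLAN additionally requests per-port VLAN info. Assumes
 * add_rta() from the earlier sketches and an already-open NETLINK_ROUTE
 * socket; parsing of the multipart reply is only sketched.
 */
static int bridge_getlink_dump(int fd)	/* fd: NETLINK_ROUTE socket */
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char attrs[32];
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type  = RTM_GETLINK,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
	};
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	unsigned int ext = RTEXT_FILTER_BRVLAN;
	char buf[16384];
	int len;

	req.ifm.ifi_family = AF_BRIDGE;
	add_rta(&req.nh, IFLA_EXT_MASK, &ext, sizeof(ext));

	sendto(fd, &req, req.nh.nlmsg_len, 0,
	       (struct sockaddr *)&sa, sizeof(sa));

	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nh;

		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len))
			if (nh->nlmsg_type == NLMSG_DONE)
				return 0;	/* end of multipart dump */
		/* each RTM_NEWLINK part carries IFLA_AF_SPEC/IFLA_PROTINFO */
	}
	return -1;
}
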
4912 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4913 bool strict_check, u32 *filter_mask,
4914 struct netlink_ext_ack *extack)
4916 struct nlattr *tb[IFLA_MAX+1];
4920 struct ifinfomsg *ifm;
4922 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4923 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4927 ifm = nlmsg_data(nlh);
4928 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4929 ifm->ifi_change || ifm->ifi_index) {
4930 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4934 err = nlmsg_parse_deprecated_strict(nlh,
4935 sizeof(struct ifinfomsg),
4936 tb, IFLA_MAX, ifla_policy,
4939 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4940 tb, IFLA_MAX, ifla_policy,
4946 /* new attributes should only be added with strict checking */
4947 for (i = 0; i <= IFLA_MAX; ++i) {
4953 *filter_mask = nla_get_u32(tb[i]);
4957 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4966 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4968 const struct nlmsghdr *nlh = cb->nlh;
4969 struct net *net = sock_net(skb->sk);
4970 struct net_device *dev;
4972 u32 portid = NETLINK_CB(cb->skb).portid;
4973 u32 seq = nlh->nlmsg_seq;
4974 u32 filter_mask = 0;
4977 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4979 if (err < 0 && cb->strict_check)
4983 for_each_netdev_rcu(net, dev) {
4984 const struct net_device_ops *ops = dev->netdev_ops;
4985 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4987 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4988 if (idx >= cb->args[0]) {
4989 err = br_dev->netdev_ops->ndo_bridge_getlink(
4990 skb, portid, seq, dev,
4991 filter_mask, NLM_F_MULTI);
4992 if (err < 0 && err != -EOPNOTSUPP) {
4993 if (likely(skb->len))
5002 if (ops->ndo_bridge_getlink) {
5003 if (idx >= cb->args[0]) {
5004 err = ops->ndo_bridge_getlink(skb, portid,
5008 if (err < 0 && err != -EOPNOTSUPP) {
5009 if (likely(skb->len))
5026 static inline size_t bridge_nlmsg_size(void)
5028 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5029 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5030 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5031 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
5032 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
5033 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
5034 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
5035 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
5036 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
5037 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
5038 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
5041 static int rtnl_bridge_notify(struct net_device *dev)
5043 struct net *net = dev_net(dev);
5044 struct sk_buff *skb;
5045 int err = -EOPNOTSUPP;
5047 if (!dev->netdev_ops->ndo_bridge_getlink)
5050 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5056 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5060 /* Notification info is only filled for bridge ports, not the bridge
5061 * device itself. Therefore, a zero notification length is valid and
5062 * should not result in an error.
5067 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5070 WARN_ON(err == -EMSGSIZE);
5073 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
5077 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5078 struct netlink_ext_ack *extack)
5080 struct net *net = sock_net(skb->sk);
5081 struct ifinfomsg *ifm;
5082 struct net_device *dev;
5083 struct nlattr *br_spec, *attr = NULL;
5084 int rem, err = -EOPNOTSUPP;
5086 bool have_flags = false;
5088 if (nlmsg_len(nlh) < sizeof(*ifm))
5091 ifm = nlmsg_data(nlh);
5092 if (ifm->ifi_family != AF_BRIDGE)
5093 return -EPFNOSUPPORT;
5095 dev = __dev_get_by_index(net, ifm->ifi_index);
5097 NL_SET_ERR_MSG(extack, "unknown ifindex");
5101 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5103 nla_for_each_nested(attr, br_spec, rem) {
5104 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5105 if (nla_len(attr) < sizeof(flags))
5109 flags = nla_get_u16(attr);
5115 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5116 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5118 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5123 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5128 flags &= ~BRIDGE_FLAGS_MASTER;
5131 if ((flags & BRIDGE_FLAGS_SELF)) {
5132 if (!dev->netdev_ops->ndo_bridge_setlink)
5135 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5139 flags &= ~BRIDGE_FLAGS_SELF;
 5141 		/* Generate event to notify upper layer of bridge change */
5144 err = rtnl_bridge_notify(dev);
5149 memcpy(nla_data(attr), &flags, sizeof(flags));
5154 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5155 struct netlink_ext_ack *extack)
5157 struct net *net = sock_net(skb->sk);
5158 struct ifinfomsg *ifm;
5159 struct net_device *dev;
5160 struct nlattr *br_spec, *attr = NULL;
5161 int rem, err = -EOPNOTSUPP;
5163 bool have_flags = false;
5165 if (nlmsg_len(nlh) < sizeof(*ifm))
5168 ifm = nlmsg_data(nlh);
5169 if (ifm->ifi_family != AF_BRIDGE)
5170 return -EPFNOSUPPORT;
5172 dev = __dev_get_by_index(net, ifm->ifi_index);
5174 NL_SET_ERR_MSG(extack, "unknown ifindex");
5178 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5180 nla_for_each_nested(attr, br_spec, rem) {
5181 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5182 if (nla_len(attr) < sizeof(flags))
5186 flags = nla_get_u16(attr);
5192 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5193 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5195 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5200 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5204 flags &= ~BRIDGE_FLAGS_MASTER;
5207 if ((flags & BRIDGE_FLAGS_SELF)) {
5208 if (!dev->netdev_ops->ndo_bridge_dellink)
5211 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5215 flags &= ~BRIDGE_FLAGS_SELF;
 5217 		/* Generate event to notify upper layer of bridge change */
5220 err = rtnl_bridge_notify(dev);
5225 memcpy(nla_data(attr), &flags, sizeof(flags));
5230 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5232 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5233 (!idxattr || idxattr == attrid);
5237 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5239 return dev->netdev_ops &&
5240 dev->netdev_ops->ndo_has_offload_stats &&
5241 dev->netdev_ops->ndo_get_offload_stats &&
5242 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5246 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5248 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5249 sizeof(struct rtnl_link_stats64) : 0;
5253 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5254 struct sk_buff *skb)
5256 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5257 struct nlattr *attr = NULL;
5264 attr = nla_reserve_64bit(skb, attr_id, size,
5265 IFLA_OFFLOAD_XSTATS_UNSPEC);
5269 attr_data = nla_data(attr);
5270 memset(attr_data, 0, size);
5272 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5280 rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5281 enum netdev_offload_xstats_type type)
5283 bool enabled = netdev_offload_xstats_enabled(dev, type);
5285 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5288 struct rtnl_offload_xstats_request_used {
5294 rtnl_offload_xstats_get_stats(struct net_device *dev,
5295 enum netdev_offload_xstats_type type,
5296 struct rtnl_offload_xstats_request_used *ru,
5297 struct rtnl_hw_stats64 *stats,
5298 struct netlink_ext_ack *extack)
5304 request = netdev_offload_xstats_enabled(dev, type);
5310 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5316 ru->request = request;
5323 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5324 struct rtnl_offload_xstats_request_used *ru)
5326 struct nlattr *nest;
5328 nest = nla_nest_start(skb, attr_id);
5332 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5333 goto nla_put_failure;
5335 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5336 goto nla_put_failure;
nla_nest_end(skb, nest);
return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
5347 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5348 struct netlink_ext_ack *extack)
5350 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5351 struct rtnl_offload_xstats_request_used ru_l3;
5352 struct nlattr *nest;
5355 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5359 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5363 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
IFLA_OFFLOAD_XSTATS_L3_STATS,
					   &ru_l3))
5366 goto nla_put_failure;
nla_nest_end(skb, nest);
return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
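/* Fill the IFLA_STATS_LINK_OFFLOAD_XSTATS nest. Each sub-attribute is emitted
 * only if selected by off_filter_mask; *prividx records the last attribute
 * attempted so an interrupted dump can resume where it left off. Returns
 * -ENODATA when nothing was filled.
 */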
5376 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5377 int *prividx, u32 off_filter_mask,
5378 struct netlink_ext_ack *extack)
5380 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5381 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5382 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5383 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5384 bool have_data = false;
if (*prividx <= attr_id_cpu_hit &&
    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
if (!err) {
	have_data = true;
} else if (err != -ENODATA) {
	*prividx = attr_id_cpu_hit;
	return err;
5399 if (*prividx <= attr_id_hw_s_info &&
5400 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5401 *prividx = attr_id_hw_s_info;
5403 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5411 if (*prividx <= attr_id_l3_stats &&
5412 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5413 unsigned int size_l3;
5414 struct nlattr *attr;
5416 *prividx = attr_id_l3_stats;
5418 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5421 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5422 IFLA_OFFLOAD_XSTATS_UNSPEC);
5426 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5427 nla_data(attr), extack);
5444 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5445 enum netdev_offload_xstats_type type)
5447 bool enabled = netdev_offload_xstats_enabled(dev, type);
5449 return nla_total_size(0) +
5450 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5451 nla_total_size(sizeof(u8)) +
5452 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
(enabled ? nla_total_size(sizeof(u8)) : 0) +
0;
5458 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5460 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5462 return nla_total_size(0) +
5463 /* IFLA_OFFLOAD_XSTATS_L3_STATS */
rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
0;
5468 static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5469 u32 off_filter_mask)
5471 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5472 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5476 if (off_filter_mask &
5477 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5478 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5479 nla_size += nla_total_size_64bit(size);
5482 if (off_filter_mask &
5483 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5484 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5486 if (off_filter_mask &
5487 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5488 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5489 nla_size += nla_total_size_64bit(size);
5493 nla_size += nla_total_size(0);
5498 struct rtnl_stats_dump_filters {
5499 /* mask[0] filters outer attributes. Then individual nests have their
* filtering mask at the index of the nested attribute.
*/
5502 u32 mask[IFLA_STATS_MAX + 1];
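/* For illustration (hypothetical values): a request limited to L3 hardware
 * stats would be described roughly as
 *   filters.mask[0] = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
 *   filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
 *           IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS);
 */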
5505 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5506 int type, u32 pid, u32 seq, u32 change,
5508 const struct rtnl_stats_dump_filters *filters,
5509 int *idxattr, int *prividx,
5510 struct netlink_ext_ack *extack)
5512 unsigned int filter_mask = filters->mask[0];
5513 struct if_stats_msg *ifsm;
5514 struct nlmsghdr *nlh;
5515 struct nlattr *attr;
5516 int s_prividx = *prividx;
5521 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5525 ifsm = nlmsg_data(nlh);
5526 ifsm->family = PF_UNSPEC;
5529 ifsm->ifindex = dev->ifindex;
5530 ifsm->filter_mask = filter_mask;
5532 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5533 struct rtnl_link_stats64 *sp;
attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
			 sizeof(struct rtnl_link_stats64), IFLA_STATS_UNSPEC);
if (!attr) {
	err = -EMSGSIZE;
	goto nla_put_failure;
}
5543 sp = nla_data(attr);
5544 dev_get_stats(dev, sp);
5547 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5548 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5550 if (ops && ops->fill_linkxstats) {
5551 *idxattr = IFLA_STATS_LINK_XSTATS;
attr = nla_nest_start_noflag(skb, IFLA_STATS_LINK_XSTATS);
if (!attr) {
	err = -EMSGSIZE;
	goto nla_put_failure;
}
err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
nla_nest_end(skb, attr);
if (err)
	goto nla_put_failure;
*idxattr = 0;
5567 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5569 const struct rtnl_link_ops *ops = NULL;
5570 const struct net_device *master;
5572 master = netdev_master_upper_dev_get(dev);
5574 ops = master->rtnl_link_ops;
5575 if (ops && ops->fill_linkxstats) {
5576 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
attr = nla_nest_start_noflag(skb, IFLA_STATS_LINK_XSTATS_SLAVE);
if (!attr) {
	err = -EMSGSIZE;
	goto nla_put_failure;
}
err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
nla_nest_end(skb, attr);
if (err)
	goto nla_put_failure;
*idxattr = 0;
5592 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5594 u32 off_filter_mask;
5596 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5597 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
attr = nla_nest_start_noflag(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
if (!attr) {
	err = -EMSGSIZE;
	goto nla_put_failure;
}
5605 err = rtnl_offload_xstats_fill(skb, dev, prividx,
5606 off_filter_mask, extack);
5607 if (err == -ENODATA)
5608 nla_nest_cancel(skb, attr);
else
	nla_nest_end(skb, attr);
5612 if (err && err != -ENODATA)
5613 goto nla_put_failure;
5617 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5618 struct rtnl_af_ops *af_ops;
*idxattr = IFLA_STATS_AF_SPEC;
attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
if (!attr) {
	err = -EMSGSIZE;
	goto nla_put_failure;
}
5628 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5629 if (af_ops->fill_stats_af) {
af = nla_nest_start_noflag(skb, af_ops->family);
if (!af) {
	rcu_read_unlock();
	err = -EMSGSIZE;
	goto nla_put_failure;
}
5639 err = af_ops->fill_stats_af(skb, dev);
5641 if (err == -ENODATA) {
5642 nla_nest_cancel(skb, af);
5643 } else if (err < 0) {
5645 goto nla_put_failure;
5648 nla_nest_end(skb, af);
5653 nla_nest_end(skb, attr);
5658 nlmsg_end(skb, nlh);
return 0;

nla_put_failure:
	/* not a multi message or no progress means a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);
	return err;
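/* Compute a worst-case payload size for the stats message described by
 * @filters so the reply skb can be allocated up front with nlmsg_new().
 */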
5672 static size_t if_nlmsg_stats_size(const struct net_device *dev,
5673 const struct rtnl_stats_dump_filters *filters)
5675 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5676 unsigned int filter_mask = filters->mask[0];
5678 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5679 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5681 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5682 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5683 int attr = IFLA_STATS_LINK_XSTATS;
5685 if (ops && ops->get_linkxstats_size) {
size += nla_total_size(ops->get_linkxstats_size(dev, attr));
5688 /* for IFLA_STATS_LINK_XSTATS */
5689 size += nla_total_size(0);
5693 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5694 struct net_device *_dev = (struct net_device *)dev;
5695 const struct rtnl_link_ops *ops = NULL;
5696 const struct net_device *master;
5698 /* netdev_master_upper_dev_get can't take const */
5699 master = netdev_master_upper_dev_get(_dev);
5701 ops = master->rtnl_link_ops;
5702 if (ops && ops->get_linkxstats_size) {
5703 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
size += nla_total_size(ops->get_linkxstats_size(dev, attr));
5707 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5708 size += nla_total_size(0);
5712 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5713 u32 off_filter_mask;
5715 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5716 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5719 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5720 struct rtnl_af_ops *af_ops;
5722 /* for IFLA_STATS_AF_SPEC */
5723 size += nla_total_size(0);
5726 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5727 if (af_ops->get_stats_af_size) {
5728 size += nla_total_size(
5729 af_ops->get_stats_af_size(dev));
5732 size += nla_total_size(0);
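/* Any currently defined IFLA_OFFLOAD_XSTATS_* filter bit is acceptable in the
 * nested IFLA_STATS_LINK_OFFLOAD_XSTATS filter attribute.
 */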
5741 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5743 static const struct nla_policy
5744 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5745 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5746 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5749 static const struct nla_policy
5750 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5751 [IFLA_STATS_GET_FILTERS] =
5752 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5755 static const struct nla_policy
5756 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5757 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
5760 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5761 struct rtnl_stats_dump_filters *filters,
5762 struct netlink_ext_ack *extack)
5764 struct nlattr *tb[IFLA_STATS_MAX + 1];
5768 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5769 rtnl_stats_get_policy_filters, extack);
5773 for (at = 1; at <= IFLA_STATS_MAX; at++) {
if (tb[at]) {
	if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
		NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
		return -EINVAL;
	}
	filters->mask[at] = nla_get_u32(tb[at]);
5786 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5788 struct rtnl_stats_dump_filters *filters,
5789 struct netlink_ext_ack *extack)
5791 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5795 filters->mask[0] = filter_mask;
5796 for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5797 filters->mask[i] = -1U;
5799 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5800 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5804 if (tb[IFLA_STATS_GET_FILTERS]) {
err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
				   filters, extack);
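/* Validate the fixed if_stats_msg header of an RTM_GETSTATS/RTM_SETSTATS
 * request; under strict checking the padding must be zero and a dump may not
 * pin a single ifindex. The legacy exception is filter_mask.
 */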
5814 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5815 bool is_dump, struct netlink_ext_ack *extack)
5817 struct if_stats_msg *ifsm;
5819 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
return -EINVAL;
5827 ifsm = nlmsg_data(nlh);
5829 /* only requests using strict checks can pass data to influence
* the dump. The legacy exception is filter_mask.
*/
5832 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
	return -EINVAL;
}
if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
	NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
	return -EINVAL;
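/* doit handler for RTM_GETSTATS: look up the device named by ifsm->ifindex,
 * build a single RTM_NEWSTATS message according to the requested filters and
 * unicast it back to the requester.
 */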
5844 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5845 struct netlink_ext_ack *extack)
5847 struct rtnl_stats_dump_filters filters;
5848 struct net *net = sock_net(skb->sk);
5849 struct net_device *dev = NULL;
5850 int idxattr = 0, prividx = 0;
5851 struct if_stats_msg *ifsm;
5852 struct sk_buff *nskb;
err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
			   false, extack);
5860 ifsm = nlmsg_data(nlh);
if (ifsm->ifindex > 0)
	dev = __dev_get_by_index(net, ifsm->ifindex);
else
	return -EINVAL;
if (!dev)
	return -ENODEV;
if (!ifsm->filter_mask) {
	NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
	return -EINVAL;
}
5874 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
5878 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
5882 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5883 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
5884 0, &filters, &idxattr, &prividx, extack);
if (err < 0) {
	/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
	WARN_ON(err == -EMSGSIZE);
	kfree_skb(nskb);
} else {
	err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
}
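/* dumpit handler for RTM_GETSTATS: walk the per-netns device hash and emit
 * one RTM_NEWSTATS message per device, using cb->args[] to resume both the
 * device walk and a partially filled message across dump callbacks.
 */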
5896 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5898 struct netlink_ext_ack *extack = cb->extack;
5899 int h, s_h, err, s_idx, s_idxattr, s_prividx;
5900 struct rtnl_stats_dump_filters filters;
5901 struct net *net = sock_net(skb->sk);
5902 unsigned int flags = NLM_F_MULTI;
5903 struct if_stats_msg *ifsm;
5904 struct hlist_head *head;
5905 struct net_device *dev;
5909 s_idx = cb->args[1];
5910 s_idxattr = cb->args[2];
5911 s_prividx = cb->args[3];
5913 cb->seq = net->dev_base_seq;
5915 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5919 ifsm = nlmsg_data(cb->nlh);
if (!ifsm->filter_mask) {
	NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
	return -EINVAL;
}
5925 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
5930 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5932 head = &net->dev_index_head[h];
5933 hlist_for_each_entry(dev, head, index_hlist) {
5936 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
5937 NETLINK_CB(cb->skb).portid,
5938 cb->nlh->nlmsg_seq, 0,
5940 &s_idxattr, &s_prividx,
/* If we ran out of room on the first message, then we're not done */
5945 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
5951 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5957 cb->args[3] = s_prividx;
5958 cb->args[2] = s_idxattr;
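/* Notify RTNLGRP_STATS listeners that the offload-xstats state of @dev
 * changed (e.g. L3 stats were enabled or disabled), so user space can
 * re-query IFLA_OFFLOAD_XSTATS_HW_S_INFO.
 */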
5965 void rtnl_offload_xstats_notify(struct net_device *dev)
5967 struct rtnl_stats_dump_filters response_filters = {};
5968 struct net *net = dev_net(dev);
5969 int idxattr = 0, prividx = 0;
5970 struct sk_buff *skb;
5975 response_filters.mask[0] |=
5976 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
5977 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
5978 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
		GFP_KERNEL);
5985 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
5986 &response_filters, &idxattr, &prividx, NULL);
5992 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
5996 rtnl_set_sk_err(net, RTNLGRP_STATS, err);
5998 EXPORT_SYMBOL(rtnl_offload_xstats_notify);
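/* doit handler for RTM_SETSTATS: currently only toggles collection of L3
 * hardware offload statistics for a device and, on change, notifies
 * RTNLGRP_STATS listeners.
 */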
6000 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
6001 struct netlink_ext_ack *extack)
6003 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
6004 struct rtnl_stats_dump_filters response_filters = {};
6005 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
6006 struct net *net = sock_net(skb->sk);
6007 struct net_device *dev = NULL;
6008 struct if_stats_msg *ifsm;
6009 bool notify = false;
err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
			   false, extack);
6017 ifsm = nlmsg_data(nlh);
if (ifsm->family != AF_UNSPEC) {
	NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
	return -EINVAL;
}
if (ifsm->ifindex > 0)
	dev = __dev_get_by_index(net, ifsm->ifindex);
else
	return -EINVAL;
if (!dev)
	return -ENODEV;
if (ifsm->filter_mask) {
	NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
	return -EINVAL;
}
6036 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
6037 ifla_stats_set_policy, extack);
if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
	u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);

	if (req)
		err = netdev_offload_xstats_enable(dev, t_l3, extack);
	else
		err = netdev_offload_xstats_disable(dev, t_l3);
	if (!err)
		notify = true;
	else if (err != -EALREADY)
		return err;
6054 response_filters.mask[0] |=
6055 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6056 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6057 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
if (notify)
	rtnl_offload_xstats_notify(dev);
6066 /* Process one rtnetlink message. */
6068 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
6069 struct netlink_ext_ack *extack)
6071 struct net *net = sock_net(skb->sk);
6072 struct rtnl_link *link;
6073 enum rtnl_kinds kind;
6074 struct module *owner;
6075 int err = -EOPNOTSUPP;
6076 rtnl_doit_func doit;
6081 type = nlh->nlmsg_type;
6087 /* All the messages must have at least 1 byte length */
if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
	return 0;
6091 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
6092 kind = rtnl_msgtype_kind(type);
6094 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
6098 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
6100 rtnl_dumpit_func dumpit;
6101 u32 min_dump_alloc = 0;
6103 link = rtnl_get_link(family, type);
if (!link || !link->dumpit) {
	family = PF_UNSPEC;
	link = rtnl_get_link(family, type);
	if (!link || !link->dumpit)
		goto err_unlock;
}
6110 owner = link->owner;
6111 dumpit = link->dumpit;
6113 if (type == RTM_GETLINK - RTM_BASE)
6114 min_dump_alloc = rtnl_calcit(skb, nlh);
6117 /* need to do this before rcu_read_unlock() */
6118 if (!try_module_get(owner))
6119 err = -EPROTONOSUPPORT;
struct netlink_dump_control c = {
	.dump = dumpit,
	.min_dump_alloc = min_dump_alloc,
	.module = owner,
};
err = netlink_dump_start(rtnl, skb, nlh, &c);
6131 /* netlink_dump_start() will keep a reference on
6132 * module if dump is still in progress.
6139 link = rtnl_get_link(family, type);
if (!link || !link->doit) {
	family = PF_UNSPEC;
	link = rtnl_get_link(PF_UNSPEC, type);
	if (!link || !link->doit)
		goto out_unlock;
}
6147 owner = link->owner;
6148 if (!try_module_get(owner)) {
6149 err = -EPROTONOSUPPORT;
6153 flags = link->flags;
6154 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
!(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
	NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
	module_put(owner);
	goto err_unlock;
6161 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
6165 err = doit(skb, nlh, extack);
6172 link = rtnl_get_link(family, type);
6173 if (link && link->doit)
6174 err = link->doit(skb, nlh, extack);
6190 static void rtnetlink_rcv(struct sk_buff *skb)
6192 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
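/* Restrict joining the IPv4/IPv6 multicast-routing report groups to
 * CAP_NET_ADMIN in the socket's owning user namespace.
 */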
6195 static int rtnetlink_bind(struct net *net, int group)
6198 case RTNLGRP_IPV4_MROUTE_R:
6199 case RTNLGRP_IPV6_MROUTE_R:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
	return -EPERM;
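/* netdevice notifier: translate selected device events into RTM_NEWLINK
 * notifications so user space learns about changes made outside rtnetlink.
 */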
6207 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
6209 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
case NETDEV_CHANGEMTU:
6214 case NETDEV_CHANGEADDR:
6215 case NETDEV_CHANGENAME:
6216 case NETDEV_FEAT_CHANGE:
6217 case NETDEV_BONDING_FAILOVER:
6218 case NETDEV_POST_TYPE_CHANGE:
6219 case NETDEV_NOTIFY_PEERS:
6220 case NETDEV_CHANGEUPPER:
6221 case NETDEV_RESEND_IGMP:
6222 case NETDEV_CHANGEINFODATA:
6223 case NETDEV_CHANGELOWERSTATE:
6224 case NETDEV_CHANGE_TX_QUEUE_LEN:
6225 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
6226 GFP_KERNEL, NULL, 0, 0, NULL);
6234 static struct notifier_block rtnetlink_dev_notifier = {
6235 .notifier_call = rtnetlink_event,
6239 static int __net_init rtnetlink_net_init(struct net *net)
struct sock *sk;
struct netlink_kernel_cfg cfg = {
6243 .groups = RTNLGRP_MAX,
6244 .input = rtnetlink_rcv,
6245 .cb_mutex = &rtnl_mutex,
6246 .flags = NL_CFG_F_NONROOT_RECV,
6247 .bind = rtnetlink_bind,
sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
if (!sk)
	return -ENOMEM;
net->rtnl = sk;
return 0;
6257 static void __net_exit rtnetlink_net_exit(struct net *net)
6259 netlink_kernel_release(net->rtnl);
6263 static struct pernet_operations rtnetlink_net_ops = {
6264 .init = rtnetlink_net_init,
6265 .exit = rtnetlink_net_exit,
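/* Register the NETLINK_ROUTE kernel socket for every network namespace and
 * hook up the core, protocol-independent rtnetlink message handlers.
 */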
6268 void __init rtnetlink_init(void)
6270 if (register_pernet_subsys(&rtnetlink_net_ops))
6271 panic("rtnetlink_init: cannot initialize rtnetlink\n");
6273 register_netdevice_notifier(&rtnetlink_dev_notifier);
6275 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
6276 rtnl_dump_ifinfo, 0);
6277 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
6278 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
6279 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
6281 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
6282 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
6283 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
6285 rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
6286 rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
6288 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
6289 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
6290 RTNL_FLAG_BULK_DEL_SUPPORTED);
6291 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
6293 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
6294 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
6295 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
6297 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
6299 rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);