2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Routing netlink socket interface: protocol independent part.
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
19 #include <linux/errno.h>
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/socket.h>
23 #include <linux/kernel.h>
24 #include <linux/timer.h>
25 #include <linux/string.h>
26 #include <linux/sockios.h>
27 #include <linux/net.h>
28 #include <linux/fcntl.h>
30 #include <linux/slab.h>
31 #include <linux/interrupt.h>
32 #include <linux/capability.h>
33 #include <linux/skbuff.h>
34 #include <linux/init.h>
35 #include <linux/security.h>
36 #include <linux/mutex.h>
37 #include <linux/if_addr.h>
38 #include <linux/if_bridge.h>
39 #include <linux/if_vlan.h>
40 #include <linux/pci.h>
41 #include <linux/etherdevice.h>
43 #include <asm/uaccess.h>
45 #include <linux/inet.h>
46 #include <linux/netdevice.h>
47 #include <net/switchdev.h>
49 #include <net/protocol.h>
51 #include <net/route.h>
55 #include <net/pkt_sched.h>
56 #include <net/fib_rules.h>
57 #include <net/rtnetlink.h>
58 #include <net/net_namespace.h>
62 rtnl_dumpit_func dumpit;
63 rtnl_calcit_func calcit;
66 static DEFINE_MUTEX(rtnl_mutex);
70 mutex_lock(&rtnl_mutex);
72 EXPORT_SYMBOL(rtnl_lock);
74 static struct sk_buff *defer_kfree_skb_list;
75 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
78 tail->next = defer_kfree_skb_list;
79 defer_kfree_skb_list = head;
82 EXPORT_SYMBOL(rtnl_kfree_skbs);
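/*
 * Illustrative sketch (hypothetical caller, not part of this file): code
 * already running under the RTNL lock can hand a chain of skbs to
 * rtnl_kfree_skbs() instead of freeing them one by one on the spot:
 *
 *	ASSERT_RTNL();
 *	rtnl_kfree_skbs(head, tail);
 *
 * The queued skbs are then released by __rtnl_unlock() after the mutex has
 * been dropped, keeping the time spent inside the lock short.
 */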
84 void __rtnl_unlock(void)
86 struct sk_buff *head = defer_kfree_skb_list;
88 defer_kfree_skb_list = NULL;
90 mutex_unlock(&rtnl_mutex);
93 struct sk_buff *next = head->next;
101 void rtnl_unlock(void)
103 /* This fellow will unlock it for us. */
106 EXPORT_SYMBOL(rtnl_unlock);
108 int rtnl_trylock(void)
110 return mutex_trylock(&rtnl_mutex);
112 EXPORT_SYMBOL(rtnl_trylock);
114 int rtnl_is_locked(void)
116 return mutex_is_locked(&rtnl_mutex);
118 EXPORT_SYMBOL(rtnl_is_locked);
120 #ifdef CONFIG_PROVE_LOCKING
121 bool lockdep_rtnl_is_held(void)
123 return lockdep_is_held(&rtnl_mutex);
125 EXPORT_SYMBOL(lockdep_rtnl_is_held);
126 #endif /* #ifdef CONFIG_PROVE_LOCKING */
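/*
 * Typical locking pattern (illustrative sketch only; example_set_mtu() is
 * hypothetical): configuration changes take the RTNL mutex around the whole
 * operation, since helpers such as dev_set_mtu() expect it to be held.
 *
 *	static int example_set_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_set_mtu(dev, new_mtu);
 *		rtnl_unlock();
 *		return err;
 *	}
 */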
128 static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
130 static inline int rtm_msgindex(int msgtype)
132 int msgindex = msgtype - RTM_BASE;
135 * msgindex < 0 implies someone tried to register a netlink
136 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
137 * the message type has not been added to linux/rtnetlink.h
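* For example, RTM_NEWLINK (16) maps to index 0 and RTM_GETROUTE (26) to
* index 10, since RTM_BASE is 16.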
139 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
144 static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
146 struct rtnl_link *tab;
148 if (protocol <= RTNL_FAMILY_MAX)
149 tab = rtnl_msg_handlers[protocol];
153 if (tab == NULL || tab[msgindex].doit == NULL)
154 tab = rtnl_msg_handlers[PF_UNSPEC];
156 return tab[msgindex].doit;
159 static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
161 struct rtnl_link *tab;
163 if (protocol <= RTNL_FAMILY_MAX)
164 tab = rtnl_msg_handlers[protocol];
168 if (tab == NULL || tab[msgindex].dumpit == NULL)
169 tab = rtnl_msg_handlers[PF_UNSPEC];
171 return tab[msgindex].dumpit;
174 static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
176 struct rtnl_link *tab;
178 if (protocol <= RTNL_FAMILY_MAX)
179 tab = rtnl_msg_handlers[protocol];
183 if (tab == NULL || tab[msgindex].calcit == NULL)
184 tab = rtnl_msg_handlers[PF_UNSPEC];
186 return tab[msgindex].calcit;
190 * __rtnl_register - Register a rtnetlink message type
191 * @protocol: Protocol family or PF_UNSPEC
192 * @msgtype: rtnetlink message type
193 * @doit: Function pointer called for each request message
194 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
195 * @calcit: Function pointer to calc size of dump message
197 * Registers the specified function pointers (at least one of them has
198 * to be non-NULL) to be called whenever a request message for the
199 * specified protocol family and message type is received.
201 * The special protocol family PF_UNSPEC may be used to define fallback
202 * function pointers for the case when no entry for the specific protocol family exists.
205 * Returns 0 on success or a negative error code.
207 int __rtnl_register(int protocol, int msgtype,
208 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
209 rtnl_calcit_func calcit)
211 struct rtnl_link *tab;
214 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
215 msgindex = rtm_msgindex(msgtype);
217 tab = rtnl_msg_handlers[protocol];
219 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
223 rtnl_msg_handlers[protocol] = tab;
227 tab[msgindex].doit = doit;
230 tab[msgindex].dumpit = dumpit;
233 tab[msgindex].calcit = calcit;
237 EXPORT_SYMBOL_GPL(__rtnl_register);
240 * rtnl_register - Register a rtnetlink message type
242 * Identical to __rtnl_register() but panics on failure. This is useful
243 * because failure of this function is very unlikely; it can only happen due
244 * to lack of memory when allocating the chain to store all message
245 * handlers for a protocol. Meant for use in init functions where lack
246 * of memory implies no sense in continuing.
248 void rtnl_register(int protocol, int msgtype,
249 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
250 rtnl_calcit_func calcit)
252 if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
253 panic("Unable to register rtnetlink message handler, "
254 "protocol = %d, message type = %d\n",
257 EXPORT_SYMBOL_GPL(rtnl_register);
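/*
 * Usage sketch: handlers are normally installed from an __init function.
 * The core link handlers in this file are registered essentially like this
 * (abridged from its init code):
 *
 *	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
 *		      rtnl_dump_ifinfo, rtnl_calcit);
 *	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
 *	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
 *	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);
 */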
260 * rtnl_unregister - Unregister a rtnetlink message type
261 * @protocol: Protocol family or PF_UNSPEC
262 * @msgtype: rtnetlink message type
264 * Returns 0 on success or a negative error code.
266 int rtnl_unregister(int protocol, int msgtype)
270 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
271 msgindex = rtm_msgindex(msgtype);
273 if (rtnl_msg_handlers[protocol] == NULL)
276 rtnl_msg_handlers[protocol][msgindex].doit = NULL;
277 rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
278 rtnl_msg_handlers[protocol][msgindex].calcit = NULL;
282 EXPORT_SYMBOL_GPL(rtnl_unregister);
285 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
286 * @protocol : Protocol family or PF_UNSPEC
288 * Identical to calling rtnl_unregister() for all registered message types
289 * of a certain protocol family.
291 void rtnl_unregister_all(int protocol)
293 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
295 kfree(rtnl_msg_handlers[protocol]);
296 rtnl_msg_handlers[protocol] = NULL;
298 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
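/*
 * Usage sketch (hypothetical module exit path): a protocol that registered
 * handlers for its own family would drop all of them in one call, e.g.
 *
 *	rtnl_unregister_all(PF_DECnet);
 */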
300 static LIST_HEAD(link_ops);
302 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
304 const struct rtnl_link_ops *ops;
306 list_for_each_entry(ops, &link_ops, list) {
307 if (!strcmp(ops->kind, kind))
314 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
315 * @ops: struct rtnl_link_ops * to register
317 * The caller must hold the rtnl_mutex. This function should be used
318 * by drivers that create devices during module initialization. It
319 * must be called before registering the devices.
321 * Returns 0 on success or a negative error code.
323 int __rtnl_link_register(struct rtnl_link_ops *ops)
325 if (rtnl_link_ops_get(ops->kind))
328 /* The check for setup is here because if ops
329 * does not have it filled in, it is not possible
330 * to use the ops for creating a device. So do not
331 * fill in dellink either; that disables rtnl_dellink.
333 if (ops->setup && !ops->dellink)
334 ops->dellink = unregister_netdevice_queue;
336 list_add_tail(&ops->list, &link_ops);
339 EXPORT_SYMBOL_GPL(__rtnl_link_register);
342 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
343 * @ops: struct rtnl_link_ops * to register
345 * Returns 0 on success or a negative error code.
347 int rtnl_link_register(struct rtnl_link_ops *ops)
352 err = __rtnl_link_register(ops);
356 EXPORT_SYMBOL_GPL(rtnl_link_register);
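/*
 * Usage sketch for a virtual-device driver (the "foo" names are
 * hypothetical; compare the dummy or veth drivers for real examples):
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind		= "foo",
 *		.setup		= foo_setup,
 *		.validate	= foo_validate,
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return rtnl_link_register(&foo_link_ops);
 *	}
 */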
358 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
360 struct net_device *dev;
361 LIST_HEAD(list_kill);
363 for_each_netdev(net, dev) {
364 if (dev->rtnl_link_ops == ops)
365 ops->dellink(dev, &list_kill);
367 unregister_netdevice_many(&list_kill);
371 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
372 * @ops: struct rtnl_link_ops * to unregister
374 * The caller must hold the rtnl_mutex.
376 void __rtnl_link_unregister(struct rtnl_link_ops *ops)
381 __rtnl_kill_links(net, ops);
383 list_del(&ops->list);
385 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
387 /* Return with the rtnl_lock held when there are no network
388 * devices unregistering in any network namespace.
390 static void rtnl_lock_unregistering_all(void)
394 DEFINE_WAIT_FUNC(wait, woken_wake_function);
396 add_wait_queue(&netdev_unregistering_wq, &wait);
398 unregistering = false;
401 if (net->dev_unreg_count > 0) {
402 unregistering = true;
410 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
412 remove_wait_queue(&netdev_unregistering_wq, &wait);
416 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
417 * @ops: struct rtnl_link_ops * to unregister
419 void rtnl_link_unregister(struct rtnl_link_ops *ops)
421 /* Close the race with cleanup_net() */
422 mutex_lock(&net_mutex);
423 rtnl_lock_unregistering_all();
424 __rtnl_link_unregister(ops);
426 mutex_unlock(&net_mutex);
428 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
430 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
432 struct net_device *master_dev;
433 const struct rtnl_link_ops *ops;
435 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
438 ops = master_dev->rtnl_link_ops;
439 if (!ops || !ops->get_slave_size)
441 /* IFLA_INFO_SLAVE_DATA + nested data */
442 return nla_total_size(sizeof(struct nlattr)) +
443 ops->get_slave_size(master_dev, dev);
446 static size_t rtnl_link_get_size(const struct net_device *dev)
448 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
454 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
455 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
458 /* IFLA_INFO_DATA + nested data */
459 size += nla_total_size(sizeof(struct nlattr)) +
462 if (ops->get_xstats_size)
463 /* IFLA_INFO_XSTATS */
464 size += nla_total_size(ops->get_xstats_size(dev));
466 size += rtnl_link_get_slave_info_data_size(dev);
471 static LIST_HEAD(rtnl_af_ops);
473 static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
475 const struct rtnl_af_ops *ops;
477 list_for_each_entry(ops, &rtnl_af_ops, list) {
478 if (ops->family == family)
486 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
487 * @ops: struct rtnl_af_ops * to register
489 * Returns 0 on success or a negative error code.
491 void rtnl_af_register(struct rtnl_af_ops *ops)
494 list_add_tail(&ops->list, &rtnl_af_ops);
497 EXPORT_SYMBOL_GPL(rtnl_af_register);
500 * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
501 * @ops: struct rtnl_af_ops * to unregister
503 * The caller must hold the rtnl_mutex.
505 void __rtnl_af_unregister(struct rtnl_af_ops *ops)
507 list_del(&ops->list);
509 EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
512 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
513 * @ops: struct rtnl_af_ops * to unregister
515 void rtnl_af_unregister(struct rtnl_af_ops *ops)
518 __rtnl_af_unregister(ops);
521 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
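/*
 * Usage sketch of an address-family hook (the field names are real, the
 * "foo" handlers are hypothetical; IPv4/IPv6 register themselves this way):
 *
 *	static struct rtnl_af_ops foo_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = foo_fill_link_af,
 *		.get_link_af_size = foo_get_link_af_size,
 *		.validate_link_af = foo_validate_link_af,
 *		.set_link_af	  = foo_set_link_af,
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);
 */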
523 static size_t rtnl_link_get_af_size(const struct net_device *dev,
526 struct rtnl_af_ops *af_ops;
530 size = nla_total_size(sizeof(struct nlattr));
532 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
533 if (af_ops->get_link_af_size) {
534 /* AF_* + nested data */
535 size += nla_total_size(sizeof(struct nlattr)) +
536 af_ops->get_link_af_size(dev, ext_filter_mask);
543 static bool rtnl_have_link_slave_info(const struct net_device *dev)
545 struct net_device *master_dev;
547 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
548 if (master_dev && master_dev->rtnl_link_ops)
553 static int rtnl_link_slave_info_fill(struct sk_buff *skb,
554 const struct net_device *dev)
556 struct net_device *master_dev;
557 const struct rtnl_link_ops *ops;
558 struct nlattr *slave_data;
561 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
564 ops = master_dev->rtnl_link_ops;
567 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
569 if (ops->fill_slave_info) {
570 slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
573 err = ops->fill_slave_info(skb, master_dev, dev);
575 goto err_cancel_slave_data;
576 nla_nest_end(skb, slave_data);
580 err_cancel_slave_data:
581 nla_nest_cancel(skb, slave_data);
585 static int rtnl_link_info_fill(struct sk_buff *skb,
586 const struct net_device *dev)
588 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
594 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
596 if (ops->fill_xstats) {
597 err = ops->fill_xstats(skb, dev);
601 if (ops->fill_info) {
602 data = nla_nest_start(skb, IFLA_INFO_DATA);
605 err = ops->fill_info(skb, dev);
607 goto err_cancel_data;
608 nla_nest_end(skb, data);
613 nla_nest_cancel(skb, data);
617 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
619 struct nlattr *linkinfo;
622 linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
623 if (linkinfo == NULL)
626 err = rtnl_link_info_fill(skb, dev);
628 goto err_cancel_link;
630 err = rtnl_link_slave_info_fill(skb, dev);
632 goto err_cancel_link;
634 nla_nest_end(skb, linkinfo);
638 nla_nest_cancel(skb, linkinfo);
643 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
645 struct sock *rtnl = net->rtnl;
648 NETLINK_CB(skb).dst_group = group;
650 atomic_inc(&skb->users);
651 netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
653 err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
657 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
659 struct sock *rtnl = net->rtnl;
661 return nlmsg_unicast(rtnl, skb, pid);
663 EXPORT_SYMBOL(rtnl_unicast);
665 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
666 struct nlmsghdr *nlh, gfp_t flags)
668 struct sock *rtnl = net->rtnl;
672 report = nlmsg_report(nlh);
674 nlmsg_notify(rtnl, skb, pid, group, report, flags);
676 EXPORT_SYMBOL(rtnl_notify);
678 void rtnl_set_sk_err(struct net *net, u32 group, int error)
680 struct sock *rtnl = net->rtnl;
682 netlink_set_err(rtnl, 0, group, error);
684 EXPORT_SYMBOL(rtnl_set_sk_err);
686 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
691 mx = nla_nest_start(skb, RTA_METRICS);
695 for (i = 0; i < RTAX_MAX; i++) {
697 if (i == RTAX_CC_ALGO - 1) {
698 char tmp[TCP_CA_NAME_MAX], *name;
700 name = tcp_ca_get_name_by_key(metrics[i], tmp);
703 if (nla_put_string(skb, i + 1, name))
704 goto nla_put_failure;
705 } else if (i == RTAX_FEATURES - 1) {
706 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
710 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
711 if (nla_put_u32(skb, i + 1, user_features))
712 goto nla_put_failure;
714 if (nla_put_u32(skb, i + 1, metrics[i]))
715 goto nla_put_failure;
722 nla_nest_cancel(skb, mx);
726 return nla_nest_end(skb, mx);
729 nla_nest_cancel(skb, mx);
732 EXPORT_SYMBOL(rtnetlink_put_metrics);
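/*
 * Caller sketch: route dump code hands its metrics array to this helper
 * while building an RTM_NEWROUTE message, roughly:
 *
 *	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
 *		goto nla_put_failure;
 */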
734 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
735 long expires, u32 error)
737 struct rta_cacheinfo ci = {
738 .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
739 .rta_used = dst->__use,
740 .rta_clntref = atomic_read(&(dst->__refcnt)),
748 clock = jiffies_to_clock_t(abs(expires));
749 clock = min_t(unsigned long, clock, INT_MAX);
750 ci.rta_expires = (expires > 0) ? clock : -clock;
752 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
754 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
756 static void set_operstate(struct net_device *dev, unsigned char transition)
758 unsigned char operstate = dev->operstate;
760 switch (transition) {
762 if ((operstate == IF_OPER_DORMANT ||
763 operstate == IF_OPER_UNKNOWN) &&
765 operstate = IF_OPER_UP;
768 case IF_OPER_DORMANT:
769 if (operstate == IF_OPER_UP ||
770 operstate == IF_OPER_UNKNOWN)
771 operstate = IF_OPER_DORMANT;
775 if (dev->operstate != operstate) {
776 write_lock_bh(&dev_base_lock);
777 dev->operstate = operstate;
778 write_unlock_bh(&dev_base_lock);
779 netdev_state_change(dev);
783 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
785 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
786 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
789 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
790 const struct ifinfomsg *ifm)
792 unsigned int flags = ifm->ifi_flags;
794 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
796 flags = (flags & ifm->ifi_change) |
797 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
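/*
 * Worked example: with ifi_flags = IFF_UP and ifi_change = IFF_UP only the
 * IFF_UP bit is taken from the request and every other flag keeps its
 * current value; with ifi_change == 0 (treated as ~0, see above) the
 * request's flags replace the device flags wholesale.
 */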
802 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
803 const struct rtnl_link_stats64 *b)
805 a->rx_packets = b->rx_packets;
806 a->tx_packets = b->tx_packets;
807 a->rx_bytes = b->rx_bytes;
808 a->tx_bytes = b->tx_bytes;
809 a->rx_errors = b->rx_errors;
810 a->tx_errors = b->tx_errors;
811 a->rx_dropped = b->rx_dropped;
812 a->tx_dropped = b->tx_dropped;
814 a->multicast = b->multicast;
815 a->collisions = b->collisions;
817 a->rx_length_errors = b->rx_length_errors;
818 a->rx_over_errors = b->rx_over_errors;
819 a->rx_crc_errors = b->rx_crc_errors;
820 a->rx_frame_errors = b->rx_frame_errors;
821 a->rx_fifo_errors = b->rx_fifo_errors;
822 a->rx_missed_errors = b->rx_missed_errors;
824 a->tx_aborted_errors = b->tx_aborted_errors;
825 a->tx_carrier_errors = b->tx_carrier_errors;
826 a->tx_fifo_errors = b->tx_fifo_errors;
827 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
828 a->tx_window_errors = b->tx_window_errors;
830 a->rx_compressed = b->rx_compressed;
831 a->tx_compressed = b->tx_compressed;
833 a->rx_nohandler = b->rx_nohandler;
837 static inline int rtnl_vfinfo_size(const struct net_device *dev,
840 if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
841 (ext_filter_mask & RTEXT_FILTER_VF)) {
842 int num_vfs = dev_num_vf(dev->dev.parent);
843 size_t size = nla_total_size(0);
846 nla_total_size(sizeof(struct ifla_vf_mac)) +
847 nla_total_size(sizeof(struct ifla_vf_vlan)) +
848 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
849 nla_total_size(MAX_VLAN_LIST_LEN *
850 sizeof(struct ifla_vf_vlan_info)) +
851 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
852 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
853 nla_total_size(sizeof(struct ifla_vf_rate)) +
854 nla_total_size(sizeof(struct ifla_vf_link_state)) +
855 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
856 nla_total_size(0) + /* nest IFLA_VF_STATS */
857 /* IFLA_VF_STATS_RX_PACKETS */
858 nla_total_size_64bit(sizeof(__u64)) +
859 /* IFLA_VF_STATS_TX_PACKETS */
860 nla_total_size_64bit(sizeof(__u64)) +
861 /* IFLA_VF_STATS_RX_BYTES */
862 nla_total_size_64bit(sizeof(__u64)) +
863 /* IFLA_VF_STATS_TX_BYTES */
864 nla_total_size_64bit(sizeof(__u64)) +
865 /* IFLA_VF_STATS_BROADCAST */
866 nla_total_size_64bit(sizeof(__u64)) +
867 /* IFLA_VF_STATS_MULTICAST */
868 nla_total_size_64bit(sizeof(__u64)) +
869 nla_total_size(sizeof(struct ifla_vf_trust)));
875 static size_t rtnl_port_size(const struct net_device *dev,
878 size_t port_size = nla_total_size(4) /* PORT_VF */
879 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
880 + nla_total_size(sizeof(struct ifla_port_vsi))
882 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
883 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
884 + nla_total_size(1) /* PORT_VDP_REQUEST */
885 + nla_total_size(2); /* PORT_VDP_RESPONSE */
886 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
887 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
889 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
892 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
893 !(ext_filter_mask & RTEXT_FILTER_VF))
895 if (dev_num_vf(dev->dev.parent))
896 return port_self_size + vf_ports_size +
897 vf_port_size * dev_num_vf(dev->dev.parent);
899 return port_self_size;
902 static size_t rtnl_xdp_size(const struct net_device *dev)
904 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
905 nla_total_size(1); /* XDP_ATTACHED */
907 if (!dev->netdev_ops->ndo_xdp)
913 static noinline size_t if_nlmsg_size(const struct net_device *dev,
916 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
917 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
918 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
919 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
920 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
921 + nla_total_size(sizeof(struct rtnl_link_stats))
922 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
923 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
924 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
925 + nla_total_size(4) /* IFLA_TXQLEN */
926 + nla_total_size(4) /* IFLA_WEIGHT */
927 + nla_total_size(4) /* IFLA_MTU */
928 + nla_total_size(4) /* IFLA_LINK */
929 + nla_total_size(4) /* IFLA_MASTER */
930 + nla_total_size(1) /* IFLA_CARRIER */
931 + nla_total_size(4) /* IFLA_PROMISCUITY */
932 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
933 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
934 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
935 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
936 + nla_total_size(1) /* IFLA_OPERSTATE */
937 + nla_total_size(1) /* IFLA_LINKMODE */
938 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
939 + nla_total_size(4) /* IFLA_LINK_NETNSID */
940 + nla_total_size(ext_filter_mask
941 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
942 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
943 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
944 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
945 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
946 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
947 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
948 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
949 + rtnl_xdp_size(dev) /* IFLA_XDP */
950 + nla_total_size(1); /* IFLA_PROTO_DOWN */
954 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
956 struct nlattr *vf_ports;
957 struct nlattr *vf_port;
961 vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
965 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
966 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
968 goto nla_put_failure;
969 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
970 goto nla_put_failure;
971 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
972 if (err == -EMSGSIZE)
973 goto nla_put_failure;
975 nla_nest_cancel(skb, vf_port);
978 nla_nest_end(skb, vf_port);
981 nla_nest_end(skb, vf_ports);
986 nla_nest_cancel(skb, vf_ports);
990 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
992 struct nlattr *port_self;
995 port_self = nla_nest_start(skb, IFLA_PORT_SELF);
999 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1001 nla_nest_cancel(skb, port_self);
1002 return (err == -EMSGSIZE) ? err : 0;
1005 nla_nest_end(skb, port_self);
1010 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1011 u32 ext_filter_mask)
1015 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1016 !(ext_filter_mask & RTEXT_FILTER_VF))
1019 err = rtnl_port_self_fill(skb, dev);
1023 if (dev_num_vf(dev->dev.parent)) {
1024 err = rtnl_vf_ports_fill(skb, dev);
1032 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1035 struct netdev_phys_item_id ppid;
1037 err = dev_get_phys_port_id(dev, &ppid);
1039 if (err == -EOPNOTSUPP)
1044 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1050 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1052 char name[IFNAMSIZ];
1055 err = dev_get_phys_port_name(dev, name, sizeof(name));
1057 if (err == -EOPNOTSUPP)
1062 if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
1068 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1071 struct switchdev_attr attr = {
1073 .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
1074 .flags = SWITCHDEV_F_NO_RECURSE,
1077 err = switchdev_port_attr_get(dev, &attr);
1079 if (err == -EOPNOTSUPP)
1084 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
1091 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1092 struct net_device *dev)
1094 struct rtnl_link_stats64 *sp;
1095 struct nlattr *attr;
1097 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1098 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1102 sp = nla_data(attr);
1103 dev_get_stats(dev, sp);
1105 attr = nla_reserve(skb, IFLA_STATS,
1106 sizeof(struct rtnl_link_stats));
1110 copy_rtnl_link_stats(nla_data(attr), sp);
1115 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1116 struct net_device *dev,
1118 struct nlattr *vfinfo)
1120 struct ifla_vf_rss_query_en vf_rss_query_en;
1121 struct nlattr *vf, *vfstats, *vfvlanlist;
1122 struct ifla_vf_link_state vf_linkstate;
1123 struct ifla_vf_vlan_info vf_vlan_info;
1124 struct ifla_vf_spoofchk vf_spoofchk;
1125 struct ifla_vf_tx_rate vf_tx_rate;
1126 struct ifla_vf_stats vf_stats;
1127 struct ifla_vf_trust vf_trust;
1128 struct ifla_vf_vlan vf_vlan;
1129 struct ifla_vf_rate vf_rate;
1130 struct ifla_vf_mac vf_mac;
1131 struct ifla_vf_info ivi;
1133 /* Not all SR-IOV capable drivers support the
1134 * spoofcheck and "RSS query enable" query. Preset to
1135 * -1 so the user space tool can detect that the driver
1136 * didn't report anything.
1139 ivi.rss_query_en = -1;
1141 memset(ivi.mac, 0, sizeof(ivi.mac));
1142 /* The default value for VF link state is "auto",
1143 * IFLA_VF_LINK_STATE_AUTO, which equals zero.
1146 /* VLAN Protocol by default is 802.1Q */
1147 ivi.vlan_proto = htons(ETH_P_8021Q);
1148 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1151 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1160 vf_rss_query_en.vf =
1161 vf_trust.vf = ivi.vf;
1163 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1164 vf_vlan.vlan = ivi.vlan;
1165 vf_vlan.qos = ivi.qos;
1166 vf_vlan_info.vlan = ivi.vlan;
1167 vf_vlan_info.qos = ivi.qos;
1168 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1169 vf_tx_rate.rate = ivi.max_tx_rate;
1170 vf_rate.min_tx_rate = ivi.min_tx_rate;
1171 vf_rate.max_tx_rate = ivi.max_tx_rate;
1172 vf_spoofchk.setting = ivi.spoofchk;
1173 vf_linkstate.link_state = ivi.linkstate;
1174 vf_rss_query_en.setting = ivi.rss_query_en;
1175 vf_trust.setting = ivi.trusted;
1176 vf = nla_nest_start(skb, IFLA_VF_INFO);
1178 goto nla_put_vfinfo_failure;
1179 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1180 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1181 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1183 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1185 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1187 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1189 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1190 sizeof(vf_rss_query_en),
1191 &vf_rss_query_en) ||
1192 nla_put(skb, IFLA_VF_TRUST,
1193 sizeof(vf_trust), &vf_trust))
1194 goto nla_put_vf_failure;
1195 vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
1197 goto nla_put_vf_failure;
1198 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1200 nla_nest_cancel(skb, vfvlanlist);
1201 goto nla_put_vf_failure;
1203 nla_nest_end(skb, vfvlanlist);
1204 memset(&vf_stats, 0, sizeof(vf_stats));
1205 if (dev->netdev_ops->ndo_get_vf_stats)
1206 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1208 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1210 goto nla_put_vf_failure;
1211 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1212 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1213 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1214 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1215 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1216 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1217 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1218 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1219 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1220 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1221 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1222 vf_stats.multicast, IFLA_VF_STATS_PAD)) {
1223 nla_nest_cancel(skb, vfstats);
1224 goto nla_put_vf_failure;
1226 nla_nest_end(skb, vfstats);
1227 nla_nest_end(skb, vf);
1231 nla_nest_cancel(skb, vf);
1232 nla_put_vfinfo_failure:
1233 nla_nest_cancel(skb, vfinfo);
1237 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1239 struct rtnl_link_ifmap map;
1241 memset(&map, 0, sizeof(map));
1242 map.mem_start = dev->mem_start;
1243 map.mem_end = dev->mem_end;
1244 map.base_addr = dev->base_addr;
1247 map.port = dev->if_port;
1249 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1255 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1257 struct netdev_xdp xdp_op = {};
1261 if (!dev->netdev_ops->ndo_xdp)
1263 xdp = nla_nest_start(skb, IFLA_XDP);
1266 xdp_op.command = XDP_QUERY_PROG;
1267 err = dev->netdev_ops->ndo_xdp(dev, &xdp_op);
1270 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, xdp_op.prog_attached);
1274 nla_nest_end(skb, xdp);
1278 nla_nest_cancel(skb, xdp);
1282 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1283 int type, u32 pid, u32 seq, u32 change,
1284 unsigned int flags, u32 ext_filter_mask)
1286 struct ifinfomsg *ifm;
1287 struct nlmsghdr *nlh;
1288 struct nlattr *af_spec;
1289 struct rtnl_af_ops *af_ops;
1290 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1293 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1297 ifm = nlmsg_data(nlh);
1298 ifm->ifi_family = AF_UNSPEC;
1300 ifm->ifi_type = dev->type;
1301 ifm->ifi_index = dev->ifindex;
1302 ifm->ifi_flags = dev_get_flags(dev);
1303 ifm->ifi_change = change;
1305 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1306 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1307 nla_put_u8(skb, IFLA_OPERSTATE,
1308 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1309 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1310 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1311 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1312 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1313 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1314 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1315 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1317 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1319 (dev->ifindex != dev_get_iflink(dev) &&
1320 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
1322 nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
1323 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1325 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
1327 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
1328 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1329 atomic_read(&dev->carrier_changes)) ||
1330 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1331 goto nla_put_failure;
1333 if (rtnl_fill_link_ifmap(skb, dev))
1334 goto nla_put_failure;
1336 if (dev->addr_len) {
1337 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1338 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1339 goto nla_put_failure;
1342 if (rtnl_phys_port_id_fill(skb, dev))
1343 goto nla_put_failure;
1345 if (rtnl_phys_port_name_fill(skb, dev))
1346 goto nla_put_failure;
1348 if (rtnl_phys_switch_id_fill(skb, dev))
1349 goto nla_put_failure;
1351 if (rtnl_fill_stats(skb, dev))
1352 goto nla_put_failure;
1354 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
1355 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
1356 goto nla_put_failure;
1358 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
1359 ext_filter_mask & RTEXT_FILTER_VF) {
1361 struct nlattr *vfinfo;
1362 int num_vfs = dev_num_vf(dev->dev.parent);
1364 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
1366 goto nla_put_failure;
1367 for (i = 0; i < num_vfs; i++) {
1368 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1369 goto nla_put_failure;
1372 nla_nest_end(skb, vfinfo);
1375 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1376 goto nla_put_failure;
1378 if (rtnl_xdp_fill(skb, dev))
1379 goto nla_put_failure;
1381 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1382 if (rtnl_link_fill(skb, dev) < 0)
1383 goto nla_put_failure;
1386 if (dev->rtnl_link_ops &&
1387 dev->rtnl_link_ops->get_link_net) {
1388 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1390 if (!net_eq(dev_net(dev), link_net)) {
1391 int id = peernet2id_alloc(dev_net(dev), link_net);
1393 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1394 goto nla_put_failure;
1398 if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
1399 goto nla_put_failure;
1401 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
1402 if (af_ops->fill_link_af) {
1406 if (!(af = nla_nest_start(skb, af_ops->family)))
1407 goto nla_put_failure;
1409 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1412 * Caller may return ENODATA to indicate that there
1413 * was no data to be dumped. This is not an error, it
1414 * means we should trim the attribute header and continue.
1417 if (err == -ENODATA)
1418 nla_nest_cancel(skb, af);
1420 goto nla_put_failure;
1422 nla_nest_end(skb, af);
1426 nla_nest_end(skb, af_spec);
1428 nlmsg_end(skb, nlh);
1432 nlmsg_cancel(skb, nlh);
1436 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1437 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1438 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1439 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1440 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1441 [IFLA_MTU] = { .type = NLA_U32 },
1442 [IFLA_LINK] = { .type = NLA_U32 },
1443 [IFLA_MASTER] = { .type = NLA_U32 },
1444 [IFLA_CARRIER] = { .type = NLA_U8 },
1445 [IFLA_TXQLEN] = { .type = NLA_U32 },
1446 [IFLA_WEIGHT] = { .type = NLA_U32 },
1447 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1448 [IFLA_LINKMODE] = { .type = NLA_U8 },
1449 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1450 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1451 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1452 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
1453 [IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
1454 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1455 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1456 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1457 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1458 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1459 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1460 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1461 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1462 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1463 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1464 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1465 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1466 [IFLA_XDP] = { .type = NLA_NESTED },
1469 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1470 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1471 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1472 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1473 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1476 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1477 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1478 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
1479 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
1480 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
1481 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
1482 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
1483 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
1484 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
1485 [IFLA_VF_STATS] = { .type = NLA_NESTED },
1486 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
1487 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1488 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1491 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1492 [IFLA_PORT_VF] = { .type = NLA_U32 },
1493 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
1494 .len = PORT_PROFILE_MAX },
1495 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
1496 .len = sizeof(struct ifla_port_vsi)},
1497 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1498 .len = PORT_UUID_MAX },
1499 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
1500 .len = PORT_UUID_MAX },
1501 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
1502 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
1505 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
1506 [IFLA_XDP_FD] = { .type = NLA_S32 },
1507 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
1510 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1512 const struct rtnl_link_ops *ops = NULL;
1513 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1515 if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla, ifla_info_policy) < 0)
1518 if (linfo[IFLA_INFO_KIND]) {
1519 char kind[MODULE_NAME_LEN];
1521 nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
1522 ops = rtnl_link_ops_get(kind);
1528 static bool link_master_filtered(struct net_device *dev, int master_idx)
1530 struct net_device *master;
1535 master = netdev_master_upper_dev_get(dev);
1536 if (!master || master->ifindex != master_idx)
1542 static bool link_kind_filtered(const struct net_device *dev,
1543 const struct rtnl_link_ops *kind_ops)
1545 if (kind_ops && dev->rtnl_link_ops != kind_ops)
1551 static bool link_dump_filtered(struct net_device *dev,
1553 const struct rtnl_link_ops *kind_ops)
1555 if (link_master_filtered(dev, master_idx) ||
1556 link_kind_filtered(dev, kind_ops))
1562 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1564 struct net *net = sock_net(skb->sk);
1567 struct net_device *dev;
1568 struct hlist_head *head;
1569 struct nlattr *tb[IFLA_MAX+1];
1570 u32 ext_filter_mask = 0;
1571 const struct rtnl_link_ops *kind_ops = NULL;
1572 unsigned int flags = NLM_F_MULTI;
1578 s_idx = cb->args[1];
1580 cb->seq = net->dev_base_seq;
1582 /* A hack to preserve kernel<->userspace interface.
1583 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
1584 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
1585 * what iproute2 < v3.9.0 used.
1586 * We can detect the old iproute2 because, even including the IFLA_EXT_MASK
1587 * attribute, its netlink message is shorter than struct ifinfomsg.
1589 hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
1590 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
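/* Worked check of the claim above: a 1-byte struct rtgenmsg padded to 4
 * bytes plus an 8-byte u32 attribute is a 12-byte payload, still smaller
 * than the 16-byte struct ifinfomsg.
 */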
1592 if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
1594 if (tb[IFLA_EXT_MASK])
1595 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1597 if (tb[IFLA_MASTER])
1598 master_idx = nla_get_u32(tb[IFLA_MASTER]);
1600 if (tb[IFLA_LINKINFO])
1601 kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);
1603 if (master_idx || kind_ops)
1604 flags |= NLM_F_DUMP_FILTERED;
1607 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1609 head = &net->dev_index_head[h];
1610 hlist_for_each_entry(dev, head, index_hlist) {
1611 if (link_dump_filtered(dev, master_idx, kind_ops))
1615 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
1616 NETLINK_CB(cb->skb).portid,
1617 cb->nlh->nlmsg_seq, 0,
1620 /* If we ran out of room on the first message, we're in trouble. */
1623 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
1628 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1640 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
1642 return nla_parse(tb, IFLA_MAX, head, len, ifla_policy);
1644 EXPORT_SYMBOL(rtnl_nla_parse_ifla);
1646 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
1649 /* Examine the link attributes and figure out which
1650 * network namespace we are talking about.
1652 if (tb[IFLA_NET_NS_PID])
1653 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
1654 else if (tb[IFLA_NET_NS_FD])
1655 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
1657 net = get_net(src_net);
1660 EXPORT_SYMBOL(rtnl_link_get_net);
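/*
 * Caller sketch: the returned namespace carries a reference that the caller
 * must drop with put_net() when done (as do_setlink() does below):
 *
 *	net = rtnl_link_get_net(dev_net(dev), tb);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	put_net(net);
 */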
1662 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
1665 if (tb[IFLA_ADDRESS] &&
1666 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
1669 if (tb[IFLA_BROADCAST] &&
1670 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
1674 if (tb[IFLA_AF_SPEC]) {
1678 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
1679 const struct rtnl_af_ops *af_ops;
1681 if (!(af_ops = rtnl_af_lookup(nla_type(af))))
1682 return -EAFNOSUPPORT;
1684 if (!af_ops->set_link_af)
1687 if (af_ops->validate_link_af) {
1688 err = af_ops->validate_link_af(dev, af);
1698 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
1701 const struct net_device_ops *ops = dev->netdev_ops;
1703 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
1706 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
1708 if (dev->type != ARPHRD_INFINIBAND)
1711 return handle_infiniband_guid(dev, ivt, guid_type);
1714 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
1716 const struct net_device_ops *ops = dev->netdev_ops;
1719 if (tb[IFLA_VF_MAC]) {
1720 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
1723 if (ops->ndo_set_vf_mac)
1724 err = ops->ndo_set_vf_mac(dev, ivm->vf,
1730 if (tb[IFLA_VF_VLAN]) {
1731 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
1734 if (ops->ndo_set_vf_vlan)
1735 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
1737 htons(ETH_P_8021Q));
1742 if (tb[IFLA_VF_VLAN_LIST]) {
1743 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
1744 struct nlattr *attr;
1748 if (!ops->ndo_set_vf_vlan)
1751 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
1752 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
1753 nla_len(attr) < NLA_HDRLEN) {
1756 if (len >= MAX_VLAN_LIST_LEN)
1758 ivvl[len] = nla_data(attr);
1765 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
1766 ivvl[0]->qos, ivvl[0]->vlan_proto);
1771 if (tb[IFLA_VF_TX_RATE]) {
1772 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
1773 struct ifla_vf_info ivf;
1776 if (ops->ndo_get_vf_config)
1777 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
1782 if (ops->ndo_set_vf_rate)
1783 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1790 if (tb[IFLA_VF_RATE]) {
1791 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
1794 if (ops->ndo_set_vf_rate)
1795 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1802 if (tb[IFLA_VF_SPOOFCHK]) {
1803 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
1806 if (ops->ndo_set_vf_spoofchk)
1807 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1813 if (tb[IFLA_VF_LINK_STATE]) {
1814 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
1817 if (ops->ndo_set_vf_link_state)
1818 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
1824 if (tb[IFLA_VF_RSS_QUERY_EN]) {
1825 struct ifla_vf_rss_query_en *ivrssq_en;
1828 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
1829 if (ops->ndo_set_vf_rss_query_en)
1830 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
1831 ivrssq_en->setting);
1836 if (tb[IFLA_VF_TRUST]) {
1837 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
1840 if (ops->ndo_set_vf_trust)
1841 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
1846 if (tb[IFLA_VF_IB_NODE_GUID]) {
1847 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
1849 if (!ops->ndo_set_vf_guid)
1852 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
1855 if (tb[IFLA_VF_IB_PORT_GUID]) {
1856 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
1858 if (!ops->ndo_set_vf_guid)
1861 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
1867 static int do_set_master(struct net_device *dev, int ifindex)
1869 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1870 const struct net_device_ops *ops;
1874 if (upper_dev->ifindex == ifindex)
1876 ops = upper_dev->netdev_ops;
1877 if (ops->ndo_del_slave) {
1878 err = ops->ndo_del_slave(upper_dev, dev);
1887 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
1890 ops = upper_dev->netdev_ops;
1891 if (ops->ndo_add_slave) {
1892 err = ops->ndo_add_slave(upper_dev, dev);
1902 #define DO_SETLINK_MODIFIED 0x01
1903 /* notify flag means notify + modified. */
1904 #define DO_SETLINK_NOTIFY 0x03
1905 static int do_setlink(const struct sk_buff *skb,
1906 struct net_device *dev, struct ifinfomsg *ifm,
1907 struct nlattr **tb, char *ifname, int status)
1909 const struct net_device_ops *ops = dev->netdev_ops;
1912 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
1913 struct net *net = rtnl_link_get_net(dev_net(dev), tb);
1918 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
1923 err = dev_change_net_namespace(dev, net, ifname);
1927 status |= DO_SETLINK_MODIFIED;
1931 struct rtnl_link_ifmap *u_map;
1934 if (!ops->ndo_set_config) {
1939 if (!netif_device_present(dev)) {
1944 u_map = nla_data(tb[IFLA_MAP]);
1945 k_map.mem_start = (unsigned long) u_map->mem_start;
1946 k_map.mem_end = (unsigned long) u_map->mem_end;
1947 k_map.base_addr = (unsigned short) u_map->base_addr;
1948 k_map.irq = (unsigned char) u_map->irq;
1949 k_map.dma = (unsigned char) u_map->dma;
1950 k_map.port = (unsigned char) u_map->port;
1952 err = ops->ndo_set_config(dev, &k_map);
1956 status |= DO_SETLINK_NOTIFY;
1959 if (tb[IFLA_ADDRESS]) {
1960 struct sockaddr *sa;
1963 len = sizeof(sa_family_t) + dev->addr_len;
1964 sa = kmalloc(len, GFP_KERNEL);
1969 sa->sa_family = dev->type;
1970 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
1972 err = dev_set_mac_address(dev, sa);
1976 status |= DO_SETLINK_MODIFIED;
1980 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
1983 status |= DO_SETLINK_MODIFIED;
1986 if (tb[IFLA_GROUP]) {
1987 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
1988 status |= DO_SETLINK_NOTIFY;
1992 * Interface selected by interface index but interface
1993 * name provided implies that a name change has been requested.
1996 if (ifm->ifi_index > 0 && ifname[0]) {
1997 err = dev_change_name(dev, ifname);
2000 status |= DO_SETLINK_MODIFIED;
2003 if (tb[IFLA_IFALIAS]) {
2004 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2005 nla_len(tb[IFLA_IFALIAS]));
2008 status |= DO_SETLINK_NOTIFY;
2011 if (tb[IFLA_BROADCAST]) {
2012 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2013 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2016 if (ifm->ifi_flags || ifm->ifi_change) {
2017 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2022 if (tb[IFLA_MASTER]) {
2023 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
2026 status |= DO_SETLINK_MODIFIED;
2029 if (tb[IFLA_CARRIER]) {
2030 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2033 status |= DO_SETLINK_MODIFIED;
2036 if (tb[IFLA_TXQLEN]) {
2037 unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]);
2038 unsigned long orig_len = dev->tx_queue_len;
2040 if (dev->tx_queue_len ^ value) {
2041 dev->tx_queue_len = value;
2042 err = call_netdevice_notifiers(
2043 NETDEV_CHANGE_TX_QUEUE_LEN, dev);
2044 err = notifier_to_errno(err);
2046 dev->tx_queue_len = orig_len;
2049 status |= DO_SETLINK_NOTIFY;
2053 if (tb[IFLA_OPERSTATE])
2054 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2056 if (tb[IFLA_LINKMODE]) {
2057 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2059 write_lock_bh(&dev_base_lock);
2060 if (dev->link_mode ^ value)
2061 status |= DO_SETLINK_NOTIFY;
2062 dev->link_mode = value;
2063 write_unlock_bh(&dev_base_lock);
2066 if (tb[IFLA_VFINFO_LIST]) {
2067 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2068 struct nlattr *attr;
2071 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2072 if (nla_type(attr) != IFLA_VF_INFO ||
2073 nla_len(attr) < NLA_HDRLEN) {
2077 err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
2081 err = do_setvfinfo(dev, vfinfo);
2084 status |= DO_SETLINK_NOTIFY;
2089 if (tb[IFLA_VF_PORTS]) {
2090 struct nlattr *port[IFLA_PORT_MAX+1];
2091 struct nlattr *attr;
2096 if (!ops->ndo_set_vf_port)
2099 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2100 if (nla_type(attr) != IFLA_VF_PORT ||
2101 nla_len(attr) < NLA_HDRLEN) {
2105 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
2109 if (!port[IFLA_PORT_VF]) {
2113 vf = nla_get_u32(port[IFLA_PORT_VF]);
2114 err = ops->ndo_set_vf_port(dev, vf, port);
2117 status |= DO_SETLINK_NOTIFY;
2122 if (tb[IFLA_PORT_SELF]) {
2123 struct nlattr *port[IFLA_PORT_MAX+1];
2125 err = nla_parse_nested(port, IFLA_PORT_MAX,
2126 tb[IFLA_PORT_SELF], ifla_port_policy);
2131 if (ops->ndo_set_vf_port)
2132 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2135 status |= DO_SETLINK_NOTIFY;
2138 if (tb[IFLA_AF_SPEC]) {
2142 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2143 const struct rtnl_af_ops *af_ops;
2145 if (!(af_ops = rtnl_af_lookup(nla_type(af))))
2148 err = af_ops->set_link_af(dev, af);
2152 status |= DO_SETLINK_NOTIFY;
2157 if (tb[IFLA_PROTO_DOWN]) {
2158 err = dev_change_proto_down(dev,
2159 nla_get_u8(tb[IFLA_PROTO_DOWN]));
2162 status |= DO_SETLINK_NOTIFY;
2166 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2168 err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
2173 if (xdp[IFLA_XDP_ATTACHED]) {
2177 if (xdp[IFLA_XDP_FD]) {
2178 err = dev_change_xdp_fd(dev,
2179 nla_get_s32(xdp[IFLA_XDP_FD]));
2182 status |= DO_SETLINK_NOTIFY;
2187 if (status & DO_SETLINK_MODIFIED) {
2188 if (status & DO_SETLINK_NOTIFY)
2189 netdev_state_change(dev);
2192 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
2199 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
2201 struct net *net = sock_net(skb->sk);
2202 struct ifinfomsg *ifm;
2203 struct net_device *dev;
2205 struct nlattr *tb[IFLA_MAX+1];
2206 char ifname[IFNAMSIZ];
2208 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
2212 if (tb[IFLA_IFNAME])
2213 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2218 ifm = nlmsg_data(nlh);
2219 if (ifm->ifi_index > 0)
2220 dev = __dev_get_by_index(net, ifm->ifi_index);
2221 else if (tb[IFLA_IFNAME])
2222 dev = __dev_get_by_name(net, ifname);
2231 err = validate_linkmsg(dev, tb);
2235 err = do_setlink(skb, dev, ifm, tb, ifname, 0);
2240 static int rtnl_group_dellink(const struct net *net, int group)
2242 struct net_device *dev, *aux;
2243 LIST_HEAD(list_kill);
2249 for_each_netdev(net, dev) {
2250 if (dev->group == group) {
2251 const struct rtnl_link_ops *ops;
2254 ops = dev->rtnl_link_ops;
2255 if (!ops || !ops->dellink)
2263 for_each_netdev_safe(net, dev, aux) {
2264 if (dev->group == group) {
2265 const struct rtnl_link_ops *ops;
2267 ops = dev->rtnl_link_ops;
2268 ops->dellink(dev, &list_kill);
2271 unregister_netdevice_many(&list_kill);
2276 int rtnl_delete_link(struct net_device *dev)
2278 const struct rtnl_link_ops *ops;
2279 LIST_HEAD(list_kill);
2281 ops = dev->rtnl_link_ops;
2282 if (!ops || !ops->dellink)
2285 ops->dellink(dev, &list_kill);
2286 unregister_netdevice_many(&list_kill);
2290 EXPORT_SYMBOL_GPL(rtnl_delete_link);
2292 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
2294 struct net *net = sock_net(skb->sk);
2295 struct net_device *dev;
2296 struct ifinfomsg *ifm;
2297 char ifname[IFNAMSIZ];
2298 struct nlattr *tb[IFLA_MAX+1];
2301 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
2305 if (tb[IFLA_IFNAME])
2306 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2308 ifm = nlmsg_data(nlh);
2309 if (ifm->ifi_index > 0)
2310 dev = __dev_get_by_index(net, ifm->ifi_index);
2311 else if (tb[IFLA_IFNAME])
2312 dev = __dev_get_by_name(net, ifname);
2313 else if (tb[IFLA_GROUP])
2314 return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
2321 return rtnl_delete_link(dev);
2324 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2326 unsigned int old_flags;
2329 old_flags = dev->flags;
2330 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
2331 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2336 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2338 __dev_notify_flags(dev, old_flags, ~0U);
2341 EXPORT_SYMBOL(rtnl_configure_link);
2343 struct net_device *rtnl_create_link(struct net *net,
2344 const char *ifname, unsigned char name_assign_type,
2345 const struct rtnl_link_ops *ops, struct nlattr *tb[])
2348 struct net_device *dev;
2349 unsigned int num_tx_queues = 1;
2350 unsigned int num_rx_queues = 1;
2352 if (tb[IFLA_NUM_TX_QUEUES])
2353 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
2354 else if (ops->get_num_tx_queues)
2355 num_tx_queues = ops->get_num_tx_queues();
2357 if (tb[IFLA_NUM_RX_QUEUES])
2358 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
2359 else if (ops->get_num_rx_queues)
2360 num_rx_queues = ops->get_num_rx_queues();
2363 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
2364 ops->setup, num_tx_queues, num_rx_queues);
2368 dev_net_set(dev, net);
2369 dev->rtnl_link_ops = ops;
2370 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
2373 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
2374 if (tb[IFLA_ADDRESS]) {
2375 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
2376 nla_len(tb[IFLA_ADDRESS]));
2377 dev->addr_assign_type = NET_ADDR_SET;
2379 if (tb[IFLA_BROADCAST])
2380 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
2381 nla_len(tb[IFLA_BROADCAST]));
2382 if (tb[IFLA_TXQLEN])
2383 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
2384 if (tb[IFLA_OPERSTATE])
2385 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2386 if (tb[IFLA_LINKMODE])
2387 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
2389 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2394 return ERR_PTR(err);
2396 EXPORT_SYMBOL(rtnl_create_link);
2398 static int rtnl_group_changelink(const struct sk_buff *skb,
2399 struct net *net, int group,
2400 struct ifinfomsg *ifm,
2403 struct net_device *dev, *aux;
2406 for_each_netdev_safe(net, dev, aux) {
2407 if (dev->group == group) {
2408 err = do_setlink(skb, dev, ifm, tb, NULL, 0);
2417 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
2419 struct net *net = sock_net(skb->sk);
2420 const struct rtnl_link_ops *ops;
2421 const struct rtnl_link_ops *m_ops = NULL;
2422 struct net_device *dev;
2423 struct net_device *master_dev = NULL;
2424 struct ifinfomsg *ifm;
2425 char kind[MODULE_NAME_LEN];
2426 char ifname[IFNAMSIZ];
2427 struct nlattr *tb[IFLA_MAX+1];
2428 struct nlattr *linkinfo[IFLA_INFO_MAX+1];
2429 unsigned char name_assign_type = NET_NAME_USER;
2432 #ifdef CONFIG_MODULES
2435 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
2439 if (tb[IFLA_IFNAME])
2440 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2444 ifm = nlmsg_data(nlh);
2445 if (ifm->ifi_index > 0)
2446 dev = __dev_get_by_index(net, ifm->ifi_index);
2449 dev = __dev_get_by_name(net, ifname);
2455 master_dev = netdev_master_upper_dev_get(dev);
2457 m_ops = master_dev->rtnl_link_ops;
2460 err = validate_linkmsg(dev, tb);
2464 if (tb[IFLA_LINKINFO]) {
2465 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
2466 tb[IFLA_LINKINFO], ifla_info_policy);
2470 memset(linkinfo, 0, sizeof(linkinfo));
2472 if (linkinfo[IFLA_INFO_KIND]) {
2473 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
2474 ops = rtnl_link_ops_get(kind);
2481 struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
2482 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
2483 struct nlattr **data = NULL;
2484 struct nlattr **slave_data = NULL;
2485 struct net *dest_net, *link_net = NULL;
2488 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
2489 err = nla_parse_nested(attr, ops->maxtype,
2490 linkinfo[IFLA_INFO_DATA],
2496 if (ops->validate) {
2497 err = ops->validate(tb, data);
2504 if (m_ops->slave_maxtype &&
2505 linkinfo[IFLA_INFO_SLAVE_DATA]) {
2506 err = nla_parse_nested(slave_attr,
2507 m_ops->slave_maxtype,
2508 linkinfo[IFLA_INFO_SLAVE_DATA],
2509 m_ops->slave_policy);
2512 slave_data = slave_attr;
2514 if (m_ops->slave_validate) {
2515 err = m_ops->slave_validate(tb, slave_data);
2524 if (nlh->nlmsg_flags & NLM_F_EXCL)
2526 if (nlh->nlmsg_flags & NLM_F_REPLACE)
2529 if (linkinfo[IFLA_INFO_DATA]) {
2530 if (!ops || ops != dev->rtnl_link_ops ||
2534 err = ops->changelink(dev, tb, data);
2537 status |= DO_SETLINK_NOTIFY;
2540 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
2541 if (!m_ops || !m_ops->slave_changelink)
2544 err = m_ops->slave_changelink(master_dev, dev,
2548 status |= DO_SETLINK_NOTIFY;
2551 return do_setlink(skb, dev, ifm, tb, ifname, status);
2554 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2555 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
2556 return rtnl_group_changelink(skb, net,
2557 nla_get_u32(tb[IFLA_GROUP]),
2562 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
2566 #ifdef CONFIG_MODULES
2569 request_module("rtnl-link-%s", kind);
2571 ops = rtnl_link_ops_get(kind);
2583 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
2584 name_assign_type = NET_NAME_ENUM;
2587 dest_net = rtnl_link_get_net(net, tb);
2588 if (IS_ERR(dest_net))
2589 return PTR_ERR(dest_net);
2592 if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
2595 if (tb[IFLA_LINK_NETNSID]) {
2596 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
2598 link_net = get_net_ns_by_id(dest_net, id);
2604 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
2608 dev = rtnl_create_link(link_net ? : dest_net, ifname,
2609 name_assign_type, ops, tb);
2615 dev->ifindex = ifm->ifi_index;
2618 err = ops->newlink(link_net ? : net, dev, tb, data);
2619 /* Drivers should call free_netdev() in ->destructor
2620 * and unregister it on failure after registration
2621 * so that the device can finally be freed in rtnl_unlock.
2624 /* If the device is not registered at all, free it now */
2625 if (dev->reg_state == NETREG_UNINITIALIZED)
2630 err = register_netdevice(dev);
2636 err = rtnl_configure_link(dev, ifm);
2638 goto out_unregister;
2640 err = dev_change_net_namespace(dev, dest_net, ifname);
2642 goto out_unregister;
2651 LIST_HEAD(list_kill);
2653 ops->dellink(dev, &list_kill);
2654 unregister_netdevice_many(&list_kill);
2656 unregister_netdevice(dev);
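/*
 * Editor's illustrative sketch (a separate userspace program, not part of
 * this file): the kind of RTM_NEWLINK create request that rtnl_newlink()
 * above services.  It asks for a "dummy" device, so it assumes the dummy
 * driver is available; error handling is omitted for brevity.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Append one attribute to the message, returning it for manual nesting. */
static struct rtattr *add_attr(struct nlmsghdr *nlh, unsigned short type,
			       const void *data, int len)
{
	struct rtattr *rta = (struct rtattr *)
		((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	if (len)
		memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
	return rta;
}

int create_dummy(const char *name)
{
	char buf[256];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct rtattr *linkinfo;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(buf, 0, sizeof(buf));
	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	nlh->nlmsg_type = RTM_NEWLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;

	add_attr(nlh, IFLA_IFNAME, name, strlen(name) + 1);
	linkinfo = add_attr(nlh, IFLA_LINKINFO, NULL, 0);	/* nest start */
	add_attr(nlh, IFLA_INFO_KIND, "dummy", strlen("dummy") + 1);
	/* nest end: patch the nest header to cover the nested attributes */
	linkinfo->rta_len = (char *)nlh + nlh->nlmsg_len - (char *)linkinfo;

	send(fd, nlh, nlh->nlmsg_len, 0);
	close(fd);				/* the ACK would be read here */
	return 0;
}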
2662 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh)
2664 struct net *net = sock_net(skb->sk);
2665 struct ifinfomsg *ifm;
2666 char ifname[IFNAMSIZ];
2667 struct nlattr *tb[IFLA_MAX+1];
2668 struct net_device *dev = NULL;
2669 struct sk_buff *nskb;
2671 u32 ext_filter_mask = 0;
2673 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
2677 if (tb[IFLA_IFNAME])
2678 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2680 if (tb[IFLA_EXT_MASK])
2681 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2683 ifm = nlmsg_data(nlh);
2684 if (ifm->ifi_index > 0)
2685 dev = __dev_get_by_index(net, ifm->ifi_index);
2686 else if (tb[IFLA_IFNAME])
2687 dev = __dev_get_by_name(net, ifname);
2694 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
2698 err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
2699 nlh->nlmsg_seq, 0, 0, ext_filter_mask);
2701 /* -EMSGSIZE implies BUG in if_nlmsg_size */
2702 WARN_ON(err == -EMSGSIZE);
2705 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
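/*
 * Editor's illustrative sketch (a separate userspace program, not part of
 * this file): the RTM_GETLINK unicast request that rtnl_getlink() above
 * answers with a single rtnl_fill_ifinfo() reply.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int query_link(int ifindex)
{
	struct {
		struct nlmsghdr	nlh;
		struct ifinfomsg ifm;
	} req;
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;	/* single link, no NLM_F_DUMP */
	req.ifm.ifi_family = AF_UNSPEC;
	req.ifm.ifi_index = ifindex;		/* matched via __dev_get_by_index() */

	send(fd, &req, req.nlh.nlmsg_len, 0);
	recv(fd, buf, sizeof(buf), 0);		/* RTM_NEWLINK reply (or nlmsgerr) */
	close(fd);
	return 0;
}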
2710 static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
2712 struct net *net = sock_net(skb->sk);
2713 struct net_device *dev;
2714 struct nlattr *tb[IFLA_MAX+1];
2715 u32 ext_filter_mask = 0;
2716 u16 min_ifinfo_dump_size = 0;
2719 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
2720 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2721 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2723 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
2724 if (tb[IFLA_EXT_MASK])
2725 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2728 if (!ext_filter_mask)
2729 return NLMSG_GOODSIZE;
2731 * traverse the list of net devices and compute the minimum
2732 * buffer size based upon the filter mask.
2734 list_for_each_entry(dev, &net->dev_base_head, dev_list) {
2735 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
2740 return min_ifinfo_dump_size;
2743 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
2746 int s_idx = cb->family;
2750 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
2751 int type = cb->nlh->nlmsg_type-RTM_BASE;
2752 if (idx < s_idx || idx == PF_PACKET)
2754 if (rtnl_msg_handlers[idx] == NULL ||
2755 rtnl_msg_handlers[idx][type].dumpit == NULL)
2758 memset(&cb->args[0], 0, sizeof(cb->args));
2762 if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
2770 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
2771 unsigned int change, gfp_t flags)
2773 struct net *net = dev_net(dev);
2774 struct sk_buff *skb;
2776 size_t if_info_size;
2778 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
2782 err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
2784 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
2785 WARN_ON(err == -EMSGSIZE);
2792 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2796 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
2798 struct net *net = dev_net(dev);
2800 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
2803 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
2806 struct sk_buff *skb;
2808 if (dev->reg_state != NETREG_REGISTERED)
2811 skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
2813 rtmsg_ifinfo_send(skb, dev, flags);
2815 EXPORT_SYMBOL(rtmsg_ifinfo);
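/*
 * Editor's illustrative sketch (a separate userspace program, not part of
 * this file): subscribing to the RTNLGRP_LINK notifications that
 * rtmsg_ifinfo() above broadcasts via rtnl_notify().
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int watch_links(void)
{
	struct sockaddr_nl sa;
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_groups = RTMGRP_LINK;	/* legacy bitmask for RTNLGRP_LINK */
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));

	for (;;)
		recv(fd, buf, sizeof(buf), 0);	/* RTM_NEWLINK/RTM_DELLINK events */
}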
2817 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
2818 struct net_device *dev,
2819 u8 *addr, u16 vid, u32 pid, u32 seq,
2820 int type, unsigned int flags,
2821 int nlflags, u16 ndm_state)
2823 struct nlmsghdr *nlh;
2826 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
2830 ndm = nlmsg_data(nlh);
2831 ndm->ndm_family = AF_BRIDGE;
2834 ndm->ndm_flags = flags;
2836 ndm->ndm_ifindex = dev->ifindex;
2837 ndm->ndm_state = ndm_state;
2839 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
2840 goto nla_put_failure;
2842 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
2843 goto nla_put_failure;
2845 nlmsg_end(skb, nlh);
2849 nlmsg_cancel(skb, nlh);
2853 static inline size_t rtnl_fdb_nlmsg_size(void)
2855 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
2856 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
2857 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
2861 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
2864 struct net *net = dev_net(dev);
2865 struct sk_buff *skb;
2868 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
2872 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
2873 0, 0, type, NTF_SELF, 0, ndm_state);
2879 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2882 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2886 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
2888 int ndo_dflt_fdb_add(struct ndmsg *ndm,
2889 struct nlattr *tb[],
2890 struct net_device *dev,
2891 const unsigned char *addr, u16 vid,
2896 /* If aging addresses are supported, the device will need to
2897 * implement its own handler for this.
2899 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
2900 pr_info("%s: FDB only supports static addresses\n", dev->name);
2905 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
2909 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
2910 err = dev_uc_add_excl(dev, addr);
2911 else if (is_multicast_ether_addr(addr))
2912 err = dev_mc_add_excl(dev, addr);
2914 /* Only return duplicate errors if NLM_F_EXCL is set */
2915 if (err == -EEXIST && !(flags & NLM_F_EXCL))
2920 EXPORT_SYMBOL(ndo_dflt_fdb_add);
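/*
 * Editor's illustrative sketch (not part of this file): a driver without a
 * private FDB can delegate .ndo_fdb_add to the default helper above, either
 * by pointing the net_device_ops member at it directly or by wrapping it as
 * below.  The trailing "flags" parameter is elided from the declaration in
 * this listing, so its exact position here is an assumption, and the wrapper
 * assumes the usual driver context (<linux/netdevice.h> already included).
 * From userspace this path is exercised by e.g. "bridge fdb add ... self".
 */
static int foo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid, u16 flags)
{
	/* Fall back to the exclusive unicast/multicast address lists. */
	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}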
2922 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid)
2927 if (nla_len(vlan_attr) != sizeof(u16)) {
2928 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n");
2932 vid = nla_get_u16(vlan_attr);
2934 if (!vid || vid >= VLAN_VID_MASK) {
2935 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n",
2944 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
2946 struct net *net = sock_net(skb->sk);
2948 struct nlattr *tb[NDA_MAX+1];
2949 struct net_device *dev;
2954 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
2958 ndm = nlmsg_data(nlh);
2959 if (ndm->ndm_ifindex == 0) {
2960 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
2964 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2966 pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
2970 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
2971 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
2975 addr = nla_data(tb[NDA_LLADDR]);
2977 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
2983 /* Support FDB on the master device (the net/bridge default case) */
2984 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2985 (dev->priv_flags & IFF_BRIDGE_PORT)) {
2986 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2987 const struct net_device_ops *ops = br_dev->netdev_ops;
2989 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
2994 ndm->ndm_flags &= ~NTF_MASTER;
2997 /* Embedded bridge, macvlan, and any other device support */
2998 if ((ndm->ndm_flags & NTF_SELF)) {
2999 if (dev->netdev_ops->ndo_fdb_add)
3000 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
3004 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
3008 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
3010 ndm->ndm_flags &= ~NTF_SELF;
3018 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
3020 int ndo_dflt_fdb_del(struct ndmsg *ndm,
3021 struct nlattr *tb[],
3022 struct net_device *dev,
3023 const unsigned char *addr, u16 vid)
3027 /* If aging addresses are supported, the device will need to
3028 * implement its own handler for this.
3030 if (!(ndm->ndm_state & NUD_PERMANENT)) {
3031 pr_info("%s: FDB only supports static addresses\n", dev->name);
3035 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3036 err = dev_uc_del(dev, addr);
3037 else if (is_multicast_ether_addr(addr))
3038 err = dev_mc_del(dev, addr);
3042 EXPORT_SYMBOL(ndo_dflt_fdb_del);
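/*
 * Editor's illustrative sketch (not part of this file): the matching
 * deletion wrapper, delegating to ndo_dflt_fdb_del() above with the
 * parameter list shown in its definition.  foo_fdb_del is a hypothetical
 * name and assumes the usual driver context.
 */
static int foo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid)
{
	return ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
}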
3044 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
3046 struct net *net = sock_net(skb->sk);
3048 struct nlattr *tb[NDA_MAX+1];
3049 struct net_device *dev;
3054 if (!netlink_capable(skb, CAP_NET_ADMIN))
3057 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
3061 ndm = nlmsg_data(nlh);
3062 if (ndm->ndm_ifindex == 0) {
3063 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
3067 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3069 pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
3073 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3074 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
3078 addr = nla_data(tb[NDA_LLADDR]);
3080 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3086 /* Support FDB on the master device (the net/bridge default case) */
3087 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3088 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3089 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3090 const struct net_device_ops *ops = br_dev->netdev_ops;
3092 if (ops->ndo_fdb_del)
3093 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
3098 ndm->ndm_flags &= ~NTF_MASTER;
3101 /* Embedded bridge, macvlan, and any other device support */
3102 if (ndm->ndm_flags & NTF_SELF) {
3103 if (dev->netdev_ops->ndo_fdb_del)
3104 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
3107 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
3110 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
3112 ndm->ndm_flags &= ~NTF_SELF;
3119 static int nlmsg_populate_fdb(struct sk_buff *skb,
3120 struct netlink_callback *cb,
3121 struct net_device *dev,
3123 struct netdev_hw_addr_list *list)
3125 struct netdev_hw_addr *ha;
3129 portid = NETLINK_CB(cb->skb).portid;
3130 seq = cb->nlh->nlmsg_seq;
3132 list_for_each_entry(ha, &list->list, list) {
3133 if (*idx < cb->args[2])
3136 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
3138 RTM_NEWNEIGH, NTF_SELF,
3139 NLM_F_MULTI, NUD_PERMANENT);
3149 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
3150 * @nlh: netlink message header
3153 * Default netdevice operation to dump the existing unicast address list.
3154 * Returns the number of addresses from the list that were put in the skb.
3156 int ndo_dflt_fdb_dump(struct sk_buff *skb,
3157 struct netlink_callback *cb,
3158 struct net_device *dev,
3159 struct net_device *filter_dev,
3164 netif_addr_lock_bh(dev);
3165 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
3168 nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
3170 netif_addr_unlock_bh(dev);
3173 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
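/*
 * Editor's illustrative sketch (a separate userspace program, not part of
 * this file): an AF_BRIDGE RTM_GETNEIGH dump request of the shape that
 * rtnl_fdb_dump() below parses (a struct ifinfomsg header, optionally with
 * IFLA_MASTER or a nonzero ifi_index to narrow the dump).
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int dump_fdb(void)
{
	struct {
		struct nlmsghdr	nlh;
		struct ifinfomsg ifm;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nlh.nlmsg_type = RTM_GETNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ifm.ifi_family = AF_BRIDGE;		/* route the dump to PF_BRIDGE */

	send(fd, &req, req.nlh.nlmsg_len, 0);
	/* ... read NLM_F_MULTI RTM_NEWNEIGH messages until NLMSG_DONE ... */
	close(fd);
	return 0;
}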
3175 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3177 struct net_device *dev;
3178 struct nlattr *tb[IFLA_MAX+1];
3179 struct net_device *br_dev = NULL;
3180 const struct net_device_ops *ops = NULL;
3181 const struct net_device_ops *cops = NULL;
3182 struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
3183 struct net *net = sock_net(skb->sk);
3184 struct hlist_head *head;
3192 if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
3193 ifla_policy) == 0) {
3194 if (tb[IFLA_MASTER])
3195 br_idx = nla_get_u32(tb[IFLA_MASTER]);
3198 brport_idx = ifm->ifi_index;
3201 br_dev = __dev_get_by_index(net, br_idx);
3205 ops = br_dev->netdev_ops;
3209 s_idx = cb->args[1];
3211 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3213 head = &net->dev_index_head[h];
3214 hlist_for_each_entry(dev, head, index_hlist) {
3216 if (brport_idx && (dev->ifindex != brport_idx))
3219 if (!br_idx) { /* user did not specify a specific bridge */
3220 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3221 br_dev = netdev_master_upper_dev_get(dev);
3222 cops = br_dev->netdev_ops;
3225 if (dev != br_dev &&
3226 !(dev->priv_flags & IFF_BRIDGE_PORT))
3229 if (br_dev != netdev_master_upper_dev_get(dev) &&
3230 !(dev->priv_flags & IFF_EBRIDGE))
3238 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3239 if (cops && cops->ndo_fdb_dump) {
3240 err = cops->ndo_fdb_dump(skb, cb,
3243 if (err == -EMSGSIZE)
3248 if (dev->netdev_ops->ndo_fdb_dump)
3249 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
3253 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
3255 if (err == -EMSGSIZE)
3260 /* reset the fdb offset to 0 for the rest of the interfaces */
3276 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
3277 unsigned int attrnum, unsigned int flag)
3280 return nla_put_u8(skb, attrnum, !!(flags & flag));
3284 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3285 struct net_device *dev, u16 mode,
3286 u32 flags, u32 mask, int nlflags,
3288 int (*vlan_fill)(struct sk_buff *skb,
3289 struct net_device *dev,
3292 struct nlmsghdr *nlh;
3293 struct ifinfomsg *ifm;
3294 struct nlattr *br_afspec;
3295 struct nlattr *protinfo;
3296 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
3297 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3300 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
3304 ifm = nlmsg_data(nlh);
3305 ifm->ifi_family = AF_BRIDGE;
3307 ifm->ifi_type = dev->type;
3308 ifm->ifi_index = dev->ifindex;
3309 ifm->ifi_flags = dev_get_flags(dev);
3310 ifm->ifi_change = 0;
3313 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
3314 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
3315 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
3317 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
3319 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
3320 (dev->ifindex != dev_get_iflink(dev) &&
3321 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
3322 goto nla_put_failure;
3324 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
3326 goto nla_put_failure;
3328 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
3329 nla_nest_cancel(skb, br_afspec);
3330 goto nla_put_failure;
3333 if (mode != BRIDGE_MODE_UNDEF) {
3334 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
3335 nla_nest_cancel(skb, br_afspec);
3336 goto nla_put_failure;
3340 err = vlan_fill(skb, dev, filter_mask);
3342 nla_nest_cancel(skb, br_afspec);
3343 goto nla_put_failure;
3346 nla_nest_end(skb, br_afspec);
3348 protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
3350 goto nla_put_failure;
3352 if (brport_nla_put_flag(skb, flags, mask,
3353 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
3354 brport_nla_put_flag(skb, flags, mask,
3355 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
3356 brport_nla_put_flag(skb, flags, mask,
3357 IFLA_BRPORT_FAST_LEAVE,
3358 BR_MULTICAST_FAST_LEAVE) ||
3359 brport_nla_put_flag(skb, flags, mask,
3360 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
3361 brport_nla_put_flag(skb, flags, mask,
3362 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
3363 brport_nla_put_flag(skb, flags, mask,
3364 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
3365 brport_nla_put_flag(skb, flags, mask,
3366 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
3367 brport_nla_put_flag(skb, flags, mask,
3368 IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
3369 nla_nest_cancel(skb, protinfo);
3370 goto nla_put_failure;
3373 nla_nest_end(skb, protinfo);
3375 nlmsg_end(skb, nlh);
3378 nlmsg_cancel(skb, nlh);
3379 return err ? err : -EMSGSIZE;
3381 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
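/*
 * Editor's illustrative sketch (a separate userspace program, not part of
 * this file): the AF_BRIDGE RTM_GETLINK dump served by rtnl_bridge_getlink()
 * below.  The optional IFLA_EXT_MASK attribute after the ifinfomsg header
 * (here RTEXT_FILTER_BRVLAN) asks for extra per-port information.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int dump_bridge_links(void)
{
	struct {
		struct nlmsghdr	nlh;
		struct ifinfomsg ifm;
		struct rtattr	ext;
		__u32		mask;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ifm.ifi_family = AF_BRIDGE;
	req.ext.rta_type = IFLA_EXT_MASK;
	req.ext.rta_len = RTA_LENGTH(sizeof(__u32));
	req.mask = RTEXT_FILTER_BRVLAN;		/* ask for per-port VLAN info */

	send(fd, &req, req.nlh.nlmsg_len, 0);
	/* ... read NLM_F_MULTI RTM_NEWLINK messages until NLMSG_DONE ... */
	close(fd);
	return 0;
}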
3383 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3385 struct net *net = sock_net(skb->sk);
3386 struct net_device *dev;
3388 u32 portid = NETLINK_CB(cb->skb).portid;
3389 u32 seq = cb->nlh->nlmsg_seq;
3390 u32 filter_mask = 0;
3393 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
3394 struct nlattr *extfilt;
3396 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
3399 if (nla_len(extfilt) < sizeof(filter_mask))
3402 filter_mask = nla_get_u32(extfilt);
3407 for_each_netdev_rcu(net, dev) {
3408 const struct net_device_ops *ops = dev->netdev_ops;
3409 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3411 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
3412 if (idx >= cb->args[0]) {
3413 err = br_dev->netdev_ops->ndo_bridge_getlink(
3414 skb, portid, seq, dev,
3415 filter_mask, NLM_F_MULTI);
3416 if (err < 0 && err != -EOPNOTSUPP)
3422 if (ops->ndo_bridge_getlink) {
3423 if (idx >= cb->args[0]) {
3424 err = ops->ndo_bridge_getlink(skb, portid,
3428 if (err < 0 && err != -EOPNOTSUPP)
3440 static inline size_t bridge_nlmsg_size(void)
3442 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
3443 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
3444 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
3445 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
3446 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
3447 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
3448 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
3449 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
3450 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
3451 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
3452 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
3455 static int rtnl_bridge_notify(struct net_device *dev)
3457 struct net *net = dev_net(dev);
3458 struct sk_buff *skb;
3459 int err = -EOPNOTSUPP;
3461 if (!dev->netdev_ops->ndo_bridge_getlink)
3464 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
3470 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
3477 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
3480 WARN_ON(err == -EMSGSIZE);
3483 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3487 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
3489 struct net *net = sock_net(skb->sk);
3490 struct ifinfomsg *ifm;
3491 struct net_device *dev;
3492 struct nlattr *br_spec, *attr = NULL;
3493 int rem, err = -EOPNOTSUPP;
3495 bool have_flags = false;
3497 if (nlmsg_len(nlh) < sizeof(*ifm))
3500 ifm = nlmsg_data(nlh);
3501 if (ifm->ifi_family != AF_BRIDGE)
3502 return -EPFNOSUPPORT;
3504 dev = __dev_get_by_index(net, ifm->ifi_index);
3506 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
3510 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3512 nla_for_each_nested(attr, br_spec, rem) {
3513 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3514 if (nla_len(attr) < sizeof(flags))
3518 flags = nla_get_u16(attr);
3524 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3525 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3527 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
3532 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
3536 flags &= ~BRIDGE_FLAGS_MASTER;
3539 if ((flags & BRIDGE_FLAGS_SELF)) {
3540 if (!dev->netdev_ops->ndo_bridge_setlink)
3543 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
3546 flags &= ~BRIDGE_FLAGS_SELF;
3548 /* Generate an event to notify upper layers of the bridge change */
3551 err = rtnl_bridge_notify(dev);
3556 memcpy(nla_data(attr), &flags, sizeof(flags));
3561 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
3563 struct net *net = sock_net(skb->sk);
3564 struct ifinfomsg *ifm;
3565 struct net_device *dev;
3566 struct nlattr *br_spec, *attr = NULL;
3567 int rem, err = -EOPNOTSUPP;
3569 bool have_flags = false;
3571 if (nlmsg_len(nlh) < sizeof(*ifm))
3574 ifm = nlmsg_data(nlh);
3575 if (ifm->ifi_family != AF_BRIDGE)
3576 return -EPFNOSUPPORT;
3578 dev = __dev_get_by_index(net, ifm->ifi_index);
3580 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
3584 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3586 nla_for_each_nested(attr, br_spec, rem) {
3587 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3588 if (nla_len(attr) < sizeof(flags))
3592 flags = nla_get_u16(attr);
3598 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3599 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3601 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
3606 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
3610 flags &= ~BRIDGE_FLAGS_MASTER;
3613 if ((flags & BRIDGE_FLAGS_SELF)) {
3614 if (!dev->netdev_ops->ndo_bridge_dellink)
3617 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
3621 flags &= ~BRIDGE_FLAGS_SELF;
3623 /* Generate an event to notify upper layers of the bridge change */
3626 err = rtnl_bridge_notify(dev);
3631 memcpy(nla_data(attr), &flags, sizeof(flags));
3636 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
3638 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
3639 (!idxattr || idxattr == attrid);
3642 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
3643 static int rtnl_get_offload_stats_attr_size(int attr_id)
3646 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
3647 return sizeof(struct rtnl_link_stats64);
3653 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
3656 struct nlattr *attr = NULL;
3661 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
3662 dev->netdev_ops->ndo_get_offload_stats))
3665 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
3666 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
3667 if (attr_id < *prividx)
3670 size = rtnl_get_offload_stats_attr_size(attr_id);
3674 if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
3677 attr = nla_reserve_64bit(skb, attr_id, size,
3678 IFLA_OFFLOAD_XSTATS_UNSPEC);
3680 goto nla_put_failure;
3682 attr_data = nla_data(attr);
3683 memset(attr_data, 0, size);
3684 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
3687 goto get_offload_stats_failure;
3698 get_offload_stats_failure:
3703 static int rtnl_get_offload_stats_size(const struct net_device *dev)
3709 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
3710 dev->netdev_ops->ndo_get_offload_stats))
3713 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
3714 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
3715 if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
3717 size = rtnl_get_offload_stats_attr_size(attr_id);
3718 nla_size += nla_total_size_64bit(size);
3722 nla_size += nla_total_size(0);
3727 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
3728 int type, u32 pid, u32 seq, u32 change,
3729 unsigned int flags, unsigned int filter_mask,
3730 int *idxattr, int *prividx)
3732 struct if_stats_msg *ifsm;
3733 struct nlmsghdr *nlh;
3734 struct nlattr *attr;
3735 int s_prividx = *prividx;
3740 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
3744 ifsm = nlmsg_data(nlh);
3745 ifsm->ifindex = dev->ifindex;
3746 ifsm->filter_mask = filter_mask;
3748 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
3749 struct rtnl_link_stats64 *sp;
3751 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
3752 sizeof(struct rtnl_link_stats64),
3755 goto nla_put_failure;
3757 sp = nla_data(attr);
3758 dev_get_stats(dev, sp);
3761 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
3762 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
3764 if (ops && ops->fill_linkxstats) {
3765 *idxattr = IFLA_STATS_LINK_XSTATS;
3766 attr = nla_nest_start(skb,
3767 IFLA_STATS_LINK_XSTATS);
3769 goto nla_put_failure;
3771 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3772 nla_nest_end(skb, attr);
3774 goto nla_put_failure;
3779 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
3781 const struct rtnl_link_ops *ops = NULL;
3782 const struct net_device *master;
3784 master = netdev_master_upper_dev_get(dev);
3786 ops = master->rtnl_link_ops;
3787 if (ops && ops->fill_linkxstats) {
3788 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
3789 attr = nla_nest_start(skb,
3790 IFLA_STATS_LINK_XSTATS_SLAVE);
3792 goto nla_put_failure;
3794 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3795 nla_nest_end(skb, attr);
3797 goto nla_put_failure;
3802 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
3804 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
3805 attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
3807 goto nla_put_failure;
3809 err = rtnl_get_offload_stats(skb, dev, prividx);
3810 if (err == -ENODATA)
3811 nla_nest_cancel(skb, attr);
3813 nla_nest_end(skb, attr);
3815 if (err && err != -ENODATA)
3816 goto nla_put_failure;
3820 nlmsg_end(skb, nlh);
3825 /* Not a multipart message, or no progress made, means a real error */
3826 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
3827 nlmsg_cancel(skb, nlh);
3829 nlmsg_end(skb, nlh);
3834 static size_t if_nlmsg_stats_size(const struct net_device *dev,
3839 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
3840 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
3842 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
3843 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
3844 int attr = IFLA_STATS_LINK_XSTATS;
3846 if (ops && ops->get_linkxstats_size) {
3847 size += nla_total_size(ops->get_linkxstats_size(dev,
3849 /* for IFLA_STATS_LINK_XSTATS */
3850 size += nla_total_size(0);
3854 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
3855 struct net_device *_dev = (struct net_device *)dev;
3856 const struct rtnl_link_ops *ops = NULL;
3857 const struct net_device *master;
3859 /* netdev_master_upper_dev_get can't take const */
3860 master = netdev_master_upper_dev_get(_dev);
3862 ops = master->rtnl_link_ops;
3863 if (ops && ops->get_linkxstats_size) {
3864 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
3866 size += nla_total_size(ops->get_linkxstats_size(dev,
3868 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
3869 size += nla_total_size(0);
3873 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
3874 size += rtnl_get_offload_stats_size(dev);
3879 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
3881 struct net *net = sock_net(skb->sk);
3882 struct net_device *dev = NULL;
3883 int idxattr = 0, prividx = 0;
3884 struct if_stats_msg *ifsm;
3885 struct sk_buff *nskb;
3889 ifsm = nlmsg_data(nlh);
3890 if (ifsm->ifindex > 0)
3891 dev = __dev_get_by_index(net, ifsm->ifindex);
3898 filter_mask = ifsm->filter_mask;
3902 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
3906 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
3907 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
3908 0, filter_mask, &idxattr, &prividx);
3910 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
3911 WARN_ON(err == -EMSGSIZE);
3914 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
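/*
 * Editor's illustrative sketch (a separate userspace program, not part of
 * this file): the RTM_GETSTATS unicast request that rtnl_stats_get() above
 * answers.  filter_mask selects which nested stats attributes are filled.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

int query_stats(int ifindex)
{
	struct {
		struct nlmsghdr	nlh;
		struct if_stats_msg ifsm;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct if_stats_msg));
	req.nlh.nlmsg_type = RTM_GETSTATS;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.ifsm.ifindex = ifindex;
	req.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64);

	send(fd, &req, req.nlh.nlmsg_len, 0);
	/* ... the RTM_NEWSTATS reply built by rtnl_fill_statsinfo() is read here ... */
	close(fd);
	return 0;
}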
3920 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
3922 int h, s_h, err, s_idx, s_idxattr, s_prividx;
3923 struct net *net = sock_net(skb->sk);
3924 unsigned int flags = NLM_F_MULTI;
3925 struct if_stats_msg *ifsm;
3926 struct hlist_head *head;
3927 struct net_device *dev;
3928 u32 filter_mask = 0;
3932 s_idx = cb->args[1];
3933 s_idxattr = cb->args[2];
3934 s_prividx = cb->args[3];
3936 cb->seq = net->dev_base_seq;
3938 ifsm = nlmsg_data(cb->nlh);
3939 filter_mask = ifsm->filter_mask;
3943 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3945 head = &net->dev_index_head[h];
3946 hlist_for_each_entry(dev, head, index_hlist) {
3949 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
3950 NETLINK_CB(cb->skb).portid,
3951 cb->nlh->nlmsg_seq, 0,
3953 &s_idxattr, &s_prividx);
3954 /* If we ran out of room on the first message,
3957 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
3963 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3969 cb->args[3] = s_prividx;
3970 cb->args[2] = s_idxattr;
3977 /* Process one rtnetlink message. */
3979 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
3981 struct net *net = sock_net(skb->sk);
3982 rtnl_doit_func doit;
3988 type = nlh->nlmsg_type;
3994 /* All messages must carry at least 1 byte of payload (a struct rtgenmsg) */
3995 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
3998 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
4001 if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
4004 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
4006 rtnl_dumpit_func dumpit;
4007 rtnl_calcit_func calcit;
4008 u16 min_dump_alloc = 0;
4010 dumpit = rtnl_get_dumpit(family, type);
4013 calcit = rtnl_get_calcit(family, type);
4015 min_dump_alloc = calcit(skb, nlh);
4020 struct netlink_dump_control c = {
4022 .min_dump_alloc = min_dump_alloc,
4024 err = netlink_dump_start(rtnl, skb, nlh, &c);
4030 doit = rtnl_get_doit(family, type);
4034 return doit(skb, nlh);
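/*
 * Editor's illustrative sketch (not part of this file): the shape of a doit
 * handler dispatched by rtnetlink_rcv_msg() above, and how a subsystem would
 * wire it up with rtnl_register() (the in-tree callers are in
 * rtnetlink_init() below).  foo_doit and the commented registration line are
 * hypothetical placeholders.
 */
static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* Parse nlh and its attributes, act, and reply via rtnl_unicast(). */
	return 0;
}

/* From the owning subsystem's init path, using the signature seen below:
 *	rtnl_register(PF_UNSPEC, <RTM_* type>, foo_doit, NULL, NULL);
 */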
4037 static void rtnetlink_rcv(struct sk_buff *skb)
4040 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
4044 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
4046 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4052 case NETDEV_POST_INIT:
4053 case NETDEV_REGISTER:
4055 case NETDEV_PRE_TYPE_CHANGE:
4056 case NETDEV_GOING_DOWN:
4057 case NETDEV_UNREGISTER:
4058 case NETDEV_UNREGISTER_FINAL:
4059 case NETDEV_RELEASE:
4061 case NETDEV_BONDING_INFO:
4064 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
4070 static struct notifier_block rtnetlink_dev_notifier = {
4071 .notifier_call = rtnetlink_event,
4075 static int __net_init rtnetlink_net_init(struct net *net)
4078 struct netlink_kernel_cfg cfg = {
4079 .groups = RTNLGRP_MAX,
4080 .input = rtnetlink_rcv,
4081 .cb_mutex = &rtnl_mutex,
4082 .flags = NL_CFG_F_NONROOT_RECV,
4085 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
4092 static void __net_exit rtnetlink_net_exit(struct net *net)
4094 netlink_kernel_release(net->rtnl);
4098 static struct pernet_operations rtnetlink_net_ops = {
4099 .init = rtnetlink_net_init,
4100 .exit = rtnetlink_net_exit,
4103 void __init rtnetlink_init(void)
4105 if (register_pernet_subsys(&rtnetlink_net_ops))
4106 panic("rtnetlink_init: cannot initialize rtnetlink\n");
4108 register_netdevice_notifier(&rtnetlink_dev_notifier);
4110 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
4111 rtnl_dump_ifinfo, rtnl_calcit);
4112 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
4113 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
4114 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);
4116 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
4117 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
4119 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
4120 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
4121 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
4123 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
4124 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
4125 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
4127 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,