/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <linux/if_team.h>

#define DRV_NAME "team"
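
/**********
 * Helpers
 **********/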

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	struct team_port *port = rcu_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

/*
 * Since the ability to change the mac address of an open port device is
 * tested in team_port_add, this function can be called without checking
 * the return value.
 */
static int __set_port_mac(struct net_device *port_dev,
			  const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
	addr.sa_family = ARPHRD_ETHER;
	return dev_set_mac_address(port_dev, &addr);
}

static int team_port_set_orig_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->orig.dev_addr);
}

int team_port_set_team_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->team->dev->dev_addr);
}
EXPORT_SYMBOL(team_port_set_team_mac);
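
/*
 * Resolve the effective link state of a port: if the user enabled forced
 * linkup via the per-port options, take the user-set value, otherwise use
 * the real carrier state kept in port->state.linkup.
 */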
static void team_refresh_port_linkup(struct team_port *port)
{
	port->linkup = port->user.linkup_enabled ? port->user.linkup :
						   port->state.linkup;
}
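
/*******************
 * Options handling
 *******************/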

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}

static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	struct team_port *port;
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}

	list_for_each_entry(port, &team->port_list, list) {
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count - 1;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);
static void __team_option_inst_change(struct team *team,
				      struct team_option_inst *opt_inst);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_port_add(struct team *team, struct team_port *port)
{
	int err;

	err = __team_option_inst_add_port(team, port);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}

static void team_option_port_del(struct team *team, struct team_port *port)
{
	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
}

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	int err;

	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	err = opt_inst->option->setter(team, ctx);
	if (err)
		return err;

	__team_option_inst_change(team, opt_inst);
	return err;
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);
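
/****************
 * Mode handling
 ****************/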
413 static LIST_HEAD(mode_list);
414 static DEFINE_SPINLOCK(mode_list_lock);
416 struct team_mode_item {
417 struct list_head list;
418 const struct team_mode *mode;
421 static struct team_mode_item *__find_mode(const char *kind)
423 struct team_mode_item *mitem;
425 list_for_each_entry(mitem, &mode_list, list) {
426 if (strcmp(mitem->mode->kind, kind) == 0)
432 static bool is_good_mode_name(const char *name)
434 while (*name != '\0') {
435 if (!isalpha(*name) && !isdigit(*name) && *name != '_')
442 int team_mode_register(const struct team_mode *mode)
445 struct team_mode_item *mitem;
447 if (!is_good_mode_name(mode->kind) ||
448 mode->priv_size > TEAM_MODE_PRIV_SIZE)
451 mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
455 spin_lock(&mode_list_lock);
456 if (__find_mode(mode->kind)) {
462 list_add_tail(&mitem->list, &mode_list);
464 spin_unlock(&mode_list_lock);
467 EXPORT_SYMBOL(team_mode_register);
469 void team_mode_unregister(const struct team_mode *mode)
471 struct team_mode_item *mitem;
473 spin_lock(&mode_list_lock);
474 mitem = __find_mode(mode->kind);
476 list_del_init(&mitem->list);
479 spin_unlock(&mode_list_lock);
481 EXPORT_SYMBOL(team_mode_unregister);
483 static const struct team_mode *team_mode_get(const char *kind)
485 struct team_mode_item *mitem;
486 const struct team_mode *mode = NULL;
488 spin_lock(&mode_list_lock);
489 mitem = __find_mode(kind);
491 spin_unlock(&mode_list_lock);
492 request_module("team-mode-%s", kind);
493 spin_lock(&mode_list_lock);
494 mitem = __find_mode(kind);
498 if (!try_module_get(mode->owner))
502 spin_unlock(&mode_list_lock);
static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}
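
/*
 * Dummy handlers are installed by team_adjust_ops() whenever no mode is set
 * or the current mode does not provide its own, so team->ops.transmit and
 * team->ops.receive can be called without NULL checks in the hot paths.
 */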
static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

rx_handler_result_t team_dummy_receive(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->mode = &__team_no_mode;
}

/*
 * To avoid checks in rx/tx skb paths, ensure here that non-null and
 * correct ops are always set.
 */
static void team_adjust_ops(struct team *team)
{
	if (list_empty(&team->port_list) ||
	    !team_is_mode_set(team) || !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (list_empty(&team->port_list) ||
	    !team_is_mode_set(team) || !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
563 static int __team_change_mode(struct team *team,
564 const struct team_mode *new_mode)
566 /* Check if mode was previously set and do cleanup if so */
567 if (team_is_mode_set(team)) {
568 void (*exit_op)(struct team *team) = team->ops.exit;
570 /* Clear ops area so no callback is called any longer */
571 memset(&team->ops, 0, sizeof(struct team_mode_ops));
572 team_adjust_ops(team);
576 team_mode_put(team->mode);
577 team_set_no_mode(team);
578 /* zero private data area */
579 memset(&team->mode_priv, 0,
580 sizeof(struct team) - offsetof(struct team, mode_priv));
586 if (new_mode->ops->init) {
589 err = new_mode->ops->init(team);
594 team->mode = new_mode;
595 memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
596 team_adjust_ops(team);
601 static int team_change_mode(struct team *team, const char *kind)
603 const struct team_mode *new_mode;
604 struct net_device *dev = team->dev;
607 if (!list_empty(&team->port_list)) {
608 netdev_err(dev, "No ports can be present during mode change\n");
612 if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
613 netdev_err(dev, "Unable to change to the same mode the team is in\n");
617 new_mode = team_mode_get(kind);
619 netdev_err(dev, "Mode \"%s\" not found\n", kind);
623 err = __team_change_mode(team, new_mode);
625 netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
626 team_mode_put(new_mode);
630 netdev_info(dev, "Mode changed to \"%s\"\n", kind);
635 /************************
636 * Rx path frame handler
637 ************************/
static bool team_port_enabled(struct team_port *port);

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}
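
/*
 * Every port is linked on team->port_list; enabled ports are additionally
 * kept in a hash list indexed by port->index, see team_port_enable() and
 * team_port_disable() below.
 */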
687 static bool team_port_find(const struct team *team,
688 const struct team_port *port)
690 struct team_port *cur;
692 list_for_each_entry(cur, &team->port_list, list)
698 static bool team_port_enabled(struct team_port *port)
700 return port->index != -1;
/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing a flying packet, but that is not a problem). Write guarded
 * by team->lock.
 */
709 static void team_port_enable(struct team *team,
710 struct team_port *port)
712 if (team_port_enabled(port))
714 port->index = team->en_port_count++;
715 hlist_add_head_rcu(&port->hlist,
716 team_port_index_hash(team, port->index));
717 if (team->ops.port_enabled)
718 team->ops.port_enabled(team, port);
721 static void __reconstruct_port_hlist(struct team *team, int rm_index)
724 struct team_port *port;
726 for (i = rm_index + 1; i < team->en_port_count; i++) {
727 port = team_get_port_by_index(team, i);
728 hlist_del_rcu(&port->hlist);
730 hlist_add_head_rcu(&port->hlist,
731 team_port_index_hash(team, port->index));
735 static void team_port_disable(struct team *team,
736 struct team_port *port)
738 int rm_index = port->index;
740 if (!team_port_enabled(port))
742 if (team->ops.port_disabled)
743 team->ops.port_disabled(team, port);
744 hlist_del_rcu(&port->hlist);
745 __reconstruct_port_hlist(team, rm_index);
746 team->en_port_count--;
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)
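
/*
 * The team device's vlan_features and hard_header_len are recomputed from
 * the current set of ports: start from TEAM_VLAN_FEATURES and let
 * netdev_increment_features() reduce it to what every port supports.
 */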
754 static void __team_compute_features(struct team *team)
756 struct team_port *port;
757 u32 vlan_features = TEAM_VLAN_FEATURES;
758 unsigned short max_hard_header_len = ETH_HLEN;
760 list_for_each_entry(port, &team->port_list, list) {
761 vlan_features = netdev_increment_features(vlan_features,
762 port->dev->vlan_features,
765 if (port->dev->hard_header_len > max_hard_header_len)
766 max_hard_header_len = port->dev->hard_header_len;
769 team->dev->vlan_features = vlan_features;
770 team->dev->hard_header_len = max_hard_header_len;
772 netdev_change_features(team->dev);
775 static void team_compute_features(struct team *team)
777 mutex_lock(&team->lock);
778 __team_compute_features(team);
779 mutex_unlock(&team->lock);
782 static int team_port_enter(struct team *team, struct team_port *port)
787 port->dev->priv_flags |= IFF_TEAM_PORT;
788 if (team->ops.port_enter) {
789 err = team->ops.port_enter(team, port);
791 netdev_err(team->dev, "Device %s failed to enter team mode\n",
800 port->dev->priv_flags &= ~IFF_TEAM_PORT;
806 static void team_port_leave(struct team *team, struct team_port *port)
808 if (team->ops.port_leave)
809 team->ops.port_leave(team, port);
810 port->dev->priv_flags &= ~IFF_TEAM_PORT;
814 static void __team_port_change_check(struct team_port *port, bool linkup);
816 static int team_port_add(struct team *team, struct net_device *port_dev)
818 struct net_device *dev = team->dev;
819 struct team_port *port;
820 char *portname = port_dev->name;
823 if (port_dev->flags & IFF_LOOPBACK ||
824 port_dev->type != ARPHRD_ETHER) {
825 netdev_err(dev, "Device %s is of an unsupported type\n",
830 if (team_port_exists(port_dev)) {
831 netdev_err(dev, "Device %s is already a port "
832 "of a team device\n", portname);
836 if (port_dev->flags & IFF_UP) {
837 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
842 port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
847 port->dev = port_dev;
850 port->orig.mtu = port_dev->mtu;
851 err = dev_set_mtu(port_dev, dev->mtu);
853 netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
857 memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
859 err = team_port_enter(team, port);
861 netdev_err(dev, "Device %s failed to enter team mode\n",
866 err = dev_open(port_dev);
868 netdev_dbg(dev, "Device %s opening failed\n",
873 err = vlan_vids_add_by_dev(port_dev, dev);
875 netdev_err(dev, "Failed to add vlan ids to device %s\n",
880 err = netdev_set_master(port_dev, dev);
882 netdev_err(dev, "Device %s failed to set master\n", portname);
886 err = netdev_rx_handler_register(port_dev, team_handle_frame,
889 netdev_err(dev, "Device %s failed to register rx_handler\n",
891 goto err_handler_register;
894 err = team_option_port_add(team, port);
896 netdev_err(dev, "Device %s failed to add per-port options\n",
898 goto err_option_port_add;
902 team_port_enable(team, port);
903 list_add_tail_rcu(&port->list, &team->port_list);
904 team_adjust_ops(team);
905 __team_compute_features(team);
906 __team_port_change_check(port, !!netif_carrier_ok(port_dev));
908 netdev_info(dev, "Port device %s added\n", portname);
913 netdev_rx_handler_unregister(port_dev);
915 err_handler_register:
916 netdev_set_master(port_dev, NULL);
919 vlan_vids_del_by_dev(port_dev, dev);
925 team_port_leave(team, port);
926 team_port_set_orig_mac(port);
929 dev_set_mtu(port_dev, port->orig.mtu);
937 static int team_port_del(struct team *team, struct net_device *port_dev)
939 struct net_device *dev = team->dev;
940 struct team_port *port;
941 char *portname = port_dev->name;
943 port = team_port_get_rtnl(port_dev);
944 if (!port || !team_port_find(team, port)) {
945 netdev_err(dev, "Device %s does not act as a port of this team\n",
950 port->removed = true;
951 __team_port_change_check(port, false);
952 team_port_disable(team, port);
953 list_del_rcu(&port->list);
954 team_adjust_ops(team);
955 team_option_port_del(team, port);
956 netdev_rx_handler_unregister(port_dev);
957 netdev_set_master(port_dev, NULL);
958 vlan_vids_del_by_dev(port_dev, dev);
960 team_port_leave(team, port);
961 team_port_set_orig_mac(port);
962 dev_set_mtu(port_dev, port->orig.mtu);
965 netdev_info(dev, "Port device %s removed\n", portname);
966 __team_compute_features(team);
976 static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
978 ctx->data.str_val = team->mode->kind;
982 static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
984 return team_change_mode(team, ctx->data.str_val);
987 static int team_port_en_option_get(struct team *team,
988 struct team_gsetter_ctx *ctx)
990 struct team_port *port = ctx->info->port;
992 ctx->data.bool_val = team_port_enabled(port);
996 static int team_port_en_option_set(struct team *team,
997 struct team_gsetter_ctx *ctx)
999 struct team_port *port = ctx->info->port;
1001 if (ctx->data.bool_val)
1002 team_port_enable(team, port);
1004 team_port_disable(team, port);
1008 static int team_user_linkup_option_get(struct team *team,
1009 struct team_gsetter_ctx *ctx)
1011 struct team_port *port = ctx->info->port;
1013 ctx->data.bool_val = port->user.linkup;
1017 static int team_user_linkup_option_set(struct team *team,
1018 struct team_gsetter_ctx *ctx)
1020 struct team_port *port = ctx->info->port;
1022 port->user.linkup = ctx->data.bool_val;
1023 team_refresh_port_linkup(port);
1027 static int team_user_linkup_en_option_get(struct team *team,
1028 struct team_gsetter_ctx *ctx)
1030 struct team_port *port = ctx->info->port;
1032 ctx->data.bool_val = port->user.linkup_enabled;
1036 static int team_user_linkup_en_option_set(struct team *team,
1037 struct team_gsetter_ctx *ctx)
1039 struct team_port *port = ctx->info->port;
1041 port->user.linkup_enabled = ctx->data.bool_val;
1042 team_refresh_port_linkup(port);
1046 static const struct team_option team_options[] = {
1049 .type = TEAM_OPTION_TYPE_STRING,
1050 .getter = team_mode_option_get,
1051 .setter = team_mode_option_set,
1055 .type = TEAM_OPTION_TYPE_BOOL,
1057 .getter = team_port_en_option_get,
1058 .setter = team_port_en_option_set,
1061 .name = "user_linkup",
1062 .type = TEAM_OPTION_TYPE_BOOL,
1064 .getter = team_user_linkup_option_get,
1065 .setter = team_user_linkup_option_set,
1068 .name = "user_linkup_enabled",
1069 .type = TEAM_OPTION_TYPE_BOOL,
1071 .getter = team_user_linkup_en_option_get,
1072 .setter = team_user_linkup_en_option_set,
1076 static int team_init(struct net_device *dev)
1078 struct team *team = netdev_priv(dev);
1083 mutex_init(&team->lock);
1084 team_set_no_mode(team);
1086 team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
1087 if (!team->pcpu_stats)
1090 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1091 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1092 INIT_LIST_HEAD(&team->port_list);
1094 team_adjust_ops(team);
1096 INIT_LIST_HEAD(&team->option_list);
1097 INIT_LIST_HEAD(&team->option_inst_list);
1098 err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1100 goto err_options_register;
1101 netif_carrier_off(dev);
1105 err_options_register:
1106 free_percpu(team->pcpu_stats);
1111 static void team_uninit(struct net_device *dev)
1113 struct team *team = netdev_priv(dev);
1114 struct team_port *port;
1115 struct team_port *tmp;
1117 mutex_lock(&team->lock);
1118 list_for_each_entry_safe(port, tmp, &team->port_list, list)
1119 team_port_del(team, port->dev);
1121 __team_change_mode(team, NULL); /* cleanup */
1122 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1123 mutex_unlock(&team->lock);
1126 static void team_destructor(struct net_device *dev)
1128 struct team *team = netdev_priv(dev);
1130 free_percpu(team->pcpu_stats);
1134 static int team_open(struct net_device *dev)
1136 netif_carrier_on(dev);
1140 static int team_close(struct net_device *dev)
1142 netif_carrier_off(dev);
/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success = false;
	unsigned int len = skb->len;

	tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}
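
/*
 * Promiscuity and allmulti changes on the team device are propagated to
 * every port device.
 */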
1171 static void team_change_rx_flags(struct net_device *dev, int change)
1173 struct team *team = netdev_priv(dev);
1174 struct team_port *port;
1178 list_for_each_entry_rcu(port, &team->port_list, list) {
1179 if (change & IFF_PROMISC) {
1180 inc = dev->flags & IFF_PROMISC ? 1 : -1;
1181 dev_set_promiscuity(port->dev, inc);
1183 if (change & IFF_ALLMULTI) {
1184 inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1185 dev_set_allmulti(port->dev, inc);
1191 static void team_set_rx_mode(struct net_device *dev)
1193 struct team *team = netdev_priv(dev);
1194 struct team_port *port;
1197 list_for_each_entry_rcu(port, &team->port_list, list) {
1198 dev_uc_sync(port->dev, dev);
1199 dev_mc_sync(port->dev, dev);
1204 static int team_set_mac_address(struct net_device *dev, void *p)
1206 struct team *team = netdev_priv(dev);
1207 struct team_port *port;
1208 struct sockaddr *addr = p;
1210 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1211 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1213 list_for_each_entry_rcu(port, &team->port_list, list)
1214 if (team->ops.port_change_mac)
1215 team->ops.port_change_mac(team, port);
1220 static int team_change_mtu(struct net_device *dev, int new_mtu)
1222 struct team *team = netdev_priv(dev);
1223 struct team_port *port;
	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
1230 mutex_lock(&team->lock);
1231 list_for_each_entry(port, &team->port_list, list) {
1232 err = dev_set_mtu(port->dev, new_mtu);
1234 netdev_err(dev, "Device %s failed to change mtu",
1239 mutex_unlock(&team->lock);
1246 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1247 dev_set_mtu(port->dev, dev->mtu);
1248 mutex_unlock(&team->lock);
1253 static struct rtnl_link_stats64 *
1254 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1256 struct team *team = netdev_priv(dev);
1257 struct team_pcpu_stats *p;
1258 u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1259 u32 rx_dropped = 0, tx_dropped = 0;
1263 for_each_possible_cpu(i) {
1264 p = per_cpu_ptr(team->pcpu_stats, i);
1266 start = u64_stats_fetch_begin_bh(&p->syncp);
1267 rx_packets = p->rx_packets;
1268 rx_bytes = p->rx_bytes;
1269 rx_multicast = p->rx_multicast;
1270 tx_packets = p->tx_packets;
1271 tx_bytes = p->tx_bytes;
1272 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
1274 stats->rx_packets += rx_packets;
1275 stats->rx_bytes += rx_bytes;
1276 stats->multicast += rx_multicast;
1277 stats->tx_packets += tx_packets;
1278 stats->tx_bytes += tx_bytes;
1280 * rx_dropped & tx_dropped are u32, updated
1281 * without syncp protection.
1283 rx_dropped += p->rx_dropped;
1284 tx_dropped += p->tx_dropped;
1286 stats->rx_dropped = rx_dropped;
1287 stats->tx_dropped = tx_dropped;
1291 static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
1293 struct team *team = netdev_priv(dev);
1294 struct team_port *port;
	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
1301 mutex_lock(&team->lock);
1302 list_for_each_entry(port, &team->port_list, list) {
1303 err = vlan_vid_add(port->dev, vid);
1307 mutex_unlock(&team->lock);
1312 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1313 vlan_vid_del(port->dev, vid);
1314 mutex_unlock(&team->lock);
1319 static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
1321 struct team *team = netdev_priv(dev);
1322 struct team_port *port;
1325 list_for_each_entry_rcu(port, &team->port_list, list)
1326 vlan_vid_del(port->dev, vid);
1332 static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1334 struct team *team = netdev_priv(dev);
1337 mutex_lock(&team->lock);
1338 err = team_port_add(team, port_dev);
1339 mutex_unlock(&team->lock);
1343 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1345 struct team *team = netdev_priv(dev);
1348 mutex_lock(&team->lock);
1349 err = team_port_del(team, port_dev);
1350 mutex_unlock(&team->lock);
1354 static netdev_features_t team_fix_features(struct net_device *dev,
1355 netdev_features_t features)
1357 struct team_port *port;
1358 struct team *team = netdev_priv(dev);
1359 netdev_features_t mask;
1362 features &= ~NETIF_F_ONE_FOR_ALL;
1363 features |= NETIF_F_ALL_FOR_ALL;
1366 list_for_each_entry_rcu(port, &team->port_list, list) {
1367 features = netdev_increment_features(features,
1368 port->dev->features,
static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
};

/***********************
 * rt netlink interface
 ***********************/

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->destructor	= team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Leave this up to the underlying drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	dev->features |= dev->hw_features;
}
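
/*
 * If no hardware address is supplied via IFLA_ADDRESS, a random one is
 * generated before the device is registered.
 */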
1424 static int team_newlink(struct net *src_net, struct net_device *dev,
1425 struct nlattr *tb[], struct nlattr *data[])
1429 if (tb[IFLA_ADDRESS] == NULL)
1430 eth_hw_addr_random(dev);
1432 err = register_netdevice(dev);
1439 static int team_validate(struct nlattr *tb[], struct nlattr *data[])
1441 if (tb[IFLA_ADDRESS]) {
1442 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1444 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1445 return -EADDRNOTAVAIL;
1450 static struct rtnl_link_ops team_link_ops __read_mostly = {
1452 .priv_size = sizeof(struct team),
1453 .setup = team_setup,
1454 .newlink = team_newlink,
1455 .validate = team_validate,
1459 /***********************************
1460 * Generic netlink custom interface
1461 ***********************************/
1463 static struct genl_family team_nl_family = {
1464 .id = GENL_ID_GENERATE,
1465 .name = TEAM_GENL_NAME,
1466 .version = TEAM_GENL_VERSION,
1467 .maxattr = TEAM_ATTR_MAX,
1471 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
1472 [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
1473 [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
1474 [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
1475 [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
1478 static const struct nla_policy
1479 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
1480 [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
1481 [TEAM_ATTR_OPTION_NAME] = {
1483 .len = TEAM_STRING_MAX_LEN,
1485 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
1486 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
1487 [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
1490 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1492 struct sk_buff *msg;
1496 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1500 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
1501 &team_nl_family, 0, TEAM_CMD_NOOP);
1507 genlmsg_end(msg, hdr);
1509 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
1518 * Netlink cmd functions should be locked by following two functions.
1519 * Since dev gets held here, that ensures dev won't disappear in between.
1521 static struct team *team_nl_team_get(struct genl_info *info)
1523 struct net *net = genl_info_net(info);
1525 struct net_device *dev;
1528 if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
1531 ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
1532 dev = dev_get_by_index(net, ifindex);
1533 if (!dev || dev->netdev_ops != &team_netdev_ops) {
1539 team = netdev_priv(dev);
1540 mutex_lock(&team->lock);
1544 static void team_nl_team_put(struct team *team)
1546 mutex_unlock(&team->lock);
1550 static int team_nl_send_generic(struct genl_info *info, struct team *team,
1551 int (*fill_func)(struct sk_buff *skb,
1552 struct genl_info *info,
1553 int flags, struct team *team))
1555 struct sk_buff *skb;
1558 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1562 err = fill_func(skb, info, NLM_F_ACK, team);
1566 err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
1574 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
1575 struct team_option_inst *opt_inst)
1577 struct nlattr *option_item;
1578 struct team_option *option = opt_inst->option;
1579 struct team_option_inst_info *opt_inst_info;
1580 struct team_gsetter_ctx ctx;
1583 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1585 goto nla_put_failure;
1586 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
1587 goto nla_put_failure;
1588 if (opt_inst->changed) {
1589 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
1590 goto nla_put_failure;
1591 opt_inst->changed = false;
1593 if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1594 goto nla_put_failure;
1596 opt_inst_info = &opt_inst->info;
1597 if (opt_inst_info->port &&
1598 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1599 opt_inst_info->port->dev->ifindex))
1600 goto nla_put_failure;
1601 if (opt_inst->option->array_size &&
1602 nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
1603 opt_inst_info->array_index))
1604 goto nla_put_failure;
1605 ctx.info = opt_inst_info;
1607 switch (option->type) {
1608 case TEAM_OPTION_TYPE_U32:
1609 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1610 goto nla_put_failure;
1611 err = team_option_get(team, opt_inst, &ctx);
1614 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
1615 goto nla_put_failure;
1617 case TEAM_OPTION_TYPE_STRING:
1618 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1619 goto nla_put_failure;
1620 err = team_option_get(team, opt_inst, &ctx);
1623 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1625 goto nla_put_failure;
1627 case TEAM_OPTION_TYPE_BINARY:
1628 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1629 goto nla_put_failure;
1630 err = team_option_get(team, opt_inst, &ctx);
1633 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
1634 ctx.data.bin_val.ptr))
1635 goto nla_put_failure;
1637 case TEAM_OPTION_TYPE_BOOL:
1638 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1639 goto nla_put_failure;
1640 err = team_option_get(team, opt_inst, &ctx);
1643 if (ctx.data.bool_val &&
1644 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1645 goto nla_put_failure;
1650 nla_nest_end(skb, option_item);
1659 static int team_nl_fill_options_get(struct sk_buff *skb,
1660 u32 pid, u32 seq, int flags,
1662 struct list_head *sel_opt_inst_list)
1664 struct nlattr *option_list;
1666 struct team_option_inst *opt_inst;
1669 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
1670 TEAM_CMD_OPTIONS_GET);
1672 return PTR_ERR(hdr);
1674 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1675 goto nla_put_failure;
1676 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
1678 goto nla_put_failure;
1680 list_for_each_entry(opt_inst, sel_opt_inst_list, tmp_list) {
1681 err = team_nl_fill_one_option_get(skb, team, opt_inst);
1686 nla_nest_end(skb, option_list);
1687 return genlmsg_end(skb, hdr);
1692 genlmsg_cancel(skb, hdr);
1696 static int team_nl_fill_options_get_all(struct sk_buff *skb,
1697 struct genl_info *info, int flags,
1700 struct team_option_inst *opt_inst;
1701 LIST_HEAD(sel_opt_inst_list);
1703 list_for_each_entry(opt_inst, &team->option_inst_list, list)
1704 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
1705 return team_nl_fill_options_get(skb, info->snd_pid,
1706 info->snd_seq, NLM_F_ACK,
1707 team, &sel_opt_inst_list);
1710 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
1715 team = team_nl_team_get(info);
1719 err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);
1721 team_nl_team_put(team);
1726 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1731 struct nlattr *nl_option;
1733 team = team_nl_team_get(info);
1738 if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
1743 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
1744 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
1745 struct nlattr *attr;
1746 struct nlattr *attr_data;
1747 enum team_option_type opt_type;
1748 int opt_port_ifindex = 0; /* != 0 for per-port options */
1749 u32 opt_array_index = 0;
1750 bool opt_is_array = false;
1751 struct team_option_inst *opt_inst;
1753 bool opt_found = false;
1755 if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
1759 err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
1760 nl_option, team_nl_option_policy);
1763 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
1764 !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
1768 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
1770 opt_type = TEAM_OPTION_TYPE_U32;
1773 opt_type = TEAM_OPTION_TYPE_STRING;
1776 opt_type = TEAM_OPTION_TYPE_BINARY;
1779 opt_type = TEAM_OPTION_TYPE_BOOL;
1785 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
1786 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
1791 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
1792 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
1794 opt_port_ifindex = nla_get_u32(attr);
1796 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
1798 opt_is_array = true;
1799 opt_array_index = nla_get_u32(attr);
1802 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1803 struct team_option *option = opt_inst->option;
1804 struct team_gsetter_ctx ctx;
1805 struct team_option_inst_info *opt_inst_info;
1808 opt_inst_info = &opt_inst->info;
1809 tmp_ifindex = opt_inst_info->port ?
1810 opt_inst_info->port->dev->ifindex : 0;
1811 if (option->type != opt_type ||
1812 strcmp(option->name, opt_name) ||
1813 tmp_ifindex != opt_port_ifindex ||
1814 (option->array_size && !opt_is_array) ||
1815 opt_inst_info->array_index != opt_array_index)
1818 ctx.info = opt_inst_info;
1820 case TEAM_OPTION_TYPE_U32:
1821 ctx.data.u32_val = nla_get_u32(attr_data);
1823 case TEAM_OPTION_TYPE_STRING:
1824 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
1828 ctx.data.str_val = nla_data(attr_data);
1830 case TEAM_OPTION_TYPE_BINARY:
1831 ctx.data.bin_val.len = nla_len(attr_data);
1832 ctx.data.bin_val.ptr = nla_data(attr_data);
1834 case TEAM_OPTION_TYPE_BOOL:
1835 ctx.data.bool_val = attr_data ? true : false;
1840 err = team_option_set(team, opt_inst, &ctx);
1851 team_nl_team_put(team);
1856 static int team_nl_fill_port_list_get(struct sk_buff *skb,
1857 u32 pid, u32 seq, int flags,
1861 struct nlattr *port_list;
1863 struct team_port *port;
1865 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
1866 TEAM_CMD_PORT_LIST_GET);
1868 return PTR_ERR(hdr);
1870 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1871 goto nla_put_failure;
1872 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
1874 goto nla_put_failure;
1876 list_for_each_entry(port, &team->port_list, list) {
1877 struct nlattr *port_item;
1879 /* Include only changed ports if fill all mode is not on */
1880 if (!fillall && !port->changed)
1882 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
1884 goto nla_put_failure;
1885 if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
1886 goto nla_put_failure;
1887 if (port->changed) {
1888 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
1889 goto nla_put_failure;
1890 port->changed = false;
1892 if ((port->removed &&
1893 nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
1894 (port->state.linkup &&
1895 nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
1896 nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
1897 nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
1898 goto nla_put_failure;
1899 nla_nest_end(skb, port_item);
1902 nla_nest_end(skb, port_list);
1903 return genlmsg_end(skb, hdr);
1906 genlmsg_cancel(skb, hdr);
1910 static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
1911 struct genl_info *info, int flags,
1914 return team_nl_fill_port_list_get(skb, info->snd_pid,
1915 info->snd_seq, NLM_F_ACK,
1919 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
1920 struct genl_info *info)
1925 team = team_nl_team_get(info);
1929 err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
1931 team_nl_team_put(team);
1936 static struct genl_ops team_nl_ops[] = {
1938 .cmd = TEAM_CMD_NOOP,
1939 .doit = team_nl_cmd_noop,
1940 .policy = team_nl_policy,
1943 .cmd = TEAM_CMD_OPTIONS_SET,
1944 .doit = team_nl_cmd_options_set,
1945 .policy = team_nl_policy,
1946 .flags = GENL_ADMIN_PERM,
1949 .cmd = TEAM_CMD_OPTIONS_GET,
1950 .doit = team_nl_cmd_options_get,
1951 .policy = team_nl_policy,
1952 .flags = GENL_ADMIN_PERM,
1955 .cmd = TEAM_CMD_PORT_LIST_GET,
1956 .doit = team_nl_cmd_port_list_get,
1957 .policy = team_nl_policy,
1958 .flags = GENL_ADMIN_PERM,
1962 static struct genl_multicast_group team_change_event_mcgrp = {
1963 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
1966 static int team_nl_send_event_options_get(struct team *team,
1967 struct list_head *sel_opt_inst_list)
1969 struct sk_buff *skb;
1971 struct net *net = dev_net(team->dev);
1973 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1977 err = team_nl_fill_options_get(skb, 0, 0, 0, team, sel_opt_inst_list);
1981 err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
1990 static int team_nl_send_event_port_list_get(struct team *team)
1992 struct sk_buff *skb;
1994 struct net *net = dev_net(team->dev);
1996 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2000 err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
2004 err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
2013 static int team_nl_init(void)
2017 err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
2018 ARRAY_SIZE(team_nl_ops));
2022 err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
2024 goto err_change_event_grp_reg;
2028 err_change_event_grp_reg:
2029 genl_unregister_family(&team_nl_family);
2034 static void team_nl_fini(void)
2036 genl_unregister_family(&team_nl_family);
2044 static void __team_options_change_check(struct team *team)
2047 struct team_option_inst *opt_inst;
2048 LIST_HEAD(sel_opt_inst_list);
2050 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2051 if (opt_inst->changed)
2052 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2054 err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2056 netdev_warn(team->dev, "Failed to send options change via netlink\n");
2059 static void __team_option_inst_change(struct team *team,
2060 struct team_option_inst *sel_opt_inst)
2063 LIST_HEAD(sel_opt_inst_list);
2065 sel_opt_inst->changed = true;
2066 list_add(&sel_opt_inst->tmp_list, &sel_opt_inst_list);
2067 err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2069 netdev_warn(team->dev, "Failed to send option change via netlink\n");
2072 /* rtnl lock is held */
2073 static void __team_port_change_check(struct team_port *port, bool linkup)
2077 if (!port->removed && port->state.linkup == linkup)
2080 port->changed = true;
2081 port->state.linkup = linkup;
2082 team_refresh_port_linkup(port);
2084 struct ethtool_cmd ecmd;
2086 err = __ethtool_get_settings(port->dev, &ecmd);
2088 port->state.speed = ethtool_cmd_speed(&ecmd);
2089 port->state.duplex = ecmd.duplex;
2093 port->state.speed = 0;
2094 port->state.duplex = 0;
2097 err = team_nl_send_event_port_list_get(port->team);
2099 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
2104 static void team_port_change_check(struct team_port *port, bool linkup)
2106 struct team *team = port->team;
2108 mutex_lock(&team->lock);
2109 __team_port_change_check(port, linkup);
2110 mutex_unlock(&team->lock);
2114 /************************************
2115 * Net device notifier event handler
2116 ************************************/
2118 static int team_device_event(struct notifier_block *unused,
2119 unsigned long event, void *ptr)
2121 struct net_device *dev = (struct net_device *) ptr;
2122 struct team_port *port;
2124 port = team_port_get_rtnl(dev);
2130 if (netif_carrier_ok(dev))
2131 team_port_change_check(port, true);
2133 team_port_change_check(port, false);
2135 if (netif_running(port->dev))
2136 team_port_change_check(port,
2137 !!netif_carrier_ok(port->dev));
2139 case NETDEV_UNREGISTER:
2140 team_del_slave(port->team->dev, dev);
2142 case NETDEV_FEAT_CHANGE:
2143 team_compute_features(port->team);
	case NETDEV_CHANGEMTU:
		/* Forbid changing mtu of underlying device */
		return NOTIFY_BAD;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing type of underlying device */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);