1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/list.h>
4 #include <linux/netdevice.h>
5 #include <linux/rtnetlink.h>
6 #include <linux/skbuff.h>
8 #include <net/switchdev.h>
10 #include "br_private.h"
/* Static branch gating the TX forwarding offload fast paths below.
 * Incremented/decremented per port carrying BR_TX_FWD_OFFLOAD (see
 * nbp_switchdev_add() / nbp_switchdev_del()).
 */
12 static struct static_key_false br_switchdev_tx_fwd_offload;
/* True if egress port @p can take over TX forwarding of @skb in hardware:
 * the static key is enabled, the port opted in via BR_TX_FWD_OFFLOAD, and
 * the skb entered the bridge from a different hardware domain than @p's.
 */
14 static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
15 const struct sk_buff *skb)
17 if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
20 return (p->flags & BR_TX_FWD_OFFLOAD) &&
21 (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
/* Report whether @skb has been marked for TX forwarding offload.
 * Short-circuits when the static key is disabled (no offloading port
 * exists), avoiding the control-block read on the common path.
 */
24 bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
26 if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
29 return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
/* Propagate the bridge's TX-fwd-offload decision into the skb's generic
 * offload_fwd_mark, so lower devices skip duplicate forwarding.
 */
32 void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
34 skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
37 /* Mark the frame for TX forwarding offload if this egress port supports it */
38 void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
41 if (nbp_switchdev_can_offload_tx_fwd(p, skb))
42 BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
45 /* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
46 * that the skb has been already forwarded to, to avoid further cloning to
47 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false for them. Only done when the egress port can offload the
 * forwarding (see nbp_switchdev_can_offload_tx_fwd()).
 */
50 void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
53 if (nbp_switchdev_can_offload_tx_fwd(p, skb))
54 set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
/* Record the hardware domain the frame ingressed on, for later egress
 * filtering in nbp_switchdev_allowed_egress().
 */
57 void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
61 BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
/* Decide whether software forwarding to @p is allowed: deny if hardware
 * already forwarded the frame to @p's hwdom (fwd_hwdoms bit set), or if
 * the frame carries offload_fwd_mark and originated in the same hwdom.
 */
64 bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
65 const struct sk_buff *skb)
67 struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);
69 return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
70 (!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
73 /* Flags that can be offloaded to hardware */
74 #define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \
75 BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
76 BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)
/* Offload bridge port flags to the underlying switchdev device.
 * Two-phase: first an atomic PRE_BRIDGE_FLAGS notifier so the driver can
 * veto unsupported flags (with extack message), then the actual
 * BRIDGE_FLAGS attribute set, deferred to process context.
 * NOTE(review): several lines are elided in this excerpt; the early-return
 * paths between the two phases are not visible here.
 */
81 struct netlink_ext_ack *extack)
83 struct switchdev_attr attr = {
86 struct switchdev_notifier_port_attr_info info = {
	/* Only flags the hardware can handle are offered to the driver. */
91 mask &= BR_PORT_FLAGS_HW_OFFLOAD;
95 attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
96 attr.u.brport_flags.val = flags;
97 attr.u.brport_flags.mask = mask;
99 /* We run from atomic context here */
100 err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
102 err = notifier_to_errno(err);
103 if (err == -EOPNOTSUPP)
	/* Don't overwrite a more specific message set by the driver. */
107 if (extack && !extack->_msg)
108 NL_SET_ERR_MSG_MOD(extack,
109 "bridge flag offload is not supported");
113 attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
114 attr.flags = SWITCHDEV_F_DEFER;
116 err = switchdev_port_attr_set(p->dev, &attr, extack);
118 if (extack && !extack->_msg)
119 NL_SET_ERR_MSG_MOD(extack,
120 "error setting offload flag on port");
/* Fill a switchdev FDB notifier @item from bridge FDB entry @fdb.
 * For entries without a destination port, or local entries, the bridge
 * device itself is reported as the notification target.
 */
127 static void br_switchdev_fdb_populate(struct net_bridge *br,
128 struct switchdev_notifier_fdb_info *item,
129 const struct net_bridge_fdb_entry *fdb,
	/* dst may change concurrently; take a single consistent snapshot. */
132 const struct net_bridge_port *p = READ_ONCE(fdb->dst);
134 item->addr = fdb->key.addr.addr;
135 item->vid = fdb->key.vlan_id;
136 item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
137 item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
138 item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
139 item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
140 item->info.ctx = ctx;
/* Notify switchdev listeners of an FDB add/delete event of the given
 * RTM_* @type. User-added dynamic (non-static, non-ext-learned) entries
 * are deliberately not forwarded to drivers -- see comment below.
 */
144 br_switchdev_fdb_notify(struct net_bridge *br,
145 const struct net_bridge_fdb_entry *fdb, int type)
147 struct switchdev_notifier_fdb_info item;
149 /* Entries with these flags were created using ndm_state == NUD_REACHABLE,
150 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
151 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
152 * Drivers don't know how to deal with these, so don't notify them to
153 * avoid confusing them.
 */
155 if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
156 !test_bit(BR_FDB_STATIC, &fdb->flags) &&
157 !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
160 br_switchdev_fdb_populate(br, &item, fdb, NULL);
164 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
165 item.info.dev, &item.info, NULL);
168 call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
169 item.info.dev, &item.info, NULL);
/* Offload addition (or change, when @changed) of VLAN @vid with bridge
 * VLAN @flags on @dev to switchdev. Returns a switchdev errno.
 */
174 int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
175 bool changed, struct netlink_ext_ack *extack)
177 struct switchdev_obj_port_vlan v = {
179 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
185 return switchdev_port_obj_add(dev, &v.obj, extack);
/* Offload deletion of VLAN @vid on @dev to switchdev. */
188 int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
190 struct switchdev_obj_port_vlan v = {
192 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
196 return switchdev_port_obj_del(dev, &v.obj);
/* Assign a hardware domain to @joining: reuse the hwdom of an existing
 * port with the same physical parent ID, otherwise allocate the next
 * free hwdom bit (bit 0 is reserved; search starts at 1).
 */
199 static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
201 struct net_bridge *br = joining->br;
202 struct net_bridge_port *p;
205 /* joining is yet to be added to the port list. */
206 list_for_each_entry(p, &br->port_list, list) {
207 if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
208 joining->hwdom = p->hwdom;
	/* No sibling from the same ASIC found: claim a fresh domain. */
213 hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
214 if (hwdom >= BR_HWDOM_MAX)
217 set_bit(hwdom, &br->busy_hwdoms);
218 joining->hwdom = hwdom;
/* Release @leaving's hardware domain. The busy bit is only cleared when
 * no remaining port shares the hwdom (the loop body that bails out early
 * in that case is elided in this excerpt -- TODO confirm).
 */
222 static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
224 struct net_bridge *br = leaving->br;
225 struct net_bridge_port *p;
227 /* leaving is no longer in the port list. */
228 list_for_each_entry(p, &br->port_list, list) {
229 if (p->hwdom == leaving->hwdom)
233 clear_bit(leaving->hwdom, &br->busy_hwdoms);
/* Register @p as offloaded by the switch identified by @ppid: validate
 * that a port is not claimed by two different ASICs, assign a hwdom, and
 * enable the TX forwarding offload static key when requested.
 */
236 static int nbp_switchdev_add(struct net_bridge_port *p,
237 struct netdev_phys_item_id ppid,
239 struct netlink_ext_ack *extack)
243 if (p->offload_count) {
244 /* Prevent unsupported configurations such as a bridge port
245 * which is a bonding interface, and the member ports are from
246 * different hardware switches.
 */
248 if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
249 NL_SET_ERR_MSG_MOD(extack,
250 "Same bridge port cannot be offloaded by two physical switches");
254 /* Tolerate drivers that call switchdev_bridge_port_offload()
255 * more than once for the same bridge port, such as when the
256 * bridge port is an offloaded bonding/team interface.
 */
264 p->offload_count = 1;
266 err = nbp_switchdev_hwdom_set(p);
270 if (tx_fwd_offload) {
271 p->flags |= BR_TX_FWD_OFFLOAD;
272 static_branch_inc(&br_switchdev_tx_fwd_offload);
/* Undo nbp_switchdev_add(): drop one offload reference, and when the
 * last reference is gone release the hwdom and the TX fwd offload key.
 * WARNs on an unbalanced call with no outstanding offload.
 */
278 static void nbp_switchdev_del(struct net_bridge_port *p)
280 if (WARN_ON(!p->offload_count))
	/* Still offloaded by another lower device (e.g. bond member). */
285 if (p->offload_count)
289 nbp_switchdev_hwdom_put(p);
291 if (p->flags & BR_TX_FWD_OFFLOAD) {
292 p->flags &= ~BR_TX_FWD_OFFLOAD;
293 static_branch_dec(&br_switchdev_tx_fwd_offload);
/* Replay a single FDB entry to notifier @nb with the given @action,
 * carrying @ctx so the driver can identify the replay target.
 * Returns the errno extracted from the notifier return value.
 */
298 br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
299 const struct net_bridge_fdb_entry *fdb,
300 unsigned long action, const void *ctx)
302 struct switchdev_notifier_fdb_info item;
305 br_switchdev_fdb_populate(br, &item, fdb, ctx);
307 err = nb->notifier_call(nb, action, &item);
308 return notifier_to_errno(err);
/* Walk the bridge's FDB under RCU and replay every entry to @nb, as
 * adds or deletes depending on @adding. No-op unless @br_dev is a
 * bridge master device.
 */
312 br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
313 bool adding, struct notifier_block *nb)
315 struct net_bridge_fdb_entry *fdb;
316 struct net_bridge *br;
317 unsigned long action;
323 if (!netif_is_bridge_master(br_dev))
326 br = netdev_priv(br_dev);
329 action = SWITCHDEV_FDB_ADD_TO_DEVICE;
331 action = SWITCHDEV_FDB_DEL_TO_DEVICE;
335 hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
336 err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
/* Replay per-VLAN attributes (currently the VLAN-to-MSTI mapping) of the
 * bridge's VLAN group to notifier @nb via SWITCHDEV_PORT_ATTR_SET.
 */
346 static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
348 struct notifier_block *nb,
349 struct netlink_ext_ack *extack)
351 struct switchdev_notifier_port_attr_info attr_info = {
358 struct net_bridge *br = netdev_priv(br_dev);
359 struct net_bridge_vlan_group *vg;
360 struct switchdev_attr attr;
361 struct net_bridge_vlan *v;
364 attr_info.attr = &attr;
365 attr.orig_dev = br_dev;
367 vg = br_vlan_group(br);
371 list_for_each_entry(v, &vg->vlan_list, vlist) {
373 attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
374 attr.u.vlan_msti.vid = v->vid;
375 attr.u.vlan_msti.msti = v->msti;
377 err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
379 err = notifier_to_errno(err);
/* Deliver one VLAN object add/del (@action) for @dev directly to
 * notifier @nb, returning the errno from the notifier result.
 */
389 br_switchdev_vlan_replay_one(struct notifier_block *nb,
390 struct net_device *dev,
391 struct switchdev_obj_port_vlan *vlan,
392 const void *ctx, unsigned long action,
393 struct netlink_ext_ack *extack)
395 struct switchdev_notifier_port_obj_info obj_info = {
405 err = nb->notifier_call(nb, action, &obj_info);
406 return notifier_to_errno(err);
/* Replay every usable VLAN of group @vg on device @dev to notifier @nb.
 * VLANs failing br_vlan_should_use() (e.g. context-only entries) are
 * skipped; flags include PVID/untagged derived via br_vlan_flags().
 */
409 static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
410 struct net_device *dev,
411 struct net_bridge_vlan_group *vg,
412 const void *ctx, unsigned long action,
413 struct netlink_ext_ack *extack)
415 struct net_bridge_vlan *v;
422 pvid = br_get_pvid(vg);
424 list_for_each_entry(v, &vg->vlan_list, vlist) {
425 struct switchdev_obj_port_vlan vlan = {
427 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
428 .flags = br_vlan_flags(v, pvid),
432 if (!br_vlan_should_use(v))
435 err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
/* Replay the VLAN configuration of the whole bridge to notifier @nb:
 * first the bridge device's own VLAN group, then each port's group,
 * and finally per-VLAN attributes (MSTI). @adding selects obj add/del.
 */
444 static int br_switchdev_vlan_replay(struct net_device *br_dev,
445 const void *ctx, bool adding,
446 struct notifier_block *nb,
447 struct netlink_ext_ack *extack)
449 struct net_bridge *br = netdev_priv(br_dev);
450 struct net_bridge_port *p;
451 unsigned long action;
459 if (!netif_is_bridge_master(br_dev))
463 action = SWITCHDEV_PORT_OBJ_ADD;
465 action = SWITCHDEV_PORT_OBJ_DEL;
467 err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
468 ctx, action, extack);
472 list_for_each_entry(p, &br->port_list, list) {
473 struct net_device *dev = p->dev;
475 err = br_switchdev_vlan_replay_group(nb, dev,
477 ctx, action, extack);
483 err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
491 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* Completion context for a deferred MDB offload: identifies the bridge
 * port and (in an elided 'ip' member, used by br_switchdev_mdb_complete())
 * the multicast group whose port group should be flagged as offloaded.
 */
492 struct br_switchdev_mdb_complete_info {
493 struct net_bridge_port *port;
/* Deferred-object completion callback: when the hardware MDB add
 * finished, find the matching port group under multicast_lock and set
 * MDB_PG_FLAGS_OFFLOAD on it. @priv is a br_switchdev_mdb_complete_info.
 */
497 static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
499 struct br_switchdev_mdb_complete_info *data = priv;
500 struct net_bridge_port_group __rcu **pp;
501 struct net_bridge_port_group *p;
502 struct net_bridge_mdb_entry *mp;
503 struct net_bridge_port *port = data->port;
504 struct net_bridge *br = port->br;
509 spin_lock_bh(&br->multicast_lock);
510 mp = br_mdb_ip_get(br, &data->ip);
513 for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
515 if (p->key.port != port)
517 p->flags |= MDB_PG_FLAGS_OFFLOAD;
520 spin_unlock_bh(&br->multicast_lock);
/* Fill a switchdev MDB object from bridge MDB entry @mp: map the IPv4/
 * IPv6 group address to its multicast MAC, or copy the raw MAC address
 * for non-IP groups, and carry the VLAN id.
 */
525 static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
526 const struct net_bridge_mdb_entry *mp)
528 if (mp->addr.proto == htons(ETH_P_IP))
529 ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
530 #if IS_ENABLED(CONFIG_IPV6)
531 else if (mp->addr.proto == htons(ETH_P_IPV6))
532 ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
535 ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);
537 mdb->vid = mp->addr.vid;
/* Offload a host-joined MDB entry to one lower device of the bridge,
 * as an add or delete depending on the RTM_* @type. Deferred execution
 * (SWITCHDEV_F_DEFER); errors are intentionally not propagated.
 */
540 static void br_switchdev_host_mdb_one(struct net_device *dev,
541 struct net_device *lower_dev,
542 struct net_bridge_mdb_entry *mp,
545 struct switchdev_obj_port_mdb mdb = {
547 .id = SWITCHDEV_OBJ_ID_HOST_MDB,
548 .flags = SWITCHDEV_F_DEFER,
553 br_switchdev_mdb_populate(&mdb, mp);
557 switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
560 switchdev_port_obj_del(lower_dev, &mdb.obj);
/* Fan out a host MDB add/delete to every lower device of the bridge. */
565 static void br_switchdev_host_mdb(struct net_device *dev,
566 struct net_bridge_mdb_entry *mp, int type)
568 struct net_device *lower_dev;
569 struct list_head *iter;
571 netdev_for_each_lower_dev(dev, lower_dev, iter)
572 br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
/* Deliver one queued MDB object to notifier @nb with @action, returning
 * the errno from the notifier result.
 */
576 br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
577 const struct switchdev_obj_port_mdb *mdb,
578 unsigned long action, const void *ctx,
579 struct netlink_ext_ack *extack)
581 struct switchdev_notifier_port_obj_info obj_info = {
591 err = nb->notifier_call(nb, action, &obj_info);
592 return notifier_to_errno(err);
/* Allocate (GFP_ATOMIC -- called under RCU in the replay walk) and queue
 * a switchdev MDB object of type @id for @mp onto @mdb_list, to be
 * replayed later from blocking context. Caller frees the queued objects.
 */
595 static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
596 enum switchdev_obj_id id,
597 const struct net_bridge_mdb_entry *mp,
598 struct net_device *orig_dev)
600 struct switchdev_obj_port_mdb *mdb;
602 mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
607 mdb->obj.orig_dev = orig_dev;
608 br_switchdev_mdb_populate(mdb, mp);
609 list_add_tail(&mdb->obj.list, mdb_list);
/* Notify switchdev of an MDB change. Without a port group (@pg NULL)
 * this is a host join/leave handled via br_switchdev_host_mdb().
 * For port groups, adds use a completion callback so the OFFLOAD flag
 * is only set after the hardware confirms; on enqueue failure the
 * completion context is freed here.
 */
614 void br_switchdev_mdb_notify(struct net_device *dev,
615 struct net_bridge_mdb_entry *mp,
616 struct net_bridge_port_group *pg,
619 struct br_switchdev_mdb_complete_info *complete_info;
620 struct switchdev_obj_port_mdb mdb = {
622 .id = SWITCHDEV_OBJ_ID_PORT_MDB,
623 .flags = SWITCHDEV_F_DEFER,
628 return br_switchdev_host_mdb(dev, mp, type);
630 br_switchdev_mdb_populate(&mdb, mp);
632 mdb.obj.orig_dev = pg->key.port->dev;
635 complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
638 complete_info->port = pg->key.port;
639 complete_info->ip = mp->addr;
640 mdb.obj.complete_priv = complete_info;
641 mdb.obj.complete = br_switchdev_mdb_complete;
642 if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
643 kfree(complete_info);
646 switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
/* Replay the bridge's MDB (host-joined entries plus port groups bound to
 * @dev) to notifier @nb. Entries are first queued under RCU into a local
 * list, then notified from blocking context -- see the comment below.
 * Compiled out (no IGMP snooping) paths return without doing work.
 */
653 br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
654 const void *ctx, bool adding, struct notifier_block *nb,
655 struct netlink_ext_ack *extack)
657 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
658 const struct net_bridge_mdb_entry *mp;
659 struct switchdev_obj *obj, *tmp;
660 struct net_bridge *br;
661 unsigned long action;
670 if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
673 br = netdev_priv(br_dev);
675 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
678 /* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
679 * because the write-side protection is br->multicast_lock. But we
680 * need to emulate the [ blocking ] calling context of a regular
681 * switchdev event, so since both br->multicast_lock and RCU read side
682 * critical sections are atomic, we have no choice but to pick the RCU
683 * read side lock, queue up all our events, leave the critical section
684 * and notify switchdev from blocking context.
 */
688 hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
689 struct net_bridge_port_group __rcu * const *pp;
690 const struct net_bridge_port_group *p;
692 if (mp->host_joined) {
693 err = br_switchdev_mdb_queue_one(&mdb_list,
694 SWITCHDEV_OBJ_ID_HOST_MDB,
	/* Only replay groups joined through the port being offloaded. */
702 for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
704 if (p->key.port->dev != dev)
707 err = br_switchdev_mdb_queue_one(&mdb_list,
708 SWITCHDEV_OBJ_ID_PORT_MDB,
720 action = SWITCHDEV_PORT_OBJ_ADD;
722 action = SWITCHDEV_PORT_OBJ_DEL;
724 list_for_each_entry(obj, &mdb_list, list) {
725 err = br_switchdev_mdb_replay_one(nb, dev,
726 SWITCHDEV_OBJ_PORT_MDB(obj),
727 action, ctx, extack);
	/* Free the queued objects whether or not replay succeeded. */
733 list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
734 list_del(&obj->list);
735 kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
/* Replay the bridge's current state (VLANs, MDB, FDB) to a port that is
 * starting offload, so hardware catches up with software. -EOPNOTSUPP
 * from a replay step is tolerated (driver doesn't implement it).
 */
745 static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
746 struct notifier_block *atomic_nb,
747 struct notifier_block *blocking_nb,
748 struct netlink_ext_ack *extack)
750 struct net_device *br_dev = p->br->dev;
751 struct net_device *dev = p->dev;
754 err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
755 if (err && err != -EOPNOTSUPP)
758 err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
760 if (err && err != -EOPNOTSUPP)
763 err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
764 if (err && err != -EOPNOTSUPP)
/* Mirror of nbp_switchdev_sync_objs() for a port leaving offload: replay
 * deletions of FDB, MDB and VLAN state. Best-effort; errors ignored.
 */
770 static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
772 struct notifier_block *atomic_nb,
773 struct notifier_block *blocking_nb)
775 struct net_device *br_dev = p->br->dev;
776 struct net_device *dev = p->dev;
778 br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);
780 br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
782 br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
785 /* Let the bridge know that this port is offloaded, so that it can assign a
786 * switchdev hardware domain to it.
 */
788 int br_switchdev_port_offload(struct net_bridge_port *p,
789 struct net_device *dev, const void *ctx,
790 struct notifier_block *atomic_nb,
791 struct notifier_block *blocking_nb,
793 struct netlink_ext_ack *extack)
795 struct netdev_phys_item_id ppid;
	/* Identify the physical switch backing @dev. */
798 err = dev_get_port_parent_id(dev, &ppid, false);
802 err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	/* Roll back the offload registration if the initial sync fails. */
806 err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
808 goto out_switchdev_del;
813 nbp_switchdev_del(p);
/* Reverse of br_switchdev_port_offload(): replay deletions of the
 * bridge's state to the port, then drop its offload registration.
 */
818 void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
819 struct notifier_block *atomic_nb,
820 struct notifier_block *blocking_nb)
822 nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb)
824 nbp_switchdev_del(p);