// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

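/* Walk the bridge's detected-router list and emit one MDBA_ROUTER_PORT
 * nest per router port: its ifindex, the remaining router timer and the
 * configured router type.
 */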
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

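/* Translate the internal port-group flags into the UAPI entry state
 * (permanent vs. temporary) and entry flags (e.g. offloaded to hardware).
 */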
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}

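/* Dump all MDB entries of one bridge. cb->args[1] holds the entry index
 * to resume from when a previous pass ran out of skb space.
 */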
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			struct nlattr *nest_ent;
			struct br_mdb_entry e;

			port = p->port;
			if (!port)
				continue;

			memset(&e, 0, sizeof(e));
			e.ifindex = port->dev->ifindex;
			e.vid = p->addr.vid;
			__mdb_entry_fill_flags(&e, p->flags);
			if (p->addr.proto == htons(ETH_P_IP))
				e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
			if (p->addr.proto == htons(ETH_P_IPV6))
				e.addr.u.ip6 = p->addr.u.ip6;
#endif
			e.addr.proto = p->addr.proto;
			nest_ent = nla_nest_start(skb, MDBA_MDB_ENTRY_INFO);
			if (!nest_ent) {
				nla_nest_cancel(skb, nest2);
				err = -EMSGSIZE;
				goto out;
			}
			if (nla_put_nohdr(skb, sizeof(e), &e) ||
			    nla_put_u32(skb,
					MDBA_MDB_EATTR_TIMER,
					br_timer_value(&p->timer))) {
				nla_nest_cancel(skb, nest_ent);
				nla_nest_cancel(skb, nest2);
				err = -EMSGSIZE;
				goto out;
			}
			nla_nest_end(skb, nest_ent);
		}
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

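/* Strict validation of an RTM_GETMDB dump request: fixed-size header,
 * no ifindex filtering, no trailing attributes.
 */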
static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

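/* RTM_GETMDB dump handler: one NLM_F_MULTI message per bridge device,
 * carrying that bridge's MDB entries and multicast router ports.
 */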
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

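/* Build a single RTM_NEWMDB/RTM_DELMDB notification message for @entry. */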
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}

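/* Context handed to the deferred switchdev completion, so the offloaded
 * port group can be looked up again and marked MDB_PG_FLAGS_OFFLOAD.
 */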
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

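/* Propagate a host-joined group (SWITCHDEV_OBJ_ID_HOST_MDB) to one
 * switchdev lower device of the bridge.
 */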
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct br_mdb_entry *entry, int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};

	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct br_mdb_entry *entry, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, entry, type);
}

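/* Offload the MDB change via switchdev (deferred, completed in
 * br_mdb_complete()), then notify RTNLGRP_MDB listeners over rtnetlink.
 */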
static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (p && port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(port_dev, &mdb.obj, NULL))
				kfree(complete_info);
		}
	} else if (p && port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	if (!p)
		br_mdb_switchdev_host(dev, entry, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 flags)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	if (port)
		entry.ifindex = port->dev->ifindex;
	else
		entry.ifindex = dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.vid = group->vid;
	__mdb_entry_fill_flags(&entry, flags);
	__br_mdb_notify(dev, port, &entry, type);
}

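/* Build a router-port notification carrying an MDBA_ROUTER nest. */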
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

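/* Reject entries user space must not install: no port, non-multicast or
 * reserved link-local addresses, invalid state or VLAN id.
 */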
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}

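/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request, returning the
 * bridge device and the validated entry payload.
 */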
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL,
			  NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

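/* Add a new port group to the MDB entry for @group; the port list is
 * kept ordered by port pointer. Called with br->multicast_lock held.
 */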
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (!mp) {
		mp = br_multicast_new_group(br, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}

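/* Resolve the target port and install the group under multicast_lock. */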
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

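/* RTM_NEWMDB handler. If VLAN filtering is enabled and no VLAN is given,
 * the entry is installed on every VLAN configured on the port.
 */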
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}

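/* Remove the port group matching @entry. If the MDB entry ends up with
 * no ports and no host join, arm its timer so it gets destroyed.
 */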
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		kfree_rcu(p, rcu);
		err = 0;

		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

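/* RTM_DELMDB handler; mirrors br_mdb_add() for deletion. */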
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}

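/* Register/unregister the PF_BRIDGE MDB rtnetlink handlers. */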
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}