net/bridge/br_multicast.c (platform/kernel/linux-starfive.git)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Bridge multicast support.
4  *
5  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6  */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36 #include "br_private_mcast_eht.h"
37
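/* rhashtable parameters for the two multicast lookup tables: the MDB table is
 * keyed by the full struct br_ip group address, the per-port S,G table by the
 * (port, group) sg_key.
 */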
38 static const struct rhashtable_params br_mdb_rht_params = {
39         .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
40         .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
41         .key_len = sizeof(struct br_ip),
42         .automatic_shrinking = true,
43 };
44
45 static const struct rhashtable_params br_sg_port_rht_params = {
46         .head_offset = offsetof(struct net_bridge_port_group, rhnode),
47         .key_offset = offsetof(struct net_bridge_port_group, key),
48         .key_len = sizeof(struct net_bridge_port_group_sg_key),
49         .automatic_shrinking = true,
50 };
51
52 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
53                                        struct bridge_mcast_own_query *query);
54 static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
55                                         struct net_bridge_mcast_port *pmctx);
56 static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
57                                          struct net_bridge_mcast_port *pmctx,
58                                          __be32 group,
59                                          __u16 vid,
60                                          const unsigned char *src);
61 static void br_multicast_port_group_rexmit(struct timer_list *t);
62
63 static void
64 br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
65 static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
66                                         struct net_bridge_mcast_port *pmctx);
67 #if IS_ENABLED(CONFIG_IPV6)
68 static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
69                                          struct net_bridge_mcast_port *pmctx,
70                                          const struct in6_addr *group,
71                                          __u16 vid, const unsigned char *src);
72 #endif
73 static struct net_bridge_port_group *
74 __br_multicast_add_group(struct net_bridge_mcast *brmctx,
75                          struct net_bridge_mcast_port *pmctx,
76                          struct br_ip *group,
77                          const unsigned char *src,
78                          u8 filter_mode,
79                          bool igmpv2_mldv1,
80                          bool blocked);
81 static void br_multicast_find_del_pg(struct net_bridge *br,
82                                      struct net_bridge_port_group *pg);
83 static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
84
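/* Look up the S,G port group entry for a given (port, group) key; must be
 * called with multicast_lock held.
 */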
85 static struct net_bridge_port_group *
86 br_sg_port_find(struct net_bridge *br,
87                 struct net_bridge_port_group_sg_key *sg_p)
88 {
89         lockdep_assert_held_once(&br->multicast_lock);
90
91         return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
92                                       br_sg_port_rht_params);
93 }
94
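/* RCU-based MDB lookup for the forwarding fast path; the caller must be in an
 * RCU read-side critical section.
 */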
95 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
96                                                       struct br_ip *dst)
97 {
98         return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
99 }
100
101 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
102                                            struct br_ip *dst)
103 {
104         struct net_bridge_mdb_entry *ent;
105
106         lockdep_assert_held_once(&br->multicast_lock);
107
108         rcu_read_lock();
109         ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
110         rcu_read_unlock();
111
112         return ent;
113 }
114
115 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
116                                                    __be32 dst, __u16 vid)
117 {
118         struct br_ip br_dst;
119
120         memset(&br_dst, 0, sizeof(br_dst));
121         br_dst.dst.ip4 = dst;
122         br_dst.proto = htons(ETH_P_IP);
123         br_dst.vid = vid;
124
125         return br_mdb_ip_get(br, &br_dst);
126 }
127
128 #if IS_ENABLED(CONFIG_IPV6)
129 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
130                                                    const struct in6_addr *dst,
131                                                    __u16 vid)
132 {
133         struct br_ip br_dst;
134
135         memset(&br_dst, 0, sizeof(br_dst));
136         br_dst.dst.ip6 = *dst;
137         br_dst.proto = htons(ETH_P_IPV6);
138         br_dst.vid = vid;
139
140         return br_mdb_ip_get(br, &br_dst);
141 }
142 #endif
143
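/* Forwarding lookup: map an skb's destination (and, for IGMPv3/MLDv2 mode,
 * its source) address to an MDB entry, falling back from S,G to *,G and to
 * an L2 entry for non-IP traffic.
 */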
144 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
145                                         struct sk_buff *skb, u16 vid)
146 {
147         struct net_bridge *br = brmctx->br;
148         struct br_ip ip;
149
150         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
151             br_multicast_ctx_vlan_global_disabled(brmctx))
152                 return NULL;
153
154         if (BR_INPUT_SKB_CB(skb)->igmp)
155                 return NULL;
156
157         memset(&ip, 0, sizeof(ip));
158         ip.proto = skb->protocol;
159         ip.vid = vid;
160
161         switch (skb->protocol) {
162         case htons(ETH_P_IP):
163                 ip.dst.ip4 = ip_hdr(skb)->daddr;
164                 if (brmctx->multicast_igmp_version == 3) {
165                         struct net_bridge_mdb_entry *mdb;
166
167                         ip.src.ip4 = ip_hdr(skb)->saddr;
168                         mdb = br_mdb_ip_get_rcu(br, &ip);
169                         if (mdb)
170                                 return mdb;
171                         ip.src.ip4 = 0;
172                 }
173                 break;
174 #if IS_ENABLED(CONFIG_IPV6)
175         case htons(ETH_P_IPV6):
176                 ip.dst.ip6 = ipv6_hdr(skb)->daddr;
177                 if (brmctx->multicast_mld_version == 2) {
178                         struct net_bridge_mdb_entry *mdb;
179
180                         ip.src.ip6 = ipv6_hdr(skb)->saddr;
181                         mdb = br_mdb_ip_get_rcu(br, &ip);
182                         if (mdb)
183                                 return mdb;
184                         memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
185                 }
186                 break;
187 #endif
188         default:
189                 ip.proto = 0;
190                 ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
191         }
192
193         return br_mdb_ip_get_rcu(br, &ip);
194 }
195
196 /* IMPORTANT: this function must be used only when the contexts cannot be
197  * passed down (e.g. timer) and must be used for read-only purposes because
198  * the vlan snooping option can change, so it can return any context
199  * (non-vlan or vlan). Its initial intended purpose is to read timer values
200  * from the *current* context based on the option. At worst that could lead
201  * to inconsistent timers when the contexts are changed, e.g. a src timer
202  * which needs to re-arm with a specific delay taken from the old context.
203  */
204 static struct net_bridge_mcast_port *
205 br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
206 {
207         struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
208         struct net_bridge_vlan *vlan;
209
210         lockdep_assert_held_once(&pg->key.port->br->multicast_lock);
211
212         /* if vlan snooping is disabled use the port's multicast context */
213         if (!pg->key.addr.vid ||
214             !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
215                 goto out;
216
217         /* locking is tricky here: due to different rules for multicast and
218          * vlans we need to take rcu to find the vlan and make sure it has
219          * the BR_VLFLAG_MCAST_ENABLED flag set. That flag can only change
220          * under multicast_lock, which must already be held here, so the
221          * vlan's pmctx can safely be used on return.
222          */
223         rcu_read_lock();
224         vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
225         if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
226                 pmctx = &vlan->port_mcast_ctx;
227         else
228                 pmctx = NULL;
229         rcu_read_unlock();
230 out:
231         return pmctx;
232 }
233
234 /* when snooping we need to check if the contexts should be used
235  * in the following order:
236  * - if pmctx is non-NULL (port), check if it should be used
237  * - if pmctx is NULL (bridge), check if brmctx should be used
238  */
239 static bool
240 br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
241                             const struct net_bridge_mcast_port *pmctx)
242 {
243         if (!netif_running(brmctx->br->dev))
244                 return false;
245
246         if (pmctx)
247                 return !br_multicast_port_ctx_state_disabled(pmctx);
248         else
249                 return !br_multicast_ctx_vlan_disabled(brmctx);
250 }
251
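/* A port group matches when it belongs to the same port and, if the port has
 * multicast-to-unicast enabled, when the reporting host's MAC also matches.
 */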
252 static bool br_port_group_equal(struct net_bridge_port_group *p,
253                                 struct net_bridge_port *port,
254                                 const unsigned char *src)
255 {
256         if (p->key.port != port)
257                 return false;
258
259         if (!(port->flags & BR_MULTICAST_TO_UNICAST))
260                 return true;
261
262         return ether_addr_equal(src, p->eth_addr);
263 }
264
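/* Install a kernel-managed S,G entry for sg_ip on pg's port, if one doesn't
 * exist yet, and mark it as added on behalf of a *,G EXCLUDE group.
 */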
265 static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
266                                 struct net_bridge_port_group *pg,
267                                 struct br_ip *sg_ip)
268 {
269         struct net_bridge_port_group_sg_key sg_key;
270         struct net_bridge_port_group *src_pg;
271         struct net_bridge_mcast *brmctx;
272
273         memset(&sg_key, 0, sizeof(sg_key));
274         brmctx = br_multicast_port_ctx_get_global(pmctx);
275         sg_key.port = pg->key.port;
276         sg_key.addr = *sg_ip;
277         if (br_sg_port_find(brmctx->br, &sg_key))
278                 return;
279
280         src_pg = __br_multicast_add_group(brmctx, pmctx,
281                                           sg_ip, pg->eth_addr,
282                                           MCAST_INCLUDE, false, false);
283         if (IS_ERR_OR_NULL(src_pg) ||
284             src_pg->rt_protocol != RTPROT_KERNEL)
285                 return;
286
287         src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
288 }
289
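/* Remove the kernel-managed S,G entry for sg_ip on pg's port, but only if it
 * was automatically installed for a *,G EXCLUDE group (STAR_EXCL).
 */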
290 static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
291                                 struct br_ip *sg_ip)
292 {
293         struct net_bridge_port_group_sg_key sg_key;
294         struct net_bridge *br = pg->key.port->br;
295         struct net_bridge_port_group *src_pg;
296
297         memset(&sg_key, 0, sizeof(sg_key));
298         sg_key.port = pg->key.port;
299         sg_key.addr = *sg_ip;
300         src_pg = br_sg_port_find(br, &sg_key);
301         if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
302             src_pg->rt_protocol != RTPROT_KERNEL)
303                 return;
304
305         br_multicast_find_del_pg(br, src_pg);
306 }
307
308 /* When a port group transitions to (or is added as) EXCLUDE we need to add it
309  * to all other ports' S,G entries which are not blocked by the current group
310  * for proper replication; the assumption is that any S,G blocked entries
311  * are already added so the S,G,port lookup should skip them.
312  * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
313  * deleted we need to remove it from all ports' S,G entries where it was
314  * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
315  */
316 void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
317                                      u8 filter_mode)
318 {
319         struct net_bridge *br = pg->key.port->br;
320         struct net_bridge_port_group *pg_lst;
321         struct net_bridge_mcast_port *pmctx;
322         struct net_bridge_mdb_entry *mp;
323         struct br_ip sg_ip;
324
325         if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
326                 return;
327
328         mp = br_mdb_ip_get(br, &pg->key.addr);
329         if (!mp)
330                 return;
331         pmctx = br_multicast_pg_to_port_ctx(pg);
332         if (!pmctx)
333                 return;
334
335         memset(&sg_ip, 0, sizeof(sg_ip));
336         sg_ip = pg->key.addr;
337
338         for (pg_lst = mlock_dereference(mp->ports, br);
339              pg_lst;
340              pg_lst = mlock_dereference(pg_lst->next, br)) {
341                 struct net_bridge_group_src *src_ent;
342
343                 if (pg_lst == pg)
344                         continue;
345                 hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
346                         if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
347                                 continue;
348                         sg_ip.src = src_ent->addr.src;
349                         switch (filter_mode) {
350                         case MCAST_INCLUDE:
351                                 __fwd_del_star_excl(pg, &sg_ip);
352                                 break;
353                         case MCAST_EXCLUDE:
354                                 __fwd_add_star_excl(pmctx, pg, &sg_ip);
355                                 break;
356                         }
357                 }
358         }
359 }
360
361 /* called when adding a new S,G with host_joined == false by default */
362 static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
363                                        struct net_bridge_port_group *sg)
364 {
365         struct net_bridge_mdb_entry *sg_mp;
366
367         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
368                 return;
369         if (!star_mp->host_joined)
370                 return;
371
372         sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
373         if (!sg_mp)
374                 return;
375         sg_mp->host_joined = true;
376 }
377
378 /* set the host_joined state of all of *,G's S,G entries */
379 static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
380 {
381         struct net_bridge *br = star_mp->br;
382         struct net_bridge_mdb_entry *sg_mp;
383         struct net_bridge_port_group *pg;
384         struct br_ip sg_ip;
385
386         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
387                 return;
388
389         memset(&sg_ip, 0, sizeof(sg_ip));
390         sg_ip = star_mp->addr;
391         for (pg = mlock_dereference(star_mp->ports, br);
392              pg;
393              pg = mlock_dereference(pg->next, br)) {
394                 struct net_bridge_group_src *src_ent;
395
396                 hlist_for_each_entry(src_ent, &pg->src_list, node) {
397                         if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
398                                 continue;
399                         sg_ip.src = src_ent->addr.src;
400                         sg_mp = br_mdb_ip_get(br, &sg_ip);
401                         if (!sg_mp)
402                                 continue;
403                         sg_mp->host_joined = star_mp->host_joined;
404                 }
405         }
406 }
407
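/* Remove the automatically added *,G EXCLUDE ports from an S,G entry: once
 * only STAR_EXCL and permanent ports remain, the kernel-added ones (and the
 * host join) are dropped.
 */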
408 static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
409 {
410         struct net_bridge_port_group __rcu **pp;
411         struct net_bridge_port_group *p;
412
413         /* *,G exclude ports are only added to S,G entries */
414         if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
415                 return;
416
417         /* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports;
418          * we should ignore perm entries since they're managed by user-space
419          */
420         for (pp = &sgmp->ports;
421              (p = mlock_dereference(*pp, sgmp->br)) != NULL;
422              pp = &p->next)
423                 if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
424                                   MDB_PG_FLAGS_PERMANENT)))
425                         return;
426
427         /* currently the host can only have joined the *,G which means
428          * we treat it as EXCLUDE {}, so for an S,G it's considered a
429          * STAR_EXCLUDE entry and we can safely leave it
430          */
431         sgmp->host_joined = false;
432
433         for (pp = &sgmp->ports;
434              (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
435                 if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
436                         br_multicast_del_pg(sgmp, p, pp);
437                 else
438                         pp = &p->next;
439         }
440 }
441
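/* Add a newly created S,G entry to all EXCLUDE-mode ports of its *,G entry by
 * installing kernel-managed STAR_EXCL port groups for them.
 */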
442 void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
443                                        struct net_bridge_port_group *sg)
444 {
445         struct net_bridge_port_group_sg_key sg_key;
446         struct net_bridge *br = star_mp->br;
447         struct net_bridge_mcast_port *pmctx;
448         struct net_bridge_port_group *pg;
449         struct net_bridge_mcast *brmctx;
450
451         if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
452                 return;
453         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
454                 return;
455
456         br_multicast_sg_host_state(star_mp, sg);
457         memset(&sg_key, 0, sizeof(sg_key));
458         sg_key.addr = sg->key.addr;
459         /* we need to add all exclude ports to the S,G */
460         for (pg = mlock_dereference(star_mp->ports, br);
461              pg;
462              pg = mlock_dereference(pg->next, br)) {
463                 struct net_bridge_port_group *src_pg;
464
465                 if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
466                         continue;
467
468                 sg_key.port = pg->key.port;
469                 if (br_sg_port_find(br, &sg_key))
470                         continue;
471
472                 pmctx = br_multicast_pg_to_port_ctx(pg);
473                 if (!pmctx)
474                         continue;
475                 brmctx = br_multicast_port_ctx_get_global(pmctx);
476
477                 src_pg = __br_multicast_add_group(brmctx, pmctx,
478                                                   &sg->key.addr,
479                                                   sg->eth_addr,
480                                                   MCAST_INCLUDE, false, false);
481                 if (IS_ERR_OR_NULL(src_pg) ||
482                     src_pg->rt_protocol != RTPROT_KERNEL)
483                         continue;
484                 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
485         }
486 }
487
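/* Install forwarding state for a group source: create its kernel-managed S,G
 * port group and propagate it to the *,G entry's EXCLUDE ports.
 */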
488 static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
489 {
490         struct net_bridge_mdb_entry *star_mp;
491         struct net_bridge_mcast_port *pmctx;
492         struct net_bridge_port_group *sg;
493         struct net_bridge_mcast *brmctx;
494         struct br_ip sg_ip;
495
496         if (src->flags & BR_SGRP_F_INSTALLED)
497                 return;
498
499         memset(&sg_ip, 0, sizeof(sg_ip));
500         pmctx = br_multicast_pg_to_port_ctx(src->pg);
501         if (!pmctx)
502                 return;
503         brmctx = br_multicast_port_ctx_get_global(pmctx);
504         sg_ip = src->pg->key.addr;
505         sg_ip.src = src->addr.src;
506
507         sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
508                                       src->pg->eth_addr, MCAST_INCLUDE, false,
509                                       !timer_pending(&src->timer));
510         if (IS_ERR_OR_NULL(sg))
511                 return;
512         src->flags |= BR_SGRP_F_INSTALLED;
513         sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;
514
515         /* if it was added by user-space as perm we can skip next steps */
516         if (sg->rt_protocol != RTPROT_KERNEL &&
517             (sg->flags & MDB_PG_FLAGS_PERMANENT))
518                 return;
519
520         /* the kernel is now responsible for removing this S,G */
521         del_timer(&sg->timer);
522         star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
523         if (!star_mp)
524                 return;
525
526         br_multicast_sg_add_exclude_ports(star_mp, sg);
527 }
528
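/* Tear down the S,G port group that was installed for this source, leaving
 * user-added permanent entries in place.
 */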
529 static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
530                                         bool fastleave)
531 {
532         struct net_bridge_port_group *p, *pg = src->pg;
533         struct net_bridge_port_group __rcu **pp;
534         struct net_bridge_mdb_entry *mp;
535         struct br_ip sg_ip;
536
537         memset(&sg_ip, 0, sizeof(sg_ip));
538         sg_ip = pg->key.addr;
539         sg_ip.src = src->addr.src;
540
541         mp = br_mdb_ip_get(src->br, &sg_ip);
542         if (!mp)
543                 return;
544
545         for (pp = &mp->ports;
546              (p = mlock_dereference(*pp, src->br)) != NULL;
547              pp = &p->next) {
548                 if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
549                         continue;
550
551                 if (p->rt_protocol != RTPROT_KERNEL &&
552                     (p->flags & MDB_PG_FLAGS_PERMANENT))
553                         break;
554
555                 if (fastleave)
556                         p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
557                 br_multicast_del_pg(mp, p, pp);
558                 break;
559         }
560         src->flags &= ~BR_SGRP_F_INSTALLED;
561 }
562
563 /* install S,G and based on src's timer enable or disable forwarding */
564 static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
565 {
566         struct net_bridge_port_group_sg_key sg_key;
567         struct net_bridge_port_group *sg;
568         u8 old_flags;
569
570         br_multicast_fwd_src_add(src);
571
572         memset(&sg_key, 0, sizeof(sg_key));
573         sg_key.addr = src->pg->key.addr;
574         sg_key.addr.src = src->addr.src;
575         sg_key.port = src->pg->key.port;
576
577         sg = br_sg_port_find(src->br, &sg_key);
578         if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
579                 return;
580
581         old_flags = sg->flags;
582         if (timer_pending(&src->timer))
583                 sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
584         else
585                 sg->flags |= MDB_PG_FLAGS_BLOCKED;
586
587         if (old_flags != sg->flags) {
588                 struct net_bridge_mdb_entry *sg_mp;
589
590                 sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
591                 if (!sg_mp)
592                         return;
593                 br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
594         }
595 }
596
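/* Destruction callbacks below are invoked from br_multicast_gc() after the
 * entries have been unlinked under multicast_lock.
 */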
597 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
598 {
599         struct net_bridge_mdb_entry *mp;
600
601         mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
602         WARN_ON(!hlist_unhashed(&mp->mdb_node));
603         WARN_ON(mp->ports);
604
605         del_timer_sync(&mp->timer);
606         kfree_rcu(mp, rcu);
607 }
608
609 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
610 {
611         struct net_bridge *br = mp->br;
612
613         rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
614                                br_mdb_rht_params);
615         hlist_del_init_rcu(&mp->mdb_node);
616         hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
617         queue_work(system_long_wq, &br->mcast_gc_work);
618 }
619
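/* Group timer expiry: drop the bridge's own (host) membership and delete the
 * MDB entry if no port groups remain.
 */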
620 static void br_multicast_group_expired(struct timer_list *t)
621 {
622         struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
623         struct net_bridge *br = mp->br;
624
625         spin_lock(&br->multicast_lock);
626         if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
627             timer_pending(&mp->timer))
628                 goto out;
629
630         br_multicast_host_leave(mp, true);
631
632         if (mp->ports)
633                 goto out;
634         br_multicast_del_mdb_entry(mp);
635 out:
636         spin_unlock(&br->multicast_lock);
637 }
638
639 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
640 {
641         struct net_bridge_group_src *src;
642
643         src = container_of(gc, struct net_bridge_group_src, mcast_gc);
644         WARN_ON(!hlist_unhashed(&src->node));
645
646         del_timer_sync(&src->timer);
647         kfree_rcu(src, rcu);
648 }
649
650 void br_multicast_del_group_src(struct net_bridge_group_src *src,
651                                 bool fastleave)
652 {
653         struct net_bridge *br = src->pg->key.port->br;
654
655         br_multicast_fwd_src_remove(src, fastleave);
656         hlist_del_init_rcu(&src->node);
657         src->pg->src_ents--;
658         hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
659         queue_work(system_long_wq, &br->mcast_gc_work);
660 }
661
662 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
663 {
664         struct net_bridge_port_group *pg;
665
666         pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
667         WARN_ON(!hlist_unhashed(&pg->mglist));
668         WARN_ON(!hlist_empty(&pg->src_list));
669
670         del_timer_sync(&pg->rexmit_timer);
671         del_timer_sync(&pg->timer);
672         kfree_rcu(pg, rcu);
673 }
674
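/* Unlink a port group from its MDB entry, remove its sources and related S,G
 * state, notify userspace and queue it for deferred freeing.
 */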
675 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
676                          struct net_bridge_port_group *pg,
677                          struct net_bridge_port_group __rcu **pp)
678 {
679         struct net_bridge *br = pg->key.port->br;
680         struct net_bridge_group_src *ent;
681         struct hlist_node *tmp;
682
683         rcu_assign_pointer(*pp, pg->next);
684         hlist_del_init(&pg->mglist);
685         br_multicast_eht_clean_sets(pg);
686         hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
687                 br_multicast_del_group_src(ent, false);
688         br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
689         if (!br_multicast_is_star_g(&mp->addr)) {
690                 rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
691                                        br_sg_port_rht_params);
692                 br_multicast_sg_del_exclude_ports(mp);
693         } else {
694                 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
695         }
696         hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
697         queue_work(system_long_wq, &br->mcast_gc_work);
698
699         if (!mp->ports && !mp->host_joined && netif_running(br->dev))
700                 mod_timer(&mp->timer, jiffies);
701 }
702
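/* Look up pg's MDB entry and remove the port group from it. */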
703 static void br_multicast_find_del_pg(struct net_bridge *br,
704                                      struct net_bridge_port_group *pg)
705 {
706         struct net_bridge_port_group __rcu **pp;
707         struct net_bridge_mdb_entry *mp;
708         struct net_bridge_port_group *p;
709
710         mp = br_mdb_ip_get(br, &pg->key.addr);
711         if (WARN_ON(!mp))
712                 return;
713
714         for (pp = &mp->ports;
715              (p = mlock_dereference(*pp, br)) != NULL;
716              pp = &p->next) {
717                 if (p != pg)
718                         continue;
719
720                 br_multicast_del_pg(mp, pg, pp);
721                 return;
722         }
723
724         WARN_ON(1);
725 }
726
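/* Port group timer expiry: move the group to INCLUDE mode, drop sources whose
 * timers have expired and remove the port group entirely if none are left.
 */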
727 static void br_multicast_port_group_expired(struct timer_list *t)
728 {
729         struct net_bridge_port_group *pg = from_timer(pg, t, timer);
730         struct net_bridge_group_src *src_ent;
731         struct net_bridge *br = pg->key.port->br;
732         struct hlist_node *tmp;
733         bool changed;
734
735         spin_lock(&br->multicast_lock);
736         if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
737             hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
738                 goto out;
739
740         changed = !!(pg->filter_mode == MCAST_EXCLUDE);
741         pg->filter_mode = MCAST_INCLUDE;
742         hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
743                 if (!timer_pending(&src_ent->timer)) {
744                         br_multicast_del_group_src(src_ent, false);
745                         changed = true;
746                 }
747         }
748
749         if (hlist_empty(&pg->src_list)) {
750                 br_multicast_find_del_pg(br, pg);
751         } else if (changed) {
752                 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
753
754                 if (changed && br_multicast_is_star_g(&pg->key.addr))
755                         br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
756
757                 if (WARN_ON(!mp))
758                         goto out;
759                 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
760         }
761 out:
762         spin_unlock(&br->multicast_lock);
763 }
764
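/* Invoke the destroy callbacks of all entries queued for garbage collection. */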
765 static void br_multicast_gc(struct hlist_head *head)
766 {
767         struct net_bridge_mcast_gc *gcent;
768         struct hlist_node *tmp;
769
770         hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
771                 hlist_del_init(&gcent->gc_node);
772                 gcent->destroy(gcent);
773         }
774 }
775
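/* For queries generated in a vlan context, tag the skb with the vlan id
 * unless the vlan egresses untagged.
 */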
776 static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
777                                              struct net_bridge_mcast_port *pmctx,
778                                              struct sk_buff *skb)
779 {
780         struct net_bridge_vlan *vlan = NULL;
781
782         if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
783                 vlan = pmctx->vlan;
784         else if (br_multicast_ctx_is_vlan(brmctx))
785                 vlan = brmctx->vlan;
786
787         if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
788                 u16 vlan_proto;
789
790                 if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
791                         return;
792                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
793         }
794 }
795
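/* Build an IGMPv2/v3 query to be sent by the bridge as querier; for IGMPv3 it
 * can be general, group-specific or group-and-source specific (with_srcs).
 */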
796 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
797                                                     struct net_bridge_mcast_port *pmctx,
798                                                     struct net_bridge_port_group *pg,
799                                                     __be32 ip_dst, __be32 group,
800                                                     bool with_srcs, bool over_lmqt,
801                                                     u8 sflag, u8 *igmp_type,
802                                                     bool *need_rexmit)
803 {
804         struct net_bridge_port *p = pg ? pg->key.port : NULL;
805         struct net_bridge_group_src *ent;
806         size_t pkt_size, igmp_hdr_size;
807         unsigned long now = jiffies;
808         struct igmpv3_query *ihv3;
809         void *csum_start = NULL;
810         __sum16 *csum = NULL;
811         struct sk_buff *skb;
812         struct igmphdr *ih;
813         struct ethhdr *eth;
814         unsigned long lmqt;
815         struct iphdr *iph;
816         u16 lmqt_srcs = 0;
817
818         igmp_hdr_size = sizeof(*ih);
819         if (brmctx->multicast_igmp_version == 3) {
820                 igmp_hdr_size = sizeof(*ihv3);
821                 if (pg && with_srcs) {
822                         lmqt = now + (brmctx->multicast_last_member_interval *
823                                       brmctx->multicast_last_member_count);
824                         hlist_for_each_entry(ent, &pg->src_list, node) {
825                                 if (over_lmqt == time_after(ent->timer.expires,
826                                                             lmqt) &&
827                                     ent->src_query_rexmit_cnt > 0)
828                                         lmqt_srcs++;
829                         }
830
831                         if (!lmqt_srcs)
832                                 return NULL;
833                         igmp_hdr_size += lmqt_srcs * sizeof(__be32);
834                 }
835         }
836
837         pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
838         if ((p && pkt_size > p->dev->mtu) ||
839             pkt_size > brmctx->br->dev->mtu)
840                 return NULL;
841
842         skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
843         if (!skb)
844                 goto out;
845
846         __br_multicast_query_handle_vlan(brmctx, pmctx, skb);
847         skb->protocol = htons(ETH_P_IP);
848
849         skb_reset_mac_header(skb);
850         eth = eth_hdr(skb);
851
852         ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
853         ip_eth_mc_map(ip_dst, eth->h_dest);
854         eth->h_proto = htons(ETH_P_IP);
855         skb_put(skb, sizeof(*eth));
856
857         skb_set_network_header(skb, skb->len);
858         iph = ip_hdr(skb);
859         iph->tot_len = htons(pkt_size - sizeof(*eth));
860
861         iph->version = 4;
862         iph->ihl = 6;
863         iph->tos = 0xc0;
864         iph->id = 0;
865         iph->frag_off = htons(IP_DF);
866         iph->ttl = 1;
867         iph->protocol = IPPROTO_IGMP;
868         iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
869                      inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
870         iph->daddr = ip_dst;
871         ((u8 *)&iph[1])[0] = IPOPT_RA;
872         ((u8 *)&iph[1])[1] = 4;
873         ((u8 *)&iph[1])[2] = 0;
874         ((u8 *)&iph[1])[3] = 0;
875         ip_send_check(iph);
876         skb_put(skb, 24);
877
878         skb_set_transport_header(skb, skb->len);
879         *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
880
881         switch (brmctx->multicast_igmp_version) {
882         case 2:
883                 ih = igmp_hdr(skb);
884                 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
885                 ih->code = (group ? brmctx->multicast_last_member_interval :
886                                     brmctx->multicast_query_response_interval) /
887                            (HZ / IGMP_TIMER_SCALE);
888                 ih->group = group;
889                 ih->csum = 0;
890                 csum = &ih->csum;
891                 csum_start = (void *)ih;
892                 break;
893         case 3:
894                 ihv3 = igmpv3_query_hdr(skb);
895                 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
896                 ihv3->code = (group ? brmctx->multicast_last_member_interval :
897                                       brmctx->multicast_query_response_interval) /
898                              (HZ / IGMP_TIMER_SCALE);
899                 ihv3->group = group;
900                 ihv3->qqic = brmctx->multicast_query_interval / HZ;
901                 ihv3->nsrcs = htons(lmqt_srcs);
902                 ihv3->resv = 0;
903                 ihv3->suppress = sflag;
904                 ihv3->qrv = 2;
905                 ihv3->csum = 0;
906                 csum = &ihv3->csum;
907                 csum_start = (void *)ihv3;
908                 if (!pg || !with_srcs)
909                         break;
910
911                 lmqt_srcs = 0;
912                 hlist_for_each_entry(ent, &pg->src_list, node) {
913                         if (over_lmqt == time_after(ent->timer.expires,
914                                                     lmqt) &&
915                             ent->src_query_rexmit_cnt > 0) {
916                                 ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
917                                 ent->src_query_rexmit_cnt--;
918                                 if (need_rexmit && ent->src_query_rexmit_cnt)
919                                         *need_rexmit = true;
920                         }
921                 }
922                 if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
923                         kfree_skb(skb);
924                         return NULL;
925                 }
926                 break;
927         }
928
929         if (WARN_ON(!csum || !csum_start)) {
930                 kfree_skb(skb);
931                 return NULL;
932         }
933
934         *csum = ip_compute_csum(csum_start, igmp_hdr_size);
935         skb_put(skb, igmp_hdr_size);
936         __skb_pull(skb, sizeof(*eth));
937
938 out:
939         return skb;
940 }
941
942 #if IS_ENABLED(CONFIG_IPV6)
943 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
944                                                     struct net_bridge_mcast_port *pmctx,
945                                                     struct net_bridge_port_group *pg,
946                                                     const struct in6_addr *ip6_dst,
947                                                     const struct in6_addr *group,
948                                                     bool with_srcs, bool over_llqt,
949                                                     u8 sflag, u8 *igmp_type,
950                                                     bool *need_rexmit)
951 {
952         struct net_bridge_port *p = pg ? pg->key.port : NULL;
953         struct net_bridge_group_src *ent;
954         size_t pkt_size, mld_hdr_size;
955         unsigned long now = jiffies;
956         struct mld2_query *mld2q;
957         void *csum_start = NULL;
958         unsigned long interval;
959         __sum16 *csum = NULL;
960         struct ipv6hdr *ip6h;
961         struct mld_msg *mldq;
962         struct sk_buff *skb;
963         unsigned long llqt;
964         struct ethhdr *eth;
965         u16 llqt_srcs = 0;
966         u8 *hopopt;
967
968         mld_hdr_size = sizeof(*mldq);
969         if (brmctx->multicast_mld_version == 2) {
970                 mld_hdr_size = sizeof(*mld2q);
971                 if (pg && with_srcs) {
972                         llqt = now + (brmctx->multicast_last_member_interval *
973                                       brmctx->multicast_last_member_count);
974                         hlist_for_each_entry(ent, &pg->src_list, node) {
975                                 if (over_llqt == time_after(ent->timer.expires,
976                                                             llqt) &&
977                                     ent->src_query_rexmit_cnt > 0)
978                                         llqt_srcs++;
979                         }
980
981                         if (!llqt_srcs)
982                                 return NULL;
983                         mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
984                 }
985         }
986
987         pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
988         if ((p && pkt_size > p->dev->mtu) ||
989             pkt_size > brmctx->br->dev->mtu)
990                 return NULL;
991
992         skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
993         if (!skb)
994                 goto out;
995
996         __br_multicast_query_handle_vlan(brmctx, pmctx, skb);
997         skb->protocol = htons(ETH_P_IPV6);
998
999         /* Ethernet header */
1000         skb_reset_mac_header(skb);
1001         eth = eth_hdr(skb);
1002
1003         ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
1004         eth->h_proto = htons(ETH_P_IPV6);
1005         skb_put(skb, sizeof(*eth));
1006
1007         /* IPv6 header + HbH option */
1008         skb_set_network_header(skb, skb->len);
1009         ip6h = ipv6_hdr(skb);
1010
1011         *(__force __be32 *)ip6h = htonl(0x60000000);
1012         ip6h->payload_len = htons(8 + mld_hdr_size);
1013         ip6h->nexthdr = IPPROTO_HOPOPTS;
1014         ip6h->hop_limit = 1;
1015         ip6h->daddr = *ip6_dst;
1016         if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
1017                                &ip6h->daddr, 0, &ip6h->saddr)) {
1018                 kfree_skb(skb);
1019                 br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
1020                 return NULL;
1021         }
1022
1023         br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
1024         ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
1025
1026         hopopt = (u8 *)(ip6h + 1);
1027         hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
1028         hopopt[1] = 0;                          /* length of HbH */
1029         hopopt[2] = IPV6_TLV_ROUTERALERT;       /* Router Alert */
1030         hopopt[3] = 2;                          /* Length of RA Option */
1031         hopopt[4] = 0;                          /* Type = 0x0000 (MLD) */
1032         hopopt[5] = 0;
1033         hopopt[6] = IPV6_TLV_PAD1;              /* Pad1 */
1034         hopopt[7] = IPV6_TLV_PAD1;              /* Pad1 */
1035
1036         skb_put(skb, sizeof(*ip6h) + 8);
1037
1038         /* ICMPv6 */
1039         skb_set_transport_header(skb, skb->len);
1040         interval = ipv6_addr_any(group) ?
1041                         brmctx->multicast_query_response_interval :
1042                         brmctx->multicast_last_member_interval;
1043         *igmp_type = ICMPV6_MGM_QUERY;
1044         switch (brmctx->multicast_mld_version) {
1045         case 1:
1046                 mldq = (struct mld_msg *)icmp6_hdr(skb);
1047                 mldq->mld_type = ICMPV6_MGM_QUERY;
1048                 mldq->mld_code = 0;
1049                 mldq->mld_cksum = 0;
1050                 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
1051                 mldq->mld_reserved = 0;
1052                 mldq->mld_mca = *group;
1053                 csum = &mldq->mld_cksum;
1054                 csum_start = (void *)mldq;
1055                 break;
1056         case 2:
1057                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1058                 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
1059                 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
1060                 mld2q->mld2q_code = 0;
1061                 mld2q->mld2q_cksum = 0;
1062                 mld2q->mld2q_resv1 = 0;
1063                 mld2q->mld2q_resv2 = 0;
1064                 mld2q->mld2q_suppress = sflag;
1065                 mld2q->mld2q_qrv = 2;
1066                 mld2q->mld2q_nsrcs = htons(llqt_srcs);
1067                 mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
1068                 mld2q->mld2q_mca = *group;
1069                 csum = &mld2q->mld2q_cksum;
1070                 csum_start = (void *)mld2q;
1071                 if (!pg || !with_srcs)
1072                         break;
1073
1074                 llqt_srcs = 0;
1075                 hlist_for_each_entry(ent, &pg->src_list, node) {
1076                         if (over_llqt == time_after(ent->timer.expires,
1077                                                     llqt) &&
1078                             ent->src_query_rexmit_cnt > 0) {
1079                                 mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
1080                                 ent->src_query_rexmit_cnt--;
1081                                 if (need_rexmit && ent->src_query_rexmit_cnt)
1082                                         *need_rexmit = true;
1083                         }
1084                 }
1085                 if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
1086                         kfree_skb(skb);
1087                         return NULL;
1088                 }
1089                 break;
1090         }
1091
1092         if (WARN_ON(!csum || !csum_start)) {
1093                 kfree_skb(skb);
1094                 return NULL;
1095         }
1096
1097         *csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
1098                                 IPPROTO_ICMPV6,
1099                                 csum_partial(csum_start, mld_hdr_size, 0));
1100         skb_put(skb, mld_hdr_size);
1101         __skb_pull(skb, sizeof(*eth));
1102
1103 out:
1104         return skb;
1105 }
1106 #endif
1107
1108 static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
1109                                                 struct net_bridge_mcast_port *pmctx,
1110                                                 struct net_bridge_port_group *pg,
1111                                                 struct br_ip *ip_dst,
1112                                                 struct br_ip *group,
1113                                                 bool with_srcs, bool over_lmqt,
1114                                                 u8 sflag, u8 *igmp_type,
1115                                                 bool *need_rexmit)
1116 {
1117         __be32 ip4_dst;
1118
1119         switch (group->proto) {
1120         case htons(ETH_P_IP):
1121                 ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
1122                 return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
1123                                                     ip4_dst, group->dst.ip4,
1124                                                     with_srcs, over_lmqt,
1125                                                     sflag, igmp_type,
1126                                                     need_rexmit);
1127 #if IS_ENABLED(CONFIG_IPV6)
1128         case htons(ETH_P_IPV6): {
1129                 struct in6_addr ip6_dst;
1130
1131                 if (ip_dst)
1132                         ip6_dst = ip_dst->dst.ip6;
1133                 else
1134                         ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
1135                                       htonl(1));
1136
1137                 return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
1138                                                     &ip6_dst, &group->dst.ip6,
1139                                                     with_srcs, over_lmqt,
1140                                                     sflag, igmp_type,
1141                                                     need_rexmit);
1142         }
1143 #endif
1144         }
1145         return NULL;
1146 }
1147
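/* Find or create the MDB entry for a group; if the hash table has reached
 * hash_max, multicast snooping is disabled and -E2BIG is returned.
 */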
1148 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
1149                                                     struct br_ip *group)
1150 {
1151         struct net_bridge_mdb_entry *mp;
1152         int err;
1153
1154         mp = br_mdb_ip_get(br, group);
1155         if (mp)
1156                 return mp;
1157
1158         if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
1159                 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
1160                 return ERR_PTR(-E2BIG);
1161         }
1162
1163         mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
1164         if (unlikely(!mp))
1165                 return ERR_PTR(-ENOMEM);
1166
1167         mp->br = br;
1168         mp->addr = *group;
1169         mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
1170         timer_setup(&mp->timer, br_multicast_group_expired, 0);
1171         err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
1172                                             br_mdb_rht_params);
1173         if (err) {
1174                 kfree(mp);
1175                 mp = ERR_PTR(err);
1176         } else {
1177                 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
1178         }
1179
1180         return mp;
1181 }
1182
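/* Source timer expiry: in INCLUDE mode the source (and possibly the whole
 * port group) is removed, in EXCLUDE mode only its forwarding state changes.
 */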
1183 static void br_multicast_group_src_expired(struct timer_list *t)
1184 {
1185         struct net_bridge_group_src *src = from_timer(src, t, timer);
1186         struct net_bridge_port_group *pg;
1187         struct net_bridge *br = src->br;
1188
1189         spin_lock(&br->multicast_lock);
1190         if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
1191             timer_pending(&src->timer))
1192                 goto out;
1193
1194         pg = src->pg;
1195         if (pg->filter_mode == MCAST_INCLUDE) {
1196                 br_multicast_del_group_src(src, false);
1197                 if (!hlist_empty(&pg->src_list))
1198                         goto out;
1199                 br_multicast_find_del_pg(br, pg);
1200         } else {
1201                 br_multicast_fwd_src_handle(src);
1202         }
1203
1204 out:
1205         spin_unlock(&br->multicast_lock);
1206 }
1207
1208 struct net_bridge_group_src *
1209 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
1210 {
1211         struct net_bridge_group_src *ent;
1212
1213         switch (ip->proto) {
1214         case htons(ETH_P_IP):
1215                 hlist_for_each_entry(ent, &pg->src_list, node)
1216                         if (ip->src.ip4 == ent->addr.src.ip4)
1217                                 return ent;
1218                 break;
1219 #if IS_ENABLED(CONFIG_IPV6)
1220         case htons(ETH_P_IPV6):
1221                 hlist_for_each_entry(ent, &pg->src_list, node)
1222                         if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
1223                                 return ent;
1224                 break;
1225 #endif
1226         }
1227
1228         return NULL;
1229 }
1230
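/* Allocate a new source entry for a port group, rejecting invalid source
 * addresses and enforcing the PG_SRC_ENT_LIMIT cap per group.
 */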
1231 static struct net_bridge_group_src *
1232 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
1233 {
1234         struct net_bridge_group_src *grp_src;
1235
1236         if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
1237                 return NULL;
1238
1239         switch (src_ip->proto) {
1240         case htons(ETH_P_IP):
1241                 if (ipv4_is_zeronet(src_ip->src.ip4) ||
1242                     ipv4_is_multicast(src_ip->src.ip4))
1243                         return NULL;
1244                 break;
1245 #if IS_ENABLED(CONFIG_IPV6)
1246         case htons(ETH_P_IPV6):
1247                 if (ipv6_addr_any(&src_ip->src.ip6) ||
1248                     ipv6_addr_is_multicast(&src_ip->src.ip6))
1249                         return NULL;
1250                 break;
1251 #endif
1252         }
1253
1254         grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
1255         if (unlikely(!grp_src))
1256                 return NULL;
1257
1258         grp_src->pg = pg;
1259         grp_src->br = pg->key.port->br;
1260         grp_src->addr = *src_ip;
1261         grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
1262         timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
1263
1264         hlist_add_head_rcu(&grp_src->node, &pg->src_list);
1265         pg->src_ents++;
1266
1267         return grp_src;
1268 }
1269
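/* Allocate and initialize a new port group; S,G groups are also inserted into
 * the per-port S,G rhashtable.
 */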
1270 struct net_bridge_port_group *br_multicast_new_port_group(
1271                         struct net_bridge_port *port,
1272                         struct br_ip *group,
1273                         struct net_bridge_port_group __rcu *next,
1274                         unsigned char flags,
1275                         const unsigned char *src,
1276                         u8 filter_mode,
1277                         u8 rt_protocol)
1278 {
1279         struct net_bridge_port_group *p;
1280
1281         p = kzalloc(sizeof(*p), GFP_ATOMIC);
1282         if (unlikely(!p))
1283                 return NULL;
1284
1285         p->key.addr = *group;
1286         p->key.port = port;
1287         p->flags = flags;
1288         p->filter_mode = filter_mode;
1289         p->rt_protocol = rt_protocol;
1290         p->eht_host_tree = RB_ROOT;
1291         p->eht_set_tree = RB_ROOT;
1292         p->mcast_gc.destroy = br_multicast_destroy_port_group;
1293         INIT_HLIST_HEAD(&p->src_list);
1294
1295         if (!br_multicast_is_star_g(group) &&
1296             rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
1297                                           br_sg_port_rht_params)) {
1298                 kfree(p);
1299                 return NULL;
1300         }
1301
1302         rcu_assign_pointer(p->next, next);
1303         timer_setup(&p->timer, br_multicast_port_group_expired, 0);
1304         timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
1305         hlist_add_head(&p->mglist, &port->mglist);
1306
1307         if (src)
1308                 memcpy(p->eth_addr, src, ETH_ALEN);
1309         else
1310                 eth_broadcast_addr(p->eth_addr);
1311
1312         return p;
1313 }
1314
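/* Mark the bridge itself as joined to the group and re-arm the membership
 * timer (L2 groups have no timer).
 */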
1315 void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
1316                             struct net_bridge_mdb_entry *mp, bool notify)
1317 {
1318         if (!mp->host_joined) {
1319                 mp->host_joined = true;
1320                 if (br_multicast_is_star_g(&mp->addr))
1321                         br_multicast_star_g_host_state(mp);
1322                 if (notify)
1323                         br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
1324         }
1325
1326         if (br_group_is_l2(&mp->addr))
1327                 return;
1328
1329         mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
1330 }
1331
1332 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
1333 {
1334         if (!mp->host_joined)
1335                 return;
1336
1337         mp->host_joined = false;
1338         if (br_multicast_is_star_g(&mp->addr))
1339                 br_multicast_star_g_host_state(mp);
1340         if (notify)
1341                 br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
1342 }
1343
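/* Core join processing: create the MDB entry if needed, then either record a
 * host join (no port context) or find/create the matching port group, and for
 * IGMPv2/MLDv1 reports refresh the membership timer.
 */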
1344 static struct net_bridge_port_group *
1345 __br_multicast_add_group(struct net_bridge_mcast *brmctx,
1346                          struct net_bridge_mcast_port *pmctx,
1347                          struct br_ip *group,
1348                          const unsigned char *src,
1349                          u8 filter_mode,
1350                          bool igmpv2_mldv1,
1351                          bool blocked)
1352 {
1353         struct net_bridge_port_group __rcu **pp;
1354         struct net_bridge_port_group *p = NULL;
1355         struct net_bridge_mdb_entry *mp;
1356         unsigned long now = jiffies;
1357
1358         if (!br_multicast_ctx_should_use(brmctx, pmctx))
1359                 goto out;
1360
1361         mp = br_multicast_new_group(brmctx->br, group);
1362         if (IS_ERR(mp))
1363                 return ERR_CAST(mp);
1364
1365         if (!pmctx) {
1366                 br_multicast_host_join(brmctx, mp, true);
1367                 goto out;
1368         }
1369
1370         for (pp = &mp->ports;
1371              (p = mlock_dereference(*pp, brmctx->br)) != NULL;
1372              pp = &p->next) {
1373                 if (br_port_group_equal(p, pmctx->port, src))
1374                         goto found;
1375                 if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
1376                         break;
1377         }
1378
1379         p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
1380                                         filter_mode, RTPROT_KERNEL);
1381         if (unlikely(!p)) {
1382                 p = ERR_PTR(-ENOMEM);
1383                 goto out;
1384         }
1385         rcu_assign_pointer(*pp, p);
1386         if (blocked)
1387                 p->flags |= MDB_PG_FLAGS_BLOCKED;
1388         br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);
1389
1390 found:
1391         if (igmpv2_mldv1)
1392                 mod_timer(&p->timer,
1393                           now + brmctx->multicast_membership_interval);
1394
1395 out:
1396         return p;
1397 }
1398
1399 static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
1400                                   struct net_bridge_mcast_port *pmctx,
1401                                   struct br_ip *group,
1402                                   const unsigned char *src,
1403                                   u8 filter_mode,
1404                                   bool igmpv2_mldv1)
1405 {
1406         struct net_bridge_port_group *pg;
1407         int err;
1408
1409         spin_lock(&brmctx->br->multicast_lock);
1410         pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
1411                                       igmpv2_mldv1, false);
1412         /* NULL is considered valid for host joined groups */
1413         err = PTR_ERR_OR_ZERO(pg);
1414         spin_unlock(&brmctx->br->multicast_lock);
1415
1416         return err;
1417 }
1418
1419 static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
1420                                       struct net_bridge_mcast_port *pmctx,
1421                                       __be32 group,
1422                                       __u16 vid,
1423                                       const unsigned char *src,
1424                                       bool igmpv2)
1425 {
1426         struct br_ip br_group;
1427         u8 filter_mode;
1428
1429         if (ipv4_is_local_multicast(group))
1430                 return 0;
1431
1432         memset(&br_group, 0, sizeof(br_group));
1433         br_group.dst.ip4 = group;
1434         br_group.proto = htons(ETH_P_IP);
1435         br_group.vid = vid;
1436         filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1437
1438         return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1439                                       filter_mode, igmpv2);
1440 }
1441
1442 #if IS_ENABLED(CONFIG_IPV6)
1443 static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
1444                                       struct net_bridge_mcast_port *pmctx,
1445                                       const struct in6_addr *group,
1446                                       __u16 vid,
1447                                       const unsigned char *src,
1448                                       bool mldv1)
1449 {
1450         struct br_ip br_group;
1451         u8 filter_mode;
1452
1453         if (ipv6_addr_is_ll_all_nodes(group))
1454                 return 0;
1455
1456         memset(&br_group, 0, sizeof(br_group));
1457         br_group.dst.ip6 = *group;
1458         br_group.proto = htons(ETH_P_IPV6);
1459         br_group.vid = vid;
1460         filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1461
1462         return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1463                                       filter_mode, mldv1);
1464 }
1465 #endif
1466
1467 static bool br_multicast_rport_del(struct hlist_node *rlist)
1468 {
1469         if (hlist_unhashed(rlist))
1470                 return false;
1471
1472         hlist_del_init_rcu(rlist);
1473         return true;
1474 }
1475
1476 static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
1477 {
1478         return br_multicast_rport_del(&pmctx->ip4_rlist);
1479 }
1480
1481 static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
1482 {
1483 #if IS_ENABLED(CONFIG_IPV6)
1484         return br_multicast_rport_del(&pmctx->ip6_rlist);
1485 #else
1486         return false;
1487 #endif
1488 }
1489
1490 static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
1491                                         struct timer_list *t,
1492                                         struct hlist_node *rlist)
1493 {
1494         struct net_bridge *br = pmctx->port->br;
1495         bool del;
1496
1497         spin_lock(&br->multicast_lock);
1498         if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
1499             pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
1500             timer_pending(t))
1501                 goto out;
1502
1503         del = br_multicast_rport_del(rlist);
1504         br_multicast_rport_del_notify(pmctx, del);
1505 out:
1506         spin_unlock(&br->multicast_lock);
1507 }
1508
1509 static void br_ip4_multicast_router_expired(struct timer_list *t)
1510 {
1511         struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
1512                                                          ip4_mc_router_timer);
1513
1514         br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
1515 }
1516
1517 #if IS_ENABLED(CONFIG_IPV6)
1518 static void br_ip6_multicast_router_expired(struct timer_list *t)
1519 {
1520         struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
1521                                                          ip6_mc_router_timer);
1522
1523         br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
1524 }
1525 #endif
1526
1527 static void br_mc_router_state_change(struct net_bridge *p,
1528                                       bool is_mc_router)
1529 {
1530         struct switchdev_attr attr = {
1531                 .orig_dev = p->dev,
1532                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
1533                 .flags = SWITCHDEV_F_DEFER,
1534                 .u.mrouter = is_mc_router,
1535         };
1536
1537         switchdev_port_attr_set(p->dev, &attr, NULL);
1538 }
1539
1540 static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
1541                                               struct timer_list *timer)
1542 {
1543         spin_lock(&brmctx->br->multicast_lock);
1544         if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
1545             brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
1546             br_ip4_multicast_is_router(brmctx) ||
1547             br_ip6_multicast_is_router(brmctx))
1548                 goto out;
1549
1550         br_mc_router_state_change(brmctx->br, false);
1551 out:
1552         spin_unlock(&brmctx->br->multicast_lock);
1553 }
1554
1555 static void br_ip4_multicast_local_router_expired(struct timer_list *t)
1556 {
1557         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
1558                                                      ip4_mc_router_timer);
1559
1560         br_multicast_local_router_expired(brmctx, t);
1561 }
1562
1563 #if IS_ENABLED(CONFIG_IPV6)
1564 static void br_ip6_multicast_local_router_expired(struct timer_list *t)
1565 {
1566         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
1567                                                      ip6_mc_router_timer);
1568
1569         br_multicast_local_router_expired(brmctx, t);
1570 }
1571 #endif
1572
1573 static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
1574                                          struct bridge_mcast_own_query *query)
1575 {
1576         spin_lock(&brmctx->br->multicast_lock);
1577         if (!netif_running(brmctx->br->dev) ||
1578             br_multicast_ctx_vlan_global_disabled(brmctx) ||
1579             !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
1580                 goto out;
1581
1582         br_multicast_start_querier(brmctx, query);
1583
1584 out:
1585         spin_unlock(&brmctx->br->multicast_lock);
1586 }
1587
1588 static void br_ip4_multicast_querier_expired(struct timer_list *t)
1589 {
1590         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
1591                                                      ip4_other_query.timer);
1592
1593         br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
1594 }
1595
1596 #if IS_ENABLED(CONFIG_IPV6)
1597 static void br_ip6_multicast_querier_expired(struct timer_list *t)
1598 {
1599         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
1600                                                      ip6_other_query.timer);
1601
1602         br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
1603 }
1604 #endif
1605
1606 static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
1607                                             struct br_ip *ip,
1608                                             struct sk_buff *skb)
1609 {
1610         if (ip->proto == htons(ETH_P_IP))
1611                 brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
1612 #if IS_ENABLED(CONFIG_IPV6)
1613         else
1614                 brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
1615 #endif
1616 }
1617
1618 static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
1619                                       struct net_bridge_mcast_port *pmctx,
1620                                       struct net_bridge_port_group *pg,
1621                                       struct br_ip *ip_dst,
1622                                       struct br_ip *group,
1623                                       bool with_srcs,
1624                                       u8 sflag,
1625                                       bool *need_rexmit)
1626 {
1627         bool over_lmqt = !!sflag;
1628         struct sk_buff *skb;
1629         u8 igmp_type;
1630
1631         if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
1632             !br_multicast_ctx_matches_vlan_snooping(brmctx))
1633                 return;
1634
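        /* When a group-and-source specific query is requested with the
         * suppress flag, the sources are emitted in two passes: first those
         * with timers above the last member query time, then (via the goto
         * below) the remaining ones.
         */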
1635 again_under_lmqt:
1636         skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
1637                                        with_srcs, over_lmqt, sflag, &igmp_type,
1638                                        need_rexmit);
1639         if (!skb)
1640                 return;
1641
1642         if (pmctx) {
1643                 skb->dev = pmctx->port->dev;
1644                 br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
1645                                    BR_MCAST_DIR_TX);
1646                 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
1647                         dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
1648                         br_dev_queue_push_xmit);
1649
1650                 if (over_lmqt && with_srcs && sflag) {
1651                         over_lmqt = false;
1652                         goto again_under_lmqt;
1653                 }
1654         } else {
1655                 br_multicast_select_own_querier(brmctx, group, skb);
1656                 br_multicast_count(brmctx->br, NULL, skb, igmp_type,
1657                                    BR_MCAST_DIR_RX);
1658                 netif_rx(skb);
1659         }
1660 }
1661
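/* Read a consistent snapshot of the querier's port ifindex and address,
 * retrying if a writer updates the seqcount in the meantime.
 */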
1662 static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
1663                                       struct bridge_mcast_querier *dest)
1664 {
1665         unsigned int seq;
1666
1667         memset(dest, 0, sizeof(*dest));
1668         do {
1669                 seq = read_seqcount_begin(&querier->seq);
1670                 dest->port_ifidx = querier->port_ifidx;
1671                 memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
1672         } while (read_seqcount_retry(&querier->seq, seq));
1673 }
1674
1675 static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
1676                                         struct bridge_mcast_querier *querier,
1677                                         int ifindex,
1678                                         struct br_ip *saddr)
1679 {
1680         write_seqcount_begin(&querier->seq);
1681         querier->port_ifidx = ifindex;
1682         memcpy(&querier->addr, saddr, sizeof(*saddr));
1683         write_seqcount_end(&querier->seq);
1684 }
1685
1686 static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
1687                                     struct net_bridge_mcast_port *pmctx,
1688                                     struct bridge_mcast_own_query *own_query)
1689 {
1690         struct bridge_mcast_other_query *other_query = NULL;
1691         struct bridge_mcast_querier *querier;
1692         struct br_ip br_group;
1693         unsigned long time;
1694
1695         if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
1696             !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
1697             !brmctx->multicast_querier)
1698                 return;
1699
1700         memset(&br_group.dst, 0, sizeof(br_group.dst));
1701
1702         if (pmctx ? (own_query == &pmctx->ip4_own_query) :
1703                     (own_query == &brmctx->ip4_own_query)) {
1704                 querier = &brmctx->ip4_querier;
1705                 other_query = &brmctx->ip4_other_query;
1706                 br_group.proto = htons(ETH_P_IP);
1707 #if IS_ENABLED(CONFIG_IPV6)
1708         } else {
1709                 querier = &brmctx->ip6_querier;
1710                 other_query = &brmctx->ip6_other_query;
1711                 br_group.proto = htons(ETH_P_IPV6);
1712 #endif
1713         }
1714
1715         if (!other_query || timer_pending(&other_query->timer))
1716                 return;
1717
1718         /* we're about to select ourselves as querier */
1719         if (!pmctx && querier->port_ifidx) {
1720                 struct br_ip zeroip = {};
1721
1722                 br_multicast_update_querier(brmctx, querier, 0, &zeroip);
1723         }
1724
1725         __br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
1726                                   0, NULL);
1727
1728         time = jiffies;
1729         time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
1730                 brmctx->multicast_startup_query_interval :
1731                 brmctx->multicast_query_interval;
1732         mod_timer(&own_query->timer, time);
1733 }
1734
1735 static void
1736 br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
1737                                 struct bridge_mcast_own_query *query)
1738 {
1739         struct net_bridge *br = pmctx->port->br;
1740         struct net_bridge_mcast *brmctx;
1741
1742         spin_lock(&br->multicast_lock);
1743         if (br_multicast_port_ctx_state_stopped(pmctx))
1744                 goto out;
1745
1746         brmctx = br_multicast_port_ctx_get_global(pmctx);
1747         if (query->startup_sent < brmctx->multicast_startup_query_count)
1748                 query->startup_sent++;
1749
1750         br_multicast_send_query(brmctx, pmctx, query);
1751
1752 out:
1753         spin_unlock(&br->multicast_lock);
1754 }
1755
1756 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
1757 {
1758         struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
1759                                                          ip4_own_query.timer);
1760
1761         br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
1762 }
1763
1764 #if IS_ENABLED(CONFIG_IPV6)
1765 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
1766 {
1767         struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
1768                                                          ip6_own_query.timer);
1769
1770         br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
1771 }
1772 #endif
1773
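/* Rexmit timer handler: while this bridge acts as querier (and no other
 * querier is present), retransmit the pending group specific and
 * group-and-source specific queries for the port group and rearm the timer
 * if more retransmissions are due.
 */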
1774 static void br_multicast_port_group_rexmit(struct timer_list *t)
1775 {
1776         struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
1777         struct bridge_mcast_other_query *other_query = NULL;
1778         struct net_bridge *br = pg->key.port->br;
1779         struct net_bridge_mcast_port *pmctx;
1780         struct net_bridge_mcast *brmctx;
1781         bool need_rexmit = false;
1782
1783         spin_lock(&br->multicast_lock);
1784         if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
1785             !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1786                 goto out;
1787
1788         pmctx = br_multicast_pg_to_port_ctx(pg);
1789         if (!pmctx)
1790                 goto out;
1791         brmctx = br_multicast_port_ctx_get_global(pmctx);
1792         if (!brmctx->multicast_querier)
1793                 goto out;
1794
1795         if (pg->key.addr.proto == htons(ETH_P_IP))
1796                 other_query = &brmctx->ip4_other_query;
1797 #if IS_ENABLED(CONFIG_IPV6)
1798         else
1799                 other_query = &brmctx->ip6_other_query;
1800 #endif
1801
1802         if (!other_query || timer_pending(&other_query->timer))
1803                 goto out;
1804
1805         if (pg->grp_query_rexmit_cnt) {
1806                 pg->grp_query_rexmit_cnt--;
1807                 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
1808                                           &pg->key.addr, false, 1, NULL);
1809         }
1810         __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
1811                                   &pg->key.addr, true, 0, &need_rexmit);
1812
1813         if (pg->grp_query_rexmit_cnt || need_rexmit)
1814                 mod_timer(&pg->rexmit_timer, jiffies +
1815                                              brmctx->multicast_last_member_interval);
1816 out:
1817         spin_unlock(&br->multicast_lock);
1818 }
1819
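/* Tell switchdev drivers whether IGMP/MLD snooping is disabled; the
 * attribute carries the inverse of 'value' (mc_disabled = !enabled).
 */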
1820 static int br_mc_disabled_update(struct net_device *dev, bool value,
1821                                  struct netlink_ext_ack *extack)
1822 {
1823         struct switchdev_attr attr = {
1824                 .orig_dev = dev,
1825                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1826                 .flags = SWITCHDEV_F_DEFER,
1827                 .u.mc_disabled = !value,
1828         };
1829
1830         return switchdev_port_attr_set(dev, &attr, extack);
1831 }
1832
1833 void br_multicast_port_ctx_init(struct net_bridge_port *port,
1834                                 struct net_bridge_vlan *vlan,
1835                                 struct net_bridge_mcast_port *pmctx)
1836 {
1837         pmctx->port = port;
1838         pmctx->vlan = vlan;
1839         pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1840         timer_setup(&pmctx->ip4_mc_router_timer,
1841                     br_ip4_multicast_router_expired, 0);
1842         timer_setup(&pmctx->ip4_own_query.timer,
1843                     br_ip4_multicast_port_query_expired, 0);
1844 #if IS_ENABLED(CONFIG_IPV6)
1845         timer_setup(&pmctx->ip6_mc_router_timer,
1846                     br_ip6_multicast_router_expired, 0);
1847         timer_setup(&pmctx->ip6_own_query.timer,
1848                     br_ip6_multicast_port_query_expired, 0);
1849 #endif
1850 }
1851
1852 void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
1853 {
1854 #if IS_ENABLED(CONFIG_IPV6)
1855         del_timer_sync(&pmctx->ip6_mc_router_timer);
1856 #endif
1857         del_timer_sync(&pmctx->ip4_mc_router_timer);
1858 }
1859
1860 int br_multicast_add_port(struct net_bridge_port *port)
1861 {
1862         int err;
1863
1864         port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
1865         br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);
1866
1867         err = br_mc_disabled_update(port->dev,
1868                                     br_opt_get(port->br,
1869                                                BROPT_MULTICAST_ENABLED),
1870                                     NULL);
1871         if (err && err != -EOPNOTSUPP)
1872                 return err;
1873
1874         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
1875         if (!port->mcast_stats)
1876                 return -ENOMEM;
1877
1878         return 0;
1879 }
1880
1881 void br_multicast_del_port(struct net_bridge_port *port)
1882 {
1883         struct net_bridge *br = port->br;
1884         struct net_bridge_port_group *pg;
1885         HLIST_HEAD(deleted_head);
1886         struct hlist_node *n;
1887
1888         /* Take care of the remaining groups, only perm ones should be left */
1889         spin_lock_bh(&br->multicast_lock);
1890         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
1891                 br_multicast_find_del_pg(br, pg);
1892         hlist_move_list(&br->mcast_gc_list, &deleted_head);
1893         spin_unlock_bh(&br->multicast_lock);
1894         br_multicast_gc(&deleted_head);
1895         br_multicast_port_ctx_deinit(&port->multicast_ctx);
1896         free_percpu(port->mcast_stats);
1897 }
1898
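/* Restart the own query cycle for this context: reset the startup query
 * counter and fire the query timer immediately unless its handler is
 * already running.
 */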
1899 static void br_multicast_enable(struct bridge_mcast_own_query *query)
1900 {
1901         query->startup_sent = 0;
1902
1903         if (try_to_del_timer_sync(&query->timer) >= 0 ||
1904             del_timer(&query->timer))
1905                 mod_timer(&query->timer, jiffies);
1906 }
1907
1908 static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
1909 {
1910         struct net_bridge *br = pmctx->port->br;
1911         struct net_bridge_mcast *brmctx;
1912
1913         brmctx = br_multicast_port_ctx_get_global(pmctx);
1914         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
1915             !netif_running(br->dev))
1916                 return;
1917
1918         br_multicast_enable(&pmctx->ip4_own_query);
1919 #if IS_ENABLED(CONFIG_IPV6)
1920         br_multicast_enable(&pmctx->ip6_own_query);
1921 #endif
1922         if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
1923                 br_ip4_multicast_add_router(brmctx, pmctx);
1924                 br_ip6_multicast_add_router(brmctx, pmctx);
1925         }
1926 }
1927
1928 void br_multicast_enable_port(struct net_bridge_port *port)
1929 {
1930         struct net_bridge *br = port->br;
1931
1932         spin_lock_bh(&br->multicast_lock);
1933         __br_multicast_enable_port_ctx(&port->multicast_ctx);
1934         spin_unlock_bh(&br->multicast_lock);
1935 }
1936
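/* Flush the non-permanent port groups that belong to this context, stop its
 * query and router timers and, if the port stops being tracked as a
 * multicast router port as a result, send the corresponding notification.
 */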
1937 static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
1938 {
1939         struct net_bridge_port_group *pg;
1940         struct hlist_node *n;
1941         bool del = false;
1942
1943         hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
1944                 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
1945                     (!br_multicast_port_ctx_is_vlan(pmctx) ||
1946                      pg->key.addr.vid == pmctx->vlan->vid))
1947                         br_multicast_find_del_pg(pmctx->port->br, pg);
1948
1949         del |= br_ip4_multicast_rport_del(pmctx);
1950         del_timer(&pmctx->ip4_mc_router_timer);
1951         del_timer(&pmctx->ip4_own_query.timer);
1952         del |= br_ip6_multicast_rport_del(pmctx);
1953 #if IS_ENABLED(CONFIG_IPV6)
1954         del_timer(&pmctx->ip6_mc_router_timer);
1955         del_timer(&pmctx->ip6_own_query.timer);
1956 #endif
1957         br_multicast_rport_del_notify(pmctx, del);
1958 }
1959
1960 void br_multicast_disable_port(struct net_bridge_port *port)
1961 {
1962         spin_lock_bh(&port->br->multicast_lock);
1963         __br_multicast_disable_port_ctx(&port->multicast_ctx);
1964         spin_unlock_bh(&port->br->multicast_lock);
1965 }
1966
1967 static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
1968 {
1969         struct net_bridge_group_src *ent;
1970         struct hlist_node *tmp;
1971         int deleted = 0;
1972
1973         hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
1974                 if (ent->flags & BR_SGRP_F_DELETE) {
1975                         br_multicast_del_group_src(ent, false);
1976                         deleted++;
1977                 }
1978
1979         return deleted;
1980 }
1981
1982 static void __grp_src_mod_timer(struct net_bridge_group_src *src,
1983                                 unsigned long expires)
1984 {
1985         mod_timer(&src->timer, expires);
1986         br_multicast_fwd_src_handle(src);
1987 }
1988
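/* For sources marked with BR_SGRP_F_SEND: lower their timers to the last
 * member query time (if currently above it) and, when this bridge is the
 * active querier, send a group-and-source specific query and schedule its
 * retransmissions.
 */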
1989 static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
1990                                               struct net_bridge_mcast_port *pmctx,
1991                                               struct net_bridge_port_group *pg)
1992 {
1993         struct bridge_mcast_other_query *other_query = NULL;
1994         u32 lmqc = brmctx->multicast_last_member_count;
1995         unsigned long lmqt, lmi, now = jiffies;
1996         struct net_bridge_group_src *ent;
1997
1998         if (!netif_running(brmctx->br->dev) ||
1999             !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
2000                 return;
2001
2002         if (pg->key.addr.proto == htons(ETH_P_IP))
2003                 other_query = &brmctx->ip4_other_query;
2004 #if IS_ENABLED(CONFIG_IPV6)
2005         else
2006                 other_query = &brmctx->ip6_other_query;
2007 #endif
2008
2009         lmqt = now + br_multicast_lmqt(brmctx);
2010         hlist_for_each_entry(ent, &pg->src_list, node) {
2011                 if (ent->flags & BR_SGRP_F_SEND) {
2012                         ent->flags &= ~BR_SGRP_F_SEND;
2013                         if (ent->timer.expires > lmqt) {
2014                                 if (brmctx->multicast_querier &&
2015                                     other_query &&
2016                                     !timer_pending(&other_query->timer))
2017                                         ent->src_query_rexmit_cnt = lmqc;
2018                                 __grp_src_mod_timer(ent, lmqt);
2019                         }
2020                 }
2021         }
2022
2023         if (!brmctx->multicast_querier ||
2024             !other_query || timer_pending(&other_query->timer))
2025                 return;
2026
2027         __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
2028                                   &pg->key.addr, true, 1, NULL);
2029
2030         lmi = now + brmctx->multicast_last_member_interval;
2031         if (!timer_pending(&pg->rexmit_timer) ||
2032             time_after(pg->rexmit_timer.expires, lmi))
2033                 mod_timer(&pg->rexmit_timer, lmi);
2034 }
2035
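/* Send a group specific query (when this bridge is the active querier),
 * arm its retransmission, and lower the group timer to the last member
 * query time for EXCLUDE mode entries.
 */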
2036 static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
2037                                         struct net_bridge_mcast_port *pmctx,
2038                                         struct net_bridge_port_group *pg)
2039 {
2040         struct bridge_mcast_other_query *other_query = NULL;
2041         unsigned long now = jiffies, lmi;
2042
2043         if (!netif_running(brmctx->br->dev) ||
2044             !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
2045                 return;
2046
2047         if (pg->key.addr.proto == htons(ETH_P_IP))
2048                 other_query = &brmctx->ip4_other_query;
2049 #if IS_ENABLED(CONFIG_IPV6)
2050         else
2051                 other_query = &brmctx->ip6_other_query;
2052 #endif
2053
2054         if (brmctx->multicast_querier &&
2055             other_query && !timer_pending(&other_query->timer)) {
2056                 lmi = now + brmctx->multicast_last_member_interval;
2057                 pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
2058                 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
2059                                           &pg->key.addr, false, 0, NULL);
2060                 if (!timer_pending(&pg->rexmit_timer) ||
2061                     time_after(pg->rexmit_timer.expires, lmi))
2062                         mod_timer(&pg->rexmit_timer, lmi);
2063         }
2064
2065         if (pg->filter_mode == MCAST_EXCLUDE &&
2066             (!timer_pending(&pg->timer) ||
2067              time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
2068                 mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
2069 }
2070
2071 /* State          Msg type      New state                Actions
2072  * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)            (B)=GMI
2073  * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)            (B)=GMI
2074  * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
2075  */
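/* Example: INCLUDE {S1} receiving ALLOW {S2} becomes INCLUDE {S1, S2},
 * with the S2 source timer set to the group membership interval.
 */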
2076 static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
2077                                      struct net_bridge_port_group *pg, void *h_addr,
2078                                      void *srcs, u32 nsrcs, size_t addr_size,
2079                                      int grec_type)
2080 {
2081         struct net_bridge_group_src *ent;
2082         unsigned long now = jiffies;
2083         bool changed = false;
2084         struct br_ip src_ip;
2085         u32 src_idx;
2086
2087         memset(&src_ip, 0, sizeof(src_ip));
2088         src_ip.proto = pg->key.addr.proto;
2089         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2090                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2091                 ent = br_multicast_find_group_src(pg, &src_ip);
2092                 if (!ent) {
2093                         ent = br_multicast_new_group_src(pg, &src_ip);
2094                         if (ent)
2095                                 changed = true;
2096                 }
2097
2098                 if (ent)
2099                         __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2100         }
2101
2102         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2103                                     grec_type))
2104                 changed = true;
2105
2106         return changed;
2107 }
2108
2109 /* State          Msg type      New state                Actions
2110  * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
2111  *                                                       Delete (A-B)
2112  *                                                       Group Timer=GMI
2113  */
2114 static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
2115                                  struct net_bridge_port_group *pg, void *h_addr,
2116                                  void *srcs, u32 nsrcs, size_t addr_size,
2117                                  int grec_type)
2118 {
2119         struct net_bridge_group_src *ent;
2120         struct br_ip src_ip;
2121         u32 src_idx;
2122
2123         hlist_for_each_entry(ent, &pg->src_list, node)
2124                 ent->flags |= BR_SGRP_F_DELETE;
2125
2126         memset(&src_ip, 0, sizeof(src_ip));
2127         src_ip.proto = pg->key.addr.proto;
2128         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2129                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2130                 ent = br_multicast_find_group_src(pg, &src_ip);
2131                 if (ent)
2132                         ent->flags &= ~BR_SGRP_F_DELETE;
2133                 else
2134                         ent = br_multicast_new_group_src(pg, &src_ip);
2135                 if (ent)
2136                         br_multicast_fwd_src_handle(ent);
2137         }
2138
2139         br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2140                                 grec_type);
2141
2142         __grp_src_delete_marked(pg);
2143 }
2144
2145 /* State          Msg type      New state                Actions
2146  * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=GMI
2147  *                                                       Delete (X-A)
2148  *                                                       Delete (Y-A)
2149  *                                                       Group Timer=GMI
2150  */
2151 static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
2152                                  struct net_bridge_port_group *pg, void *h_addr,
2153                                  void *srcs, u32 nsrcs, size_t addr_size,
2154                                  int grec_type)
2155 {
2156         struct net_bridge_group_src *ent;
2157         unsigned long now = jiffies;
2158         bool changed = false;
2159         struct br_ip src_ip;
2160         u32 src_idx;
2161
2162         hlist_for_each_entry(ent, &pg->src_list, node)
2163                 ent->flags |= BR_SGRP_F_DELETE;
2164
2165         memset(&src_ip, 0, sizeof(src_ip));
2166         src_ip.proto = pg->key.addr.proto;
2167         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2168                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2169                 ent = br_multicast_find_group_src(pg, &src_ip);
2170                 if (ent) {
2171                         ent->flags &= ~BR_SGRP_F_DELETE;
2172                 } else {
2173                         ent = br_multicast_new_group_src(pg, &src_ip);
2174                         if (ent) {
2175                                 __grp_src_mod_timer(ent,
2176                                                     now + br_multicast_gmi(brmctx));
2177                                 changed = true;
2178                         }
2179                 }
2180         }
2181
2182         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2183                                     grec_type))
2184                 changed = true;
2185
2186         if (__grp_src_delete_marked(pg))
2187                 changed = true;
2188
2189         return changed;
2190 }
2191
2192 static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
2193                                struct net_bridge_port_group *pg, void *h_addr,
2194                                void *srcs, u32 nsrcs, size_t addr_size,
2195                                int grec_type)
2196 {
2197         bool changed = false;
2198
2199         switch (pg->filter_mode) {
2200         case MCAST_INCLUDE:
2201                 __grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2202                                      grec_type);
2203                 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2204                 changed = true;
2205                 break;
2206         case MCAST_EXCLUDE:
2207                 changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
2208                                                addr_size, grec_type);
2209                 break;
2210         }
2211
2212         pg->filter_mode = MCAST_EXCLUDE;
2213         mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2214
2215         return changed;
2216 }
2217
2218 /* State          Msg type      New state                Actions
2219  * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)            (B)=GMI
2220  *                                                       Send Q(G,A-B)
2221  */
2222 static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
2223                                 struct net_bridge_mcast_port *pmctx,
2224                                 struct net_bridge_port_group *pg, void *h_addr,
2225                                 void *srcs, u32 nsrcs, size_t addr_size,
2226                                 int grec_type)
2227 {
2228         u32 src_idx, to_send = pg->src_ents;
2229         struct net_bridge_group_src *ent;
2230         unsigned long now = jiffies;
2231         bool changed = false;
2232         struct br_ip src_ip;
2233
2234         hlist_for_each_entry(ent, &pg->src_list, node)
2235                 ent->flags |= BR_SGRP_F_SEND;
2236
2237         memset(&src_ip, 0, sizeof(src_ip));
2238         src_ip.proto = pg->key.addr.proto;
2239         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2240                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2241                 ent = br_multicast_find_group_src(pg, &src_ip);
2242                 if (ent) {
2243                         ent->flags &= ~BR_SGRP_F_SEND;
2244                         to_send--;
2245                 } else {
2246                         ent = br_multicast_new_group_src(pg, &src_ip);
2247                         if (ent)
2248                                 changed = true;
2249                 }
2250                 if (ent)
2251                         __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2252         }
2253
2254         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2255                                     grec_type))
2256                 changed = true;
2257
2258         if (to_send)
2259                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2260
2261         return changed;
2262 }
2263
2264 /* State          Msg type      New state                Actions
2265  * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
2266  *                                                       Send Q(G,X-A)
2267  *                                                       Send Q(G)
2268  */
2269 static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
2270                                 struct net_bridge_mcast_port *pmctx,
2271                                 struct net_bridge_port_group *pg, void *h_addr,
2272                                 void *srcs, u32 nsrcs, size_t addr_size,
2273                                 int grec_type)
2274 {
2275         u32 src_idx, to_send = pg->src_ents;
2276         struct net_bridge_group_src *ent;
2277         unsigned long now = jiffies;
2278         bool changed = false;
2279         struct br_ip src_ip;
2280
2281         hlist_for_each_entry(ent, &pg->src_list, node)
2282                 if (timer_pending(&ent->timer))
2283                         ent->flags |= BR_SGRP_F_SEND;
2284
2285         memset(&src_ip, 0, sizeof(src_ip));
2286         src_ip.proto = pg->key.addr.proto;
2287         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2288                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2289                 ent = br_multicast_find_group_src(pg, &src_ip);
2290                 if (ent) {
2291                         if (timer_pending(&ent->timer)) {
2292                                 ent->flags &= ~BR_SGRP_F_SEND;
2293                                 to_send--;
2294                         }
2295                 } else {
2296                         ent = br_multicast_new_group_src(pg, &src_ip);
2297                         if (ent)
2298                                 changed = true;
2299                 }
2300                 if (ent)
2301                         __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2302         }
2303
2304         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2305                                     grec_type))
2306                 changed = true;
2307
2308         if (to_send)
2309                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2310
2311         __grp_send_query_and_rexmit(brmctx, pmctx, pg);
2312
2313         return changed;
2314 }
2315
2316 static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
2317                               struct net_bridge_mcast_port *pmctx,
2318                               struct net_bridge_port_group *pg, void *h_addr,
2319                               void *srcs, u32 nsrcs, size_t addr_size,
2320                               int grec_type)
2321 {
2322         bool changed = false;
2323
2324         switch (pg->filter_mode) {
2325         case MCAST_INCLUDE:
2326                 changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
2327                                               nsrcs, addr_size, grec_type);
2328                 break;
2329         case MCAST_EXCLUDE:
2330                 changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
2331                                               nsrcs, addr_size, grec_type);
2332                 break;
2333         }
2334
2335         if (br_multicast_eht_should_del_pg(pg)) {
2336                 pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2337                 br_multicast_find_del_pg(pg->key.port->br, pg);
2338                 /* a notification has already been sent and we shouldn't
2339                  * access pg after the delete so we have to return false
2340                  */
2341                 changed = false;
2342         }
2343
2344         return changed;
2345 }
2346
2347 /* State          Msg type      New state                Actions
2348  * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
2349  *                                                       Delete (A-B)
2350  *                                                       Send Q(G,A*B)
2351  *                                                       Group Timer=GMI
2352  */
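/* Example: INCLUDE {S1, S2} receiving TO_EX {S2, S3} becomes
 * EXCLUDE ({S2}, {S3}): S1 is deleted, S3 starts with its source timer
 * stopped, Q(G, {S2}) is sent and the group timer is set to GMI.
 */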
2353 static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
2354                                 struct net_bridge_mcast_port *pmctx,
2355                                 struct net_bridge_port_group *pg, void *h_addr,
2356                                 void *srcs, u32 nsrcs, size_t addr_size,
2357                                 int grec_type)
2358 {
2359         struct net_bridge_group_src *ent;
2360         u32 src_idx, to_send = 0;
2361         struct br_ip src_ip;
2362
2363         hlist_for_each_entry(ent, &pg->src_list, node)
2364                 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2365
2366         memset(&src_ip, 0, sizeof(src_ip));
2367         src_ip.proto = pg->key.addr.proto;
2368         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2369                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2370                 ent = br_multicast_find_group_src(pg, &src_ip);
2371                 if (ent) {
2372                         ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
2373                                      BR_SGRP_F_SEND;
2374                         to_send++;
2375                 } else {
2376                         ent = br_multicast_new_group_src(pg, &src_ip);
2377                 }
2378                 if (ent)
2379                         br_multicast_fwd_src_handle(ent);
2380         }
2381
2382         br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2383                                 grec_type);
2384
2385         __grp_src_delete_marked(pg);
2386         if (to_send)
2387                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2388 }
2389
2390 /* State          Msg type      New state                Actions
2391  * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=Group Timer
2392  *                                                       Delete (X-A)
2393  *                                                       Delete (Y-A)
2394  *                                                       Send Q(G,A-Y)
2395  *                                                       Group Timer=GMI
2396  */
2397 static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
2398                                 struct net_bridge_mcast_port *pmctx,
2399                                 struct net_bridge_port_group *pg, void *h_addr,
2400                                 void *srcs, u32 nsrcs, size_t addr_size,
2401                                 int grec_type)
2402 {
2403         struct net_bridge_group_src *ent;
2404         u32 src_idx, to_send = 0;
2405         bool changed = false;
2406         struct br_ip src_ip;
2407
2408         hlist_for_each_entry(ent, &pg->src_list, node)
2409                 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2410
2411         memset(&src_ip, 0, sizeof(src_ip));
2412         src_ip.proto = pg->key.addr.proto;
2413         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2414                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2415                 ent = br_multicast_find_group_src(pg, &src_ip);
2416                 if (ent) {
2417                         ent->flags &= ~BR_SGRP_F_DELETE;
2418                 } else {
2419                         ent = br_multicast_new_group_src(pg, &src_ip);
2420                         if (ent) {
2421                                 __grp_src_mod_timer(ent, pg->timer.expires);
2422                                 changed = true;
2423                         }
2424                 }
2425                 if (ent && timer_pending(&ent->timer)) {
2426                         ent->flags |= BR_SGRP_F_SEND;
2427                         to_send++;
2428                 }
2429         }
2430
2431         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2432                                     grec_type))
2433                 changed = true;
2434
2435         if (__grp_src_delete_marked(pg))
2436                 changed = true;
2437         if (to_send)
2438                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2439
2440         return changed;
2441 }
2442
2443 static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
2444                               struct net_bridge_mcast_port *pmctx,
2445                               struct net_bridge_port_group *pg, void *h_addr,
2446                               void *srcs, u32 nsrcs, size_t addr_size,
2447                               int grec_type)
2448 {
2449         bool changed = false;
2450
2451         switch (pg->filter_mode) {
2452         case MCAST_INCLUDE:
2453                 __grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
2454                                     addr_size, grec_type);
2455                 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2456                 changed = true;
2457                 break;
2458         case MCAST_EXCLUDE:
2459                 changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
2460                                               nsrcs, addr_size, grec_type);
2461                 break;
2462         }
2463
2464         pg->filter_mode = MCAST_EXCLUDE;
2465         mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2466
2467         return changed;
2468 }
2469
2470 /* State          Msg type      New state                Actions
2471  * INCLUDE (A)    BLOCK (B)     INCLUDE (A)              Send Q(G,A*B)
2472  */
2473 static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
2474                                  struct net_bridge_mcast_port *pmctx,
2475                                  struct net_bridge_port_group *pg, void *h_addr,
2476                                  void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2477 {
2478         struct net_bridge_group_src *ent;
2479         u32 src_idx, to_send = 0;
2480         bool changed = false;
2481         struct br_ip src_ip;
2482
2483         hlist_for_each_entry(ent, &pg->src_list, node)
2484                 ent->flags &= ~BR_SGRP_F_SEND;
2485
2486         memset(&src_ip, 0, sizeof(src_ip));
2487         src_ip.proto = pg->key.addr.proto;
2488         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2489                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2490                 ent = br_multicast_find_group_src(pg, &src_ip);
2491                 if (ent) {
2492                         ent->flags |= BR_SGRP_F_SEND;
2493                         to_send++;
2494                 }
2495         }
2496
2497         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2498                                     grec_type))
2499                 changed = true;
2500
2501         if (to_send)
2502                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2503
2504         return changed;
2505 }
2506
2507 /* State          Msg type      New state                Actions
2508  * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y)      (A-X-Y)=Group Timer
2509  *                                                       Send Q(G,A-Y)
2510  */
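/* Example: EXCLUDE ({}, {S1}) receiving BLOCK {S1, S2} becomes
 * EXCLUDE ({S2}, {S1}): S2 is created with the group timer as its source
 * timer and Q(G, {S2}) is sent; S1 stays excluded and is not queried.
 */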
2511 static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
2512                                  struct net_bridge_mcast_port *pmctx,
2513                                  struct net_bridge_port_group *pg, void *h_addr,
2514                                  void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2515 {
2516         struct net_bridge_group_src *ent;
2517         u32 src_idx, to_send = 0;
2518         bool changed = false;
2519         struct br_ip src_ip;
2520
2521         hlist_for_each_entry(ent, &pg->src_list, node)
2522                 ent->flags &= ~BR_SGRP_F_SEND;
2523
2524         memset(&src_ip, 0, sizeof(src_ip));
2525         src_ip.proto = pg->key.addr.proto;
2526         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2527                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2528                 ent = br_multicast_find_group_src(pg, &src_ip);
2529                 if (!ent) {
2530                         ent = br_multicast_new_group_src(pg, &src_ip);
2531                         if (ent) {
2532                                 __grp_src_mod_timer(ent, pg->timer.expires);
2533                                 changed = true;
2534                         }
2535                 }
2536                 if (ent && timer_pending(&ent->timer)) {
2537                         ent->flags |= BR_SGRP_F_SEND;
2538                         to_send++;
2539                 }
2540         }
2541
2542         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2543                                     grec_type))
2544                 changed = true;
2545
2546         if (to_send)
2547                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2548
2549         return changed;
2550 }
2551
2552 static bool br_multicast_block(struct net_bridge_mcast *brmctx,
2553                                struct net_bridge_mcast_port *pmctx,
2554                                struct net_bridge_port_group *pg, void *h_addr,
2555                                void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2556 {
2557         bool changed = false;
2558
2559         switch (pg->filter_mode) {
2560         case MCAST_INCLUDE:
2561                 changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
2562                                                nsrcs, addr_size, grec_type);
2563                 break;
2564         case MCAST_EXCLUDE:
2565                 changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
2566                                                nsrcs, addr_size, grec_type);
2567                 break;
2568         }
2569
2570         if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
2571             br_multicast_eht_should_del_pg(pg)) {
2572                 if (br_multicast_eht_should_del_pg(pg))
2573                         pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2574                 br_multicast_find_del_pg(pg->key.port->br, pg);
2575                 /* a notification has already been sent and we shouldn't
2576                  * access pg after the delete so we have to return false
2577                  */
2578                 changed = false;
2579         }
2580
2581         return changed;
2582 }
2583
2584 static struct net_bridge_port_group *
2585 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2586                        struct net_bridge_port *p,
2587                        const unsigned char *src)
2588 {
2589         struct net_bridge *br __maybe_unused = mp->br;
2590         struct net_bridge_port_group *pg;
2591
2592         for (pg = mlock_dereference(mp->ports, br);
2593              pg;
2594              pg = mlock_dereference(pg->next, br))
2595                 if (br_port_group_equal(pg, p, src))
2596                         return pg;
2597
2598         return NULL;
2599 }
2600
2601 static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
2602                                          struct net_bridge_mcast_port *pmctx,
2603                                          struct sk_buff *skb,
2604                                          u16 vid)
2605 {
2606         bool igmpv2 = brmctx->multicast_igmp_version == 2;
2607         struct net_bridge_mdb_entry *mdst;
2608         struct net_bridge_port_group *pg;
2609         const unsigned char *src;
2610         struct igmpv3_report *ih;
2611         struct igmpv3_grec *grec;
2612         int i, len, num, type;
2613         __be32 group, *h_addr;
2614         bool changed = false;
2615         int err = 0;
2616         u16 nsrcs;
2617
2618         ih = igmpv3_report_hdr(skb);
2619         num = ntohs(ih->ngrec);
2620         len = skb_transport_offset(skb) + sizeof(*ih);
2621
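        /* Walk the group records: a TO_IN/IS_IN record with no sources is
         * treated as a leave (IGMPv2 compatible behaviour), anything else
         * joins the group and, when the receiving context runs IGMPv3, is
         * fed to the per-source state machines below under multicast_lock.
         */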
2622         for (i = 0; i < num; i++) {
2623                 len += sizeof(*grec);
2624                 if (!ip_mc_may_pull(skb, len))
2625                         return -EINVAL;
2626
2627                 grec = (void *)(skb->data + len - sizeof(*grec));
2628                 group = grec->grec_mca;
2629                 type = grec->grec_type;
2630                 nsrcs = ntohs(grec->grec_nsrcs);
2631
2632                 len += nsrcs * 4;
2633                 if (!ip_mc_may_pull(skb, len))
2634                         return -EINVAL;
2635
2636                 switch (type) {
2637                 case IGMPV3_MODE_IS_INCLUDE:
2638                 case IGMPV3_MODE_IS_EXCLUDE:
2639                 case IGMPV3_CHANGE_TO_INCLUDE:
2640                 case IGMPV3_CHANGE_TO_EXCLUDE:
2641                 case IGMPV3_ALLOW_NEW_SOURCES:
2642                 case IGMPV3_BLOCK_OLD_SOURCES:
2643                         break;
2644
2645                 default:
2646                         continue;
2647                 }
2648
2649                 src = eth_hdr(skb)->h_source;
2650                 if (nsrcs == 0 &&
2651                     (type == IGMPV3_CHANGE_TO_INCLUDE ||
2652                      type == IGMPV3_MODE_IS_INCLUDE)) {
2653                         if (!pmctx || igmpv2) {
2654                                 br_ip4_multicast_leave_group(brmctx, pmctx,
2655                                                              group, vid, src);
2656                                 continue;
2657                         }
2658                 } else {
2659                         err = br_ip4_multicast_add_group(brmctx, pmctx, group,
2660                                                          vid, src, igmpv2);
2661                         if (err)
2662                                 break;
2663                 }
2664
2665                 if (!pmctx || igmpv2)
2666                         continue;
2667
2668                 spin_lock_bh(&brmctx->br->multicast_lock);
2669                 if (!br_multicast_ctx_should_use(brmctx, pmctx))
2670                         goto unlock_continue;
2671
2672                 mdst = br_mdb_ip4_get(brmctx->br, group, vid);
2673                 if (!mdst)
2674                         goto unlock_continue;
2675                 pg = br_multicast_find_port(mdst, pmctx->port, src);
2676                 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2677                         goto unlock_continue;
2678                 /* reload grec and host addr */
2679                 grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
2680                 h_addr = &ip_hdr(skb)->saddr;
2681                 switch (type) {
2682                 case IGMPV3_ALLOW_NEW_SOURCES:
2683                         changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2684                                                            grec->grec_src,
2685                                                            nsrcs, sizeof(__be32), type);
2686                         break;
2687                 case IGMPV3_MODE_IS_INCLUDE:
2688                         changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2689                                                            grec->grec_src,
2690                                                            nsrcs, sizeof(__be32), type);
2691                         break;
2692                 case IGMPV3_MODE_IS_EXCLUDE:
2693                         changed = br_multicast_isexc(brmctx, pg, h_addr,
2694                                                      grec->grec_src,
2695                                                      nsrcs, sizeof(__be32), type);
2696                         break;
2697                 case IGMPV3_CHANGE_TO_INCLUDE:
2698                         changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
2699                                                     grec->grec_src,
2700                                                     nsrcs, sizeof(__be32), type);
2701                         break;
2702                 case IGMPV3_CHANGE_TO_EXCLUDE:
2703                         changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
2704                                                     grec->grec_src,
2705                                                     nsrcs, sizeof(__be32), type);
2706                         break;
2707                 case IGMPV3_BLOCK_OLD_SOURCES:
2708                         changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
2709                                                      grec->grec_src,
2710                                                      nsrcs, sizeof(__be32), type);
2711                         break;
2712                 }
2713                 if (changed)
2714                         br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
2715 unlock_continue:
2716                 spin_unlock_bh(&brmctx->br->multicast_lock);
2717         }
2718
2719         return err;
2720 }
2721
2722 #if IS_ENABLED(CONFIG_IPV6)
2723 static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
2724                                         struct net_bridge_mcast_port *pmctx,
2725                                         struct sk_buff *skb,
2726                                         u16 vid)
2727 {
2728         bool mldv1 = brmctx->multicast_mld_version == 1;
2729         struct net_bridge_mdb_entry *mdst;
2730         struct net_bridge_port_group *pg;
2731         unsigned int nsrcs_offset;
2732         struct mld2_report *mld2r;
2733         const unsigned char *src;
2734         struct in6_addr *h_addr;
2735         struct mld2_grec *grec;
2736         unsigned int grec_len;
2737         bool changed = false;
2738         int i, len, num;
2739         int err = 0;
2740
2741         if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
2742                 return -EINVAL;
2743
2744         mld2r = (struct mld2_report *)icmp6_hdr(skb);
2745         num = ntohs(mld2r->mld2r_ngrec);
2746         len = skb_transport_offset(skb) + sizeof(*mld2r);
2747
2748         for (i = 0; i < num; i++) {
2749                 __be16 *_nsrcs, __nsrcs;
2750                 u16 nsrcs;
2751
2752                 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
2753
2754                 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
2755                     nsrcs_offset + sizeof(__nsrcs))
2756                         return -EINVAL;
2757
2758                 _nsrcs = skb_header_pointer(skb, nsrcs_offset,
2759                                             sizeof(__nsrcs), &__nsrcs);
2760                 if (!_nsrcs)
2761                         return -EINVAL;
2762
2763                 nsrcs = ntohs(*_nsrcs);
2764                 grec_len = struct_size(grec, grec_src, nsrcs);
2765
2766                 if (!ipv6_mc_may_pull(skb, len + grec_len))
2767                         return -EINVAL;
2768
2769                 grec = (struct mld2_grec *)(skb->data + len);
2770                 len += grec_len;
2771
2772                 switch (grec->grec_type) {
2773                 case MLD2_MODE_IS_INCLUDE:
2774                 case MLD2_MODE_IS_EXCLUDE:
2775                 case MLD2_CHANGE_TO_INCLUDE:
2776                 case MLD2_CHANGE_TO_EXCLUDE:
2777                 case MLD2_ALLOW_NEW_SOURCES:
2778                 case MLD2_BLOCK_OLD_SOURCES:
2779                         break;
2780
2781                 default:
2782                         continue;
2783                 }
2784
2785                 src = eth_hdr(skb)->h_source;
2786                 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
2787                      grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
2788                     nsrcs == 0) {
2789                         if (!pmctx || mldv1) {
2790                                 br_ip6_multicast_leave_group(brmctx, pmctx,
2791                                                              &grec->grec_mca,
2792                                                              vid, src);
2793                                 continue;
2794                         }
2795                 } else {
2796                         err = br_ip6_multicast_add_group(brmctx, pmctx,
2797                                                          &grec->grec_mca, vid,
2798                                                          src, mldv1);
2799                         if (err)
2800                                 break;
2801                 }
2802
2803                 if (!pmctx || mldv1)
2804                         continue;
2805
2806                 spin_lock_bh(&brmctx->br->multicast_lock);
2807                 if (!br_multicast_ctx_should_use(brmctx, pmctx))
2808                         goto unlock_continue;
2809
2810                 mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
2811                 if (!mdst)
2812                         goto unlock_continue;
2813                 pg = br_multicast_find_port(mdst, pmctx->port, src);
2814                 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2815                         goto unlock_continue;
2816                 h_addr = &ipv6_hdr(skb)->saddr;
2817                 switch (grec->grec_type) {
2818                 case MLD2_ALLOW_NEW_SOURCES:
2819                         changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2820                                                            grec->grec_src, nsrcs,
2821                                                            sizeof(struct in6_addr),
2822                                                            grec->grec_type);
2823                         break;
2824                 case MLD2_MODE_IS_INCLUDE:
2825                         changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2826                                                            grec->grec_src, nsrcs,
2827                                                            sizeof(struct in6_addr),
2828                                                            grec->grec_type);
2829                         break;
2830                 case MLD2_MODE_IS_EXCLUDE:
2831                         changed = br_multicast_isexc(brmctx, pg, h_addr,
2832                                                      grec->grec_src, nsrcs,
2833                                                      sizeof(struct in6_addr),
2834                                                      grec->grec_type);
2835                         break;
2836                 case MLD2_CHANGE_TO_INCLUDE:
2837                         changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
2838                                                     grec->grec_src, nsrcs,
2839                                                     sizeof(struct in6_addr),
2840                                                     grec->grec_type);
2841                         break;
2842                 case MLD2_CHANGE_TO_EXCLUDE:
2843                         changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
2844                                                     grec->grec_src, nsrcs,
2845                                                     sizeof(struct in6_addr),
2846                                                     grec->grec_type);
2847                         break;
2848                 case MLD2_BLOCK_OLD_SOURCES:
2849                         changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
2850                                                      grec->grec_src, nsrcs,
2851                                                      sizeof(struct in6_addr),
2852                                                      grec->grec_type);
2853                         break;
2854                 }
2855                 if (changed)
2856                         br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
2857 unlock_continue:
2858                 spin_unlock_bh(&brmctx->br->multicast_lock);
2859         }
2860
2861         return err;
2862 }
2863 #endif
2864
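/* IGMP/MLD querier election (RFC 2236 / RFC 2710): the querier with the
 * lowest source address wins.  Take over the querier state if @saddr is
 * not higher than the currently recorded querier address (or none is
 * recorded yet), or if neither the own-query nor the other-querier timer
 * is running, i.e. there is no active querier at all.  Returns true if
 * the querier information was updated.
 */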
2865 static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
2866                                         struct net_bridge_mcast_port *pmctx,
2867                                         struct br_ip *saddr)
2868 {
2869         int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
2870         struct timer_list *own_timer, *other_timer;
2871         struct bridge_mcast_querier *querier;
2872
2873         switch (saddr->proto) {
2874         case htons(ETH_P_IP):
2875                 querier = &brmctx->ip4_querier;
2876                 own_timer = &brmctx->ip4_own_query.timer;
2877                 other_timer = &brmctx->ip4_other_query.timer;
2878                 if (!querier->addr.src.ip4 ||
2879                     ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
2880                         goto update;
2881                 break;
2882 #if IS_ENABLED(CONFIG_IPV6)
2883         case htons(ETH_P_IPV6):
2884                 querier = &brmctx->ip6_querier;
2885                 own_timer = &brmctx->ip6_own_query.timer;
2886                 other_timer = &brmctx->ip6_other_query.timer;
2887                 if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
2888                         goto update;
2889                 break;
2890 #endif
2891         default:
2892                 return false;
2893         }
2894
2895         if (!timer_pending(own_timer) && !timer_pending(other_timer))
2896                 goto update;
2897
2898         return false;
2899
2900 update:
2901         br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);
2902
2903         return true;
2904 }
2905
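/* Map the ifindex recorded for the selected querier back to a bridge port.
 * Returns NULL if the querier is the bridge itself (ifindex 0), or if the
 * device no longer exists or is no longer a port of this bridge.
 */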
2906 static struct net_bridge_port *
2907 __br_multicast_get_querier_port(struct net_bridge *br,
2908                                 const struct bridge_mcast_querier *querier)
2909 {
2910         int port_ifidx = READ_ONCE(querier->port_ifidx);
2911         struct net_bridge_port *p;
2912         struct net_device *dev;
2913
2914         if (port_ifidx == 0)
2915                 return NULL;
2916
2917         dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
2918         if (!dev)
2919                 return NULL;
2920         p = br_port_get_rtnl_rcu(dev);
2921         if (!p || p->br != br)
2922                 return NULL;
2923
2924         return p;
2925 }
2926
2927 size_t br_multicast_querier_state_size(void)
2928 {
2929         return nla_total_size(0) +              /* nest attribute */
2930                nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
2931                nla_total_size(sizeof(int)) +    /* BRIDGE_QUERIER_IP_PORT */
2932                nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
2933 #if IS_ENABLED(CONFIG_IPV6)
2934                nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
2935                nla_total_size(sizeof(int)) +             /* BRIDGE_QUERIER_IPV6_PORT */
2936                nla_total_size_64bit(sizeof(u64)) +       /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
2937 #endif
2938                0;
2939 }
2940
2941 /* protected by rtnl or rcu */
2942 int br_multicast_dump_querier_state(struct sk_buff *skb,
2943                                     const struct net_bridge_mcast *brmctx,
2944                                     int nest_attr)
2945 {
2946         struct bridge_mcast_querier querier = {};
2947         struct net_bridge_port *p;
2948         struct nlattr *nest;
2949
2950         if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
2951             br_multicast_ctx_vlan_global_disabled(brmctx))
2952                 return 0;
2953
2954         nest = nla_nest_start(skb, nest_attr);
2955         if (!nest)
2956                 return -EMSGSIZE;
2957
2958         rcu_read_lock();
2959         if (!brmctx->multicast_querier &&
2960             !timer_pending(&brmctx->ip4_other_query.timer))
2961                 goto out_v6;
2962
2963         br_multicast_read_querier(&brmctx->ip4_querier, &querier);
2964         if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
2965                             querier.addr.src.ip4)) {
2966                 rcu_read_unlock();
2967                 goto out_err;
2968         }
2969
2970         p = __br_multicast_get_querier_port(brmctx->br, &querier);
2971         if (timer_pending(&brmctx->ip4_other_query.timer) &&
2972             (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
2973                                br_timer_value(&brmctx->ip4_other_query.timer),
2974                                BRIDGE_QUERIER_PAD) ||
2975              (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
2976                 rcu_read_unlock();
2977                 goto out_err;
2978         }
2979
2980 out_v6:
2981 #if IS_ENABLED(CONFIG_IPV6)
2982         if (!brmctx->multicast_querier &&
2983             !timer_pending(&brmctx->ip6_other_query.timer))
2984                 goto out;
2985
2986         br_multicast_read_querier(&brmctx->ip6_querier, &querier);
2987         if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
2988                              &querier.addr.src.ip6)) {
2989                 rcu_read_unlock();
2990                 goto out_err;
2991         }
2992
2993         p = __br_multicast_get_querier_port(brmctx->br, &querier);
2994         if (timer_pending(&brmctx->ip6_other_query.timer) &&
2995             (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
2996                                br_timer_value(&brmctx->ip6_other_query.timer),
2997                                BRIDGE_QUERIER_PAD) ||
2998              (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
2999                                p->dev->ifindex)))) {
3000                 rcu_read_unlock();
3001                 goto out_err;
3002         }
3003 out:
3004 #endif
3005         rcu_read_unlock();
3006         nla_nest_end(skb, nest);
3007         if (!nla_len(nest))
3008                 nla_nest_cancel(skb, nest);
3009
3010         return 0;
3011
3012 out_err:
3013         nla_nest_cancel(skb, nest);
3014         return -EMSGSIZE;
3015 }
3016
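/* Remember when the currently known other querier is due to expire and
 * (re)arm the other-querier present timer for a full querier interval.
 */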
3017 static void
3018 br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
3019                                 struct bridge_mcast_other_query *query,
3020                                 unsigned long max_delay)
3021 {
3022         if (!timer_pending(&query->timer))
3023                 query->delay_time = jiffies + max_delay;
3024
3025         mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
3026 }
3027
3028 static void br_port_mc_router_state_change(struct net_bridge_port *p,
3029                                            bool is_mc_router)
3030 {
3031         struct switchdev_attr attr = {
3032                 .orig_dev = p->dev,
3033                 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
3034                 .flags = SWITCHDEV_F_DEFER,
3035                 .u.mrouter = is_mc_router,
3036         };
3037
3038         switchdev_port_attr_set(p->dev, &attr, NULL);
3039 }
3040
3041 static struct net_bridge_port *
3042 br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
3043                              struct hlist_head *mc_router_list,
3044                              struct hlist_node *rlist)
3045 {
3046         struct net_bridge_mcast_port *pmctx;
3047
3048 #if IS_ENABLED(CONFIG_IPV6)
3049         if (mc_router_list == &brmctx->ip6_mc_router_list)
3050                 pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
3051                                     ip6_rlist);
3052         else
3053 #endif
3054                 pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
3055                                     ip4_rlist);
3056
3057         return pmctx->port;
3058 }
3059
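/* Find the node in @mc_router_list after which @port has to be linked so
 * that the list stays ordered by port pointer value; NULL means the new
 * node belongs at the head of the list.
 */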
3060 static struct hlist_node *
3061 br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
3062                             struct net_bridge_port *port,
3063                             struct hlist_head *mc_router_list)
3064 {
3066         struct hlist_node *slot = NULL;
3067         struct net_bridge_port *p;
3068         struct hlist_node *rlist;
3069
3070         hlist_for_each(rlist, mc_router_list) {
3071                 p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
3072
3073                 if ((unsigned long)port >= (unsigned long)p)
3074                         break;
3075
3076                 slot = rlist;
3077         }
3078
3079         return slot;
3080 }
3081
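/* Return true if the port is not yet a multicast router port for the other
 * protocol family, i.e. adding it for @rnode's family is the first time
 * this port becomes a multicast router port at all.
 */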
3082 static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
3083                                            struct hlist_node *rnode)
3084 {
3085 #if IS_ENABLED(CONFIG_IPV6)
3086         if (rnode != &pmctx->ip6_rlist)
3087                 return hlist_unhashed(&pmctx->ip6_rlist);
3088         else
3089                 return hlist_unhashed(&pmctx->ip4_rlist);
3090 #else
3091         return true;
3092 #endif
3093 }
3094
3095 /* Add port to router_list.
3096  * The list is kept ordered by pointer value; writers hold
3097  * br->multicast_lock and readers are protected by RCU.
3098  */
3099 static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
3100                                     struct net_bridge_mcast_port *pmctx,
3101                                     struct hlist_node *rlist,
3102                                     struct hlist_head *mc_router_list)
3103 {
3104         struct hlist_node *slot;
3105
3106         if (!hlist_unhashed(rlist))
3107                 return;
3108
3109         slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);
3110
3111         if (slot)
3112                 hlist_add_behind_rcu(rlist, slot);
3113         else
3114                 hlist_add_head_rcu(rlist, mc_router_list);
3115
3116         /* For backwards compatibility for now, only notify if we
3117          * switched from no IPv4/IPv6 multicast router to a new
3118          * IPv4 or IPv6 multicast router.
3119          */
3120         if (br_multicast_no_router_otherpf(pmctx, rlist)) {
3121                 br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
3122                 br_port_mc_router_state_change(pmctx->port, true);
3123         }
3124 }
3125
3126 /* Add port to router_list.
3127  * The list is kept ordered by pointer value; writers hold
3128  * br->multicast_lock and readers are protected by RCU.
3129  */
3130 static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
3131                                         struct net_bridge_mcast_port *pmctx)
3132 {
3133         br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
3134                                 &brmctx->ip4_mc_router_list);
3135 }
3136
3137 /* Add port to router_list.
3138  * The list is kept ordered by pointer value; writers hold
3139  * br->multicast_lock and readers are protected by RCU.
3140  */
3141 static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
3142                                         struct net_bridge_mcast_port *pmctx)
3143 {
3144 #if IS_ENABLED(CONFIG_IPV6)
3145         br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
3146                                 &brmctx->ip6_mc_router_list);
3147 #endif
3148 }
3149
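/* Note that a multicast router was seen (query or PIM hello received) and
 * (re)arm the corresponding router timer.  With pmctx == NULL the bridge
 * itself is marked (only in the temporary-query router mode); ports
 * configured as permanently enabled or disabled routers are left alone.
 */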
3150 static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
3151                                      struct net_bridge_mcast_port *pmctx,
3152                                      struct timer_list *timer,
3153                                      struct hlist_node *rlist,
3154                                      struct hlist_head *mc_router_list)
3155 {
3156         unsigned long now = jiffies;
3157
3158         if (!br_multicast_ctx_should_use(brmctx, pmctx))
3159                 return;
3160
3161         if (!pmctx) {
3162                 if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
3163                         if (!br_ip4_multicast_is_router(brmctx) &&
3164                             !br_ip6_multicast_is_router(brmctx))
3165                                 br_mc_router_state_change(brmctx->br, true);
3166                         mod_timer(timer, now + brmctx->multicast_querier_interval);
3167                 }
3168                 return;
3169         }
3170
3171         if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
3172             pmctx->multicast_router == MDB_RTR_TYPE_PERM)
3173                 return;
3174
3175         br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
3176         mod_timer(timer, now + brmctx->multicast_querier_interval);
3177 }
3178
3179 static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
3180                                          struct net_bridge_mcast_port *pmctx)
3181 {
3182         struct timer_list *timer = &brmctx->ip4_mc_router_timer;
3183         struct hlist_node *rlist = NULL;
3184
3185         if (pmctx) {
3186                 timer = &pmctx->ip4_mc_router_timer;
3187                 rlist = &pmctx->ip4_rlist;
3188         }
3189
3190         br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3191                                  &brmctx->ip4_mc_router_list);
3192 }
3193
3194 static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
3195                                          struct net_bridge_mcast_port *pmctx)
3196 {
3197 #if IS_ENABLED(CONFIG_IPV6)
3198         struct timer_list *timer = &brmctx->ip6_mc_router_timer;
3199         struct hlist_node *rlist = NULL;
3200
3201         if (pmctx) {
3202                 timer = &pmctx->ip6_mc_router_timer;
3203                 rlist = &pmctx->ip6_rlist;
3204         }
3205
3206         br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3207                                  &brmctx->ip6_mc_router_list);
3208 #endif
3209 }
3210
3211 static void
3212 br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
3213                                 struct net_bridge_mcast_port *pmctx,
3214                                 struct bridge_mcast_other_query *query,
3215                                 struct br_ip *saddr,
3216                                 unsigned long max_delay)
3217 {
3218         if (!br_multicast_select_querier(brmctx, pmctx, saddr))
3219                 return;
3220
3221         br_multicast_update_query_timer(brmctx, query, max_delay);
3222         br_ip4_multicast_mark_router(brmctx, pmctx);
3223 }
3224
3225 #if IS_ENABLED(CONFIG_IPV6)
3226 static void
3227 br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
3228                                 struct net_bridge_mcast_port *pmctx,
3229                                 struct bridge_mcast_other_query *query,
3230                                 struct br_ip *saddr,
3231                                 unsigned long max_delay)
3232 {
3233         if (!br_multicast_select_querier(brmctx, pmctx, saddr))
3234                 return;
3235
3236         br_multicast_update_query_timer(brmctx, query, max_delay);
3237         br_ip6_multicast_mark_router(brmctx, pmctx);
3238 }
3239 #endif
3240
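/* Process an incoming IGMP query.  A general query (group address 0) feeds
 * querier election and arms the other-querier timer; a group-specific query
 * lowers the membership timers of the matching mdb entry and its ports so
 * that silent members expire quickly.
 */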
3241 static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
3242                                    struct net_bridge_mcast_port *pmctx,
3243                                    struct sk_buff *skb,
3244                                    u16 vid)
3245 {
3246         unsigned int transport_len = ip_transport_len(skb);
3247         const struct iphdr *iph = ip_hdr(skb);
3248         struct igmphdr *ih = igmp_hdr(skb);
3249         struct net_bridge_mdb_entry *mp;
3250         struct igmpv3_query *ih3;
3251         struct net_bridge_port_group *p;
3252         struct net_bridge_port_group __rcu **pp;
3253         struct br_ip saddr = {};
3254         unsigned long max_delay;
3255         unsigned long now = jiffies;
3256         __be32 group;
3257
3258         spin_lock(&brmctx->br->multicast_lock);
3259         if (!br_multicast_ctx_should_use(brmctx, pmctx))
3260                 goto out;
3261
3262         group = ih->group;
3263
3264         if (transport_len == sizeof(*ih)) {
3265                 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
3266
3267                 if (!max_delay) {
3268                         max_delay = 10 * HZ;
3269                         group = 0;
3270                 }
3271         } else if (transport_len >= sizeof(*ih3)) {
3272                 ih3 = igmpv3_query_hdr(skb);
3273                 if (ih3->nsrcs ||
3274                     (brmctx->multicast_igmp_version == 3 && group &&
3275                      ih3->suppress))
3276                         goto out;
3277
3278                 max_delay = ih3->code ?
3279                             IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
3280         } else {
3281                 goto out;
3282         }
3283
3284         if (!group) {
3285                 saddr.proto = htons(ETH_P_IP);
3286                 saddr.src.ip4 = iph->saddr;
3287
3288                 br_ip4_multicast_query_received(brmctx, pmctx,
3289                                                 &brmctx->ip4_other_query,
3290                                                 &saddr, max_delay);
3291                 goto out;
3292         }
3293
3294         mp = br_mdb_ip4_get(brmctx->br, group, vid);
3295         if (!mp)
3296                 goto out;
3297
3298         max_delay *= brmctx->multicast_last_member_count;
3299
3300         if (mp->host_joined &&
3301             (timer_pending(&mp->timer) ?
3302              time_after(mp->timer.expires, now + max_delay) :
3303              try_to_del_timer_sync(&mp->timer) >= 0))
3304                 mod_timer(&mp->timer, now + max_delay);
3305
3306         for (pp = &mp->ports;
3307              (p = mlock_dereference(*pp, brmctx->br)) != NULL;
3308              pp = &p->next) {
3309                 if (timer_pending(&p->timer) ?
3310                     time_after(p->timer.expires, now + max_delay) :
3311                     try_to_del_timer_sync(&p->timer) >= 0 &&
3312                     (brmctx->multicast_igmp_version == 2 ||
3313                      p->filter_mode == MCAST_EXCLUDE))
3314                         mod_timer(&p->timer, now + max_delay);
3315         }
3316
3317 out:
3318         spin_unlock(&brmctx->br->multicast_lock);
3319 }
3320
3321 #if IS_ENABLED(CONFIG_IPV6)
3322 static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
3323                                   struct net_bridge_mcast_port *pmctx,
3324                                   struct sk_buff *skb,
3325                                   u16 vid)
3326 {
3327         unsigned int transport_len = ipv6_transport_len(skb);
3328         struct mld_msg *mld;
3329         struct net_bridge_mdb_entry *mp;
3330         struct mld2_query *mld2q;
3331         struct net_bridge_port_group *p;
3332         struct net_bridge_port_group __rcu **pp;
3333         struct br_ip saddr = {};
3334         unsigned long max_delay;
3335         unsigned long now = jiffies;
3336         unsigned int offset = skb_transport_offset(skb);
3337         const struct in6_addr *group = NULL;
3338         bool is_general_query;
3339         int err = 0;
3340
3341         spin_lock(&brmctx->br->multicast_lock);
3342         if (!br_multicast_ctx_should_use(brmctx, pmctx))
3343                 goto out;
3344
3345         if (transport_len == sizeof(*mld)) {
3346                 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
3347                         err = -EINVAL;
3348                         goto out;
3349                 }
3350                 mld = (struct mld_msg *) icmp6_hdr(skb);
3351                 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
3352                 if (max_delay)
3353                         group = &mld->mld_mca;
3354         } else {
3355                 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
3356                         err = -EINVAL;
3357                         goto out;
3358                 }
3359                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
3360                 if (!mld2q->mld2q_nsrcs)
3361                         group = &mld2q->mld2q_mca;
3362                 if (brmctx->multicast_mld_version == 2 &&
3363                     !ipv6_addr_any(&mld2q->mld2q_mca) &&
3364                     mld2q->mld2q_suppress)
3365                         goto out;
3366
3367                 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
3368         }
3369
3370         is_general_query = group && ipv6_addr_any(group);
3371
3372         if (is_general_query) {
3373                 saddr.proto = htons(ETH_P_IPV6);
3374                 saddr.src.ip6 = ipv6_hdr(skb)->saddr;
3375
3376                 br_ip6_multicast_query_received(brmctx, pmctx,
3377                                                 &brmctx->ip6_other_query,
3378                                                 &saddr, max_delay);
3379                 goto out;
3380         } else if (!group) {
3381                 goto out;
3382         }
3383
3384         mp = br_mdb_ip6_get(brmctx->br, group, vid);
3385         if (!mp)
3386                 goto out;
3387
3388         max_delay *= brmctx->multicast_last_member_count;
3389         if (mp->host_joined &&
3390             (timer_pending(&mp->timer) ?
3391              time_after(mp->timer.expires, now + max_delay) :
3392              try_to_del_timer_sync(&mp->timer) >= 0))
3393                 mod_timer(&mp->timer, now + max_delay);
3394
3395         for (pp = &mp->ports;
3396              (p = mlock_dereference(*pp, brmctx->br)) != NULL;
3397              pp = &p->next) {
3398                 if (timer_pending(&p->timer) ?
3399                     time_after(p->timer.expires, now + max_delay) :
3400                     try_to_del_timer_sync(&p->timer) >= 0 &&
3401                     (brmctx->multicast_mld_version == 1 ||
3402                      p->filter_mode == MCAST_EXCLUDE))
3403                         mod_timer(&p->timer, now + max_delay);
3404         }
3405
3406 out:
3407         spin_unlock(&brmctx->br->multicast_lock);
3408         return err;
3409 }
3410 #endif
3411
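/* Handle an IGMP/MLD leave (done) message.  With fast-leave enabled the
 * matching port group entry is removed immediately (unless permanent).
 * Otherwise, when no other querier is active, shrink the remaining
 * membership timers to last_member_count * last_member_interval and, if we
 * are the querier, also send a group-specific query ourselves.
 */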
3412 static void
3413 br_multicast_leave_group(struct net_bridge_mcast *brmctx,
3414                          struct net_bridge_mcast_port *pmctx,
3415                          struct br_ip *group,
3416                          struct bridge_mcast_other_query *other_query,
3417                          struct bridge_mcast_own_query *own_query,
3418                          const unsigned char *src)
3419 {
3420         struct net_bridge_mdb_entry *mp;
3421         struct net_bridge_port_group *p;
3422         unsigned long now;
3423         unsigned long time;
3424
3425         spin_lock(&brmctx->br->multicast_lock);
3426         if (!br_multicast_ctx_should_use(brmctx, pmctx))
3427                 goto out;
3428
3429         mp = br_mdb_ip_get(brmctx->br, group);
3430         if (!mp)
3431                 goto out;
3432
3433         if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
3434                 struct net_bridge_port_group __rcu **pp;
3435
3436                 for (pp = &mp->ports;
3437                      (p = mlock_dereference(*pp, brmctx->br)) != NULL;
3438                      pp = &p->next) {
3439                         if (!br_port_group_equal(p, pmctx->port, src))
3440                                 continue;
3441
3442                         if (p->flags & MDB_PG_FLAGS_PERMANENT)
3443                                 break;
3444
3445                         p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
3446                         br_multicast_del_pg(mp, p, pp);
3447                 }
3448                 goto out;
3449         }
3450
3451         if (timer_pending(&other_query->timer))
3452                 goto out;
3453
3454         if (brmctx->multicast_querier) {
3455                 __br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
3456                                           false, 0, NULL);
3457
3458                 time = jiffies + brmctx->multicast_last_member_count *
3459                                  brmctx->multicast_last_member_interval;
3460
3461                 mod_timer(&own_query->timer, time);
3462
3463                 for (p = mlock_dereference(mp->ports, brmctx->br);
3464                      p != NULL && pmctx != NULL;
3465                      p = mlock_dereference(p->next, brmctx->br)) {
3466                         if (!br_port_group_equal(p, pmctx->port, src))
3467                                 continue;
3468
3469                         if (!hlist_unhashed(&p->mglist) &&
3470                             (timer_pending(&p->timer) ?
3471                              time_after(p->timer.expires, time) :
3472                              try_to_del_timer_sync(&p->timer) >= 0)) {
3473                                 mod_timer(&p->timer, time);
3474                         }
3475
3476                         break;
3477                 }
3478         }
3479
3480         now = jiffies;
3481         time = now + brmctx->multicast_last_member_count *
3482                      brmctx->multicast_last_member_interval;
3483
3484         if (!pmctx) {
3485                 if (mp->host_joined &&
3486                     (timer_pending(&mp->timer) ?
3487                      time_after(mp->timer.expires, time) :
3488                      try_to_del_timer_sync(&mp->timer) >= 0)) {
3489                         mod_timer(&mp->timer, time);
3490                 }
3491
3492                 goto out;
3493         }
3494
3495         for (p = mlock_dereference(mp->ports, brmctx->br);
3496              p != NULL;
3497              p = mlock_dereference(p->next, brmctx->br)) {
3498                 if (p->key.port != pmctx->port)
3499                         continue;
3500
3501                 if (!hlist_unhashed(&p->mglist) &&
3502                     (timer_pending(&p->timer) ?
3503                      time_after(p->timer.expires, time) :
3504                      try_to_del_timer_sync(&p->timer) >= 0)) {
3505                         mod_timer(&p->timer, time);
3506                 }
3507
3508                 break;
3509         }
3510 out:
3511         spin_unlock(&brmctx->br->multicast_lock);
3512 }
3513
3514 static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
3515                                          struct net_bridge_mcast_port *pmctx,
3516                                          __be32 group,
3517                                          __u16 vid,
3518                                          const unsigned char *src)
3519 {
3520         struct br_ip br_group;
3521         struct bridge_mcast_own_query *own_query;
3522
3523         if (ipv4_is_local_multicast(group))
3524                 return;
3525
3526         own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;
3527
3528         memset(&br_group, 0, sizeof(br_group));
3529         br_group.dst.ip4 = group;
3530         br_group.proto = htons(ETH_P_IP);
3531         br_group.vid = vid;
3532
3533         br_multicast_leave_group(brmctx, pmctx, &br_group,
3534                                  &brmctx->ip4_other_query,
3535                                  own_query, src);
3536 }
3537
3538 #if IS_ENABLED(CONFIG_IPV6)
3539 static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
3540                                          struct net_bridge_mcast_port *pmctx,
3541                                          const struct in6_addr *group,
3542                                          __u16 vid,
3543                                          const unsigned char *src)
3544 {
3545         struct br_ip br_group;
3546         struct bridge_mcast_own_query *own_query;
3547
3548         if (ipv6_addr_is_ll_all_nodes(group))
3549                 return;
3550
3551         own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;
3552
3553         memset(&br_group, 0, sizeof(br_group));
3554         br_group.dst.ip6 = *group;
3555         br_group.proto = htons(ETH_P_IPV6);
3556         br_group.vid = vid;
3557
3558         br_multicast_leave_group(brmctx, pmctx, &br_group,
3559                                  &brmctx->ip6_other_query,
3560                                  own_query, src);
3561 }
3562 #endif
3563
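/* Account an IGMP/MLD parse error in the per-CPU multicast statistics of
 * the port, or of the bridge itself when @p is NULL.
 */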
3564 static void br_multicast_err_count(const struct net_bridge *br,
3565                                    const struct net_bridge_port *p,
3566                                    __be16 proto)
3567 {
3568         struct bridge_mcast_stats __percpu *stats;
3569         struct bridge_mcast_stats *pstats;
3570
3571         if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3572                 return;
3573
3574         if (p)
3575                 stats = p->mcast_stats;
3576         else
3577                 stats = br->mcast_stats;
3578         if (WARN_ON(!stats))
3579                 return;
3580
3581         pstats = this_cpu_ptr(stats);
3582
3583         u64_stats_update_begin(&pstats->syncp);
3584         switch (proto) {
3585         case htons(ETH_P_IP):
3586                 pstats->mstats.igmp_parse_errors++;
3587                 break;
3588 #if IS_ENABLED(CONFIG_IPV6)
3589         case htons(ETH_P_IPV6):
3590                 pstats->mstats.mld_parse_errors++;
3591                 break;
3592 #endif
3593         }
3594         u64_stats_update_end(&pstats->syncp);
3595 }
3596
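/* A PIMv2 hello received on a port means a multicast router is attached
 * there: mark the port as an IPv4 multicast router port.
 */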
3597 static void br_multicast_pim(struct net_bridge_mcast *brmctx,
3598                              struct net_bridge_mcast_port *pmctx,
3599                              const struct sk_buff *skb)
3600 {
3601         unsigned int offset = skb_transport_offset(skb);
3602         struct pimhdr *pimhdr, _pimhdr;
3603
3604         pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
3605         if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
3606             pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
3607                 return;
3608
3609         spin_lock(&brmctx->br->multicast_lock);
3610         br_ip4_multicast_mark_router(brmctx, pmctx);
3611         spin_unlock(&brmctx->br->multicast_lock);
3612 }
3613
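/* IGMP Multicast Router Discovery (RFC 4286): a Multicast Router
 * Advertisement marks the receiving port as an IPv4 multicast router port.
 */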
3614 static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3615                                     struct net_bridge_mcast_port *pmctx,
3616                                     struct sk_buff *skb)
3617 {
3618         if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
3619             igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
3620                 return -ENOMSG;
3621
3622         spin_lock(&brmctx->br->multicast_lock);
3623         br_ip4_multicast_mark_router(brmctx, pmctx);
3624         spin_unlock(&brmctx->br->multicast_lock);
3625
3626         return 0;
3627 }
3628
3629 static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
3630                                  struct net_bridge_mcast_port *pmctx,
3631                                  struct sk_buff *skb,
3632                                  u16 vid)
3633 {
3634         struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
3635         const unsigned char *src;
3636         struct igmphdr *ih;
3637         int err;
3638
3639         err = ip_mc_check_igmp(skb);
3640
3641         if (err == -ENOMSG) {
3642                 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
3643                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3644                 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
3645                         if (ip_hdr(skb)->protocol == IPPROTO_PIM)
3646                                 br_multicast_pim(brmctx, pmctx, skb);
3647                 } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
3648                         br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
3649                 }
3650
3651                 return 0;
3652         } else if (err < 0) {
3653                 br_multicast_err_count(brmctx->br, p, skb->protocol);
3654                 return err;
3655         }
3656
3657         ih = igmp_hdr(skb);
3658         src = eth_hdr(skb)->h_source;
3659         BR_INPUT_SKB_CB(skb)->igmp = ih->type;
3660
3661         switch (ih->type) {
3662         case IGMP_HOST_MEMBERSHIP_REPORT:
3663         case IGMPV2_HOST_MEMBERSHIP_REPORT:
3664                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3665                 err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
3666                                                  src, true);
3667                 break;
3668         case IGMPV3_HOST_MEMBERSHIP_REPORT:
3669                 err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
3670                 break;
3671         case IGMP_HOST_MEMBERSHIP_QUERY:
3672                 br_ip4_multicast_query(brmctx, pmctx, skb, vid);
3673                 break;
3674         case IGMP_HOST_LEAVE_MESSAGE:
3675                 br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
3676                 break;
3677         }
3678
3679         br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
3680                            BR_MCAST_DIR_RX);
3681
3682         return err;
3683 }
3684
3685 #if IS_ENABLED(CONFIG_IPV6)
3686 static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3687                                      struct net_bridge_mcast_port *pmctx,
3688                                      struct sk_buff *skb)
3689 {
3690         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
3691                 return;
3692
3693         spin_lock(&brmctx->br->multicast_lock);
3694         br_ip6_multicast_mark_router(brmctx, pmctx);
3695         spin_unlock(&brmctx->br->multicast_lock);
3696 }
3697
3698 static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
3699                                  struct net_bridge_mcast_port *pmctx,
3700                                  struct sk_buff *skb,
3701                                  u16 vid)
3702 {
3703         struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
3704         const unsigned char *src;
3705         struct mld_msg *mld;
3706         int err;
3707
3708         err = ipv6_mc_check_mld(skb);
3709
3710         if (err == -ENOMSG || err == -ENODATA) {
3711                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
3712                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3713                 if (err == -ENODATA &&
3714                     ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
3715                         br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);
3716
3717                 return 0;
3718         } else if (err < 0) {
3719                 br_multicast_err_count(brmctx->br, p, skb->protocol);
3720                 return err;
3721         }
3722
3723         mld = (struct mld_msg *)skb_transport_header(skb);
3724         BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
3725
3726         switch (mld->mld_type) {
3727         case ICMPV6_MGM_REPORT:
3728                 src = eth_hdr(skb)->h_source;
3729                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3730                 err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
3731                                                  vid, src, true);
3732                 break;
3733         case ICMPV6_MLD2_REPORT:
3734                 err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
3735                 break;
3736         case ICMPV6_MGM_QUERY:
3737                 err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
3738                 break;
3739         case ICMPV6_MGM_REDUCTION:
3740                 src = eth_hdr(skb)->h_source;
3741                 br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
3742                                              src);
3743                 break;
3744         }
3745
3746         br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
3747                            BR_MCAST_DIR_RX);
3748
3749         return err;
3750 }
3751 #endif
3752
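/* Entry point for IGMP/MLD snooping from the bridge input path: pick the
 * bridge-wide or per-vlan multicast contexts (when vlan snooping is
 * enabled) and dispatch to the IPv4/IPv6 handlers.
 */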
3753 int br_multicast_rcv(struct net_bridge_mcast **brmctx,
3754                      struct net_bridge_mcast_port **pmctx,
3755                      struct net_bridge_vlan *vlan,
3756                      struct sk_buff *skb, u16 vid)
3757 {
3758         int ret = 0;
3759
3760         BR_INPUT_SKB_CB(skb)->igmp = 0;
3761         BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
3762
3763         if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
3764                 return 0;
3765
3766         if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
3767                 const struct net_bridge_vlan *masterv;
3768
3769                 /* the vlan has the master flag set only when transmitting
3770                  * through the bridge device
3771                  */
3772                 if (br_vlan_is_master(vlan)) {
3773                         masterv = vlan;
3774                         *brmctx = &vlan->br_mcast_ctx;
3775                         *pmctx = NULL;
3776                 } else {
3777                         masterv = vlan->brvlan;
3778                         *brmctx = &vlan->brvlan->br_mcast_ctx;
3779                         *pmctx = &vlan->port_mcast_ctx;
3780                 }
3781
3782                 if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
3783                         return 0;
3784         }
3785
3786         switch (skb->protocol) {
3787         case htons(ETH_P_IP):
3788                 ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
3789                 break;
3790 #if IS_ENABLED(CONFIG_IPV6)
3791         case htons(ETH_P_IPV6):
3792                 ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
3793                 break;
3794 #endif
3795         }
3796
3797         return ret;
3798 }
3799
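/* Own query timer expired: send the next (startup) general query unless
 * this is a per-vlan context whose snooping has been disabled.
 */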
3800 static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
3801                                        struct bridge_mcast_own_query *query,
3802                                        struct bridge_mcast_querier *querier)
3803 {
3804         spin_lock(&brmctx->br->multicast_lock);
3805         if (br_multicast_ctx_vlan_disabled(brmctx))
3806                 goto out;
3807
3808         if (query->startup_sent < brmctx->multicast_startup_query_count)
3809                 query->startup_sent++;
3810
3811         br_multicast_send_query(brmctx, NULL, query);
3812 out:
3813         spin_unlock(&brmctx->br->multicast_lock);
3814 }
3815
3816 static void br_ip4_multicast_query_expired(struct timer_list *t)
3817 {
3818         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
3819                                                      ip4_own_query.timer);
3820
3821         br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
3822                                    &brmctx->ip4_querier);
3823 }
3824
3825 #if IS_ENABLED(CONFIG_IPV6)
3826 static void br_ip6_multicast_query_expired(struct timer_list *t)
3827 {
3828         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
3829                                                      ip6_own_query.timer);
3830
3831         br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
3832                                    &brmctx->ip6_querier);
3833 }
3834 #endif
3835
3836 static void br_multicast_gc_work(struct work_struct *work)
3837 {
3838         struct net_bridge *br = container_of(work, struct net_bridge,
3839                                              mcast_gc_work);
3840         HLIST_HEAD(deleted_head);
3841
3842         spin_lock_bh(&br->multicast_lock);
3843         hlist_move_list(&br->mcast_gc_list, &deleted_head);
3844         spin_unlock_bh(&br->multicast_lock);
3845
3846         br_multicast_gc(&deleted_head);
3847 }
3848
3849 void br_multicast_ctx_init(struct net_bridge *br,
3850                            struct net_bridge_vlan *vlan,
3851                            struct net_bridge_mcast *brmctx)
3852 {
3853         brmctx->br = br;
3854         brmctx->vlan = vlan;
3855         brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
3856         brmctx->multicast_last_member_count = 2;
3857         brmctx->multicast_startup_query_count = 2;
3858
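        /* Default protocol timer values (RFC 2236/3376): 125s query
         * interval, 10s query response interval, 255s other-querier-present
         * interval and a 260s (2 * 125s + 10s) membership interval.
         */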
3859         brmctx->multicast_last_member_interval = HZ;
3860         brmctx->multicast_query_response_interval = 10 * HZ;
3861         brmctx->multicast_startup_query_interval = 125 * HZ / 4;
3862         brmctx->multicast_query_interval = 125 * HZ;
3863         brmctx->multicast_querier_interval = 255 * HZ;
3864         brmctx->multicast_membership_interval = 260 * HZ;
3865
3866         brmctx->ip4_other_query.delay_time = 0;
3867         brmctx->ip4_querier.port_ifidx = 0;
3868         seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
3869         brmctx->multicast_igmp_version = 2;
3870 #if IS_ENABLED(CONFIG_IPV6)
3871         brmctx->multicast_mld_version = 1;
3872         brmctx->ip6_other_query.delay_time = 0;
3873         brmctx->ip6_querier.port_ifidx = 0;
3874         seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
3875 #endif
3876
3877         timer_setup(&brmctx->ip4_mc_router_timer,
3878                     br_ip4_multicast_local_router_expired, 0);
3879         timer_setup(&brmctx->ip4_other_query.timer,
3880                     br_ip4_multicast_querier_expired, 0);
3881         timer_setup(&brmctx->ip4_own_query.timer,
3882                     br_ip4_multicast_query_expired, 0);
3883 #if IS_ENABLED(CONFIG_IPV6)
3884         timer_setup(&brmctx->ip6_mc_router_timer,
3885                     br_ip6_multicast_local_router_expired, 0);
3886         timer_setup(&brmctx->ip6_other_query.timer,
3887                     br_ip6_multicast_querier_expired, 0);
3888         timer_setup(&brmctx->ip6_own_query.timer,
3889                     br_ip6_multicast_query_expired, 0);
3890 #endif
3891 }
3892
3893 void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
3894 {
3895         __br_multicast_stop(brmctx);
3896 }
3897
3898 void br_multicast_init(struct net_bridge *br)
3899 {
3900         br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
3901
3902         br_multicast_ctx_init(br, NULL, &br->multicast_ctx);
3903
3904         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
3905         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
3906
3907         spin_lock_init(&br->multicast_lock);
3908         INIT_HLIST_HEAD(&br->mdb_list);
3909         INIT_HLIST_HEAD(&br->mcast_gc_list);
3910         INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
3911 }
3912
3913 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
3914 {
3915         struct in_device *in_dev = in_dev_get(br->dev);
3916
3917         if (!in_dev)
3918                 return;
3919
3920         __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3921         in_dev_put(in_dev);
3922 }
3923
3924 #if IS_ENABLED(CONFIG_IPV6)
3925 static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
3926 {
3927         struct in6_addr addr;
3928
3929         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
3930         ipv6_dev_mc_inc(br->dev, &addr);
3931 }
3932 #else
3933 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
3934 {
3935 }
3936 #endif
3937
3938 void br_multicast_join_snoopers(struct net_bridge *br)
3939 {
3940         br_ip4_multicast_join_snoopers(br);
3941         br_ip6_multicast_join_snoopers(br);
3942 }
3943
3944 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
3945 {
3946         struct in_device *in_dev = in_dev_get(br->dev);
3947
3948         if (WARN_ON(!in_dev))
3949                 return;
3950
3951         __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3952         in_dev_put(in_dev);
3953 }
3954
3955 #if IS_ENABLED(CONFIG_IPV6)
3956 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
3957 {
3958         struct in6_addr addr;
3959
3960         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
3961         ipv6_dev_mc_dec(br->dev, &addr);
3962 }
3963 #else
3964 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
3965 {
3966 }
3967 #endif
3968
3969 void br_multicast_leave_snoopers(struct net_bridge *br)
3970 {
3971         br_ip4_multicast_leave_snoopers(br);
3972         br_ip6_multicast_leave_snoopers(br);
3973 }
3974
3975 static void __br_multicast_open_query(struct net_bridge *br,
3976                                       struct bridge_mcast_own_query *query)
3977 {
3978         query->startup_sent = 0;
3979
3980         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
3981                 return;
3982
3983         mod_timer(&query->timer, jiffies);
3984 }
3985
3986 static void __br_multicast_open(struct net_bridge_mcast *brmctx)
3987 {
3988         __br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
3989 #if IS_ENABLED(CONFIG_IPV6)
3990         __br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
3991 #endif
3992 }
3993
3994 void br_multicast_open(struct net_bridge *br)
3995 {
3996         ASSERT_RTNL();
3997
3998         if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
3999                 struct net_bridge_vlan_group *vg;
4000                 struct net_bridge_vlan *vlan;
4001
4002                 vg = br_vlan_group(br);
4003                 if (vg) {
4004                         list_for_each_entry(vlan, &vg->vlan_list, vlist) {
4005                                 struct net_bridge_mcast *brmctx;
4006
4007                                 brmctx = &vlan->br_mcast_ctx;
4008                                 if (br_vlan_is_brentry(vlan) &&
4009                                     !br_multicast_ctx_vlan_disabled(brmctx))
4010                                         __br_multicast_open(&vlan->br_mcast_ctx);
4011                         }
4012                 }
4013         } else {
4014                 __br_multicast_open(&br->multicast_ctx);
4015         }
4016 }
4017
4018 static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
4019 {
4020         del_timer_sync(&brmctx->ip4_mc_router_timer);
4021         del_timer_sync(&brmctx->ip4_other_query.timer);
4022         del_timer_sync(&brmctx->ip4_own_query.timer);
4023 #if IS_ENABLED(CONFIG_IPV6)
4024         del_timer_sync(&brmctx->ip6_mc_router_timer);
4025         del_timer_sync(&brmctx->ip6_other_query.timer);
4026         del_timer_sync(&brmctx->ip6_own_query.timer);
4027 #endif
4028 }
4029
4030 void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
4031 {
4032         struct net_bridge *br;
4033
4034         /* It's okay to check the flag without the multicast lock because it
4035          * can only change under RTNL -> multicast_lock; we only need the
4036          * latter to synchronize with timers and packets.
4037          */
4038         if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
4039                 return;
4040
4041         if (br_vlan_is_master(vlan)) {
4042                 br = vlan->br;
4043
4044                 if (!br_vlan_is_brentry(vlan) ||
4045                     (on &&
4046                      br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
4047                         return;
4048
4049                 spin_lock_bh(&br->multicast_lock);
4050                 vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
4051                 spin_unlock_bh(&br->multicast_lock);
4052
4053                 if (on)
4054                         __br_multicast_open(&vlan->br_mcast_ctx);
4055                 else
4056                         __br_multicast_stop(&vlan->br_mcast_ctx);
4057         } else {
4058                 struct net_bridge_mcast *brmctx;
4059
4060                 brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
4061                 if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
4062                         return;
4063
4064                 br = vlan->port->br;
4065                 spin_lock_bh(&br->multicast_lock);
4066                 vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
4067                 if (on)
4068                         __br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
4069                 else
4070                         __br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
4071                 spin_unlock_bh(&br->multicast_lock);
4072         }
4073 }
4074
4075 static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
4076 {
4077         struct net_bridge_port *p;
4078
4079         if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
4080                 return;
4081
4082         list_for_each_entry(p, &vlan->br->port_list, list) {
4083                 struct net_bridge_vlan *vport;
4084
4085                 vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
4086                 if (!vport)
4087                         continue;
4088                 br_multicast_toggle_one_vlan(vport, on);
4089         }
4090
4091         if (br_vlan_is_brentry(vlan))
4092                 br_multicast_toggle_one_vlan(vlan, on);
4093 }
4094
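/* Globally switch between the bridge-wide and the per-vlan multicast
 * contexts.  Requires vlan filtering to be enabled; when per-vlan snooping
 * is turned on, the bridge-wide context and all port contexts are stopped
 * and the per-vlan contexts take over (and vice versa when turned off).
 */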
4095 int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
4096                                       struct netlink_ext_ack *extack)
4097 {
4098         struct net_bridge_vlan_group *vg;
4099         struct net_bridge_vlan *vlan;
4100         struct net_bridge_port *p;
4101
4102         if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
4103                 return 0;
4104
4105         if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
4106                 NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
4107                 return -EINVAL;
4108         }
4109
4110         vg = br_vlan_group(br);
4111         if (!vg)
4112                 return 0;
4113
4114         br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);
4115
4116         /* disable/enable non-vlan mcast contexts based on vlan snooping */
4117         if (on)
4118                 __br_multicast_stop(&br->multicast_ctx);
4119         else
4120                 __br_multicast_open(&br->multicast_ctx);
4121         list_for_each_entry(p, &br->port_list, list) {
4122                 if (on)
4123                         br_multicast_disable_port(p);
4124                 else
4125                         br_multicast_enable_port(p);
4126         }
4127
4128         list_for_each_entry(vlan, &vg->vlan_list, vlist)
4129                 br_multicast_toggle_vlan(vlan, on);
4130
4131         return 0;
4132 }
4133
4134 bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
4135 {
4136         ASSERT_RTNL();
4137
4138         /* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
4139          * requires only RTNL to change
4140          */
4141         if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
4142                 return false;
4143
4144         vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
4145         br_multicast_toggle_vlan(vlan, on);
4146
4147         return true;
4148 }
4149
4150 void br_multicast_stop(struct net_bridge *br)
4151 {
4152         ASSERT_RTNL();
4153
4154         if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
4155                 struct net_bridge_vlan_group *vg;
4156                 struct net_bridge_vlan *vlan;
4157
4158                 vg = br_vlan_group(br);
4159                 if (vg) {
4160                         list_for_each_entry(vlan, &vg->vlan_list, vlist) {
4161                                 struct net_bridge_mcast *brmctx;
4162
4163                                 brmctx = &vlan->br_mcast_ctx;
4164                                 if (br_vlan_is_brentry(vlan) &&
4165                                     !br_multicast_ctx_vlan_disabled(brmctx))
4166                                         __br_multicast_stop(&vlan->br_mcast_ctx);
4167                         }
4168                 }
4169         } else {
4170                 __br_multicast_stop(&br->multicast_ctx);
4171         }
4172 }
4173
4174 void br_multicast_dev_del(struct net_bridge *br)
4175 {
4176         struct net_bridge_mdb_entry *mp;
4177         HLIST_HEAD(deleted_head);
4178         struct hlist_node *tmp;
4179
4180         spin_lock_bh(&br->multicast_lock);
4181         hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
4182                 br_multicast_del_mdb_entry(mp);
4183         hlist_move_list(&br->mcast_gc_list, &deleted_head);
4184         spin_unlock_bh(&br->multicast_lock);
4185
4186         br_multicast_ctx_deinit(&br->multicast_ctx);
4187         br_multicast_gc(&deleted_head);
4188         cancel_work_sync(&br->mcast_gc_work);
4189
4190         rcu_barrier();
4191 }
4192
4193 int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
4194 {
4195         int err = -EINVAL;
4196
4197         spin_lock_bh(&brmctx->br->multicast_lock);
4198
4199         switch (val) {
4200         case MDB_RTR_TYPE_DISABLED:
4201         case MDB_RTR_TYPE_PERM:
4202                 br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
4203                 del_timer(&brmctx->ip4_mc_router_timer);
4204 #if IS_ENABLED(CONFIG_IPV6)
4205                 del_timer(&brmctx->ip6_mc_router_timer);
4206 #endif
4207                 brmctx->multicast_router = val;
4208                 err = 0;
4209                 break;
4210         case MDB_RTR_TYPE_TEMP_QUERY:
4211                 if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
4212                         br_mc_router_state_change(brmctx->br, false);
4213                 brmctx->multicast_router = val;
4214                 err = 0;
4215                 break;
4216         }
4217
4218         spin_unlock_bh(&brmctx->br->multicast_lock);
4219
4220         return err;
4221 }
4222
4223 static void
4224 br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
4225 {
4226         if (!deleted)
4227                 return;
4228
4229         /* For backwards compatibility, for now only notify once there is
4230          * no multicast router left for either IPv4 or IPv6.
4231          */
4232         if (!hlist_unhashed(&pmctx->ip4_rlist))
4233                 return;
4234 #if IS_ENABLED(CONFIG_IPV6)
4235         if (!hlist_unhashed(&pmctx->ip6_rlist))
4236                 return;
4237 #endif
4238
4239         br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
4240         br_port_mc_router_state_change(pmctx->port, false);
4241
4242         /* don't allow the deleted temporary router port's timer to be refreshed */
4243         if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
4244                 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
4245 }
4246
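/* Summary of the per-port multicast router modes handled by the switch
 * below: MDB_RTR_TYPE_DISABLED removes the port from the router port
 * lists, MDB_RTR_TYPE_TEMP_QUERY falls back to learning router ports
 * from queries, MDB_RTR_TYPE_PERM pins the port as a router port with
 * no timer, and MDB_RTR_TYPE_TEMP marks it as a router port until the
 * timer expires; writing the current value again only refreshes the
 * temporary timers.
 */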
4247 int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
4248                                  unsigned long val)
4249 {
4250         struct net_bridge_mcast *brmctx;
4251         unsigned long now = jiffies;
4252         int err = -EINVAL;
4253         bool del = false;
4254
4255         brmctx = br_multicast_port_ctx_get_global(pmctx);
4256         spin_lock_bh(&brmctx->br->multicast_lock);
4257         if (pmctx->multicast_router == val) {
4258                 /* Refresh the temp router port timer */
4259                 if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
4260                         mod_timer(&pmctx->ip4_mc_router_timer,
4261                                   now + brmctx->multicast_querier_interval);
4262 #if IS_ENABLED(CONFIG_IPV6)
4263                         mod_timer(&pmctx->ip6_mc_router_timer,
4264                                   now + brmctx->multicast_querier_interval);
4265 #endif
4266                 }
4267                 err = 0;
4268                 goto unlock;
4269         }
4270         switch (val) {
4271         case MDB_RTR_TYPE_DISABLED:
4272                 pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
4273                 del |= br_ip4_multicast_rport_del(pmctx);
4274                 del_timer(&pmctx->ip4_mc_router_timer);
4275                 del |= br_ip6_multicast_rport_del(pmctx);
4276 #if IS_ENABLED(CONFIG_IPV6)
4277                 del_timer(&pmctx->ip6_mc_router_timer);
4278 #endif
4279                 br_multicast_rport_del_notify(pmctx, del);
4280                 break;
4281         case MDB_RTR_TYPE_TEMP_QUERY:
4282                 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
4283                 del |= br_ip4_multicast_rport_del(pmctx);
4284                 del |= br_ip6_multicast_rport_del(pmctx);
4285                 br_multicast_rport_del_notify(pmctx, del);
4286                 break;
4287         case MDB_RTR_TYPE_PERM:
4288                 pmctx->multicast_router = MDB_RTR_TYPE_PERM;
4289                 del_timer(&pmctx->ip4_mc_router_timer);
4290                 br_ip4_multicast_add_router(brmctx, pmctx);
4291 #if IS_ENABLED(CONFIG_IPV6)
4292                 del_timer(&pmctx->ip6_mc_router_timer);
4293 #endif
4294                 br_ip6_multicast_add_router(brmctx, pmctx);
4295                 break;
4296         case MDB_RTR_TYPE_TEMP:
4297                 pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
4298                 br_ip4_multicast_mark_router(brmctx, pmctx);
4299                 br_ip6_multicast_mark_router(brmctx, pmctx);
4300                 break;
4301         default:
4302                 goto unlock;
4303         }
4304         err = 0;
4305 unlock:
4306         spin_unlock_bh(&brmctx->br->multicast_lock);
4307
4308         return err;
4309 }
4310
4311 int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
4312 {
4313         int err;
4314
4315         if (br_vlan_is_master(v))
4316                 err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router);
4317         else
4318                 err = br_multicast_set_port_router(&v->port_mcast_ctx,
4319                                                    mcast_router);
4320
4321         return err;
4322 }
4323
4324 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
4325                                        struct bridge_mcast_own_query *query)
4326 {
4327         struct net_bridge_port *port;
4328
4329         if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
4330                 return;
4331
4332         __br_multicast_open_query(brmctx->br, query);
4333
4334         rcu_read_lock();
4335         list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
4336                 struct bridge_mcast_own_query *ip4_own_query;
4337 #if IS_ENABLED(CONFIG_IPV6)
4338                 struct bridge_mcast_own_query *ip6_own_query;
4339 #endif
4340
4341                 if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
4342                         continue;
4343
4344                 if (br_multicast_ctx_is_vlan(brmctx)) {
4345                         struct net_bridge_vlan *vlan;
4346
4347                         vlan = br_vlan_find(nbp_vlan_group_rcu(port),
4348                                             brmctx->vlan->vid);
4349                         if (!vlan ||
4350                             br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
4351                                 continue;
4352
4353                         ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
4354 #if IS_ENABLED(CONFIG_IPV6)
4355                         ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
4356 #endif
4357                 } else {
4358                         ip4_own_query = &port->multicast_ctx.ip4_own_query;
4359 #if IS_ENABLED(CONFIG_IPV6)
4360                         ip6_own_query = &port->multicast_ctx.ip6_own_query;
4361 #endif
4362                 }
4363
4364                 if (query == &brmctx->ip4_own_query)
4365                         br_multicast_enable(ip4_own_query);
4366 #if IS_ENABLED(CONFIG_IPV6)
4367                 else
4368                         br_multicast_enable(ip6_own_query);
4369 #endif
4370         }
4371         rcu_read_unlock();
4372 }
4373
4374 int br_multicast_toggle(struct net_bridge *br, unsigned long val,
4375                         struct netlink_ext_ack *extack)
4376 {
4377         struct net_bridge_port *port;
4378         bool change_snoopers = false;
4379         int err = 0;
4380
4381         spin_lock_bh(&br->multicast_lock);
4382         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
4383                 goto unlock;
4384
4385         err = br_mc_disabled_update(br->dev, val, extack);
4386         if (err == -EOPNOTSUPP)
4387                 err = 0;
4388         if (err)
4389                 goto unlock;
4390
4391         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
4392         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
4393                 change_snoopers = true;
4394                 goto unlock;
4395         }
4396
4397         if (!netif_running(br->dev))
4398                 goto unlock;
4399
4400         br_multicast_open(br);
4401         list_for_each_entry(port, &br->port_list, list)
4402                 __br_multicast_enable_port_ctx(&port->multicast_ctx);
4403
4404         change_snoopers = true;
4405
4406 unlock:
4407         spin_unlock_bh(&br->multicast_lock);
4408
4409         /* br_multicast_join_snoopers() has the potential to cause
4410          * an MLD Report/Leave to be delivered to br_multicast_rcv(),
4411          * which would in turn call br_multicast_add_group(), which would
4412          * attempt to acquire multicast_lock. It must therefore be
4413          * called after the lock has been released to avoid deadlocks on
4414          * multicast_lock.
4415          *
4416          * br_multicast_leave_snoopers() does not have this problem since
4417          * br_multicast_rcv() first checks BROPT_MULTICAST_ENABLED and
4418          * returns without calling br_multicast_ipv4/6_rcv() if it's not
4419          * enabled. Both calls are made outside the lock just for symmetry.
4420          */
4421         if (change_snoopers) {
4422                 if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
4423                         br_multicast_join_snoopers(br);
4424                 else
4425                         br_multicast_leave_snoopers(br);
4426         }
4427
4428         return err;
4429 }
4430
4431 bool br_multicast_enabled(const struct net_device *dev)
4432 {
4433         struct net_bridge *br = netdev_priv(dev);
4434
4435         return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
4436 }
4437 EXPORT_SYMBOL_GPL(br_multicast_enabled);
4438
4439 bool br_multicast_router(const struct net_device *dev)
4440 {
4441         struct net_bridge *br = netdev_priv(dev);
4442         bool is_router;
4443
4444         spin_lock_bh(&br->multicast_lock);
4445         is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
4446         spin_unlock_bh(&br->multicast_lock);
4447         return is_router;
4448 }
4449 EXPORT_SYMBOL_GPL(br_multicast_router);
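
/* A minimal illustrative sketch of how an external module might combine
 * the two exported helpers above; example_bridge_filters_multicast() is
 * a hypothetical name, not existing kernel code.
 */
static bool example_bridge_filters_multicast(const struct net_device *br_dev)
{
        if (!netif_is_bridge_master(br_dev))
                return false;

        /* only filter when snooping is enabled and the bridge itself is
         * not acting as a multicast router
         */
        return br_multicast_enabled(br_dev) && !br_multicast_router(br_dev);
}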
4450
4451 int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
4452 {
4453         unsigned long max_delay;
4454
4455         val = !!val;
4456
4457         spin_lock_bh(&brmctx->br->multicast_lock);
4458         if (brmctx->multicast_querier == val)
4459                 goto unlock;
4460
4461         WRITE_ONCE(brmctx->multicast_querier, val);
4462         if (!val)
4463                 goto unlock;
4464
4465         max_delay = brmctx->multicast_query_response_interval;
4466
4467         if (!timer_pending(&brmctx->ip4_other_query.timer))
4468                 brmctx->ip4_other_query.delay_time = jiffies + max_delay;
4469
4470         br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);
4471
4472 #if IS_ENABLED(CONFIG_IPV6)
4473         if (!timer_pending(&brmctx->ip6_other_query.timer))
4474                 brmctx->ip6_other_query.delay_time = jiffies + max_delay;
4475
4476         br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
4477 #endif
4478
4479 unlock:
4480         spin_unlock_bh(&brmctx->br->multicast_lock);
4481
4482         return 0;
4483 }
4484
4485 int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
4486                                   unsigned long val)
4487 {
4488         /* Currently we only support IGMP versions 2 and 3 */
4489         switch (val) {
4490         case 2:
4491         case 3:
4492                 break;
4493         default:
4494                 return -EINVAL;
4495         }
4496
4497         spin_lock_bh(&brmctx->br->multicast_lock);
4498         brmctx->multicast_igmp_version = val;
4499         spin_unlock_bh(&brmctx->br->multicast_lock);
4500
4501         return 0;
4502 }
4503
4504 #if IS_ENABLED(CONFIG_IPV6)
4505 int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
4506                                  unsigned long val)
4507 {
4508         /* Currently we only support MLD versions 1 and 2 */
4509         switch (val) {
4510         case 1:
4511         case 2:
4512                 break;
4513         default:
4514                 return -EINVAL;
4515         }
4516
4517         spin_lock_bh(&brmctx->br->multicast_lock);
4518         brmctx->multicast_mld_version = val;
4519         spin_unlock_bh(&brmctx->br->multicast_lock);
4520
4521         return 0;
4522 }
4523 #endif
4524
4525 /**
4526  * br_multicast_list_adjacent - Returns snooped multicast addresses
4527  * @dev:        The bridge port adjacent to which to retrieve addresses
4528  * @br_ip_list: The list in which to store the snooped multicast IP addresses found
4529  *
4530  * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
4531  * snooping feature on all bridge ports of dev's bridge device, excluding
4532  * the addresses from dev itself.
4533  *
4534  * Returns the number of items added to br_ip_list.
4535  *
4536  * Notes:
4537  * - br_ip_list needs to be initialized by caller
4538  * - br_ip_list might contain duplicates in the end
4539  *   (needs to be taken care of by caller)
4540  * - br_ip_list needs to be freed by caller
4541  */
4542 int br_multicast_list_adjacent(struct net_device *dev,
4543                                struct list_head *br_ip_list)
4544 {
4545         struct net_bridge *br;
4546         struct net_bridge_port *port;
4547         struct net_bridge_port_group *group;
4548         struct br_ip_list *entry;
4549         int count = 0;
4550
4551         rcu_read_lock();
4552         if (!br_ip_list || !netif_is_bridge_port(dev))
4553                 goto unlock;
4554
4555         port = br_port_get_rcu(dev);
4556         if (!port || !port->br)
4557                 goto unlock;
4558
4559         br = port->br;
4560
4561         list_for_each_entry_rcu(port, &br->port_list, list) {
4562                 if (!port->dev || port->dev == dev)
4563                         continue;
4564
4565                 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
4566                         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
4567                         if (!entry)
4568                                 goto unlock;
4569
4570                         entry->addr = group->key.addr;
4571                         list_add(&entry->list, br_ip_list);
4572                         count++;
4573                 }
4574         }
4575
4576 unlock:
4577         rcu_read_unlock();
4578         return count;
4579 }
4580 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
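
/* A minimal usage sketch for br_multicast_list_adjacent();
 * example_dump_adjacent_groups() is a hypothetical caller illustrating
 * the init/free responsibilities listed in the notes above.
 */
static int example_dump_adjacent_groups(struct net_device *bridge_port_dev)
{
        struct br_ip_list *entry, *tmp;
        LIST_HEAD(mcast_list);
        int count;

        count = br_multicast_list_adjacent(bridge_port_dev, &mcast_list);

        list_for_each_entry_safe(entry, tmp, &mcast_list, list) {
                /* entry->addr holds a snooped group; duplicates are possible */
                list_del(&entry->list);
                kfree(entry);
        }

        return count;
}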
4581
4582 /**
4583  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
4584  * @dev: The bridge port providing the bridge on which to check for a querier
4585  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4586  *
4587  * Checks whether the given interface has a bridge on top and if so returns
4588  * true if a valid querier exists anywhere on the bridged link layer.
4589  * Otherwise returns false.
4590  */
4591 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
4592 {
4593         struct net_bridge *br;
4594         struct net_bridge_port *port;
4595         struct ethhdr eth;
4596         bool ret = false;
4597
4598         rcu_read_lock();
4599         if (!netif_is_bridge_port(dev))
4600                 goto unlock;
4601
4602         port = br_port_get_rcu(dev);
4603         if (!port || !port->br)
4604                 goto unlock;
4605
4606         br = port->br;
4607
4608         memset(&eth, 0, sizeof(eth));
4609         eth.h_proto = htons(proto);
4610
4611         ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);
4612
4613 unlock:
4614         rcu_read_unlock();
4615         return ret;
4616 }
4617 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
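
/* A minimal illustrative sketch: a hypothetical helper (the
 * example_can_rely_on_snooping() name is an assumption) that only trusts
 * snooped group state when an IGMP querier is active somewhere on the
 * bridged segment.
 */
static bool example_can_rely_on_snooping(struct net_device *bridge_port_dev)
{
        return br_multicast_has_querier_anywhere(bridge_port_dev, ETH_P_IP);
}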
4618
4619 /**
4620  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
4621  * @dev: The bridge port adjacent to which to check for a querier
4622  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4623  *
4624  * Checks whether the given interface has a bridge on top and if so returns
4625  * true if a selected querier is behind one of the other ports of this
4626  * bridge. Otherwise returns false.
4627  */
4628 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
4629 {
4630         struct net_bridge_mcast *brmctx;
4631         struct net_bridge *br;
4632         struct net_bridge_port *port;
4633         bool ret = false;
4634         int port_ifidx;
4635
4636         rcu_read_lock();
4637         if (!netif_is_bridge_port(dev))
4638                 goto unlock;
4639
4640         port = br_port_get_rcu(dev);
4641         if (!port || !port->br)
4642                 goto unlock;
4643
4644         br = port->br;
4645         brmctx = &br->multicast_ctx;
4646
4647         switch (proto) {
4648         case ETH_P_IP:
4649                 port_ifidx = brmctx->ip4_querier.port_ifidx;
4650                 if (!timer_pending(&brmctx->ip4_other_query.timer) ||
4651                     port_ifidx == port->dev->ifindex)
4652                         goto unlock;
4653                 break;
4654 #if IS_ENABLED(CONFIG_IPV6)
4655         case ETH_P_IPV6:
4656                 port_ifidx = brmctx->ip6_querier.port_ifidx;
4657                 if (!timer_pending(&brmctx->ip6_other_query.timer) ||
4658                     port_ifidx == port->dev->ifindex)
4659                         goto unlock;
4660                 break;
4661 #endif
4662         default:
4663                 goto unlock;
4664         }
4665
4666         ret = true;
4667 unlock:
4668         rcu_read_unlock();
4669         return ret;
4670 }
4671 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
4672
4673 /**
4674  * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
4675  * @dev: The bridge port adjacent to which to check for a multicast router
4676  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4677  *
4678  * Checks whether the given interface has a bridge on top and if so returns
4679  * true if a multicast router is behind one of the other ports of this
4680  * bridge. Otherwise returns false.
4681  */
4682 bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
4683 {
4684         struct net_bridge_mcast_port *pmctx;
4685         struct net_bridge_mcast *brmctx;
4686         struct net_bridge_port *port;
4687         bool ret = false;
4688
4689         rcu_read_lock();
4690         port = br_port_get_check_rcu(dev);
4691         if (!port)
4692                 goto unlock;
4693
4694         brmctx = &port->br->multicast_ctx;
4695         switch (proto) {
4696         case ETH_P_IP:
4697                 hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
4698                                          ip4_rlist) {
4699                         if (pmctx->port == port)
4700                                 continue;
4701
4702                         ret = true;
4703                         goto unlock;
4704                 }
4705                 break;
4706 #if IS_ENABLED(CONFIG_IPV6)
4707         case ETH_P_IPV6:
4708                 hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
4709                                          ip6_rlist) {
4710                         if (pmctx->port == port)
4711                                 continue;
4712
4713                         ret = true;
4714                         goto unlock;
4715                 }
4716                 break;
4717 #endif
4718         default:
4719                 /* when compiled without IPv6 support, be conservative and
4720                  * always assume presence of an IPv6 multicast router
4721                  */
4722                 ret = true;
4723         }
4724
4725 unlock:
4726         rcu_read_unlock();
4727         return ret;
4728 }
4729 EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
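
/* A minimal illustrative sketch: a hypothetical helper (the
 * example_must_flood_mcast() name is an assumption) combining the two
 * "adjacent" checks, e.g. to decide whether multicast received on
 * @port_dev should also be sent toward the bridge because a querier or
 * multicast router sits behind one of its other ports.
 */
static bool example_must_flood_mcast(struct net_device *port_dev, int proto)
{
        return br_multicast_has_querier_adjacent(port_dev, proto) ||
               br_multicast_has_router_adjacent(port_dev, proto);
}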
4730
4731 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
4732                                const struct sk_buff *skb, u8 type, u8 dir)
4733 {
4734         struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
4735         __be16 proto = skb->protocol;
4736         unsigned int t_len;
4737
4738         u64_stats_update_begin(&pstats->syncp);
4739         switch (proto) {
4740         case htons(ETH_P_IP):
4741                 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
4742                 switch (type) {
4743                 case IGMP_HOST_MEMBERSHIP_REPORT:
4744                         pstats->mstats.igmp_v1reports[dir]++;
4745                         break;
4746                 case IGMPV2_HOST_MEMBERSHIP_REPORT:
4747                         pstats->mstats.igmp_v2reports[dir]++;
4748                         break;
4749                 case IGMPV3_HOST_MEMBERSHIP_REPORT:
4750                         pstats->mstats.igmp_v3reports[dir]++;
4751                         break;
4752                 case IGMP_HOST_MEMBERSHIP_QUERY:
4753                         if (t_len != sizeof(struct igmphdr)) {
4754                                 pstats->mstats.igmp_v3queries[dir]++;
4755                         } else {
4756                                 unsigned int offset = skb_transport_offset(skb);
4757                                 struct igmphdr *ih, _ihdr;
4758
4759                                 ih = skb_header_pointer(skb, offset,
4760                                                         sizeof(_ihdr), &_ihdr);
4761                                 if (!ih)
4762                                         break;
4763                                 if (!ih->code)
4764                                         pstats->mstats.igmp_v1queries[dir]++;
4765                                 else
4766                                         pstats->mstats.igmp_v2queries[dir]++;
4767                         }
4768                         break;
4769                 case IGMP_HOST_LEAVE_MESSAGE:
4770                         pstats->mstats.igmp_leaves[dir]++;
4771                         break;
4772                 }
4773                 break;
4774 #if IS_ENABLED(CONFIG_IPV6)
4775         case htons(ETH_P_IPV6):
4776                 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
4777                         sizeof(struct ipv6hdr);
4778                 t_len -= skb_network_header_len(skb);
4779                 switch (type) {
4780                 case ICMPV6_MGM_REPORT:
4781                         pstats->mstats.mld_v1reports[dir]++;
4782                         break;
4783                 case ICMPV6_MLD2_REPORT:
4784                         pstats->mstats.mld_v2reports[dir]++;
4785                         break;
4786                 case ICMPV6_MGM_QUERY:
4787                         if (t_len != sizeof(struct mld_msg))
4788                                 pstats->mstats.mld_v2queries[dir]++;
4789                         else
4790                                 pstats->mstats.mld_v1queries[dir]++;
4791                         break;
4792                 case ICMPV6_MGM_REDUCTION:
4793                         pstats->mstats.mld_leaves[dir]++;
4794                         break;
4795                 }
4796                 break;
4797 #endif /* CONFIG_IPV6 */
4798         }
4799         u64_stats_update_end(&pstats->syncp);
4800 }
4801
4802 void br_multicast_count(struct net_bridge *br,
4803                         const struct net_bridge_port *p,
4804                         const struct sk_buff *skb, u8 type, u8 dir)
4805 {
4806         struct bridge_mcast_stats __percpu *stats;
4807
4808         /* if multicast snooping is disabled then the igmp/mld type can't be set */
4809         if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
4810                 return;
4811
4812         if (p)
4813                 stats = p->mcast_stats;
4814         else
4815                 stats = br->mcast_stats;
4816         if (WARN_ON(!stats))
4817                 return;
4818
4819         br_mcast_stats_add(stats, skb, type, dir);
4820 }
4821
4822 int br_multicast_init_stats(struct net_bridge *br)
4823 {
4824         br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
4825         if (!br->mcast_stats)
4826                 return -ENOMEM;
4827
4828         return 0;
4829 }
4830
4831 void br_multicast_uninit_stats(struct net_bridge *br)
4832 {
4833         free_percpu(br->mcast_stats);
4834 }
4835
4836 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
4837 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
4838 {
4839         dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
4840         dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
4841 }
4842
4843 void br_multicast_get_stats(const struct net_bridge *br,
4844                             const struct net_bridge_port *p,
4845                             struct br_mcast_stats *dest)
4846 {
4847         struct bridge_mcast_stats __percpu *stats;
4848         struct br_mcast_stats tdst;
4849         int i;
4850
4851         memset(dest, 0, sizeof(*dest));
4852         if (p)
4853                 stats = p->mcast_stats;
4854         else
4855                 stats = br->mcast_stats;
4856         if (WARN_ON(!stats))
4857                 return;
4858
4859         memset(&tdst, 0, sizeof(tdst));
4860         for_each_possible_cpu(i) {
4861                 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
4862                 struct br_mcast_stats temp;
4863                 unsigned int start;
4864
4865                 do {
4866                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
4867                         memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
4868                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
4869
4870                 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
4871                 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
4872                 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
4873                 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
4874                 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
4875                 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
4876                 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
4877                 tdst.igmp_parse_errors += temp.igmp_parse_errors;
4878
4879                 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
4880                 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
4881                 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
4882                 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
4883                 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
4884                 tdst.mld_parse_errors += temp.mld_parse_errors;
4885         }
4886         memcpy(dest, &tdst, sizeof(*dest));
4887 }
4888
4889 int br_mdb_hash_init(struct net_bridge *br)
4890 {
4891         int err;
4892
4893         err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
4894         if (err)
4895                 return err;
4896
4897         err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
4898         if (err) {
4899                 rhashtable_destroy(&br->sg_port_tbl);
4900                 return err;
4901         }
4902
4903         return 0;
4904 }
4905
4906 void br_mdb_hash_fini(struct net_bridge *br)
4907 {
4908         rhashtable_destroy(&br->sg_port_tbl);
4909         rhashtable_destroy(&br->mdb_hash_tbl);
4910 }