/*
 *	Multicast support for IPv6
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	yoshfuji   : fix format of router-alert option
 *	YOSHIFUJI Hideaki @USAGI:
 *		Fixed source address for MLD message based on
 *		<draft-ietf-magma-mld-source-05.txt>.
 *	YOSHIFUJI Hideaki @USAGI:
 *		- Ignore Queries for invalid addresses.
 *		- MLD for link-local addresses.
 *	David L Stevens <dlstevens@us.ibm.com>:
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/times.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/route.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>

#include <net/ip6_checksum.h>
/* Set to 3 to get tracing... */

#define MDBG(x) printk x

/* Ensure that we have struct in6_addr aligned on a 32-bit word. */
static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
	BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
	BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
	BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
};
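
/* Note on the array above: BUILD_BUG_ON_NULL() expands to a null pointer
 * when its condition is false and to a negative-width bitfield (a compile
 * error) when it is true.  Parking the three results in an unused array
 * forces the offsetof() alignment checks to be evaluated at compile time,
 * so a misaligned field in the MLDv2 message structures breaks the build
 * instead of misbehaving at runtime.
 */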
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;

/* Big mc list lock for all the sockets */
static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void igmp6_timer_handler(unsigned long data);

static void mld_gq_timer_expire(unsigned long data);
static void mld_ifc_timer_expire(unsigned long data);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
static void mld_clear_delrec(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,

static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,

static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev);
#define MLD_QRV_DEFAULT		2
/* RFC3810, 9.2. Query Interval */
#define MLD_QI_DEFAULT		(125 * HZ)
/* RFC3810, 9.3. Query Response Interval */
#define MLD_QRI_DEFAULT		(10 * HZ)

/* RFC3810, 8.1 Query Version Distinctions */
#define MLD_V1_QUERY_LEN	24
#define MLD_V2_QUERY_LEN_MIN	28

#define IPV6_MLD_MAX_MSF	64

int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
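
/* sysctl_mld_max_msf caps the number of per-socket source filters accepted
 * by the source-filter paths below (see ip6_mc_source()); it is exposed as
 * the net.ipv6.mld_max_msf sysctl (/proc/sys/net/ipv6/mld_max_msf).
 */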
/* socket join on multicast group */

#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next))

static int unsolicited_report_interval(struct inet6_dev *idev)

	if (mld_in_v1_mode(idev))
		iv = idev->cnf.mldv1_unsolicited_report_interval;
	else
		iv = idev->cnf.mldv2_unsolicited_report_interval;

	return iv > 0 ? iv : 1;
int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)

	struct net_device *dev = NULL;
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);

	if (!ipv6_addr_is_multicast(addr))

	for_each_pmc_rcu(np, mc_lst) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {

	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);

	mc_lst->addr = *addr;

		rt = rt6_lookup(net, addr, NULL, 0, 0);

		dev = dev_get_by_index_rcu(net, ifindex);

		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));

	mc_lst->ifindex = dev->ifindex;
	mc_lst->sfmode = MCAST_EXCLUDE;
	rwlock_init(&mc_lst->sflock);
	mc_lst->sflist = NULL;

	/* now add/increase the group membership on the device */
	err = ipv6_dev_mc_inc(dev, addr);

		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));

	spin_lock(&ipv6_sk_mc_lock);
	mc_lst->next = np->ipv6_mc_list;
	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
	spin_unlock(&ipv6_sk_mc_lock);
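
/* For reference, a minimal userspace sketch of the join path that ends up
 * in ipv6_sock_mc_join() above; illustrative only, with error handling
 * omitted and the socket fd, group and interface values made up:
 *
 *	struct ipv6_mreq mreq = { 0 };
 *
 *	inet_pton(AF_INET6, "ff02::db8:1", &mreq.ipv6mr_multiaddr);
 *	mreq.ipv6mr_interface = if_nametoindex("eth0");
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
 */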
/* socket leave on multicast group */
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)

	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_mc_socklist __rcu **lnk;
	struct net *net = sock_net(sk);

	if (!ipv6_addr_is_multicast(addr))

	spin_lock(&ipv6_sk_mc_lock);
	for (lnk = &np->ipv6_mc_list;
	     (mc_lst = rcu_dereference_protected(*lnk,
			lockdep_is_held(&ipv6_sk_mc_lock))) != NULL;
	     lnk = &mc_lst->next) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			struct net_device *dev;

			spin_unlock(&ipv6_sk_mc_lock);

			dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
				struct inet6_dev *idev = __in6_dev_get(dev);

				(void) ip6_mc_leave_src(sk, mc_lst, idev);
					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
				(void) ip6_mc_leave_src(sk, mc_lst, NULL);

			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
			kfree_rcu(mc_lst, rcu);

	spin_unlock(&ipv6_sk_mc_lock);

	return -EADDRNOTAVAIL;
/* called with rcu_read_lock() */
static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
					     const struct in6_addr *group,

	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;

		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);

		dev = dev_get_by_index_rcu(net, ifindex);

	idev = __in6_dev_get(dev);

	read_lock_bh(&idev->lock);
		read_unlock_bh(&idev->lock);
void ipv6_sock_mc_close(struct sock *sk)

	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct net *net = sock_net(sk);

	if (!rcu_access_pointer(np->ipv6_mc_list))

	spin_lock(&ipv6_sk_mc_lock);
	while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
			lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
		struct net_device *dev;

		np->ipv6_mc_list = mc_lst->next;
		spin_unlock(&ipv6_sk_mc_lock);

		dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
			struct inet6_dev *idev = __in6_dev_get(dev);

			(void) ip6_mc_leave_src(sk, mc_lst, idev);
				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
			(void) ip6_mc_leave_src(sk, mc_lst, NULL);

		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
		kfree_rcu(mc_lst, rcu);

		spin_lock(&ipv6_sk_mc_lock);

	spin_unlock(&ipv6_sk_mc_lock);
int ip6_mc_source(int add, int omode, struct sock *sk,
	struct group_source_req *pgsr)

	struct in6_addr *source, *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);

	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))

	idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);

	err = -EADDRNOTAVAIL;

	for_each_pmc_rcu(inet6, pmc) {
		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
		if (ipv6_addr_equal(&pmc->addr, group))

	if (!pmc) {		/* must have a prior join */

	/* if a source filter was set, must be the same mode as before */
		if (pmc->sfmode != omode) {
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);

	write_lock(&pmc->sflock);

			goto done;	/* err = -EADDRNOTAVAIL */
		for (i = 0; i < psl->sl_count; i++) {
			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);

		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {

		/* update the interface filter */
		ip6_mc_del_src(idev, group, omode, 1, source, 1);

		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];

	/* else, add a new source to the filter */
	if (psl && psl->sl_count >= sysctl_mld_max_msf) {

	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip6_sf_socklist *newpsl;
		int count = IP6_SFBLOCK;

			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);

		newpsl->sl_max = count;
		newpsl->sl_count = count - IP6_SFBLOCK;
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
		pmc->sflist = psl = newpsl;
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
		if (rv == 0) /* address already there is an error */

	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = *source;

	/* update the interface list */
	ip6_mc_add_src(idev, group, omode, 1, source, 1);

	write_unlock(&pmc->sflock);
	read_unlock_bh(&idev->lock);

	return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
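
/* A minimal userspace sketch of the source-filter path handled above
 * (MCAST_JOIN_SOURCE_GROUP and friends); illustrative only, all addresses
 * and the interface name are made up:
 *
 *	struct group_source_req gsr = { 0 };
 *	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
 *	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
 *
 *	gsr.gsr_interface = if_nametoindex("eth0");
 *	grp->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "ff3e::db8:1", &grp->sin6_addr);
 *	src->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
 *	setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
 *		   &gsr, sizeof(gsr));
 */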
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)

	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))

	if (gsf->gf_fmode != MCAST_INCLUDE &&
	    gsf->gf_fmode != MCAST_EXCLUDE)

	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
		if (ipv6_addr_equal(&pmc->addr, group))

	if (!pmc) {		/* must have a prior join */

	if (gsf->gf_numsrc) {
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),

		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
		for (i = 0; i < newpsl->sl_count; ++i) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
			newpsl->sl_addr[i] = psin6->sin6_addr;

		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
			newpsl->sl_count, newpsl->sl_addr, 0);
			sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));

		(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);

	write_lock(&pmc->sflock);
		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
	pmc->sflist = newpsl;
	pmc->sfmode = gsf->gf_fmode;
	write_unlock(&pmc->sflock);

	read_unlock_bh(&idev->lock);

	err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
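
/* Userspace counterpart of the above: replacing the whole filter in one
 * call via MCAST_MSFILTER.  A hedged sketch; the single source address is
 * made up and error handling is omitted:
 *
 *	char buf[GROUP_FILTER_SIZE(1)] = { 0 };
 *	struct group_filter *gf = (struct group_filter *)buf;
 *	struct sockaddr_in6 *g = (struct sockaddr_in6 *)&gf->gf_group;
 *	struct sockaddr_in6 *s = (struct sockaddr_in6 *)&gf->gf_slist[0];
 *
 *	gf->gf_interface = if_nametoindex("eth0");
 *	gf->gf_fmode = MCAST_INCLUDE;
 *	gf->gf_numsrc = 1;
 *	g->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "ff3e::db8:1", &g->sin6_addr);
 *	s->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &s->sin6_addr);
 *	setsockopt(fd, IPPROTO_IPV6, MCAST_MSFILTER, gf,
 *		   GROUP_FILTER_SIZE(1));
 */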
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)

	int err, i, count, copycount;
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))

	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	err = -EADDRNOTAVAIL;
	/* changes to the ipv6_mc_list require the socket lock and
	 * a read lock on ipv6_sk_mc_lock. We have the socket lock,
	 * so reading the list is safe.
	 */

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
		if (ipv6_addr_equal(group, &pmc->addr))

	if (!pmc)		/* must have a prior join */

	gsf->gf_fmode = pmc->sfmode;
	count = psl ? psl->sl_count : 0;
	read_unlock_bh(&idev->lock);

	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {

	/* changes to psl require the socket lock, a read lock on
	 * ipv6_sk_mc_lock and a write lock on pmc->sflock. We have
	 * the socket lock, so reading here is safe.
	 */
	for (i = 0; i < copycount; i++) {
		struct sockaddr_in6 *psin6;
		struct sockaddr_storage ss;

		psin6 = (struct sockaddr_in6 *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin6->sin6_family = AF_INET6;
		psin6->sin6_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))

	read_unlock_bh(&idev->lock);
bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
		    const struct in6_addr *src_addr)

	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc;
	struct ip6_sf_socklist *psl;

	for_each_pmc_rcu(np, mc) {
		if (ipv6_addr_equal(&mc->addr, mc_addr))

	read_lock(&mc->sflock);
		rv = mc->sfmode == MCAST_EXCLUDE;

		for (i = 0; i < psl->sl_count; i++) {
			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))

		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
	read_unlock(&mc->sflock);
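
/* Filter semantics implemented above, per RFC 3810: in MCAST_INCLUDE mode
 * a packet is delivered only if its source is on the socket's list; in
 * MCAST_EXCLUDE mode only if it is not.  For example, with sflist =
 * { 2001:db8::1 } a packet from 2001:db8::1 passes in INCLUDE mode and is
 * dropped in EXCLUDE mode; any other source behaves the opposite way.
 */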
static void ma_put(struct ifmcaddr6 *mc)

	if (atomic_dec_and_test(&mc->mca_refcnt)) {
		in6_dev_put(mc->idev);

static void igmp6_group_added(struct ifmcaddr6 *mc)

	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)

	spin_lock_bh(&mc->mca_lock);
	if (!(mc->mca_flags & MAF_LOADED)) {
		mc->mca_flags |= MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_add(dev, buf);

	spin_unlock_bh(&mc->mca_lock);

	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))

	if (mld_in_v1_mode(mc->idev)) {
		igmp6_join_group(mc);

	mc->mca_crcount = mc->idev->mc_qrv;
	mld_ifc_event(mc->idev);
static void igmp6_group_dropped(struct ifmcaddr6 *mc)

	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)

	spin_lock_bh(&mc->mca_lock);
	if (mc->mca_flags & MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_del(dev, buf);

	if (mc->mca_flags & MAF_NOREPORT)
	spin_unlock_bh(&mc->mca_lock);

		igmp6_leave_group(mc);

	spin_lock_bh(&mc->mca_lock);
	if (del_timer(&mc->mca_timer))
		atomic_dec(&mc->mca_refcnt);
	ip6_mc_clear_src(mc);
	spin_unlock_bh(&mc->mca_lock);
/* deleted ifmcaddr6 manipulation */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)

	struct ifmcaddr6 *pmc;

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCAs.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);

	spin_lock_bh(&im->mca_lock);
	spin_lock_init(&pmc->mca_lock);
	pmc->idev = im->idev;
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		pmc->mca_tomb = im->mca_tomb;
		pmc->mca_sources = im->mca_sources;
		im->mca_tomb = im->mca_sources = NULL;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->mca_crcount;

	spin_unlock_bh(&im->mca_lock);

	spin_lock_bh(&idev->mc_lock);
	pmc->next = idev->mc_tomb;
	spin_unlock_bh(&idev->mc_lock);
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)

	struct ifmcaddr6 *pmc, *pmc_prev;
	struct ip6_sf_list *psf, *psf_next;

	spin_lock_bh(&idev->mc_lock);
	for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(&pmc->mca_addr, pmca))

			pmc_prev->next = pmc->next;
			idev->mc_tomb = pmc->next;
	spin_unlock_bh(&idev->mc_lock);

		for (psf = pmc->mca_tomb; psf; psf = psf_next) {
			psf_next = psf->sf_next;
		in6_dev_put(pmc->idev);
static void mld_clear_delrec(struct inet6_dev *idev)

	struct ifmcaddr6 *pmc, *nextpmc;

	spin_lock_bh(&idev->mc_lock);
	idev->mc_tomb = NULL;
	spin_unlock_bh(&idev->mc_lock);

	for (; pmc; pmc = nextpmc) {
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);

	/* clear dead sources, too */
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		struct ip6_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->mca_lock);
		pmc->mca_tomb = NULL;
		spin_unlock_bh(&pmc->mca_lock);
		for (; psf; psf = psf_next) {
			psf_next = psf->sf_next;

	read_unlock_bh(&idev->lock);
/* device multicast group inc (add if not found) */
int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)

	struct ifmcaddr6 *mc;
	struct inet6_dev *idev;

	/* we need to take a reference on idev */
	idev = in6_dev_get(dev);

	write_lock_bh(&idev->lock);
		write_unlock_bh(&idev->lock);

	for (mc = idev->mc_list; mc; mc = mc->next) {
		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
			write_unlock_bh(&idev->lock);
			ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,

	/*
	 * not found: create a new one.
	 */

	mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
		write_unlock_bh(&idev->lock);

	setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);

	mc->mca_addr = *addr;
	mc->idev = idev;	/* (reference taken) */

	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	atomic_set(&mc->mca_refcnt, 2);
	spin_lock_init(&mc->mca_lock);

	/* initial mode is (EX, empty) */
	mc->mca_sfmode = MCAST_EXCLUDE;
	mc->mca_sfcount[MCAST_EXCLUDE] = 1;

	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	mc->next = idev->mc_list;
	write_unlock_bh(&idev->lock);

	mld_del_delrec(idev, &mc->mca_addr);
	igmp6_group_added(mc);
/* device multicast group del */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)

	struct ifmcaddr6 *ma, **map;

	write_lock_bh(&idev->lock);
	for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
			if (--ma->mca_users == 0) {
				write_unlock_bh(&idev->lock);

				igmp6_group_dropped(ma);

			write_unlock_bh(&idev->lock);

	write_unlock_bh(&idev->lock);

int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)

	struct inet6_dev *idev;

	idev = __in6_dev_get(dev);

		err = __ipv6_dev_mc_dec(idev, addr);
/* check if the interface/address pair is valid */
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
			 const struct in6_addr *src_addr)

	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;

	idev = __in6_dev_get(dev);
		read_lock_bh(&idev->lock);
		for (mc = idev->mc_list; mc; mc = mc->next) {
			if (ipv6_addr_equal(&mc->mca_addr, group))

		if (src_addr && !ipv6_addr_any(src_addr)) {
			struct ip6_sf_list *psf;

			spin_lock_bh(&mc->mca_lock);
			for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
				if (ipv6_addr_equal(&psf->sf_addr, src_addr))

				rv = psf->sf_count[MCAST_INCLUDE] ||
					psf->sf_count[MCAST_EXCLUDE] !=
					mc->mca_sfcount[MCAST_EXCLUDE];
				rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
			spin_unlock_bh(&mc->mca_lock);
			rv = true;	/* don't filter unspecified source */
		read_unlock_bh(&idev->lock);
static void mld_gq_start_timer(struct inet6_dev *idev)

	unsigned long tv = prandom_u32() % idev->mc_maxdelay;

	idev->mc_gq_running = 1;
	if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))

static void mld_gq_stop_timer(struct inet6_dev *idev)

	idev->mc_gq_running = 0;
	if (del_timer(&idev->mc_gq_timer))
		__in6_dev_put(idev);

static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)

	unsigned long tv = prandom_u32() % delay;

	if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))

static void mld_ifc_stop_timer(struct inet6_dev *idev)

	idev->mc_ifc_count = 0;
	if (del_timer(&idev->mc_ifc_timer))
		__in6_dev_put(idev);

static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)

	unsigned long tv = prandom_u32() % delay;

	if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))

static void mld_dad_stop_timer(struct inet6_dev *idev)

	if (del_timer(&idev->mc_dad_timer))
		__in6_dev_put(idev);
/*
 *	IGMP handling (alias multicast ICMPv6 messages)
 */

static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)

	unsigned long delay = resptime;

	/* Do not start timer for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)

	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;

	if (delay >= resptime)
		delay = prandom_u32() % resptime;

	ma->mca_timer.expires = jiffies + delay;
	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING;
/* mark EXCLUDE-mode sources */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
			     const struct in6_addr *srcs)

	struct ip6_sf_list *psf;

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {

	pmc->mca_flags &= ~MAF_GSQUERY;
	if (scount == nsrcs)	/* all sources excluded */

static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
			    const struct in6_addr *srcs)

	struct ip6_sf_list *psf;

	if (pmc->mca_sfmode == MCAST_EXCLUDE)
		return mld_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
		for (i = 0; i < nsrcs; i++) {
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {

		pmc->mca_flags &= ~MAF_GSQUERY;
	pmc->mca_flags |= MAF_GSQUERY;
static int mld_force_mld_version(const struct inet6_dev *idev)

	/* Normally, both are 0 here. If enforcement of a particular
	 * version is in use, the per-device setting has lower precedence
	 * than the 'all' device setting (.../conf/all/force_mld_version).
	 */
	if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
		return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;

	return idev->cnf.force_mld_version;
1169 static bool mld_in_v1_mode(const struct inet6_dev *idev)
1171 if (mld_in_v2_mode_only(idev))
1173 if (mld_in_v1_mode_only(idev))
1175 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1181 static void mld_set_v1_mode(struct inet6_dev *idev)
1183 /* RFC3810, relevant sections:
1184 * - 9.1. Robustness Variable
1185 * - 9.2. Query Interval
1186 * - 9.3. Query Response Interval
1187 * - 9.12. Older Version Querier Present Timeout
1189 unsigned long switchback;
1191 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1193 idev->mc_v1_seen = jiffies + switchback;
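
/* Worked example with the defaults defined above: the Older Version
 * Querier Present Timeout is [Robustness Variable] x [Query Interval] +
 * [Query Response Interval] (RFC3810, 9.12), i.e.
 *
 *	switchback = 2 * (125 * HZ) + 10 * HZ	(= 260 seconds)
 *
 * so the interface stays in MLDv1 mode for roughly four minutes past the
 * last MLDv1 query seen.
 */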
static void mld_update_qrv(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)

	/* RFC3810, relevant sections:
	 *  - 5.1.8. QRV (Querier's Robustness Variable)
	 *  - 9.1. Robustness Variable
	 */

	/* The value of the Robustness Variable MUST NOT be zero,
	 * and SHOULD NOT be one. Catch this here if we ever run
	 * into such a case in the future.
	 */
	WARN_ON(idev->mc_qrv == 0);

	if (mlh2->mld2q_qrv > 0)
		idev->mc_qrv = mlh2->mld2q_qrv;

	if (unlikely(idev->mc_qrv < 2)) {
		net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
				     idev->mc_qrv, MLD_QRV_DEFAULT);
		idev->mc_qrv = MLD_QRV_DEFAULT;
static void mld_update_qi(struct inet6_dev *idev,
			  const struct mld2_query *mlh2)

	/* RFC3810, relevant sections:
	 *  - 5.1.9. QQIC (Querier's Query Interval Code)
	 *  - 9.2. Query Interval
	 *  - 9.12. Older Version Querier Present Timeout
	 *    (the [Query Interval] in the last Query received)
	 */
	unsigned long mc_qqi;

	if (mlh2->mld2q_qqic < 128) {
		mc_qqi = mlh2->mld2q_qqic;
	} else {
		unsigned long mc_man, mc_exp;

		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);

		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);

	idev->mc_qi = mc_qqi * HZ;
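
/* QQIC decoding, worked example: values below 128 are the Query Interval
 * in seconds directly.  Larger values are floating point, with the
 * exponent in bits 6:4 and the mantissa in bits 3:0, so
 * QQI = (mantissa | 0x10) << (exponent + 3).  For QQIC = 0x89
 * (exponent 0, mantissa 9) this gives
 *
 *	mc_qqi = (0x9 | 0x10) << (0 + 3) = 25 * 8 = 200 seconds
 */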
static void mld_update_qri(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)

	/* RFC3810, relevant sections:
	 *  - 5.1.3. Maximum Response Code
	 *  - 9.3. Query Response Interval
	 */
	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));

static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
			  unsigned long *max_delay)

	unsigned long mldv1_md;

	/* Ignore v1 queries */
	if (mld_in_v2_mode_only(idev))

	/* MLDv1 router present */
	mldv1_md = ntohs(mld->mld_maxdelay);
	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);

	mld_set_v1_mode(idev);

	/* cancel MLDv2 report timer */
	mld_gq_stop_timer(idev);
	/* cancel the interface change timer */
	mld_ifc_stop_timer(idev);
	/* clear deleted report items */
	mld_clear_delrec(idev);

static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
			  unsigned long *max_delay)

	/* hosts need to stay in MLDv1 mode, discard MLDv2 queries */
	if (mld_in_v1_mode(idev))

	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);

	mld_update_qrv(idev, mld);
	mld_update_qi(idev, mld);
	mld_update_qri(idev, mld);

	idev->mc_maxdelay = *max_delay;
/* called with rcu_read_lock() */
int igmp6_event_query(struct sk_buff *skb)

	struct mld2_query *mlh2 = NULL;
	struct ifmcaddr6 *ma;
	const struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct mld_msg *mld;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))

	/* compute payload length excluding extension headers */
	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
	len -= skb_network_header_len(skb);

	/* Drop queries with a non-link-local source */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))

	idev = __in6_dev_get(skb->dev);

	mld = (struct mld_msg *)icmp6_hdr(skb);
	group = &mld->mld_mca;
	group_type = ipv6_addr_type(group);

	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type & IPV6_ADDR_MULTICAST))

	if (len == MLD_V1_QUERY_LEN) {
		err = mld_process_v1(idev, mld, &max_delay);

	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
		int srcs_offset = sizeof(struct mld2_query) -
				  sizeof(struct icmp6hdr);

		if (!pskb_may_pull(skb, srcs_offset))

		mlh2 = (struct mld2_query *)skb_transport_header(skb);

		err = mld_process_v2(idev, mlh2, &max_delay);

		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->mld2q_nsrcs)
				return -EINVAL; /* no sources allowed */

			mld_gq_start_timer(idev);

		/* mark sources to include, if group & source-specific */
		if (mlh2->mld2q_nsrcs != 0) {
			if (!pskb_may_pull(skb, srcs_offset +
					   ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))

			mlh2 = (struct mld2_query *)skb_transport_header(skb);

	read_lock_bh(&idev->lock);
	if (group_type == IPV6_ADDR_ANY) {
		for (ma = idev->mc_list; ma; ma = ma->next) {
			spin_lock_bh(&ma->mca_lock);
			igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);

		for (ma = idev->mc_list; ma; ma = ma->next) {
			if (!ipv6_addr_equal(group, &ma->mca_addr))

			spin_lock_bh(&ma->mca_lock);
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
					ma->mca_flags &= ~MAF_GSQUERY;
				/* gsquery <- mark */
					ma->mca_flags |= MAF_GSQUERY;
					ma->mca_flags &= ~MAF_GSQUERY;

			if (!(ma->mca_flags & MAF_GSQUERY) ||
			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
				igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);

	read_unlock_bh(&idev->lock);
/* called with rcu_read_lock() */
int igmp6_event_report(struct sk_buff *skb)

	struct ifmcaddr6 *ma;
	struct inet6_dev *idev;
	struct mld_msg *mld;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)

	/* send our report if the MC router may not have heard this report */
	if (skb->pkt_type != PACKET_MULTICAST &&
	    skb->pkt_type != PACKET_BROADCAST)

	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))

	mld = (struct mld_msg *)icmp6_hdr(skb);

	/* Drop reports with a non-link-local source */
	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
	if (addr_type != IPV6_ADDR_ANY &&
	    !(addr_type & IPV6_ADDR_LINKLOCAL))

	idev = __in6_dev_get(skb->dev);

	/*
	 *	Cancel the timer for this group
	 */

	read_lock_bh(&idev->lock);
	for (ma = idev->mc_list; ma; ma = ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
			spin_lock(&ma->mca_lock);
			if (del_timer(&ma->mca_timer))
				atomic_dec(&ma->mca_refcnt);
			ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
			spin_unlock(&ma->mca_lock);

	read_unlock_bh(&idev->lock);
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
		  int gdeleted, int sdeleted)

	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
			if (pmc->mca_sfmode == MCAST_INCLUDE)
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == MLD2_MODE_IS_INCLUDE;
			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];

	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)

	struct ip6_sf_list *psf;

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))

static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
		       struct net_device *dev,
		       const struct in6_addr *saddr,
		       const struct in6_addr *daddr,

	struct ipv6hdr *hdr;

	skb->protocol = htons(ETH_P_IPV6);

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, 0, 0);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = inet6_sk(sk)->hop_limit;

	hdr->saddr = *saddr;
	hdr->daddr = *daddr;
static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)

	struct net_device *dev = idev->dev;
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct sk_buff *skb;
	struct mld2_report *pmr;
	struct in6_addr addr_buf;
	const struct in6_addr *saddr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,

	/* we assume size > sizeof(ra) here */
	size += hlen + tlen;
	/* limit our allocations to order-0 page */
	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
	skb = sock_alloc_send_skb(sk, size, 1, &err);

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);

	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;

	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

	memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));

	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
	skb_put(skb, sizeof(*pmr));
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
	pmr->mld2r_resv1 = 0;
	pmr->mld2r_cksum = 0;
	pmr->mld2r_resv2 = 0;
	pmr->mld2r_ngrec = 0;
static void mld_sendpack(struct sk_buff *skb)

	struct ipv6hdr *pip6 = ipv6_hdr(skb);
	struct mld2_report *pmr =
			      (struct mld2_report *)skb_transport_header(skb);
	int payload_len, mldlen;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);
	struct dst_entry *dst;

	idev = __in6_dev_get(skb->dev);
	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);

	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	pip6->payload_len = htons(payload_len);

	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
					   csum_partial(skb_transport_header(skb),

	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
	dst = icmp6_dst_alloc(skb->dev, &fl6);

	skb_dst_set(skb, dst);

	payload_len = skb->len;

	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,

		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)

	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc, type, gdel, sdel);

static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, struct mld2_grec **ppgr)

	struct net_device *dev = pmc->idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr;

		skb = mld_newpack(pmc->idev, dev->mtu);

	pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
#define AVAILABLE(skb)	((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
	skb_tailroom(skb)) : 0)
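
/* AVAILABLE() returns how much room the current report packet still has
 * for group records: the device MTU minus what is already queued when the
 * skb has a device, the raw tailroom otherwise, and 0 when there is no
 * packet yet (which makes add_grec() below allocate a fresh one via
 * mld_newpack()).
 */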
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, int gdeleted, int sdeleted, int crsend)

	struct inet6_dev *idev = pmc->idev;
	struct net_device *dev = idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr = NULL;
	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;

	if (pmc->mca_flags & MAF_NOREPORT)

	isquery = type == MLD2_MODE_IS_INCLUDE ||
		  type == MLD2_MODE_IS_EXCLUDE;
	truncate = type == MLD2_MODE_IS_EXCLUDE ||
		   type == MLD2_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
		if (pmr && pmr->mld2r_ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {

			skb = mld_newpack(idev, dev->mtu);

	for (psf = *psf_list; psf; psf = psf_next) {
		struct in6_addr *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {

		/* clear marks on query responses */

		if (AVAILABLE(skb) < sizeof(*psrc) +
		    first * sizeof(struct mld2_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
				pgr->grec_nsrcs = htons(scount);
			skb = mld_newpack(idev, dev->mtu);
			skb = add_grhead(skb, pmc, type, &pgr);

		psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
		*psrc = psf->sf_addr;

		if ((type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
					psf_prev->sf_next = psf->sf_next;
					*psf_list = psf->sf_next;

		if (type == MLD2_ALLOW_NEW_SOURCES ||
		    type == MLD2_BLOCK_OLD_SOURCES)
		if (pmc->mca_crcount || isquery || crsend) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
				skb = NULL; /* add_grhead will get a new one */
			skb = add_grhead(skb, pmc, type, &pgr);

		pgr->grec_nsrcs = htons(scount);

		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)

	struct sk_buff *skb = NULL;

	read_lock_bh(&idev->lock);
		for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
			if (pmc->mca_flags & MAF_NOREPORT)

			spin_lock_bh(&pmc->mca_lock);
			if (pmc->mca_sfcount[MCAST_EXCLUDE])
				type = MLD2_MODE_IS_EXCLUDE;
			else
				type = MLD2_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);
			spin_unlock_bh(&pmc->mca_lock);

		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_MODE_IS_EXCLUDE;
		else
			type = MLD2_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0, 0);
		spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
/* remove zero-count source records from a source filter list */
static void mld_clear_zeros(struct ip6_sf_list **ppsf)

	struct ip6_sf_list *psf_prev, *psf_next, *psf;

	for (psf = *ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
				psf_prev->sf_next = psf->sf_next;
				*ppsf = psf->sf_next;
static void mld_send_cr(struct inet6_dev *idev)

	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;

	read_lock_bh(&idev->lock);
	spin_lock(&idev->mc_lock);

	for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
		pmc_next = pmc->next;
		if (pmc->mca_sfmode == MCAST_INCLUDE) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1, 0);

		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
				type = MLD2_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0, 0);

			if (pmc->mca_crcount == 0) {
				mld_clear_zeros(&pmc->mca_tomb);
				mld_clear_zeros(&pmc->mca_sources);

		if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
		    !pmc->mca_sources) {
				pmc_prev->next = pmc_next;
				idev->mc_tomb = pmc_next;
			in6_dev_put(pmc->idev);

	spin_unlock(&idev->mc_lock);

	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_ALLOW_NEW_SOURCES;
		} else {
			type = MLD2_ALLOW_NEW_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;

		skb = add_grec(skb, pmc, type, 0, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */

		/* filter mode changes */
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE)
				type = MLD2_CHANGE_TO_EXCLUDE;
			else
				type = MLD2_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);

		spin_unlock_bh(&pmc->mca_lock);

	read_unlock_bh(&idev->lock);

	(void) mld_sendpack(skb);
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)

	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct inet6_dev *idev;
	struct sk_buff *skb;
	struct mld_msg *hdr;
	const struct in6_addr *snd_addr, *saddr;
	struct in6_addr addr_buf;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int err, len, payload_len, full_len;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
	struct dst_entry *dst;

	if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;

	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
	payload_len = len + sizeof(ra);
	full_len = sizeof(struct ipv6hdr) + payload_len;

	IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
			 IPSTATS_MIB_OUT, full_len);

	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);

		IP6_INC_STATS(net, __in6_dev_get(dev),
			      IPSTATS_MIB_OUTDISCARDS);

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;

	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

	memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));

	hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
	memset(hdr, 0, sizeof(struct mld_msg));
	hdr->mld_type = type;
	hdr->mld_mca = *addr;

	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
					 csum_partial(hdr, len, 0));

	idev = __in6_dev_get(skb->dev);

	icmpv6_flow_init(sk, &fl6, type,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
	dst = icmp6_dst_alloc(skb->dev, &fl6);

	skb_dst_set(skb, dst);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,

		ICMP6MSGOUT_INC_STATS(net, idev, type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
static void mld_send_initial_cr(struct inet6_dev *idev)

	struct sk_buff *skb;
	struct ifmcaddr6 *pmc;

	if (mld_in_v1_mode(idev))

	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_CHANGE_TO_EXCLUDE;
		else
			type = MLD2_CHANGE_TO_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0, 1);
		spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
void ipv6_mc_dad_complete(struct inet6_dev *idev)

	idev->mc_dad_count = idev->mc_qrv;
	if (idev->mc_dad_count) {
		mld_send_initial_cr(idev);
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_timer(idev, idev->mc_maxdelay);

static void mld_dad_timer_expire(unsigned long data)

	struct inet6_dev *idev = (struct inet6_dev *)data;

	mld_send_initial_cr(idev);
	if (idev->mc_dad_count) {
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_timer(idev, idev->mc_maxdelay);
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)

	struct ip6_sf_list *psf, *psf_prev;

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))

	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */

	psf->sf_count[sfmode]--;
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
		struct inet6_dev *idev = pmc->idev;

		/* no more filters for this source */
			psf_prev->sf_next = psf->sf_next;
			pmc->mca_sources = psf->sf_next;
		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
		    !mld_in_v1_mode(idev)) {
			psf->sf_crcount = idev->mc_qrv;
			psf->sf_next = pmc->mca_tomb;
			pmc->mca_tomb = psf;
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,

	struct ifmcaddr6 *pmc;

	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))

		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);

	spin_lock_bh(&pmc->mca_lock);

	if (!pmc->mca_sfcount[sfmode]) {
		spin_unlock_bh(&pmc->mca_lock);
		read_unlock_bh(&idev->lock);

	pmc->mca_sfcount[sfmode]--;

	for (i = 0; i < sfcount; i++) {
		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;

	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->mca_sfcount[MCAST_INCLUDE]) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		pmc->mca_sfmode = MCAST_INCLUDE;
		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(pmc->idev);
	} else if (sf_setstate(pmc) || changerec)
		mld_ifc_event(pmc->idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
/* Add multicast single-source filter to the interface list */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)

	struct ip6_sf_list *psf, *psf_prev;

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))

		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);

		psf->sf_addr = *psfsrc;
			psf_prev->sf_next = psf;
			pmc->mca_sources = psf;
	psf->sf_count[sfmode]++;
static void sf_markstate(struct ifmcaddr6 *pmc)

	struct ip6_sf_list *psf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
static int sf_setstate(struct ifmcaddr6 *pmc)

	struct ip6_sf_list *psf, *dpsf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	int qrv = pmc->idev->mc_qrv;

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;

			if (!psf->sf_oldin) {
				struct ip6_sf_list *prev = NULL;

				for (dpsf = pmc->mca_tomb; dpsf;
				     dpsf = dpsf->sf_next) {
					if (ipv6_addr_equal(&dpsf->sf_addr,

						prev->sf_next = dpsf->sf_next;
						pmc->mca_tomb = dpsf->sf_next;

				psf->sf_crcount = qrv;

		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
				if (ipv6_addr_equal(&dpsf->sf_addr,

				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);

				/* pmc->mca_lock held by callers */
				dpsf->sf_next = pmc->mca_tomb;
				pmc->mca_tomb = dpsf;

			dpsf->sf_crcount = qrv;
/* Add multicast source filter list to the interface list */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,

	struct ifmcaddr6 *pmc;

	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))

		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);

	spin_lock_bh(&pmc->mca_lock);

	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;

	pmc->mca_sfcount[sfmode]++;

	for (i = 0; i < sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);

		pmc->mca_sfcount[sfmode]--;
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc))
		mld_ifc_event(idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)

	struct ip6_sf_list *psf, *nextpsf;

	for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
	pmc->mca_tomb = NULL;
	for (psf = pmc->mca_sources; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
	pmc->mca_sources = NULL;
	pmc->mca_sfmode = MCAST_EXCLUDE;
	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
static void igmp6_join_group(struct ifmcaddr6 *ma)

	unsigned long delay;

	if (ma->mca_flags & MAF_NOREPORT)

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = prandom_u32() % unsolicited_report_interval(ma->idev);

	spin_lock_bh(&ma->mca_lock);
	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;

	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
	spin_unlock_bh(&ma->mca_lock);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev)

	/* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
	 * so no other readers or writers of iml or its sflist
	 */
		/* any-source empty exclude case */
		return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);

	err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
		iml->sflist->sl_count, iml->sflist->sl_addr, 0);
	sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
static void igmp6_leave_group(struct ifmcaddr6 *ma)

	if (mld_in_v1_mode(ma->idev)) {
		if (ma->mca_flags & MAF_LAST_REPORTER)
			igmp6_send(&ma->mca_addr, ma->idev->dev,
				   ICMPV6_MGM_REDUCTION);
		mld_add_delrec(ma->idev, ma);
		mld_ifc_event(ma->idev);
static void mld_gq_timer_expire(unsigned long data)

	struct inet6_dev *idev = (struct inet6_dev *)data;

	idev->mc_gq_running = 0;
	mld_send_report(idev, NULL);

static void mld_ifc_timer_expire(unsigned long data)

	struct inet6_dev *idev = (struct inet6_dev *)data;

	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_timer(idev, idev->mc_maxdelay);

static void mld_ifc_event(struct inet6_dev *idev)

	if (mld_in_v1_mode(idev))
	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_timer(idev, 1);

static void igmp6_timer_handler(unsigned long data)

	struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;

	if (mld_in_v1_mode(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);

	spin_lock(&ma->mca_lock);
	ma->mca_flags |= MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	spin_unlock(&ma->mca_lock);
/* Device changing type */

void ipv6_mc_unmap(struct inet6_dev *idev)

	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);
void ipv6_mc_remap(struct inet6_dev *idev)

/* Device going down */

void ipv6_mc_down(struct inet6_dev *idev)

	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);
	mld_ifc_stop_timer(idev);
	mld_gq_stop_timer(idev);
	mld_dad_stop_timer(idev);

	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);

	mld_clear_delrec(idev);

/* Device going up */

void ipv6_mc_up(struct inet6_dev *idev)

	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_added(i);
	read_unlock_bh(&idev->lock);
/* IPv6 device initialization. */

void ipv6_mc_init_dev(struct inet6_dev *idev)

	write_lock_bh(&idev->lock);
	spin_lock_init(&idev->mc_lock);
	idev->mc_gq_running = 0;
	setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
		    (unsigned long)idev);
	idev->mc_tomb = NULL;
	idev->mc_ifc_count = 0;
	setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
		    (unsigned long)idev);
	setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
		    (unsigned long)idev);

	idev->mc_qrv = MLD_QRV_DEFAULT;
	idev->mc_qi = MLD_QI_DEFAULT;
	idev->mc_qri = MLD_QRI_DEFAULT;

	idev->mc_maxdelay = unsolicited_report_interval(idev);
	idev->mc_v1_seen = 0;
	write_unlock_bh(&idev->lock);
/* Device is about to be destroyed: clean up. */

void ipv6_mc_destroy_dev(struct inet6_dev *idev)

	struct ifmcaddr6 *i;

	/* Deactivate timers */

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	write_lock_bh(&idev->lock);
	while ((i = idev->mc_list) != NULL) {
		idev->mc_list = i->next;
		write_unlock_bh(&idev->lock);

		igmp6_group_dropped(i);

		write_lock_bh(&idev->lock);

	write_unlock_bh(&idev->lock);
#ifdef CONFIG_PROC_FS
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;

#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)

static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)

	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);

		read_lock_bh(&idev->lock);
		read_unlock_bh(&idev->lock);
static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)

	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

		if (likely(state->idev != NULL))
			read_unlock_bh(&state->idev->lock);

		state->dev = next_net_device_rcu(state->dev);

		state->idev = __in6_dev_get(state->dev);

		read_lock_bh(&state->idev->lock);
		im = state->idev->mc_list;

static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)

	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);

	while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
	return pos ? NULL : im;
static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)

	return igmp6_mc_get_idx(seq, *pos);

static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)

	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);

static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)

	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	if (likely(state->idev != NULL)) {
		read_unlock_bh(&state->idev->lock);

static int igmp6_mc_seq_show(struct seq_file *seq, void *v)

	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags & MAF_TIMER_RUNNING) ?
		   jiffies_to_clock_t(im->mca_timer.expires - jiffies) : 0);
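
/* Example /proc/net/igmp6 line produced by the format above (values are
 * illustrative only): ifindex, device name, group address (%pi6 prints it
 * without colons), user count, flags, remaining timer ticks:
 *
 *	1    lo              ff020000000000000000000000000001     1 0000000C 0
 */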
static const struct seq_operations igmp6_mc_seq_ops = {
	.start	=	igmp6_mc_seq_start,
	.next	=	igmp6_mc_seq_next,
	.stop	=	igmp6_mc_seq_stop,
	.show	=	igmp6_mc_seq_show,

static int igmp6_mc_seq_open(struct inode *inode, struct file *file)

	return seq_open_net(inode, file, &igmp6_mc_seq_ops,
			    sizeof(struct igmp6_mc_iter_state));

static const struct file_operations igmp6_mc_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	igmp6_mc_seq_open,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
struct igmp6_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifmcaddr6 *im;

#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)

static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)

	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))

		read_lock_bh(&idev->lock);
		if (likely(im != NULL)) {
			spin_lock_bh(&im->mca_lock);
			psf = im->mca_sources;
			if (likely(psf != NULL)) {
			spin_unlock_bh(&im->mca_lock);
		read_unlock_bh(&idev->lock);
static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)

	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

		spin_unlock_bh(&state->im->mca_lock);
		state->im = state->im->next;
		while (!state->im) {
			if (likely(state->idev != NULL))
				read_unlock_bh(&state->idev->lock);

			state->dev = next_net_device_rcu(state->dev);

			state->idev = __in6_dev_get(state->dev);

			read_lock_bh(&state->idev->lock);
			state->im = state->idev->mc_list;

		spin_lock_bh(&state->im->mca_lock);
		psf = state->im->mca_sources;

static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)

	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);

	while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
	return pos ? NULL : psf;

static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)

	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)

	struct ip6_sf_list *psf;
	if (v == SEQ_START_TOKEN)
		psf = igmp6_mcf_get_first(seq);
	else
		psf = igmp6_mcf_get_next(seq, v);

static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)

	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	if (likely(state->im != NULL)) {
		spin_unlock_bh(&state->im->mca_lock);

	if (likely(state->idev != NULL)) {
		read_unlock_bh(&state->idev->lock);
static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)

	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
			   "%32s %32s %6s %6s\n", "Idx",
			   "Device", "Multicast Address",
			   "Source Address", "INC", "EXC");
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
static const struct seq_operations igmp6_mcf_seq_ops = {
	.start	=	igmp6_mcf_seq_start,
	.next	=	igmp6_mcf_seq_next,
	.stop	=	igmp6_mcf_seq_stop,
	.show	=	igmp6_mcf_seq_show,

static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)

	return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
			    sizeof(struct igmp6_mcf_iter_state));

static const struct file_operations igmp6_mcf_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	igmp6_mcf_seq_open,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
static int __net_init igmp6_proc_init(struct net *net)

	if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops))
	if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
			 &igmp6_mcf_seq_fops))
		goto out_proc_net_igmp6;

	remove_proc_entry("igmp6", net->proc_net);

static void __net_exit igmp6_proc_exit(struct net *net)

	remove_proc_entry("mcfilter6", net->proc_net);
	remove_proc_entry("igmp6", net->proc_net);

static inline int igmp6_proc_init(struct net *net)

static inline void igmp6_proc_exit(struct net *net)
static int __net_init igmp6_net_init(struct net *net)

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",

	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create;

	inet_ctl_sock_destroy(net->ipv6.igmp_sk);

static void __net_exit igmp6_net_exit(struct net *net)

	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	igmp6_proc_exit(net);

static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,

int __init igmp6_init(void)

	return register_pernet_subsys(&igmp6_net_ops);

void igmp6_cleanup(void)

	unregister_pernet_subsys(&igmp6_net_ops);