2 * Multicast support for IPv6
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
18 * yoshfuji : fix format of router-alert option
19 * YOSHIFUJI Hideaki @USAGI:
20 * Fixed source address for MLD message based on
21 * <draft-ietf-magma-mld-source-05.txt>.
22 * YOSHIFUJI Hideaki @USAGI:
23 * - Ignore Queries for invalid addresses.
24 * - MLD for link-local addresses.
25 * David L Stevens <dlstevens@us.ibm.com>:
29 #include <linux/module.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/socket.h>
34 #include <linux/sockios.h>
35 #include <linux/jiffies.h>
36 #include <linux/times.h>
37 #include <linux/net.h>
39 #include <linux/in6.h>
40 #include <linux/netdevice.h>
41 #include <linux/if_arp.h>
42 #include <linux/route.h>
43 #include <linux/init.h>
44 #include <linux/proc_fs.h>
45 #include <linux/seq_file.h>
46 #include <linux/slab.h>
47 #include <linux/pkt_sched.h>
50 #include <linux/netfilter.h>
51 #include <linux/netfilter_ipv6.h>
53 #include <net/net_namespace.h>
58 #include <net/protocol.h>
59 #include <net/if_inet6.h>
60 #include <net/ndisc.h>
61 #include <net/addrconf.h>
62 #include <net/ip6_route.h>
63 #include <net/inet_common.h>
65 #include <net/ip6_checksum.h>
67 /* Set to 3 to get tracing... */
71 #define MDBG(x) printk x
76 /* Ensure that we have struct in6_addr aligned on a 32-bit word. */
77 static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
78 BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
79 BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
80 BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
83 static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
85 /* Big mc list lock for all the sockets */
86 static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
88 static void igmp6_join_group(struct ifmcaddr6 *ma);
89 static void igmp6_leave_group(struct ifmcaddr6 *ma);
90 static void igmp6_timer_handler(unsigned long data);
92 static void mld_gq_timer_expire(unsigned long data);
93 static void mld_ifc_timer_expire(unsigned long data);
94 static void mld_ifc_event(struct inet6_dev *idev);
95 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
96 static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
97 static void mld_clear_delrec(struct inet6_dev *idev);
98 static bool mld_in_v1_mode(const struct inet6_dev *idev);
99 static int sf_setstate(struct ifmcaddr6 *pmc);
100 static void sf_markstate(struct ifmcaddr6 *pmc);
101 static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
102 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
103 int sfmode, int sfcount, const struct in6_addr *psfsrc,
105 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
106 int sfmode, int sfcount, const struct in6_addr *psfsrc,
108 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
109 struct inet6_dev *idev);
111 #define MLD_QRV_DEFAULT 2
112 /* RFC3810, 9.2. Query Interval */
113 #define MLD_QI_DEFAULT (125 * HZ)
114 /* RFC3810, 9.3. Query Response Interval */
115 #define MLD_QRI_DEFAULT (10 * HZ)
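/* With the defaults above, the RFC3810, 9.12 [Older Version Querier
 * Present Timeout] works out to QRV * QI + QRI = 2 * 125s + 10s = 260s;
 * mld_set_v1_mode() computes the same expression from the current
 * per-interface values when an MLDv1 query is heard.
 */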
117 /* RFC3810, 8.1 Query Version Distinctions */
118 #define MLD_V1_QUERY_LEN 24
119 #define MLD_V2_QUERY_LEN_MIN 28
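/* Per RFC3810, 5.1: an MLDv1 query is the 4-byte ICMPv6 header plus
 * 2 (maximum response delay) + 2 (reserved) + 16 (multicast address)
 * = 24 bytes; MLDv2 appends 1 (Resv/S/QRV) + 1 (QQIC) + 2 (number of
 * sources) = 28 bytes before any source addresses.
 */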
121 #define IPV6_MLD_MAX_MSF 64
123 int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
126 * socket join on multicast group
129 #define for_each_pmc_rcu(np, pmc) \
130 for (pmc = rcu_dereference(np->ipv6_mc_list); \
132 pmc = rcu_dereference(pmc->next))
134 static int unsolicited_report_interval(struct inet6_dev *idev)
138 if (mld_in_v1_mode(idev))
139 iv = idev->cnf.mldv1_unsolicited_report_interval;
141 iv = idev->cnf.mldv2_unsolicited_report_interval;
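/* Never return 0: callers such as igmp6_join_group() use this value
 * as the modulus when picking a random unsolicited report delay.
 */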
143 return iv > 0 ? iv : 1;
146 int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
148 struct net_device *dev = NULL;
149 struct ipv6_mc_socklist *mc_lst;
150 struct ipv6_pinfo *np = inet6_sk(sk);
151 struct net *net = sock_net(sk);
154 if (!ipv6_addr_is_multicast(addr))
158 for_each_pmc_rcu(np, mc_lst) {
159 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
160 ipv6_addr_equal(&mc_lst->addr, addr)) {
167 mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
173 mc_lst->addr = *addr;
178 rt = rt6_lookup(net, addr, NULL, 0, 0);
184 dev = dev_get_by_index_rcu(net, ifindex);
188 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
192 mc_lst->ifindex = dev->ifindex;
193 mc_lst->sfmode = MCAST_EXCLUDE;
194 rwlock_init(&mc_lst->sflock);
195 mc_lst->sflist = NULL;
198 * now add/increase the group membership on the device
201 err = ipv6_dev_mc_inc(dev, addr);
205 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
209 spin_lock(&ipv6_sk_mc_lock);
210 mc_lst->next = np->ipv6_mc_list;
211 rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
212 spin_unlock(&ipv6_sk_mc_lock);
220 * socket leave on multicast group
222 int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
224 struct ipv6_pinfo *np = inet6_sk(sk);
225 struct ipv6_mc_socklist *mc_lst;
226 struct ipv6_mc_socklist __rcu **lnk;
227 struct net *net = sock_net(sk);
229 if (!ipv6_addr_is_multicast(addr))
232 spin_lock(&ipv6_sk_mc_lock);
233 for (lnk = &np->ipv6_mc_list;
234 (mc_lst = rcu_dereference_protected(*lnk,
235 lockdep_is_held(&ipv6_sk_mc_lock))) != NULL;
236 lnk = &mc_lst->next) {
237 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
238 ipv6_addr_equal(&mc_lst->addr, addr)) {
239 struct net_device *dev;
242 spin_unlock(&ipv6_sk_mc_lock);
245 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
247 struct inet6_dev *idev = __in6_dev_get(dev);
249 (void) ip6_mc_leave_src(sk, mc_lst, idev);
251 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
253 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
255 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
256 kfree_rcu(mc_lst, rcu);
260 spin_unlock(&ipv6_sk_mc_lock);
262 return -EADDRNOTAVAIL;
265 /* called with rcu_read_lock() */
266 static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
267 const struct in6_addr *group,
270 struct net_device *dev = NULL;
271 struct inet6_dev *idev = NULL;
274 struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
281 dev = dev_get_by_index_rcu(net, ifindex);
285 idev = __in6_dev_get(dev);
288 read_lock_bh(&idev->lock);
290 read_unlock_bh(&idev->lock);
296 void ipv6_sock_mc_close(struct sock *sk)
298 struct ipv6_pinfo *np = inet6_sk(sk);
299 struct ipv6_mc_socklist *mc_lst;
300 struct net *net = sock_net(sk);
302 if (!rcu_access_pointer(np->ipv6_mc_list))
305 spin_lock(&ipv6_sk_mc_lock);
306 while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
307 lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
308 struct net_device *dev;
310 np->ipv6_mc_list = mc_lst->next;
311 spin_unlock(&ipv6_sk_mc_lock);
314 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
316 struct inet6_dev *idev = __in6_dev_get(dev);
318 (void) ip6_mc_leave_src(sk, mc_lst, idev);
320 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
322 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
325 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
326 kfree_rcu(mc_lst, rcu);
328 spin_lock(&ipv6_sk_mc_lock);
330 spin_unlock(&ipv6_sk_mc_lock);
333 int ip6_mc_source(int add, int omode, struct sock *sk,
334 struct group_source_req *pgsr)
336 struct in6_addr *source, *group;
337 struct ipv6_mc_socklist *pmc;
338 struct inet6_dev *idev;
339 struct ipv6_pinfo *inet6 = inet6_sk(sk);
340 struct ip6_sf_socklist *psl;
341 struct net *net = sock_net(sk);
347 source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
348 group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
350 if (!ipv6_addr_is_multicast(group))
354 idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
360 err = -EADDRNOTAVAIL;
362 for_each_pmc_rcu(inet6, pmc) {
363 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
365 if (ipv6_addr_equal(&pmc->addr, group))
368 if (!pmc) { /* must have a prior join */
372 /* if a source filter was set, must be the same mode as before */
374 if (pmc->sfmode != omode) {
378 } else if (pmc->sfmode != omode) {
379 /* allow mode switches for empty-set filters */
380 ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
381 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
385 write_lock(&pmc->sflock);
391 goto done; /* err = -EADDRNOTAVAIL */
393 for (i=0; i<psl->sl_count; i++) {
394 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
398 if (rv) /* source not found */
399 goto done; /* err = -EADDRNOTAVAIL */
401 /* special case - (INCLUDE, empty) == LEAVE_GROUP */
402 if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
407 /* update the interface filter */
408 ip6_mc_del_src(idev, group, omode, 1, source, 1);
410 for (j=i+1; j<psl->sl_count; j++)
411 psl->sl_addr[j-1] = psl->sl_addr[j];
416 /* else, add a new source to the filter */
418 if (psl && psl->sl_count >= sysctl_mld_max_msf) {
422 if (!psl || psl->sl_count == psl->sl_max) {
423 struct ip6_sf_socklist *newpsl;
424 int count = IP6_SFBLOCK;
427 count += psl->sl_max;
428 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
433 newpsl->sl_max = count;
434 newpsl->sl_count = count - IP6_SFBLOCK;
436 for (i=0; i<psl->sl_count; i++)
437 newpsl->sl_addr[i] = psl->sl_addr[i];
438 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
440 pmc->sflist = psl = newpsl;
442 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
443 for (i=0; i<psl->sl_count; i++) {
444 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
445 if (rv == 0) /* the source address is already in the list */
448 for (j=psl->sl_count-1; j>=i; j--)
449 psl->sl_addr[j+1] = psl->sl_addr[j];
450 psl->sl_addr[i] = *source;
453 /* update the interface list */
454 ip6_mc_add_src(idev, group, omode, 1, source, 1);
457 write_unlock(&pmc->sflock);
458 read_unlock_bh(&idev->lock);
461 return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
465 int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
467 const struct in6_addr *group;
468 struct ipv6_mc_socklist *pmc;
469 struct inet6_dev *idev;
470 struct ipv6_pinfo *inet6 = inet6_sk(sk);
471 struct ip6_sf_socklist *newpsl, *psl;
472 struct net *net = sock_net(sk);
476 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
478 if (!ipv6_addr_is_multicast(group))
480 if (gsf->gf_fmode != MCAST_INCLUDE &&
481 gsf->gf_fmode != MCAST_EXCLUDE)
485 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
494 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
499 for_each_pmc_rcu(inet6, pmc) {
500 if (pmc->ifindex != gsf->gf_interface)
502 if (ipv6_addr_equal(&pmc->addr, group))
505 if (!pmc) { /* must have a prior join */
509 if (gsf->gf_numsrc) {
510 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
516 newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
517 for (i=0; i<newpsl->sl_count; ++i) {
518 struct sockaddr_in6 *psin6;
520 psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
521 newpsl->sl_addr[i] = psin6->sin6_addr;
523 err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
524 newpsl->sl_count, newpsl->sl_addr, 0);
526 sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
531 (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
534 write_lock(&pmc->sflock);
537 (void) ip6_mc_del_src(idev, group, pmc->sfmode,
538 psl->sl_count, psl->sl_addr, 0);
539 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
541 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
542 pmc->sflist = newpsl;
543 pmc->sfmode = gsf->gf_fmode;
544 write_unlock(&pmc->sflock);
547 read_unlock_bh(&idev->lock);
550 err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
554 int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
555 struct group_filter __user *optval, int __user *optlen)
557 int err, i, count, copycount;
558 const struct in6_addr *group;
559 struct ipv6_mc_socklist *pmc;
560 struct inet6_dev *idev;
561 struct ipv6_pinfo *inet6 = inet6_sk(sk);
562 struct ip6_sf_socklist *psl;
563 struct net *net = sock_net(sk);
565 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
567 if (!ipv6_addr_is_multicast(group))
571 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
578 err = -EADDRNOTAVAIL;
580 * changes to the ipv6_mc_list require the socket lock and
581 * a read lock on ipv6_sk_mc_lock. We have the socket lock,
582 * so reading the list is safe.
585 for_each_pmc_rcu(inet6, pmc) {
586 if (pmc->ifindex != gsf->gf_interface)
588 if (ipv6_addr_equal(group, &pmc->addr))
591 if (!pmc) /* must have a prior join */
593 gsf->gf_fmode = pmc->sfmode;
595 count = psl ? psl->sl_count : 0;
596 read_unlock_bh(&idev->lock);
599 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
600 gsf->gf_numsrc = count;
601 if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
602 copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
605 /* changes to psl require the socket lock, a read lock on
606 * ipv6_sk_mc_lock and a write lock on pmc->sflock. We
607 * have the socket lock, so reading here is safe.
609 for (i=0; i<copycount; i++) {
610 struct sockaddr_in6 *psin6;
611 struct sockaddr_storage ss;
613 psin6 = (struct sockaddr_in6 *)&ss;
614 memset(&ss, 0, sizeof(ss));
615 psin6->sin6_family = AF_INET6;
616 psin6->sin6_addr = psl->sl_addr[i];
617 if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
622 read_unlock_bh(&idev->lock);
627 bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
628 const struct in6_addr *src_addr)
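/* Per-socket source filter check (RFC3810 semantics): an MCAST_INCLUDE
 * filter {S1, S2} passes only S1 and S2, while an MCAST_EXCLUDE filter
 * {S1} passes every source except S1.
 */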
630 struct ipv6_pinfo *np = inet6_sk(sk);
631 struct ipv6_mc_socklist *mc;
632 struct ip6_sf_socklist *psl;
636 for_each_pmc_rcu(np, mc) {
637 if (ipv6_addr_equal(&mc->addr, mc_addr))
644 read_lock(&mc->sflock);
647 rv = mc->sfmode == MCAST_EXCLUDE;
651 for (i=0; i<psl->sl_count; i++) {
652 if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
655 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
657 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
660 read_unlock(&mc->sflock);
666 static void ma_put(struct ifmcaddr6 *mc)
668 if (atomic_dec_and_test(&mc->mca_refcnt)) {
669 in6_dev_put(mc->idev);
674 static void igmp6_group_added(struct ifmcaddr6 *mc)
676 struct net_device *dev = mc->idev->dev;
677 char buf[MAX_ADDR_LEN];
679 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
680 IPV6_ADDR_SCOPE_LINKLOCAL)
683 spin_lock_bh(&mc->mca_lock);
684 if (!(mc->mca_flags&MAF_LOADED)) {
685 mc->mca_flags |= MAF_LOADED;
686 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
687 dev_mc_add(dev, buf);
689 spin_unlock_bh(&mc->mca_lock);
691 if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
694 if (mld_in_v1_mode(mc->idev)) {
695 igmp6_join_group(mc);
700 mc->mca_crcount = mc->idev->mc_qrv;
701 mld_ifc_event(mc->idev);
704 static void igmp6_group_dropped(struct ifmcaddr6 *mc)
706 struct net_device *dev = mc->idev->dev;
707 char buf[MAX_ADDR_LEN];
709 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
710 IPV6_ADDR_SCOPE_LINKLOCAL)
713 spin_lock_bh(&mc->mca_lock);
714 if (mc->mca_flags&MAF_LOADED) {
715 mc->mca_flags &= ~MAF_LOADED;
716 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
717 dev_mc_del(dev, buf);
720 if (mc->mca_flags & MAF_NOREPORT)
722 spin_unlock_bh(&mc->mca_lock);
725 igmp6_leave_group(mc);
727 spin_lock_bh(&mc->mca_lock);
728 if (del_timer(&mc->mca_timer))
729 atomic_dec(&mc->mca_refcnt);
731 ip6_mc_clear_src(mc);
732 spin_unlock_bh(&mc->mca_lock);
736 * deleted ifmcaddr6 manipulation
738 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
740 struct ifmcaddr6 *pmc;
742 /* this is an "ifmcaddr6" for convenience; only the fields below
743 * are actually used. In particular, the refcnt and users are not
744 * used for management of the delete list. Using the same structure
745 * for deleted items allows change reports to use common code with
746 * non-deleted or query-response MCA's.
748 pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
752 spin_lock_bh(&im->mca_lock);
753 spin_lock_init(&pmc->mca_lock);
754 pmc->idev = im->idev;
756 pmc->mca_addr = im->mca_addr;
757 pmc->mca_crcount = idev->mc_qrv;
758 pmc->mca_sfmode = im->mca_sfmode;
759 if (pmc->mca_sfmode == MCAST_INCLUDE) {
760 struct ip6_sf_list *psf;
762 pmc->mca_tomb = im->mca_tomb;
763 pmc->mca_sources = im->mca_sources;
764 im->mca_tomb = im->mca_sources = NULL;
765 for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
766 psf->sf_crcount = pmc->mca_crcount;
768 spin_unlock_bh(&im->mca_lock);
770 spin_lock_bh(&idev->mc_lock);
771 pmc->next = idev->mc_tomb;
773 spin_unlock_bh(&idev->mc_lock);
776 static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
778 struct ifmcaddr6 *pmc, *pmc_prev;
779 struct ip6_sf_list *psf, *psf_next;
781 spin_lock_bh(&idev->mc_lock);
783 for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
784 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
790 pmc_prev->next = pmc->next;
792 idev->mc_tomb = pmc->next;
794 spin_unlock_bh(&idev->mc_lock);
797 for (psf=pmc->mca_tomb; psf; psf=psf_next) {
798 psf_next = psf->sf_next;
801 in6_dev_put(pmc->idev);
806 static void mld_clear_delrec(struct inet6_dev *idev)
808 struct ifmcaddr6 *pmc, *nextpmc;
810 spin_lock_bh(&idev->mc_lock);
812 idev->mc_tomb = NULL;
813 spin_unlock_bh(&idev->mc_lock);
815 for (; pmc; pmc = nextpmc) {
817 ip6_mc_clear_src(pmc);
818 in6_dev_put(pmc->idev);
822 /* clear dead sources, too */
823 read_lock_bh(&idev->lock);
824 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
825 struct ip6_sf_list *psf, *psf_next;
827 spin_lock_bh(&pmc->mca_lock);
829 pmc->mca_tomb = NULL;
830 spin_unlock_bh(&pmc->mca_lock);
831 for (; psf; psf=psf_next) {
832 psf_next = psf->sf_next;
836 read_unlock_bh(&idev->lock);
841 * device multicast group inc (add if not found)
843 int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
845 struct ifmcaddr6 *mc;
846 struct inet6_dev *idev;
848 /* we need to take a reference on idev */
849 idev = in6_dev_get(dev);
854 write_lock_bh(&idev->lock);
856 write_unlock_bh(&idev->lock);
861 for (mc = idev->mc_list; mc; mc = mc->next) {
862 if (ipv6_addr_equal(&mc->mca_addr, addr)) {
864 write_unlock_bh(&idev->lock);
865 ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
873 * not found: create a new one.
876 mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
879 write_unlock_bh(&idev->lock);
884 setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
886 mc->mca_addr = *addr;
887 mc->idev = idev; /* (reference taken) */
889 /* mca_stamp should be updated upon changes */
890 mc->mca_cstamp = mc->mca_tstamp = jiffies;
891 atomic_set(&mc->mca_refcnt, 2);
892 spin_lock_init(&mc->mca_lock);
894 /* initial mode is (EX, empty) */
895 mc->mca_sfmode = MCAST_EXCLUDE;
896 mc->mca_sfcount[MCAST_EXCLUDE] = 1;
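/* (EXCLUDE, empty source list) is the RFC3810 representation of a plain
 * any-source join: every sender is accepted until a source filter is
 * installed via ip6_mc_add_src()/ip6_mc_del_src().
 */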
898 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
899 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
900 mc->mca_flags |= MAF_NOREPORT;
902 mc->next = idev->mc_list;
904 write_unlock_bh(&idev->lock);
906 mld_del_delrec(idev, &mc->mca_addr);
907 igmp6_group_added(mc);
913 * device multicast group del
915 int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
917 struct ifmcaddr6 *ma, **map;
919 write_lock_bh(&idev->lock);
920 for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
921 if (ipv6_addr_equal(&ma->mca_addr, addr)) {
922 if (--ma->mca_users == 0) {
924 write_unlock_bh(&idev->lock);
926 igmp6_group_dropped(ma);
931 write_unlock_bh(&idev->lock);
935 write_unlock_bh(&idev->lock);
940 int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
942 struct inet6_dev *idev;
947 idev = __in6_dev_get(dev);
951 err = __ipv6_dev_mc_dec(idev, addr);
958 * check if the interface/address pair is valid
960 bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
961 const struct in6_addr *src_addr)
963 struct inet6_dev *idev;
964 struct ifmcaddr6 *mc;
968 idev = __in6_dev_get(dev);
970 read_lock_bh(&idev->lock);
971 for (mc = idev->mc_list; mc; mc=mc->next) {
972 if (ipv6_addr_equal(&mc->mca_addr, group))
976 if (src_addr && !ipv6_addr_any(src_addr)) {
977 struct ip6_sf_list *psf;
979 spin_lock_bh(&mc->mca_lock);
980 for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
981 if (ipv6_addr_equal(&psf->sf_addr, src_addr))
985 rv = psf->sf_count[MCAST_INCLUDE] ||
986 psf->sf_count[MCAST_EXCLUDE] !=
987 mc->mca_sfcount[MCAST_EXCLUDE];
989 rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
990 spin_unlock_bh(&mc->mca_lock);
992 rv = true; /* don't filter unspecified source */
994 read_unlock_bh(&idev->lock);
1000 static void mld_gq_start_timer(struct inet6_dev *idev)
1002 unsigned long tv = net_random() % idev->mc_maxdelay;
1004 idev->mc_gq_running = 1;
1005 if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
1009 static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
1011 unsigned long tv = net_random() % delay;
1013 if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
1017 static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
1019 unsigned long tv = net_random() % delay;
1021 if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
1026 * IGMP handling (alias multicast ICMPv6 messages)
1029 static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1031 unsigned long delay = resptime;
1033 /* Do not start timer for these addresses */
1034 if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1035 IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1038 if (del_timer(&ma->mca_timer)) {
1039 atomic_dec(&ma->mca_refcnt);
1040 delay = ma->mca_timer.expires - jiffies;
1043 if (delay >= resptime)
1044 delay = net_random() % resptime;
1046 ma->mca_timer.expires = jiffies + delay;
1047 if (!mod_timer(&ma->mca_timer, jiffies + delay))
1048 atomic_inc(&ma->mca_refcnt);
1049 ma->mca_flags |= MAF_TIMER_RUNNING;
1052 /* mark EXCLUDE-mode sources */
1053 static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1054 const struct in6_addr *srcs)
1056 struct ip6_sf_list *psf;
1060 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1061 if (scount == nsrcs)
1063 for (i=0; i<nsrcs; i++) {
1064 /* skip inactive filters */
1065 if (psf->sf_count[MCAST_INCLUDE] ||
1066 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1067 psf->sf_count[MCAST_EXCLUDE])
1069 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1075 pmc->mca_flags &= ~MAF_GSQUERY;
1076 if (scount == nsrcs) /* all sources excluded */
1081 static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1082 const struct in6_addr *srcs)
1084 struct ip6_sf_list *psf;
1087 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1088 return mld_xmarksources(pmc, nsrcs, srcs);
1090 /* mark INCLUDE-mode sources */
1093 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1094 if (scount == nsrcs)
1096 for (i=0; i<nsrcs; i++) {
1097 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1105 pmc->mca_flags &= ~MAF_GSQUERY;
1108 pmc->mca_flags |= MAF_GSQUERY;
1112 static int mld_force_mld_version(const struct inet6_dev *idev)
1114 /* Normally, both are 0 here. If enforcement of a particular version
1115 * is being used, the per-device setting has lower precedence than the
1116 * 'all' device setting (.../conf/all/force_mld_version).
1119 if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
1120 return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
1122 return idev->cnf.force_mld_version;
1125 static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1127 return mld_force_mld_version(idev) == 2;
1130 static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1132 return mld_force_mld_version(idev) == 1;
1135 static bool mld_in_v1_mode(const struct inet6_dev *idev)
1137 if (mld_in_v2_mode_only(idev))
1139 if (mld_in_v1_mode_only(idev))
1141 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1147 static void mld_set_v1_mode(struct inet6_dev *idev)
1149 /* RFC3810, relevant sections:
1150 * - 9.1. Robustness Variable
1151 * - 9.2. Query Interval
1152 * - 9.3. Query Response Interval
1153 * - 9.12. Older Version Querier Present Timeout
1155 unsigned long switchback;
1157 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1159 idev->mc_v1_seen = jiffies + switchback;
1162 static void mld_update_qrv(struct inet6_dev *idev,
1163 const struct mld2_query *mlh2)
1165 /* RFC3810, relevant sections:
1166 * - 5.1.8. QRV (Querier's Robustness Variable)
1167 * - 9.1. Robustness Variable
1170 /* The value of the Robustness Variable MUST NOT be zero,
1171 * and SHOULD NOT be one. Catch this here if we ever run
1172 * into such a case in future.
1174 WARN_ON(idev->mc_qrv == 0);
1176 if (mlh2->mld2q_qrv > 0)
1177 idev->mc_qrv = mlh2->mld2q_qrv;
1179 if (unlikely(idev->mc_qrv < 2)) {
1180 net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1181 idev->mc_qrv, MLD_QRV_DEFAULT);
1182 idev->mc_qrv = MLD_QRV_DEFAULT;
1186 static void mld_update_qi(struct inet6_dev *idev,
1187 const struct mld2_query *mlh2)
1189 /* RFC3810, relevant sections:
1190 * - 5.1.9. QQIC (Querier's Query Interval Code)
1191 * - 9.2. Query Interval
1192 * - 9.12. Older Version Querier Present Timeout
1193 * (the [Query Interval] in the last Query received)
1195 unsigned long mc_qqi;
1197 if (mlh2->mld2q_qqic < 128) {
1198 mc_qqi = mlh2->mld2q_qqic;
1200 unsigned long mc_man, mc_exp;
1202 mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1203 mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1205 mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
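/* Worked example of the RFC3810, 5.1.9 float encoding: QQIC = 0x95
 * gives exp = 1, mant = 5, so QQI = (5 | 0x10) << (1 + 3) = 336
 * seconds; QQIC values below 128 are taken literally (in seconds).
 */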
1208 idev->mc_qi = mc_qqi * HZ;
1211 static void mld_update_qri(struct inet6_dev *idev,
1212 const struct mld2_query *mlh2)
1214 /* RFC3810, relevant sections:
1215 * - 5.1.3. Maximum Response Code
1216 * - 9.3. Query Response Interval
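* The Maximum Response Code is decoded by mldv2_mrc() per RFC3810,
* 5.1.3: values below 32768 are milliseconds directly; larger values
* use a mant/exp float format, MRD = (mant | 0x1000) << (exp + 3),
* e.g. MRC = 0x8400 (exp 0, mant 0x400) yields 40960 ms.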
1218 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1221 /* called with rcu_read_lock() */
1222 int igmp6_event_query(struct sk_buff *skb)
1224 struct mld2_query *mlh2 = NULL;
1225 struct ifmcaddr6 *ma;
1226 const struct in6_addr *group;
1227 unsigned long max_delay;
1228 struct inet6_dev *idev;
1229 struct mld_msg *mld;
1234 if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1237 /* compute payload length excluding extension headers */
1238 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1239 len -= skb_network_header_len(skb);
1241 /* Drop queries with a non-link-local source address */
1242 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
1245 idev = __in6_dev_get(skb->dev);
1249 mld = (struct mld_msg *)icmp6_hdr(skb);
1250 group = &mld->mld_mca;
1251 group_type = ipv6_addr_type(group);
1253 if (group_type != IPV6_ADDR_ANY &&
1254 !(group_type&IPV6_ADDR_MULTICAST))
1257 if (len == MLD_V1_QUERY_LEN) {
1258 unsigned long mldv1_md;
1260 /* Ignore v1 queries */
1261 if (mld_in_v2_mode_only(idev))
1264 /* MLDv1 router present */
1265 mldv1_md = ntohs(mld->mld_maxdelay);
1266 max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1268 mld_set_v1_mode(idev);
1270 /* cancel MLDv2 report timer */
1271 idev->mc_gq_running = 0;
1272 if (del_timer(&idev->mc_gq_timer))
1273 __in6_dev_put(idev);
1275 /* cancel the interface change timer */
1276 idev->mc_ifc_count = 0;
1277 if (del_timer(&idev->mc_ifc_timer))
1278 __in6_dev_put(idev);
1279 /* clear deleted report items */
1280 mld_clear_delrec(idev);
1281 } else if (len >= MLD_V2_QUERY_LEN_MIN) {
1282 int srcs_offset = sizeof(struct mld2_query) -
1283 sizeof(struct icmp6hdr);
1285 /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */
1286 if (mld_in_v1_mode(idev))
1288 if (!pskb_may_pull(skb, srcs_offset))
1291 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1293 max_delay = max(msecs_to_jiffies(mldv2_mrc(mlh2)), 1UL);
1294 idev->mc_maxdelay = max_delay;
1296 mld_update_qrv(idev, mlh2);
1297 mld_update_qi(idev, mlh2);
1298 mld_update_qri(idev, mlh2);
1300 if (group_type == IPV6_ADDR_ANY) { /* general query */
1301 if (mlh2->mld2q_nsrcs)
1302 return -EINVAL; /* no sources allowed */
1304 mld_gq_start_timer(idev);
1307 /* mark sources to include, if group & source-specific */
1308 if (mlh2->mld2q_nsrcs != 0) {
1309 if (!pskb_may_pull(skb, srcs_offset +
1310 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1313 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1319 read_lock_bh(&idev->lock);
1320 if (group_type == IPV6_ADDR_ANY) {
1321 for (ma = idev->mc_list; ma; ma=ma->next) {
1322 spin_lock_bh(&ma->mca_lock);
1323 igmp6_group_queried(ma, max_delay);
1324 spin_unlock_bh(&ma->mca_lock);
1327 for (ma = idev->mc_list; ma; ma=ma->next) {
1328 if (!ipv6_addr_equal(group, &ma->mca_addr))
1330 spin_lock_bh(&ma->mca_lock);
1331 if (ma->mca_flags & MAF_TIMER_RUNNING) {
1332 /* gsquery <- gsquery && mark */
1334 ma->mca_flags &= ~MAF_GSQUERY;
1336 /* gsquery <- mark */
1338 ma->mca_flags |= MAF_GSQUERY;
1340 ma->mca_flags &= ~MAF_GSQUERY;
1342 if (!(ma->mca_flags & MAF_GSQUERY) ||
1343 mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1344 igmp6_group_queried(ma, max_delay);
1345 spin_unlock_bh(&ma->mca_lock);
1349 read_unlock_bh(&idev->lock);
1354 /* called with rcu_read_lock() */
1355 int igmp6_event_report(struct sk_buff *skb)
1357 struct ifmcaddr6 *ma;
1358 struct inet6_dev *idev;
1359 struct mld_msg *mld;
1362 /* Our own report looped back. Ignore it. */
1363 if (skb->pkt_type == PACKET_LOOPBACK)
1366 /* send our report if the MC router may not have heard this report */
1367 if (skb->pkt_type != PACKET_MULTICAST &&
1368 skb->pkt_type != PACKET_BROADCAST)
1371 if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1374 mld = (struct mld_msg *)icmp6_hdr(skb);
1376 /* Drop reports whose source is neither link-local nor unspecified */
1377 addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1378 if (addr_type != IPV6_ADDR_ANY &&
1379 !(addr_type&IPV6_ADDR_LINKLOCAL))
1382 idev = __in6_dev_get(skb->dev);
1387 * Cancel the timer for this group
1390 read_lock_bh(&idev->lock);
1391 for (ma = idev->mc_list; ma; ma=ma->next) {
1392 if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1393 spin_lock(&ma->mca_lock);
1394 if (del_timer(&ma->mca_timer))
1395 atomic_dec(&ma->mca_refcnt);
1396 ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
1397 spin_unlock(&ma->mca_lock);
1401 read_unlock_bh(&idev->lock);
1405 static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1406 int gdeleted, int sdeleted)
1409 case MLD2_MODE_IS_INCLUDE:
1410 case MLD2_MODE_IS_EXCLUDE:
1411 if (gdeleted || sdeleted)
1413 if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1414 if (pmc->mca_sfmode == MCAST_INCLUDE)
1416 /* don't include if this source is excluded
1419 if (psf->sf_count[MCAST_INCLUDE])
1420 return type == MLD2_MODE_IS_INCLUDE;
1421 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1422 psf->sf_count[MCAST_EXCLUDE];
1425 case MLD2_CHANGE_TO_INCLUDE:
1426 if (gdeleted || sdeleted)
1428 return psf->sf_count[MCAST_INCLUDE] != 0;
1429 case MLD2_CHANGE_TO_EXCLUDE:
1430 if (gdeleted || sdeleted)
1432 if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1433 psf->sf_count[MCAST_INCLUDE])
1435 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1436 psf->sf_count[MCAST_EXCLUDE];
1437 case MLD2_ALLOW_NEW_SOURCES:
1438 if (gdeleted || !psf->sf_crcount)
1440 return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1441 case MLD2_BLOCK_OLD_SOURCES:
1442 if (pmc->mca_sfmode == MCAST_INCLUDE)
1443 return gdeleted || (psf->sf_crcount && sdeleted);
1444 return psf->sf_crcount && !gdeleted && !sdeleted;
1450 mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1452 struct ip6_sf_list *psf;
1455 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1456 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1463 static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
1464 struct net_device *dev,
1465 const struct in6_addr *saddr,
1466 const struct in6_addr *daddr,
1469 struct ipv6hdr *hdr;
1471 skb->protocol = htons(ETH_P_IPV6);
1474 skb_reset_network_header(skb);
1475 skb_put(skb, sizeof(struct ipv6hdr));
1476 hdr = ipv6_hdr(skb);
1478 ip6_flow_hdr(hdr, 0, 0);
1480 hdr->payload_len = htons(len);
1481 hdr->nexthdr = proto;
1482 hdr->hop_limit = inet6_sk(sk)->hop_limit;
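/* For the per-namespace igmp_sk this is 1 (set in igmp6_net_init()), as
 * RFC2710/RFC3810 require MLD messages to be sent with a hop limit of 1.
 */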
1484 hdr->saddr = *saddr;
1485 hdr->daddr = *daddr;
1488 static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
1490 struct net_device *dev = idev->dev;
1491 struct net *net = dev_net(dev);
1492 struct sock *sk = net->ipv6.igmp_sk;
1493 struct sk_buff *skb;
1494 struct mld2_report *pmr;
1495 struct in6_addr addr_buf;
1496 const struct in6_addr *saddr;
1497 int hlen = LL_RESERVED_SPACE(dev);
1498 int tlen = dev->needed_tailroom;
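/* ra[] is the 8-byte Hop-by-Hop extension header carrying the IPv6
 * Router Alert option (RFC 2711): next header = ICMPv6, length 0
 * (i.e. 8 octets), option type IPV6_TLV_ROUTERALERT, option length 2,
 * value 0 ("packet contains an MLD message"); the remaining bytes pad
 * the header out to 8 octets.
 */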
1500 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1501 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1504 /* we assume size > sizeof(ra) here */
1505 size += hlen + tlen;
1506 /* limit our allocations to order-0 page */
1507 size = min_t(int, size, SKB_MAX_ORDER(0, 0));
1508 skb = sock_alloc_send_skb(sk, size, 1, &err);
1513 skb->priority = TC_PRIO_CONTROL;
1514 skb_reserve(skb, hlen);
1516 if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
1517 /* <draft-ietf-magma-mld-source-05.txt>:
1518 * use unspecified address as the source address
1519 * when a valid link-local address is not available.
1521 saddr = &in6addr_any;
1525 ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1527 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1529 skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1530 skb_put(skb, sizeof(*pmr));
1531 pmr = (struct mld2_report *)skb_transport_header(skb);
1532 pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1533 pmr->mld2r_resv1 = 0;
1534 pmr->mld2r_cksum = 0;
1535 pmr->mld2r_resv2 = 0;
1536 pmr->mld2r_ngrec = 0;
1540 static void mld_sendpack(struct sk_buff *skb)
1542 struct ipv6hdr *pip6 = ipv6_hdr(skb);
1543 struct mld2_report *pmr =
1544 (struct mld2_report *)skb_transport_header(skb);
1545 int payload_len, mldlen;
1546 struct inet6_dev *idev;
1547 struct net *net = dev_net(skb->dev);
1550 struct dst_entry *dst;
1553 idev = __in6_dev_get(skb->dev);
1554 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1556 payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1558 mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1559 pip6->payload_len = htons(payload_len);
1561 pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1563 csum_partial(skb_transport_header(skb),
1566 icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1567 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1569 dst = icmp6_dst_alloc(skb->dev, &fl6);
1576 skb_dst_set(skb, dst);
1580 payload_len = skb->len;
1582 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1586 ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
1587 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1588 IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1590 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
1600 static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
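/* Worst-case size of one group record: the fixed mld2_grec header plus
 * 16 bytes (one struct in6_addr) per source that is_in() selects for
 * this record type.
 */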
1602 return sizeof(struct mld2_grec) + 16 * mld_scount(pmc, type, gdel, sdel);
1605 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1606 int type, struct mld2_grec **ppgr)
1608 struct net_device *dev = pmc->idev->dev;
1609 struct mld2_report *pmr;
1610 struct mld2_grec *pgr;
1613 skb = mld_newpack(pmc->idev, dev->mtu);
1616 pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
1617 pgr->grec_type = type;
1618 pgr->grec_auxwords = 0;
1619 pgr->grec_nsrcs = 0;
1620 pgr->grec_mca = pmc->mca_addr; /* structure copy */
1621 pmr = (struct mld2_report *)skb_transport_header(skb);
1622 pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1627 #define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
1628 skb_tailroom(skb)) : 0)
1630 static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1631 int type, int gdeleted, int sdeleted)
1633 struct inet6_dev *idev = pmc->idev;
1634 struct net_device *dev = idev->dev;
1635 struct mld2_report *pmr;
1636 struct mld2_grec *pgr = NULL;
1637 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1638 int scount, stotal, first, isquery, truncate;
1640 if (pmc->mca_flags & MAF_NOREPORT)
1643 isquery = type == MLD2_MODE_IS_INCLUDE ||
1644 type == MLD2_MODE_IS_EXCLUDE;
1645 truncate = type == MLD2_MODE_IS_EXCLUDE ||
1646 type == MLD2_CHANGE_TO_EXCLUDE;
1648 stotal = scount = 0;
1650 psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1655 pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1657 /* EX and TO_EX get a fresh packet, if needed */
1659 if (pmr && pmr->mld2r_ngrec &&
1660 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1663 skb = mld_newpack(idev, dev->mtu);
1668 for (psf=*psf_list; psf; psf=psf_next) {
1669 struct in6_addr *psrc;
1671 psf_next = psf->sf_next;
1673 if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
1678 /* clear marks on query responses */
1682 if (AVAILABLE(skb) < sizeof(*psrc) +
1683 first*sizeof(struct mld2_grec)) {
1684 if (truncate && !first)
1685 break; /* truncate these */
1687 pgr->grec_nsrcs = htons(scount);
1690 skb = mld_newpack(idev, dev->mtu);
1695 skb = add_grhead(skb, pmc, type, &pgr);
1700 psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
1701 *psrc = psf->sf_addr;
1703 if ((type == MLD2_ALLOW_NEW_SOURCES ||
1704 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1706 if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1708 psf_prev->sf_next = psf->sf_next;
1710 *psf_list = psf->sf_next;
1720 if (type == MLD2_ALLOW_NEW_SOURCES ||
1721 type == MLD2_BLOCK_OLD_SOURCES)
1723 if (pmc->mca_crcount || isquery) {
1724 /* make sure we have room for group header */
1725 if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1727 skb = NULL; /* add_grhead will get a new one */
1729 skb = add_grhead(skb, pmc, type, &pgr);
1733 pgr->grec_nsrcs = htons(scount);
1736 pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
1740 static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1742 struct sk_buff *skb = NULL;
1745 read_lock_bh(&idev->lock);
1747 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1748 if (pmc->mca_flags & MAF_NOREPORT)
1750 spin_lock_bh(&pmc->mca_lock);
1751 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1752 type = MLD2_MODE_IS_EXCLUDE;
1754 type = MLD2_MODE_IS_INCLUDE;
1755 skb = add_grec(skb, pmc, type, 0, 0);
1756 spin_unlock_bh(&pmc->mca_lock);
1759 spin_lock_bh(&pmc->mca_lock);
1760 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1761 type = MLD2_MODE_IS_EXCLUDE;
1763 type = MLD2_MODE_IS_INCLUDE;
1764 skb = add_grec(skb, pmc, type, 0, 0);
1765 spin_unlock_bh(&pmc->mca_lock);
1767 read_unlock_bh(&idev->lock);
1773 * remove zero-count source records from a source filter list
1775 static void mld_clear_zeros(struct ip6_sf_list **ppsf)
1777 struct ip6_sf_list *psf_prev, *psf_next, *psf;
1780 for (psf=*ppsf; psf; psf = psf_next) {
1781 psf_next = psf->sf_next;
1782 if (psf->sf_crcount == 0) {
1784 psf_prev->sf_next = psf->sf_next;
1786 *ppsf = psf->sf_next;
1793 static void mld_send_cr(struct inet6_dev *idev)
1795 struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
1796 struct sk_buff *skb = NULL;
1799 read_lock_bh(&idev->lock);
1800 spin_lock(&idev->mc_lock);
1804 for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) {
1805 pmc_next = pmc->next;
1806 if (pmc->mca_sfmode == MCAST_INCLUDE) {
1807 type = MLD2_BLOCK_OLD_SOURCES;
1808 dtype = MLD2_BLOCK_OLD_SOURCES;
1809 skb = add_grec(skb, pmc, type, 1, 0);
1810 skb = add_grec(skb, pmc, dtype, 1, 1);
1812 if (pmc->mca_crcount) {
1813 if (pmc->mca_sfmode == MCAST_EXCLUDE) {
1814 type = MLD2_CHANGE_TO_INCLUDE;
1815 skb = add_grec(skb, pmc, type, 1, 0);
1818 if (pmc->mca_crcount == 0) {
1819 mld_clear_zeros(&pmc->mca_tomb);
1820 mld_clear_zeros(&pmc->mca_sources);
1823 if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
1824 !pmc->mca_sources) {
1826 pmc_prev->next = pmc_next;
1828 idev->mc_tomb = pmc_next;
1829 in6_dev_put(pmc->idev);
1834 spin_unlock(&idev->mc_lock);
1837 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1838 spin_lock_bh(&pmc->mca_lock);
1839 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1840 type = MLD2_BLOCK_OLD_SOURCES;
1841 dtype = MLD2_ALLOW_NEW_SOURCES;
1843 type = MLD2_ALLOW_NEW_SOURCES;
1844 dtype = MLD2_BLOCK_OLD_SOURCES;
1846 skb = add_grec(skb, pmc, type, 0, 0);
1847 skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */
1849 /* filter mode changes */
1850 if (pmc->mca_crcount) {
1851 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1852 type = MLD2_CHANGE_TO_EXCLUDE;
1854 type = MLD2_CHANGE_TO_INCLUDE;
1855 skb = add_grec(skb, pmc, type, 0, 0);
1858 spin_unlock_bh(&pmc->mca_lock);
1860 read_unlock_bh(&idev->lock);
1863 (void) mld_sendpack(skb);
1866 static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1868 struct net *net = dev_net(dev);
1869 struct sock *sk = net->ipv6.igmp_sk;
1870 struct inet6_dev *idev;
1871 struct sk_buff *skb;
1872 struct mld_msg *hdr;
1873 const struct in6_addr *snd_addr, *saddr;
1874 struct in6_addr addr_buf;
1875 int hlen = LL_RESERVED_SPACE(dev);
1876 int tlen = dev->needed_tailroom;
1877 int err, len, payload_len, full_len;
1878 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1879 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1882 struct dst_entry *dst;
1884 if (type == ICMPV6_MGM_REDUCTION)
1885 snd_addr = &in6addr_linklocal_allrouters;
1889 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
1890 payload_len = len + sizeof(ra);
1891 full_len = sizeof(struct ipv6hdr) + payload_len;
1894 IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
1895 IPSTATS_MIB_OUT, full_len);
1898 skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
1902 IP6_INC_STATS(net, __in6_dev_get(dev),
1903 IPSTATS_MIB_OUTDISCARDS);
1907 skb->priority = TC_PRIO_CONTROL;
1908 skb_reserve(skb, hlen);
1910 if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1911 /* <draft-ietf-magma-mld-source-05.txt>:
1912 * use unspecified address as the source address
1913 * when a valid link-local address is not available.
1915 saddr = &in6addr_any;
1919 ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
1921 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1923 hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
1924 memset(hdr, 0, sizeof(struct mld_msg));
1925 hdr->mld_type = type;
1926 hdr->mld_mca = *addr;
1928 hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
1930 csum_partial(hdr, len, 0));
1933 idev = __in6_dev_get(skb->dev);
1935 icmpv6_flow_init(sk, &fl6, type,
1936 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1938 dst = icmp6_dst_alloc(skb->dev, &fl6);
1944 skb_dst_set(skb, dst);
1945 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1949 ICMP6MSGOUT_INC_STATS(net, idev, type);
1950 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1951 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
1953 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1963 static void mld_resend_report(struct inet6_dev *idev)
1965 if (mld_in_v1_mode(idev)) {
1966 struct ifmcaddr6 *mcaddr;
1967 read_lock_bh(&idev->lock);
1968 for (mcaddr = idev->mc_list; mcaddr; mcaddr = mcaddr->next) {
1969 if (!(mcaddr->mca_flags & MAF_NOREPORT))
1970 igmp6_send(&mcaddr->mca_addr, idev->dev,
1973 read_unlock_bh(&idev->lock);
1975 mld_send_report(idev, NULL);
1979 void ipv6_mc_dad_complete(struct inet6_dev *idev)
1981 idev->mc_dad_count = idev->mc_qrv;
1982 if (idev->mc_dad_count) {
1983 mld_resend_report(idev);
1984 idev->mc_dad_count--;
1985 if (idev->mc_dad_count)
1986 mld_dad_start_timer(idev, idev->mc_maxdelay);
1990 static void mld_dad_timer_expire(unsigned long data)
1992 struct inet6_dev *idev = (struct inet6_dev *)data;
1994 mld_resend_report(idev);
1995 if (idev->mc_dad_count) {
1996 idev->mc_dad_count--;
1997 if (idev->mc_dad_count)
1998 mld_dad_start_timer(idev, idev->mc_maxdelay);
2000 __in6_dev_put(idev);
2003 static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2004 const struct in6_addr *psfsrc)
2006 struct ip6_sf_list *psf, *psf_prev;
2010 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
2011 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2015 if (!psf || psf->sf_count[sfmode] == 0) {
2016 /* source filter not found, or count wrong => bug */
2019 psf->sf_count[sfmode]--;
2020 if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2021 struct inet6_dev *idev = pmc->idev;
2023 /* no more filters for this source */
2025 psf_prev->sf_next = psf->sf_next;
2027 pmc->mca_sources = psf->sf_next;
2028 if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2029 !mld_in_v1_mode(idev)) {
2030 psf->sf_crcount = idev->mc_qrv;
2031 psf->sf_next = pmc->mca_tomb;
2032 pmc->mca_tomb = psf;
2040 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2041 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2044 struct ifmcaddr6 *pmc;
2050 read_lock_bh(&idev->lock);
2051 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
2052 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2056 /* MCA not found?? bug */
2057 read_unlock_bh(&idev->lock);
2060 spin_lock_bh(&pmc->mca_lock);
2063 if (!pmc->mca_sfcount[sfmode]) {
2064 spin_unlock_bh(&pmc->mca_lock);
2065 read_unlock_bh(&idev->lock);
2068 pmc->mca_sfcount[sfmode]--;
2071 for (i=0; i<sfcount; i++) {
2072 int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2074 changerec |= rv > 0;
2078 if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2079 pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2080 pmc->mca_sfcount[MCAST_INCLUDE]) {
2081 struct ip6_sf_list *psf;
2083 /* filter mode change */
2084 pmc->mca_sfmode = MCAST_INCLUDE;
2085 pmc->mca_crcount = idev->mc_qrv;
2086 idev->mc_ifc_count = pmc->mca_crcount;
2087 for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
2088 psf->sf_crcount = 0;
2089 mld_ifc_event(pmc->idev);
2090 } else if (sf_setstate(pmc) || changerec)
2091 mld_ifc_event(pmc->idev);
2092 spin_unlock_bh(&pmc->mca_lock);
2093 read_unlock_bh(&idev->lock);
2098 * Add multicast single-source filter to the interface list
2100 static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2101 const struct in6_addr *psfsrc)
2103 struct ip6_sf_list *psf, *psf_prev;
2106 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
2107 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2112 psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
2116 psf->sf_addr = *psfsrc;
2118 psf_prev->sf_next = psf;
2120 pmc->mca_sources = psf;
2122 psf->sf_count[sfmode]++;
2126 static void sf_markstate(struct ifmcaddr6 *pmc)
2128 struct ip6_sf_list *psf;
2129 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2131 for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
2132 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2133 psf->sf_oldin = mca_xcount ==
2134 psf->sf_count[MCAST_EXCLUDE] &&
2135 !psf->sf_count[MCAST_INCLUDE];
2137 psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2140 static int sf_setstate(struct ifmcaddr6 *pmc)
2142 struct ip6_sf_list *psf, *dpsf;
2143 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2144 int qrv = pmc->idev->mc_qrv;
2148 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
2149 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2150 new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2151 !psf->sf_count[MCAST_INCLUDE];
2153 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2155 if (!psf->sf_oldin) {
2156 struct ip6_sf_list *prev = NULL;
2158 for (dpsf=pmc->mca_tomb; dpsf;
2159 dpsf=dpsf->sf_next) {
2160 if (ipv6_addr_equal(&dpsf->sf_addr,
2167 prev->sf_next = dpsf->sf_next;
2169 pmc->mca_tomb = dpsf->sf_next;
2172 psf->sf_crcount = qrv;
2175 } else if (psf->sf_oldin) {
2176 psf->sf_crcount = 0;
2178 * add or update "delete" records if an active filter
2181 for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
2182 if (ipv6_addr_equal(&dpsf->sf_addr,
2186 dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
2190 /* pmc->mca_lock held by callers */
2191 dpsf->sf_next = pmc->mca_tomb;
2192 pmc->mca_tomb = dpsf;
2194 dpsf->sf_crcount = qrv;
2202 * Add multicast source filter list to the interface list
2204 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2205 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2208 struct ifmcaddr6 *pmc;
2214 read_lock_bh(&idev->lock);
2215 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
2216 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2220 /* MCA not found?? bug */
2221 read_unlock_bh(&idev->lock);
2224 spin_lock_bh(&pmc->mca_lock);
2227 isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2229 pmc->mca_sfcount[sfmode]++;
2231 for (i=0; i<sfcount; i++) {
2232 err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2240 pmc->mca_sfcount[sfmode]--;
2242 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2243 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2244 struct ip6_sf_list *psf;
2246 /* filter mode change */
2247 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2248 pmc->mca_sfmode = MCAST_EXCLUDE;
2249 else if (pmc->mca_sfcount[MCAST_INCLUDE])
2250 pmc->mca_sfmode = MCAST_INCLUDE;
2251 /* else no filters; keep old mode for reports */
2253 pmc->mca_crcount = idev->mc_qrv;
2254 idev->mc_ifc_count = pmc->mca_crcount;
2255 for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
2256 psf->sf_crcount = 0;
2257 mld_ifc_event(idev);
2258 } else if (sf_setstate(pmc))
2259 mld_ifc_event(idev);
2260 spin_unlock_bh(&pmc->mca_lock);
2261 read_unlock_bh(&idev->lock);
2265 static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2267 struct ip6_sf_list *psf, *nextpsf;
2269 for (psf=pmc->mca_tomb; psf; psf=nextpsf) {
2270 nextpsf = psf->sf_next;
2273 pmc->mca_tomb = NULL;
2274 for (psf=pmc->mca_sources; psf; psf=nextpsf) {
2275 nextpsf = psf->sf_next;
2278 pmc->mca_sources = NULL;
2279 pmc->mca_sfmode = MCAST_EXCLUDE;
2280 pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2281 pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2285 static void igmp6_join_group(struct ifmcaddr6 *ma)
2287 unsigned long delay;
2289 if (ma->mca_flags & MAF_NOREPORT)
2292 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2294 delay = net_random() % unsolicited_report_interval(ma->idev);
2296 spin_lock_bh(&ma->mca_lock);
2297 if (del_timer(&ma->mca_timer)) {
2298 atomic_dec(&ma->mca_refcnt);
2299 delay = ma->mca_timer.expires - jiffies;
2302 if (!mod_timer(&ma->mca_timer, jiffies + delay))
2303 atomic_inc(&ma->mca_refcnt);
2304 ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2305 spin_unlock_bh(&ma->mca_lock);
2308 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2309 struct inet6_dev *idev)
2313 /* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
2314 * so no other readers or writers of iml or its sflist
2317 /* any-source empty exclude case */
2318 return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2320 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2321 iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2322 sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2327 static void igmp6_leave_group(struct ifmcaddr6 *ma)
2329 if (mld_in_v1_mode(ma->idev)) {
2330 if (ma->mca_flags & MAF_LAST_REPORTER)
2331 igmp6_send(&ma->mca_addr, ma->idev->dev,
2332 ICMPV6_MGM_REDUCTION);
2334 mld_add_delrec(ma->idev, ma);
2335 mld_ifc_event(ma->idev);
2339 static void mld_gq_timer_expire(unsigned long data)
2341 struct inet6_dev *idev = (struct inet6_dev *)data;
2343 idev->mc_gq_running = 0;
2344 mld_send_report(idev, NULL);
2345 __in6_dev_put(idev);
2348 static void mld_ifc_timer_expire(unsigned long data)
2350 struct inet6_dev *idev = (struct inet6_dev *)data;
2353 if (idev->mc_ifc_count) {
2354 idev->mc_ifc_count--;
2355 if (idev->mc_ifc_count)
2356 mld_ifc_start_timer(idev, idev->mc_maxdelay);
2358 __in6_dev_put(idev);
2361 static void mld_ifc_event(struct inet6_dev *idev)
2363 if (mld_in_v1_mode(idev))
2365 idev->mc_ifc_count = idev->mc_qrv;
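/* Per RFC3810, 6.1 the pending state-change report is sent a total of
 * [Robustness Variable] (mc_qrv) times; mld_ifc_timer_expire() above
 * decrements mc_ifc_count and re-arms the timer until it reaches zero.
 */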
2366 mld_ifc_start_timer(idev, 1);
2370 static void igmp6_timer_handler(unsigned long data)
2372 struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
2374 if (mld_in_v1_mode(ma->idev))
2375 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2377 mld_send_report(ma->idev, ma);
2379 spin_lock(&ma->mca_lock);
2380 ma->mca_flags |= MAF_LAST_REPORTER;
2381 ma->mca_flags &= ~MAF_TIMER_RUNNING;
2382 spin_unlock(&ma->mca_lock);
2386 /* Device changing type */
2388 void ipv6_mc_unmap(struct inet6_dev *idev)
2390 struct ifmcaddr6 *i;
2392 /* Withdraw multicast list (the device is changing type) */
2394 read_lock_bh(&idev->lock);
2395 for (i = idev->mc_list; i; i = i->next)
2396 igmp6_group_dropped(i);
2397 read_unlock_bh(&idev->lock);
2400 void ipv6_mc_remap(struct inet6_dev *idev)
2405 /* Device going down */
2407 void ipv6_mc_down(struct inet6_dev *idev)
2409 struct ifmcaddr6 *i;
2411 /* Withdraw multicast list */
2413 read_lock_bh(&idev->lock);
2414 idev->mc_ifc_count = 0;
2415 if (del_timer(&idev->mc_ifc_timer))
2416 __in6_dev_put(idev);
2417 idev->mc_gq_running = 0;
2418 if (del_timer(&idev->mc_gq_timer))
2419 __in6_dev_put(idev);
2420 if (del_timer(&idev->mc_dad_timer))
2421 __in6_dev_put(idev);
2423 for (i = idev->mc_list; i; i=i->next)
2424 igmp6_group_dropped(i);
2425 read_unlock_bh(&idev->lock);
2427 mld_clear_delrec(idev);
2431 /* Device going up */
2433 void ipv6_mc_up(struct inet6_dev *idev)
2435 struct ifmcaddr6 *i;
2437 /* Install multicast list, except for all-nodes (already installed) */
2439 read_lock_bh(&idev->lock);
2440 for (i = idev->mc_list; i; i=i->next)
2441 igmp6_group_added(i);
2442 read_unlock_bh(&idev->lock);
2445 /* IPv6 device initialization. */
2447 void ipv6_mc_init_dev(struct inet6_dev *idev)
2449 write_lock_bh(&idev->lock);
2450 spin_lock_init(&idev->mc_lock);
2451 idev->mc_gq_running = 0;
2452 setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
2453 (unsigned long)idev);
2454 idev->mc_tomb = NULL;
2455 idev->mc_ifc_count = 0;
2456 setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
2457 (unsigned long)idev);
2458 setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
2459 (unsigned long)idev);
2461 idev->mc_qrv = MLD_QRV_DEFAULT;
2462 idev->mc_qi = MLD_QI_DEFAULT;
2463 idev->mc_qri = MLD_QRI_DEFAULT;
2465 idev->mc_maxdelay = unsolicited_report_interval(idev);
2466 idev->mc_v1_seen = 0;
2467 write_unlock_bh(&idev->lock);
2471 * Device is about to be destroyed: clean up.
2474 void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2476 struct ifmcaddr6 *i;
2478 /* Deactivate timers */
2481 /* Delete all-nodes address. */
2482 /* We cannot call ipv6_dev_mc_dec() directly, our caller in
2483 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2486 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2488 if (idev->cnf.forwarding)
2489 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2491 write_lock_bh(&idev->lock);
2492 while ((i = idev->mc_list) != NULL) {
2493 idev->mc_list = i->next;
2494 write_unlock_bh(&idev->lock);
2496 igmp6_group_dropped(i);
2499 write_lock_bh(&idev->lock);
2501 write_unlock_bh(&idev->lock);
2504 #ifdef CONFIG_PROC_FS
2505 struct igmp6_mc_iter_state {
2506 struct seq_net_private p;
2507 struct net_device *dev;
2508 struct inet6_dev *idev;
2511 #define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private)
2513 static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2515 struct ifmcaddr6 *im = NULL;
2516 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2517 struct net *net = seq_file_net(seq);
2520 for_each_netdev_rcu(net, state->dev) {
2521 struct inet6_dev *idev;
2522 idev = __in6_dev_get(state->dev);
2525 read_lock_bh(&idev->lock);
2531 read_unlock_bh(&idev->lock);
2536 static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2538 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2542 if (likely(state->idev != NULL))
2543 read_unlock_bh(&state->idev->lock);
2545 state->dev = next_net_device_rcu(state->dev);
2550 state->idev = __in6_dev_get(state->dev);
2553 read_lock_bh(&state->idev->lock);
2554 im = state->idev->mc_list;
2559 static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2561 struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2563 while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2565 return pos ? NULL : im;
2568 static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2572 return igmp6_mc_get_idx(seq, *pos);
2575 static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2577 struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2583 static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2586 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2588 if (likely(state->idev != NULL)) {
2589 read_unlock_bh(&state->idev->lock);
2596 static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2598 struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2599 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2602 "%-4d %-15s %pi6 %5d %08X %ld\n",
2603 state->dev->ifindex, state->dev->name,
2605 im->mca_users, im->mca_flags,
2606 (im->mca_flags&MAF_TIMER_RUNNING) ?
2607 jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
2611 static const struct seq_operations igmp6_mc_seq_ops = {
2612 .start = igmp6_mc_seq_start,
2613 .next = igmp6_mc_seq_next,
2614 .stop = igmp6_mc_seq_stop,
2615 .show = igmp6_mc_seq_show,
2618 static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
2620 return seq_open_net(inode, file, &igmp6_mc_seq_ops,
2621 sizeof(struct igmp6_mc_iter_state));
2624 static const struct file_operations igmp6_mc_seq_fops = {
2625 .owner = THIS_MODULE,
2626 .open = igmp6_mc_seq_open,
2628 .llseek = seq_lseek,
2629 .release = seq_release_net,
2632 struct igmp6_mcf_iter_state {
2633 struct seq_net_private p;
2634 struct net_device *dev;
2635 struct inet6_dev *idev;
2636 struct ifmcaddr6 *im;
2639 #define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private)
2641 static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2643 struct ip6_sf_list *psf = NULL;
2644 struct ifmcaddr6 *im = NULL;
2645 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2646 struct net *net = seq_file_net(seq);
2650 for_each_netdev_rcu(net, state->dev) {
2651 struct inet6_dev *idev;
2652 idev = __in6_dev_get(state->dev);
2653 if (unlikely(idev == NULL))
2655 read_lock_bh(&idev->lock);
2657 if (likely(im != NULL)) {
2658 spin_lock_bh(&im->mca_lock);
2659 psf = im->mca_sources;
2660 if (likely(psf != NULL)) {
2665 spin_unlock_bh(&im->mca_lock);
2667 read_unlock_bh(&idev->lock);
2672 static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
2674 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2678 spin_unlock_bh(&state->im->mca_lock);
2679 state->im = state->im->next;
2680 while (!state->im) {
2681 if (likely(state->idev != NULL))
2682 read_unlock_bh(&state->idev->lock);
2684 state->dev = next_net_device_rcu(state->dev);
2689 state->idev = __in6_dev_get(state->dev);
2692 read_lock_bh(&state->idev->lock);
2693 state->im = state->idev->mc_list;
2697 spin_lock_bh(&state->im->mca_lock);
2698 psf = state->im->mca_sources;
2704 static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2706 struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
2708 while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
2710 return pos ? NULL : psf;
2713 static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2717 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2720 static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2722 struct ip6_sf_list *psf;
2723 if (v == SEQ_START_TOKEN)
2724 psf = igmp6_mcf_get_first(seq);
2726 psf = igmp6_mcf_get_next(seq, v);
2731 static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2734 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2735 if (likely(state->im != NULL)) {
2736 spin_unlock_bh(&state->im->mca_lock);
2739 if (likely(state->idev != NULL)) {
2740 read_unlock_bh(&state->idev->lock);
2747 static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
2749 struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
2750 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2752 if (v == SEQ_START_TOKEN) {
2755 "%32s %32s %6s %6s\n", "Idx",
2756 "Device", "Multicast Address",
2757 "Source Address", "INC", "EXC");
2760 "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
2761 state->dev->ifindex, state->dev->name,
2762 &state->im->mca_addr,
2764 psf->sf_count[MCAST_INCLUDE],
2765 psf->sf_count[MCAST_EXCLUDE]);
2770 static const struct seq_operations igmp6_mcf_seq_ops = {
2771 .start = igmp6_mcf_seq_start,
2772 .next = igmp6_mcf_seq_next,
2773 .stop = igmp6_mcf_seq_stop,
2774 .show = igmp6_mcf_seq_show,
2777 static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
2779 return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
2780 sizeof(struct igmp6_mcf_iter_state));
2783 static const struct file_operations igmp6_mcf_seq_fops = {
2784 .owner = THIS_MODULE,
2785 .open = igmp6_mcf_seq_open,
2787 .llseek = seq_lseek,
2788 .release = seq_release_net,
2791 static int __net_init igmp6_proc_init(struct net *net)
2796 if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops))
2798 if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
2799 &igmp6_mcf_seq_fops))
2800 goto out_proc_net_igmp6;
2807 remove_proc_entry("igmp6", net->proc_net);
2811 static void __net_exit igmp6_proc_exit(struct net *net)
2813 remove_proc_entry("mcfilter6", net->proc_net);
2814 remove_proc_entry("igmp6", net->proc_net);
2817 static inline int igmp6_proc_init(struct net *net)
2821 static inline void igmp6_proc_exit(struct net *net)
2826 static int __net_init igmp6_net_init(struct net *net)
2830 err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
2831 SOCK_RAW, IPPROTO_ICMPV6, net);
2833 pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
2838 inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
2840 err = igmp6_proc_init(net);
2842 goto out_sock_create;
2847 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2851 static void __net_exit igmp6_net_exit(struct net *net)
2853 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2854 igmp6_proc_exit(net);
2857 static struct pernet_operations igmp6_net_ops = {
2858 .init = igmp6_net_init,
2859 .exit = igmp6_net_exit,
2862 int __init igmp6_init(void)
2864 return register_pernet_subsys(&igmp6_net_ops);
2867 void igmp6_cleanup(void)
2869 unregister_pernet_subsys(&igmp6_net_ops);