/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}
#endif
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);
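	/* No PIM register vif exists yet; readers test
	 * mroute_reg_vif_num >= 0 before touching the register vif.
	 */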
	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;

	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};
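/* The MTU chosen below leaves room for the outer IP header plus the 8-byte
 * PIM register header that encapsulate packets tunnelled to the rendezvous
 * point.
 */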
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif
/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev),
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
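	/* 255 is the "not forwarded" sentinel: any vif left at 255 here is
	 * skipped by the output loop in ip_mr_forward().
	 */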
	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
				    &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
		    c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == mcastgrp) {
			if (c->mfc_un.res.ttls[vifi] < 255)
				return c;

			/* It's ok if the vifi is part of the static tree */
			proxy = ipmr_cache_find_any_parent(mrt,
							   c->mfc_parent);
			if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
				return c;
		}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c)
		c->mfc_un.res.minvif = MAXVIFS;
	return c;
}
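/* Unresolved entries are created atomically from the receive path and are
 * given a 10 second lifetime; ipmr_expire_process() reaps whatever mrouted
 * never resolved.
 */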
static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}
/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}
/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
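/* A rough sketch of that scheme (not from the original comment): the skb
 * queued to mrouted starts with the packet's IP header reinterpreted as a
 * struct igmpmsg (im_msgtype, im_mbz and im_vif overlay header bytes, while
 * im_src/im_dst alias the addresses); for the NOCACHE/WRONGVIF reports a
 * struct igmphdr whose type repeats the message type follows.
 */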
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
/* Queue a packet for resolution. It gets a locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			 * out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt);
		}
	}
	rtnl_unlock();
}
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */
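/* A minimal userspace sketch of driving this API (illustrative only; the
 * field values are assumptions, not taken from this file):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *	// then MRT_ADD_MFC for each (S,G), and MRT_DONE on shutdown
 */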
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			/* We need to unlock here because mrtsock_destruct takes
			 * care of rtnl itself and we can't change that due to
			 * the IP_ROUTER_ALERT setsockopt which runs without it.
			 */
			rtnl_unlock();
			ret = ip_ra_control(sk, 0, NULL);
			goto out;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
out:
	return ret;
}
/* Getsock opt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/* The IP multicast ioctl support routines. */
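/* For example (an illustrative userspace sketch with assumed addresses), a
 * monitoring tool can read the per-flow counters on the mroute socket with:
 *
 *	struct sioc_sg_req sr = {
 *		.src.s_addr = inet_addr("192.0.2.1"),
 *		.grp.s_addr = inet_addr("239.1.2.3"),
 *	};
 *	ioctl(s, SIOCGETSGCNT, &sr);	// fills pktcnt, bytecnt, wrong_if
 */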
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->tos	=	old_iph->tos;
	iph->ttl	=	old_iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}
/* Processing handlers for ipmr_forward */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow sending ICMP in this case, so such packets will
		 * simply disappear into a black hole.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear that if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program has joined on.
	 * If we don't do this, the program will have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not an mrouter) cannot join more than one interface - it would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
1787 /* "local" means that we should preserve one skb (for local delivery) */
1788 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1789 struct sk_buff *skb, struct mfc_cache *cache,
1794 int true_vifi = ipmr_find_vif(mrt, skb->dev);
1796 vif = cache->mfc_parent;
1797 cache->mfc_un.res.pkt++;
1798 cache->mfc_un.res.bytes += skb->len;
1800 if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
1801 struct mfc_cache *cache_proxy;
1803 /* For an (*,G) entry, we only check that the incomming
1804 * interface is part of the static tree.
1806 cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
1808 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
1812 /* Wrong interface: drop packet and (maybe) send PIM assert. */
1813 if (mrt->vif_table[vif].dev != skb->dev) {
1814 if (rt_is_output_route(skb_rtable(skb))) {
1815 /* It is our own packet, looped back.
1816 * Very complicated situation...
1818 * The best workaround until routing daemons will be
1819 * fixed is not to redistribute packet, if it was
1820 * send through wrong interface. It means, that
1821 * multicast applications WILL NOT work for
1822 * (S,G), which have default multicast route pointing
1823 * to wrong oif. In any case, it is not a good
1824 * idea to use multicasting applications on router.
1829 cache->mfc_un.res.wrong_if++;
1831 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1832 /* pimsm uses asserts, when switching from RPT to SPT,
1833 * so that we cannot check that packet arrived on an oif.
1834 * It is bad, but otherwise we would need to move pretty
1835 * large chunk of pimd to kernel. Ough... --ANK
1837 (mrt->mroute_do_pim ||
1838 cache->mfc_un.res.ttls[true_vifi] < 255) &&
1840 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1841 cache->mfc_un.res.last_assert = jiffies;
1842 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1848 mrt->vif_table[vif].pkt_in++;
1849 mrt->vif_table[vif].bytes_in += skb->len;
1851 /* Forward the frame */
1852 if (cache->mfc_origin == htonl(INADDR_ANY) &&
1853 cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
1854 if (true_vifi >= 0 &&
1855 true_vifi != cache->mfc_parent &&
1857 cache->mfc_un.res.ttls[cache->mfc_parent]) {
1858 /* It's an (*,*) entry and the packet is not coming from
1859 * the upstream: forward the packet to the upstream
1862 psend = cache->mfc_parent;
1867 for (ct = cache->mfc_un.res.maxvif - 1;
1868 ct >= cache->mfc_un.res.minvif; ct--) {
1869 /* For (*,G) entry, don't forward to the incoming interface */
1870 if ((cache->mfc_origin != htonl(INADDR_ANY) ||
1872 ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1874 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1877 ipmr_queue_xmit(net, mrt, skb2, cache,
1886 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1889 ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1891 ipmr_queue_xmit(net, mrt, skb, cache, psend);
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}
/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. It is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
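	/* Per the note in the file header: PIMv2 registers should checksum
	 * only the PIM header, but a checksum over the whole packet is also
	 * accepted for interoperability with older peers.
	 */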
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(4)	/* RTA_SRC */
		+ nla_total_size(4)	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size(sizeof(struct rta_mfc_stats))
		;

	return len;
}
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     mfc, RTM_NEWROUTE,
						     NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = s_e = 0;

next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing :
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	=	pim_rcv,
	.netns_ok	=	1,
};
#endif
/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}