/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header.
 *					Relax this requirement to work with older peers.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
struct mr_table {
	struct list_head	list;
	possible_net_t		net;
	u32			id;
	struct sock __rcu	*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
	int			mroute_reg_vif_num;
};

struct ipmr_rule {
	struct fib_rule		common;
};
static inline bool pimsm_enabled(void)
{
	return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
}
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */
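/* Illustrative sketch of the scheme above (not code from this file): the
 * forwarding path takes only the read side of mrt_lock, while configuration
 * changes take the write side in process context.
 *
 *	read_lock(&mrt_lock);		// data path, readers run concurrently
 *	// ... look up vif_table / mfc_cache_array ...
 *	read_unlock(&mrt_lock);
 *
 *	write_lock_bh(&mrt_lock);	// config path, excludes readers
 *	// ... update vif_table ...
 *	write_unlock_bh(&mrt_lock);
 */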
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};
	int err;

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}
static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
}
#endif
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);
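	/* Worked bound for the check above: "pimreg" is 6 characters, an id
	 * below 1000000000 prints as at most 9 digits, and the trailing NUL
	 * adds 1, so "pimreg%u" needs at most 16 bytes and fits IFNAMSIZ
	 * exactly; a 10-digit id would need 17.
	 */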
	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}
static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (!in_dev)
				goto failure;

			ipv4_devconf_setall(in_dev);
			neigh_parms_data_state_setall(in_dev->arp_parms);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	struct in_device *in_dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif
/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev),
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10 * HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;

			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
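/* Worked example for the loop above (hypothetical values): with vifs 1 and
 * 3 present and ttls = { 255, 1, 255, 4, 255, ... }, it sets
 * res.ttls[1] = 1 and res.ttls[3] = 4, giving minvif = 1 and maxvif = 4,
 * so the forwarding loop only scans vifs 1..3.
 */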
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!pimsm_enabled())
			return -EOPNOTSUPP;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
				    &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
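/* Illustrative userspace sketch (not part of this file) of the call that
 * reaches vif_add() above; "s" is a raw IGMP socket already initialised
 * with MRT_INIT, the address is a placeholder, and the usual userspace
 * headers (<linux/mroute.h>, <arpa/inet.h>) are assumed.
 *
 *	struct vifctl vc = {
 *		.vifc_vifi = 0,
 *		.vifc_threshold = 1,
 *	};
 *
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 */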
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
		    c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == mcastgrp) {
			if (c->mfc_un.res.ttls[vifi] < 255)
				return c;

			/* It's ok if the vifi is part of the static tree */
			proxy = ipmr_cache_find_any_parent(mrt,
							   c->mfc_parent);
			if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
				return c;
		}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c)
		c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10 * HZ;
	}
	return c;
}
/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}
/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
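/* Illustrative userspace sketch (not part of this file): how a daemon
 * receives the upcalls queued above. They arrive as raw datagrams on the
 * mroute socket; struct igmpmsg overlays the IP header, and im_mbz == 0
 * distinguishes upcalls from real IGMP. resolve() is a hypothetical
 * helper.
 *
 *	char buf[1500];
 *	ssize_t n = recv(s, buf, sizeof(buf), 0);
 *	struct igmpmsg *msg = (struct igmpmsg *)buf;
 *
 *	if (n >= (ssize_t)sizeof(*msg) && msg->im_mbz == 0 &&
 *	    msg->im_msgtype == IGMPMSG_NOCACHE)
 *		resolve(msg->im_src, msg->im_dst, msg->im_vif);
 */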
/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->mfc_parent = -1;
		c->mfc_origin = iph->saddr;
		c->mfc_mcastgrp = iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			 * out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
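/* Illustrative userspace sketch (not part of this file): installing the
 * (S,G) entry that ipmr_mfc_add() above expects, typically in response to
 * an IGMPMSG_NOCACHE upcall ("msg" as in the earlier upcall sketch). A ttl
 * of 255 leaves a vif out of the oif set.
 *
 *	struct mfcctl mc = { .mfcc_parent = 0 };
 *
 *	memset(mc.mfcc_ttls, 255, sizeof(mc.mfcc_ttls));
 *	mc.mfcc_origin = msg->im_src;
 *	mc.mfcc_mcastgrp = msg->im_dst;
 *	mc.mfcc_ttls[1] = 1;
 *	mc.mfcc_ttls[2] = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 */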
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
/* Called from ip_ra_control(), before an RCU grace period,
 * so we don't need to call synchronize_rcu() here.
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt);
		}
	}
}
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	int ret, parent = 0;
	struct vifctl vif;
	struct mfcctl mfc;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int))
			return -EINVAL;

		rtnl_lock();
		if (rtnl_dereference(mrt->mroute_sk)) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk))
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		rtnl_unlock();
		return ret;

	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mfcc_parent;
		rtnl_lock();
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		rtnl_unlock();
		return ret;

	/* Control PIM assert. */
	case MRT_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}
	case MRT_PIM:
	{
		int v;

		if (!pimsm_enabled())
			return -ENOPROTOOPT;
		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;

		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
	case MRT_TABLE:
	{
		u32 v;

		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES))
			return -ENOPROTOOPT;
		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;

		rtnl_lock();
		ret = 0;
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, v);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = v;
		}
		rtnl_unlock();
		return ret;
	}
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		return -ENOPROTOOPT;
	}
}
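/* Illustrative userspace sketch (not part of this file): the lifetime of
 * the mroute socket driving the setsockopt handler above. Only a raw IGMP
 * socket is accepted, and only one daemon may hold MRT_INIT at a time.
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int on = 1;
 *
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &on, sizeof(on));
 *	// ... MRT_ADD_VIF / MRT_ADD_MFC, read upcalls ...
 *	setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);
 *	close(s);
 */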
/* Getsock opt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			 int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
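/* Illustrative userspace sketch (not part of this file): reading an option
 * back; olr above clamps the copied size to sizeof(int).
 *
 *	int ver = 0;
 *	socklen_t len = sizeof(ver);
 *
 *	getsockopt(s, IPPROTO_IP, MRT_VERSION, &ver, &len);
 */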
/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
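/* Illustrative userspace sketch (not part of this file): fetching per-vif
 * counters through the ioctl above; assumes <sys/ioctl.h> and <stdio.h>.
 *
 *	struct sioc_vif_req vr = { .vifi = 0 };
 *
 *	if (ioctl(s, SIOCGETVIFCNT, &vr) == 0)
 *		printf("vif0: %lu/%lu pkts in/out\n", vr.icount, vr.ocount);
 */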
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}
/* Processing handlers for ipmr_forward */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow sending ICMP here, so such packets silently
		 * disappear into a blackhole.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on; otherwise the program would have to join
	 * on all interfaces. On the other hand, a multihomed host (or
	 * router, but not mrouter) cannot join on more than one interface -
	 * it would result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ipmr_find_vif(mrt, skb->dev);

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != skb->dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for (S,G)
			 * entries whose default multicast route points to
			 * the wrong oif. In any case, it is not a good idea
			 * to use multicasting applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an oif.
		     * It is bad, but otherwise we would need to move pretty
		     * large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/* Forward the frame */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
		    ip_hdr(skb)->ttl >
				cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming
			 * from the upstream: forward the packet to the
			 * upstream only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}
/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations, such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. This is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
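	/* Two checksums are accepted below: one over the PIM header only
	 * (what PIMv2 registers should carry) or, to stay compatible with
	 * older peers, one over the whole packet -- see the Pavlin
	 * Radoslavov note in the header of this file.
	 */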
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)			/* RTA_TABLE */
		+ nla_total_size(4)			/* RTA_SRC */
		+ nla_total_size(4)			/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)		/* RTA_IIF */
		      + nla_total_size(0)		/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
							/* RTA_MFC_STATS */
		      + nla_total_size(sizeof(struct rta_mfc_stats))
		;

	return len;
}
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     mfc, RTM_NEWROUTE,
						     NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing:
 * /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
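/* Sample /proc/net/ip_mr_vif line produced by the format above
 * (hypothetical values; addresses are printed as hex, host order):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0         144050     252        0       0 00000 C0A80001 00000000
 */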
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
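/* Sample /proc/net/ip_mr_cache entry produced by the format above
 * (hypothetical values); each oif is printed as vif:ttl.
 *
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	E1000001 C0A80001 0         12     9600        0  1:1  2:1
 */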
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	= pim_rcv,
	.netns_ok	= 1,
};
#endif

/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}