/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
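
/* Hash the tunnel key together with the remote endpoint address to pick one
 * of the IP_TNL_HASH_SIZE buckets in the per-netns tunnel table.
 */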
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
			 IP_TNL_HASH_BITS);
}
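
/* Publish a new cached route: take a reference on the new dst, atomically
 * swap it into place with xchg(), then drop the reference on the old entry
 * so concurrent readers never observe a freed dst.
 */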
static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
			     struct dst_entry *dst, __be32 saddr)
{
	struct dst_entry *old_dst;

	dst_clone(dst);
	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
	dst_release(old_dst);
	idst->saddr = saddr;
}

static void tunnel_dst_set(struct ip_tunnel *t,
			   struct dst_entry *dst, __be32 saddr)
{
	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
}

static void tunnel_dst_reset(struct ip_tunnel *t)
{
	tunnel_dst_set(t, NULL, 0);
}

void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
{
	int i;

	for_each_possible_cpu(i)
		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
}
EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
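
/* Fetch this CPU's cached route under RCU. atomic_inc_not_zero() guards
 * against a dst whose last reference is concurrently being dropped, and the
 * dst->ops->check() hook rejects entries invalidated by routing changes.
 */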
static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
					u32 cookie, __be32 *saddr)
{
	struct ip_tunnel_dst *idst;
	struct dst_entry *dst;

	rcu_read_lock();
	idst = this_cpu_ptr(t->dst_cache);
	dst = rcu_dereference(idst->dst);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	if (dst) {
		if (!dst->obsolete || dst->ops->check(dst, cookie)) {
			*saddr = idst->saddr;
		} else {
			tunnel_dst_reset(t);
			dst_release(dst);
			dst = NULL;
		}
	}
	rcu_read_unlock();
	return (struct rtable *)dst;
}

static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		else
			/* key expected, none present */
			return false;
	} else
		return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate input tunnel.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	/* Pass 1: tunnels matching both endpoints */
	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	/* Pass 2: tunnels bound to the remote address only */
	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    t->parms.iph.saddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	/* Pass 3: tunnels bound to the local address or a multicast group */
	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
			continue;

		if (!(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	/* Pass 4: keyed wildcard tunnels */
	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    t->parms.iph.saddr != 0 ||
		    t->parms.iph.daddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
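
/* Select the hash bucket for a tunnel's configured parameters; multicast
 * destinations hash like wildcard remotes so they land with the keyless
 * lookups above.
 */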
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	h = ip_tunnel_hash(parms->i_key, remote);
	return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel *t)
{
	hlist_del_init_rcu(&t->hash_node);
}

static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;
	}
	return t;
}

static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
			err = -E2BIG;
			goto failed;
		}
		strlcpy(name, ops->kind, IFNAMSIZ);
		strncat(name, "%d", 2);
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

static inline void init_tunnel_flow(struct flowi4 *fl4,
				    int proto,
				    __be32 daddr, __be32 saddr,
				    __be32 key, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
}
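
/* Resolve the route to the tunnel endpoint once at configuration time so a
 * sensible MTU and needed_headroom can be derived from the underlying device.
 */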
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
				 iph->saddr, tunnel->parms.o_key,
				 RT_TOS(iph->tos), tunnel->parms.link);
		rt = ip_route_output_key(tunnel->net, &fl4);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	if (mtu < 68)
		mtu = 68;

	return mtu;
}

static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt, *fbt;
	struct net_device *dev;

	BUG_ON(!itn->fb_tunnel_dev);
	fbt = netdev_priv(itn->fb_tunnel_dev);
	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return NULL;

	dev->mtu = ip_tunnel_bind_dev(dev);

	nt = netdev_priv(dev);
	ip_tunnel_add(itn, nt);
	return nt;
}
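
/* Common receive path for IP-in-IP style tunnels: validate the checksum and
 * sequence-number flags against the tunnel's configuration, decapsulate the
 * ECN bits, update per-CPU stats and hand the inner packet to GRO.
 */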
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
	struct pcpu_sw_netstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	    ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
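
/* Check the packet against the path MTU of the outer route, propagate a
 * reduced MTU to the inner dst, and signal the sender (ICMP "fragmentation
 * needed" for IPv4, "packet too big" for IPv6) when it does not fit.
 */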
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			   struct rtable *rt, __be16 df)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
	int mtu;

	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (df & htons(IP_DF)) && mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
		    mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}
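
/* Common transmit path: resolve the destination (falling back to the inner
 * headers for NBMA tunnels), pick up the cached route when the tunnel is
 * connected, enforce PMTU, then build the outer IP header and send.
 */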
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *inner_iph;
	struct flowi4 fl4;
	u8     tos, ttl;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst;
	int err;
	bool connected;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	connected = (tunnel->parms.iph.daddr != 0);

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel: take the destination from the inner packet */
		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);

	rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);
		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (connected)
			tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP))
		df |= (inner_iph->frag_off&htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
			    tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
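
/* Apply new parameters to an existing tunnel. The tunnel is unhashed and
 * rehashed because the endpoints (and thus the bucket) may change; the
 * per-CPU dst cache is flushed since cached routes may now be stale.
 */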
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu)
{
	ip_tunnel_del(t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link) {
		int mtu;

		t->parms.link = p->link;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	ip_tunnel_dst_reset_all(t);
	netdev_state_change(dev);
}
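
/* Legacy ioctl interface (SIOCGETTUNNEL/SIOCADDTUNNEL/SIOCCHGTUNNEL/
 * SIOCDELTUNNEL); add, change and delete require CAP_NET_ADMIN in the
 * device's user namespace.
 */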
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	BUG_ON(!itn->fb_tunnel_dev);
	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == itn->fb_tunnel_dev)
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags&TUNNEL_KEY))
			p->i_key = 0;
		if (!(p->o_flags&TUNNEL_KEY))
			p->o_key = 0;

		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

		if (!t && (cmd == SIOCADDTUNNEL))
			t = ip_tunnel_create(net, itn, p);

		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true);
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (t == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(tunnel->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops) {
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net *net = dev_net(itn->fb_tunnel_dev);
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
	LIST_HEAD(list);

	rtnl_lock();
	ip_tunnel_destroy(itn, &list, ops);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
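
/* rtnl counterpart of SIOCADDTUNNEL: register the device, give Ethernet-style
 * tunnels a random MAC unless one was supplied, and hash the new tunnel into
 * the per-netns table.
 */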
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (ip_tunnel_find(itn, p, dev->type))
		return -EEXIST;

	nt->net = net;
	nt->parms = *p;
	err = register_netdevice(dev);
	if (err)
		goto out;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	ip_tunnel_add(itn, nt);

out:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
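
/* Per-device init: allocate per-CPU stats and the per-CPU dst cache, set up
 * GRO cells, and seed the outer IP header template (IPv4, 20-byte header).
 */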
int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int i, err;

	dev->destructor = ip_tunnel_dev_free;
	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct pcpu_sw_netstats *ipt_stats;
		ipt_stats = per_cpu_ptr(dev->tstats, i);
		u64_stats_init(&ipt_stats->syncp);
	}

	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
	if (!tunnel->dst_cache) {
		free_percpu(dev->tstats);
		return -ENOMEM;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version = 4;
	iph->ihl = 5;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	/* fb_tunnel_dev will be unregistered in the net-exit call. */
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(netdev_priv(dev));

	ip_tunnel_dst_reset_all(tunnel);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do the least required initialization; the rest is done in the tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");