/*
 *	Linux NET3:	IP/IP protocol decoder.
 *
 *	Authors:
 *		Sam Lantinga (slouken@cs.ucdavis.edu)  02/01/95
 *
 *	Fixes:
 *		Alan Cox	:	Merged and made usable non modular (it's so tiny it's silly as
 *					a module taking up 2 pages).
 *		Alan Cox	:	Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph)
 *					to keep ip_forward happy.
 *		Alan Cox	:	More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
 *		Kai Schulte	:	Fixed #defines for IP_FIREWALL->FIREWALL
 *		David Woodhouse	:	Perform some basic ICMP handling.
 *					IPIP Routing without decapsulation.
 *		Carlos Picoto	:	GRE over IP support
 *		Alexey Kuznetsov:	Reworked. Really, now it is a truncated version of ipv4/ip_gre.c.
 *					I do not want to merge them together.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
/* tunnel.c: an IP tunnel driver

	The purpose of this driver is to provide an IP tunnel through
	which you can tunnel network traffic transparently across subnets.

	This was written by looking at Nick Holloway's dummy driver.
	Thanks for the great code!

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95

	Minor tweaks:
		Cleaned up the code a little and added some pre-1.3.0 tweaks.
		dev->hard_header/hard_header_len changed to use no headers.
		Comments/bracketing tweaked.
		Made the tunnels use dev->name not tunnel: when error reporting.
		Added tx_dropped stat

		-Alan Cox	(alan@lxorguk.ukuu.org.uk) 21 March 95

	Reworked:
		Changed to tunnel to destination gateway in addition to the
		tunnel's pointopoint address
		Almost completely rewritten
		Note:  There is currently no firewall or ICMP handling done.

		-Sam Lantinga	(slouken@cs.ucdavis.edu) 02/13/96

*/
/* Things I wish I had known when writing the tunnel driver:

	When the tunnel_xmit() function is called, the skb contains the
	packet to be sent (plus a great deal of extra info), and dev
	contains the tunnel device that _we_ are.

	When we are passed a packet, we are expected to fill in the
	source address with our source IP address.

	What is the proper way to allocate, copy and free a buffer?

	After you allocate it, it is a "0 length" chunk of memory
	starting at zero.  If you want to add headers to the buffer
	later, you'll have to call "skb_reserve(skb, amount)" with
	the amount of memory you want reserved.  Then, you call
	"skb_put(skb, amount)" with the amount of space you want in
	the buffer.  skb_put() returns a pointer to the top (#0) of
	that buffer.  skb->len is set to the amount of space you have
	"allocated" with skb_put().  You can then write up to skb->len
	bytes to that buffer.  If you need more, you can call skb_put()
	again with the additional amount of space you need.  You can
	find out how much more space you can allocate by calling
	"skb_tailroom(skb)".
	Now, to add header space, call "skb_push(skb, header_len)".
	This creates space at the beginning of the buffer and returns
	a pointer to this new space.  If later you need to strip a
	header from a buffer, call "skb_pull(skb, header_len)".
	skb_headroom() will return how much space is left at the top
	of the buffer (before the main data).  Remember, this headroom
	space must be reserved before the skb_put() function is called.
*/
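
/* A minimal sketch of the sequence described above, kept as a comment
 * for illustration only; "header_len", "payload" and "payload_len" are
 * hypothetical names, not part of this driver:
 *
 *	struct sk_buff *skb = alloc_skb(header_len + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, header_len);             reserve headroom first
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	skb_push(skb, header_len);                then prepend the header
 */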
/*
   This version of net/ipv4/ipip.c is a clone of net/ipv4/ip_gre.c.

   For comments look at net/ipv4/ip_gre.c --ANK
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
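
/* Hash a tunnel endpoint address into one of the 16 buckets of each
 * hash table by XORing the two low-order nibbles of its raw bits. */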
#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

static int ipip_net_id __read_mostly;
struct ipip_net {
	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_wc[1];
	struct ip_tunnel __rcu **tunnels[4];

	struct net_device *fb_tunnel_dev;
};
static int ipip_tunnel_init(struct net_device *dev);
static void ipip_tunnel_setup(struct net_device *dev);
static void ipip_dev_free(struct net_device *dev);

/*
 * Locking : hash tables are protected by RCU and RTNL
 */
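
/* Walk one RCU-protected hash chain.  The macro assumes a variable
 * named 't' is in scope at the expansion site and that the caller is
 * in an RCU read-side critical section (or holds RTNL). */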
#define for_each_ip_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

/* often modified stats are per cpu, others are shared (netdev->stats) */
struct pcpu_tstats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
	struct u64_stats_sync	syncp;
};
static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
						  struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;
	tot->collisions = dev->stats.collisions;

	return tot;
}
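
/* Lookup precedence: fully keyed (remote, local) tunnels first, then
 * remote-only, then local-only, and finally the wildcard tunnel.  The
 * first match whose device is up wins. */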
static struct ip_tunnel *ipip_tunnel_lookup(struct net *net,
		__be32 remote, __be32 local)
{
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(local);
	struct ip_tunnel *t;
	struct ipip_net *ipn = net_generic(net, ipip_net_id);

	for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1])
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(ipn->tunnels_r[h0])
		if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(ipn->tunnels_l[h1])
		if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
			return t;

	t = rcu_dereference(ipn->tunnels_wc[0]);
	if (t && (t->dev->flags&IFF_UP))
		return t;
	return NULL;
}
static struct ip_tunnel __rcu **__ipip_bucket(struct ipip_net *ipn,
		struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	unsigned int h = 0;
	int prio = 0;

	if (remote) {
		prio |= 2;
		h ^= HASH(remote);
	}
	if (local) {
		prio |= 1;
		h ^= HASH(local);
	}
	return &ipn->tunnels[prio][h];
}
static inline struct ip_tunnel __rcu **ipip_bucket(struct ipip_net *ipn,
		struct ip_tunnel *t)
{
	return __ipip_bucket(ipn, &t->parms);
}
static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipip_bucket(ipn, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
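
/* Publish a tunnel at the head of its bucket.  t->next is set before
 * the bucket head, so concurrent RCU readers always see a consistent
 * chain; rcu_assign_pointer() supplies the required memory barrier. */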
static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
		struct ip_tunnel_parm *parms, int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	struct ip_tunnel *t, *nt;
	struct ip_tunnel __rcu **tp;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipip_net *ipn = net_generic(net, ipip_net_id);

	for (tp = __ipip_bucket(ipn, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
			return t;
	}
	if (!create)
		return NULL;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "tunl%d");

	dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;

	if (ipip_tunnel_init(dev) < 0)
		goto failed_free;

	if (register_netdevice(dev) < 0)
		goto failed_free;

	strcpy(nt->parms.name, dev->name);

	dev_hold(dev);
	ipip_tunnel_link(ipn, nt);
	return nt;

failed_free:
	ipip_dev_free(dev);
	return NULL;
}
/* called with RTNL */
static void ipip_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipip_net *ipn = net_generic(net, ipip_net_id);

	if (dev == ipn->fb_tunnel_dev)
		RCU_INIT_POINTER(ipn->tunnels_wc[0], NULL);
	else
		ipip_tunnel_unlink(ipn, netdev_priv(dev));
	dev_put(dev);
}
static int ipip_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.
 */
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	}

	err = -ENOENT;
	t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
	if (t == NULL)
		goto out;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->dev->ifindex, 0, IPPROTO_IPIP, 0);
		err = 0;
		goto out;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
			      IPPROTO_IPIP, 0);
		err = 0;
		goto out;
	}

	if (t->parms.iph.daddr == 0)
		goto out;

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	return err;
}
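
/* On decapsulation, if the outer header was marked Congestion
 * Experienced along the path, copy the CE mark to the inner header so
 * the ECN signal is not lost when the outer header is stripped. */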
static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,
					struct sk_buff *skb)
{
	struct iphdr *inner_iph = ip_hdr(skb);

	if (INET_ECN_is_ce(outer_iph->tos))
		IP_ECN_set_ce(inner_iph);
}
static int ipip_rcv(struct sk_buff *skb)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);

	tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
	if (tunnel != NULL) {
		struct pcpu_tstats *tstats;

		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			kfree_skb(skb);
			return 0;
		}

		secpath_reset(skb);

		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->protocol = htons(ETH_P_IP);
		skb->pkt_type = PACKET_HOST;

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		__skb_tunnel_rx(skb, tunnel->dev);

		ipip_ecn_decapsulate(iph, skb);

		netif_rx(skb);

		return 0;
	}

	return -1;
}
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct pcpu_tstats *tstats;
	const struct iphdr  *tiph = &tunnel->parms.iph;
	u8     tos = tunnel->parms.iph.tos;
	__be16 df = tiph->frag_off;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	const struct iphdr  *old_iph = ip_hdr(skb);
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	__be32 dst = tiph->daddr;
	struct flowi4 fl4;
	int    mtu;

	if (skb->protocol != htons(ETH_P_IP))
		goto tx_error;

	if (tos & 1)
		tos = old_iph->tos;

	if (!dst) {
		/* NBMA tunnel */
		if ((rt = skb_rtable(skb)) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}
		dst = rt_nexthop(rt, old_iph->daddr);
	}

	rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
				   dst, tiph->saddr,
				   0, 0,
				   IPPROTO_IPIP, RT_TOS(tos),
				   tunnel->parms.link);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df |= old_iph->frag_off & htons(IP_DF);

	if (df) {
		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
		if (mtu < 68) {
			dev->stats.collisions++;
			ip_rt_put(rt);
			goto tx_error;
		}

		if (skb_dst(skb))
			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

		if ((old_iph->frag_off & htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = (LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr));

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb->transport_header = skb->network_header;
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IPIP header.
	 */
	iph			= ip_hdr(skb);
	iph->version		= 4;
	iph->ihl		= sizeof(struct iphdr)>>2;
	iph->frag_off		= df;
	iph->protocol		= IPPROTO_IPIP;
	iph->tos		= INET_ECN_encapsulate(tos, old_iph->tos);
	iph->daddr		= fl4.daddr;
	iph->saddr		= fl4.saddr;

	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl	= old_iph->ttl;

	nf_reset(skb);
	tstats = this_cpu_ptr(dev->tstats);
	__IPTUNNEL_XMIT(tstats, &dev->stats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void ipip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	if (iph->daddr) {
		struct rtable *rt;
		struct flowi4 fl4;

		rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
					   iph->daddr, iph->saddr,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos),
					   tunnel->parms.link);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
		dev->mtu = tdev->mtu - sizeof(struct iphdr);
	}
	dev->iflink = tunnel->parms.link;
}
static int
ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipip_net *ipn = net_generic(net, ipip_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ipn->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipip_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		t = ipip_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = netdev_priv(dev);
				ipip_tunnel_unlink(ipn, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipip_tunnel_link(ipn, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					ipip_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ipn->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipip_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t->dev == ipn->fb_tunnel_dev)
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
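
/* 68 is the minimum IPv4 MTU (RFC 791); the upper bound keeps the
 * outer datagram's 16-bit total length from overflowing once
 * sizeof(struct iphdr) is added back during encapsulation. */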
static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
static const struct net_device_ops ipip_netdev_ops = {
	.ndo_uninit	 = ipip_tunnel_uninit,
	.ndo_start_xmit	 = ipip_tunnel_xmit,
	.ndo_do_ioctl	 = ipip_tunnel_ioctl,
	.ndo_change_mtu	 = ipip_tunnel_change_mtu,
	.ndo_get_stats64 = ipip_get_stats64,
};
static void ipip_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}
static void ipip_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipip_netdev_ops;
	dev->destructor		= ipip_dev_free;

	dev->type		= ARPHRD_TUNNEL;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr);
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->features		|= NETIF_F_LLTX;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}
static int ipip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->dev = dev;

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	ipip_tunnel_bind_dev(dev);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct ipip_net *ipn = net_generic(dev_net(dev), ipip_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version = 4;
	iph->protocol = IPPROTO_IPIP;
	iph->ihl = 5;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	dev_hold(dev);
	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
	return 0;
}
static struct xfrm_tunnel ipip_handler __read_mostly = {
	.handler	=	ipip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};

static const char banner[] __initconst =
	KERN_INFO "IPv4 over IPv4 tunneling driver\n";
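
/* Unregister every tunnel in the per-net hash tables.  prio 0 (the
 * wildcard chain) is skipped: it only holds the fallback device, which
 * ipip_exit_net() queues for unregistration itself. */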
static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
{
	int prio;

	for (prio = 1; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ipn->tunnels[prio][h]);
			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}
static int __net_init ipip_init_net(struct net *net)
{
	struct ipip_net *ipn = net_generic(net, ipip_net_id);
	struct ip_tunnel *t;
	int err;

	ipn->tunnels[0] = ipn->tunnels_wc;
	ipn->tunnels[1] = ipn->tunnels_l;
	ipn->tunnels[2] = ipn->tunnels_r;
	ipn->tunnels[3] = ipn->tunnels_r_l;

	ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
					  "tunl0",
					  ipip_tunnel_setup);
	if (!ipn->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ipn->fb_tunnel_dev, net);

	err = ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	if ((err = register_netdev(ipn->fb_tunnel_dev)))
		goto err_reg_dev;

	t = netdev_priv(ipn->fb_tunnel_dev);

	strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
	return 0;

err_reg_dev:
	ipip_dev_free(ipn->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
static void __net_exit ipip_exit_net(struct net *net)
{
	struct ipip_net *ipn = net_generic(net, ipip_net_id);
	LIST_HEAD(list);

	rtnl_lock();
	ipip_destroy_tunnels(ipn, &list);
	unregister_netdevice_queue(ipn->fb_tunnel_dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations ipip_net_ops = {
	.init = ipip_init_net,
	.exit = ipip_exit_net,
	.id   = &ipip_net_id,
	.size = sizeof(struct ipip_net),
};
static int __init ipip_init(void)
{
	int err;

	printk(banner);

	err = register_pernet_device(&ipip_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
	if (err < 0) {
		unregister_pernet_device(&ipip_net_ops);
		pr_info("%s: can't register tunnel\n", __func__);
	}
	return err;
}
static void __exit ipip_fini(void)
{
	if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
		pr_info("%s: can't deregister tunnel\n", __func__);

	unregister_pernet_device(&ipip_net_ops);
}
module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETDEV("tunl0");