// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

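/* Tunnels are hashed by the xor of their key and remote address,
 * folded down to IP_TNL_HASH_BITS bits.
 */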
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
		       IP_TNL_HASH_BITS);
}

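/* A tunnel configured with TUNNEL_KEY matches only packets carrying
 * the same key; a keyless tunnel matches only keyless packets.
 */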
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		/* key expected, none present */
		return false;
	}
	return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for an input packet.
*/
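/* For example, a keyed packet for 192.0.2.1 from 192.0.2.2 is tried
 * against progressively looser matches: first tunnels with both
 * endpoints configured, then remote-only, then local-only (or
 * multicast) tunnels, then key-only wildcard tunnels, and finally the
 * collect_md and fallback devices.
 */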
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;
	struct net_device *ndev;
	unsigned int hash;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;
		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    t->parms.iph.saddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;
		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
			continue;
		if (!(t->dev->flags & IFF_UP))
			continue;
		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;
		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
		    t->parms.iph.saddr != 0 ||
		    t->parms.iph.daddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (cand)
		return cand;

	t = rcu_dereference(itn->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	ndev = READ_ONCE(itn->fb_tunnel_dev);
	if (ndev && ndev->flags & IFF_UP)
		return netdev_priv(ndev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);

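/* Select the hash bucket for a parameter set. A multicast or absent
 * destination hashes as zero, and a VTI i_key without TUNNEL_KEY set
 * is ignored, so configuration-time placement agrees with the
 * wildcard passes of ip_tunnel_lookup().
 */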
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;
	__be32 i_key = parms->i_key;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
		i_key = 0;

	h = ip_tunnel_hash(i_key, remote);
	return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, t);
	hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, NULL);
	hlist_del_init_rcu(&t->hash_node);
}

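/* Exact-match lookup used on the configuration paths, as opposed to
 * the progressive matching done by ip_tunnel_lookup() on receive.
 */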
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	__be16 flags = parms->i_flags;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    link == t->parms.link &&
		    type == t->dev->type &&
		    ip_tunnel_key_match(&t->parms, flags, key))
			break;
	}
	return t;
}

static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	err = -E2BIG;
	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			goto failed;
		strscpy(name, parms->name, IFNAMSIZ);
	} else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3))
			goto failed;
		strcpy(name, ops->kind);
		strcat(name, "%d");
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

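/* Returns the MTU the tunnel device should use, derived from the
 * device the tunnel route resolves to (or the configured underlay
 * link), and sizes needed_headroom accordingly.
 */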
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
				    iph->saddr, tunnel->parms.o_key,
				    RT_TOS(iph->tos), dev_net(dev),
				    tunnel->parms.link, tunnel->fwmark, 0);
		rt = ip_route_output_key(tunnel->net, &fl4);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;

		dst_cache_reset(&tunnel->dst_cache);
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = min(tdev->mtu, IP_MAX_MTU);
	}

	dev->needed_headroom = t_hlen + hlen;
	mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0);

	if (mtu < IPV4_MIN_MTU)
		mtu = IPV4_MIN_MTU;

	return mtu;
}

static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt;
	struct net_device *dev;
	int t_hlen;
	int mtu;
	int err;

	dev = __ip_tunnel_create(net, itn->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	mtu = ip_tunnel_bind_dev(dev);
	err = dev_set_mtu(dev, mtu);
	if (err)
		goto err_dev_set_mtu;

	nt = netdev_priv(dev);
	t_hlen = nt->hlen + sizeof(struct iphdr);
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->max_mtu -= dev->hard_header_len;

	ip_tunnel_add(itn, nt);
	return nt;

err_dev_set_mtu:
	unregister_netdevice(dev);
	return ERR_PTR(err);
}

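/* Common receive path for decapsulated packets: check TUNNEL_CSUM and
 * TUNNEL_SEQ expectations against the tunnel configuration, undo ECN
 * encapsulation, update stats and feed the packet to GRO. The caller
 * is expected to run under rcu_read_lock() and to have matched
 * @tunnel, e.g. via ip_tunnel_lookup().
 */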
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error)
{
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags & TUNNEL_CSUM) && (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) && !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	dev_sw_netstats_rx_add(tunnel->dev, skb->len);
	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);

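/* Slots in the global iptun_encaps[] array (e.g. FOU/GUE) are claimed
 * and released locklessly with cmpxchg(); deletion waits for in-flight
 * readers via synchronize_net().
 */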
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip_tunnel_encap_ops **)
			&iptun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);

int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
		       &iptun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);

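/* Enforce path MTU on the tunnel route: if the encapsulated packet
 * would not fit and may not be fragmented, signal the sender with
 * ICMP_FRAG_NEEDED or ICMPV6_PKT_TOOBIG and fail the transmit.
 */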
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			   struct rtable *rt, __be16 df,
			   const struct iphdr *inner_iph,
			   int tunnel_hlen, __be32 dst, bool md)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size;
	int mtu;

	tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
	pkt_size = skb->len - tunnel_hlen;
	pkt_size -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;

	if (df) {
		mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
		mtu -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
	} else {
		mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
	}

	if (skb_valid_dst(skb))
		skb_dst_update_pmtu_no_confirm(skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (inner_iph->frag_off & htons(IP_DF)) &&
		    mtu < pkt_size) {
			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6;
		__be32 daddr;

		rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
					   NULL;
		daddr = md ? dst : tunnel->parms.iph.daddr;

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((daddr && !ipv4_is_multicast(daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
					mtu < pkt_size) {
			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}

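/* Transmit using per-packet metadata (collect_md): destination, key,
 * tos and ttl come from the skb's tunnel_info rather than from the
 * device configuration.
 */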
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		       u8 proto, int tunnel_hlen)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	u32 headroom = sizeof(struct iphdr);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	const struct iphdr *inner_iph;
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be16 df = 0;
	u8 tos, ttl;
	bool use_cache;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto tx_error;
	key = &tun_info->key;
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	tos = key->tos;
	if (tos == 1) {
		if (skb->protocol == htons(ETH_P_IP))
			tos = inner_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
	}
	ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
			    dev_net(dev), 0, skb->mark, skb_get_hash(skb));
	if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
		goto tx_error;

	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);
		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl4.saddr);
	}
	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
		df = htons(IP_DF);
	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
			    key->u.ipv4.dst, true)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = key->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
	if (headroom > dev->needed_headroom)
		dev->needed_headroom = headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		goto tx_dropped;
	}
	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;
tx_error:
	dev->stats.tx_errors++;
	goto kfree;
tx_dropped:
	dev->stats.tx_dropped++;
kfree:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);

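/* Transmit path for classically configured tunnels. A typical caller
 * is a driver's ndo_start_xmit; the ipip driver, for instance, does
 * roughly:
 *
 *	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 *	ip_tunnel_xmit(skb, dev, &tunnel->parms.iph, IPPROTO_IPIP);
 *
 * with tnl_params taken from the tunnel's configured outer header.
 */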
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info = NULL;
	const struct iphdr *inner_iph;
	unsigned int max_headroom;	/* The extra header space needed */
	struct rtable *rt = NULL;	/* Route to the other host */
	__be16 payload_protocol;
	bool use_cache = false;
	struct flowi4 fl4;
	bool md = false;
	bool connected;
	u8 tos, ttl;
	__be32 dst;
	__be16 df;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	connected = (tunnel->parms.iph.daddr != 0);
	payload_protocol = skb_protocol(skb, true);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */
		if (!skb_dst(skb)) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		tun_info = skb_tunnel_info(skb);
		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
		    ip_tunnel_info_af(tun_info) == AF_INET &&
		    tun_info->key.u.ipv4.dst) {
			dst = tun_info->key.u.ipv4.dst;
			md = true;
			connected = true;
		} else if (payload_protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (payload_protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		if (!md)
			connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (payload_protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (payload_protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
			    tunnel->parms.o_key, RT_TOS(tos),
			    dev_net(dev), tunnel->parms.link,
			    tunnel->fwmark, skb_get_hash(skb));

	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
		goto tx_error;

	if (connected && md) {
		use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
		if (use_cache)
			rt = dst_cache_get_ip4(&tun_info->dst_cache,
					       &fl4.saddr);
	} else {
		rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache,
						   &fl4.saddr) : NULL;
	}

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl4.saddr);
		else if (!md && connected)
			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
					  fl4.saddr);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tnl_params->frag_off;
	if (payload_protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
		df |= (inner_iph->frag_off & htons(IP_DF));

	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (payload_protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (payload_protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);

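/* Apply a new parameter set to a live tunnel: rehash it under the new
 * addresses and keys, refresh the link-layer addresses of
 * non-Ethernet devices, and rebind if the underlay link or fwmark
 * changed.
 */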
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu,
			     __u32 fwmark)
{
	ip_tunnel_del(itn, t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		__dev_addr_set(dev, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link || t->fwmark != fwmark) {
		int mtu;

		t->parms.link = p->link;
		t->fwmark = fwmark;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	dst_cache_reset(&t->dst_cache);
	netdev_state_change(dev);
}

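/* ioctl backend shared by SIOCGETTUNNEL/SIOCADDTUNNEL/SIOCCHGTUNNEL/
 * SIOCDELTUNNEL. Operations on the fallback device act on the tunnel
 * matching @p instead; add/change/delete require CAP_NET_ADMIN.
 */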
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == itn->fb_tunnel_dev) {
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				t = netdev_priv(dev);
		}
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags & VTI_ISVTI)) {
			if (!(p->i_flags & TUNNEL_KEY))
				p->i_key = 0;
			if (!(p->o_flags & TUNNEL_KEY))
				p->o_key = 0;
		}

		t = ip_tunnel_find(itn, p, itn->type);

		if (cmd == SIOCADDTUNNEL) {
			if (!t) {
				t = ip_tunnel_create(net, itn, p);
				err = PTR_ERR_OR_ZERO(t);
				break;
			}

			err = -EEXIST;
			break;
		}
		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true, 0);
		} else {
			err = -ENOENT;
		}
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ctl);

int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			     void __user *data, int cmd)
{
	struct ip_tunnel_parm p;
	int err;

	if (copy_from_user(&p, data, sizeof(p)))
		return -EFAULT;
	err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, cmd);
	if (!err && copy_to_user(data, &p, sizeof(p)))
		return -EFAULT;
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_siocdevprivate);

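/* The usable MTU is bounded by IP_MAX_MTU minus the tunnel headers
 * (and the Ethernet header for ether devices). With @strict an
 * over-limit request fails with -EINVAL; otherwise it is clamped.
 */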
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
	int max_mtu = IP_MAX_MTU - t_hlen;

	if (dev->type == ARPHRD_ETHER)
		max_mtu -= dev->hard_header_len;

	if (new_mtu < ETH_MIN_MTU)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	dst_cache_destroy(&tunnel->dst_cache);
	free_percpu(dev->tstats);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(itn, netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);

int ip_tunnel_get_iflink(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);

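/* Per-netns initialization: set up the hash table and, where fallback
 * tunnels are enabled, create the fallback device (e.g. "tunl0" or
 * "gre0") that ip_tunnel_lookup() falls back to.
 */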
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	itn->rtnl_link_ops = ops;
	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops || !net_has_fallback_tunnels(net)) {
		struct ip_tunnel_net *it_init_net;

		it_init_net = net_generic(&init_net, ip_tnl_net_id);
		itn->type = it_init_net->type;
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strscpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
		itn->type = itn->fb_tunnel_dev->type;
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
			      struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}

void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
			   struct rtnl_link_ops *ops)
{
	struct ip_tunnel_net *itn;
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		itn = net_generic(net, id);
		ip_tunnel_destroy(net, itn, &list, ops);
	}
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);

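/* rtnl_link counterparts of the ioctl paths above: create or
 * reconfigure a tunnel from already-parsed netlink attributes. Only
 * one collect_md tunnel of a given type may exist per netns.
 */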
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p, __u32 fwmark)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (nt->collect_md) {
		if (rtnl_dereference(itn->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip_tunnel_find(itn, p, dev->type))
			return -EEXIST;
	}

	nt->net = net;
	nt->parms = *p;
	nt->fwmark = fwmark;
	err = register_netdevice(dev);
	if (err)
		goto err_register_netdevice;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (tb[IFLA_MTU]) {
		unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));

		if (dev->type == ARPHRD_ETHER)
			max -= dev->hard_header_len;

		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
	}

	err = dev_set_mtu(dev, mtu);
	if (err)
		goto err_dev_set_mtu;

	ip_tunnel_add(itn, nt);
	return 0;

err_dev_set_mtu:
	unregister_netdevice(dev);
err_register_netdevice:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p, __u32 fwmark)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);

int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->needs_free_netdev = true;
	dev->priv_destructor = ip_tunnel_dev_free;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		dst_cache_destroy(&tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version = 4;
	iph->ihl = 5;

	if (tunnel->collect_md)
		netif_keep_dst(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	ip_tunnel_del(itn, netdev_priv(dev));
	if (itn->fb_tunnel_dev == dev)
		WRITE_ONCE(itn->fb_tunnel_dev, NULL);

	dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do the least required initialization; the rest is done in the
 * tunnel_init call.
 */
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");