// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
#include <net/erspan.h>
#include <net/dst_metadata.h>
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
#define IP6_GRE_HASH_SIZE_SHIFT  5
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
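/* Each of the four lookup classes below gets its own table of
 * IP6_GRE_HASH_SIZE (1 << 5 = 32) buckets.
 */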
static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

	struct ip6_tnl __rcu *collect_md_tun;
	struct ip6_tnl __rcu *collect_md_tun_erspan;
	struct net_device *fb_tunnel_dev;
};
static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched to a configured keyless tunnel,
   will match the fallback tunnel.
 */
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}
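/* Illustrative example (not from the original source): for a key whose
 * CPU-order value is 0x12345678, HASH_KEY folds it as
 * 0x12345678 ^ 0x01234567 = 0x1317131f, masked to 0x1f, i.e. bucket 31.
 * The key is __be32, so the folded value differs between endiannesses;
 * only consistency between insertion and lookup matters here.
 */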
#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
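/* The lookup below walks the four classes from most to least specific.
 * Within each class an exact (link, dev_type) match returns immediately;
 * otherwise the closest candidate is scored (bit 0: link mismatch,
 * bit 1: dev_type mismatch) and the lowest score across all classes wins.
 */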
/* Given src, dst and key, find appropriate for input tunnel. */

static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
			gre_proto == htons(ETH_P_ERSPAN) ||
			gre_proto == htons(ETH_P_ERSPAN2)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;
	struct net_device *ndev;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;
		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;
		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;
		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;
		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;
		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand)
		return cand;

	if (gre_proto == htons(ETH_P_ERSPAN) ||
	    gre_proto == htons(ETH_P_ERSPAN2))
		t = rcu_dereference(ign->collect_md_tun_erspan);
	else
		t = rcu_dereference(ign->collect_md_tun);

	if (t && t->dev->flags & IFF_UP)
		return t;

	ndev = READ_ONCE(ign->fb_tunnel_dev);
	if (ndev && ndev->flags & IFF_UP)
		return netdev_priv(ndev);

	return NULL;
}
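/* Map tunnel parameters to one of the four tables above: bit 0 of prio
 * is set when a local address is specified, bit 1 when a unicast remote
 * address is specified, matching the tunnels_{wc,l,r,r_l} layout.
 */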
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
		const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = HASH_KEY(p->i_key);
	unsigned int prio = 0;

	if (!ipv6_addr_any(local))
		prio |= 1;
	if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH_ADDR(remote);
	}

	return &ign->tunnels[prio][h];
}
static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, t);
}

static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
}

static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, NULL);
}

static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
				       struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
}
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}

static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
					  const struct __ip6_tnl_parm *parms,
					  int type)
{
	const struct in6_addr *remote = &parms->raddr;
	const struct in6_addr *local = &parms->laddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip6_tnl *t;
	struct ip6_tnl __rcu **tp;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	for (tp = __ip6gre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t && create)
		return NULL;
	if (t || !create)
		return t;

	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			return NULL;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		strcpy(name, "ip6gre%d");
	}
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	nt->net = dev_net(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	ip6gre_tnl_link_config(nt, 1);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & TUNNEL_SEQ))
		dev->features |= NETIF_F_LLTX;

	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
static void ip6erspan_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6erspan_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	if (ign->fb_tunnel_dev == dev)
		WRITE_ONCE(ign->fb_tunnel_dev, NULL);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}
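/* ICMPv6 errors for packets we sent come back through the inet6_protocol
 * err_handler below; the embedded GRE header is parsed to find the
 * originating tunnel before acting on the error type.
 */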
static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *ipv6h;
	struct tnl_ptk_info tpi;
	struct ip6_tnl *t;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
			     offset) < 0)
		return -EINVAL;

	ipv6h = (const struct ipv6hdr *)skb->data;
	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
				 tpi.key, tpi.proto);
	if (!t)
		return -ENOENT;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		if (code != ICMPV6_PORT_UNREACH)
			break;
		return 0;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			break;
		}
		return 0;
	case ICMPV6_PARAMPROB: {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;

		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == be32_to_cpu(info) - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		return 0;
	}
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
		return 0;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		return 0;
	}

	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}
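/* Receive path for plain GRE: look up the tunnel by outer addresses and
 * key; a collect_md tunnel gets a metadata dst built from the GRE key so
 * that a metadata-based consumer (e.g. OVS or tc) can read the tunnel info.
 */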
static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	const struct ipv6hdr *ipv6h;
	struct ip6_tnl *tunnel;

	ipv6h = ipv6_hdr(skb);
	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		if (tunnel->parms.collect_md) {
			struct metadata_dst *tun_dst;
			__be64 tun_id;
			__be16 flags;

			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}
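/* ERSPAN receive: the ERSPAN base header sits right after the GRE header;
 * its version field decides how much metadata to pull and copy out.
 */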
static int ip6erspan_rcv(struct sk_buff *skb,
			 struct tnl_ptk_info *tpi,
			 int gre_hdr_len)
{
	struct erspan_base_hdr *ershdr;
	const struct ipv6hdr *ipv6h;
	struct erspan_md2 *md2;
	struct ip6_tnl *tunnel;
	u8 ver;

	ipv6h = ipv6_hdr(skb);
	ershdr = (struct erspan_base_hdr *)skb->data;
	ver = ershdr->ver;

	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		int len = erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb, len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			return PACKET_REJECT;

		if (tunnel->parms.collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct metadata_dst *tun_dst;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
						  sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			info = &tun_dst->u.tun_info;
			md = ip_tunnel_info_opts(info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
	if (hdr_len < 0)
		goto drop;

	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

out:
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
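/* The prepare_ip6gre_xmit_{ipv4,ipv6} helpers fill in the flow template,
 * DS field and encapsulation limit for natively configured (non
 * collect_md) tunnels before the shared __gre6_xmit() path runs.
 */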
static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
				     struct net_device *dev,
				     struct flowi6 *fl6, __u8 *dsfield,
				     int *encap_limit)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct ip6_tnl *t = netdev_priv(dev);

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		*encap_limit = t->parms.encap_limit;

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv4_get_dsfield(iph);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
}
static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi6 *fl6, __u8 *dsfield,
				    int *encap_limit)
{
	struct ipv6hdr *ipv6h;
	struct ip6_tnl *t = netdev_priv(dev);
	__u16 offset;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
	ipv6h = ipv6_hdr(skb);

	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		*encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
		*encap_limit = t->parms.encap_limit;
	}

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv6_get_dsfield(ipv6h);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6->flowlabel |= ip6_flowlabel(ipv6h);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	return 0;
}
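/* Common transmit tail for both address families: pick the destination,
 * push the GRE header (from the per-packet tunnel key in collect_md mode,
 * from tunnel parameters otherwise) and hand off to ip6_tnl_xmit().
 */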
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
			       struct net_device *dev, __u8 dsfield,
			       struct flowi6 *fl6, int encap_limit,
			       __u32 *pmtu, __be16 proto)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	__be16 protocol;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
		fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
	else
		fl6->daddr = tunnel->parms.raddr;

	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
		return -ENOMEM;

	/* Push GRE header. */
	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;

	if (tunnel->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		__be16 flags;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -EINVAL;

		key = &tun_info->key;
		memset(fl6, 0, sizeof(*fl6));
		fl6->flowi6_proto = IPPROTO_GRE;
		fl6->daddr = key->u.ipv6.dst;
		fl6->flowlabel = key->label;
		fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		flags = key->tun_flags &
			(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
		tunnel->tun_hlen = gre_calc_hlen(flags);

		gre_build_header(skb, tunnel->tun_hlen,
				 flags, protocol,
				 tunnel_id_to_key32(tun_info->key.tun_id),
				 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
						      : 0);
	} else {
		if (tunnel->parms.o_flags & TUNNEL_SEQ)
			tunnel->o_seqno++;

		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
				 protocol, tunnel->parms.o_key,
				 htonl(tunnel->o_seqno));
	}

	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
			    NEXTHDR_GRE);
}
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	if (!t->parms.collect_md)
		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
					 &dsfield, &encap_limit);

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			  skb->protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}
static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
		return -1;

	if (!t->parms.collect_md &&
	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
		return -1;

	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
			  &mtu, skb->protocol);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}
/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/
static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
	const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u32 mtu;
	int err;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	if (!t->parms.collect_md)
		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return err;

	err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);

	return err;
}
static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
	struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip6gre_xmit_ipv4(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6gre_xmit_ipv6(skb, dev);
		break;
	default:
		ret = ip6gre_xmit_other(skb, dev);
		break;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
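/* ERSPAN transmit differs from plain GRE: the payload may be truncated to
 * the device MTU (with the truncate bit set in the ERSPAN header), and a
 * v1 or v2 ERSPAN header is built before the fixed 8-byte GRE header that
 * carries only a sequence number.
 */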
static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	struct net_device_stats *stats;
	bool truncate = false;
	int encap_limit = -1;
	__u8 dsfield = 0;
	struct flowi6 fl6;
	int err = -EINVAL;
	__be16 proto;
	__u32 mtu;
	int nhoff;
	int thoff;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	if (gre_handle_offloads(skb, false))
		goto tx_err;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
		goto tx_err;

	t->parms.o_flags &= ~TUNNEL_KEY;
	IPCB(skb)->flags = 0;

	/* For collect_md mode, derive fl6 from the tunnel key,
	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
	 */
	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		struct erspan_metadata *md;
		__be32 tun_id;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			goto tx_err;

		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_GRE;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
			goto tx_err;
		if (tun_info->options_len < sizeof(*md))
			goto tx_err;
		md = ip_tunnel_info_opts(tun_info);

		tun_id = tunnel_id_to_key32(key->tun_id);
		if (md->version == 1) {
			erspan_build_header(skb,
					    ntohl(tun_id),
					    ntohl(md->u.index), truncate,
					    false);
		} else if (md->version == 2) {
			erspan_build_header_v2(skb,
					       ntohl(tun_id),
					       md->u.md2.dir,
					       get_hwid(&md->u.md2),
					       truncate, false);
		} else {
			goto tx_err;
		}
	} else {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
						 &dsfield, &encap_limit);
			break;
		case htons(ETH_P_IPV6):
			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
				goto tx_err;
			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
						     &dsfield, &encap_limit))
				goto tx_err;
			break;
		default:
			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
			break;
		}

		if (t->parms.erspan_ver == 1)
			erspan_build_header(skb, ntohl(t->parms.o_key),
					    t->parms.index,
					    truncate, false);
		else if (t->parms.erspan_ver == 2)
			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
					       t->parms.dir,
					       t->parms.hwid,
					       truncate, false);
		else
			goto tx_err;

		fl6.daddr = t->parms.raddr;
	}

	/* Push GRE header. */
	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
					   : htons(ETH_P_ERSPAN2);
	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));

	/* TooBig packet may have updated dst->dev's mtu */
	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   NEXTHDR_GRE);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE) {
			if (skb->protocol == htons(ETH_P_IP))
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED, htonl(mtu));
			else
				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}
		goto tx_err;
	}
	return NETDEV_TX_OK;

tx_err:
	stats = &t->dev->stats;
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;

	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;
	fl6->flowi6_proto = IPPROTO_GRE;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
			p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;
}
static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
					 int t_hlen)
{
	const struct __ip6_tnl_parm *p = &t->parms;
	struct net_device *dev = t->dev;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->needed_headroom = rt->dst.dev->hard_header_len +
					       t_hlen;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - t_hlen;
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;
				if (dev->type == ARPHRD_ETHER)
					dev->mtu -= ETH_HLEN;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		ip6_rt_put(rt);
	}
}
static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}
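/* Illustrative example (not from the original source): with
 * o_flags = TUNNEL_KEY | TUNNEL_SEQ and no extra encapsulation,
 * gre_calc_hlen() yields 4 (base) + 4 (key) + 4 (seq) = 12 bytes,
 * so t_hlen = 12 + sizeof(struct ipv6hdr) = 52.
 */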
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
}
static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
				     const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.i_flags = p->i_flags;
	t->parms.o_flags = p->o_flags;
	t->parms.fwmark = p->fwmark;
	t->parms.erspan_ver = p->erspan_ver;
	t->parms.index = p->index;
	t->parms.dir = p->dir;
	t->parms.hwid = p->hwid;
	dst_cache_reset(&t->dst_cache);
}
static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
			     int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6gre_tnl_link_config(t, set_mtu);
	return 0;
}
static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
	const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
	memcpy(p->name, u->name, sizeof(u->name));
}
static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
	const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	memcpy(u->name, p->name, sizeof(u->name));
}
static int ip6gre_tunnel_ioctl(struct net_device *dev,
	struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				t = netdev_priv(dev);
		}
		memset(&p, 0, sizeof(p));
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				t = netdev_priv(dev);
			}

			ip6gre_tunnel_unlink(ign, t);
			synchronize_net();
			ip6gre_tnl_change(t, &p1, 1);
			ip6gre_tunnel_link(ign, t);
			netdev_state_change(dev);
		}

		if (t) {
			err = 0;
			memset(&p, 0, sizeof(p));
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
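/* header_ops used by NOARP ip6gre devices with an unset remote: build a
 * complete IPv6 + GRE header template from the tunnel parameters, letting
 * the caller override the source/destination addresses per packet.
 */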
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	__be16 *p;

	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
	ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
						  t->fl.u.ip6.flowlabel,
						  true, &t->fl.u.ip6));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	p = (__be16 *)(ipv6h + 1);
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}
static const struct header_ops ip6gre_header_ops = {
	.create	= ip6gre_header,
};
static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init		= ip6gre_tunnel_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_do_ioctl		= ip6gre_tunnel_ioctl,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};
static void ip6gre_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}
static void ip6gre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;

	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	netif_keep_dst(dev);
	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}
#define GRE6_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_HW_CSUM)
static void ip6gre_tnl_init_features(struct net_device *dev)
{
	struct ip6_tnl *nt = netdev_priv(dev);

	dev->features		|= GRE6_FEATURES;
	dev->hw_features	|= GRE6_FEATURES;

	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
		    nt->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
static int ip6gre_tunnel_init_common(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;
	int t_hlen;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6gre_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	if (tunnel->parms.collect_md) {
		netif_keep_dst(dev);
	}
	ip6gre_tnl_init_features(dev);

	dev_hold(dev);
	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}
static int ip6gre_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	tunnel = netdev_priv(dev);

	if (tunnel->parms.collect_md)
		return 0;

	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

	if (ipv6_addr_any(&tunnel->parms.raddr))
		dev->header_ops = &ip6gre_header_ops;

	return 0;
}
static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	tunnel->hlen		= sizeof(struct ipv6hdr) + 4;

	dev_hold(dev);
}
static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler     = gre_rcv,
	.err_handler = ip6gre_err,
	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *dev, *aux;
	int prio;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
		    dev->rtnl_link_ops == &ip6gre_tap_ops ||
		    dev->rtnl_link_ops == &ip6erspan_tap_ops)
			unregister_netdevice_queue(dev, head);

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
			struct ip6_tnl *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t) {
				/* If dev is in the same netns, it has already
				 * been added to the list by the previous loop.
				 */
				if (!net_eq(dev_net(t->dev), net))
					unregister_netdevice_queue(t->dev,
								   head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}
static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	if (!net_has_fallback_tunnels(net))
		return 0;
	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
					  NET_NAME_UNKNOWN,
					  ip6gre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;

	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6gre_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit_batch = ip6gre_exit_batch_net,
	.id   = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};
static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}
static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct in6_addr daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
		if (ipv6_addr_any(&daddr))
			return -EINVAL;
	}

out:
	return ip6gre_tunnel_validate(tb, data, extack);
}
static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret, ver = 0;

	if (!data)
		return 0;

	ret = ip6gre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* ERSPAN Session ID only has 10-bit. Since we reuse
	 * 32-bit key field as ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;
	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
		if (ver != 1 && ver != 2)
			return -EINVAL;
	}

	if (ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);

			if (index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);

			if (dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}

		if (data[IFLA_GRE_ERSPAN_HWID]) {
			u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);

			if (hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}
static void ip6erspan_set_version(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	if (!data)
		return;

	parms->erspan_ver = 1;
	if (data[IFLA_GRE_ERSPAN_VER])
		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

	if (parms->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX])
			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
	} else if (parms->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR])
			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
		if (data[IFLA_GRE_ERSPAN_HWID])
			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
	}
}
static void ip6gre_netlink_parms(struct nlattr *data[],
				struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

	if (data[IFLA_GRE_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);

	if (data[IFLA_GRE_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);

	if (data[IFLA_GRE_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_COLLECT_METADATA])
		parms->collect_md = true;
}
static int ip6gre_tap_init(struct net_device *dev)
{
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return 0;
}
static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init		= ip6gre_tap_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};
static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->parms.erspan_ver);

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}
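/* Illustrative example (not from the original source): for ERSPAN v1 the
 * ERSPAN header is 8 bytes (4-byte base header + 4-byte index), so with
 * the fixed 8-byte GRE part hlen = 16 and t_hlen = 16 + 40 = 56 before
 * any extra encapsulation.
 */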
static int ip6erspan_tap_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int t_hlen;
	int ret;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6erspan_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip6erspan_tnl_link_config(tunnel, 1);

	dev_hold(dev);
	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}
static const struct net_device_ops ip6erspan_netdev_ops = {
	.ndo_init		= ip6erspan_tap_init,
	.ndo_uninit		= ip6erspan_tunnel_uninit,
	.ndo_start_xmit		= ip6erspan_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};
static void ip6gre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
				       struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}
static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt;
	struct ip_tunnel_encap ipencap;
	int err;

	nt = netdev_priv(dev);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	return 0;
}
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6gre_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}
static struct ip6_tnl *
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
			 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ign->fb_tunnel_dev)
		return ERR_PTR(-EINVAL);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return ERR_PTR(err);
	}

	ip6gre_netlink_parms(data, p_p);

	t = ip6gre_tunnel_locate(net, p_p, 0);

	if (t) {
		if (t->dev != dev)
			return ERR_PTR(-EEXIST);
	} else {
		t = nt;
	}

	return t;
}
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}
static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	if (dev != ign->fb_tunnel_dev)
		unregister_netdevice_queue(dev, head);
}
static size_t ip6gre_get_size(const struct net_device *dev)
{
	return
		nla_total_size(4) +	/* IFLA_GRE_LINK */
		nla_total_size(2) +	/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +	/* IFLA_GRE_OFLAGS */
		nla_total_size(4) +	/* IFLA_GRE_IKEY */
		nla_total_size(4) +	/* IFLA_GRE_OKEY */
		/* IFLA_GRE_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		nla_total_size(1) +	/* IFLA_GRE_TTL */
		nla_total_size(1) +	/* IFLA_GRE_ENCAP_LIMIT */
		nla_total_size(4) +	/* IFLA_GRE_FLOWINFO */
		nla_total_size(4) +	/* IFLA_GRE_FLAGS */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(0) +	/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(4) +	/* IFLA_GRE_FWMARK */
		nla_total_size(4) +	/* IFLA_GRE_ERSPAN_INDEX */
		0;
}
static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (p->erspan_ver == 1 || p->erspan_ver == 2) {
		if (!p->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
			goto nla_put_failure;

		if (p->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
				goto nla_put_failure;
		} else {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (p->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]        = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_IKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_OKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]       = { .len = sizeof_field(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE]      = { .len = sizeof_field(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL]         = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
	[IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
	[IFLA_GRE_ENCAP_TYPE]   = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]  = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]  = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
	[IFLA_GRE_FWMARK]       = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};
static void ip6erspan_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6erspan_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}
static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
			     struct nlattr *tb[], struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ip6erspan_set_version(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun_erspan))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6erspan_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}
static int ip6erspan_tnl_change(struct ip6_tnl *t,
				const struct __ip6_tnl_parm *p, int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6erspan_tnl_link_config(t, set_mtu);
	return 0;
}
static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
	struct __ip6_tnl_parm p;
	struct ip6_tnl *t;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6erspan_set_version(data, &p);
	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6erspan_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind		= "ip6gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tunnel_setup,
	.validate	= ip6gre_tunnel_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.dellink	= ip6gre_dellink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind		= "ip6gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tap_setup,
	.validate	= ip6gre_tap_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
	.kind		= "ip6erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6erspan_tap_setup,
	.validate	= ip6erspan_tap_validate,
	.newlink	= ip6erspan_newlink,
	.changelink	= ip6erspan_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
/*
 *	And now the modules code and kernel interface.
 */
static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&ip6erspan_tap_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	return err;
}
static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	rtnl_link_unregister(&ip6erspan_tap_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}
module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");
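
/* Illustrative usage from userspace (iproute2 syntax; shown as a sketch,
 * consult the iproute2 documentation for the authoritative options):
 *
 *   ip link add name gre1 type ip6gre local fc00::1 remote fc00::2
 *   ip link add name gretap1 type ip6gretap local fc00::1 remote fc00::2
 *   ip link add name erspan1 type ip6erspan seq key 10 \
 *           local fc00::1 remote fc00::2 erspan_ver 1 erspan 123
 */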