// SPDX-License-Identifier: GPL-2.0-only
/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ipv6_stubs.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tun_proto.h>
#include <net/vxlan.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1 << PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

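/* For example (hypothetical invocations): the IANA port can be selected as
 * the default at module load time with "modprobe vxlan udp_port=4789", or
 * per device via iproute2, e.g. "ip link add vxlan0 type vxlan id 42
 * dstport 4789 ...". The module parameter is read-only (0444) once loaded.
 */
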
static unsigned int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);

static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	__be32		  vni;
	u16		  flags;	/* see ndm_flags and below */
};

#define NTF_VXLAN_ADDED_BY_USER 0x100

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}

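/* Metadata (collect_md) mode is thus enabled either per socket, via the
 * VXLAN_F_COLLECT_METADATA flag, or globally for all tunnels when
 * ip_tunnel_collect_metadata() reports that a consumer (e.g. an "external"
 * tunnel device or a datapath such as OVS) has flipped the static key.
 */
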
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}

#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
	return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}

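/* The two accessors above differ only in the locking context they assume:
 * first_remote_rcu() must run inside an RCU read-side critical section,
 * while first_remote_rtnl() relies on RTNL (or the FDB hash lock) keeping
 * the remotes list stable.
 */
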
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags, int ifindex)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags &&
		    vs->sock->sk->sk_bound_dev_if == ifindex)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
					   __be32 vni)
{
	struct vxlan_dev_node *node;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
		if (node->vxlan->default_dst.remote_vni != vni)
			continue;

		if (IS_ENABLED(CONFIG_IPV6)) {
			const struct vxlan_config *cfg = &node->vxlan->cfg;

			if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
			    cfg->remote_ifindex != ifindex)
				continue;
		}

		return node->vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
					__be32 vni, sa_family_t family,
					__be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags, ifindex);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, ifindex, vni);
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
		ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
	} else
		ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	if (rdst->offloaded)
		ndm->ndm_flags |= NTF_OFFLOADED;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
		goto nla_put_failure;
	if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
	    nla_put_u32(skb, NDA_SRC_VNI,
			be32_to_cpu(fdb->vni)))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

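/* vxlan_nlmsg_size() is a worst case: it reserves room for every optional
 * attribute vxlan_fdb_info() may emit, which is why an -EMSGSIZE from
 * vxlan_fdb_info() is treated as a bug in __vxlan_fdb_notify() below.
 */
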
static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			       struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
					      const struct vxlan_fdb *fdb,
					      const struct vxlan_rdst *rd,
					      struct netlink_ext_ack *extack,
					      struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
	fdb_info->info.dev = vxlan->dev;
	fdb_info->info.extack = extack;
	fdb_info->remote_ip = rd->remote_ip;
	fdb_info->remote_port = rd->remote_port;
	fdb_info->remote_vni = rd->remote_vni;
	fdb_info->remote_ifindex = rd->remote_ifindex;
	memcpy(fdb_info->eth_addr, fdb->eth_addr, ETH_ALEN);
	fdb_info->vni = fdb->vni;
	fdb_info->offloaded = rd->offloaded;
	fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER;
}

static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
					      struct vxlan_fdb *fdb,
					      struct vxlan_rdst *rd,
					      bool adding,
					      struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_vxlan_fdb_info info;
	enum switchdev_notifier_type notifier_type;
	int ret;

	if (WARN_ON(!rd))
		return 0;

	notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE
			       : SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE;
	vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info);
	ret = call_switchdev_notifiers(notifier_type, vxlan->dev,
				       &info.info, extack);
	return notifier_to_errno(ret);
}

static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			    struct vxlan_rdst *rd, int type, bool swdev_notify,
			    struct netlink_ext_ack *extack)
{
	int err;

	if (swdev_notify && rd) {
		switch (type) {
		case RTM_NEWNEIGH:
			err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
								 true, extack);
			if (err)
				return err;
			break;
		case RTM_DELNEIGH:
			vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
							   false, extack);
			break;
		}
	}

	__vxlan_fdb_notify(vxlan, fdb, rd, type);
	return 0;
}

static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = cpu_to_be32(VXLAN_N_VID),
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
	value >>= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
{
	/* use 1 byte of OUI and 3 bytes of NIC */
	u32 key = get_unaligned((u32 *)(addr + 2));

	return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}

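/* Two hash flavours: eth_hash() keys on the MAC alone, which is enough when
 * a device serves a single VNI; eth_vni_hash() mixes the VNI into the key
 * for metadata-based (collect_md) devices, where one netdev carries FDB
 * entries for many VNIs. vxlan_fdb_head() below picks between them.
 */
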
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac, __be32 vni)
{
	if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
		return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
	else
		return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac, __be32 vni)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr)) {
			if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
				if (vni == f->vni)
					return f;
			} else {
				return f;
			}
		}
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac, __be32 vni)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac, vni);
	if (f && f->used != jiffies)
		f->used = jiffies;

	return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __be32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}

int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
		      struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	u8 eth_addr[ETH_ALEN + 2] = { 0 };
	struct vxlan_rdst *rdst;
	struct vxlan_fdb *f;
	int rc = 0;

	if (is_multicast_ether_addr(mac) ||
	    is_zero_ether_addr(mac))
		return -EINVAL;

	ether_addr_copy(eth_addr, mac);

	rcu_read_lock();

	f = __vxlan_find_mac(vxlan, eth_addr, vni);
	if (!f) {
		rc = -ENOENT;
		goto out;
	}

	rdst = first_remote_rcu(f);
	vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info);

out:
	rcu_read_unlock();
	return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc);

static int vxlan_fdb_notify_one(struct notifier_block *nb,
				const struct vxlan_dev *vxlan,
				const struct vxlan_fdb *f,
				const struct vxlan_rdst *rdst,
				struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_vxlan_fdb_info fdb_info;
	int rc;

	vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info);
	rc = nb->notifier_call(nb, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
			       &fdb_info);
	return notifier_to_errno(rc);
}

int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
		     struct notifier_block *nb,
		     struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan;
	struct vxlan_rdst *rdst;
	struct vxlan_fdb *f;
	unsigned int h;
	int rc = 0;

	if (!netif_is_vxlan(dev))
		return -EINVAL;
	vxlan = netdev_priv(dev);

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
			if (f->vni == vni) {
				list_for_each_entry(rdst, &f->remotes, list) {
					rc = vxlan_fdb_notify_one(nb, vxlan,
								  f, rdst,
								  extack);
					if (rc)
						goto out;
				}
			}
		}
	}
out:
	spin_unlock_bh(&vxlan->hash_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_replay);

void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
{
	struct vxlan_dev *vxlan;
	struct vxlan_rdst *rdst;
	struct vxlan_fdb *f;
	unsigned int h;

	if (!netif_is_vxlan(dev))
		return;
	vxlan = netdev_priv(dev);

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
			if (f->vni == vni)
				list_for_each_entry(rdst, &f->remotes, list)
					rdst->offloaded = false;
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __be32 vni,
			     __u32 ifindex, struct vxlan_rdst *oldrd)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;

	*oldrd = *rd;
	dst_cache_reset(&rd->dst_cache);
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	rd->offloaded = false;
	return 1;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __be32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;

	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
		kfree(rd);
		return -ENOBUFS;
	}

	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->offloaded = false;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}

static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  __be32 vni_field,
					  struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = vxlan_rco_start(vni_field);
	offset = start + vxlan_rco_offset(vni_field);

	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
				     start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}

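/* Remote checksum offload (RCO): the inner checksum start and offset are
 * encoded in the low, otherwise-reserved bits of the VNI field (see
 * vxlan_rco_start()/vxlan_rco_offset()), so the receiver can complete the
 * inner checksum instead of the sender computing it over the whole frame.
 */
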
static struct sk_buff *vxlan_gro_receive(struct sock *sk,
					 struct list_head *head,
					 struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
	__be32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = vh->vx_flags;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       vh->vx_vni, &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));

		if (!vh)
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = call_gro_receive(eth_gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);

	return pp;
}

static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	/* Sets 'skb->inner_mac_header' since we are always called with
	 * 'skb->encapsulation' set.
	 */
	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}

static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
					 const u8 *mac, __u16 state,
					 __be32 src_vni, __u16 ndm_flags)
{
	struct vxlan_fdb *f;

	f = kmalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return NULL;
	f->state = state;
	f->flags = ndm_flags;
	f->updated = f->used = jiffies;
	f->vni = src_vni;
	INIT_LIST_HEAD(&f->remotes);
	memcpy(f->eth_addr, mac, ETH_ALEN);

	return f;
}

static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
			     __be32 src_vni, struct vxlan_fdb *f)
{
	++vxlan->addrcnt;
	hlist_add_head_rcu(&f->hlist,
			   vxlan_fdb_head(vxlan, mac, src_vni));
}

static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __be16 port, __be32 src_vni,
			    __be32 vni, __u32 ifindex, __u16 ndm_flags,
			    struct vxlan_fdb **fdb)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int rc;

	if (vxlan->cfg.addrmax &&
	    vxlan->addrcnt >= vxlan->cfg.addrmax)
		return -ENOSPC;

	netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
	f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
	if (!f)
		return -ENOMEM;

	rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
	if (rc < 0) {
		kfree(f);
		return rc;
	}

	*fdb = f;

	return 0;
}

static void __vxlan_fdb_free(struct vxlan_fdb *f)
{
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list) {
		dst_cache_destroy(&rd->dst_cache);
		kfree(rd);
	}
	kfree(f);
}

static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);

	__vxlan_fdb_free(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
			      bool do_notify, bool swdev_notify)
{
	struct vxlan_rdst *rd;

	netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	if (do_notify)
		list_for_each_entry(rd, &f->remotes, list)
			vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
					 swdev_notify, NULL);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}

static void vxlan_dst_free(struct rcu_head *head)
{
	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

	dst_cache_destroy(&rd->dst_cache);
	kfree(rd);
}

static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
				     union vxlan_addr *ip,
				     __u16 state, __u16 flags,
				     __be16 port, __be32 vni,
				     __u32 ifindex, __u16 ndm_flags,
				     struct vxlan_fdb *f,
				     bool swdev_notify,
				     struct netlink_ext_ack *extack)
{
	__u16 fdb_flags = (ndm_flags & ~NTF_USE);
	struct vxlan_rdst *rd = NULL;
	struct vxlan_rdst oldrd;
	int notify = 0;
	int rc = 0;
	int err;

	/* Do not allow an externally learned entry to take over an entry added
	 * by the user.
	 */
	if (!(fdb_flags & NTF_EXT_LEARNED) ||
	    !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != fdb_flags) {
			f->flags = fdb_flags;
			f->updated = jiffies;
			notify = 1;
		}
	}

	if ((flags & NLM_F_REPLACE)) {
		/* Only change unicasts */
		if (!(is_multicast_ether_addr(f->eth_addr) ||
		      is_zero_ether_addr(f->eth_addr))) {
			rc = vxlan_fdb_replace(f, ip, port, vni,
					       ifindex, &oldrd);
			notify |= rc;
		} else {
			return -EOPNOTSUPP;
		}
	}
	if ((flags & NLM_F_APPEND) &&
	    (is_multicast_ether_addr(f->eth_addr) ||
	     is_zero_ether_addr(f->eth_addr))) {
		rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

		if (rc < 0)
			return rc;
		notify |= rc;
	}

	if (ndm_flags & NTF_USE)
		f->used = jiffies;

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);

		err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH,
				       swdev_notify, extack);
		if (err)
			goto err_notify;
	}

	return 0;

err_notify:
	if ((flags & NLM_F_REPLACE) && rc)
		*rd = oldrd;
	else if ((flags & NLM_F_APPEND) && rc) {
		list_del_rcu(&rd->list);
		call_rcu(&rd->rcu, vxlan_dst_free);
	}
	return err;
}

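/* If the switchdev notifier rejects the update, err_notify above rolls the
 * entry back: a replaced destination is restored from the saved oldrd, and
 * an appended destination is unlinked again and freed via RCU.
 */
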
static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
				   const u8 *mac, union vxlan_addr *ip,
				   __u16 state, __u16 flags,
				   __be16 port, __be32 src_vni, __be32 vni,
				   __u32 ifindex, __u16 ndm_flags,
				   bool swdev_notify,
				   struct netlink_ext_ack *extack)
{
	__u16 fdb_flags = (ndm_flags & ~NTF_USE);
	struct vxlan_fdb *f;
	int rc;

	/* Disallow replace to add a multicast entry */
	if ((flags & NLM_F_REPLACE) &&
	    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
		return -EOPNOTSUPP;

	netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
	rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
			      vni, ifindex, fdb_flags, &f);
	if (rc < 0)
		return rc;

	vxlan_fdb_insert(vxlan, mac, src_vni, f);
	rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
			      swdev_notify, extack);
	if (rc)
		goto err_notify;

	return 0;

err_notify:
	vxlan_fdb_destroy(vxlan, f, false, false);
	return rc;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_update(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __be32 src_vni, __be32 vni,
			    __u32 ifindex, __u16 ndm_flags,
			    bool swdev_notify,
			    struct netlink_ext_ack *extack)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac, src_vni);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}

		return vxlan_fdb_update_existing(vxlan, ip, state, flags, port,
						 vni, ifindex, ndm_flags, f,
						 swdev_notify, extack);
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		return vxlan_fdb_update_create(vxlan, mac, ip, state, flags,
					       port, src_vni, vni, ifindex,
					       ndm_flags, swdev_notify, extack);
	}
}

static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
				  struct vxlan_rdst *rd, bool swdev_notify)
{
	list_del_rcu(&rd->list);
	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL);
	call_rcu(&rd->rcu, vxlan_dst_free);
}

static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
			   __be32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;

		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->cfg.dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_SRC_VNI]) {
		if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
			return -EINVAL;
		*src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
	} else {
		*src_vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags,
			 struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	__be32 src_vni, vni;
	u32 ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, src_vni, vni, ifindex,
			       ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
			       true, extack);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
			      const unsigned char *addr, union vxlan_addr ip,
			      __be16 port, __be32 src_vni, __be32 vni,
			      u32 ifindex, bool swdev_notify)
{
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	int err = -ENOENT;

	f = vxlan_find_mac(vxlan, addr, src_vni);
	if (!f)
		return err;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f, true, swdev_notify);

out:
	return 0;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	union vxlan_addr ip;
	__be32 src_vni, vni;
	__be16 port;
	u32 ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
	if (err)
		return err;

	spin_lock_bh(&vxlan->hash_lock);
	err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
				 true);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int *idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int err = 0;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				if (*idx < cb->args[2])
					goto skip;

				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
skip:
				*idx += 1;
			}
		}
	}
out:
	return err;
}

static int vxlan_fdb_get(struct sk_buff *skb,
			 struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr,
			 u16 vid, u32 portid, u32 seq,
			 struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	__be32 vni;
	int err;

	if (tb[NDA_VNI])
		vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
	else
		vni = vxlan->default_dst.remote_vni;

	rcu_read_lock();

	f = __vxlan_find_mac(vxlan, addr, vni);
	if (!f) {
		NL_SET_ERR_MSG(extack, "Fdb entry not found");
		err = -ENOENT;
		goto errout;
	}

	err = vxlan_fdb_info(skb, vxlan, f, portid, seq,
			     RTM_NEWNEIGH, 0, first_remote_rcu(f));
errout:
	rcu_read_unlock();
	return err;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac,
			u32 src_ifindex, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	u32 ifindex = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (src_ip->sa.sa_family == AF_INET6 &&
	    (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
		ifindex = src_ifindex;
#endif

	f = vxlan_find_mac(vxlan, src_mac, vni);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
			   rdst->remote_ifindex == ifindex))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & (NUD_PERMANENT | NUD_NOARP))
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_update(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->cfg.dst_port,
					 vni,
					 vxlan->default_dst.remote_vni,
					 ifindex, NTF_SELF, true, NULL);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;
	struct vxlan_sock *sock4;
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6;
#endif
	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;

	sock4 = rtnl_dereference(dev->vn4_sock);

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
		return false;
#if IS_ENABLED(CONFIG_IPV6)
	sock6 = rtnl_dereference(dev->vn6_sock);
	if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
		return false;
#endif

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (family == AF_INET &&
		    rtnl_dereference(vxlan->vn4_sock) != sock4)
			continue;
#if IS_ENABLED(CONFIG_IPV6)
		if (family == AF_INET6 &&
		    rtnl_dereference(vxlan->vn6_sock) != sock6)
			continue;
#endif

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}

static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
	struct vxlan_net *vn;

	if (!vs)
		return false;
	if (!refcount_dec_and_test(&vs->refcnt))
		return false;

	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	udp_tunnel_notify_del_rx_port(vs->sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	return true;
}

static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

	RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
#endif

	RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
	synchronize_net();

	vxlan_vs_del_dev(vxlan);

	if (__vxlan_sock_release_prep(sock4)) {
		udp_tunnel_sock_release(sock4->sock);
		kfree(sock4);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (__vxlan_sock_release_prep(sock6)) {
		udp_tunnel_sock_release(sock6->sock);
		kfree(sock6);
	}
#endif
}

/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_join_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_leave_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

static bool vxlan_remcsum(struct vxlanhdr *unparsed,
			  struct sk_buff *skb, u32 vxflags)
{
	size_t start, offset;

	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
		goto out;

	start = vxlan_rco_start(unparsed->vx_vni);
	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	if (!pskb_may_pull(skb, offset + sizeof(u16)))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
	unparsed->vx_flags &= ~VXLAN_HF_RCO;
	unparsed->vx_vni &= VXLAN_VNI_MASK;
	return true;
}

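/* On success the RCO flag and offset bits are cleared from "unparsed", so
 * the catch-all check in vxlan_rcv() for leftover reserved bits does not
 * mistake a fully-processed extension for a malformed packet.
 */
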
static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
				struct sk_buff *skb, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
	struct metadata_dst *tun_dst;

	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
		goto out;

	md->gbp = ntohs(gbp->policy_id);

	tun_dst = (struct metadata_dst *)skb_dst(skb);
	if (tun_dst) {
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
		tun_dst->u.tun_info.options_len = sizeof(*md);
	}
	if (gbp->dont_learn)
		md->gbp |= VXLAN_GBP_DONT_LEARN;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;

	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vxflags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;
out:
	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}

static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
				__be16 *protocol,
				struct sk_buff *skb, u32 vxflags)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;

	/* Need to have Next Protocol set for interfaces in GPE mode. */
	if (!gpe->np_applied)
		return false;
	/* "The initial version is 0. If a receiver does not support the
	 * version indicated it MUST drop the packet.
	 */
	if (gpe->version != 0)
		return false;
	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
	 * processing MUST occur." However, we don't implement OAM
	 * processing, thus drop the packet.
	 */
	if (gpe->oam_flag)
		return false;

	*protocol = tun_p_to_eth_p(gpe->next_protocol);
	if (!*protocol)
		return false;

	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
	return true;
}

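/* With GPE the inner payload need not be Ethernet: next_protocol selects
 * the payload type and tun_p_to_eth_p() maps it to an ethertype, which
 * vxlan_rcv() then uses on its header-less ("raw_proto") path.
 */
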
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
			  struct vxlan_sock *vs,
			  struct sk_buff *skb, __be32 vni)
{
	union vxlan_addr saddr;
	u32 ifindex = skb->dev->ifindex;

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		return false;

	/* Get address from the outer IP header */
	if (vxlan_get_sk_family(vs) == AF_INET) {
		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
		return false;

	return true;
}

static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
				  struct sk_buff *skb)
{
	int err = 0;

	if (vxlan_get_sk_family(vs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err) && log_ecn_error) {
		if (vxlan_get_sk_family(vs) == AF_INET)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &((struct iphdr *)oiph)->saddr,
					     ((struct iphdr *)oiph)->tos);
		else
			net_info_ratelimited("non-ECT from %pI6\n",
					     &((struct ipv6hdr *)oiph)->saddr);
	}
	return err <= 1;
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 protocol = htons(ETH_P_TEB);
	bool raw_proto = false;
	void *oiph;
	__be32 vni = 0;

	/* Need UDP and VXLAN header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto drop;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		goto drop;
	}
	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);

	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
	if (!vxlan)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_GPE) {
		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
			goto drop;
		raw_proto = true;
	}

	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
		goto drop;

	if (vxlan_collect_metadata(vs)) {
		struct metadata_dst *tun_dst;

		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
					 key32_to_tunnel_id(vni), sizeof(*md));

		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together. This is
	 * ensured in vxlan_dev_configure.
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
		 * fields are to be ignored. The approach here maintains
		 * compatibility with previous stack code, and also is more
		 * robust and provides a little more security in adding
		 * extensions to VXLAN.
		 */
		goto drop;
	}

	if (!raw_proto) {
		if (!vxlan_set_mac(vxlan, vs, skb, vni))
			goto drop;
	} else {
		skb_reset_mac_header(skb);
		skb->dev = vxlan->dev;
		skb->pkt_type = PACKET_HOST;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	rcu_read_lock();

	if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
		rcu_read_unlock();
		atomic_long_inc(&vxlan->dev->rx_dropped);
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&vxlan->gro_cells, skb);

	rcu_read_unlock();

	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}

/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr *hdr;
	__be32 vni;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + VXLAN_HLEN))
		return -EINVAL;

	hdr = vxlan_hdr(skb);

	if (!(hdr->vx_flags & VXLAN_HF_VNI))
		return -EINVAL;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		return -ENOENT;

	vni = vxlan_vni(hdr->vx_vni);
	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
	if (!vxlan)
		return -ENOENT;

	return 0;
}

static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff	*reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha, vni);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}

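/* In proxy mode (VXLAN_F_PROXY) the device answers ARP requests (and, below,
 * IPv6 neighbour solicitations) locally from the neighbour table instead of
 * flooding them over the tunnel; targets that cannot be resolved optionally
 * raise an L3 miss notification for a userspace control daemon.
 */
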
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
	struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL || !pskb_may_pull(request, request->len))
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_reset_mac_header(reply);

	ns = (struct nd_msg *)(ipv6_hdr(request) + 1);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_network_offset(request) -
		sizeof(struct ipv6hdr) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_reset_network_header(reply);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_reset_transport_header(reply);

	/* Neighbor Advertisement */
	na = skb_put_zero(reply, sizeof(*na) + na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}

static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct in6_addr *daddr;
	const struct ipv6hdr *iphdr;
	struct inet6_dev *in6_dev;
	struct neighbour *n;
	struct nd_msg *msg;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	iphdr = ipv6_hdr(skb);
	daddr = &iphdr->daddr;
	msg = (struct nd_msg *)(iphdr + 1);

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha, vni);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}

static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= VXLAN_HF_GBP;

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}

static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
			       __be16 protocol)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;

	gpe->np_applied = 1;
	gpe->next_protocol = tun_p_from_eth_p(protocol);
	if (!gpe->next_protocol)
		return -EPFNOSUPPORT;
	return 0;
}

static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
			   int iphdr_len, __be32 vni,
			   struct vxlan_metadata *md, u32 vxflags,
			   bool udp_sum)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 inner_protocol = htons(ETH_P_TEB);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check)))
			type |= SKB_GSO_TUNNEL_REMCSUM;
	}

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + iphdr_len;

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	vxh = __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vni);

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		unsigned int start;

		start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
		vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
		vxh->vx_flags |= VXLAN_HF_RCO;

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);
	if (vxflags & VXLAN_F_GPE) {
		err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
		if (err < 0)
			return err;
		inner_protocol = skb->protocol;
	}

	skb_set_inner_protocol(skb, inner_protocol);
	return 0;
}

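/* vxlan_build_skb() pushes only the 8-byte VXLAN header and sets the GSO
 * tunnel type; the outer UDP and IP headers are added later by
 * udp_tunnel_xmit_skb()/udp_tunnel6_xmit_skb() in vxlan_xmit_one().
 */
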
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
				      struct vxlan_sock *sock4,
				      struct sk_buff *skb, int oif, u8 tos,
				      __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
				      struct dst_cache *dst_cache,
				      const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	if (!sock4)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = oif;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = *saddr;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;

	rt = ip_route_output_key(vxlan->net, &fl4);
	if (likely(!IS_ERR(rt))) {
		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n", &daddr);
			ip_rt_put(rt);
			return ERR_PTR(-ELOOP);
		}

		*saddr = fl4.saddr;
		if (use_cache)
			dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
	} else {
		netdev_dbg(dev, "no route to %pI4\n", &daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	return rt;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
					  struct net_device *dev,
					  struct vxlan_sock *sock6,
					  struct sk_buff *skb, int oif, u8 tos,
					  __be32 label,
					  const struct in6_addr *daddr,
					  struct in6_addr *saddr,
					  __be16 dport, __be16 sport,
					  struct dst_cache *dst_cache,
					  const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct dst_entry *ndst;
	struct flowi6 fl6;
	int err;

	if (!sock6)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		ndst = dst_cache_get_ip6(dst_cache, saddr);
		if (ndst)
			return ndst;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.daddr = *daddr;
	fl6.saddr = *saddr;
	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = IPPROTO_UDP;
	fl6.fl6_dport = dport;
	fl6.fl6_sport = sport;

	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
					 sock6->sock->sk,
					 &ndst, &fl6);
	if (unlikely(err < 0)) {
		netdev_dbg(dev, "no route to %pI6\n", daddr);
		return ERR_PTR(-ENETUNREACH);
	}

	if (unlikely(ndst->dev == dev)) {
		netdev_dbg(dev, "circular route to %pI6\n", daddr);
		dst_release(ndst);
		return ERR_PTR(-ELOOP);
	}

	*saddr = fl6.saddr;
	if (use_cache)
		dst_cache_set_ip6(dst_cache, ndst, saddr);
	return ndst;
}
#endif

/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan, __be32 vni)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
	struct net_device *dev;
	int len = skb->len;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family = AF_INET6;
#endif
	}

	rcu_read_lock();
	dev = skb->dev;
	if (unlikely(!(dev->flags & IFF_UP))) {
		kfree_skb(skb);
		goto drop;
	}

	if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
		vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
drop:
		dev->stats.rx_dropped++;
	}
	rcu_read_unlock();
}

static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
				 struct vxlan_dev *vxlan,
				 union vxlan_addr *daddr,
				 __be16 dst_port, int dst_ifindex, __be32 vni,
				 struct dst_entry *dst,
				 u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
	 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple
	 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry.
	 */
	BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
#endif
	/* Bypass encapsulation if the destination is local */
	if (rt_flags & RTCF_LOCAL &&
	    !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
		struct vxlan_dev *dst_vxlan;

		dst_release(dst);
		dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
					   daddr->sa.sa_family, dst_port,
					   vxlan->cfg.flags);
		if (!dst_vxlan) {
			dev->stats.tx_errors++;
			kfree_skb(skb);

			return -ENOENT;
		}
		vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
		return 1;
	}

	return 0;
}

2399 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2400 __be32 default_vni, struct vxlan_rdst *rdst,
2403 struct dst_cache *dst_cache;
2404 struct ip_tunnel_info *info;
2405 struct vxlan_dev *vxlan = netdev_priv(dev);
2406 const struct iphdr *old_iph = ip_hdr(skb);
2407 union vxlan_addr *dst;
2408 union vxlan_addr remote_ip, local_ip;
2409 struct vxlan_metadata _md;
2410 struct vxlan_metadata *md = &_md;
2411 __be16 src_port = 0, dst_port;
2412 struct dst_entry *ndst = NULL;
2417 u32 flags = vxlan->cfg.flags;
2418 bool udp_sum = false;
2419 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
2421 info = skb_tunnel_info(skb);
2424 dst = &rdst->remote_ip;
2425 if (vxlan_addr_any(dst)) {
2427 /* short-circuited back to local bridge */
2428 vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
2434 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
2435 vni = (rdst->remote_vni) ? : default_vni;
2436 ifindex = rdst->remote_ifindex;
2437 local_ip = vxlan->cfg.saddr;
2438 dst_cache = &rdst->dst_cache;
2439 md->gbp = skb->mark;
2440 if (flags & VXLAN_F_TTL_INHERIT) {
2441 ttl = ip_tunnel_get_ttl(old_iph, skb);
2443 ttl = vxlan->cfg.ttl;
2444 if (!ttl && vxlan_addr_multicast(dst))
2448 tos = vxlan->cfg.tos;
2450 tos = ip_tunnel_get_dsfield(old_iph, skb);
2452 if (dst->sa.sa_family == AF_INET)
2453 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
2455 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2456 label = vxlan->cfg.label;
2459 WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
2463 remote_ip.sa.sa_family = ip_tunnel_info_af(info);
2464 if (remote_ip.sa.sa_family == AF_INET) {
2465 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
2466 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
2468 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
2469 local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
2472 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
2473 vni = tunnel_id_to_key32(info->key.tun_id);
2475 dst_cache = &info->dst_cache;
2476 if (info->options_len &&
2477 info->key.tun_flags & TUNNEL_VXLAN_OPT)
2478 md = ip_tunnel_info_opts(info);
2479 ttl = info->key.ttl;
2480 tos = info->key.tos;
2481 label = info->key.label;
2482 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
2484 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2485 vxlan->cfg.port_max, true);
2488 if (dst->sa.sa_family == AF_INET) {
2489 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2494 ifindex = sock4->sock->sk->sk_bound_dev_if;
2496 rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
2497 dst->sin.sin_addr.s_addr,
2498 &local_ip.sin.sin_addr.s_addr,
2507 /* Bypass encapsulation if the destination is local */
2508 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2509 dst_port, ifindex, vni,
2510 &rt->dst, rt->rt_flags);
2514 if (vxlan->cfg.df == VXLAN_DF_SET) {
2516 } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) {
2517 struct ethhdr *eth = eth_hdr(skb);
2519 if (ntohs(eth->h_proto) == ETH_P_IPV6 ||
2520 (ntohs(eth->h_proto) == ETH_P_IP &&
2521 old_iph->frag_off & htons(IP_DF)))
2524 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
2529 skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
2531 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2532 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
2533 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
2534 vni, md, flags, udp_sum);
2538 udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
2539 dst->sin.sin_addr.s_addr, tos, ttl, df,
2540 src_port, dst_port, xnet, !udp_sum);
2541 #if IS_ENABLED(CONFIG_IPV6)
2543 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2546 ifindex = sock6->sock->sk->sk_bound_dev_if;
2548 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
2549 label, &dst->sin6.sin6_addr,
2550 &local_ip.sin6.sin6_addr,
2554 err = PTR_ERR(ndst);
2560 u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
2562 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2563 dst_port, ifindex, vni,
2569 skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
2571 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2572 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2573 skb_scrub_packet(skb, xnet);
2574 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
2575 vni, md, flags, udp_sum);
2579 udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
2580 &local_ip.sin6.sin6_addr,
2581 &dst->sin6.sin6_addr, tos, ttl,
2582 label, src_port, dst_port, !udp_sum);
2590 dev->stats.tx_dropped++;
2597 dev->stats.collisions++;
2598 else if (err == -ENETUNREACH)
2599 dev->stats.tx_carrier_errors++;
2601 dev->stats.tx_errors++;
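/* Worked headroom example (macro values come from include/net/vxlan.h,
 * not from this file): VXLAN_HEADROOM is outer IPv4 (20) + UDP (8) +
 * VXLAN header (8) + inner Ethernet (14) = 50 bytes; VXLAN6_HEADROOM
 * swaps the IPv4 header for IPv6 (40), giving 70 bytes. These are the
 * values passed to skb_tunnel_check_pmtu() above.
 */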
2605 /* Transmit local packets over VXLAN
2607 * The outer IP header inherits ECN and DF from the inner header.
2608 * The outer UDP destination is the configured VXLAN port.
2609 * The source port is based on a hash of the flow.
2611 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2613 struct vxlan_dev *vxlan = netdev_priv(dev);
2614 struct vxlan_rdst *rdst, *fdst = NULL;
2615 const struct ip_tunnel_info *info;
2616 bool did_rsc = false;
2617 struct vxlan_fdb *f;
2621 info = skb_tunnel_info(skb);
2623 skb_reset_mac_header(skb);
2625 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
2626 if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
2627 info->mode & IP_TUNNEL_INFO_TX) {
2628 vni = tunnel_id_to_key32(info->key.tun_id);
2630 if (info && info->mode & IP_TUNNEL_INFO_TX)
2631 vxlan_xmit_one(skb, dev, vni, NULL, false);
2634 return NETDEV_TX_OK;
2638 if (vxlan->cfg.flags & VXLAN_F_PROXY) {
2640 if (ntohs(eth->h_proto) == ETH_P_ARP)
2641 return arp_reduce(dev, skb, vni);
2642 #if IS_ENABLED(CONFIG_IPV6)
2643 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
2644 pskb_may_pull(skb, sizeof(struct ipv6hdr) +
2645 sizeof(struct nd_msg)) &&
2646 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
2647 struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
2649 if (m->icmph.icmp6_code == 0 &&
2650 m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
2651 return neigh_reduce(dev, skb, vni);
2657 f = vxlan_find_mac(vxlan, eth->h_dest, vni);
2660 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
2661 (ntohs(eth->h_proto) == ETH_P_IP ||
2662 ntohs(eth->h_proto) == ETH_P_IPV6)) {
2663 did_rsc = route_shortcircuit(dev, skb);
2665 f = vxlan_find_mac(vxlan, eth->h_dest, vni);
2669 f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
2671 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
2672 !is_multicast_ether_addr(eth->h_dest))
2673 vxlan_fdb_miss(vxlan, eth->h_dest);
2675 dev->stats.tx_dropped++;
2677 return NETDEV_TX_OK;
2681 list_for_each_entry_rcu(rdst, &f->remotes, list) {
2682 struct sk_buff *skb1;
2688 skb1 = skb_clone(skb, GFP_ATOMIC);
2690 vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
2694 vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
2697 return NETDEV_TX_OK;
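/* Usage sketch (assuming iproute2's bridge(8); addresses are hypothetical):
 * flood destinations for unknown MACs are extra remotes appended to the
 * all-zeros FDB entry, e.g.
 *
 *   bridge fdb append 00:00:00:00:00:00 dev vxlan0 dst 203.0.113.7
 *
 * The loop above then sends a clone of the skb to each additional remote,
 * with the original finally sent to the first remote (fdst).
 */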
2700 /* Walk the forwarding table and purge stale entries */
2701 static void vxlan_cleanup(struct timer_list *t)
2703 struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
2704 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
2707 if (!netif_running(vxlan->dev))
2710 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2711 struct hlist_node *p, *n;
2713 spin_lock(&vxlan->hash_lock);
2714 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2716 = container_of(p, struct vxlan_fdb, hlist);
2717 unsigned long timeout;
2719 if (f->state & (NUD_PERMANENT | NUD_NOARP))
2722 if (f->flags & NTF_EXT_LEARNED)
2725 timeout = f->used + vxlan->cfg.age_interval * HZ;
2726 if (time_before_eq(timeout, jiffies)) {
2727 netdev_dbg(vxlan->dev,
2728 "garbage collect %pM\n",
2730 f->state = NUD_STALE;
2731 vxlan_fdb_destroy(vxlan, f, true, true);
2732 } else if (time_before(timeout, next_timer))
2733 next_timer = timeout;
2735 spin_unlock(&vxlan->hash_lock);
2738 mod_timer(&vxlan->age_timer, next_timer);
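/* Ageing example: with cfg.age_interval set to N seconds, an entry whose
 * last "used" stamp is older than N * HZ jiffies is purged on the next
 * scan; the timer above then reschedules itself to the earliest pending
 * timeout, bounded by FDB_AGE_INTERVAL. NUD_PERMANENT, NUD_NOARP and
 * externally learned entries are never aged out.
 */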
2741 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
2743 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2745 spin_lock(&vn->sock_lock);
2746 hlist_del_init_rcu(&vxlan->hlist4.hlist);
2747 #if IS_ENABLED(CONFIG_IPV6)
2748 hlist_del_init_rcu(&vxlan->hlist6.hlist);
2750 spin_unlock(&vn->sock_lock);
2753 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
2754 struct vxlan_dev_node *node)
2756 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2757 __be32 vni = vxlan->default_dst.remote_vni;
2759 node->vxlan = vxlan;
2760 spin_lock(&vn->sock_lock);
2761 hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
2762 spin_unlock(&vn->sock_lock);
2765 /* Set up per-CPU stats when the device is created */
2766 static int vxlan_init(struct net_device *dev)
2768 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2775 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
2777 struct vxlan_fdb *f;
2779 spin_lock_bh(&vxlan->hash_lock);
2780 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
2782 vxlan_fdb_destroy(vxlan, f, true, true);
2783 spin_unlock_bh(&vxlan->hash_lock);
2786 static void vxlan_uninit(struct net_device *dev)
2788 struct vxlan_dev *vxlan = netdev_priv(dev);
2790 gro_cells_destroy(&vxlan->gro_cells);
2792 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
2794 free_percpu(dev->tstats);
2797 /* Start ageing timer and join group when device is brought up */
2798 static int vxlan_open(struct net_device *dev)
2800 struct vxlan_dev *vxlan = netdev_priv(dev);
2803 ret = vxlan_sock_add(vxlan);
2807 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2808 ret = vxlan_igmp_join(vxlan);
2809 if (ret == -EADDRINUSE)
2812 vxlan_sock_release(vxlan);
2817 if (vxlan->cfg.age_interval)
2818 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
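/* Usage sketch (assuming iproute2; names and addresses are hypothetical):
 * a device created with a multicast remote, e.g.
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 dstport 4789
 *
 * triggers the vxlan_igmp_join() above when it is brought up.
 */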
2823 /* Purge the forwarding table */
2824 static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2828 spin_lock_bh(&vxlan->hash_lock);
2829 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2830 struct hlist_node *p, *n;
2831 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2833 = container_of(p, struct vxlan_fdb, hlist);
2834 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
2836 /* the all_zeros_mac entry is deleted by vxlan_uninit */
2837 if (!is_zero_ether_addr(f->eth_addr))
2838 vxlan_fdb_destroy(vxlan, f, true, true);
2841 spin_unlock_bh(&vxlan->hash_lock);
2844 /* Clean up the ageing timer and flush the forwarding table on shutdown */
2845 static int vxlan_stop(struct net_device *dev)
2847 struct vxlan_dev *vxlan = netdev_priv(dev);
2848 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2851 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2852 !vxlan_group_used(vn, vxlan))
2853 ret = vxlan_igmp_leave(vxlan);
2855 del_timer_sync(&vxlan->age_timer);
2857 vxlan_flush(vxlan, false);
2858 vxlan_sock_release(vxlan);
2863 /* Stub, nothing needs to be done. */
2864 static void vxlan_set_multicast_list(struct net_device *dev)
2868 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2870 struct vxlan_dev *vxlan = netdev_priv(dev);
2871 struct vxlan_rdst *dst = &vxlan->default_dst;
2872 struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2873 dst->remote_ifindex);
2874 bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
2876 /* This check differs from dev->max_mtu in that it looks at the
2877 * current lowerdev->mtu rather than the static dev->max_mtu.
2880 int max_mtu = lowerdev->mtu -
2881 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
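	/* Worked example: with a 1500-byte lowerdev MTU, max_mtu is
	 * 1500 - 50 = 1450 for an IPv4 underlay and 1500 - 70 = 1430 when
	 * the IPv6 headroom applies (macro values from include/net/vxlan.h).
	 */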
2882 if (new_mtu > max_mtu)
2890 static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2892 struct vxlan_dev *vxlan = netdev_priv(dev);
2893 struct ip_tunnel_info *info = skb_tunnel_info(skb);
2894 __be16 sport, dport;
2896 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2897 vxlan->cfg.port_max, true);
2898 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2900 if (ip_tunnel_info_af(info) == AF_INET) {
2901 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2904 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
2905 info->key.u.ipv4.dst,
2906 &info->key.u.ipv4.src, dport, sport,
2907 &info->dst_cache, info);
2912 #if IS_ENABLED(CONFIG_IPV6)
2913 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2914 struct dst_entry *ndst;
2916 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
2917 info->key.label, &info->key.u.ipv6.dst,
2918 &info->key.u.ipv6.src, dport, sport,
2919 &info->dst_cache, info);
2921 return PTR_ERR(ndst);
2923 #else /* !CONFIG_IPV6 */
2924 return -EPFNOSUPPORT;
2927 info->key.tp_src = sport;
2928 info->key.tp_dst = dport;
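/* Usage sketch (assuming iproute2; device name is hypothetical): this ndo
 * backs flow-based ("external") operation, where per-packet tunnel
 * metadata supplies the destination, e.g. a device created with
 *
 *   ip link add vxlan0 type vxlan external dstport 4789
 *
 * and driven by tc tunnel_key actions or Open vSwitch; the callback
 * pre-fills the route and UDP ports for such metadata.
 */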
2932 static const struct net_device_ops vxlan_netdev_ether_ops = {
2933 .ndo_init = vxlan_init,
2934 .ndo_uninit = vxlan_uninit,
2935 .ndo_open = vxlan_open,
2936 .ndo_stop = vxlan_stop,
2937 .ndo_start_xmit = vxlan_xmit,
2938 .ndo_get_stats64 = ip_tunnel_get_stats64,
2939 .ndo_set_rx_mode = vxlan_set_multicast_list,
2940 .ndo_change_mtu = vxlan_change_mtu,
2941 .ndo_validate_addr = eth_validate_addr,
2942 .ndo_set_mac_address = eth_mac_addr,
2943 .ndo_fdb_add = vxlan_fdb_add,
2944 .ndo_fdb_del = vxlan_fdb_delete,
2945 .ndo_fdb_dump = vxlan_fdb_dump,
2946 .ndo_fdb_get = vxlan_fdb_get,
2947 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2948 .ndo_change_proto_down = dev_change_proto_down_generic,
2951 static const struct net_device_ops vxlan_netdev_raw_ops = {
2952 .ndo_init = vxlan_init,
2953 .ndo_uninit = vxlan_uninit,
2954 .ndo_open = vxlan_open,
2955 .ndo_stop = vxlan_stop,
2956 .ndo_start_xmit = vxlan_xmit,
2957 .ndo_get_stats64 = ip_tunnel_get_stats64,
2958 .ndo_change_mtu = vxlan_change_mtu,
2959 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2962 /* Information for udev: this is a virtual tunnel endpoint */
2963 static struct device_type vxlan_type = {
2967 /* Call the device's ndo_udp_tunnel_add/del in order to push or
2968 * drop the UDP ports VXLAN is listening on. Target devices are
2969 * expected to implement ndo_udp_tunnel_add.
2971 static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
2973 struct vxlan_sock *vs;
2974 struct net *net = dev_net(dev);
2975 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2978 spin_lock(&vn->sock_lock);
2979 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2980 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2981 unsigned short type;
2983 if (vs->flags & VXLAN_F_GPE)
2984 type = UDP_TUNNEL_TYPE_VXLAN_GPE;
2986 type = UDP_TUNNEL_TYPE_VXLAN;
2989 udp_tunnel_push_rx_port(dev, vs->sock, type);
2991 udp_tunnel_drop_rx_port(dev, vs->sock, type);
2994 spin_unlock(&vn->sock_lock);
2997 /* Initialize the device structure. */
2998 static void vxlan_setup(struct net_device *dev)
3000 struct vxlan_dev *vxlan = netdev_priv(dev);
3003 eth_hw_addr_random(dev);
3006 dev->needs_free_netdev = true;
3007 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
3009 dev->features |= NETIF_F_LLTX;
3010 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
3011 dev->features |= NETIF_F_RXCSUM;
3012 dev->features |= NETIF_F_GSO_SOFTWARE;
3014 dev->vlan_features = dev->features;
3015 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3016 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
3017 netif_keep_dst(dev);
3018 dev->priv_flags |= IFF_NO_QUEUE;
3020 /* MTU range: 68 - 65535 */
3021 dev->min_mtu = ETH_MIN_MTU;
3022 dev->max_mtu = ETH_MAX_MTU;
3024 INIT_LIST_HEAD(&vxlan->next);
3025 spin_lock_init(&vxlan->hash_lock);
3027 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
3031 gro_cells_init(&vxlan->gro_cells, dev);
3033 for (h = 0; h < FDB_HASH_SIZE; ++h)
3034 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
3037 static void vxlan_ether_setup(struct net_device *dev)
3039 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
3040 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3041 dev->netdev_ops = &vxlan_netdev_ether_ops;
3044 static void vxlan_raw_setup(struct net_device *dev)
3046 dev->header_ops = NULL;
3047 dev->type = ARPHRD_NONE;
3048 dev->hard_header_len = 0;
3050 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
3051 dev->netdev_ops = &vxlan_netdev_raw_ops;
3054 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
3055 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
3056 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
3057 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
3058 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
3059 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
3060 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
3061 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
3062 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
3063 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
3064 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
3065 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
3066 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
3067 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
3068 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
3069 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
3070 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
3071 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
3072 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
3073 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
3074 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
3075 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
3076 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
3077 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
3078 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
3079 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
3080 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
3081 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
3082 [IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
3083 [IFLA_VXLAN_DF] = { .type = NLA_U8 },
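	/* Usage sketch (assuming iproute2; addresses are hypothetical): most
	 * attributes above map directly to ip-link(8) keywords, e.g.
	 *
	 *   ip link add vxlan0 type vxlan id 42 local 198.51.100.1 \
	 *           remote 203.0.113.1 dstport 4789 ttl 64
	 *
	 * fills IFLA_VXLAN_ID, IFLA_VXLAN_LOCAL, IFLA_VXLAN_GROUP,
	 * IFLA_VXLAN_PORT and IFLA_VXLAN_TTL respectively.
	 */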
3086 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
3087 struct netlink_ext_ack *extack)
3089 if (tb[IFLA_ADDRESS]) {
3090 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
3091 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
3092 "Provided link layer address is not Ethernet");
3096 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
3097 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
3098 "Provided Ethernet address is not unicast");
3099 return -EADDRNOTAVAIL;
3104 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3106 if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) {
3107 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
3108 "MTU must be between 68 and 65535");
3114 NL_SET_ERR_MSG(extack,
3115 "Required attributes not provided to perform the operation");
3119 if (data[IFLA_VXLAN_ID]) {
3120 u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
3122 if (id >= VXLAN_N_VID) {
3123 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
3124 "VXLAN ID must be lower than 16777216");
3129 if (data[IFLA_VXLAN_PORT_RANGE]) {
3130 const struct ifla_vxlan_port_range *p
3131 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
3133 if (ntohs(p->high) < ntohs(p->low)) {
3134 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE],
3135 "Invalid source port range");
3140 if (data[IFLA_VXLAN_DF]) {
3141 enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
3143 if (df < 0 || df > VXLAN_DF_MAX) {
3144 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF],
3145 "Invalid DF attribute");
3153 static void vxlan_get_drvinfo(struct net_device *netdev,
3154 struct ethtool_drvinfo *drvinfo)
3156 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
3157 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
3160 static const struct ethtool_ops vxlan_ethtool_ops = {
3161 .get_drvinfo = vxlan_get_drvinfo,
3162 .get_link = ethtool_op_get_link,
3165 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
3166 __be16 port, u32 flags, int ifindex)
3168 struct socket *sock;
3169 struct udp_port_cfg udp_conf;
3172 memset(&udp_conf, 0, sizeof(udp_conf));
3175 udp_conf.family = AF_INET6;
3176 udp_conf.use_udp6_rx_checksums =
3177 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
3178 udp_conf.ipv6_v6only = 1;
3180 udp_conf.family = AF_INET;
3183 udp_conf.local_udp_port = port;
3184 udp_conf.bind_ifindex = ifindex;
3186 /* Open UDP socket */
3187 err = udp_sock_create(net, &udp_conf, &sock);
3189 return ERR_PTR(err);
3194 /* Create a new listening socket if needed */
3195 static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
3196 __be16 port, u32 flags,
3199 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3200 struct vxlan_sock *vs;
3201 struct socket *sock;
3203 struct udp_tunnel_sock_cfg tunnel_cfg;
3205 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
3207 return ERR_PTR(-ENOMEM);
3209 for (h = 0; h < VNI_HASH_SIZE; ++h)
3210 INIT_HLIST_HEAD(&vs->vni_list[h]);
3212 sock = vxlan_create_sock(net, ipv6, port, flags, ifindex);
3215 return ERR_CAST(sock);
3219 refcount_set(&vs->refcnt, 1);
3220 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
3222 spin_lock(&vn->sock_lock);
3223 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
3224 udp_tunnel_notify_add_rx_port(sock,
3225 (vs->flags & VXLAN_F_GPE) ?
3226 UDP_TUNNEL_TYPE_VXLAN_GPE :
3227 UDP_TUNNEL_TYPE_VXLAN);
3228 spin_unlock(&vn->sock_lock);
3230 /* Mark socket as an encapsulation socket. */
3231 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
3232 tunnel_cfg.sk_user_data = vs;
3233 tunnel_cfg.encap_type = 1;
3234 tunnel_cfg.encap_rcv = vxlan_rcv;
3235 tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
3236 tunnel_cfg.encap_destroy = NULL;
3237 tunnel_cfg.gro_receive = vxlan_gro_receive;
3238 tunnel_cfg.gro_complete = vxlan_gro_complete;
3240 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
3245 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
3247 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
3248 struct vxlan_sock *vs = NULL;
3249 struct vxlan_dev_node *node;
3250 int l3mdev_index = 0;
3252 if (vxlan->cfg.remote_ifindex)
3253 l3mdev_index = l3mdev_master_upper_ifindex_by_index(
3254 vxlan->net, vxlan->cfg.remote_ifindex);
3256 if (!vxlan->cfg.no_share) {
3257 spin_lock(&vn->sock_lock);
3258 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
3259 vxlan->cfg.dst_port, vxlan->cfg.flags,
3261 if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
3262 spin_unlock(&vn->sock_lock);
3265 spin_unlock(&vn->sock_lock);
3268 vs = vxlan_socket_create(vxlan->net, ipv6,
3269 vxlan->cfg.dst_port, vxlan->cfg.flags,
3273 #if IS_ENABLED(CONFIG_IPV6)
3275 rcu_assign_pointer(vxlan->vn6_sock, vs);
3276 node = &vxlan->hlist6;
3280 rcu_assign_pointer(vxlan->vn4_sock, vs);
3281 node = &vxlan->hlist4;
3283 vxlan_vs_add_dev(vs, vxlan, node);
3287 static int vxlan_sock_add(struct vxlan_dev *vxlan)
3289 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
3290 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
3291 bool ipv4 = !ipv6 || metadata;
3294 RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
3295 #if IS_ENABLED(CONFIG_IPV6)
3296 RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
3298 ret = __vxlan_sock_add(vxlan, true);
3299 if (ret < 0 && ret != -EAFNOSUPPORT)
3304 ret = __vxlan_sock_add(vxlan, false);
3306 vxlan_sock_release(vxlan);
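/* Note on the logic above: in collect-metadata (external) mode both an
 * IPv4 and an IPv6 socket are opened so a single device can terminate
 * tunnels of either family; -EAFNOSUPPORT from the IPv6 attempt is
 * tolerated so kernels without IPv6 still come up with IPv4 only.
 */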
3310 static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
3311 struct net_device **lower,
3312 struct vxlan_dev *old,
3313 struct netlink_ext_ack *extack)
3315 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
3316 struct vxlan_dev *tmp;
3317 bool use_ipv6 = false;
3319 if (conf->flags & VXLAN_F_GPE) {
3320 /* For now, allow GPE only together with
3321 * COLLECT_METADATA. This can be relaxed later; in that
3322 * case, the other side of the PtP link will have to be
3323 * provided.
3325 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
3326 !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
3327 NL_SET_ERR_MSG(extack,
3328 "VXLAN GPE does not support this combination of attributes");
3333 if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) {
3334 /* Unless IPv6 is explicitly requested, assume IPv4 */
3335 conf->remote_ip.sa.sa_family = AF_INET;
3336 conf->saddr.sa.sa_family = AF_INET;
3337 } else if (!conf->remote_ip.sa.sa_family) {
3338 conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family;
3339 } else if (!conf->saddr.sa.sa_family) {
3340 conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family;
3343 if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) {
3344 NL_SET_ERR_MSG(extack,
3345 "Local and remote address must be from the same family");
3349 if (vxlan_addr_multicast(&conf->saddr)) {
3350 NL_SET_ERR_MSG(extack, "Local address cannot be multicast");
3354 if (conf->saddr.sa.sa_family == AF_INET6) {
3355 if (!IS_ENABLED(CONFIG_IPV6)) {
3356 NL_SET_ERR_MSG(extack,
3357 "IPv6 support not enabled in the kernel");
3358 return -EPFNOSUPPORT;
3361 conf->flags |= VXLAN_F_IPV6;
3363 if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
3365 ipv6_addr_type(&conf->saddr.sin6.sin6_addr);
3367 ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr);
3369 if (local_type & IPV6_ADDR_LINKLOCAL) {
3370 if (!(remote_type & IPV6_ADDR_LINKLOCAL) &&
3371 (remote_type != IPV6_ADDR_ANY)) {
3372 NL_SET_ERR_MSG(extack,
3373 "Invalid combination of local and remote address scopes");
3377 conf->flags |= VXLAN_F_IPV6_LINKLOCAL;
3380 (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) {
3381 NL_SET_ERR_MSG(extack,
3382 "Invalid combination of local and remote address scopes");
3386 conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL;
3391 if (conf->label && !use_ipv6) {
3392 NL_SET_ERR_MSG(extack,
3393 "Label attribute only applies to IPv6 VXLAN devices");
3397 if (conf->remote_ifindex) {
3398 struct net_device *lowerdev;
3400 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
3402 NL_SET_ERR_MSG(extack,
3403 "Invalid local interface, device not found");
3407 #if IS_ENABLED(CONFIG_IPV6)
3409 struct inet6_dev *idev = __in6_dev_get(lowerdev);
3410 if (idev && idev->cnf.disable_ipv6) {
3411 NL_SET_ERR_MSG(extack,
3412 "IPv6 support disabled by administrator");
3420 if (vxlan_addr_multicast(&conf->remote_ip)) {
3421 NL_SET_ERR_MSG(extack,
3422 "Local interface required for multicast remote destination");
3427 #if IS_ENABLED(CONFIG_IPV6)
3428 if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) {
3429 NL_SET_ERR_MSG(extack,
3430 "Local interface required for link-local local/remote addresses");
3438 if (!conf->dst_port) {
3439 if (conf->flags & VXLAN_F_GPE)
3440 conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */
3442 conf->dst_port = htons(vxlan_port);
3445 if (!conf->age_interval)
3446 conf->age_interval = FDB_AGE_DEFAULT;
3448 list_for_each_entry(tmp, &vn->vxlan_list, next) {
3452 if (tmp->cfg.vni != conf->vni)
3454 if (tmp->cfg.dst_port != conf->dst_port)
3456 if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
3457 (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
3460 if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
3461 tmp->cfg.remote_ifindex != conf->remote_ifindex)
3464 NL_SET_ERR_MSG(extack,
3465 "A VXLAN device with the specified VNI already exists");
3472 static void vxlan_config_apply(struct net_device *dev,
3473 struct vxlan_config *conf,
3474 struct net_device *lowerdev,
3475 struct net *src_net,
3478 struct vxlan_dev *vxlan = netdev_priv(dev);
3479 struct vxlan_rdst *dst = &vxlan->default_dst;
3480 unsigned short needed_headroom = ETH_HLEN;
3481 bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
3482 int max_mtu = ETH_MAX_MTU;
3485 if (conf->flags & VXLAN_F_GPE)
3486 vxlan_raw_setup(dev);
3488 vxlan_ether_setup(dev);
3491 dev->mtu = conf->mtu;
3493 vxlan->net = src_net;
3496 dst->remote_vni = conf->vni;
3498 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
3501 dst->remote_ifindex = conf->remote_ifindex;
3503 dev->gso_max_size = lowerdev->gso_max_size;
3504 dev->gso_max_segs = lowerdev->gso_max_segs;
3506 needed_headroom = lowerdev->hard_header_len;
3508 max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
3510 if (max_mtu < ETH_MIN_MTU)
3511 max_mtu = ETH_MIN_MTU;
3513 if (!changelink && !conf->mtu)
3517 if (dev->mtu > max_mtu)
3520 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
3521 needed_headroom += VXLAN6_HEADROOM;
3523 needed_headroom += VXLAN_HEADROOM;
3524 dev->needed_headroom = needed_headroom;
3526 memcpy(&vxlan->cfg, conf, sizeof(*conf));
3529 static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
3530 struct vxlan_config *conf, bool changelink,
3531 struct netlink_ext_ack *extack)
3533 struct vxlan_dev *vxlan = netdev_priv(dev);
3534 struct net_device *lowerdev;
3537 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack);
3541 vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);
3546 static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3547 struct vxlan_config *conf,
3548 struct netlink_ext_ack *extack)
3550 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3551 struct vxlan_dev *vxlan = netdev_priv(dev);
3552 struct vxlan_fdb *f = NULL;
3553 bool unregister = false;
3556 err = vxlan_dev_configure(net, dev, conf, false, extack);
3560 dev->ethtool_ops = &vxlan_ethtool_ops;
3562 /* Create an FDB entry for a valid default destination */
3563 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
3564 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3565 &vxlan->default_dst.remote_ip,
3566 NUD_REACHABLE | NUD_PERMANENT,
3567 vxlan->cfg.dst_port,
3568 vxlan->default_dst.remote_vni,
3569 vxlan->default_dst.remote_vni,
3570 vxlan->default_dst.remote_ifindex,
3576 err = register_netdevice(dev);
3581 err = rtnl_configure_link(dev, NULL);
3586 vxlan_fdb_insert(vxlan, all_zeros_mac,
3587 vxlan->default_dst.remote_vni, f);
3589 /* notify default fdb entry */
3590 err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
3591 RTM_NEWNEIGH, true, extack);
3593 vxlan_fdb_destroy(vxlan, f, false, false);
3598 list_add(&vxlan->next, &vn->vxlan_list);
3602 /* unregister_netdevice() destroys the default FDB entry and sends a
3603 * deletion notification. But the addition notification was never sent,
3604 * so destroy the entry by hand here.
3607 __vxlan_fdb_free(f);
3610 unregister_netdevice(dev);
3614 /* Set/clear flags based on attribute */
3615 static int vxlan_nl2flag(struct vxlan_config *conf, struct nlattr *tb[],
3616 int attrtype, unsigned long mask, bool changelink,
3617 bool changelink_supported,
3618 struct netlink_ext_ack *extack)
3620 unsigned long flags;
3625 if (changelink && !changelink_supported) {
3626 vxlan_flag_attr_error(attrtype, extack);
3630 if (vxlan_policy[attrtype].type == NLA_FLAG)
3631 flags = conf->flags | mask;
3632 else if (nla_get_u8(tb[attrtype]))
3633 flags = conf->flags | mask;
3635 flags = conf->flags & ~mask;
3637 conf->flags = flags;
3642 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
3643 struct net_device *dev, struct vxlan_config *conf,
3644 bool changelink, struct netlink_ext_ack *extack)
3646 struct vxlan_dev *vxlan = netdev_priv(dev);
3649 memset(conf, 0, sizeof(*conf));
3651 /* for a changelink operation, start from the existing config */
3653 memcpy(conf, &vxlan->cfg, sizeof(*conf));
3655 if (data[IFLA_VXLAN_ID]) {
3656 __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
3658 if (changelink && (vni != conf->vni)) {
3659 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID], "Cannot change VNI");
3662 conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
3665 if (data[IFLA_VXLAN_GROUP]) {
3666 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET)) {
3667 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_GROUP], "New group address family does not match old group");
3671 conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
3672 conf->remote_ip.sa.sa_family = AF_INET;
3673 } else if (data[IFLA_VXLAN_GROUP6]) {
3674 if (!IS_ENABLED(CONFIG_IPV6)) {
3675 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_GROUP6], "IPv6 support not enabled in the kernel");
3676 return -EPFNOSUPPORT;
3679 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6)) {
3680 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_GROUP6], "New group address family does not match old group");
3684 conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
3685 conf->remote_ip.sa.sa_family = AF_INET6;
3688 if (data[IFLA_VXLAN_LOCAL]) {
3689 if (changelink && (conf->saddr.sa.sa_family != AF_INET)) {
3690 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_LOCAL], "New local address family does not match old");
3694 conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
3695 conf->saddr.sa.sa_family = AF_INET;
3696 } else if (data[IFLA_VXLAN_LOCAL6]) {
3697 if (!IS_ENABLED(CONFIG_IPV6)) {
3698 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_LOCAL6], "IPv6 support not enabled in the kernel");
3699 return -EPFNOSUPPORT;
3702 if (changelink && (conf->saddr.sa.sa_family != AF_INET6)) {
3703 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_LOCAL6], "New local address family does not match old");
3707 /* TODO: respect scope id */
3708 conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
3709 conf->saddr.sa.sa_family = AF_INET6;
3712 if (data[IFLA_VXLAN_LINK])
3713 conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
3715 if (data[IFLA_VXLAN_TOS])
3716 conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
3718 if (data[IFLA_VXLAN_TTL])
3719 conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
3721 if (data[IFLA_VXLAN_TTL_INHERIT]) {
3722 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_TTL_INHERIT,
3723 VXLAN_F_TTL_INHERIT, changelink, false,
3730 if (data[IFLA_VXLAN_LABEL])
3731 conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
3732 IPV6_FLOWLABEL_MASK;
3734 if (data[IFLA_VXLAN_LEARNING]) {
3735 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_LEARNING,
3736 VXLAN_F_LEARN, changelink, true,
3740 } else if (!changelink) {
3741 /* default to learning enabled on a new device */
3742 conf->flags |= VXLAN_F_LEARN;
3745 if (data[IFLA_VXLAN_AGEING])
3746 conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
3748 if (data[IFLA_VXLAN_PROXY]) {
3749 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_PROXY,
3750 VXLAN_F_PROXY, changelink, false,
3756 if (data[IFLA_VXLAN_RSC]) {
3757 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_RSC,
3758 VXLAN_F_RSC, changelink, false,
3764 if (data[IFLA_VXLAN_L2MISS]) {
3765 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L2MISS,
3766 VXLAN_F_L2MISS, changelink, false,
3772 if (data[IFLA_VXLAN_L3MISS]) {
3773 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L3MISS,
3774 VXLAN_F_L3MISS, changelink, false,
3780 if (data[IFLA_VXLAN_LIMIT]) {
3782 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_LIMIT],
3783 "Cannot change limit");
3786 conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
3789 if (data[IFLA_VXLAN_COLLECT_METADATA]) {
3790 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_COLLECT_METADATA,
3791 VXLAN_F_COLLECT_METADATA, changelink, false,
3797 if (data[IFLA_VXLAN_PORT_RANGE]) {
3799 const struct ifla_vxlan_port_range *p
3800 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
3801 conf->port_min = ntohs(p->low);
3802 conf->port_max = ntohs(p->high);
3804 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE],
3805 "Cannot change port range");
3810 if (data[IFLA_VXLAN_PORT]) {
3812 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT],
3813 "Cannot change port");
3816 conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
3819 if (data[IFLA_VXLAN_UDP_CSUM]) {
3821 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_UDP_CSUM],
3822 "Cannot change UDP_CSUM flag");
3825 if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
3826 conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
3829 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
3830 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
3831 VXLAN_F_UDP_ZERO_CSUM6_TX, changelink,
3837 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
3838 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
3839 VXLAN_F_UDP_ZERO_CSUM6_RX, changelink,
3845 if (data[IFLA_VXLAN_REMCSUM_TX]) {
3846 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_TX,
3847 VXLAN_F_REMCSUM_TX, changelink, false,
3853 if (data[IFLA_VXLAN_REMCSUM_RX]) {
3854 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_RX,
3855 VXLAN_F_REMCSUM_RX, changelink, false,
3861 if (data[IFLA_VXLAN_GBP]) {
3862 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GBP,
3863 VXLAN_F_GBP, changelink, false, extack);
3868 if (data[IFLA_VXLAN_GPE]) {
3869 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GPE,
3870 VXLAN_F_GPE, changelink, false,
3876 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
3877 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_NOPARTIAL,
3878 VXLAN_F_REMCSUM_NOPARTIAL, changelink,
3886 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
3887 "Cannot change mtu");
3890 conf->mtu = nla_get_u32(tb[IFLA_MTU]);
3893 if (data[IFLA_VXLAN_DF])
3894 conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
3899 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
3900 struct nlattr *tb[], struct nlattr *data[],
3901 struct netlink_ext_ack *extack)
3903 struct vxlan_config conf;
3906 err = vxlan_nl2conf(tb, data, dev, &conf, false, extack);
3910 return __vxlan_dev_create(src_net, dev, &conf, extack);
3913 static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3914 struct nlattr *data[],
3915 struct netlink_ext_ack *extack)
3917 struct vxlan_dev *vxlan = netdev_priv(dev);
3918 struct vxlan_rdst *dst = &vxlan->default_dst;
3919 struct net_device *lowerdev;
3920 struct vxlan_config conf;
3923 err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
3927 err = vxlan_config_validate(vxlan->net, &conf, &lowerdev,
3932 /* handle default dst entry */
3933 if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
3934 spin_lock_bh(&vxlan->hash_lock);
3935 if (!vxlan_addr_any(&conf.remote_ip)) {
3936 err = vxlan_fdb_update(vxlan, all_zeros_mac,
3938 NUD_REACHABLE | NUD_PERMANENT,
3939 NLM_F_APPEND | NLM_F_CREATE,
3940 vxlan->cfg.dst_port,
3942 conf.remote_ifindex,
3943 NTF_SELF, true, extack);
3945 spin_unlock_bh(&vxlan->hash_lock);
3949 if (!vxlan_addr_any(&dst->remote_ip))
3950 __vxlan_fdb_delete(vxlan, all_zeros_mac,
3952 vxlan->cfg.dst_port,
3955 dst->remote_ifindex,
3957 spin_unlock_bh(&vxlan->hash_lock);
3960 if (conf.age_interval != vxlan->cfg.age_interval)
3961 mod_timer(&vxlan->age_timer, jiffies);
3963 vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
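/* Usage sketch (assuming iproute2 supports changelink for the attribute;
 * device name is hypothetical):
 *
 *   ip link set dev vxlan0 type vxlan ageing 600
 *
 * updates cfg.age_interval and, per the check above, kicks the ageing
 * timer so the new interval takes effect immediately.
 */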
3967 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3969 struct vxlan_dev *vxlan = netdev_priv(dev);
3971 vxlan_flush(vxlan, true);
3973 list_del(&vxlan->next);
3974 unregister_netdevice_queue(dev, head);
3977 static size_t vxlan_get_size(const struct net_device *dev)
3980 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
3981 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
3982 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
3983 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
3984 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
3985 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
3986 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
3987 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */
3988 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
3989 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
3990 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
3991 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
3992 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
3993 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
3994 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
3995 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
3996 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
3997 nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
3998 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
3999 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
4000 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
4001 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
4002 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
4003 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
4007 static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
4009 const struct vxlan_dev *vxlan = netdev_priv(dev);
4010 const struct vxlan_rdst *dst = &vxlan->default_dst;
4011 struct ifla_vxlan_port_range ports = {
4012 .low = htons(vxlan->cfg.port_min),
4013 .high = htons(vxlan->cfg.port_max),
4016 if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
4017 goto nla_put_failure;
4019 if (!vxlan_addr_any(&dst->remote_ip)) {
4020 if (dst->remote_ip.sa.sa_family == AF_INET) {
4021 if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
4022 dst->remote_ip.sin.sin_addr.s_addr))
4023 goto nla_put_failure;
4024 #if IS_ENABLED(CONFIG_IPV6)
4026 if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
4027 &dst->remote_ip.sin6.sin6_addr))
4028 goto nla_put_failure;
4033 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
4034 goto nla_put_failure;
4036 if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
4037 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
4038 if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
4039 vxlan->cfg.saddr.sin.sin_addr.s_addr))
4040 goto nla_put_failure;
4041 #if IS_ENABLED(CONFIG_IPV6)
4043 if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
4044 &vxlan->cfg.saddr.sin6.sin6_addr))
4045 goto nla_put_failure;
4050 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
4051 nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
4052 !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
4053 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
4054 nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
4055 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
4056 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
4057 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
4058 nla_put_u8(skb, IFLA_VXLAN_PROXY,
4059 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
4060 nla_put_u8(skb, IFLA_VXLAN_RSC,
4061 !!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
4062 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
4063 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
4064 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
4065 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
4066 nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
4067 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
4068 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
4069 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
4070 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
4071 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
4072 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
4073 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
4074 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
4075 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
4076 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
4077 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
4078 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
4079 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
4080 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
4081 goto nla_put_failure;
4083 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
4084 goto nla_put_failure;
4086 if (vxlan->cfg.flags & VXLAN_F_GBP &&
4087 nla_put_flag(skb, IFLA_VXLAN_GBP))
4088 goto nla_put_failure;
4090 if (vxlan->cfg.flags & VXLAN_F_GPE &&
4091 nla_put_flag(skb, IFLA_VXLAN_GPE))
4092 goto nla_put_failure;
4094 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
4095 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
4096 goto nla_put_failure;
4104 static struct net *vxlan_get_link_net(const struct net_device *dev)
4106 struct vxlan_dev *vxlan = netdev_priv(dev);
4111 static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
4113 .maxtype = IFLA_VXLAN_MAX,
4114 .policy = vxlan_policy,
4115 .priv_size = sizeof(struct vxlan_dev),
4116 .setup = vxlan_setup,
4117 .validate = vxlan_validate,
4118 .newlink = vxlan_newlink,
4119 .changelink = vxlan_changelink,
4120 .dellink = vxlan_dellink,
4121 .get_size = vxlan_get_size,
4122 .fill_info = vxlan_fill_info,
4123 .get_link_net = vxlan_get_link_net,
4126 struct net_device *vxlan_dev_create(struct net *net, const char *name,
4127 u8 name_assign_type,
4128 struct vxlan_config *conf)
4130 struct nlattr *tb[IFLA_MAX + 1];
4131 struct net_device *dev;
4134 memset(&tb, 0, sizeof(tb));
4136 dev = rtnl_create_link(net, name, name_assign_type,
4137 &vxlan_link_ops, tb, NULL);
4141 err = __vxlan_dev_create(net, dev, conf, NULL);
4144 return ERR_PTR(err);
4147 err = rtnl_configure_link(dev, NULL);
4149 LIST_HEAD(list_kill);
4151 vxlan_dellink(dev, &list_kill);
4152 unregister_netdevice_many(&list_kill);
4153 return ERR_PTR(err);
4158 EXPORT_SYMBOL_GPL(vxlan_dev_create);
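/* Minimal in-kernel usage sketch for vxlan_dev_create() (hypothetical
 * caller, not from this file; "net" is a caller-supplied namespace):
 * create a collect-metadata device on behalf of another subsystem.
 *
 *	struct vxlan_config conf = {
 *		.dst_port = htons(4789),
 *		.flags	  = VXLAN_F_COLLECT_METADATA,
 *	};
 *	struct net_device *dev;
 *
 *	dev = vxlan_dev_create(net, "vxlan0", NET_NAME_USER, &conf);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */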
4160 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
4161 struct net_device *dev)
4163 struct vxlan_dev *vxlan, *next;
4164 LIST_HEAD(list_kill);
4166 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
4167 struct vxlan_rdst *dst = &vxlan->default_dst;
4169 /* If we created the vxlan device with a carrier (lower device)
4170 * and we lose that carrier due to module unload,
4171 * we also need to remove the vxlan device. In other
4172 * cases this is not necessary: remote_ifindex
4173 * is 0 there, so nothing matches.
4175 if (dst->remote_ifindex == dev->ifindex)
4176 vxlan_dellink(vxlan->dev, &list_kill);
4179 unregister_netdevice_many(&list_kill);
4182 static int vxlan_netdevice_event(struct notifier_block *unused,
4183 unsigned long event, void *ptr)
4185 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4186 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
4188 if (event == NETDEV_UNREGISTER) {
4189 vxlan_offload_rx_ports(dev, false);
4190 vxlan_handle_lowerdev_unregister(vn, dev);
4191 } else if (event == NETDEV_REGISTER) {
4192 vxlan_offload_rx_ports(dev, true);
4193 } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
4194 event == NETDEV_UDP_TUNNEL_DROP_INFO) {
4195 vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
4201 static struct notifier_block vxlan_notifier_block __read_mostly = {
4202 .notifier_call = vxlan_netdevice_event,
4206 vxlan_fdb_offloaded_set(struct net_device *dev,
4207 struct switchdev_notifier_vxlan_fdb_info *fdb_info)
4209 struct vxlan_dev *vxlan = netdev_priv(dev);
4210 struct vxlan_rdst *rdst;
4211 struct vxlan_fdb *f;
4213 spin_lock_bh(&vxlan->hash_lock);
4215 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
4219 rdst = vxlan_fdb_find_rdst(f, &fdb_info->remote_ip,
4220 fdb_info->remote_port,
4221 fdb_info->remote_vni,
4222 fdb_info->remote_ifindex);
4226 rdst->offloaded = fdb_info->offloaded;
4229 spin_unlock_bh(&vxlan->hash_lock);
4233 vxlan_fdb_external_learn_add(struct net_device *dev,
4234 struct switchdev_notifier_vxlan_fdb_info *fdb_info)
4236 struct vxlan_dev *vxlan = netdev_priv(dev);
4237 struct netlink_ext_ack *extack;
4240 extack = switchdev_notifier_info_to_extack(&fdb_info->info);
4242 spin_lock_bh(&vxlan->hash_lock);
4243 err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
4245 NLM_F_CREATE | NLM_F_REPLACE,
4246 fdb_info->remote_port,
4248 fdb_info->remote_vni,
4249 fdb_info->remote_ifindex,
4250 NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
4252 spin_unlock_bh(&vxlan->hash_lock);
4258 vxlan_fdb_external_learn_del(struct net_device *dev,
4259 struct switchdev_notifier_vxlan_fdb_info *fdb_info)
4261 struct vxlan_dev *vxlan = netdev_priv(dev);
4262 struct vxlan_fdb *f;
4265 spin_lock_bh(&vxlan->hash_lock);
4267 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
4270 else if (f->flags & NTF_EXT_LEARNED)
4271 err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr,
4272 fdb_info->remote_ip,
4273 fdb_info->remote_port,
4275 fdb_info->remote_vni,
4276 fdb_info->remote_ifindex,
4279 spin_unlock_bh(&vxlan->hash_lock);
4284 static int vxlan_switchdev_event(struct notifier_block *unused,
4285 unsigned long event, void *ptr)
4287 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
4288 struct switchdev_notifier_vxlan_fdb_info *fdb_info;
4292 case SWITCHDEV_VXLAN_FDB_OFFLOADED:
4293 vxlan_fdb_offloaded_set(dev, ptr);
4295 case SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE:
4297 err = vxlan_fdb_external_learn_add(dev, fdb_info);
4299 err = notifier_from_errno(err);
4302 fdb_info->offloaded = true;
4303 vxlan_fdb_offloaded_set(dev, fdb_info);
4305 case SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE:
4307 err = vxlan_fdb_external_learn_del(dev, fdb_info);
4309 err = notifier_from_errno(err);
4312 fdb_info->offloaded = false;
4313 vxlan_fdb_offloaded_set(dev, fdb_info);
4320 static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
4321 .notifier_call = vxlan_switchdev_event,
4324 static __net_init int vxlan_init_net(struct net *net)
4326 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
4329 INIT_LIST_HEAD(&vn->vxlan_list);
4330 spin_lock_init(&vn->sock_lock);
4332 for (h = 0; h < PORT_HASH_SIZE; ++h)
4333 INIT_HLIST_HEAD(&vn->sock_list[h]);
4338 static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
4340 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
4341 struct vxlan_dev *vxlan, *next;
4342 struct net_device *dev, *aux;
4345 for_each_netdev_safe(net, dev, aux)
4346 if (dev->rtnl_link_ops == &vxlan_link_ops)
4347 unregister_netdevice_queue(dev, head);
4349 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
4350 /* If vxlan->dev is in the same netns, it has already been added
4351 * to the list by the previous loop.
4353 if (!net_eq(dev_net(vxlan->dev), net))
4354 unregister_netdevice_queue(vxlan->dev, head);
4357 for (h = 0; h < PORT_HASH_SIZE; ++h)
4358 WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
4361 static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
4367 list_for_each_entry(net, net_list, exit_list)
4368 vxlan_destroy_tunnels(net, &list);
4370 unregister_netdevice_many(&list);
4374 static struct pernet_operations vxlan_net_ops = {
4375 .init = vxlan_init_net,
4376 .exit_batch = vxlan_exit_batch_net,
4377 .id = &vxlan_net_id,
4378 .size = sizeof(struct vxlan_net),
4381 static int __init vxlan_init_module(void)
4385 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
4387 rc = register_pernet_subsys(&vxlan_net_ops);
4391 rc = register_netdevice_notifier(&vxlan_notifier_block);
4395 rc = register_switchdev_notifier(&vxlan_switchdev_notifier_block);
4399 rc = rtnl_link_register(&vxlan_link_ops);
4405 unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
4407 unregister_netdevice_notifier(&vxlan_notifier_block);
4409 unregister_pernet_subsys(&vxlan_net_ops);
4413 late_initcall(vxlan_init_module);
4415 static void __exit vxlan_cleanup_module(void)
4417 rtnl_link_unregister(&vxlan_link_ops);
4418 unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
4419 unregister_netdevice_notifier(&vxlan_notifier_block);
4420 unregister_pernet_subsys(&vxlan_net_ops);
4421 /* rcu_barrier() is called by netns */
4423 module_exit(vxlan_cleanup_module);
4425 MODULE_LICENSE("GPL");
4426 MODULE_VERSION(VXLAN_VERSION);
4427 MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
4428 MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
4429 MODULE_ALIAS_RTNL_LINK("vxlan");