// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"

struct deferred_action {
        struct sk_buff *skb;
        const struct nlattr *actions;
        int actions_len;

        /* Store pkt_key clone when creating deferred action. */
        struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN      (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
        unsigned long dst;
        struct vport *vport;
        struct ovs_skb_cb cb;
        __be16 inner_protocol;
        u16 network_offset;     /* valid only for MPLS */
        u16 vlan_tci;
        __be16 vlan_proto;
        unsigned int l2_len;
        u8 mac_proto;
        u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
        int head;
        int tail;
        /* Deferred action fifo queue storage. */
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
        struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

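/* A rough sketch of how the pieces above fit together (derived from the
 * code below, not a normative description): 'exec_actions_level' counts,
 * per CPU, how deeply action execution is nested.  While the level stays
 * at or below OVS_DEFERRED_ACTION_THRESHOLD, a nested sample/recirc
 * action grabs a per-level key slot from 'flow_keys' and runs inline.
 * Once the key slots run out, nested actions are parked in the per-CPU
 * 'action_fifos' and replayed from the outermost level; past
 * OVS_RECURSION_LIMIT the packet is dropped in ovs_execute_actions().
 */
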
/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
        struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
        int level = this_cpu_read(exec_actions_level);
        struct sw_flow_key *key = NULL;

        if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
                key = &keys->key[level - 1];
                *key = *key_;
        }

        return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
        fifo->head = 0;
        fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
        return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
        if (action_fifo_is_empty(fifo))
                return NULL;

        return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
                return NULL;

        return &fifo->fifo[fifo->head++];
}

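/* Note that action_fifo_put() refuses the last array slot (head is
 * capped at DEFERRED_ACTION_FIFO_SIZE - 1), so at most nine actions can
 * be deferred between two action_fifo_init() resets; the next caller
 * gets NULL and the packet is dropped in clone_execute().
 */
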
/* Return the deferred action entry, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
                                                    const struct sw_flow_key *key,
                                                    const struct nlattr *actions,
                                                    const int actions_len)
{
        struct action_fifo *fifo;
        struct deferred_action *da;

        fifo = this_cpu_ptr(action_fifos);
        da = action_fifo_put(fifo);
        if (da) {
                da->skb = skb;
                da->actions = actions;
                da->actions_len = actions_len;
                da->pkt_key = *key;
        }

        return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
        key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
        return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
                         struct sw_flow_key *key,
                         u32 recirc_id,
                         const struct nlattr *actions, int len,
                         bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
        int err;

        err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
        if (err)
                return err;

        if (!mac_len)
                key->mac_proto = MAC_PROTO_NONE;

        invalidate_flow_key(key);
        return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be16 ethertype)
{
        int err;

        err = skb_mpls_pop(skb, ethertype, skb->mac_len,
                           ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
        if (err)
                return err;

        if (ethertype == htons(ETH_P_TEB))
                key->mac_proto = MAC_PROTO_ETHERNET;

        invalidate_flow_key(key);
        return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const __be32 *mpls_lse, const __be32 *mask)
{
        struct mpls_shim_hdr *stack;
        __be32 lse;
        int err;

        if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
                return -ENOMEM;

        stack = mpls_hdr(skb);
        lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
        err = skb_mpls_update_lse(skb, lse);
        if (err)
                return err;

        flow_key->mpls.lse[0] = lse;
        return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = skb_vlan_pop(skb);
        if (skb_vlan_tag_present(skb)) {
                invalidate_flow_key(key);
        } else {
                key->eth.vlan.tci = 0;
                key->eth.vlan.tpid = 0;
        }
        return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
{
        if (skb_vlan_tag_present(skb)) {
                invalidate_flow_key(key);
        } else {
                key->eth.vlan.tci = vlan->vlan_tci;
                key->eth.vlan.tpid = vlan->vlan_tpid;
        }
        return skb_vlan_push(skb, vlan->vlan_tpid,
                             ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
        u16 *dst = (u16 *)dst_;
        const u16 *src = (const u16 *)src_;
        const u16 *mask = (const u16 *)mask_;

        OVS_SET_MASKED(dst[0], src[0], mask[0]);
        OVS_SET_MASKED(dst[1], src[1], mask[1]);
        OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

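/* OVS_MASKED()/OVS_SET_MASKED() come from the datapath headers and merge
 * a pre-masked value into the old one, roughly new = key | (old & ~mask);
 * the "already properly masked" comment above is what makes skipping a
 * (key & mask) step safe.  Worked example, assuming dst 00:11:22:33:44:55,
 * src aa:bb:00:00:00:00 and mask ff:ff:00:00:00:00: the result is
 * aa:bb:22:33:44:55, i.e. only the first two bytes are replaced.
 */
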
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
                        const struct ovs_key_ethernet *key,
                        const struct ovs_key_ethernet *mask)
{
        int err;

        err = skb_ensure_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
                               mask->eth_src);
        ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
                               mask->eth_dst);

        skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
        ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
        return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = skb_eth_pop(skb);
        if (err)
                return err;

        /* safe right before invalidate_flow_key */
        key->mac_proto = MAC_PROTO_NONE;
        invalidate_flow_key(key);
        return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct ovs_action_push_eth *ethh)
{
        int err;

        err = skb_eth_push(skb, ethh->addresses.eth_dst,
                           ethh->addresses.eth_src);
        if (err)
                return err;

        /* safe right before invalidate_flow_key */
        key->mac_proto = MAC_PROTO_ETHERNET;
        invalidate_flow_key(key);
        return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct nshhdr *nh)
{
        int err;

        err = nsh_push(skb, nh);
        if (err)
                return err;

        /* safe right before invalidate_flow_key */
        key->mac_proto = MAC_PROTO_NONE;
        invalidate_flow_key(key);
        return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = nsh_pop(skb);
        if (err)
                return err;

        /* safe right before invalidate_flow_key */
        if (skb->protocol == htons(ETH_P_TEB))
                key->mac_proto = MAC_PROTO_ETHERNET;
        else
                key->mac_proto = MAC_PROTO_NONE;
        invalidate_flow_key(key);
        return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
                                  __be32 addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->frag_off & htons(IP_OFFSET))
                return;

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 addr, new_addr, true);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        ovs_ct_clear(skb, NULL);
        *addr = new_addr;
}

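/* Ordering matters in set_ip_addr(): both checksum fixups read the old
 * address, so the store (*addr = new_addr) must come last.  The L4 fixup
 * is needed at all because TCP/UDP checksums cover an IP pseudo-header
 * containing the addresses, while csum_replace4() only patches the IP
 * header checksum, which does not.
 */
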
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, true);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
                                                  skb, addr, new_addr, true);
        }
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
                           const __be32 mask[4], __be32 masked[4])
{
        masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
        masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
        masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
        masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_hash(skb);
        ovs_ct_clear(skb, NULL);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
        u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

        ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
                             (__force __wsum)(ipv6_tclass << 12));

        ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
        u32 ofl;

        ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
        fl = OVS_MASKED(ofl, fl, mask);

        /* Bits 21-24 are always unmasked, so this retains their values. */
        nh->flow_lbl[0] = (u8)(fl >> 16);
        nh->flow_lbl[1] = (u8)(fl >> 8);
        nh->flow_lbl[2] = (u8)fl;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
        new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
                             (__force __wsum)(new_ttl << 8));
        nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
                       u8 mask)
{
        new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv4 *key,
                    const struct ovs_key_ipv4 *mask)
{
        struct iphdr *nh;
        __be32 new_addr;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        /* Setting an IP address is typically only a side effect of
         * matching on it in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (mask->ipv4_src) {
                new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

                if (unlikely(new_addr != nh->saddr)) {
                        set_ip_addr(skb, nh, &nh->saddr, new_addr);
                        flow_key->ipv4.addr.src = new_addr;
                }
        }
        if (mask->ipv4_dst) {
                new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

                if (unlikely(new_addr != nh->daddr)) {
                        set_ip_addr(skb, nh, &nh->daddr, new_addr);
                        flow_key->ipv4.addr.dst = new_addr;
                }
        }
        if (mask->ipv4_tos) {
                ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
                flow_key->ip.tos = nh->tos;
        }
        if (mask->ipv4_ttl) {
                set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
                flow_key->ip.ttl = nh->ttl;
        }

        return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
        return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv6 *key,
                    const struct ovs_key_ipv6 *mask)
{
        struct ipv6hdr *nh;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);

        /* Setting an IP address is typically only a side effect of
         * matching on it in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
                __be32 *saddr = (__be32 *)&nh->saddr;
                __be32 masked[4];

                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
                        set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
                                      true);
                        memcpy(&flow_key->ipv6.addr.src, masked,
                               sizeof(flow_key->ipv6.addr.src));
                }
        }
        if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;
                __be32 *daddr = (__be32 *)&nh->daddr;
                __be32 masked[4];

                mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

                if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
                        if (ipv6_ext_hdr(nh->nexthdr))
                                recalc_csum = (ipv6_find_hdr(skb, &offset,
                                                             NEXTHDR_ROUTING,
                                                             NULL, &flags)
                                               != NEXTHDR_ROUTING);

                        set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
                                      recalc_csum);
                        memcpy(&flow_key->ipv6.addr.dst, masked,
                               sizeof(flow_key->ipv6.addr.dst));
                }
        }
        if (mask->ipv6_tclass) {
                set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
                flow_key->ip.tos = ipv6_get_dsfield(nh);
        }
        if (mask->ipv6_label) {
                set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
                            ntohl(mask->ipv6_label));
                flow_key->ipv6.label =
                        *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        }
        if (mask->ipv6_hlimit) {
                set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
                flow_key->ip.ttl = nh->hop_limit;
        }
        return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct nlattr *a)
{
        struct nshhdr *nh;
        size_t length;
        int err;
        u8 flags;
        u8 ttl;
        int i;

        struct ovs_key_nsh key;
        struct ovs_key_nsh mask;

        err = nsh_key_from_nlattr(a, &key, &mask);
        if (err)
                return err;

        /* Make sure the NSH base header is there */
        if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
                return -ENOMEM;

        nh = nsh_hdr(skb);
        length = nsh_hdr_len(nh);

        /* Make sure the whole NSH header is there */
        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                       length);
        if (unlikely(err))
                return err;

        nh = nsh_hdr(skb);
        skb_postpull_rcsum(skb, nh, length);
        flags = nsh_get_flags(nh);
        flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
        flow_key->nsh.base.flags = flags;
        ttl = nsh_get_ttl(nh);
        ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
        flow_key->nsh.base.ttl = ttl;
        nsh_set_flags_and_ttl(nh, flags, ttl);
        nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
                                  mask.base.path_hdr);
        flow_key->nsh.base.path_hdr = nh->path_hdr;
        switch (nh->mdtype) {
        case NSH_M_TYPE1:
                for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
                        nh->md1.context[i] =
                                OVS_MASKED(nh->md1.context[i], key.context[i],
                                           mask.context[i]);
                }
                memcpy(flow_key->nsh.context, nh->md1.context,
                       sizeof(nh->md1.context));
                break;
        case NSH_M_TYPE2:
                memset(flow_key->nsh.context, 0,
                       sizeof(flow_key->nsh.context));
                break;
        default:
                return -EINVAL;
        }
        skb_postpush_rcsum(skb, nh, length);
        return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
{
        ovs_ct_clear(skb, NULL);
        inet_proto_csum_replace2(check, skb, *port, new_port, false);
        *port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_udp *key,
                   const struct ovs_key_udp *mask)
{
        struct udphdr *uh;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        /* Either of the masks is non-zero, so do not bother checking them. */
        src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
        dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                if (likely(src != uh->source)) {
                        set_tp_port(skb, &uh->source, src, &uh->check);
                        flow_key->tp.src = src;
                }
                if (likely(dst != uh->dest)) {
                        set_tp_port(skb, &uh->dest, dst, &uh->check);
                        flow_key->tp.dst = dst;
                }

                if (unlikely(!uh->check))
                        uh->check = CSUM_MANGLED_0;
        } else {
                uh->source = src;
                uh->dest = dst;
                flow_key->tp.src = src;
                flow_key->tp.dst = dst;
                ovs_ct_clear(skb, NULL);
        }

        skb_clear_hash(skb);

        return 0;
}

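/* The CSUM_MANGLED_0 fold above exists because a UDP checksum field of
 * zero means "no checksum was computed" (for IPv4), so a recomputed
 * checksum that happens to be 0 must be sent as its one's-complement
 * alias 0xffff to remain a valid, verifiable checksum.
 */
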
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_tcp *key,
                   const struct ovs_key_tcp *mask)
{
        struct tcphdr *th;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
        if (likely(src != th->source)) {
                set_tp_port(skb, &th->source, src, &th->check);
                flow_key->tp.src = src;
        }
        dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
        if (likely(dst != th->dest)) {
                set_tp_port(skb, &th->dest, dst, &th->check);
                flow_key->tp.dst = dst;
        }
        skb_clear_hash(skb);

        return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_sctp *key,
                    const struct ovs_key_sctp *mask)
{
        unsigned int sctphoff = skb_transport_offset(skb);
        struct sctphdr *sh;
        __le32 old_correct_csum, new_csum, old_csum;
        int err;

        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;

        sh = sctp_hdr(skb);
        old_csum = sh->checksum;
        old_correct_csum = sctp_compute_cksum(skb, sctphoff);

        sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
        sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

        new_csum = sctp_compute_cksum(skb, sctphoff);

        /* Carry any checksum errors through. */
        sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

        skb_clear_hash(skb);
        ovs_ct_clear(skb, NULL);

        flow_key->tp.src = sh->source;
        flow_key->tp.dst = sh->dest;

        return 0;
}

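/* The XOR in set_sctp() deliberately preserves brokenness: with
 * error = old_csum ^ old_correct_csum (zero for a packet that arrived
 * with a valid CRC32c), the stored checksum becomes new_csum ^ error.
 * A packet that arrived with, say, one flipped checksum bit leaves with
 * the same bit flipped relative to the newly correct checksum, so the
 * rewrite never "repairs" a corrupt packet.
 */
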
static int ovs_vport_output(struct net *net, struct sock *sk,
                            struct sk_buff *skb)
{
        struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
        struct vport *vport = data->vport;

        if (skb_cow_head(skb, data->l2_len) < 0) {
                kfree_skb(skb);
                return -ENOMEM;
        }

        __skb_dst_copy(skb, data->dst);
        *OVS_CB(skb) = data->cb;
        skb->inner_protocol = data->inner_protocol;
        if (data->vlan_tci & VLAN_CFI_MASK)
                __vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
        else
                __vlan_hwaccel_clear_tag(skb);

        /* Reconstruct the MAC header. */
        skb_push(skb, data->l2_len);
        memcpy(skb->data, &data->l2_data, data->l2_len);
        skb_postpush_rcsum(skb, skb->data, data->l2_len);
        skb_reset_mac_header(skb);

        if (eth_p_mpls(skb->protocol)) {
                skb->inner_network_header = skb->network_header;
                skb_set_network_header(skb, data->network_offset);
                skb_reset_mac_len(skb);
        }

        ovs_vport_send(vport, skb, data->mac_proto);
        return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
        return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
        .family = AF_UNSPEC,
        .mtu = ovs_dst_get_mtu,
};

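/* Why the dst_ops stub above exists: ip_do_fragment() and the IPv6
 * fragmentation path size fragments from the MTU of the dst attached to
 * the skb.  The datapath has no real route at this point, so
 * ovs_fragment() below installs a stack-allocated, non-refcounted dst
 * whose only job is to report the egress device's MTU via
 * ovs_dst_get_mtu().
 */
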
/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
                         u16 orig_network_offset, u8 mac_proto)
{
        unsigned int hlen = skb_network_offset(skb);
        struct ovs_frag_data *data;

        data = this_cpu_ptr(&ovs_frag_data_storage);
        data->dst = skb->_skb_refdst;
        data->vport = vport;
        data->cb = *OVS_CB(skb);
        data->inner_protocol = skb->inner_protocol;
        data->network_offset = orig_network_offset;
        if (skb_vlan_tag_present(skb))
                data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
        else
                data->vlan_tci = 0;
        data->vlan_proto = skb->vlan_proto;
        data->mac_proto = mac_proto;
        data->l2_len = hlen;
        memcpy(&data->l2_data, skb->data, hlen);

        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
        skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
                         struct sk_buff *skb, u16 mru,
                         struct sw_flow_key *key)
{
        u16 orig_network_offset = 0;

        if (eth_p_mpls(skb->protocol)) {
                orig_network_offset = skb_network_offset(skb);
                skb->network_header = skb->inner_network_header;
        }

        if (skb_network_offset(skb) > MAX_L2_LEN) {
                OVS_NLERR(1, "L2 header too long to fragment");
                goto err;
        }

        if (key->eth.type == htons(ETH_P_IP)) {
                struct rtable ovs_rt = { 0 };
                unsigned long orig_dst;

                prepare_frag(vport, skb, orig_network_offset,
                             ovs_key_mac_proto(key));
                dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_rt.dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_rt.dst);
                IPCB(skb)->frag_max_size = mru;

                ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                unsigned long orig_dst;
                struct rt6_info ovs_rt;

                prepare_frag(vport, skb, orig_network_offset,
                             ovs_key_mac_proto(key));
                memset(&ovs_rt, 0, sizeof(ovs_rt));
                dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_rt.dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_rt.dst);
                IP6CB(skb)->frag_max_size = mru;

                ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else {
                WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
                          ovs_vport_name(vport), ntohs(key->eth.type), mru,
                          vport->dev->mtu);
                goto err;
        }

        return;
err:
        kfree_skb(skb);
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
                      struct sw_flow_key *key)
{
        struct vport *vport = ovs_vport_rcu(dp, out_port);

        if (likely(vport && netif_carrier_ok(vport->dev))) {
                u16 mru = OVS_CB(skb)->mru;
                u32 cutlen = OVS_CB(skb)->cutlen;

                if (unlikely(cutlen > 0)) {
                        if (skb->len - cutlen > ovs_mac_header_len(key))
                                pskb_trim(skb, skb->len - cutlen);
                        else
                                pskb_trim(skb, ovs_mac_header_len(key));
                }

                if (likely(!mru ||
                           (skb->len <= mru + vport->dev->hard_header_len))) {
                        ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
                } else if (mru <= vport->dev->mtu) {
                        struct net *net = read_pnet(&dp->net);

                        ovs_fragment(net, vport, skb, mru, key);
                } else {
                        kfree_skb(skb);
                }
        } else {
                kfree_skb(skb);
        }
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len,
                            uint32_t cutlen)
{
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        memset(&upcall, 0, sizeof(upcall));
        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.mru = OVS_CB(skb)->mru;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        if (dp->user_features &
                            OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
                                upcall.portid =
                                   ovs_dp_get_upcall_portid(dp,
                                                            smp_processor_id());
                        else
                                upcall.portid = nla_get_u32(a);
                        break;

                case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
                        /* Get egress tunnel info. */
                        struct vport *vport;

                        vport = ovs_vport_rcu(dp, nla_get_u32(a));
                        if (vport) {
                                int err;

                                err = dev_fill_metadata_dst(vport->dev, skb);
                                if (!err)
                                        upcall.egress_tun_info =
                                                skb_tunnel_info(skb);
                        }

                        break;
                }

                case OVS_USERSPACE_ATTR_ACTIONS: {
                        /* Include actions. */
                        upcall.actions = actions;
                        upcall.actions_len = actions_len;
                        break;
                }

                } /* End of switch. */
        }

        return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
                                     struct sw_flow_key *key,
                                     const struct nlattr *attr)
{
        /* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
        struct nlattr *actions = nla_data(attr);

        if (nla_len(actions))
                return clone_execute(dp, skb, key, 0, nla_data(actions),
                                     nla_len(actions), true, false);

        consume_skb(skb);
        return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
                  struct sw_flow_key *key, const struct nlattr *attr,
                  bool last)
{
        struct nlattr *actions;
        struct nlattr *sample_arg;
        int rem = nla_len(attr);
        const struct sample_arg *arg;
        bool clone_flow_key;

        /* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
        sample_arg = nla_data(attr);
        arg = nla_data(sample_arg);
        actions = nla_next(sample_arg, &rem);

        if ((arg->probability != U32_MAX) &&
            (!arg->probability || get_random_u32() > arg->probability)) {
                if (last)
                        consume_skb(skb);
                return 0;
        }

        clone_flow_key = !arg->exec;
        return clone_execute(dp, skb, key, 0, actions, rem, last,
                             clone_flow_key);
}

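/* 'arg->probability' is a fraction of U32_MAX (userspace encodes a
 * sampling rate p roughly as p * UINT32_MAX).  For example, a 25% rate
 * arrives as ~0x40000000 and the packet survives the check above when
 * get_random_u32() falls at or below it; U32_MAX is special-cased so a
 * 100% sample never consults the RNG.
 */
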
/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
                 struct sw_flow_key *key, const struct nlattr *attr,
                 bool last)
{
        struct nlattr *actions;
        struct nlattr *clone_arg;
        int rem = nla_len(attr);
        bool dont_clone_flow_key;

        /* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
        clone_arg = nla_data(attr);
        dont_clone_flow_key = nla_get_u32(clone_arg);
        actions = nla_next(clone_arg, &rem);

        return clone_execute(dp, skb, key, 0, actions, rem, last,
                             !dont_clone_flow_key);
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
                         const struct nlattr *attr)
{
        struct ovs_action_hash *hash_act = nla_data(attr);
        u32 hash = 0;

        /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
        hash = skb_get_hash(skb);
        hash = jhash_1word(hash, hash_act->hash_basis);
        if (!hash)
                hash = 0x1;

        key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
                              struct sw_flow_key *flow_key,
                              const struct nlattr *a)
{
        /* Only tunnel set execution is supported without a mask. */
        if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
                struct ovs_tunnel_info *tun = nla_data(a);

                skb_dst_drop(skb);
                dst_hold((struct dst_entry *)tun->tun_dst);
                skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
                return 0;
        }

        return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

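/* Layout assumed by get_mask(): a masked-set attribute carries the value
 * immediately followed by a mask of the same type, e.g. two consecutive
 * struct ovs_key_ipv4 for OVS_KEY_ATTR_IPV4.  Advancing the typed
 * pointer by one element therefore lands exactly on the mask half.
 */
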
static int execute_masked_set_action(struct sk_buff *skb,
                                     struct sw_flow_key *flow_key,
                                     const struct nlattr *a)
{
        int err = 0;

        switch (nla_type(a)) {
        case OVS_KEY_ATTR_PRIORITY:
                OVS_SET_MASKED(skb->priority, nla_get_u32(a),
                               *get_mask(a, u32 *));
                flow_key->phy.priority = skb->priority;
                break;

        case OVS_KEY_ATTR_SKB_MARK:
                OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
                flow_key->phy.skb_mark = skb->mark;
                break;

        case OVS_KEY_ATTR_TUNNEL_INFO:
                /* Masked data not supported for tunnel. */
                err = -EINVAL;
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, flow_key, nla_data(a),
                                   get_mask(a, struct ovs_key_ethernet *));
                break;

        case OVS_KEY_ATTR_NSH:
                err = set_nsh(skb, flow_key, a);
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv4 *));
                break;

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv6 *));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_tcp *));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_udp *));
                break;

        case OVS_KEY_ATTR_SCTP:
                err = set_sctp(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_sctp *));
                break;

        case OVS_KEY_ATTR_MPLS:
                err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
                                                                    __be32 *));
                break;

        case OVS_KEY_ATTR_CT_STATE:
        case OVS_KEY_ATTR_CT_ZONE:
        case OVS_KEY_ATTR_CT_MARK:
        case OVS_KEY_ATTR_CT_LABELS:
        case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
        case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
                err = -EINVAL;
                break;
        }

        return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          struct sw_flow_key *key,
                          const struct nlattr *a, bool last)
{
        u32 recirc_id;

        if (!is_flow_key_valid(key)) {
                int err;

                err = ovs_flow_key_update(skb, key);
                if (err)
                        return err;
        }
        BUG_ON(!is_flow_key_valid(key));

        recirc_id = nla_get_u32(a);
        return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
                                 struct sw_flow_key *key,
                                 const struct nlattr *attr, bool last)
{
        struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
        const struct nlattr *actions, *cpl_arg;
        int len, max_len, rem = nla_len(attr);
        const struct check_pkt_len_arg *arg;
        bool clone_flow_key;

        /* The first netlink attribute in 'attr' is always
         * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
         */
        cpl_arg = nla_data(attr);
        arg = nla_data(cpl_arg);

        len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
        max_len = arg->pkt_len;

        if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
            len <= max_len) {
                /* Second netlink attribute in 'attr' is always
                 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
                 */
                actions = nla_next(cpl_arg, &rem);
                clone_flow_key = !arg->exec_for_lesser_equal;
        } else {
                /* Third netlink attribute in 'attr' is always
                 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
                 */
                actions = nla_next(cpl_arg, &rem);
                actions = nla_next(actions, &rem);
                clone_flow_key = !arg->exec_for_greater;
        }

        return clone_execute(dp, skb, key, 0, nla_data(actions),
                             nla_len(actions), last, clone_flow_key);
}

static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        if (skb->protocol == htons(ETH_P_IPV6)) {
                struct ipv6hdr *nh;

                err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                          sizeof(*nh));
                if (unlikely(err))
                        return err;

                nh = ipv6_hdr(skb);

                if (nh->hop_limit <= 1)
                        return -EHOSTUNREACH;

                key->ip.ttl = --nh->hop_limit;
        } else if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *nh;
                u8 old_ttl;

                err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                          sizeof(*nh));
                if (unlikely(err))
                        return err;

                nh = ip_hdr(skb);
                if (nh->ttl <= 1)
                        return -EHOSTUNREACH;

                old_ttl = nh->ttl--;
                csum_replace2(&nh->check, htons(old_ttl << 8),
                              htons(nh->ttl << 8));
                key->ip.ttl = nh->ttl;
        }
        return 0;
}

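/* execute_dec_ttl() signals a TTL/hop limit expiring as -EHOSTUNREACH
 * rather than as an ordinary failure; do_execute_actions() matches that
 * exact value and hands the packet to dec_ttl_exception_handler(), which
 * runs the nested OVS_DEC_TTL_ATTR_ACTION list instead of dropping it.
 */
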
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len)
{
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                if (trace_ovs_do_execute_action_enabled())
                        trace_ovs_do_execute_action(dp, skb, key, a, rem);

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT: {
                        int port = nla_get_u32(a);
                        struct sk_buff *clone;

                        /* Every output action needs a separate clone
                         * of 'skb'. In case the output action is the
                         * last action, cloning can be avoided.
                         */
                        if (nla_is_last(a, rem)) {
                                do_output(dp, skb, port, key);
                                /* 'skb' has been used for output.
                                 */
                                return 0;
                        }

                        clone = skb_clone(skb, GFP_ATOMIC);
                        if (clone)
                                do_output(dp, clone, port, key);
                        OVS_CB(skb)->cutlen = 0;
                        break;
                }

                case OVS_ACTION_ATTR_TRUNC: {
                        struct ovs_action_trunc *trunc = nla_data(a);

                        if (skb->len > trunc->max_len)
                                OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
                        break;
                }

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, key, a, attr,
                                         len, OVS_CB(skb)->cutlen);
                        OVS_CB(skb)->cutlen = 0;
                        break;

                case OVS_ACTION_ATTR_HASH:
                        execute_hash(skb, key, a);
                        break;

                case OVS_ACTION_ATTR_PUSH_MPLS: {
                        struct ovs_action_push_mpls *mpls = nla_data(a);

                        err = push_mpls(skb, key, mpls->mpls_lse,
                                        mpls->mpls_ethertype, skb->mac_len);
                        break;
                }
                case OVS_ACTION_ATTR_ADD_MPLS: {
                        struct ovs_action_add_mpls *mpls = nla_data(a);
                        __u16 mac_len = 0;

                        if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
                                mac_len = skb->mac_len;

                        err = push_mpls(skb, key, mpls->mpls_lse,
                                        mpls->mpls_ethertype, mac_len);
                        break;
                }
                case OVS_ACTION_ATTR_POP_MPLS:
                        err = pop_mpls(skb, key, nla_get_be16(a));
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb, key);
                        break;

                case OVS_ACTION_ATTR_RECIRC: {
                        bool last = nla_is_last(a, rem);

                        err = execute_recirc(dp, skb, key, a, last);
                        if (last) {
                                /* If this is the last action, the skb has
                                 * been consumed or freed.
                                 * Return immediately.
                                 */
                                return err;
                        }
                        break;
                }

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SET_MASKED:
                case OVS_ACTION_ATTR_SET_TO_MASKED:
                        err = execute_masked_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SAMPLE: {
                        bool last = nla_is_last(a, rem);

                        err = sample(dp, skb, key, a, last);
                        if (last)
                                return err;

                        break;
                }

                case OVS_ACTION_ATTR_CT:
                        if (!is_flow_key_valid(key)) {
                                err = ovs_flow_key_update(skb, key);
                                if (err)
                                        return err;
                        }

                        err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
                                             nla_data(a));

                        /* Hide stolen IP fragments from user space. */
                        if (err)
                                return err == -EINPROGRESS ? 0 : err;
                        break;

                case OVS_ACTION_ATTR_CT_CLEAR:
                        err = ovs_ct_clear(skb, key);
                        break;

                case OVS_ACTION_ATTR_PUSH_ETH:
                        err = push_eth(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_ETH:
                        err = pop_eth(skb, key);
                        break;

                case OVS_ACTION_ATTR_PUSH_NSH: {
                        u8 buffer[NSH_HDR_MAX_LEN];
                        struct nshhdr *nh = (struct nshhdr *)buffer;

                        err = nsh_hdr_from_nlattr(nla_data(a), nh,
                                                  NSH_HDR_MAX_LEN);
                        if (unlikely(err))
                                break;
                        err = push_nsh(skb, key, nh);
                        break;
                }

                case OVS_ACTION_ATTR_POP_NSH:
                        err = pop_nsh(skb, key);
                        break;

                case OVS_ACTION_ATTR_METER:
                        if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
                                consume_skb(skb);
                                return 0;
                        }
                        break;

                case OVS_ACTION_ATTR_CLONE: {
                        bool last = nla_is_last(a, rem);

                        err = clone(dp, skb, key, a, last);
                        if (last)
                                return err;

                        break;
                }

                case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
                        bool last = nla_is_last(a, rem);

                        err = execute_check_pkt_len(dp, skb, key, a, last);
                        if (last)
                                return err;

                        break;
                }

                case OVS_ACTION_ATTR_DEC_TTL:
                        err = execute_dec_ttl(skb, key);
                        if (err == -EHOSTUNREACH)
                                return dec_ttl_exception_handler(dp, skb,
                                                                 key, a);
                        break;
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        consume_skb(skb);
        return 0;
}

/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions can not be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
                         struct sw_flow_key *key, u32 recirc_id,
                         const struct nlattr *actions, int len,
                         bool last, bool clone_flow_key)
{
        struct deferred_action *da;
        struct sw_flow_key *clone;

        skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                /* Out of memory, skip this action.
                 */
                return 0;
        }

        /* When clone_flow_key is false, the 'key' will not be changed
         * by the actions, so the 'key' can be used directly.
         * Otherwise, try to clone key from the next recursion level of
         * 'flow_keys'. If clone is successful, execute the actions
         * without deferring.
         */
        clone = clone_flow_key ? clone_key(key) : key;
        if (clone) {
                int err = 0;

                if (actions) { /* Sample action */
                        if (clone_flow_key)
                                __this_cpu_inc(exec_actions_level);

                        err = do_execute_actions(dp, skb, clone,
                                                 actions, len);

                        if (clone_flow_key)
                                __this_cpu_dec(exec_actions_level);
                } else { /* Recirc action */
                        clone->recirc_id = recirc_id;
                        ovs_dp_process_packet(skb, clone);
                }
                return err;
        }

        /* Out of 'flow_keys' space. Defer actions */
        da = add_deferred_actions(skb, key, actions, len);
        if (da) {
                if (!actions) { /* Recirc action */
                        key = &da->pkt_key;
                        key->recirc_id = recirc_id;
                }
        } else {
                /* Out of per CPU action FIFO space. Drop the 'skb' and
                 * log an error.
                 */
                kfree_skb(skb);

                if (net_ratelimit()) {
                        if (actions) { /* Sample action */
                                pr_warn("%s: deferred action limit reached, drop sample action\n",
                                        ovs_dp_name(dp));
                        } else { /* Recirc action */
                                pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
                                        ovs_dp_name(dp), recirc_id);
                        }
                }
        }
        return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
        struct action_fifo *fifo = this_cpu_ptr(action_fifos);

        /* Do not touch the FIFO if there are no deferred actions. */
        if (action_fifo_is_empty(fifo))
                return;

        /* Finish executing all deferred actions. */
        do {
                struct deferred_action *da = action_fifo_get(fifo);
                struct sk_buff *skb = da->skb;
                struct sw_flow_key *key = &da->pkt_key;
                const struct nlattr *actions = da->actions;
                int actions_len = da->actions_len;

                if (actions)
                        do_execute_actions(dp, skb, key, actions, actions_len);
                else
                        ovs_dp_process_packet(skb, key);
        } while (!action_fifo_is_empty(fifo));

        /* Reset FIFO for the next packet. */
        action_fifo_init(fifo);
}

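/* Only the outermost ovs_execute_actions() call (level 1) drains the
 * FIFO; entries queued while draining are picked up by the same do/while
 * loop above, so stack depth stays bounded even though deferred work can
 * itself defer more work.
 */
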
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct sw_flow_actions *acts,
                        struct sw_flow_key *key)
{
        int err, level;

        level = __this_cpu_inc_return(exec_actions_level);
        if (unlikely(level > OVS_RECURSION_LIMIT)) {
                net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
                                     ovs_dp_name(dp));
                kfree_skb(skb);
                err = -ENETDOWN;
                goto out;
        }

        OVS_CB(skb)->acts_origlen = acts->orig_len;
        err = do_execute_actions(dp, skb, key,
                                 acts->actions, acts->actions_len);

        if (level == 1)
                process_deferred_actions(dp);

out:
        __this_cpu_dec(exec_actions_level);
        return err;
}

int action_fifos_init(void)
{
        action_fifos = alloc_percpu(struct action_fifo);
        if (!action_fifos)
                return -ENOMEM;

        flow_keys = alloc_percpu(struct action_flow_keys);
        if (!flow_keys) {
                free_percpu(action_fifos);
                return -ENOMEM;
        }

        return 0;
}

void action_fifos_exit(void)
{
        free_percpu(action_fifos);
        free_percpu(flow_keys);
}