1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2015 Nicira, Inc.
6 #include <linux/module.h>
7 #include <linux/openvswitch.h>
10 #include <linux/sctp.h>
11 #include <linux/static_key.h>
13 #include <net/genetlink.h>
14 #include <net/netfilter/nf_conntrack_core.h>
15 #include <net/netfilter/nf_conntrack_count.h>
16 #include <net/netfilter/nf_conntrack_helper.h>
17 #include <net/netfilter/nf_conntrack_labels.h>
18 #include <net/netfilter/nf_conntrack_seqadj.h>
19 #include <net/netfilter/nf_conntrack_timeout.h>
20 #include <net/netfilter/nf_conntrack_zones.h>
21 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
22 #include <net/ipv6_frag.h>
24 #if IS_ENABLED(CONFIG_NF_NAT)
25 #include <net/netfilter/nf_nat.h>
29 #include "conntrack.h"
31 #include "flow_netlink.h"
33 struct ovs_ct_len_tbl {
38 /* Metadata mark for masked write to conntrack mark */
44 /* Metadata label for masked write to conntrack label. */
46 struct ovs_key_ct_labels value;
47 struct ovs_key_ct_labels mask;
51 OVS_CT_NAT = 1 << 0, /* NAT for committed connections only. */
52 OVS_CT_SRC_NAT = 1 << 1, /* Source NAT for NEW connections. */
53 OVS_CT_DST_NAT = 1 << 2, /* Destination NAT for NEW connections. */
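/* These flags are OR'ed into the 3-bit 'nat' field of struct
 * ovs_conntrack_info below: parse_nat() sets OVS_CT_NAT together with
 * OVS_CT_SRC_NAT or OVS_CT_DST_NAT when a SRC/DST attribute is present,
 * and just OVS_CT_NAT otherwise (NAT existing connections only).
 */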
56 /* Conntrack action context for execution. */
57 struct ovs_conntrack_info {
58 struct nf_conntrack_helper *helper;
59 struct nf_conntrack_zone zone;
62 u8 nat : 3; /* enum ovs_ct_nat */
64 u8 have_eventmask : 1;
66 u32 eventmask; /* Mask of 1 << IPCT_*. */
68 struct md_labels labels;
69 char timeout[CTNL_TIMEOUT_NAME_MAX];
70 struct nf_ct_timeout *nf_ct_timeout;
71 #if IS_ENABLED(CONFIG_NF_NAT)
72 struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */
76 #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
77 #define OVS_CT_LIMIT_UNLIMITED 0
78 #define OVS_CT_LIMIT_DEFAULT OVS_CT_LIMIT_UNLIMITED
79 #define CT_LIMIT_HASH_BUCKETS 512
80 static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);
83 /* Elements in ovs_ct_limit_info->limits hash table */
84 struct hlist_node hlist_node;
90 struct ovs_ct_limit_info {
92 struct hlist_head *limits;
93 struct nf_conncount_data *data;
96 static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
97 [OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
101 static bool labels_nonzero(const struct ovs_key_ct_labels *labels);
103 static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);
105 static u16 key_to_nfproto(const struct sw_flow_key *key)
107 switch (ntohs(key->eth.type)) {
113 return NFPROTO_UNSPEC;
117 /* Map SKB connection state into the values used by flow definition. */
118 static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
120 u8 ct_state = OVS_CS_F_TRACKED;
123 case IP_CT_ESTABLISHED_REPLY:
124 case IP_CT_RELATED_REPLY:
125 ct_state |= OVS_CS_F_REPLY_DIR;
132 case IP_CT_ESTABLISHED:
133 case IP_CT_ESTABLISHED_REPLY:
134 ct_state |= OVS_CS_F_ESTABLISHED;
137 case IP_CT_RELATED_REPLY:
138 ct_state |= OVS_CS_F_RELATED;
141 ct_state |= OVS_CS_F_NEW;
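/* Summary of the mapping above: every tracked state gets OVS_CS_F_TRACKED;
 * the *_REPLY ctinfo values additionally set OVS_CS_F_REPLY_DIR, the
 * ESTABLISHED variants set OVS_CS_F_ESTABLISHED, the RELATED variants set
 * OVS_CS_F_RELATED, and IP_CT_NEW maps to OVS_CS_F_NEW.
 */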
150 static u32 ovs_ct_get_mark(const struct nf_conn *ct)
152 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
153 return ct ? ct->mark : 0;
159 /* Guard against conntrack labels max size shrinking below 128 bits. */
160 #if NF_CT_LABELS_MAX_SIZE < 16
161 #error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
164 static void ovs_ct_get_labels(const struct nf_conn *ct,
165 struct ovs_key_ct_labels *labels)
167 struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
170 memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
172 memset(labels, 0, OVS_CT_LABELS_LEN);
175 static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
176 const struct nf_conntrack_tuple *orig,
179 key->ct_orig_proto = orig->dst.protonum;
180 if (orig->dst.protonum == icmp_proto) {
181 key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
182 key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
184 key->ct.orig_tp.src = orig->src.u.all;
185 key->ct.orig_tp.dst = orig->dst.u.all;
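/* For ICMP/ICMPv6 the original-direction "ports" above hold the ICMP type
 * and code; for other protocols they are the transport ports taken from the
 * conntrack tuple.
 */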
189 static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
190 const struct nf_conntrack_zone *zone,
191 const struct nf_conn *ct)
193 key->ct_state = state;
194 key->ct_zone = zone->id;
195 key->ct.mark = ovs_ct_get_mark(ct);
196 ovs_ct_get_labels(ct, &key->ct.labels);
199 const struct nf_conntrack_tuple *orig;
201 /* Use the master if we have one. */
204 orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
206 /* IP version must match with the master connection. */
207 if (key->eth.type == htons(ETH_P_IP) &&
208 nf_ct_l3num(ct) == NFPROTO_IPV4) {
209 key->ipv4.ct_orig.src = orig->src.u3.ip;
210 key->ipv4.ct_orig.dst = orig->dst.u3.ip;
211 __ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
213 } else if (key->eth.type == htons(ETH_P_IPV6) &&
214 !sw_flow_key_is_nd(key) &&
215 nf_ct_l3num(ct) == NFPROTO_IPV6) {
216 key->ipv6.ct_orig.src = orig->src.u3.in6;
217 key->ipv6.ct_orig.dst = orig->dst.u3.in6;
218 __ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
222 /* Clear 'ct_orig_proto' to mark the non-existence of conntrack
223 * original direction key fields.
225 key->ct_orig_proto = 0;
228 /* Update 'key' based on skb->_nfct. If 'post_ct' is true, then OVS has
229 * previously sent the packet to conntrack via the ct action. If
230 * 'keep_nat_flags' is true, the existing NAT flags are retained, else they are
231 * initialized from the connection status.
233 static void ovs_ct_update_key(const struct sk_buff *skb,
234 const struct ovs_conntrack_info *info,
235 struct sw_flow_key *key, bool post_ct,
238 const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
239 enum ip_conntrack_info ctinfo;
243 ct = nf_ct_get(skb, &ctinfo);
245 state = ovs_ct_get_state(ctinfo);
246 /* All unconfirmed entries are NEW connections. */
247 if (!nf_ct_is_confirmed(ct))
248 state |= OVS_CS_F_NEW;
249 /* OVS persists the related flag for the duration of the
253 state |= OVS_CS_F_RELATED;
254 if (keep_nat_flags) {
255 state |= key->ct_state & OVS_CS_F_NAT_MASK;
257 if (ct->status & IPS_SRC_NAT)
258 state |= OVS_CS_F_SRC_NAT;
259 if (ct->status & IPS_DST_NAT)
260 state |= OVS_CS_F_DST_NAT;
262 zone = nf_ct_zone(ct);
263 } else if (post_ct) {
264 state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
268 __ovs_ct_update_key(key, state, zone, ct);
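/* If no conntrack entry is attached but 'post_ct' is set, conntrack has
 * already looked at (and rejected) this packet, so the key is marked as
 * tracked but invalid rather than untracked.
 */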
271 /* This is called to initialize CT key fields possibly coming in from the local
274 void ovs_ct_fill_key(const struct sk_buff *skb,
275 struct sw_flow_key *key,
278 ovs_ct_update_key(skb, NULL, key, post_ct, false);
281 int ovs_ct_put_key(const struct sw_flow_key *swkey,
282 const struct sw_flow_key *output, struct sk_buff *skb)
284 if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
287 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
288 nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
291 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
292 nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
295 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
296 nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
300 if (swkey->ct_orig_proto) {
301 if (swkey->eth.type == htons(ETH_P_IP)) {
302 struct ovs_key_ct_tuple_ipv4 orig;
304 memset(&orig, 0, sizeof(orig));
305 orig.ipv4_src = output->ipv4.ct_orig.src;
306 orig.ipv4_dst = output->ipv4.ct_orig.dst;
307 orig.src_port = output->ct.orig_tp.src;
308 orig.dst_port = output->ct.orig_tp.dst;
309 orig.ipv4_proto = output->ct_orig_proto;
311 if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
312 sizeof(orig), &orig))
314 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
315 struct ovs_key_ct_tuple_ipv6 orig;
317 memset(&orig, 0, sizeof(orig));
318 memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
319 sizeof(orig.ipv6_src));
320 memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
321 sizeof(orig.ipv6_dst));
322 orig.src_port = output->ct.orig_tp.src;
323 orig.dst_port = output->ct.orig_tp.dst;
324 orig.ipv6_proto = output->ct_orig_proto;
326 if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
327 sizeof(orig), &orig))
335 static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
336 u32 ct_mark, u32 mask)
338 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
341 new_mark = ct_mark | (ct->mark & ~(mask));
342 if (ct->mark != new_mark) {
344 if (nf_ct_is_confirmed(ct))
345 nf_conntrack_event_cache(IPCT_MARK, ct);
346 key->ct.mark = new_mark;
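/* Masked write: only the bits covered by 'mask' are taken from 'ct_mark',
 * e.g. value 0x5 with mask 0xff turns an existing mark of 0xab00 into
 * 0xab05.  IPCT_MARK is added to the event cache only for already-confirmed
 * entries.
 */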
355 static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
357 struct nf_conn_labels *cl;
359 cl = nf_ct_labels_find(ct);
361 nf_ct_labels_ext_add(ct);
362 cl = nf_ct_labels_find(ct);
368 /* Initialize labels for a new, yet to be committed conntrack entry. Note that
369 * since the new connection is not yet confirmed, and thus no-one else has
370 * access to its labels, we simply write them over.
372 static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
373 const struct ovs_key_ct_labels *labels,
374 const struct ovs_key_ct_labels *mask)
376 struct nf_conn_labels *cl, *master_cl;
377 bool have_mask = labels_nonzero(mask);
379 /* Inherit master's labels to the related connection? */
380 master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;
382 if (!master_cl && !have_mask)
383 return 0; /* Nothing to do. */
385 cl = ovs_ct_get_conn_labels(ct);
389 /* Inherit the master's labels, if any. */
394 u32 *dst = (u32 *)cl->bits;
397 for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
398 dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
399 (labels->ct_labels_32[i]
400 & mask->ct_labels_32[i]);
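/* Same masked-write pattern as ovs_ct_set_mark(), applied 32 bits at a time
 * across the 128-bit label field: bits covered by the mask come from
 * 'labels', everything else keeps its current (possibly inherited) value.
 */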
403 /* Labels are included in the IPCTNL_MSG_CT_NEW event only if the
404 * IPCT_LABEL bit is set in the event cache.
406 nf_conntrack_event_cache(IPCT_LABEL, ct);
408 memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);
413 static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
414 const struct ovs_key_ct_labels *labels,
415 const struct ovs_key_ct_labels *mask)
417 struct nf_conn_labels *cl;
420 cl = ovs_ct_get_conn_labels(ct);
424 err = nf_connlabels_replace(ct, labels->ct_labels_32,
426 OVS_CT_LABELS_LEN_32);
430 memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);
435 /* 'skb' should already be pulled to nh_ofs. */
436 static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
438 const struct nf_conntrack_helper *helper;
439 const struct nf_conn_help *help;
440 enum ip_conntrack_info ctinfo;
441 unsigned int protoff;
445 ct = nf_ct_get(skb, &ctinfo);
446 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
449 help = nfct_help(ct);
453 helper = rcu_dereference(help->helper);
459 protoff = ip_hdrlen(skb);
462 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
466 ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
468 if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
469 pr_debug("proto header not found\n");
476 WARN_ONCE(1, "helper invoked on non-IP family!");
480 err = helper->help(skb, protoff, ct, ctinfo);
481 if (err != NF_ACCEPT)
484 /* Adjust seqs after helper. This is needed due to some helpers (e.g.,
485 * FTP with NAT) adjusting the TCP payload size when mangling IP
486 * addresses and/or port numbers in the text-based control connection.
488 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
489 !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
494 /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
495 * value if 'skb' is freed.
497 static int handle_fragments(struct net *net, struct sw_flow_key *key,
498 u16 zone, struct sk_buff *skb)
500 struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
503 if (key->eth.type == htons(ETH_P_IP)) {
504 enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
506 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
507 err = ip_defrag(net, skb, user);
511 ovs_cb.mru = IPCB(skb)->frag_max_size;
512 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
513 } else if (key->eth.type == htons(ETH_P_IPV6)) {
514 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
516 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
517 err = nf_ct_frag6_gather(net, skb, user);
519 if (err != -EINPROGRESS)
524 key->ip.proto = ipv6_hdr(skb)->nexthdr;
525 ovs_cb.mru = IP6CB(skb)->frag_max_size;
529 return -EPFNOSUPPORT;
532 /* The key extracted from the fragment that completed this datagram
533 * likely didn't have an L4 header, so regenerate it.
535 ovs_flow_key_update_l3l4(skb, key);
537 key->ip.frag = OVS_FRAG_TYPE_NONE;
540 *OVS_CB(skb) = ovs_cb;
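/* At this point the skb holds the reassembled datagram: the L3/L4 parts of
 * the flow key were regenerated above, the fragment type is cleared, and
 * frag_max_size from the defrag path is preserved in ovs_cb.mru.
 */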
545 static struct nf_conntrack_expect *
546 ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
547 u16 proto, const struct sk_buff *skb)
549 struct nf_conntrack_tuple tuple;
550 struct nf_conntrack_expect *exp;
552 if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
555 exp = __nf_ct_expect_find(net, zone, &tuple);
557 struct nf_conntrack_tuple_hash *h;
559 /* Delete existing conntrack entry, if it clashes with the
560 * expectation. This can happen since conntrack ALGs do not
561 * check for clashes between (new) expectations and existing
562 * conntrack entries. nf_conntrack_in() will check the
563 * expectations only if a conntrack entry can not be found,
564 * which can lead to OVS finding the expectation (here) in the
565 * init direction, but which will not be removed by the
566 * nf_conntrack_in() call, if a matching conntrack entry is
567 * found instead. In this case all init direction packets
568 * would be reported as new related packets, while reply
569 * direction packets would be reported as unrelated
570 * established packets.
572 h = nf_conntrack_find_get(net, zone, &tuple);
574 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
576 nf_ct_delete(ct, 0, 0);
577 nf_conntrack_put(&ct->ct_general);
584 /* This replicates logic from nf_conntrack_core.c that is not exported. */
585 static enum ip_conntrack_info
586 ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h)
588 const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
590 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
591 return IP_CT_ESTABLISHED_REPLY;
592 /* Once we've had two way comms, always ESTABLISHED. */
593 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
594 return IP_CT_ESTABLISHED;
595 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
596 return IP_CT_RELATED;
600 /* Find an existing connection which this packet belongs to without
601 * re-attributing statistics or modifying the connection state. This allows an
602 * skb->_nfct lost due to an upcall to be recovered during actions execution.
604 * Must be called with rcu_read_lock.
606 * On success, populates skb->_nfct and returns the connection. Returns NULL
607 * if there is no existing entry.
609 static struct nf_conn *
610 ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
611 u8 l3num, struct sk_buff *skb, bool natted)
613 struct nf_conntrack_tuple tuple;
614 struct nf_conntrack_tuple_hash *h;
617 if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
619 pr_debug("ovs_ct_find_existing: Can't get tuple\n");
623 /* Must invert the tuple if skb has been transformed by NAT. */
625 struct nf_conntrack_tuple inverse;
627 if (!nf_ct_invert_tuple(&inverse, &tuple)) {
628 pr_debug("ovs_ct_find_existing: Inversion failed!\n");
634 /* look for tuple match */
635 h = nf_conntrack_find_get(net, zone, &tuple);
637 return NULL; /* Not found. */
639 ct = nf_ct_tuplehash_to_ctrack(h);
641 /* Inverted packet tuple matches the reverse direction conntrack tuple,
642 * select the other tuplehash to get the right 'ctinfo' bits for this
646 h = &ct->tuplehash[!h->tuple.dst.dir];
648 nf_ct_set(skb, ct, ovs_ct_get_info(h));
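/* ovs_ct_get_info() recomputes the ctinfo from the tuplehash direction so
 * the recovered skb->_nfct carries the same state a normal conntrack lookup
 * would have attached.
 */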
653 struct nf_conn *ovs_ct_executed(struct net *net,
654 const struct sw_flow_key *key,
655 const struct ovs_conntrack_info *info,
659 struct nf_conn *ct = NULL;
661 /* If no ct, check if we have evidence that an existing conntrack entry
662 * might be found for this skb. This happens when we lose a skb->_nfct
663 * due to an upcall, or if the direction is being forced. If the
664 * connection was not confirmed, it is not cached and needs to be run
665 * through conntrack again.
667 *ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
668 !(key->ct_state & OVS_CS_F_INVALID) &&
669 (key->ct_zone == info->zone.id);
671 if (*ct_executed || (!key->ct_state && info->force)) {
672 ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
680 /* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
681 static bool skb_nfct_cached(struct net *net,
682 const struct sw_flow_key *key,
683 const struct ovs_conntrack_info *info,
686 enum ip_conntrack_info ctinfo;
688 bool ct_executed = true;
690 ct = nf_ct_get(skb, &ctinfo);
692 ct = ovs_ct_executed(net, key, info, skb, &ct_executed);
695 nf_ct_get(skb, &ctinfo);
699 if (!net_eq(net, read_pnet(&ct->ct_net)))
701 if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
704 struct nf_conn_help *help;
706 help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
707 if (help && rcu_access_pointer(help->helper) != info->helper)
710 if (info->nf_ct_timeout) {
711 struct nf_conn_timeout *timeout_ext;
713 timeout_ext = nf_ct_timeout_find(ct);
714 if (!timeout_ext || info->nf_ct_timeout !=
715 rcu_dereference(timeout_ext->timeout))
718 /* Force conntrack entry direction to the current packet? */
719 if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
720 /* Delete the conntrack entry if confirmed, else just release
723 if (nf_ct_is_confirmed(ct))
724 nf_ct_delete(ct, 0, 0);
726 nf_conntrack_put(&ct->ct_general);
727 nf_ct_set(skb, NULL, 0);
734 #if IS_ENABLED(CONFIG_NF_NAT)
735 /* Modelled after nf_nat_ipv[46]_fn().
736 * range is only used for new, uninitialized NAT state.
737 * Returns either NF_ACCEPT or NF_DROP.
739 static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
740 enum ip_conntrack_info ctinfo,
741 const struct nf_nat_range2 *range,
742 enum nf_nat_manip_type maniptype)
744 int hooknum, nh_off, err = NF_ACCEPT;
746 nh_off = skb_network_offset(skb);
747 skb_pull_rcsum(skb, nh_off);
749 /* See HOOK2MANIP(). */
750 if (maniptype == NF_NAT_MANIP_SRC)
751 hooknum = NF_INET_LOCAL_IN; /* Source NAT */
753 hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */
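/* The hook numbers are chosen so that HOOK2MANIP() maps them back to the
 * requested maniptype inside the NAT core; OVS is not actually running from
 * these netfilter hooks here.
 */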
757 case IP_CT_RELATED_REPLY:
758 if (IS_ENABLED(CONFIG_NF_NAT) &&
759 skb->protocol == htons(ETH_P_IP) &&
760 ip_hdr(skb)->protocol == IPPROTO_ICMP) {
761 if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
765 } else if (IS_ENABLED(CONFIG_IPV6) &&
766 skb->protocol == htons(ETH_P_IPV6)) {
768 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
769 int hdrlen = ipv6_skip_exthdr(skb,
770 sizeof(struct ipv6hdr),
771 &nexthdr, &frag_off);
773 if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
774 if (!nf_nat_icmpv6_reply_translation(skb, ct,
782 /* Non-ICMP, fall thru to initialize if needed. */
785 /* Seen it before? This can happen for loopback, retrans,
788 if (!nf_nat_initialized(ct, maniptype)) {
789 /* Initialize according to the NAT action. */
790 err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
791 /* Action is set up to establish a new
794 ? nf_nat_setup_info(ct, range, maniptype)
795 : nf_nat_alloc_null_binding(ct, hooknum);
796 if (err != NF_ACCEPT)
801 case IP_CT_ESTABLISHED:
802 case IP_CT_ESTABLISHED_REPLY:
810 err = nf_nat_packet(ct, ctinfo, hooknum, skb);
812 skb_push_rcsum(skb, nh_off);
817 static void ovs_nat_update_key(struct sw_flow_key *key,
818 const struct sk_buff *skb,
819 enum nf_nat_manip_type maniptype)
821 if (maniptype == NF_NAT_MANIP_SRC) {
824 key->ct_state |= OVS_CS_F_SRC_NAT;
825 if (key->eth.type == htons(ETH_P_IP))
826 key->ipv4.addr.src = ip_hdr(skb)->saddr;
827 else if (key->eth.type == htons(ETH_P_IPV6))
828 memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
829 sizeof(key->ipv6.addr.src));
833 if (key->ip.proto == IPPROTO_UDP)
834 src = udp_hdr(skb)->source;
835 else if (key->ip.proto == IPPROTO_TCP)
836 src = tcp_hdr(skb)->source;
837 else if (key->ip.proto == IPPROTO_SCTP)
838 src = sctp_hdr(skb)->source;
846 key->ct_state |= OVS_CS_F_DST_NAT;
847 if (key->eth.type == htons(ETH_P_IP))
848 key->ipv4.addr.dst = ip_hdr(skb)->daddr;
849 else if (key->eth.type == htons(ETH_P_IPV6))
850 memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
851 sizeof(key->ipv6.addr.dst));
855 if (key->ip.proto == IPPROTO_UDP)
856 dst = udp_hdr(skb)->dest;
857 else if (key->ip.proto == IPPROTO_TCP)
858 dst = tcp_hdr(skb)->dest;
859 else if (key->ip.proto == IPPROTO_SCTP)
860 dst = sctp_hdr(skb)->dest;
868 /* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
869 static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
870 const struct ovs_conntrack_info *info,
871 struct sk_buff *skb, struct nf_conn *ct,
872 enum ip_conntrack_info ctinfo)
874 enum nf_nat_manip_type maniptype;
877 /* Add NAT extension if not confirmed yet. */
878 if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
879 return NF_ACCEPT; /* Can't NAT. */
881 /* Determine NAT type.
882 * Check if the NAT type can be deduced from the tracked connection.
883 * Make sure new expected connections (IP_CT_RELATED) are NATted only
886 if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
887 ct->status & IPS_NAT_MASK &&
888 (ctinfo != IP_CT_RELATED || info->commit)) {
889 /* NAT an established or related connection like before. */
890 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
891 /* This is the REPLY direction for a connection
892 * for which NAT was applied in the forward
893 * direction. Do the reverse NAT.
895 maniptype = ct->status & IPS_SRC_NAT
896 ? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
898 maniptype = ct->status & IPS_SRC_NAT
899 ? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
900 } else if (info->nat & OVS_CT_SRC_NAT) {
901 maniptype = NF_NAT_MANIP_SRC;
902 } else if (info->nat & OVS_CT_DST_NAT) {
903 maniptype = NF_NAT_MANIP_DST;
905 return NF_ACCEPT; /* Connection is not NATed. */
907 err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
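/* If the connection has both SRC and DST NAT set up, run the packet through
 * ovs_ct_nat_execute() a second time with the opposite maniptype so both
 * halves of the mapping are applied; with DST NAT only, original-direction
 * packets still get a null source binding allocated.
 */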
909 if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
910 if (ct->status & IPS_SRC_NAT) {
911 if (maniptype == NF_NAT_MANIP_SRC)
912 maniptype = NF_NAT_MANIP_DST;
914 maniptype = NF_NAT_MANIP_SRC;
916 err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
918 } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
919 err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL,
924 /* Mark NAT done if successful and update the flow key. */
925 if (err == NF_ACCEPT)
926 ovs_nat_update_key(key, skb, maniptype);
930 #else /* !CONFIG_NF_NAT */
931 static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
932 const struct ovs_conntrack_info *info,
933 struct sk_buff *skb, struct nf_conn *ct,
934 enum ip_conntrack_info ctinfo)
940 /* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
941 * not done already. Update key with new CT state after passing the packet
943 * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
944 * set to NULL and 0 will be returned.
946 static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
947 const struct ovs_conntrack_info *info,
950 /* If we are recirculating packets to match on conntrack fields and
951 * committing with a separate conntrack action, then we don't need to
952 * actually run the packet through conntrack twice unless it's for a
955 bool cached = skb_nfct_cached(net, key, info, skb);
956 enum ip_conntrack_info ctinfo;
960 struct nf_hook_state state = {
961 .hook = NF_INET_PRE_ROUTING,
965 struct nf_conn *tmpl = info->ct;
968 /* Associate skb with specified zone. */
970 nf_conntrack_put(skb_nfct(skb));
971 nf_conntrack_get(&tmpl->ct_general);
972 nf_ct_set(skb, tmpl, IP_CT_NEW);
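/* Attaching the zone template before nf_conntrack_in() makes the lookup run
 * in the zone configured for this ct action instead of the default zone;
 * the template also carries the timeout policy set in ovs_ct_copy_action().
 */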
975 err = nf_conntrack_in(skb, &state);
976 if (err != NF_ACCEPT)
979 /* Clear CT state NAT flags to mark that we have not yet done
980 * NAT after the nf_conntrack_in() call. We can actually clear
981 * the whole state, as it will be re-initialized below.
985 /* Update the key, but keep the NAT flags. */
986 ovs_ct_update_key(skb, info, key, true, true);
989 ct = nf_ct_get(skb, &ctinfo);
991 bool add_helper = false;
993 /* Packets starting a new connection must be NATted before the
994 * helper, so that the helper knows about the NAT. We enforce
995 * this by delaying both NAT and helper calls for unconfirmed
996 * connections until the committing CT action. For later
997 * packets NAT and Helper may be called in either order.
999 * NAT will be done only if the CT action has NAT, and only
1000 * once per packet (per zone), as guarded by the NAT bits in
1001 * the key->ct_state.
1003 if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
1004 (nf_ct_is_confirmed(ct) || info->commit) &&
1005 ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
1009 /* Userspace may decide to perform a ct lookup without a helper
1010 * specified followed by a (recirculate and) commit with one,
1011 * or attach a helper in a later commit. Therefore, for
1012 * connections which we will commit, we may need to attach
1015 if (info->commit && info->helper && !nfct_help(ct)) {
1016 int err = __nf_ct_try_assign_helper(ct, info->ct,
1022 /* helper installed, add seqadj if NAT is required */
1023 if (info->nat && !nfct_seqadj(ct)) {
1024 if (!nfct_seqadj_ext_add(ct))
1029 /* Call the helper only if:
1030 * - nf_conntrack_in() was executed above ("!cached") or a
1031 * helper was just attached ("add_helper") for a confirmed
1033 * - When committing an unconfirmed connection.
1035 if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
1037 ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
1041 if (nf_ct_protonum(ct) == IPPROTO_TCP &&
1042 nf_ct_is_confirmed(ct) && nf_conntrack_tcp_established(ct)) {
1043 /* Be liberal for tcp packets so that out-of-window
1044 * packets are not marked invalid.
1046 nf_ct_set_tcp_be_liberal(ct);
1053 /* Lookup connection and read fields into key. */
1054 static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
1055 const struct ovs_conntrack_info *info,
1056 struct sk_buff *skb)
1058 struct nf_conntrack_expect *exp;
1060 /* If we pass an expected packet through nf_conntrack_in() the
1061 * expectation is typically removed, but the packet could still be
1062 * lost in upcall processing. To prevent this from happening we
1063 * perform an explicit expectation lookup. Expected connections are
1064 * always new, and will be passed through conntrack only when they are
1065 * committed, as it is OK to remove the expectation at that time.
1067 exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
1071 /* NOTE: New connections are NATted and Helped only when
1072 * committed, so we are not calling into NAT here.
1074 state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
1075 __ovs_ct_update_key(key, state, &info->zone, exp->master);
1080 err = __ovs_ct_lookup(net, key, info, skb);
1084 ct = (struct nf_conn *)skb_nfct(skb);
1086 nf_ct_deliver_cached_events(ct);
1092 static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
1096 for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
1097 if (labels->ct_labels_32[i])
1103 #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
1104 static struct hlist_head *ct_limit_hash_bucket(
1105 const struct ovs_ct_limit_info *info, u16 zone)
1107 return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
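/* CT_LIMIT_HASH_BUCKETS is a power of two, so masking the zone id picks the
 * bucket without a modulo.
 */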
1110 /* Call with ovs_mutex */
1111 static void ct_limit_set(const struct ovs_ct_limit_info *info,
1112 struct ovs_ct_limit *new_ct_limit)
1114 struct ovs_ct_limit *ct_limit;
1115 struct hlist_head *head;
1117 head = ct_limit_hash_bucket(info, new_ct_limit->zone);
1118 hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
1119 if (ct_limit->zone == new_ct_limit->zone) {
1120 hlist_replace_rcu(&ct_limit->hlist_node,
1121 &new_ct_limit->hlist_node);
1122 kfree_rcu(ct_limit, rcu);
1127 hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
1130 /* Call with ovs_mutex */
1131 static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
1133 struct ovs_ct_limit *ct_limit;
1134 struct hlist_head *head;
1135 struct hlist_node *n;
1137 head = ct_limit_hash_bucket(info, zone);
1138 hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
1139 if (ct_limit->zone == zone) {
1140 hlist_del_rcu(&ct_limit->hlist_node);
1141 kfree_rcu(ct_limit, rcu);
1147 /* Call with RCU read lock */
1148 static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
1150 struct ovs_ct_limit *ct_limit;
1151 struct hlist_head *head;
1153 head = ct_limit_hash_bucket(info, zone);
1154 hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
1155 if (ct_limit->zone == zone)
1156 return ct_limit->limit;
1159 return info->default_limit;
1162 static int ovs_ct_check_limit(struct net *net,
1163 const struct ovs_conntrack_info *info,
1164 const struct nf_conntrack_tuple *tuple)
1166 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
1167 const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
1168 u32 per_zone_limit, connections;
1171 conncount_key = info->zone.id;
1173 per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
1174 if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
1177 connections = nf_conncount_count(net, ct_limit_info->data,
1178 &conncount_key, tuple, &info->zone);
1179 if (connections > per_zone_limit)
1186 /* Lookup connection and confirm if unconfirmed. */
1187 static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
1188 const struct ovs_conntrack_info *info,
1189 struct sk_buff *skb)
1191 enum ip_conntrack_info ctinfo;
1195 err = __ovs_ct_lookup(net, key, info, skb);
1199 /* The connection could be invalid, in which case this is a no-op. */
1200 ct = nf_ct_get(skb, &ctinfo);
1204 #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
1205 if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
1206 if (!nf_ct_is_confirmed(ct)) {
1207 err = ovs_ct_check_limit(net, info,
1208 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
1210 net_warn_ratelimited("openvswitch: zone: %u "
1211 "exceeds conntrack limit\n",
1219 /* Set the conntrack event mask if given. NEW and DELETE events have
1220 * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
1221 * typically would receive many kinds of updates. Setting the event
1222 * mask allows those events to be filtered. The set event mask will
1223 * remain in effect for the lifetime of the connection unless changed
1224 * by a further CT action with both the commit flag and the eventmask
1226 if (info->have_eventmask) {
1227 struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);
1230 cache->ctmask = info->eventmask;
1233 /* Apply changes before confirming the connection so that the initial
1234 * conntrack NEW netlink event carries the values given in the CT
1237 if (info->mark.mask) {
1238 err = ovs_ct_set_mark(ct, key, info->mark.value,
1243 if (!nf_ct_is_confirmed(ct)) {
1244 err = ovs_ct_init_labels(ct, key, &info->labels.value,
1245 &info->labels.mask);
1248 } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
1249 labels_nonzero(&info->labels.mask)) {
1250 err = ovs_ct_set_labels(ct, key, &info->labels.value,
1251 &info->labels.mask);
1255 /* This will take care of sending queued events even if the connection
1256 * is already confirmed.
1258 if (nf_conntrack_confirm(skb) != NF_ACCEPT)
1264 /* Trim the skb to the length specified by the IP/IPv6 header,
1265 * removing any trailing lower-layer padding. This prepares the skb
1266 * for higher-layer processing that assumes skb->len excludes padding
1267 * (such as nf_ip_checksum). The caller needs to pull the skb to the
1268 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
1270 static int ovs_skb_network_trim(struct sk_buff *skb)
1275 switch (skb->protocol) {
1276 case htons(ETH_P_IP):
1277 len = ntohs(ip_hdr(skb)->tot_len);
1279 case htons(ETH_P_IPV6):
1280 len = sizeof(struct ipv6hdr)
1281 + ntohs(ipv6_hdr(skb)->payload_len);
1287 err = pskb_trim_rcsum(skb, len);
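/* pskb_trim_rcsum() both drops the trailing padding and adjusts the skb
 * checksum bookkeeping for the shortened data.
 */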
1294 /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
1295 * value if 'skb' is freed.
1297 int ovs_ct_execute(struct net *net, struct sk_buff *skb,
1298 struct sw_flow_key *key,
1299 const struct ovs_conntrack_info *info)
1304 /* The conntrack module expects to be working at L3. */
1305 nh_ofs = skb_network_offset(skb);
1306 skb_pull_rcsum(skb, nh_ofs);
1308 err = ovs_skb_network_trim(skb);
1312 if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
1313 err = handle_fragments(net, key, info->zone.id, skb);
1319 err = ovs_ct_commit(net, key, info, skb);
1321 err = ovs_ct_lookup(net, key, info, skb);
1323 skb_push_rcsum(skb, nh_ofs);
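/* Restore the headers pulled off at the top of this function so the rest of
 * the action pipeline sees the packet from the L2 header again.
 */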
1329 int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
1331 nf_conntrack_put(skb_nfct(skb));
1332 nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
1333 ovs_ct_fill_key(skb, key, false);
1338 static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
1339 const struct sw_flow_key *key, bool log)
1341 struct nf_conntrack_helper *helper;
1342 struct nf_conn_help *help;
1345 helper = nf_conntrack_helper_try_module_get(name, info->family,
1348 OVS_NLERR(log, "Unknown helper \"%s\"", name);
1352 help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL);
1354 nf_conntrack_helper_put(helper);
1358 #if IS_ENABLED(CONFIG_NF_NAT)
1360 ret = nf_nat_helper_try_module_get(name, info->family,
1363 nf_conntrack_helper_put(helper);
1364 OVS_NLERR(log, "Failed to load \"%s\" NAT helper, error: %d",
1370 rcu_assign_pointer(help->helper, helper);
1371 info->helper = helper;
1375 #if IS_ENABLED(CONFIG_NF_NAT)
1376 static int parse_nat(const struct nlattr *attr,
1377 struct ovs_conntrack_info *info, bool log)
1381 bool have_ip_max = false;
1382 bool have_proto_max = false;
1383 bool ip_vers = (info->family == NFPROTO_IPV6);
1385 nla_for_each_nested(a, attr, rem) {
1386 static const int ovs_nat_attr_lens[OVS_NAT_ATTR_MAX + 1][2] = {
1387 [OVS_NAT_ATTR_SRC] = {0, 0},
1388 [OVS_NAT_ATTR_DST] = {0, 0},
1389 [OVS_NAT_ATTR_IP_MIN] = {sizeof(struct in_addr),
1390 sizeof(struct in6_addr)},
1391 [OVS_NAT_ATTR_IP_MAX] = {sizeof(struct in_addr),
1392 sizeof(struct in6_addr)},
1393 [OVS_NAT_ATTR_PROTO_MIN] = {sizeof(u16), sizeof(u16)},
1394 [OVS_NAT_ATTR_PROTO_MAX] = {sizeof(u16), sizeof(u16)},
1395 [OVS_NAT_ATTR_PERSISTENT] = {0, 0},
1396 [OVS_NAT_ATTR_PROTO_HASH] = {0, 0},
1397 [OVS_NAT_ATTR_PROTO_RANDOM] = {0, 0},
1399 int type = nla_type(a);
1401 if (type > OVS_NAT_ATTR_MAX) {
1402 OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)",
1403 type, OVS_NAT_ATTR_MAX);
1407 if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) {
1408 OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)",
1410 ovs_nat_attr_lens[type][ip_vers]);
1415 case OVS_NAT_ATTR_SRC:
1416 case OVS_NAT_ATTR_DST:
1418 OVS_NLERR(log, "Only one type of NAT may be specified");
1421 info->nat |= OVS_CT_NAT;
1422 info->nat |= ((type == OVS_NAT_ATTR_SRC)
1423 ? OVS_CT_SRC_NAT : OVS_CT_DST_NAT);
1426 case OVS_NAT_ATTR_IP_MIN:
1427 nla_memcpy(&info->range.min_addr, a,
1428 sizeof(info->range.min_addr));
1429 info->range.flags |= NF_NAT_RANGE_MAP_IPS;
1432 case OVS_NAT_ATTR_IP_MAX:
1434 nla_memcpy(&info->range.max_addr, a,
1435 sizeof(info->range.max_addr));
1436 info->range.flags |= NF_NAT_RANGE_MAP_IPS;
1439 case OVS_NAT_ATTR_PROTO_MIN:
1440 info->range.min_proto.all = htons(nla_get_u16(a));
1441 info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
1444 case OVS_NAT_ATTR_PROTO_MAX:
1445 have_proto_max = true;
1446 info->range.max_proto.all = htons(nla_get_u16(a));
1447 info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
1450 case OVS_NAT_ATTR_PERSISTENT:
1451 info->range.flags |= NF_NAT_RANGE_PERSISTENT;
1454 case OVS_NAT_ATTR_PROTO_HASH:
1455 info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
1458 case OVS_NAT_ATTR_PROTO_RANDOM:
1459 info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
1463 OVS_NLERR(log, "Unknown nat attribute (%d)", type);
1469 OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem);
1473 /* Do not allow flags if no type is given. */
1474 if (info->range.flags) {
1476 "NAT flags may be given only when NAT range (SRC or DST) is also specified."
1480 info->nat = OVS_CT_NAT; /* NAT existing connections. */
1481 } else if (!info->commit) {
1483 "NAT attributes may be specified only when CT COMMIT flag is also specified."
1487 /* Allow missing IP_MAX. */
1488 if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
1489 memcpy(&info->range.max_addr, &info->range.min_addr,
1490 sizeof(info->range.max_addr));
1492 /* Allow missing PROTO_MAX. */
1493 if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
1495 info->range.max_proto.all = info->range.min_proto.all;
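/* With the defaults above, a single address or port supplied by userspace
 * behaves as a one-element range (min == max).
 */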
1501 static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
1502 [OVS_CT_ATTR_COMMIT] = { .minlen = 0, .maxlen = 0 },
1503 [OVS_CT_ATTR_FORCE_COMMIT] = { .minlen = 0, .maxlen = 0 },
1504 [OVS_CT_ATTR_ZONE] = { .minlen = sizeof(u16),
1505 .maxlen = sizeof(u16) },
1506 [OVS_CT_ATTR_MARK] = { .minlen = sizeof(struct md_mark),
1507 .maxlen = sizeof(struct md_mark) },
1508 [OVS_CT_ATTR_LABELS] = { .minlen = sizeof(struct md_labels),
1509 .maxlen = sizeof(struct md_labels) },
1510 [OVS_CT_ATTR_HELPER] = { .minlen = 1,
1511 .maxlen = NF_CT_HELPER_NAME_LEN },
1512 #if IS_ENABLED(CONFIG_NF_NAT)
1513 /* NAT length is checked when parsing the nested attributes. */
1514 [OVS_CT_ATTR_NAT] = { .minlen = 0, .maxlen = INT_MAX },
1516 [OVS_CT_ATTR_EVENTMASK] = { .minlen = sizeof(u32),
1517 .maxlen = sizeof(u32) },
1518 [OVS_CT_ATTR_TIMEOUT] = { .minlen = 1,
1519 .maxlen = CTNL_TIMEOUT_NAME_MAX },
1522 static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
1523 const char **helper, bool log)
1528 nla_for_each_nested(a, attr, rem) {
1529 int type = nla_type(a);
1533 if (type > OVS_CT_ATTR_MAX) {
1535 "Unknown conntrack attr (type=%d, max=%d)",
1536 type, OVS_CT_ATTR_MAX);
1540 maxlen = ovs_ct_attr_lens[type].maxlen;
1541 minlen = ovs_ct_attr_lens[type].minlen;
1542 if (nla_len(a) < minlen || nla_len(a) > maxlen) {
1544 "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
1545 type, nla_len(a), maxlen);
1550 case OVS_CT_ATTR_FORCE_COMMIT:
1553 case OVS_CT_ATTR_COMMIT:
1554 info->commit = true;
1556 #ifdef CONFIG_NF_CONNTRACK_ZONES
1557 case OVS_CT_ATTR_ZONE:
1558 info->zone.id = nla_get_u16(a);
1561 #ifdef CONFIG_NF_CONNTRACK_MARK
1562 case OVS_CT_ATTR_MARK: {
1563 struct md_mark *mark = nla_data(a);
1566 OVS_NLERR(log, "ct_mark mask cannot be 0");
1573 #ifdef CONFIG_NF_CONNTRACK_LABELS
1574 case OVS_CT_ATTR_LABELS: {
1575 struct md_labels *labels = nla_data(a);
1577 if (!labels_nonzero(&labels->mask)) {
1578 OVS_NLERR(log, "ct_labels mask cannot be 0");
1581 info->labels = *labels;
1585 case OVS_CT_ATTR_HELPER:
1586 *helper = nla_data(a);
1587 if (!memchr(*helper, '\0', nla_len(a))) {
1588 OVS_NLERR(log, "Invalid conntrack helper");
1592 #if IS_ENABLED(CONFIG_NF_NAT)
1593 case OVS_CT_ATTR_NAT: {
1594 int err = parse_nat(a, info, log);
1601 case OVS_CT_ATTR_EVENTMASK:
1602 info->have_eventmask = true;
1603 info->eventmask = nla_get_u32(a);
1605 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1606 case OVS_CT_ATTR_TIMEOUT:
1607 memcpy(info->timeout, nla_data(a), nla_len(a));
1608 if (!memchr(info->timeout, '\0', nla_len(a))) {
1609 OVS_NLERR(log, "Invalid conntrack timeout");
1616 OVS_NLERR(log, "Unknown conntrack attr (%d)",
1622 #ifdef CONFIG_NF_CONNTRACK_MARK
1623 if (!info->commit && info->mark.mask) {
1625 "Setting conntrack mark requires 'commit' flag.");
1629 #ifdef CONFIG_NF_CONNTRACK_LABELS
1630 if (!info->commit && labels_nonzero(&info->labels.mask)) {
1632 "Setting conntrack labels requires 'commit' flag.");
1637 OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
1644 bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
1646 if (attr == OVS_KEY_ATTR_CT_STATE)
1648 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
1649 attr == OVS_KEY_ATTR_CT_ZONE)
1651 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
1652 attr == OVS_KEY_ATTR_CT_MARK)
1654 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
1655 attr == OVS_KEY_ATTR_CT_LABELS) {
1656 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
1658 return ovs_net->xt_label;
1664 int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
1665 const struct sw_flow_key *key,
1666 struct sw_flow_actions **sfa, bool log)
1668 struct ovs_conntrack_info ct_info;
1669 const char *helper = NULL;
1673 family = key_to_nfproto(key);
1674 if (family == NFPROTO_UNSPEC) {
1675 OVS_NLERR(log, "ct family unspecified");
1679 memset(&ct_info, 0, sizeof(ct_info));
1680 ct_info.family = family;
1682 nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
1683 NF_CT_DEFAULT_ZONE_DIR, 0);
1685 err = parse_ct(attr, &ct_info, &helper, log);
1689 /* Set up template for tracking connections in specific zones. */
1690 ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
1692 OVS_NLERR(log, "Failed to allocate conntrack template");
1696 if (ct_info.timeout[0]) {
1697 if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
1699 pr_info_ratelimited("Failed to associated timeout "
1700 "policy `%s'\n", ct_info.timeout);
1702 ct_info.nf_ct_timeout = rcu_dereference(
1703 nf_ct_timeout_find(ct_info.ct)->timeout);
1708 err = ovs_ct_add_helper(&ct_info, helper, key, log);
1713 err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
1714 sizeof(ct_info), log);
1718 __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
1719 nf_conntrack_get(&ct_info.ct->ct_general);
1722 __ovs_ct_free_action(&ct_info);
1726 #if IS_ENABLED(CONFIG_NF_NAT)
1727 static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
1728 struct sk_buff *skb)
1730 struct nlattr *start;
1732 start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
1736 if (info->nat & OVS_CT_SRC_NAT) {
1737 if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
1739 } else if (info->nat & OVS_CT_DST_NAT) {
1740 if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
1746 if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
1747 if (IS_ENABLED(CONFIG_NF_NAT) &&
1748 info->family == NFPROTO_IPV4) {
1749 if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
1750 info->range.min_addr.ip) ||
1751 (info->range.max_addr.ip
1752 != info->range.min_addr.ip &&
1753 (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
1754 info->range.max_addr.ip))))
1756 } else if (IS_ENABLED(CONFIG_IPV6) &&
1757 info->family == NFPROTO_IPV6) {
1758 if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
1759 &info->range.min_addr.in6) ||
1760 (memcmp(&info->range.max_addr.in6,
1761 &info->range.min_addr.in6,
1762 sizeof(info->range.max_addr.in6)) &&
1763 (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
1764 &info->range.max_addr.in6))))
1770 if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
1771 (nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
1772 ntohs(info->range.min_proto.all)) ||
1773 (info->range.max_proto.all != info->range.min_proto.all &&
1774 nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
1775 ntohs(info->range.max_proto.all)))))
1778 if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
1779 nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
1781 if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
1782 nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
1784 if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
1785 nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
1788 nla_nest_end(skb, start);
1794 int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
1795 struct sk_buff *skb)
1797 struct nlattr *start;
1799 start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
1803 if (ct_info->commit && nla_put_flag(skb, ct_info->force
1804 ? OVS_CT_ATTR_FORCE_COMMIT
1805 : OVS_CT_ATTR_COMMIT))
1807 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
1808 nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
1810 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
1811 nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
1814 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
1815 labels_nonzero(&ct_info->labels.mask) &&
1816 nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
1819 if (ct_info->helper) {
1820 if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
1821 ct_info->helper->name))
1824 if (ct_info->have_eventmask &&
1825 nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
1827 if (ct_info->timeout[0]) {
1828 if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
1832 #if IS_ENABLED(CONFIG_NF_NAT)
1833 if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
1836 nla_nest_end(skb, start);
1841 void ovs_ct_free_action(const struct nlattr *a)
1843 struct ovs_conntrack_info *ct_info = nla_data(a);
1845 __ovs_ct_free_action(ct_info);
1848 static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
1850 if (ct_info->helper) {
1851 #if IS_ENABLED(CONFIG_NF_NAT)
1853 nf_nat_helper_put(ct_info->helper);
1855 nf_conntrack_helper_put(ct_info->helper);
1858 if (ct_info->timeout[0])
1859 nf_ct_destroy_timeout(ct_info->ct);
1860 nf_ct_tmpl_free(ct_info->ct);
1864 #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
1865 static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
1869 ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
1871 if (!ovs_net->ct_limit_info)
1874 ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
1875 ovs_net->ct_limit_info->limits =
1876 kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
1878 if (!ovs_net->ct_limit_info->limits) {
1879 kfree(ovs_net->ct_limit_info);
1883 for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
1884 INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);
1886 ovs_net->ct_limit_info->data =
1887 nf_conncount_init(net, NFPROTO_INET, sizeof(u32));
1889 if (IS_ERR(ovs_net->ct_limit_info->data)) {
1890 err = PTR_ERR(ovs_net->ct_limit_info->data);
1891 kfree(ovs_net->ct_limit_info->limits);
1892 kfree(ovs_net->ct_limit_info);
1893 pr_err("openvswitch: failed to init nf_conncount %d\n", err);
1899 static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
1901 const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
1904 nf_conncount_destroy(net, NFPROTO_INET, info->data);
1905 for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
1906 struct hlist_head *head = &info->limits[i];
1907 struct ovs_ct_limit *ct_limit;
1909 hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
1910 lockdep_ovsl_is_held())
1911 kfree_rcu(ct_limit, rcu);
1913 kfree(info->limits);
1917 static struct sk_buff *
1918 ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
1919 struct ovs_header **ovs_reply_header)
1921 struct ovs_header *ovs_header = info->userhdr;
1922 struct sk_buff *skb;
1924 skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1926 return ERR_PTR(-ENOMEM);
1928 *ovs_reply_header = genlmsg_put(skb, info->snd_portid,
1930 &dp_ct_limit_genl_family, 0, cmd);
1932 if (!*ovs_reply_header) {
1934 return ERR_PTR(-EMSGSIZE);
1936 (*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;
1941 static bool check_zone_id(int zone_id, u16 *pzone)
1943 if (zone_id >= 0 && zone_id <= 65535) {
1944 *pzone = (u16)zone_id;
1950 static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
1951 struct ovs_ct_limit_info *info)
1953 struct ovs_zone_limit *zone_limit;
1957 rem = NLA_ALIGN(nla_len(nla_zone_limit));
1958 zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
1960 while (rem >= sizeof(*zone_limit)) {
1961 if (unlikely(zone_limit->zone_id ==
1962 OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
1964 info->default_limit = zone_limit->limit;
1966 } else if (unlikely(!check_zone_id(
1967 zone_limit->zone_id, &zone))) {
1968 OVS_NLERR(true, "zone id is out of range");
1970 struct ovs_ct_limit *ct_limit;
1972 ct_limit = kmalloc(sizeof(*ct_limit), GFP_KERNEL);
1976 ct_limit->zone = zone;
1977 ct_limit->limit = zone_limit->limit;
1980 ct_limit_set(info, ct_limit);
1983 rem -= NLA_ALIGN(sizeof(*zone_limit));
1984 zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
1985 NLA_ALIGN(sizeof(*zone_limit)));
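/* The OVS_CT_LIMIT_ATTR_ZONE_LIMIT payload is a flat array of struct
 * ovs_zone_limit entries without per-entry netlink headers, hence the
 * manual NLA_ALIGN stepping here instead of nla_for_each_nested().
 */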
1989 OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);
1994 static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
1995 struct ovs_ct_limit_info *info)
1997 struct ovs_zone_limit *zone_limit;
2001 rem = NLA_ALIGN(nla_len(nla_zone_limit));
2002 zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
2004 while (rem >= sizeof(*zone_limit)) {
2005 if (unlikely(zone_limit->zone_id ==
2006 OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
2008 info->default_limit = OVS_CT_LIMIT_DEFAULT;
2010 } else if (unlikely(!check_zone_id(
2011 zone_limit->zone_id, &zone))) {
2012 OVS_NLERR(true, "zone id is out of range");
2015 ct_limit_del(info, zone);
2018 rem -= NLA_ALIGN(sizeof(*zone_limit));
2019 zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
2020 NLA_ALIGN(sizeof(*zone_limit)));
2024 OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);
2029 static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
2030 struct sk_buff *reply)
2032 struct ovs_zone_limit zone_limit = {
2033 .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
2034 .limit = info->default_limit,
2037 return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
2040 static int __ovs_ct_limit_get_zone_limit(struct net *net,
2041 struct nf_conncount_data *data,
2042 u16 zone_id, u32 limit,
2043 struct sk_buff *reply)
2045 struct nf_conntrack_zone ct_zone;
2046 struct ovs_zone_limit zone_limit;
2047 u32 conncount_key = zone_id;
2049 zone_limit.zone_id = zone_id;
2050 zone_limit.limit = limit;
2051 nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);
2053 zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
2055 return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
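/* Passing a NULL tuple to nf_conncount_count() reads the current count for
 * the zone key without adding a new connection to the list.
 */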
2058 static int ovs_ct_limit_get_zone_limit(struct net *net,
2059 struct nlattr *nla_zone_limit,
2060 struct ovs_ct_limit_info *info,
2061 struct sk_buff *reply)
2063 struct ovs_zone_limit *zone_limit;
2068 rem = NLA_ALIGN(nla_len(nla_zone_limit));
2069 zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
2071 while (rem >= sizeof(*zone_limit)) {
2072 if (unlikely(zone_limit->zone_id ==
2073 OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
2074 err = ovs_ct_limit_get_default_limit(info, reply);
2077 } else if (unlikely(!check_zone_id(zone_limit->zone_id,
2079 OVS_NLERR(true, "zone id is out of range");
2082 limit = ct_limit_get(info, zone);
2085 err = __ovs_ct_limit_get_zone_limit(
2086 net, info->data, zone, limit, reply);
2090 rem -= NLA_ALIGN(sizeof(*zone_limit));
2091 zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
2092 NLA_ALIGN(sizeof(*zone_limit)));
2096 OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);
2101 static int ovs_ct_limit_get_all_zone_limit(struct net *net,
2102 struct ovs_ct_limit_info *info,
2103 struct sk_buff *reply)
2105 struct ovs_ct_limit *ct_limit;
2106 struct hlist_head *head;
2109 err = ovs_ct_limit_get_default_limit(info, reply);
2114 for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
2115 head = &info->limits[i];
2116 hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
2117 err = __ovs_ct_limit_get_zone_limit(net, info->data,
2118 ct_limit->zone, ct_limit->limit, reply);
2129 static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
2131 struct nlattr **a = info->attrs;
2132 struct sk_buff *reply;
2133 struct ovs_header *ovs_reply_header;
2134 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
2135 struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
2138 reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
2141 return PTR_ERR(reply);
2143 if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
2148 err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
2153 static_branch_enable(&ovs_ct_limit_enabled);
2155 genlmsg_end(reply, ovs_reply_header);
2156 return genlmsg_reply(reply, info);
2163 static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
2165 struct nlattr **a = info->attrs;
2166 struct sk_buff *reply;
2167 struct ovs_header *ovs_reply_header;
2168 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
2169 struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
2172 reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
2175 return PTR_ERR(reply);
2177 if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
2182 err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
2187 genlmsg_end(reply, ovs_reply_header);
2188 return genlmsg_reply(reply, info);
2195 static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
2197 struct nlattr **a = info->attrs;
2198 struct nlattr *nla_reply;
2199 struct sk_buff *reply;
2200 struct ovs_header *ovs_reply_header;
2201 struct net *net = sock_net(skb->sk);
2202 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2203 struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
2206 reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
2209 return PTR_ERR(reply);
2211 nla_reply = nla_nest_start_noflag(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
2217 if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
2218 err = ovs_ct_limit_get_zone_limit(
2219 net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
2224 err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
2230 nla_nest_end(reply, nla_reply);
2231 genlmsg_end(reply, ovs_reply_header);
2232 return genlmsg_reply(reply, info);
2239 static const struct genl_small_ops ct_limit_genl_ops[] = {
2240 { .cmd = OVS_CT_LIMIT_CMD_SET,
2241 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2242 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
2244 .doit = ovs_ct_limit_cmd_set,
2246 { .cmd = OVS_CT_LIMIT_CMD_DEL,
2247 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2248 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
2250 .doit = ovs_ct_limit_cmd_del,
2252 { .cmd = OVS_CT_LIMIT_CMD_GET,
2253 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2254 .flags = 0, /* OK for unprivileged users. */
2255 .doit = ovs_ct_limit_cmd_get,
2259 static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
2260 .name = OVS_CT_LIMIT_MCGROUP,
2263 struct genl_family dp_ct_limit_genl_family __ro_after_init = {
2264 .hdrsize = sizeof(struct ovs_header),
2265 .name = OVS_CT_LIMIT_FAMILY,
2266 .version = OVS_CT_LIMIT_VERSION,
2267 .maxattr = OVS_CT_LIMIT_ATTR_MAX,
2268 .policy = ct_limit_policy,
2270 .parallel_ops = true,
2271 .small_ops = ct_limit_genl_ops,
2272 .n_small_ops = ARRAY_SIZE(ct_limit_genl_ops),
2273 .mcgrps = &ovs_ct_limit_multicast_group,
2275 .module = THIS_MODULE,
2279 int ovs_ct_init(struct net *net)
2281 unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
2282 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2284 if (nf_connlabels_get(net, n_bits - 1)) {
2285 ovs_net->xt_label = false;
2286 OVS_NLERR(true, "Failed to set connlabel length");
2288 ovs_net->xt_label = true;
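/* ovs_net->xt_label records whether the full-width connlabel area could be
 * reserved; ovs_ct_verify() checks it before accepting flows that match on
 * OVS_KEY_ATTR_CT_LABELS.
 */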
2291 #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2292 return ovs_ct_limit_init(net, ovs_net);
2298 void ovs_ct_exit(struct net *net)
2300 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2302 #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2303 ovs_ct_limit_exit(net, ovs_net);
2306 if (ovs_net->xt_label)
2307 nf_connlabels_put(net);