// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */
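
/* Illustrative usage (device name, priorities and zone number are
 * examples only): send untracked packets through conntrack zone 42,
 * then match established connections on a second chain:
 *
 *   tc filter add dev eth0 ingress prio 1 chain 0 proto ip flower \
 *       ct_state -trk action ct zone 42 pipe action goto chain 1
 *   tc filter add dev eth0 ingress prio 1 chain 1 proto ip flower \
 *       ct_state +trk+est action pass
 */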

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}
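
/* flow_action mangle masks appear to follow the pedit convention where
 * set mask bits select the bits that are preserved; the callers below
 * pass the bits they want to (re)write, hence the ~mask inversion in
 * tcf_ct_add_mangle_action().
 */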

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following nat helper functions check if the inverted reverse tuple
 * (target) is different from the current dir tuple - meaning nat for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action = tcf_ct_flow_table_fill_actions,
	.owner = THIS_MODULE,
};
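
/* Flow tables are keyed by conntrack zone and shared by all ct actions
 * in that zone: tcf_ct_flow_table_get() takes a reference on an
 * existing table or creates a new one, and the table is torn down via
 * RCU work once the last action referencing it is removed.
 */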
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct flow_block_cb *block_cb, *tmp_cb;
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	/* Remove any remaining callbacks before cleanup */
	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}
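
/* Only established connections with no helper and no sequence number
 * adjustments are offloaded to the flow table; for TCP the conntrack
 * state machine must also have reached ESTABLISHED.
 */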
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}
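
/* Fast path: before running a packet through nf_conntrack_in(), look it
 * up in the zone's flow table and, on a hit, attach the cached conntrack
 * entry to the skb directly.
 */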
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		goto drop_ct;
	if (nf_ct_zone(ct)->id != zone_id)
		goto drop_ct;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		goto drop_ct;
	}

	return true;

drop_ct:
	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	return false;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	mru = tc_skb_cb(skb)->mru;

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	if (err != -EINPROGRESS)
		tc_skb_cb(skb)->mru = mru;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}
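
/* ip_defrag()/nf_ct_frag6_gather() return -EINPROGRESS when the skb was
 * queued as a fragment and the caller no longer owns it; tcf_ct_act()
 * maps that to TC_ACT_STOLEN.
 */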
static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_ct_put(params->tmpl);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
	if (err == NF_ACCEPT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			tc_skb_cb(skb)->post_ct_snat = 1;
		if (maniptype == NF_NAT_MANIP_DST)
			tc_skb_cb(skb)->post_ct_dnat = 1;
	}
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	/* Labels are 128 bits wide: four u32 words, matching the 128-bit
	 * TCA_CT_LABELS netlink attributes.
	 */
	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;   /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ct_nat_execute(skb, ct, ctinfo, range,
					     maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ct_nat_execute(skb, ct, ctinfo, NULL,
					     NF_NAT_MANIP_SRC);
		}
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}
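
/* For a connection with both SNAT and DNAT set up, the second
 * ct_nat_execute() pass above applies the complementary manip type, so
 * a single ct action performs both translations in one invocation.
 */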
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		tc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_ct_put(ct);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}
		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out_clear;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	tc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	p->tmpl = tmpl;

	return 0;
}

static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup_params;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup_params:
	if (params->tmpl)
		nf_ct_put(params->tmpl);
cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.walk		=	tcf_ct_walker,
	.lookup		=	tcf_ct_search,
	.stats_update	=	tcf_stats_update,
	.size		=	sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");