// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
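
/* Populate the flow tuple for one direction from the corresponding
 * conntrack tuple: addresses, L3/L4 protocol numbers and, for TCP and
 * UDP, the port pair.
 */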
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;

	/* TCP and UDP port members share the same union layout. */
	switch (ctt->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ft->src_port = ctt->src.u.tcp.port;
		ft->dst_port = ctt->dst.u.tcp.port;
		break;
	}
}
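
/* Allocate a flow entry from a conntrack entry. The flow takes its own
 * reference on the conntrack entry and mirrors the conntrack NAT bits
 * into NF_FLOW_SNAT/NF_FLOW_DNAT, which tell the data path whether port
 * and address mangling is required.
 */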
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		return NULL;

	refcount_inc(&ct->ct_general.use);
	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	const struct rt6_info *rt;

	/* IPv6 routes carry a cookie to detect stale dst entries. */
	if (flow_tuple->l3proto == NFPROTO_IPV6) {
		rt = (const struct rt6_info *)flow_tuple->dst_cache;
		return rt6_get_cookie(rt);
	}

	return 0;
}
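
/* Fill in the forwarding information for one direction: path MTU, input
 * interface and encapsulation tags, plus the transmit path. The direct
 * xmit path caches the Ethernet addresses and output interfaces, while
 * the neighbour and xfrm paths hold a reference to the dst entry along
 * with its cookie, so staleness can be detected later.
 */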
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}
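
/* Set up the forwarding information for both directions of a flow. If
 * the reply direction fails, the dst reference taken for the original
 * direction is released again.
 */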
int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	/* Window tracking was suspended while the flow was offloaded;
	 * clear the maximum windows so tracking can resynchronize.
	 */
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}
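
/* Hand the connection back to conntrack with a sane timeout: the
 * regular per-state timeout minus the offload extension, only ever
 * shortening the timeout that is currently in place.
 */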
static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	s32 timeout;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		flow_offload_fixup_tcp(&ct->proto.tcp);

		timeout = tn->timeouts[ct->proto.tcp.state];
		timeout -= tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->timeouts[UDP_CT_REPLIED];
		timeout -= tn->offload_timeout;
	} else {
		return;
	}

	if (timeout < 0)
		timeout = 0;

	if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
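
/* Both tuplehash directions of a flow live in the same rhashtable. Only
 * the bytes up to the __hash marker take part in hashing and comparison;
 * the members after it are cached state, not lookup key.
 */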
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};
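
/* TCP and UDP flows use the per-netns offload timeout (exposed as the
 * nf_flowtable_tcp_timeout/nf_flowtable_udp_timeout sysctls); other
 * protocols fall back to the fixed NF_FLOW_TIMEOUT.
 */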
unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
	unsigned long timeout = NF_FLOW_TIMEOUT;
	struct net *net = nf_ct_net(flow->ct);
	int l4num = nf_ct_protonum(flow->ct);

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_timeout;
	}

	return timeout;
}
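
/* Insert both directions of the flow into the flowtable. On success,
 * extend the conntrack timeout so conntrack does not expire the entry
 * while it is offloaded, and queue hardware offload if the flowtable
 * supports it.
 */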
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
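
/* Data path refresh: push the flow timeout forward, but only write to
 * the shared flow entry when it moves by more than HZ, to limit false
 * sharing. With hardware offload enabled, the refreshed flow is also
 * handed back to the offload workqueue.
 */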
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	u32 timeout;

	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
	if (timeout - READ_ONCE(flow->timeout) > HZ)
		WRITE_ONCE(flow->timeout, timeout);
	else
		return;

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);
	flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);
	flow_offload_fixup_ct(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
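
/* Look up a flow entry by tuple. Entries already being torn down or
 * whose conntrack entry is dying are hidden from the caller, which then
 * falls back to the classic forwarding path.
 */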
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
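
/* Walk all flow entries; -EAGAIN from the rhashtable walker (a table
 * resize) is skipped and the walk continues. Each flow is visited once,
 * via its original-direction tuplehash.
 */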
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct nf_flowtable *flowtable,
				   struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow_table, flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}
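
/* Garbage collector step: tear down expired or dying flows. Flows in
 * hardware need two passes: one to request deletion from the driver
 * (NF_FLOW_HW_DYING), a later one to free the entry once the driver
 * confirms (NF_FLOW_HW_DEAD). Live hardware flows have their stats
 * refreshed instead.
 */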
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
				    struct flow_offload *flow, void *data)
{
	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct))
		flow_offload_teardown(flow);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
{
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_gc_run(flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		/* A zero UDP checksum means "no checksum"; avoid it. */
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}
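
/* Port NAT: rewrite the L4 header in place and fix the checksum up
 * incrementally. SNAT rewrites the source port on original-direction
 * packets and the destination port on replies; DNAT is the mirror
 * image.
 */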
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
				     struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
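
/* Flowtable teardown: unlink the table, stop the garbage collector,
 * flush pending hardware offload work, tear down and reap all remaining
 * entries, then complete the hardware cleanup before destroying the
 * rhashtable.
 */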
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_offload_flush(flow_table);
	/* ... no more pending work after this stage ... */
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_gc_run(flow_table);
	nf_flow_table_offload_flush_cleanup(flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int nf_flow_table_init_net(struct net *net)
{
	net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
	return net->ft.stat ? 0 : -ENOMEM;
}

static void nf_flow_table_fini_net(struct net *net)
{
	free_percpu(net->ft.stat);
}

static int nf_flow_table_pernet_init(struct net *net)
{
	int ret;

	ret = nf_flow_table_init_net(net);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_init_proc(net);
	if (ret < 0)
		goto out_proc;

	return 0;

out_proc:
	nf_flow_table_fini_net(net);
	return ret;
}

static void nf_flow_table_pernet_exit(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		nf_flow_table_fini_proc(net);
		nf_flow_table_fini_net(net);
	}
}

static struct pernet_operations nf_flow_table_net_ops = {
	.init = nf_flow_table_pernet_init,
	.exit_batch = nf_flow_table_pernet_exit,
};

static int __init nf_flow_table_module_init(void)
{
	int ret;

	ret = register_pernet_subsys(&nf_flow_table_net_ops);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_offload_init();
	if (ret)
		goto out_offload;

	return 0;

out_offload:
	unregister_pernet_subsys(&nf_flow_table_net_ops);
	return ret;
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
	unregister_pernet_subsys(&nf_flow_table_net_ops);
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");