// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
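/*
 * Software fast path for the netfilter flowtable: packets matching an
 * established, offloaded flow are translated and forwarded directly from
 * the ingress hook, bypassing the regular IP/IPv6 forwarding path.
 */
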
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

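/*
 * The NAT helpers below rewrite addresses in place and repair the IP and
 * layer 4 checksums incrementally (folding in only the old/new address
 * delta) instead of recomputing them over the whole packet.
 */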
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

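/*
 * Record up to two levels of encapsulation in tuple->encap[]: a
 * hardware-offloaded VLAN tag, if present, plus an outer VLAN or PPPoE
 * session header taken from the MAC header.
 */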
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

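/*
 * Per-packet parsing context: @in is the ingress device, @offset is the
 * size of the VLAN/PPPoE encapsulation preceding the IP header, and
 * @hdrsize is the layer 4 header length that must be in the linear area.
 */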
struct nf_flowtable_ctx {
	const struct net_device	*in;
	u32			offset;
	u32			hdrsize;
};

static int nf_flow_tuple_ip(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	u8 ipproto;

	if (!pskb_may_pull(skb, sizeof(*iph) + ctx->offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
	thoff = (iph->ihl * 4);

	/* Fragments and packets with IP options stay on the regular path. */
	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += ctx->offset;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		ctx->hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		ctx->hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		ctx->hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	/* TTL would expire; let the regular stack send ICMP time exceeded. */
	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
		return -1;

	switch (ipproto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port		= ports->source;
		tuple->dst_port		= ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);

	tuple->src_v4.s_addr	= iph->saddr;
	tuple->dst_v4.s_addr	= iph->daddr;
	tuple->l3proto		= AF_INET;
	tuple->l4proto		= ipproto;
	tuple->iifidx		= ctx->in->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
		return true;

	return dst_check(tuple->dst_cache, tuple->dst_cookie);
}

static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

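/*
 * FLOW_OFFLOAD_XMIT_DIRECT: the destination and source MAC addresses were
 * resolved when the flow was offloaded and cached in tuple->out, so the
 * frame can be handed to the output device without a neighbour lookup.
 */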
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

static struct flow_offload_tuple_rhash *
nf_flow_offload_lookup(struct nf_flowtable_ctx *ctx,
		       struct nf_flowtable *flow_table, struct sk_buff *skb)
{
	struct flow_offload_tuple tuple = {};

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &ctx->offset))
		return NULL;

	if (nf_flow_tuple_ip(ctx, skb, &tuple) < 0)
		return NULL;

	return flow_offload_lookup(flow_table, &tuple);
}

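/*
 * Returns 1 if the packet has been translated and is ready to be
 * transmitted, 0 to pass it up to the regular forwarding path, or a
 * negative value to drop it (skb_try_make_writable() failed).
 */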
static int nf_flow_offload_forward(struct nf_flowtable_ctx *ctx,
				   struct nf_flowtable *flow_table,
				   struct flow_offload_tuple_rhash *tuplehash,
				   struct sk_buff *skb)
{
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	unsigned int thoff, mtu;
	struct iphdr *iph;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return 0;

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
	thoff = (iph->ihl * 4) + ctx->offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return 0;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return 0;
	}

	if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
		return -1;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= ctx->offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	return 1;
}

unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	enum flow_offload_tuple_dir dir;
	struct nf_flowtable_ctx ctx = {
		.in	= state->in,
	};
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	__be32 nexthop;
	int ret;

	tuplehash = nf_flow_offload_lookup(&ctx, flow_table, skb);
	if (!tuplehash)
		return NF_ACCEPT;

	ret = nf_flow_offload_forward(&ctx, flow_table, tuplehash, skb);
	if (ret < 0)
		return NF_DROP;
	else if (ret == 0)
		return NF_ACCEPT;

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

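/*
 * These hooks are attached to the netdev ingress hook point by the
 * flowtable core. A minimal sketch of such a registration, assuming the
 * generic nf_hook_ops API (the actual wiring lives in the flowtable and
 * nf_tables code, not in this file; "dev" and "net" are placeholders):
 *
 *	struct nf_hook_ops ops = {
 *		.hook		= nf_flow_offload_ip_hook,
 *		.pf		= NFPROTO_NETDEV,
 *		.hooknum	= NF_NETDEV_INGRESS,
 *		.priority	= 0,
 *		.dev		= dev,
 *	};
 *	err = nf_register_net_hook(net, &ops);
 *
 * The IPv6 half below mirrors the IPv4 path; IPv6 has no header checksum
 * to fix up, and the TTL decrement becomes a hop_limit decrement.
 */
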
static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

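/*
 * IPv6 extension headers are not parsed here: ip6h->nexthdr must be TCP,
 * UDP or (when supported) GRE directly, otherwise the packet falls back
 * to the regular path and never matches an offloaded flow.
 */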
static int nf_flow_tuple_ipv6(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	u8 nexthdr;

	thoff = sizeof(*ip6h) + ctx->offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		ctx->hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		ctx->hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		ctx->hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	/* hop_limit would reach zero; let the regular path handle it. */
	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
		return -1;

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port		= ports->source;
		tuple->dst_port		= ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);

	tuple->src_v6		= ip6h->saddr;
	tuple->dst_v6		= ip6h->daddr;
	tuple->l3proto		= AF_INET6;
	tuple->l4proto		= nexthdr;
	tuple->iifidx		= ctx->in->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

static int nf_flow_offload_ipv6_forward(struct nf_flowtable_ctx *ctx,
					struct nf_flowtable *flow_table,
					struct flow_offload_tuple_rhash *tuplehash,
					struct sk_buff *skb)
{
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	unsigned int thoff, mtu;
	struct ipv6hdr *ip6h;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return 0;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
	thoff = sizeof(*ip6h) + ctx->offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return 0;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return 0;
	}

	if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
		return -1;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	return 1;
}

static struct flow_offload_tuple_rhash *
nf_flow_offload_ipv6_lookup(struct nf_flowtable_ctx *ctx,
			    struct nf_flowtable *flow_table,
			    struct sk_buff *skb)
{
	struct flow_offload_tuple tuple = {};

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &ctx->offset))
		return NULL;

	if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0)
		return NULL;

	return flow_offload_lookup(flow_table, &tuple);
}

unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	enum flow_offload_tuple_dir dir;
	struct nf_flowtable_ctx ctx = {
		.in	= state->in,
	};
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rt6_info *rt;
	int ret;

	tuplehash = nf_flow_offload_ipv6_lookup(&ctx, flow_table, skb);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	ret = nf_flow_offload_ipv6_forward(&ctx, flow_table, tuplehash, skb);
	if (ret < 0)
		return NF_DROP;
	else if (ret == 0)
		return NF_ACCEPT;

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);