2 * ip_vs_xmit.c: various packet transmitters for IPVS
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
14 * Description of forwarding methods:
15 * - all transmitters are called from LOCAL_IN (remote clients) and
16 * LOCAL_OUT (local clients) but for ICMP can be called from FORWARD
17 * - not all connections have destination server, for example,
18 * connections in backup server when fwmark is used
19 * - bypass connections use daddr from packet
21 * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
22 * - skb->pkt_type is not set yet
23 * - the only place where we can see skb->sk != NULL
26 #define KMSG_COMPONENT "IPVS"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/tcp.h> /* for tcphdr */
33 #include <net/tcp.h> /* for csum_tcpudp_magic */
35 #include <net/icmp.h> /* for icmp_send */
36 #include <net/route.h> /* for ip_route_output */
38 #include <net/ip6_route.h>
39 #include <net/addrconf.h>
40 #include <linux/icmpv6.h>
41 #include <linux/netfilter.h>
42 #include <linux/netfilter_ipv4.h>
44 #include <net/ip_vs.h>
47 IP_VS_RT_MODE_LOCAL = 1, /* Allow local dest */
48 IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
49 IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
52 IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */
53 IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */
57 * Destination cache to speed up outgoing route lookup
/* __ip_vs_dst_set(): install a new cached route @dst and its validity
 * @dst_cookie in @dest's destination cache.  Callers in this file take
 * dest->dst_lock around it.
 * NOTE(review): the code releasing the previous old_dst is not visible
 * in this excerpt (embedded lines 67-69 missing) -- confirm old_dst is
 * dropped after the swap. */
60 __ip_vs_dst_set(struct ip_vs_dest *dest, struct dst_entry *dst, u32 dst_cookie)
62 struct dst_entry *old_dst;
/* Swap in the new route; remember the old one for release. */
64 old_dst = dest->dst_cache;
65 dest->dst_cache = dst;
66 dest->dst_cookie = dst_cookie;
/* __ip_vs_dst_check(): return the route cached in @dest, revalidating an
 * obsolete entry via dst->ops->check() with the stored cookie.  When the
 * check fails the cache slot is cleared (the stale dst's release is not
 * visible in this excerpt -- confirm it is dropped). */
70 static inline struct dst_entry *
71 __ip_vs_dst_check(struct ip_vs_dest *dest)
73 struct dst_entry *dst = dest->dst_cache;
/* Obsolete route: NULL from ->check() means it is no longer valid. */
77 if (dst->obsolete && dst->ops->check(dst, dest->dst_cookie) == NULL) {
78 dest->dst_cache = NULL;
/* __mtu_check_toobig_v6(): true when the IPv6 packet in @skb cannot fit
 * into @mtu.  For packets reassembled by nf conntrack, the recorded
 * largest-fragment size is what must fit; otherwise the plain skb length
 * is tested, with GSO packets exempt (they are segmented later). */
87 __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
89 if (IP6CB(skb)->frag_max_size) {
90 /* frag_max_size tell us that, this packet have been
91 * defragmented by netfilter IPv6 conntrack module.
93 if (IP6CB(skb)->frag_max_size > mtu)
94 return true; /* largest fragment violate MTU */
96 else if (skb->len > mtu && !skb_is_gso(skb)) {
97 return true; /* Packet size violate MTU size */
102 /* Get route to daddr, update *saddr, optionally bind route to saddr */
/* do_output_route4(): IPv4 route lookup helper.  With CONNECT mode the
 * flow is bound to *saddr; with KNOWN_NH the nexthop is forced to the
 * remote address.  On -EINVAL caused by a bad bound saddr the lookup is
 * retried once with a wildcard source (a loop guard, declared in lines
 * not visible here, prevents endless retries). */
103 static struct rtable *do_output_route4(struct net *net, __be32 daddr,
104 int rt_mode, __be32 *saddr)
110 memset(&fl4, 0, sizeof(fl4));
112 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
113 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
114 FLOWI_FLAG_KNOWN_NH : 0;
117 rt = ip_route_output_key(net, &fl4);
119 /* Invalid saddr ? */
120 if (PTR_ERR(rt) == -EINVAL && *saddr &&
121 rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
/* Retry the lookup without the (rejected) source address. */
123 flowi4_update_output(&fl4, 0, 0, daddr, 0);
126 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
/* Lookup picked a source address for us: rebind the flow to it. */
128 } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
131 flowi4_update_output(&fl4, 0, 0, daddr, fl4.saddr);
139 /* Get route to destination or remote server */
/* __ip_vs_get_out_rt(): resolve the IPv4 route towards the real server
 * (@dest, using its per-dest route cache under dst_lock) or towards the
 * packet's @daddr when no dest is configured.  @rt_mode restricts which
 * destinations (local/non-local/redirect) are acceptable; violations are
 * logged rate-limited and rejected (error paths are in lines missing
 * from this excerpt). */
140 static struct rtable *
141 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
142 __be32 daddr, int rt_mode, __be32 *ret_saddr)
144 struct net *net = dev_net(skb_dst(skb)->dev);
145 struct rtable *rt; /* Route to the other host */
146 struct rtable *ort; /* Original route */
/* Dest configured: reuse/refresh the cached route under dst_lock. */
150 spin_lock(&dest->dst_lock);
151 rt = (struct rtable *) __ip_vs_dst_check(dest);
153 rt = do_output_route4(net, dest->addr.ip, rt_mode,
154 &dest->dst_saddr.ip);
156 spin_unlock(&dest->dst_lock);
/* Cache a clone of the fresh route (cookie 0 for IPv4). */
159 __ip_vs_dst_set(dest, dst_clone(&rt->dst), 0);
160 IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d\n",
161 &dest->addr.ip, &dest->dst_saddr.ip,
162 atomic_read(&rt->dst.__refcnt));
164 daddr = dest->addr.ip;
166 *ret_saddr = dest->dst_saddr.ip;
167 spin_unlock(&dest->dst_lock);
/* No dest (e.g. fwmark on backup): one-shot lookup, no saddr cache. */
169 __be32 saddr = htonl(INADDR_ANY);
171 /* For such unconfigured boxes avoid many route lookups
172 * for performance reasons because we do not remember saddr
174 rt_mode &= ~IP_VS_RT_MODE_CONNECT;
175 rt = do_output_route4(net, daddr, rt_mode, &saddr);
/* Enforce the rt_mode policy against the route's local/non-local flag. */
182 local = rt->rt_flags & RTCF_LOCAL;
183 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
185 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
186 (rt->rt_flags & RTCF_LOCAL) ?
187 "local":"non-local", &daddr);
/* Redirecting remote traffic to a local address needs NAT (RDR mode). */
191 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
192 !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
193 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
194 "requires NAT method, dest: %pI4\n",
195 &ip_hdr(skb)->daddr, &daddr);
/* Never forward loopback-sourced packets to a non-local destination. */
199 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
200 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
201 "to non-local address, dest: %pI4\n",
202 &ip_hdr(skb)->saddr, &daddr);
210 /* Reroute packet to local IPv4 stack after DNAT */
/* After DNAT changed ip_hdr->daddr, the attached route still reflects
 * the old destination.  Re-run the input (or output) route lookup so
 * local delivery works; a non-local result on the output path is
 * rejected (error handling lines are missing from this excerpt). */
212 __ip_vs_reroute_locally(struct sk_buff *skb)
214 struct rtable *rt = skb_rtable(skb);
215 struct net_device *dev = rt->dst.dev;
216 struct net *net = dev_net(dev);
217 struct iphdr *iph = ip_hdr(skb);
/* Input route: redo ip_route_input() for the new daddr, dropping the
 * saved refdst on success. */
219 if (rt_is_input_route(rt)) {
220 unsigned long orefdst = skb->_skb_refdst;
222 if (ip_route_input(skb, iph->daddr, iph->saddr,
225 refdst_drop(orefdst);
/* Output route (local client): fresh output lookup keyed on tos/mark. */
227 struct flowi4 fl4 = {
230 .flowi4_tos = RT_TOS(iph->tos),
231 .flowi4_mark = skb->mark,
234 rt = ip_route_output_key(net, &fl4);
/* The rerouted destination must still be local to this host. */
237 if (!(rt->rt_flags & RTCF_LOCAL)) {
241 /* Drop old route. */
243 skb_dst_set(skb, &rt->dst);
248 #ifdef CONFIG_IP_VS_IPV6
/* True when the IPv6 route points at the loopback device, i.e. the
 * destination is local to this host. */
250 static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
252 return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
/* __ip_vs_route_output_v6(): IPv6 route lookup towards @daddr.  Picks a
 * source address when the flow has none, optionally runs the result
 * through xfrm_lookup() (@do_xfrm), and reports the chosen source in
 * *ret_saddr.  Error unwinding lines are missing from this excerpt. */
255 static struct dst_entry *
256 __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
257 struct in6_addr *ret_saddr, int do_xfrm)
259 struct dst_entry *dst;
260 struct flowi6 fl6 = {
264 dst = ip6_route_output(net, NULL, &fl6);
/* No source yet: derive one from the egress device for this daddr. */
269 if (ipv6_addr_any(&fl6.saddr) &&
270 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
271 &fl6.daddr, 0, &fl6.saddr) < 0)
/* Optional IPsec transformation of the route. */
274 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
280 *ret_saddr = fl6.saddr;
285 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
290 * Get route to destination or remote server
/* IPv6 twin of __ip_vs_get_out_rt(): resolve the route to the real
 * server via the per-dest cache (validated with the fib node's sernum
 * cookie) or do a one-shot lookup, then apply the @rt_mode policy.
 * On policy violations the route reference is dropped here; the actual
 * return statements are in lines missing from this excerpt. */
292 static struct rt6_info *
293 __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
294 struct in6_addr *daddr, struct in6_addr *ret_saddr,
295 int do_xfrm, int rt_mode)
297 struct net *net = dev_net(skb_dst(skb)->dev);
298 struct rt6_info *rt; /* Route to the other host */
299 struct rt6_info *ort; /* Original route */
300 struct dst_entry *dst;
/* Dest configured: check/refresh the cached route under dst_lock. */
304 spin_lock(&dest->dst_lock);
305 rt = (struct rt6_info *)__ip_vs_dst_check(dest);
309 dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
310 &dest->dst_saddr.in6,
313 spin_unlock(&dest->dst_lock);
/* Cache a clone, keyed by the fib node serial number as cookie. */
316 rt = (struct rt6_info *) dst;
317 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
318 __ip_vs_dst_set(dest, dst_clone(&rt->dst), cookie);
319 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
320 &dest->addr.in6, &dest->dst_saddr.in6,
321 atomic_read(&rt->dst.__refcnt));
324 *ret_saddr = dest->dst_saddr.in6;
325 spin_unlock(&dest->dst_lock);
/* No dest: direct lookup towards the packet-supplied daddr. */
327 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
330 rt = (struct rt6_info *) dst;
/* Enforce local/non-local policy from rt_mode. */
333 local = __ip_vs_is_local_route6(rt);
334 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
336 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n",
337 local ? "local":"non-local", daddr);
338 dst_release(&rt->dst);
/* Redirect of remote traffic to local address requires NAT (RDR). */
341 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
342 !((ort = (struct rt6_info *) skb_dst(skb)) &&
343 __ip_vs_is_local_route6(ort))) {
344 IP_VS_DBG_RL("Redirect from non-local address %pI6c to local "
345 "requires NAT method, dest: %pI6c\n",
346 &ipv6_hdr(skb)->daddr, daddr);
347 dst_release(&rt->dst);
/* Refuse loopback-sourced packets headed to a non-local destination. */
350 if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
351 ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
352 IPV6_ADDR_LOOPBACK)) {
353 IP_VS_DBG_RL("Stopping traffic from loopback address %pI6c "
354 "to non-local address, dest: %pI6c\n",
355 &ipv6_hdr(skb)->saddr, daddr);
356 dst_release(&rt->dst);
366 * Release dest->dst_cache before a dest is removed
/* Drop the cached route and clear the remembered source address so a
 * later rebind starts from scratch. */
369 ip_vs_dst_reset(struct ip_vs_dest *dest)
371 struct dst_entry *old_dst;
373 old_dst = dest->dst_cache;
374 dest->dst_cache = NULL;
375 dst_release(old_dst);
376 dest->dst_saddr.ip = 0;
379 /* return NF_ACCEPT to allow forwarding or other NF_xxx on error */
/* Mark the skb as IPVS-owned, confirm conntrack when the connection
 * uses netfilter tracking, and prepare checksum state for forwarding. */
380 static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
381 struct ip_vs_conn *cp)
385 skb->ipvs_property = 1;
386 if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
387 ret = ip_vs_confirm_conntrack(skb);
388 if (ret == NF_ACCEPT) {
390 skb_forward_csum(skb);
395 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
/* NAT variant of the send helper: updates conntrack (direction 1) when
 * the connection is NFCT-tracked, then re-injects the skb through the
 * LOCAL_OUT hook.  The local==1 early path is in lines missing here. */
396 static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
397 struct ip_vs_conn *cp, int local)
401 skb->ipvs_property = 1;
402 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
405 ip_vs_update_conntrack(skb, cp, 1);
407 skb_forward_csum(skb);
408 NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
415 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
/* Plain send helper used by DR/TUN/bypass paths: tag the skb as IPVS
 * property and push it out via the LOCAL_OUT netfilter hook.  The
 * conntrack-tracked branch is in lines missing from this excerpt. */
416 static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
417 struct ip_vs_conn *cp, int local)
421 skb->ipvs_property = 1;
422 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
425 skb_forward_csum(skb);
426 NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
435 * NULL transmitter (do nothing except return NF_ACCEPT)
/* Used for LOCALNODE: hand the untouched skb back with local=1. */
438 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
439 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
441 /* we do not touch skb and do not need pskb ptr */
442 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
448 * Let packets bypass the destination when the destination is not
449 * available, it may be only used in transparent cache cluster.
/* ip_vs_bypass_xmit(): IPv4 bypass transmitter.  Routes by the packet's
 * own daddr (no dest), enforces DF/MTU with ICMP_FRAG_NEEDED, fixes the
 * IP checksum after possible defrag, and sends via LOCAL_OUT.  Error
 * labels/returns are in lines missing from this excerpt. */
452 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
453 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
455 struct rtable *rt; /* Route to the other host */
456 struct iphdr *iph = ip_hdr(skb);
/* Route strictly to a non-local destination using the packet's daddr. */
461 rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, IP_VS_RT_MODE_NON_LOCAL,
/* DF set and packet exceeds path MTU: tell the sender. */
467 mtu = dst_mtu(&rt->dst);
468 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
471 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
472 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
477 * Call ip_send_check because we are not sure it is called
478 * after ip_defrag. Is copy-on-write needed?
480 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
484 ip_send_check(ip_hdr(skb));
/* Attach the new route and hand off to the stack. */
488 skb_dst_set(skb, &rt->dst);
490 /* Another hack: avoid icmp_send in ip_fragment */
493 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
499 dst_link_failure(skb);
506 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 bypass transmitter: same contract as ip_vs_bypass_xmit() but
 * with the IPv6 MTU check and ICMPV6_PKT_TOOBIG signalling.  Error
 * labels/returns are in lines missing from this excerpt. */
508 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
509 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
511 struct rt6_info *rt; /* Route to the other host */
/* Route by the packet's own daddr, non-local only, no xfrm. */
516 rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr.in6, NULL, 0,
517 IP_VS_RT_MODE_NON_LOCAL);
522 mtu = dst_mtu(&rt->dst);
523 if (__mtu_check_toobig_v6(skb, mtu)) {
525 struct net *net = dev_net(skb_dst(skb)->dev);
/* icmpv6_send needs a device; use loopback for the error skb. */
527 skb->dev = net->loopback_dev;
529 /* only send ICMP too big on first fragment */
531 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
532 dst_release(&rt->dst);
533 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
538 * Call ip_send_check because we are not sure it is called
539 * after ip_defrag. Is copy-on-write needed?
541 skb = skb_share_check(skb, GFP_ATOMIC);
542 if (unlikely(skb == NULL)) {
543 dst_release(&rt->dst);
/* Attach the new route and send out via LOCAL_OUT. */
549 skb_dst_set(skb, &rt->dst);
551 /* Another hack: avoid icmp_send in ip_fragment */
554 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
560 dst_link_failure(skb);
569 * NAT transmitter (only for outside-to-inside nat forwarding)
570 * Not used for related ICMP
/* ip_vs_nat_xmit(): rewrite the destination (DNAT) to the real server
 * and transmit.  Handles no-cport connections, sync-ed connections with
 * conflicting conntrack tuples, loopback-DNAT refusal, MTU/DF checks,
 * protocol-level dnat_handler mangling and local rerouting after DNAT.
 * Error labels/returns are in lines missing from this excerpt. */
573 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
574 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
576 struct rtable *rt; /* Route to the other host */
578 struct iphdr *iph = ip_hdr(skb);
583 /* check if it is a connection of no-client-port */
584 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
/* Learn the client port from the transport header. */
586 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
589 ip_vs_conn_fill_cport(cp, *p);
590 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
/* Route to the real server; RDR allows DNAT to a local address. */
593 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
594 IP_VS_RT_MODE_LOCAL |
595 IP_VS_RT_MODE_NON_LOCAL |
596 IP_VS_RT_MODE_RDR, NULL)))
598 local = rt->rt_flags & RTCF_LOCAL;
600 * Avoid duplicate tuple in reply direction for NAT traffic
601 * to local address when connection is sync-ed
603 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
604 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
605 enum ip_conntrack_info ctinfo;
606 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
608 if (ct && !nf_ct_is_untracked(ct)) {
609 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
611 "stopping DNAT to local address");
617 /* From world but DNAT to loopback address? */
618 if (local && ipv4_is_loopback(cp->daddr.ip) &&
619 rt_is_input_route(skb_rtable(skb))) {
620 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
621 "stopping DNAT to loopback address");
/* DF set and packet too big for the route: signal frag needed. */
626 mtu = dst_mtu(&rt->dst);
627 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
629 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
630 IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
631 "ip_vs_nat_xmit(): frag needed for");
635 /* copy-on-write the packet before mangling it */
636 if (!skb_make_writable(skb, sizeof(struct iphdr)))
639 if (skb_cow(skb, rt->dst.dev->hard_header_len))
642 /* mangle the packet */
643 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
645 ip_hdr(skb)->daddr = cp->daddr.ip;
646 ip_send_check(ip_hdr(skb));
/* Attach the new route before (possibly) rerouting locally. */
651 skb_dst_set(skb, &rt->dst);
655 * Some IPv4 replies get local address from routes,
656 * not from iph, so while we DNAT after routing
657 * we need this second input/output route.
659 if (!__ip_vs_reroute_locally(skb))
663 IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");
665 /* FIXME: when application helper enlarges the packet and the length
666 is larger than the MTU of outgoing device, there will be still
669 /* Another hack: avoid icmp_send in ip_fragment */
672 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
678 dst_link_failure(skb);
688 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 NAT transmitter: mirror of ip_vs_nat_xmit() with IPv6 headers,
 * __mtu_check_toobig_v6() and ICMPV6_PKT_TOOBIG signalling.  The cport
 * learning is skipped for non-first fragments (iph->fragoffs).  Error
 * labels/returns are in lines missing from this excerpt. */
690 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
691 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
693 struct rt6_info *rt; /* Route to the other host */
699 /* check if it is a connection of no-client-port */
700 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !iph->fragoffs)) {
702 p = skb_header_pointer(skb, iph->len, sizeof(_pt), &_pt);
705 ip_vs_conn_fill_cport(cp, *p);
706 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
/* Route to the real server; RDR allows DNAT to a local address. */
709 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
710 0, (IP_VS_RT_MODE_LOCAL |
711 IP_VS_RT_MODE_NON_LOCAL |
712 IP_VS_RT_MODE_RDR))))
714 local = __ip_vs_is_local_route6(rt);
716 * Avoid duplicate tuple in reply direction for NAT traffic
717 * to local address when connection is sync-ed
719 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
720 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
721 enum ip_conntrack_info ctinfo;
722 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
724 if (ct && !nf_ct_is_untracked(ct)) {
725 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
726 "ip_vs_nat_xmit_v6(): "
727 "stopping DNAT to local address");
733 /* From world but DNAT to loopback address? */
734 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
735 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
736 IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
737 "ip_vs_nat_xmit_v6(): "
738 "stopping DNAT to loopback address");
743 mtu = dst_mtu(&rt->dst);
744 if (__mtu_check_toobig_v6(skb, mtu)) {
746 struct net *net = dev_net(skb_dst(skb)->dev);
748 skb->dev = net->loopback_dev;
750 /* only send ICMP too big on first fragment */
752 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
753 IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
754 "ip_vs_nat_xmit_v6(): frag needed for");
758 /* copy-on-write the packet before mangling it */
759 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
762 if (skb_cow(skb, rt->dst.dev->hard_header_len))
765 /* mangle the packet */
766 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, iph))
768 ipv6_hdr(skb)->daddr = cp->daddr.in6;
/* Replace the route unless the skb keeps its loopback-bound one. */
770 if (!local || !skb->dev) {
771 /* drop the old route when skb is not shared */
773 skb_dst_set(skb, &rt->dst);
775 /* destined to loopback, do we need to change route? */
776 dst_release(&rt->dst);
779 IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");
781 /* FIXME: when application helper enlarges the packet and the length
782 is larger than the MTU of outgoing device, there will be still
785 /* Another hack: avoid icmp_send in ip_fragment */
788 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
794 dst_link_failure(skb);
800 dst_release(&rt->dst);
807 * IP Tunneling transmitter
809 * This function encapsulates the packet in a new IP packet, its
810 * destination will be set to cp->daddr. Most code of this function
811 * is taken from ipip.c.
813 * It is used in VS/TUN cluster. The load balancer selects a real
814 * server from a cluster based on a scheduling algorithm,
815 * encapsulates the request packet and forwards it to the selected
816 * server. For example, all real servers are configured with
817 * "ifconfig tunl0 <Virtual IP Address> up". When the server receives
818 * the encapsulated packet, it will decapsulate the packet, processe
819 * the request and return the response packets directly to the client
820 * without passing the load balancer. This can greatly increase the
821 * scalability of virtual server.
823 * Used for ANY protocol
/* VS/TUN IPv4: wrap the original packet in an outer IPIP header bound
 * to the route's source address, honouring PMTU (sysctl-controlled DF
 * copy) and expanding headroom when needed.  Error labels/returns are
 * in lines missing from this excerpt. */
826 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
827 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
829 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
830 struct rtable *rt; /* Route to the other host */
831 __be32 saddr; /* Source for tunnel */
832 struct net_device *tdev; /* Device to other host */
833 struct iphdr *old_iph = ip_hdr(skb);
834 u8 tos = old_iph->tos;
836 struct iphdr *iph; /* Our new IP header */
837 unsigned int max_headroom; /* The extra header space needed */
/* CONNECT mode binds the route to a source address for the tunnel. */
843 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
844 IP_VS_RT_MODE_LOCAL |
845 IP_VS_RT_MODE_NON_LOCAL |
846 IP_VS_RT_MODE_CONNECT, &saddr)))
/* Local destination: deliver without encapsulation. */
848 if (rt->rt_flags & RTCF_LOCAL) {
850 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
/* Inner MTU = route MTU minus the outer IPIP header. */
855 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
857 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
860 if (rt_is_output_route(skb_rtable(skb)))
861 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
863 /* Copy DF, reset fragment offset and MF */
864 df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
866 if (df && mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
867 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
868 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
873 * Okay, now see if we can stuff it in the buffer as-is.
875 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
877 if (skb_headroom(skb) < max_headroom
878 || skb_cloned(skb) || skb_shared(skb)) {
879 struct sk_buff *new_skb =
880 skb_realloc_headroom(skb, max_headroom);
884 IP_VS_ERR_RL("%s(): no memory\n", __func__);
/* old_iph may have moved after reallocation. */
889 old_iph = ip_hdr(skb);
892 skb->transport_header = skb->network_header;
894 /* fix old IP header checksum */
895 ip_send_check(old_iph);
/* Make room for, and point at, the new outer header. */
897 skb_push(skb, sizeof(struct iphdr));
898 skb_reset_network_header(skb);
899 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
903 skb_dst_set(skb, &rt->dst);
906 * Push down and install the IPIP header.
910 iph->ihl = sizeof(struct iphdr)>>2;
912 iph->protocol = IPPROTO_IPIP;
914 iph->daddr = cp->daddr.ip;
916 iph->ttl = old_iph->ttl;
917 ip_select_ident(iph, &rt->dst, NULL);
919 /* Another hack: avoid icmp_send in ip_fragment */
922 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
923 if (ret == NF_ACCEPT)
925 else if (ret == NF_DROP)
933 dst_link_failure(skb);
943 #ifdef CONFIG_IP_VS_IPV6
/* VS/TUN IPv6: encapsulate in an outer IPv6-in-IPv6 header (xfrm lookup
 * enabled) towards cp->daddr.in6.  Enforces IPV6_MIN_MTU and signals
 * ICMPV6_PKT_TOOBIG on oversize first fragments.  Error labels/returns
 * are in lines missing from this excerpt. */
945 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
946 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
948 struct rt6_info *rt; /* Route to the other host */
949 struct in6_addr saddr; /* Source for tunnel */
950 struct net_device *tdev; /* Device to other host */
951 struct ipv6hdr *old_iph = ipv6_hdr(skb);
952 struct ipv6hdr *iph; /* Our new IP header */
953 unsigned int max_headroom; /* The extra header space needed */
/* do_xfrm=1: the tunnel route goes through IPsec policy lookup. */
959 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
960 &saddr, 1, (IP_VS_RT_MODE_LOCAL |
961 IP_VS_RT_MODE_NON_LOCAL))))
963 if (__ip_vs_is_local_route6(rt)) {
964 dst_release(&rt->dst);
965 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
/* Inner MTU = route MTU minus the outer IPv6 header. */
970 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
971 if (mtu < IPV6_MIN_MTU) {
972 IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
977 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
979 /* MTU checking: Notice that 'mtu' have been adjusted before hand */
980 if (__mtu_check_toobig_v6(skb, mtu)) {
982 struct net *net = dev_net(skb_dst(skb)->dev);
984 skb->dev = net->loopback_dev;
986 /* only send ICMP too big on first fragment */
987 if (!ipvsh->fragoffs)
988 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
989 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
994 * Okay, now see if we can stuff it in the buffer as-is.
996 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
998 if (skb_headroom(skb) < max_headroom
999 || skb_cloned(skb) || skb_shared(skb)) {
1000 struct sk_buff *new_skb =
1001 skb_realloc_headroom(skb, max_headroom);
1003 dst_release(&rt->dst);
1005 IP_VS_ERR_RL("%s(): no memory\n", __func__);
/* old_iph may have moved after reallocation. */
1010 old_iph = ipv6_hdr(skb);
1013 skb->transport_header = skb->network_header;
1015 skb_push(skb, sizeof(struct ipv6hdr));
1016 skb_reset_network_header(skb);
1017 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1019 /* drop old route */
1021 skb_dst_set(skb, &rt->dst);
1024 * Push down and install the IPIP header.
1026 iph = ipv6_hdr(skb);
1028 iph->nexthdr = IPPROTO_IPV6;
/* Outer payload = inner payload plus the inner IPv6 header itself. */
1029 iph->payload_len = old_iph->payload_len;
1030 be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
1031 iph->priority = old_iph->priority;
1032 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
1033 iph->daddr = cp->daddr.in6;
1035 iph->hop_limit = old_iph->hop_limit;
1037 /* Another hack: avoid icmp_send in ip_fragment */
1040 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
1041 if (ret == NF_ACCEPT)
1043 else if (ret == NF_DROP)
1051 dst_link_failure(skb);
1057 dst_release(&rt->dst);
1064 * Direct Routing transmitter
1065 * Used for ANY protocol
/* VS/DR IPv4: send the packet unchanged, routed via the real server as
 * the next hop (KNOWN_NH).  Local destinations are delivered in place;
 * MTU/DF violations get ICMP_FRAG_NEEDED.  Error labels/returns are in
 * lines missing from this excerpt. */
1068 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1069 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1071 struct rtable *rt; /* Route to the other host */
1072 struct iphdr *iph = ip_hdr(skb);
1077 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1078 IP_VS_RT_MODE_LOCAL |
1079 IP_VS_RT_MODE_NON_LOCAL |
1080 IP_VS_RT_MODE_KNOWN_NH, NULL)))
1082 if (rt->rt_flags & RTCF_LOCAL) {
1084 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
1088 mtu = dst_mtu(&rt->dst);
1089 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
1091 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
1093 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1098 * Call ip_send_check because we are not sure it is called
1099 * after ip_defrag. Is copy-on-write needed?
1101 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
1105 ip_send_check(ip_hdr(skb));
1107 /* drop old route */
1109 skb_dst_set(skb, &rt->dst);
1111 /* Another hack: avoid icmp_send in ip_fragment */
1114 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
1120 dst_link_failure(skb);
1127 #ifdef CONFIG_IP_VS_IPV6
/* VS/DR IPv6: forward the unmodified packet towards the real server.
 * Local destinations are delivered in place; oversize packets trigger
 * ICMPV6_PKT_TOOBIG.  Error labels/returns are in lines missing from
 * this excerpt. */
1129 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1130 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
1132 struct rt6_info *rt; /* Route to the other host */
1137 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1138 0, (IP_VS_RT_MODE_LOCAL |
1139 IP_VS_RT_MODE_NON_LOCAL))))
1141 if (__ip_vs_is_local_route6(rt)) {
1142 dst_release(&rt->dst);
1143 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
1147 mtu = dst_mtu(&rt->dst);
1148 if (__mtu_check_toobig_v6(skb, mtu)) {
1150 struct net *net = dev_net(skb_dst(skb)->dev);
1152 skb->dev = net->loopback_dev;
1154 /* only send ICMP too big on first fragment */
1156 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1157 dst_release(&rt->dst);
1158 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1163 * Call ip_send_check because we are not sure it is called
1164 * after ip_defrag. Is copy-on-write needed?
1166 skb = skb_share_check(skb, GFP_ATOMIC);
1167 if (unlikely(skb == NULL)) {
1168 dst_release(&rt->dst);
1172 /* drop old route */
1174 skb_dst_set(skb, &rt->dst);
1176 /* Another hack: avoid icmp_send in ip_fragment */
1179 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
1185 dst_link_failure(skb);
1195 * ICMP packet transmitter
1196 * called by the ip_vs_in_icmp
/* ip_vs_icmp_xmit(): forward an ICMP error related to an IPVS
 * connection.  Non-MASQ methods delegate to cp->packet_xmit; VS/NAT
 * mangles the embedded header (ip_vs_nat_icmp) after the same policy,
 * loopback and MTU checks as ip_vs_nat_xmit().  @offset is the span
 * that must be writable; @hooknum disables LOCALNODE from FORWARD.
 * Error labels/returns are in lines missing from this excerpt. */
1199 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1200 struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1201 struct ip_vs_iphdr *iph)
1203 struct rtable *rt; /* Route to the other host */
1211 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1212 forwarded directly here, because there is no need to
1213 translate address/port back */
1214 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1215 if (cp->packet_xmit)
1216 rc = cp->packet_xmit(skb, cp, pp, iph);
1219 /* do not touch skb anymore */
1220 atomic_inc(&cp->in_pkts);
1225 * mangle and send the packet here (only for VS/NAT)
1228 /* LOCALNODE from FORWARD hook is not supported */
1229 rt_mode = (hooknum != NF_INET_FORWARD) ?
1230 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1231 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1232 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1235 local = rt->rt_flags & RTCF_LOCAL;
1238 * Avoid duplicate tuple in reply direction for NAT traffic
1239 * to local address when connection is sync-ed
1241 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1242 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1243 enum ip_conntrack_info ctinfo;
1244 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1246 if (ct && !nf_ct_is_untracked(ct)) {
1247 IP_VS_DBG(10, "%s(): "
1248 "stopping DNAT to local address %pI4\n",
1249 __func__, &cp->daddr.ip);
1255 /* From world but DNAT to loopback address? */
1256 if (local && ipv4_is_loopback(cp->daddr.ip) &&
1257 rt_is_input_route(skb_rtable(skb))) {
1258 IP_VS_DBG(1, "%s(): "
1259 "stopping DNAT to loopback %pI4\n",
1260 __func__, &cp->daddr.ip);
1265 mtu = dst_mtu(&rt->dst);
1266 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
1268 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1269 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1273 /* copy-on-write the packet before mangling it */
1274 if (!skb_make_writable(skb, offset))
1277 if (skb_cow(skb, rt->dst.dev->hard_header_len))
/* Translate the embedded (cited) header back to the real server. */
1280 ip_vs_nat_icmp(skb, pp, cp, 0);
1283 /* drop the old route when skb is not shared */
1285 skb_dst_set(skb, &rt->dst);
1289 * Some IPv4 replies get local address from routes,
1290 * not from iph, so while we DNAT after routing
1291 * we need this second input/output route.
1293 if (!__ip_vs_reroute_locally(skb))
1297 /* Another hack: avoid icmp_send in ip_fragment */
1300 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
1304 dst_link_failure(skb);
1316 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 twin of ip_vs_icmp_xmit(): forward an ICMPv6 error for an IPVS
 * connection; VS/NAT mangles the embedded header via ip_vs_nat_icmp_v6.
 * NOTE(review): this function's tail (final labels/returns) extends
 * past this excerpt. */
1318 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1319 struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1320 struct ip_vs_iphdr *iph)
1322 struct rt6_info *rt; /* Route to the other host */
1330 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1331 forwarded directly here, because there is no need to
1332 translate address/port back */
1333 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1334 if (cp->packet_xmit)
1335 rc = cp->packet_xmit(skb, cp, pp, iph);
1338 /* do not touch skb anymore */
1339 atomic_inc(&cp->in_pkts);
1344 * mangle and send the packet here (only for VS/NAT)
1347 /* LOCALNODE from FORWARD hook is not supported */
1348 rt_mode = (hooknum != NF_INET_FORWARD) ?
1349 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1350 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1351 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1355 local = __ip_vs_is_local_route6(rt);
1357 * Avoid duplicate tuple in reply direction for NAT traffic
1358 * to local address when connection is sync-ed
1360 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1361 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1362 enum ip_conntrack_info ctinfo;
1363 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1365 if (ct && !nf_ct_is_untracked(ct)) {
1366 IP_VS_DBG(10, "%s(): "
1367 "stopping DNAT to local address %pI6\n",
1368 __func__, &cp->daddr.in6);
1374 /* From world but DNAT to loopback address? */
1375 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
1376 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
1377 IP_VS_DBG(1, "%s(): "
1378 "stopping DNAT to loopback %pI6\n",
1379 __func__, &cp->daddr.in6);
1384 mtu = dst_mtu(&rt->dst);
1385 if (__mtu_check_toobig_v6(skb, mtu)) {
1387 struct net *net = dev_net(skb_dst(skb)->dev);
1389 skb->dev = net->loopback_dev;
1391 /* only send ICMP too big on first fragment */
1393 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1394 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1398 /* copy-on-write the packet before mangling it */
1399 if (!skb_make_writable(skb, offset))
1402 if (skb_cow(skb, rt->dst.dev->hard_header_len))
/* Translate the embedded (cited) header back to the real server. */
1405 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1407 if (!local || !skb->dev) {
1408 /* drop the old route when skb is not shared */
1410 skb_dst_set(skb, &rt->dst);
1412 /* destined to loopback, do we need to change route? */
1413 dst_release(&rt->dst);
1416 /* Another hack: avoid icmp_send in ip_fragment */
1419 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
1423 dst_link_failure(skb);
1431 dst_release(&rt->dst);