/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>
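
/* Split a GSO super-packet into MSS-sized TCP segments and fix up each
 * segment's sequence number, flags and checksum.  Invoked by the GSO
 * engine when the device cannot segment this skb in hardware.
 */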
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen, seq, oldlen, mss;
	__be32 delta;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;
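
	/* Save the one's complement of the original length: adding it to
	 * a new length later yields the delta needed to patch th->check
	 * incrementally (RFC 1624) instead of recomputing the whole sum.
	 */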
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;
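
	/* If the device could still handle this skb once NETIF_F_GSO_ROBUST
	 * is assumed, no software segmentation is needed here: just sanity
	 * check gso_type and recompute gso_segs for the untrusted source.
	 */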
	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_TCPV4 | SKB_GSO_DODGY |
				      SKB_GSO_TCP_ECN | SKB_GSO_TCPV6 |
				      SKB_GSO_GRE | SKB_GSO_IPIP |
				      SKB_GSO_SIT | SKB_GSO_MPLS |
				      SKB_GSO_UDP_TUNNEL | 0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;
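
	/* Every full-sized segment grows by thlen + mss bytes relative to
	 * the pulled headers, so a single incremental checksum delta
	 * covers all segments except the last one.
	 */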
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));
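
	/* Fix up every segment but the last: clear FIN/PSH (only the final
	 * segment may carry them), patch the checksum, and advance the
	 * sequence number by one MSS per segment.
	 */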
	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}
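
	/* The last segment carries whatever payload is left and is usually
	 * shorter than mss, so its checksum delta is computed from the
	 * actual tail rather than the per-segment constant above.
	 */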
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));
out:
	return segs;
}
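
/* GRO receive side: try to merge this segment into a packet already held
 * on the GRO list.  Returns the list position to flush, or NULL to keep
 * holding the packet.
 */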
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th, *th2;
	unsigned int len, thlen, hlen, off;
	__be32 flags;
	unsigned int mss = 1;
	int flush = 1;
	int i;
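
	/* The TCP header may still live in the first page fragment
	 * (frag0); take the fast lookup when it does, otherwise fall back
	 * to the slow path that pulls it into the linear area.
	 */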
	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);
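
	/* Scan the held packets for the same flow; source and dest ports
	 * are adjacent in the header, so both compare as one 32-bit word.
	 */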
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);
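
	/* Merging is only safe when this segment starts exactly where the
	 * held packet ends and is no larger than the flow's MSS.
	 */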
	mss = tcp_skb_mss(p);

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}
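
/* Finish a merged GRO packet: set up CHECKSUM_PARTIAL over the TCP
 * header so the super-packet can be resegmented later, and record how
 * many segments were coalesced.
 */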
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
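
/* Prepare an IPv4 TCP skb for checksum offload: zero th->check and let
 * __tcp_v4_send_check() seed it with the pseudo-header sum, leaving the
 * rest to the device (or the software fallback).
 */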
static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Use the IP hdr immediately preceding this transport header */
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip_csum;

	wsum = NAPI_GRO_CB(skb)->csum;
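
	/* Verify the TCP checksum before merging: with CHECKSUM_NONE we
	 * must sum the payload ourselves; with CHECKSUM_COMPLETE we can
	 * fold the device-provided sum against the pseudo-header.
	 */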
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
				    0);

		/* fall through */

	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  wsum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

skip_csum:
	return tcp_gro_receive(head, skb);
}
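
/* Rebuild the IPv4 pseudo-header checksum for the coalesced packet and
 * mark it SKB_GSO_TCPV4 so it can be resegmented on transmit if needed.
 */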
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_send_check	=	tcp_v4_gso_send_check,
		.gso_segment	=	tcp_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};
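
/* Register the TCP offload callbacks for IPPROTO_TCP at boot. */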
int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}