// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/tcp.h>
#include <net/protocol.h>
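
/* Walk the segment list produced by GSO and mark the segment covering
 * @ts_seq with the software timestamp request carried by the original
 * GSO skb.
 */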
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}
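
/* GSO entry point for IPv4 TCP: reject anything that is not a TCPv4
 * GSO request and make sure th->check carries the pseudo-header
 * checksum that the common tcp_gso_segment() path expects.
 */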
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}
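
/* Split a TCP super-packet into mss-sized segments.  Checksums are not
 * recomputed from scratch: the TCP checksum covers the pseudo-header
 * length field, so each segment's checksum is derived from the original
 * one by adding, in ones' complement arithmetic, the difference between
 * the old total length (folded in via oldlen = ~skb->len) and the new
 * per-segment length (thlen + mss).
 */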
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;
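
	/* Checksum fix-up delta: remove the old TCP length (oldlen is its
	 * ones' complement) and add the new per-segment length thlen + mss.
	 */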
	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));
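
	/* Every full-mss segment shares the same fixed-up checksum; only
	 * the (possibly shorter) last segment is patched separately below.
	 */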
	while (skb->next) {
		/* FIN and PSH may only remain set on the last segment */
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}
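
	/* The last segment may be shorter than mss, so its checksum must be
	 * fixed up again with the real length of the remaining data.
	 */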
	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}
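
/* Try to merge @skb into a same-flow packet already held in @head.
 * "flush" accumulates every condition that forbids aggregation (header
 * option differences, non-contiguous sequence numbers, special flags);
 * a non-zero value forces the held packet to be delivered.
 */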
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		goto out;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		/* source and dest ports are adjacent; compare both as one u32 */
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed ID
	 * or use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;
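
	/* Aggregate only if @skb starts exactly where @p currently ends in
	 * sequence space; any mismatch leaves a non-zero XOR and flushes.
	 */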
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}
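
/* Finalize a GRO-merged packet: restore CHECKSUM_PARTIAL state and fill
 * in GSO metadata so the aggregate can be resegmented later, e.g. when
 * the packet is forwarded.
 */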
void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;
}
EXPORT_SYMBOL(tcp_gro_complete);
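
/* Validate the TCP checksum against the IPv4 pseudo-header before any
 * merge is attempted, so a corrupted packet cannot poison an aggregate.
 */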
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	/* Prepare the pseudo-header checksum expected by CHECKSUM_PARTIAL */
	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	tcp_gro_complete(skb);
	return 0;
}
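
/* Offload callbacks registered for IPPROTO_TCP at boot time */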
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}