net/ipv4/tcp_offload.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      IPV4 GSO/GRO offload support
 *      Linux INET implementation
 *
 *      TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/tcp.h>
#include <net/protocol.h>

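/* Walk the segment list and transfer the software timestamp request to
 * the segment that contains byte ts_seq of the original GSO packet, so
 * the timestamp fires for the intended chunk of data.
 */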
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
                           unsigned int seq, unsigned int mss)
{
        while (skb) {
                if (before(ts_seq, seq + mss)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
                        skb_shinfo(skb)->tskey = ts_seq;
                        return;
                }

                skb = skb->next;
                seq += mss;
        }
}

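/* IPv4 entry point for GSO: check that the packet really is TCPv4 and
 * that the TCP header is in the linear area, then hand off to the
 * protocol-independent tcp_gso_segment().
 */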
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);

        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                const struct iphdr *iph = ip_hdr(skb);
                struct tcphdr *th = tcp_hdr(skb);

                /* Set up the checksum pseudo-header; the stack is
                 * usually expected to have done this already.
                 */

                th->check = 0;
                skb->ip_summed = CHECKSUM_PARTIAL;
                __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        }

        return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int sum_truesize = 0;
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;
        __wsum delta;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

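        /* Save the bitwise NOT of the length: adding it to a new length
         * below yields the checksum delta in one's-complement arithmetic
         * (RFC 1624 style incremental update).
         */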
        oldlen = ~skb->len;
        __skb_pull(skb, thlen);

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
        skb->ooo_okay = 0;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        /* Only first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;

        /* GSO partial and frag_list segmentation only require splitting
         * the frame into an MSS multiple and possibly a remainder; both
         * cases return a GSO skb, so update the mss now.
         */
        if (skb_is_gso(segs))
                mss *= skb_shinfo(segs)->gso_segs;

        delta = (__force __wsum)htonl(oldlen + thlen + mss);

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
                tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

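        /* Every full-sized segment shares the same checksum adjustment,
         * so fold the length delta into the original checksum once.
         */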
        newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

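        /* Fix up all segments but the last: they carry exactly mss bytes
         * of payload, must not carry FIN/PSH, and use the precomputed
         * checksum; each following segment starts mss bytes further on
         * in sequence space, with CWR kept only on the first.
         */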
        while (skb->next) {
                th->fin = th->psh = 0;
                th->check = newcheck;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        gso_reset_checksum(skb, ~th->check);
                else
                        th->check = gso_make_checksum(skb, ~th->check);

                seq += mss;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        sum_truesize += skb->truesize;
                }
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        }

        /* The following lets TCP Small Queues work well with GSO: the
         * callback into the TCP stack runs when the last frag is freed
         * at TX completion, not right now when gso_skb is freed by the
         * GSO engine.
         */
        if (copy_destructor) {
                int delta;

                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                sum_truesize += skb->truesize;
                delta = sum_truesize - gso_skb->truesize;
                /* In some pathological cases, delta can be negative,
                 * so we must use either refcount_add() or
                 * refcount_sub_and_test() accordingly.
                 */
                if (likely(delta >= 0))
                        refcount_add(delta, &skb->sk->sk_wmem_alloc);
                else
                        WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
        }

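        /* The last segment may be shorter than mss, so recompute its
         * checksum delta from the actual amount of remaining data.
         */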
        delta = (__force __wsum)htonl(oldlen +
                                      (skb_tail_pointer(skb) -
                                       skb_transport_header(skb)) +
                                      skb->data_len);
        th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                gso_reset_checksum(skb, ~th->check);
        else
                th->check = gso_make_checksum(skb, ~th->check);
out:
        return segs;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th;
        struct tcphdr *th2;
        unsigned int len;
        unsigned int thlen;
        __be32 flags;
        unsigned int mss = 1;
        unsigned int hlen;
        unsigned int off;
        int flush = 1;
        int i;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*th);
        th = skb_gro_header(skb, hlen, off);
        if (unlikely(!th))
                goto out;

        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        hlen = off + thlen;
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        skb_gro_pull(skb, thlen);

        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                th2 = tcp_hdr(p);

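                /* source and dest ports are adjacent in the TCP header,
                 * so one 32-bit compare covers both at once.
                 */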
                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                goto found;
        }
        p = NULL;
        goto out_check_final;

found:
        /* Include the IP ID check below from the innermost IP hdr */
        flush = NAPI_GRO_CB(p)->flush;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
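        /* Any difference in the TCP options also prevents coalescing. */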
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);

        /* When we receive the second frame we can decide whether to
         * continue this flow as an atomic flow with a fixed ID or to
         * use an incrementing ID.
         */
        if (NAPI_GRO_CB(p)->flush_id != 1 ||
            NAPI_GRO_CB(p)->count != 1 ||
            !NAPI_GRO_CB(p)->is_atomic)
                flush |= NAPI_GRO_CB(p)->flush_id;
        else
                NAPI_GRO_CB(p)->is_atomic = false;

        mss = skb_shinfo(p)->gso_size;

        /* If skb is a GRO packet, make sure its gso_size matches prior
         * packet mss.  If it is a single frame, do not aggregate it if
         * its length is bigger than our mss.
         */
        if (unlikely(skb_is_gso(skb)))
                flush |= (mss != skb_shinfo(skb)->gso_size);
        else
                flush |= (len - 1) >= mss;

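        /* Segments must be exactly consecutive in sequence space. */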
        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
        flush |= p->decrypted ^ skb->decrypted;
#endif

        if (flush || skb_gro_receive(p, skb)) {
                mss = 1;
                goto out_check_final;
        }

        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
        /* Force a flush if the last segment is smaller than mss. */
        if (unlikely(skb_is_gso(skb)))
                flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
        else
                flush = len < mss;

        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
                                        TCP_FLAG_RST | TCP_FLAG_SYN |
                                        TCP_FLAG_FIN));

        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = p;

out:
        NAPI_GRO_CB(skb)->flush |= (flush != 0);

        return pp;
}

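/* Finish GRO for a merged TCP packet: point csum_start/csum_offset at
 * the TCP checksum so the skb looks like a locally generated
 * CHECKSUM_PARTIAL packet, and record the segment count for a later
 * GSO pass.
 */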
int tcp_gro_complete(struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);

        skb->csum_start = (unsigned char *)th - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

        if (skb->encapsulation)
                skb->inner_transport_header = skb->transport_header;

        return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      inet_gro_compute_pseudo)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return tcp_gro_receive(head, skb);
}

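/* IPv4 GRO completion: prime the pseudo-header checksum and mark the
 * packet as TCPv4 GSO so that it can be resegmented on output.
 */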
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

        if (NAPI_GRO_CB(skb)->is_atomic)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

        return tcp_gro_complete(skb);
}

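/* GSO/GRO callbacks registered with the IPv4 offload layer at init. */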
static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_segment    =       tcp4_gso_segment,
                .gro_receive    =       tcp4_gro_receive,
                .gro_complete   =       tcp4_gro_complete,
        },
};

int __init tcpv4_offload_init(void)
{
        return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}