/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_IPV6_GRO_H
#define _NET_IPV6_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* jiffies when first packet was created/queued */
	unsigned long age;

/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* portion of the cb set to zero at every gro iteration */
	struct_group(zeroed,

		/* Start offset for remote checksum offload */
		u16	gro_remcsum_start;

		/* This is non-zero if the packet may be of the same flow. */
		u8	same_flow:1;

		/* Used in tunnel GRO receive */
		u8	encap_mark:1;

		/* GRO checksum is valid */
		u8	csum_valid:1;

		/* Number of checksums via CHECKSUM_UNNECESSARY */
		u8	csum_cnt:3;

		/* Free the skb? */
		u8	free:2;

		/* Used in foo-over-udp, set in udp[46]_gro_receive */
		u8	is_ipv6:1;

		/* Used in GRE, set in fou/gue_gro_receive */
		u8	is_fou:1;

		/* Used to determine if flush_id can be ignored */
		u8	is_atomic:1;

		/* Number of gro_receive callbacks this packet already went through */
		u8	recursion_counter:4;

		/* GRO is done by frag_list pointer chaining. */
		u8	is_flist:1;
	);

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

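/* Handlers reach this per-packet state through NAPI_GRO_CB().  A hedged
 * sketch of the usual matching loop (fh/fh2 and the compare are
 * illustrative, not from an in-tree protocol):
 *
 *	list_for_each_entry(p, head, list) {
 *		if (!NAPI_GRO_CB(p)->same_flow)
 *			continue;
 *		if (memcmp(fh, fh2, sizeof(*fh)))
 *			NAPI_GRO_CB(p)->same_flow = 0;	// different flow
 *	}
 */
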
#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}

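/* Tunnel and encapsulation handlers are expected to recurse into the
 * next protocol through this wrapper rather than invoking the callback
 * directly, so the recursion counter above is honoured.  A hedged
 * sketch, with inner_gro_receive standing in for whichever
 * gro_receive_t callback was looked up:
 *
 *	pp = call_gro_receive(inner_gro_receive, head, skb);
 *
 * Past GRO_RECURSION_LIMIT nested calls, the packet is marked for a
 * flush and NULL is returned instead of descending further.
 */
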
typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

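/* Handlers walk the packet with this cursor instead of touching
 * skb->data.  Illustration only (struct foohdr is hypothetical):
 *
 *	skb_gro_pull(skb, sizeof(struct foohdr));	// consume our header
 *
 * After the pull, skb_gro_offset() has grown and skb_gro_len() has
 * shrunk by the same amount, so the next layer sees "its" header at
 * the current offset.
 */
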
static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}

static inline void *skb_gro_header(struct sk_buff *skb,
				   unsigned int hlen, unsigned int offset)
{
	void *ptr;

	ptr = skb_gro_header_fast(skb, offset);
	if (skb_gro_header_hard(skb, hlen))
		ptr = skb_gro_header_slow(skb, hlen, offset);
	return ptr;
}

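/* The canonical lookup pattern (udp_gro_udphdr() near the end of this
 * file is a real instance): compute the total span needed, then let
 * skb_gro_header() take the frag0 fast path or fall back to pulling
 * the linear area.  Sketch with a hypothetical struct foohdr:
 *
 *	off = skb_gro_offset(skb);
 *	fh  = skb_gro_header(skb, off + sizeof(struct foohdr), off);
 *	if (!fh)
 *		goto flush;	// header not reachable, give up on GRO
 */
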
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
						wsum_negate(NAPI_GRO_CB(skb)->csum)));
}

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fallback to normal path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)

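/* Expected call-site shape, modelled on the UDP receive path (hedged,
 * not copied verbatim from any one protocol): validate against the
 * pseudo header and bail out of aggregation on failure:
 *
 *	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP,
 *						 uh->check,
 *						 inet_gro_compute_pseudo))
 *		goto flush;
 */
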
static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)

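/* Typical follow-up once validation has passed: opportunistically seed
 * NAPI_GRO_CB(skb)->csum with the pseudo header so later users get
 * CHECKSUM_COMPLETE semantics for free.  Hedged sketch:
 *
 *	skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
 *				     inet_gro_compute_pseudo);
 */
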
struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header(skb, off + plen, off);
	if (!ptr)
		return NULL;

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header(skb, plen, grc->offset);
	if (!ptr)
		return;

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

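/* Remote checksum offload lifecycle, sketched after the fou/gue style
 * of caller (names and control flow hedged): init on the stack,
 * process while parsing the option, and let the flush-final helper
 * below undo the edit if aggregation is abandoned:
 *
 *	struct gro_remcsum grc;
 *
 *	skb_gro_remcsum_init(&grc);
 *	ptr = skb_gro_remcsum_process(skb, ptr, off, hdrlen,
 *				      start, offset, &grc, nopartial);
 *	if (!ptr)
 *		goto out;
 *	...
 *	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 */
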
#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif

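/* Both variants are meant for the tail of a gro_receive handler.  With
 * CONFIG_XFRM_OFFLOAD, pp may be ERR_PTR(-EINPROGRESS), meaning the
 * packet was stolen for asynchronous crypto and the flush state must
 * be left alone.  Hedged sketch of the usual epilogue:
 *
 *	out:
 *		skb_gro_flush_final(skb, pp, flush);
 *		return pp;
 */
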
INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})

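/* Hedged sketch of dispatching through this macro: the likely IPv6 and
 * IPv4 implementations are named so the compiler can avoid a retpoline
 * in the common cases (ops is a hypothetical offload table here):
 *
 *	pp = indirect_call_gro_receive_inet(ops->callbacks.gro_receive,
 *					    ipv6_gro_receive,
 *					    inet_gro_receive, head, skb);
 */
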
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header(skb, hlen, off);

	return uh;
}

static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb_gro_len(skb), proto, 0));
}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
		gro_normal_list(napi);
}

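/* Illustration of the batching behaviour, assuming the sysctl
 * net.core.gro_normal_batch is at its default of 8: a caller crediting
 * several segments per skb can trip the flush on a single call:
 *
 *	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
 *	// if rx_count reached the batch size, gro_normal_list() just ran
 */
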
#endif /* _NET_IPV6_GRO_H */