net/core/gro.c
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 *      dev_add_offload - register offload handlers
 *      @po: protocol offload declaration
 *
 *      Add protocol offload handlers to the networking stack. The passed
 *      &proto_offload is linked into kernel lists and may not be freed until
 *      it has been removed from the kernel lists.
 *
 *      This call does not sleep, therefore it cannot guarantee that all
 *      CPUs that are in the middle of receiving packets will see the new
 *      offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct packet_offload *elem;

        spin_lock(&offload_lock);
        list_for_each_entry(elem, &offload_base, list) {
                if (po->priority < elem->priority)
                        break;
        }
        list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *      __dev_remove_offload - remove offload handler
 *      @po: packet offload declaration
 *
 *      Remove a protocol offload handler that was previously added to the
 *      kernel offload handlers by dev_add_offload(). The passed &offload_type
 *      is removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}

/**
 *      dev_remove_offload - remove packet offload handler
 *      @po: packet offload declaration
 *
 *      Remove a packet offload handler that was previously added to the kernel
 *      offload handlers by dev_add_offload(). The passed &offload_type is
 *      removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
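
/*
 * Illustrative sketch (not part of the original file): how a protocol
 * typically registers its offload handlers with dev_add_offload() and
 * removes them again with dev_remove_offload().  The example_* callbacks
 * below are hypothetical placeholders, so the block is compiled out.
 */
#if 0
static struct packet_offload example_packet_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),
        .callbacks = {
                .gso_segment = example_gso_segment,
                .gro_receive = example_gro_receive,
                .gro_complete = example_gro_complete,
        },
};

static int __init example_offload_init(void)
{
        dev_add_offload(&example_packet_offload);
        return 0;
}

static void __exit example_offload_exit(void)
{
        /* Sleeps (synchronize_net()) until no CPU can still see the handler. */
        dev_remove_offload(&example_packet_offload);
}
#endif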

/**
 *      skb_eth_gso_segment - segmentation handler for ethernet protocols.
 *      @skb: buffer to segment
 *      @features: features for the output path (see dev->features)
 *      @type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features, __be16 type)
{
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_offload *ptype;

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &offload_base, list) {
                if (ptype->type == type && ptype->callbacks.gso_segment) {
                        segs = ptype->callbacks.gso_segment(skb, features);
                        break;
                }
        }
        rcu_read_unlock();

        return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);

/**
 *      skb_mac_gso_segment - mac layer segmentation handler.
 *      @skb: buffer to segment
 *      @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_offload *ptype;
        int vlan_depth = skb->mac_len;
        __be16 type = skb_network_protocol(skb, &vlan_depth);

        if (unlikely(!type))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, vlan_depth);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &offload_base, list) {
                if (ptype->type == type && ptype->callbacks.gso_segment) {
                        segs = ptype->callbacks.gso_segment(skb, features);
                        break;
                }
        }
        rcu_read_unlock();

        __skb_push(skb, skb->data - skb_mac_header(skb));

        return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

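/*
 * Illustrative sketch (not part of the original file): both segmentation
 * helpers above return either a segment list, NULL, or an ERR_PTR(), so a
 * caller has to distinguish all three cases.  The function below is a
 * hypothetical example, not an in-tree user.
 */
#if 0
static int example_segment_and_count(struct sk_buff *skb,
                                     netdev_features_t features)
{
        struct sk_buff *segs, *seg;
        int count = 0;

        segs = skb_mac_gso_segment(skb, features);
        if (IS_ERR_OR_NULL(segs))
                return segs ? PTR_ERR(segs) : -EINVAL;

        /* Walk the returned segment list; entries are linked via ->next. */
        for (seg = segs; seg; seg = seg->next)
                count++;

        kfree_skb_list(segs);
        return count;
}
#endif

/* Merge @skb into an earlier packet @p of the same flow that is being held
 * on a GRO list.  Depending on the layout of @skb, its page frags are
 * appended to @p, its linear head is turned into an extra frag, or the whole
 * skb is chained behind @p (frag_list / the GRO 'last' pointer).  Returns 0
 * on success, or a negative errno (e.g. -E2BIG, -ETOOMANYREFS) when the two
 * packets cannot be merged and @skb must be handled separately.
 */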
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
        unsigned int len = skb_gro_len(skb);
        unsigned int delta_truesize;
        unsigned int gro_max_size;
        unsigned int new_truesize;
        struct sk_buff *lp;
        int segs;

        /* Do not splice page pool based packets w/ non-page pool
         * packets. This can result in reference count issues as page
         * pool pages will not decrement the reference count and will
         * instead be immediately returned to the pool or have frag
         * count decremented.
         */
        if (p->pp_recycle != skb->pp_recycle)
                return -ETOOMANYREFS;

        /* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
        gro_max_size = READ_ONCE(p->dev->gro_max_size);

        if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
                return -E2BIG;

        if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
                if (p->protocol != htons(ETH_P_IPV6) ||
                    skb_headroom(p) < sizeof(struct hop_jumbo_hdr) ||
                    ipv6_hdr(p)->nexthdr != IPPROTO_TCP ||
                    p->encapsulation)
                        return -E2BIG;
        }

        segs = NAPI_GRO_CB(skb)->count;
        lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);

        if (headlen <= offset) {
                skb_frag_t *frag;
                skb_frag_t *frag2;
                int i = skbinfo->nr_frags;
                int nr_frags = pinfo->nr_frags + i;

                if (nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                offset -= headlen;
                pinfo->nr_frags = nr_frags;
                skbinfo->nr_frags = 0;

                frag = pinfo->frags + nr_frags;
                frag2 = skbinfo->frags + i;
                do {
                        *--frag = *--frag2;
                } while (--i);

                skb_frag_off_add(frag, offset);
                skb_frag_size_sub(frag, offset);

                /* all fragments truesize : remove (head size + sk_buff) */
                new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
                delta_truesize = skb->truesize - new_truesize;

                skb->truesize = new_truesize;
                skb->len -= skb->data_len;
                skb->data_len = 0;

                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
                goto done;
        } else if (skb->head_frag) {
                int nr_frags = pinfo->nr_frags;
                skb_frag_t *frag = pinfo->frags + nr_frags;
                struct page *page = virt_to_head_page(skb->head);
                unsigned int first_size = headlen - offset;
                unsigned int first_offset;

                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                first_offset = skb->data -
                               (unsigned char *)page_address(page) +
                               offset;

                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

                __skb_frag_set_page(frag, page);
                skb_frag_off_set(frag, first_offset);
                skb_frag_size_set(frag, first_size);

                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
                /* We don't need to clear skbinfo->nr_frags here */

                new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
                delta_truesize = skb->truesize - new_truesize;
                skb->truesize = new_truesize;
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }

merge:
        /* sk ownership - if any - is completely transferred to the aggregated packet */
        skb->destructor = NULL;
        delta_truesize = skb->truesize;
        if (offset > headlen) {
                unsigned int eat = offset - headlen;

                skb_frag_off_add(&skbinfo->frags[0], eat);
                skb_frag_size_sub(&skbinfo->frags[0], eat);
                skb->data_len -= eat;
                skb->len -= eat;
                offset = headlen;
        }

        __skb_pull(skb, offset);

        if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
        NAPI_GRO_CB(p)->last = skb;
        __skb_header_release(skb);
        lp = p;

done:
        NAPI_GRO_CB(p)->count += segs;
        p->data_len += len;
        p->truesize += delta_truesize;
        p->len += len;
        if (lp != p) {
                lp->data_len += len;
                lp->truesize += delta_truesize;
                lp->len += len;
        }
        NAPI_GRO_CB(skb)->same_flow = 1;
        return 0;
}


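/* Hand a fully aggregated packet to the stack: let the protocol's
 * ->gro_complete() callback fix up headers and checksums of the merged
 * super-packet (skipped when only one segment was collected), then queue it
 * on the GRO_NORMAL batch via gro_normal_one().  If no handler is found or
 * it fails, the skb is dropped.
 */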
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        struct list_head *head = &offload_base;
        int err = -ENOENT;

        BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

        if (NAPI_GRO_CB(skb)->count == 1) {
                skb_shinfo(skb)->gso_size = 0;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;

                err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
                                         ipv6_gro_complete, inet_gro_complete,
                                         skb, 0);
                break;
        }
        rcu_read_unlock();

        if (err) {
                WARN_ON(&ptype->list == head);
                kfree_skb(skb);
                return;
        }

out:
        gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
                                   bool flush_old)
{
        struct list_head *head = &napi->gro_hash[index].list;
        struct sk_buff *skb, *p;

        list_for_each_entry_safe_reverse(skb, p, head, list) {
                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                        return;
                skb_list_del_init(skb);
                napi_gro_complete(napi, skb);
                napi->gro_hash[index].count--;
        }

        if (!napi->gro_hash[index].count)
                __clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age.
 * Youngest packets are at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
        unsigned long bitmask = napi->gro_bitmask;
        unsigned int i, base = ~0U;

        while ((i = ffs(bitmask)) != 0) {
                bitmask >>= i;
                base += i;
                __napi_gro_flush_chain(napi, base, flush_old);
        }
}
EXPORT_SYMBOL(napi_gro_flush);

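/* Walk the per-bucket GRO list and set each held packet's same_flow flag:
 * a packet counts as the same flow as @skb only if its rx hash, device,
 * VLAN tag, MAC header and (on the slow_gro path) socket, metadata dst,
 * conntrack entry and tc chain all match.  The protocol ->gro_receive()
 * callbacks will only try to merge @skb with entries left marked same_flow.
 */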
static void gro_list_prepare(const struct list_head *head,
                             const struct sk_buff *skb)
{
        unsigned int maclen = skb->dev->hard_header_len;
        u32 hash = skb_get_hash_raw(skb);
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                unsigned long diffs;

                NAPI_GRO_CB(p)->flush = 0;

                if (hash != skb_get_hash_raw(p)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
                if (skb_vlan_tag_present(p))
                        diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
                diffs |= skb_metadata_differs(p, skb);
                if (maclen == ETH_HLEN)
                        diffs |= compare_ether_header(skb_mac_header(p),
                                                      skb_mac_header(skb));
                else if (!diffs)
                        diffs = memcmp(skb_mac_header(p),
                                       skb_mac_header(skb),
                                       maclen);

                /* In most common scenarios 'slow_gro' is 0, otherwise we are
                 * already on some slower paths, so either skip all the
                 * infrequent tests altogether or avoid trying too hard to
                 * skip each of them individually.
                 */
                if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
                        struct tc_skb_ext *skb_ext;
                        struct tc_skb_ext *p_ext;
#endif

                        diffs |= p->sk != skb->sk;
                        diffs |= skb_metadata_dst_cmp(p, skb);
                        diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
                        skb_ext = skb_ext_find(skb, TC_SKB_EXT);
                        p_ext = skb_ext_find(p, TC_SKB_EXT);

                        diffs |= (!!p_ext) ^ (!!skb_ext);
                        if (!diffs && unlikely(skb_ext))
                                diffs |= p_ext->chain ^ skb_ext->chain;
#endif
                }

                NAPI_GRO_CB(p)->same_flow = !diffs;
        }
}

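/* Prime the GRO control block for a freshly received skb.  If the packet is
 * entirely non-linear (empty head, data in frags[0]) and the first fragment
 * is safe to dereference, record a direct mapping of it in frag0/frag0_len
 * so the GRO header helpers can peek at headers without touching skb->data.
 */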
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
        const struct skb_shared_info *pinfo = skb_shinfo(skb);
        const skb_frag_t *frag0 = &pinfo->frags[0];

        NAPI_GRO_CB(skb)->data_offset = 0;
        NAPI_GRO_CB(skb)->frag0 = NULL;
        NAPI_GRO_CB(skb)->frag0_len = 0;

        if (!skb_headlen(skb) && pinfo->nr_frags &&
            !PageHighMem(skb_frag_page(frag0)) &&
            (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
                NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
                                                    skb_frag_size(frag0),
                                                    skb->end - skb->tail);
        }
}

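/* Copy @grow bytes of already-parsed headers from the frag0 area into the
 * skb's linear head, shrinking the first fragment accordingly and dropping
 * it entirely once it becomes empty.
 */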
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
        struct skb_shared_info *pinfo = skb_shinfo(skb);

        BUG_ON(skb->end - skb->tail < grow);

        memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

        skb->data_len -= grow;
        skb->tail += grow;

        skb_frag_off_add(&pinfo->frags[0], grow);
        skb_frag_size_sub(&pinfo->frags[0], grow);

        if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
                skb_frag_unref(skb, 0);
                memmove(pinfo->frags, pinfo->frags + 1,
                        --pinfo->nr_frags * sizeof(pinfo->frags[0]));
        }
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
        struct sk_buff *oldest;

        oldest = list_last_entry(head, struct sk_buff, list);

        /* We are called with head length >= MAX_GRO_SKBS, so this is
         * impossible.
         */
        if (WARN_ON_ONCE(!oldest))
                return;

        /* Do not adjust napi->gro_hash[].count, caller is adding a new
         * SKB to the chain.
         */
        skb_list_del_init(oldest);
        napi_gro_complete(napi, oldest);
}

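/* Core GRO entry point called for every packet on the NAPI GRO receive path.
 * Looks up the packet_offload for skb->protocol, lets the protocol's
 * ->gro_receive() either merge the packet into a flow already held in the
 * per-NAPI hash bucket or hold it as the start of a new flow, and returns a
 * gro_result_t describing what happened (GRO_NORMAL means GRO was bypassed
 * and the caller should deliver the skb up the stack as usual).
 */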
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
        struct gro_list *gro_list = &napi->gro_hash[bucket];
        struct list_head *head = &offload_base;
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        struct sk_buff *pp = NULL;
        enum gro_result ret;
        int same_flow;
        int grow;

        if (netif_elide_gro(skb->dev))
                goto normal;

        gro_list_prepare(&gro_list->list, skb);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type == type && ptype->callbacks.gro_receive)
                        goto found_ptype;
        }
        rcu_read_unlock();
        goto normal;

found_ptype:
        skb_set_network_header(skb, skb_gro_offset(skb));
        skb_reset_mac_len(skb);
        BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
        BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
                                        sizeof(u32))); /* Avoid slow unaligned acc */
        *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
        NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
        NAPI_GRO_CB(skb)->is_atomic = 1;
        NAPI_GRO_CB(skb)->count = 1;
        if (unlikely(skb_is_gso(skb))) {
                NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
                /* Only support TCP and non DODGY users. */
                if (!skb_is_gso_tcp(skb) ||
                    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
                        NAPI_GRO_CB(skb)->flush = 1;
        }

        /* Setup for GRO checksum validation */
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                NAPI_GRO_CB(skb)->csum = skb->csum;
                NAPI_GRO_CB(skb)->csum_valid = 1;
                break;
        case CHECKSUM_UNNECESSARY:
                NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
                break;
        }

        pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
                                ipv6_gro_receive, inet_gro_receive,
                                &gro_list->list, skb);

        rcu_read_unlock();

        if (PTR_ERR(pp) == -EINPROGRESS) {
                ret = GRO_CONSUMED;
                goto ok;
        }

        same_flow = NAPI_GRO_CB(skb)->same_flow;
        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

        if (pp) {
                skb_list_del_init(pp);
                napi_gro_complete(napi, pp);
                gro_list->count--;
        }

        if (same_flow)
                goto ok;

        if (NAPI_GRO_CB(skb)->flush)
                goto normal;

        if (unlikely(gro_list->count >= MAX_GRO_SKBS))
                gro_flush_oldest(napi, &gro_list->list);
        else
                gro_list->count++;

        NAPI_GRO_CB(skb)->age = jiffies;
        NAPI_GRO_CB(skb)->last = skb;
        if (!skb_is_gso(skb))
                skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        list_add(&skb->list, &gro_list->list);
        ret = GRO_HELD;

pull:
        grow = skb_gro_offset(skb) - skb_headlen(skb);
        if (grow > 0)
                gro_pull_from_frag0(skb, grow);
ok:
        if (gro_list->count) {
                if (!test_bit(bucket, &napi->gro_bitmask))
                        __set_bit(bucket, &napi->gro_bitmask);
        } else if (test_bit(bucket, &napi->gro_bitmask)) {
                __clear_bit(bucket, &napi->gro_bitmask);
        }

        return ret;

normal:
        ret = GRO_NORMAL;
        goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
        struct list_head *offload_head = &offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_receive)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
        struct list_head *offload_head = &offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
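
/*
 * Illustrative sketch (not part of the original file): how a tunnel /
 * encapsulation GRO handler can use gro_find_receive_by_type() to hand the
 * inner payload to the matching protocol offload.  The header length, the
 * hard-coded inner protocol and all example_* names are assumptions, and the
 * error handling is deliberately minimal.
 */
#if 0
static struct sk_buff *example_encap_gro_receive(struct list_head *head,
                                                 struct sk_buff *skb)
{
        const unsigned int hlen = 4;            /* hypothetical encap header size */
        __be16 inner_proto = htons(ETH_P_IP);   /* normally read from the header */
        struct packet_offload *ptype;

        /* The offload list is RCU protected; GRO runs inside RCU already. */
        ptype = gro_find_receive_by_type(inner_proto);
        if (!ptype) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        skb_gro_pull(skb, hlen);        /* skip the outer encap header */
        return ptype->callbacks.gro_receive(head, skb);
}
#endif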

static gro_result_t napi_skb_finish(struct napi_struct *napi,
                                    struct sk_buff *skb,
                                    gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
                gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
                        __kfree_skb(skb);
                else
                        __kfree_skb_defer(skb);
                break;

        case GRO_HELD:
        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        gro_result_t ret;

        skb_mark_napi_id(skb, napi);
        trace_napi_gro_receive_entry(skb);

        skb_gro_reset_offset(skb, 0);

        ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_receive_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
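
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * network driver feeds received packets into GRO from its NAPI poll
 * callback.  struct example_priv and example_rx_pop() are hypothetical;
 * only napi_gro_receive() and napi_complete_done() are real kernel APIs.
 */
#if 0
struct example_priv {
        struct napi_struct napi;
        struct net_device *netdev;
};

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        int work_done = 0;

        while (work_done < budget) {
                struct sk_buff *skb = example_rx_pop(priv);

                if (!skb)
                        break;

                skb->protocol = eth_type_trans(skb, priv->netdev);
                napi_gro_receive(napi, skb);
                work_done++;
        }

        if (work_done < budget)
                napi_complete_done(napi, work_done);

        return work_done;
}
#endif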

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
        if (unlikely(skb->pfmemalloc)) {
                consume_skb(skb);
                return;
        }
        __skb_pull(skb, skb_headlen(skb));
        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
        __vlan_hwaccel_clear_tag(skb);
        skb->dev = napi->dev;
        skb->skb_iif = 0;

        /* eth_type_trans() assumes pkt_type is PACKET_HOST */
        skb->pkt_type = PACKET_HOST;

        skb->encapsulation = 0;
        skb_shinfo(skb)->gso_type = 0;
        skb_shinfo(skb)->gso_size = 0;
        if (unlikely(skb->slow_gro)) {
                skb_orphan(skb);
                skb_ext_reset(skb);
                nf_reset_ct(skb);
                skb->slow_gro = 0;
        }

        napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;

        if (!skb) {
                skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
                if (skb) {
                        napi->skb = skb;
                        skb_mark_napi_id(skb, napi);
                }
        }
        return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
                                      struct sk_buff *skb,
                                      gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
        case GRO_HELD:
                __skb_push(skb, ETH_HLEN);
                skb->protocol = eth_type_trans(skb, skb->dev);
                if (ret == GRO_NORMAL)
                        gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else
                        napi_reuse_skb(napi, skb);
                break;

        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(),
 * so we copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;
        const struct ethhdr *eth;
        unsigned int hlen = sizeof(*eth);

        napi->skb = NULL;

        skb_reset_mac_header(skb);
        skb_gro_reset_offset(skb, hlen);

        if (unlikely(skb_gro_header_hard(skb, hlen))) {
                eth = skb_gro_header_slow(skb, hlen, 0);
                if (unlikely(!eth)) {
                        net_warn_ratelimited("%s: dropping impossible skb from %s\n",
                                             __func__, napi->dev->name);
                        napi_reuse_skb(napi, skb);
                        return NULL;
                }
        } else {
                eth = (const struct ethhdr *)skb->data;
                gro_pull_from_frag0(skb, hlen);
                NAPI_GRO_CB(skb)->frag0 += hlen;
                NAPI_GRO_CB(skb)->frag0_len -= hlen;
        }
        __skb_pull(skb, hlen);

        /*
         * This works because the only protocols we care about don't require
         * special handling.
         * We'll fix it up properly in napi_frags_finish()
         */
        skb->protocol = eth->h_proto;

        return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
        gro_result_t ret;
        struct sk_buff *skb = napi_frags_skb(napi);

        trace_napi_gro_frags_entry(skb);

        ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_frags_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
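
/*
 * Illustrative sketch (not part of the original file): the napi_get_frags()
 * + napi_gro_frags() receive model, where the driver attaches pages directly
 * as fragments and never touches the linear area.  The page, offset, length
 * and truesize values would come from a hypothetical RX descriptor.
 */
#if 0
static void example_rx_frag(struct napi_struct *napi, struct page *page,
                            unsigned int offset, unsigned int len,
                            unsigned int truesize)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (unlikely(!skb)) {
                put_page(page);
                return;
        }

        /* Frame (including the ethernet header) lives entirely in the frag. */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
                        truesize);

        /* Consumes napi->skb; the GRO core takes care of eth_type_trans(). */
        napi_gro_frags(napi);
}
#endif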

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
        __wsum wsum;
        __sum16 sum;

        wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

        /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
        sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
        /* See comments in __skb_checksum_complete(). */
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
                    !skb->csum_complete_sw)
                        netdev_rx_csum_fault(skb->dev, skb);
        }

        NAPI_GRO_CB(skb)->csum = wsum;
        NAPI_GRO_CB(skb)->csum_valid = 1;

        return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
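
/*
 * Illustrative sketch (not part of the original file): a transport-layer
 * gro_receive handler typically validates the checksum before merging, and
 * skb_gro_checksum_validate() falls back to __skb_gro_checksum_complete()
 * above when the device did not already verify it.  This loosely mirrors
 * what the in-tree TCP handlers do; example_tcp4_gro_receive() itself is
 * hypothetical.
 */
#if 0
static struct sk_buff *example_tcp4_gro_receive(struct list_head *head,
                                                struct sk_buff *skb)
{
        /* Compute the pseudo-header checksum from the outer IPv4 header. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      inet_gro_compute_pseudo)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return tcp_gro_receive(head, skb);
}
#endif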