10684833f86411b1f275742364bf9fdfbf14f96f
[platform/kernel/linux-starfive.git] / net / core / dev.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      NET3    Protocol independent device support routines.
4  *
5  *      Derived from the non IP parts of dev.c 1.0.19
6  *              Authors:        Ross Biro
7  *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8  *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
9  *
10  *      Additional Authors:
11  *              Florian la Roche <rzsfl@rz.uni-sb.de>
12  *              Alan Cox <gw4pts@gw4pts.ampr.org>
13  *              David Hinds <dahinds@users.sourceforge.net>
14  *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
15  *              Adam Sulmicki <adam@cfar.umd.edu>
16  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
17  *
18  *      Changes:
19  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
20  *                                      to 2 if register_netdev gets called
21  *                                      before net_dev_init & also removed a
22  *                                      few lines of code in the process.
23  *              Alan Cox        :       device private ioctl copies fields back.
24  *              Alan Cox        :       Transmit queue code does relevant
25  *                                      stunts to keep the queue safe.
26  *              Alan Cox        :       Fixed double lock.
27  *              Alan Cox        :       Fixed promisc NULL pointer trap
28  *              ????????        :       Support the full private ioctl range
29  *              Alan Cox        :       Moved ioctl permission check into
30  *                                      drivers
31  *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
32  *              Alan Cox        :       100 backlog just doesn't cut it when
33  *                                      you start doing multicast video 8)
34  *              Alan Cox        :       Rewrote net_bh and list manager.
35  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
36  *              Alan Cox        :       Took out transmit every packet pass
37  *                                      Saved a few bytes in the ioctl handler
38  *              Alan Cox        :       Network driver sets packet type before
39  *                                      calling netif_rx. Saves a function
40  *                                      call a packet.
41  *              Alan Cox        :       Hashed net_bh()
42  *              Richard Kooijman:       Timestamp fixes.
43  *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
44  *              Alan Cox        :       Device lock protection.
45  *              Alan Cox        :       Fixed nasty side effect of device close
46  *                                      changes.
47  *              Rudi Cilibrasi  :       Pass the right thing to
48  *                                      set_mac_address()
49  *              Dave Miller     :       32bit quantity for the device lock to
50  *                                      make it work out on a Sparc.
51  *              Bjorn Ekwall    :       Added KERNELD hack.
52  *              Alan Cox        :       Cleaned up the backlog initialise.
53  *              Craig Metz      :       SIOCGIFCONF fix if space for under
54  *                                      1 device.
55  *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
56  *                                      is no device open function.
57  *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
58  *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
59  *              Cyrus Durgin    :       Cleaned for KMOD
60  *              Adam Sulmicki   :       Bug Fix : Network Device Unload
61  *                                      A network device unload needs to purge
62  *                                      the backlog queue.
63  *      Paul Rusty Russell      :       SIOCSIFNAME
64  *              Pekka Riikonen  :       Netdev boot-time settings code
65  *              Andrew Morton   :       Make unregister_netdevice wait
66  *                                      indefinitely on dev->refcnt
67  *              J Hadi Salim    :       - Backlog queue sampling
68  *                                      - netif_rx() feedback
69  */
70
71 #include <linux/uaccess.h>
72 #include <linux/bitops.h>
73 #include <linux/capability.h>
74 #include <linux/cpu.h>
75 #include <linux/types.h>
76 #include <linux/kernel.h>
77 #include <linux/hash.h>
78 #include <linux/slab.h>
79 #include <linux/sched.h>
80 #include <linux/sched/mm.h>
81 #include <linux/mutex.h>
82 #include <linux/string.h>
83 #include <linux/mm.h>
84 #include <linux/socket.h>
85 #include <linux/sockios.h>
86 #include <linux/errno.h>
87 #include <linux/interrupt.h>
88 #include <linux/if_ether.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/ethtool.h>
92 #include <linux/skbuff.h>
93 #include <linux/bpf.h>
94 #include <linux/bpf_trace.h>
95 #include <net/net_namespace.h>
96 #include <net/sock.h>
97 #include <net/busy_poll.h>
98 #include <linux/rtnetlink.h>
99 #include <linux/stat.h>
100 #include <net/dst.h>
101 #include <net/dst_metadata.h>
102 #include <net/pkt_sched.h>
103 #include <net/pkt_cls.h>
104 #include <net/checksum.h>
105 #include <net/xfrm.h>
106 #include <linux/highmem.h>
107 #include <linux/init.h>
108 #include <linux/module.h>
109 #include <linux/netpoll.h>
110 #include <linux/rcupdate.h>
111 #include <linux/delay.h>
112 #include <net/iw_handler.h>
113 #include <asm/current.h>
114 #include <linux/audit.h>
115 #include <linux/dmaengine.h>
116 #include <linux/err.h>
117 #include <linux/ctype.h>
118 #include <linux/if_arp.h>
119 #include <linux/if_vlan.h>
120 #include <linux/ip.h>
121 #include <net/ip.h>
122 #include <net/mpls.h>
123 #include <linux/ipv6.h>
124 #include <linux/in.h>
125 #include <linux/jhash.h>
126 #include <linux/random.h>
127 #include <trace/events/napi.h>
128 #include <trace/events/net.h>
129 #include <trace/events/skb.h>
130 #include <linux/inetdevice.h>
131 #include <linux/cpu_rmap.h>
132 #include <linux/static_key.h>
133 #include <linux/hashtable.h>
134 #include <linux/vmalloc.h>
135 #include <linux/if_macvlan.h>
136 #include <linux/errqueue.h>
137 #include <linux/hrtimer.h>
138 #include <linux/netfilter_ingress.h>
139 #include <linux/crash_dump.h>
140 #include <linux/sctp.h>
141 #include <net/udp_tunnel.h>
142 #include <linux/net_namespace.h>
143 #include <linux/indirect_call_wrapper.h>
144 #include <net/devlink.h>
145
146 #include "net-sysfs.h"
147
148 #define MAX_GRO_SKBS 8
149
150 /* This should be increased if a protocol with a bigger head is added. */
151 #define GRO_MAX_HEAD (MAX_HEADER + 128)
152
153 static DEFINE_SPINLOCK(ptype_lock);
154 static DEFINE_SPINLOCK(offload_lock);
155 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
156 struct list_head ptype_all __read_mostly;       /* Taps */
157 static struct list_head offload_base __read_mostly;
158
159 static int netif_rx_internal(struct sk_buff *skb);
160 static int call_netdevice_notifiers_info(unsigned long val,
161                                          struct netdev_notifier_info *info);
162 static int call_netdevice_notifiers_extack(unsigned long val,
163                                            struct net_device *dev,
164                                            struct netlink_ext_ack *extack);
165 static struct napi_struct *napi_by_id(unsigned int napi_id);
166
167 /*
168  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
169  * semaphore.
170  *
171  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
172  *
173  * Writers must hold the rtnl semaphore while they loop through the
174  * dev_base_head list, and hold dev_base_lock for writing when they do the
175  * actual updates.  This allows pure readers to access the list even
176  * while a writer is preparing to update it.
177  *
178  * To put it another way, dev_base_lock is held for writing only to
179  * protect against pure readers; the rtnl semaphore provides the
180  * protection against other writers.
181  *
182  * See, for example usages, register_netdevice() and
183  * unregister_netdevice(), which must be called with the rtnl
184  * semaphore held.
185  */
186 DEFINE_RWLOCK(dev_base_lock);
187 EXPORT_SYMBOL(dev_base_lock);
188
189 static DEFINE_MUTEX(ifalias_mutex);
190
191 /* protects napi_hash addition/deletion and napi_gen_id */
192 static DEFINE_SPINLOCK(napi_hash_lock);
193
194 static unsigned int napi_gen_id = NR_CPUS;
195 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
196
197 static seqcount_t devnet_rename_seq;
198
199 static inline void dev_base_seq_inc(struct net *net)
200 {
201         while (++net->dev_base_seq == 0)
202                 ;
203 }
204
205 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
206 {
207         unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
208
209         return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
210 }
211
212 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
213 {
214         return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
215 }
216
217 static inline void rps_lock(struct softnet_data *sd)
218 {
219 #ifdef CONFIG_RPS
220         spin_lock(&sd->input_pkt_queue.lock);
221 #endif
222 }
223
224 static inline void rps_unlock(struct softnet_data *sd)
225 {
226 #ifdef CONFIG_RPS
227         spin_unlock(&sd->input_pkt_queue.lock);
228 #endif
229 }
230
231 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
232                                                        const char *name)
233 {
234         struct netdev_name_node *name_node;
235
236         name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
237         if (!name_node)
238                 return NULL;
239         INIT_HLIST_NODE(&name_node->hlist);
240         name_node->dev = dev;
241         name_node->name = name;
242         return name_node;
243 }
244
245 static struct netdev_name_node *
246 netdev_name_node_head_alloc(struct net_device *dev)
247 {
248         struct netdev_name_node *name_node;
249
250         name_node = netdev_name_node_alloc(dev, dev->name);
251         if (!name_node)
252                 return NULL;
253         INIT_LIST_HEAD(&name_node->list);
254         return name_node;
255 }
256
257 static void netdev_name_node_free(struct netdev_name_node *name_node)
258 {
259         kfree(name_node);
260 }
261
262 static void netdev_name_node_add(struct net *net,
263                                  struct netdev_name_node *name_node)
264 {
265         hlist_add_head_rcu(&name_node->hlist,
266                            dev_name_hash(net, name_node->name));
267 }
268
269 static void netdev_name_node_del(struct netdev_name_node *name_node)
270 {
271         hlist_del_rcu(&name_node->hlist);
272 }
273
274 static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
275                                                         const char *name)
276 {
277         struct hlist_head *head = dev_name_hash(net, name);
278         struct netdev_name_node *name_node;
279
280         hlist_for_each_entry(name_node, head, hlist)
281                 if (!strcmp(name_node->name, name))
282                         return name_node;
283         return NULL;
284 }
285
286 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
287                                                             const char *name)
288 {
289         struct hlist_head *head = dev_name_hash(net, name);
290         struct netdev_name_node *name_node;
291
292         hlist_for_each_entry_rcu(name_node, head, hlist)
293                 if (!strcmp(name_node->name, name))
294                         return name_node;
295         return NULL;
296 }
297
298 int netdev_name_node_alt_create(struct net_device *dev, const char *name)
299 {
300         struct netdev_name_node *name_node;
301         struct net *net = dev_net(dev);
302
303         name_node = netdev_name_node_lookup(net, name);
304         if (name_node)
305                 return -EEXIST;
306         name_node = netdev_name_node_alloc(dev, name);
307         if (!name_node)
308                 return -ENOMEM;
309         netdev_name_node_add(net, name_node);
310         /* The node that holds dev->name acts as a head of per-device list. */
311         list_add_tail(&name_node->list, &dev->name_node->list);
312
313         return 0;
314 }
315 EXPORT_SYMBOL(netdev_name_node_alt_create);
316
317 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
318 {
319         list_del(&name_node->list);
320         netdev_name_node_del(name_node);
321         kfree(name_node->name);
322         netdev_name_node_free(name_node);
323 }
324
325 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
326 {
327         struct netdev_name_node *name_node;
328         struct net *net = dev_net(dev);
329
330         name_node = netdev_name_node_lookup(net, name);
331         if (!name_node)
332                 return -ENOENT;
333         /* lookup might have found our primary name or a name belonging
334          * to another device.
335          */
336         if (name_node == dev->name_node || name_node->dev != dev)
337                 return -EINVAL;
338
339         __netdev_name_node_alt_destroy(name_node);
340
341         return 0;
342 }
343 EXPORT_SYMBOL(netdev_name_node_alt_destroy);
344
345 static void netdev_name_node_alt_flush(struct net_device *dev)
346 {
347         struct netdev_name_node *name_node, *tmp;
348
349         list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
350                 __netdev_name_node_alt_destroy(name_node);
351 }
352
353 /* Device list insertion */
354 static void list_netdevice(struct net_device *dev)
355 {
356         struct net *net = dev_net(dev);
357
358         ASSERT_RTNL();
359
360         write_lock_bh(&dev_base_lock);
361         list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
362         netdev_name_node_add(net, dev->name_node);
363         hlist_add_head_rcu(&dev->index_hlist,
364                            dev_index_hash(net, dev->ifindex));
365         write_unlock_bh(&dev_base_lock);
366
367         dev_base_seq_inc(net);
368 }
369
370 /* Device list removal
371  * caller must respect a RCU grace period before freeing/reusing dev
372  */
373 static void unlist_netdevice(struct net_device *dev)
374 {
375         ASSERT_RTNL();
376
377         /* Unlink dev from the device chain */
378         write_lock_bh(&dev_base_lock);
379         list_del_rcu(&dev->dev_list);
380         netdev_name_node_del(dev->name_node);
381         hlist_del_rcu(&dev->index_hlist);
382         write_unlock_bh(&dev_base_lock);
383
384         dev_base_seq_inc(dev_net(dev));
385 }
386
387 /*
388  *      Our notifier list
389  */
390
391 static RAW_NOTIFIER_HEAD(netdev_chain);
392
393 /*
394  *      Device drivers call our routines to queue packets here. We empty the
395  *      queue in the local softnet handler.
396  */
397
398 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
399 EXPORT_PER_CPU_SYMBOL(softnet_data);
400
401 #ifdef CONFIG_LOCKDEP
402 /*
403  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
404  * according to dev->type
405  */
406 static const unsigned short netdev_lock_type[] = {
407          ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
408          ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
409          ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
410          ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
411          ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
412          ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
413          ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
414          ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
415          ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
416          ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
417          ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
418          ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
419          ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
420          ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
421          ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
422
423 static const char *const netdev_lock_name[] = {
424         "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
425         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
426         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
427         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
428         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
429         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
430         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
431         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
432         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
433         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
434         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
435         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
436         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
437         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
438         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
439
440 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
441
442 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
443 {
444         int i;
445
446         for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
447                 if (netdev_lock_type[i] == dev_type)
448                         return i;
449         /* the last key is used by default */
450         return ARRAY_SIZE(netdev_lock_type) - 1;
451 }
452
453 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
454                                                  unsigned short dev_type)
455 {
456         int i;
457
458         i = netdev_lock_pos(dev_type);
459         lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
460                                    netdev_lock_name[i]);
461 }
462 #else
463 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
464                                                  unsigned short dev_type)
465 {
466 }
467 #endif
468
469 /*******************************************************************************
470  *
471  *              Protocol management and registration routines
472  *
473  *******************************************************************************/
474
475
476 /*
477  *      Add a protocol ID to the list. Now that the input handler is
478  *      smarter we can dispense with all the messy stuff that used to be
479  *      here.
480  *
481  *      BEWARE!!! Protocol handlers, mangling input packets,
482  *      MUST BE last in hash buckets and checking protocol handlers
483  *      MUST start from promiscuous ptype_all chain in net_bh.
484  *      It is true now, do not change it.
485  *      Explanation follows: if protocol handler, mangling packet, will
486  *      be the first on list, it is not able to sense, that packet
487  *      is cloned and should be copied-on-write, so that it will
488  *      change it and subsequent readers will get broken packet.
489  *                                                      --ANK (980803)
490  */
491
492 static inline struct list_head *ptype_head(const struct packet_type *pt)
493 {
494         if (pt->type == htons(ETH_P_ALL))
495                 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
496         else
497                 return pt->dev ? &pt->dev->ptype_specific :
498                                  &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
499 }
500
501 /**
502  *      dev_add_pack - add packet handler
503  *      @pt: packet type declaration
504  *
505  *      Add a protocol handler to the networking stack. The passed &packet_type
506  *      is linked into kernel lists and may not be freed until it has been
507  *      removed from the kernel lists.
508  *
509  *      This call does not sleep therefore it can not
510  *      guarantee all CPU's that are in middle of receiving packets
511  *      will see the new packet type (until the next received packet).
512  */
513
514 void dev_add_pack(struct packet_type *pt)
515 {
516         struct list_head *head = ptype_head(pt);
517
518         spin_lock(&ptype_lock);
519         list_add_rcu(&pt->list, head);
520         spin_unlock(&ptype_lock);
521 }
522 EXPORT_SYMBOL(dev_add_pack);
523
524 /**
525  *      __dev_remove_pack        - remove packet handler
526  *      @pt: packet type declaration
527  *
528  *      Remove a protocol handler that was previously added to the kernel
529  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
530  *      from the kernel lists and can be freed or reused once this function
531  *      returns.
532  *
533  *      The packet type might still be in use by receivers
534  *      and must not be freed until after all the CPU's have gone
535  *      through a quiescent state.
536  */
537 void __dev_remove_pack(struct packet_type *pt)
538 {
539         struct list_head *head = ptype_head(pt);
540         struct packet_type *pt1;
541
542         spin_lock(&ptype_lock);
543
544         list_for_each_entry(pt1, head, list) {
545                 if (pt == pt1) {
546                         list_del_rcu(&pt->list);
547                         goto out;
548                 }
549         }
550
551         pr_warn("dev_remove_pack: %p not found\n", pt);
552 out:
553         spin_unlock(&ptype_lock);
554 }
555 EXPORT_SYMBOL(__dev_remove_pack);
556
557 /**
558  *      dev_remove_pack  - remove packet handler
559  *      @pt: packet type declaration
560  *
561  *      Remove a protocol handler that was previously added to the kernel
562  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
563  *      from the kernel lists and can be freed or reused once this function
564  *      returns.
565  *
566  *      This call sleeps to guarantee that no CPU is looking at the packet
567  *      type after return.
568  */
569 void dev_remove_pack(struct packet_type *pt)
570 {
571         __dev_remove_pack(pt);
572
573         synchronize_net();
574 }
575 EXPORT_SYMBOL(dev_remove_pack);
576
577
578 /**
579  *      dev_add_offload - register offload handlers
580  *      @po: protocol offload declaration
581  *
582  *      Add protocol offload handlers to the networking stack. The passed
583  *      &proto_offload is linked into kernel lists and may not be freed until
584  *      it has been removed from the kernel lists.
585  *
586  *      This call does not sleep therefore it can not
587  *      guarantee all CPU's that are in middle of receiving packets
588  *      will see the new offload handlers (until the next received packet).
589  */
590 void dev_add_offload(struct packet_offload *po)
591 {
592         struct packet_offload *elem;
593
594         spin_lock(&offload_lock);
595         list_for_each_entry(elem, &offload_base, list) {
596                 if (po->priority < elem->priority)
597                         break;
598         }
599         list_add_rcu(&po->list, elem->list.prev);
600         spin_unlock(&offload_lock);
601 }
602 EXPORT_SYMBOL(dev_add_offload);
603
604 /**
605  *      __dev_remove_offload     - remove offload handler
606  *      @po: packet offload declaration
607  *
608  *      Remove a protocol offload handler that was previously added to the
609  *      kernel offload handlers by dev_add_offload(). The passed &offload_type
610  *      is removed from the kernel lists and can be freed or reused once this
611  *      function returns.
612  *
613  *      The packet type might still be in use by receivers
614  *      and must not be freed until after all the CPU's have gone
615  *      through a quiescent state.
616  */
617 static void __dev_remove_offload(struct packet_offload *po)
618 {
619         struct list_head *head = &offload_base;
620         struct packet_offload *po1;
621
622         spin_lock(&offload_lock);
623
624         list_for_each_entry(po1, head, list) {
625                 if (po == po1) {
626                         list_del_rcu(&po->list);
627                         goto out;
628                 }
629         }
630
631         pr_warn("dev_remove_offload: %p not found\n", po);
632 out:
633         spin_unlock(&offload_lock);
634 }
635
636 /**
637  *      dev_remove_offload       - remove packet offload handler
638  *      @po: packet offload declaration
639  *
640  *      Remove a packet offload handler that was previously added to the kernel
641  *      offload handlers by dev_add_offload(). The passed &offload_type is
642  *      removed from the kernel lists and can be freed or reused once this
643  *      function returns.
644  *
645  *      This call sleeps to guarantee that no CPU is looking at the packet
646  *      type after return.
647  */
648 void dev_remove_offload(struct packet_offload *po)
649 {
650         __dev_remove_offload(po);
651
652         synchronize_net();
653 }
654 EXPORT_SYMBOL(dev_remove_offload);
655
656 /******************************************************************************
657  *
658  *                    Device Boot-time Settings Routines
659  *
660  ******************************************************************************/
661
662 /* Boot time configuration table */
663 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
664
665 /**
666  *      netdev_boot_setup_add   - add new setup entry
667  *      @name: name of the device
668  *      @map: configured settings for the device
669  *
670  *      Adds new setup entry to the dev_boot_setup list.  The function
671  *      returns 0 on error and 1 on success.  This is a generic routine to
672  *      all netdevices.
673  */
674 static int netdev_boot_setup_add(char *name, struct ifmap *map)
675 {
676         struct netdev_boot_setup *s;
677         int i;
678
679         s = dev_boot_setup;
680         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
681                 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
682                         memset(s[i].name, 0, sizeof(s[i].name));
683                         strlcpy(s[i].name, name, IFNAMSIZ);
684                         memcpy(&s[i].map, map, sizeof(s[i].map));
685                         break;
686                 }
687         }
688
689         return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
690 }
691
692 /**
693  * netdev_boot_setup_check      - check boot time settings
694  * @dev: the netdevice
695  *
696  * Check boot time settings for the device.
697  * The found settings are set for the device to be used
698  * later in the device probing.
699  * Returns 0 if no settings found, 1 if they are.
700  */
701 int netdev_boot_setup_check(struct net_device *dev)
702 {
703         struct netdev_boot_setup *s = dev_boot_setup;
704         int i;
705
706         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
707                 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
708                     !strcmp(dev->name, s[i].name)) {
709                         dev->irq = s[i].map.irq;
710                         dev->base_addr = s[i].map.base_addr;
711                         dev->mem_start = s[i].map.mem_start;
712                         dev->mem_end = s[i].map.mem_end;
713                         return 1;
714                 }
715         }
716         return 0;
717 }
718 EXPORT_SYMBOL(netdev_boot_setup_check);
719
720
721 /**
722  * netdev_boot_base     - get address from boot time settings
723  * @prefix: prefix for network device
724  * @unit: id for network device
725  *
726  * Check boot time settings for the base address of device.
727  * The found settings are set for the device to be used
728  * later in the device probing.
729  * Returns 0 if no settings found.
730  */
731 unsigned long netdev_boot_base(const char *prefix, int unit)
732 {
733         const struct netdev_boot_setup *s = dev_boot_setup;
734         char name[IFNAMSIZ];
735         int i;
736
737         sprintf(name, "%s%d", prefix, unit);
738
739         /*
740          * If device already registered then return base of 1
741          * to indicate not to probe for this interface
742          */
743         if (__dev_get_by_name(&init_net, name))
744                 return 1;
745
746         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
747                 if (!strcmp(name, s[i].name))
748                         return s[i].map.base_addr;
749         return 0;
750 }
751
752 /*
753  * Saves at boot time configured settings for any netdevice.
754  */
755 int __init netdev_boot_setup(char *str)
756 {
757         int ints[5];
758         struct ifmap map;
759
760         str = get_options(str, ARRAY_SIZE(ints), ints);
761         if (!str || !*str)
762                 return 0;
763
764         /* Save settings */
765         memset(&map, 0, sizeof(map));
766         if (ints[0] > 0)
767                 map.irq = ints[1];
768         if (ints[0] > 1)
769                 map.base_addr = ints[2];
770         if (ints[0] > 2)
771                 map.mem_start = ints[3];
772         if (ints[0] > 3)
773                 map.mem_end = ints[4];
774
775         /* Add new entry to the list */
776         return netdev_boot_setup_add(str, &map);
777 }
778
779 __setup("netdev=", netdev_boot_setup);
780
781 /*******************************************************************************
782  *
783  *                          Device Interface Subroutines
784  *
785  *******************************************************************************/
786
787 /**
788  *      dev_get_iflink  - get 'iflink' value of a interface
789  *      @dev: targeted interface
790  *
791  *      Indicates the ifindex the interface is linked to.
792  *      Physical interfaces have the same 'ifindex' and 'iflink' values.
793  */
794
795 int dev_get_iflink(const struct net_device *dev)
796 {
797         if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
798                 return dev->netdev_ops->ndo_get_iflink(dev);
799
800         return dev->ifindex;
801 }
802 EXPORT_SYMBOL(dev_get_iflink);
803
804 /**
805  *      dev_fill_metadata_dst - Retrieve tunnel egress information.
806  *      @dev: targeted interface
807  *      @skb: The packet.
808  *
809  *      For better visibility of tunnel traffic OVS needs to retrieve
810  *      egress tunnel information for a packet. Following API allows
811  *      user to get this info.
812  */
813 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
814 {
815         struct ip_tunnel_info *info;
816
817         if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
818                 return -EINVAL;
819
820         info = skb_tunnel_info_unclone(skb);
821         if (!info)
822                 return -ENOMEM;
823         if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
824                 return -EINVAL;
825
826         return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
827 }
828 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
829
830 /**
831  *      __dev_get_by_name       - find a device by its name
832  *      @net: the applicable net namespace
833  *      @name: name to find
834  *
835  *      Find an interface by name. Must be called under RTNL semaphore
836  *      or @dev_base_lock. If the name is found a pointer to the device
837  *      is returned. If the name is not found then %NULL is returned. The
838  *      reference counters are not incremented so the caller must be
839  *      careful with locks.
840  */
841
842 struct net_device *__dev_get_by_name(struct net *net, const char *name)
843 {
844         struct netdev_name_node *node_name;
845
846         node_name = netdev_name_node_lookup(net, name);
847         return node_name ? node_name->dev : NULL;
848 }
849 EXPORT_SYMBOL(__dev_get_by_name);
850
851 /**
852  * dev_get_by_name_rcu  - find a device by its name
853  * @net: the applicable net namespace
854  * @name: name to find
855  *
856  * Find an interface by name.
857  * If the name is found a pointer to the device is returned.
858  * If the name is not found then %NULL is returned.
859  * The reference counters are not incremented so the caller must be
860  * careful with locks. The caller must hold RCU lock.
861  */
862
863 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
864 {
865         struct netdev_name_node *node_name;
866
867         node_name = netdev_name_node_lookup_rcu(net, name);
868         return node_name ? node_name->dev : NULL;
869 }
870 EXPORT_SYMBOL(dev_get_by_name_rcu);
871
872 /**
873  *      dev_get_by_name         - find a device by its name
874  *      @net: the applicable net namespace
875  *      @name: name to find
876  *
877  *      Find an interface by name. This can be called from any
878  *      context and does its own locking. The returned handle has
879  *      the usage count incremented and the caller must use dev_put() to
880  *      release it when it is no longer needed. %NULL is returned if no
881  *      matching device is found.
882  */
883
884 struct net_device *dev_get_by_name(struct net *net, const char *name)
885 {
886         struct net_device *dev;
887
888         rcu_read_lock();
889         dev = dev_get_by_name_rcu(net, name);
890         if (dev)
891                 dev_hold(dev);
892         rcu_read_unlock();
893         return dev;
894 }
895 EXPORT_SYMBOL(dev_get_by_name);
896
897 /**
898  *      __dev_get_by_index - find a device by its ifindex
899  *      @net: the applicable net namespace
900  *      @ifindex: index of device
901  *
902  *      Search for an interface by index. Returns %NULL if the device
903  *      is not found or a pointer to the device. The device has not
904  *      had its reference counter increased so the caller must be careful
905  *      about locking. The caller must hold either the RTNL semaphore
906  *      or @dev_base_lock.
907  */
908
909 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
910 {
911         struct net_device *dev;
912         struct hlist_head *head = dev_index_hash(net, ifindex);
913
914         hlist_for_each_entry(dev, head, index_hlist)
915                 if (dev->ifindex == ifindex)
916                         return dev;
917
918         return NULL;
919 }
920 EXPORT_SYMBOL(__dev_get_by_index);
921
922 /**
923  *      dev_get_by_index_rcu - find a device by its ifindex
924  *      @net: the applicable net namespace
925  *      @ifindex: index of device
926  *
927  *      Search for an interface by index. Returns %NULL if the device
928  *      is not found or a pointer to the device. The device has not
929  *      had its reference counter increased so the caller must be careful
930  *      about locking. The caller must hold RCU lock.
931  */
932
933 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
934 {
935         struct net_device *dev;
936         struct hlist_head *head = dev_index_hash(net, ifindex);
937
938         hlist_for_each_entry_rcu(dev, head, index_hlist)
939                 if (dev->ifindex == ifindex)
940                         return dev;
941
942         return NULL;
943 }
944 EXPORT_SYMBOL(dev_get_by_index_rcu);
945
946
947 /**
948  *      dev_get_by_index - find a device by its ifindex
949  *      @net: the applicable net namespace
950  *      @ifindex: index of device
951  *
952  *      Search for an interface by index. Returns NULL if the device
953  *      is not found or a pointer to the device. The device returned has
954  *      had a reference added and the pointer is safe until the user calls
955  *      dev_put to indicate they have finished with it.
956  */
957
958 struct net_device *dev_get_by_index(struct net *net, int ifindex)
959 {
960         struct net_device *dev;
961
962         rcu_read_lock();
963         dev = dev_get_by_index_rcu(net, ifindex);
964         if (dev)
965                 dev_hold(dev);
966         rcu_read_unlock();
967         return dev;
968 }
969 EXPORT_SYMBOL(dev_get_by_index);
970
971 /**
972  *      dev_get_by_napi_id - find a device by napi_id
973  *      @napi_id: ID of the NAPI struct
974  *
975  *      Search for an interface by NAPI ID. Returns %NULL if the device
976  *      is not found or a pointer to the device. The device has not had
977  *      its reference counter increased so the caller must be careful
978  *      about locking. The caller must hold RCU lock.
979  */
980
981 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
982 {
983         struct napi_struct *napi;
984
985         WARN_ON_ONCE(!rcu_read_lock_held());
986
987         if (napi_id < MIN_NAPI_ID)
988                 return NULL;
989
990         napi = napi_by_id(napi_id);
991
992         return napi ? napi->dev : NULL;
993 }
994 EXPORT_SYMBOL(dev_get_by_napi_id);
995
996 /**
997  *      netdev_get_name - get a netdevice name, knowing its ifindex.
998  *      @net: network namespace
999  *      @name: a pointer to the buffer where the name will be stored.
1000  *      @ifindex: the ifindex of the interface to get the name from.
1001  *
1002  *      The use of raw_seqcount_begin() and cond_resched() before
1003  *      retrying is required as we want to give the writers a chance
1004  *      to complete when CONFIG_PREEMPTION is not set.
1005  */
1006 int netdev_get_name(struct net *net, char *name, int ifindex)
1007 {
1008         struct net_device *dev;
1009         unsigned int seq;
1010
1011 retry:
1012         seq = raw_seqcount_begin(&devnet_rename_seq);
1013         rcu_read_lock();
1014         dev = dev_get_by_index_rcu(net, ifindex);
1015         if (!dev) {
1016                 rcu_read_unlock();
1017                 return -ENODEV;
1018         }
1019
1020         strcpy(name, dev->name);
1021         rcu_read_unlock();
1022         if (read_seqcount_retry(&devnet_rename_seq, seq)) {
1023                 cond_resched();
1024                 goto retry;
1025         }
1026
1027         return 0;
1028 }
1029
1030 /**
1031  *      dev_getbyhwaddr_rcu - find a device by its hardware address
1032  *      @net: the applicable net namespace
1033  *      @type: media type of device
1034  *      @ha: hardware address
1035  *
1036  *      Search for an interface by MAC address. Returns NULL if the device
1037  *      is not found or a pointer to the device.
1038  *      The caller must hold RCU or RTNL.
1039  *      The returned device has not had its ref count increased
1040  *      and the caller must therefore be careful about locking
1041  *
1042  */
1043
1044 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1045                                        const char *ha)
1046 {
1047         struct net_device *dev;
1048
1049         for_each_netdev_rcu(net, dev)
1050                 if (dev->type == type &&
1051                     !memcmp(dev->dev_addr, ha, dev->addr_len))
1052                         return dev;
1053
1054         return NULL;
1055 }
1056 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
1057
1058 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
1059 {
1060         struct net_device *dev;
1061
1062         ASSERT_RTNL();
1063         for_each_netdev(net, dev)
1064                 if (dev->type == type)
1065                         return dev;
1066
1067         return NULL;
1068 }
1069 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
1070
1071 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
1072 {
1073         struct net_device *dev, *ret = NULL;
1074
1075         rcu_read_lock();
1076         for_each_netdev_rcu(net, dev)
1077                 if (dev->type == type) {
1078                         dev_hold(dev);
1079                         ret = dev;
1080                         break;
1081                 }
1082         rcu_read_unlock();
1083         return ret;
1084 }
1085 EXPORT_SYMBOL(dev_getfirstbyhwtype);
1086
1087 /**
1088  *      __dev_get_by_flags - find any device with given flags
1089  *      @net: the applicable net namespace
1090  *      @if_flags: IFF_* values
1091  *      @mask: bitmask of bits in if_flags to check
1092  *
1093  *      Search for any interface with the given flags. Returns NULL if a device
1094  *      is not found or a pointer to the device. Must be called inside
1095  *      rtnl_lock(), and result refcount is unchanged.
1096  */
1097
1098 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1099                                       unsigned short mask)
1100 {
1101         struct net_device *dev, *ret;
1102
1103         ASSERT_RTNL();
1104
1105         ret = NULL;
1106         for_each_netdev(net, dev) {
1107                 if (((dev->flags ^ if_flags) & mask) == 0) {
1108                         ret = dev;
1109                         break;
1110                 }
1111         }
1112         return ret;
1113 }
1114 EXPORT_SYMBOL(__dev_get_by_flags);
1115
1116 /**
1117  *      dev_valid_name - check if name is okay for network device
1118  *      @name: name string
1119  *
1120  *      Network device names need to be valid file names to
1121  *      to allow sysfs to work.  We also disallow any kind of
1122  *      whitespace.
1123  */
1124 bool dev_valid_name(const char *name)
1125 {
1126         if (*name == '\0')
1127                 return false;
1128         if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1129                 return false;
1130         if (!strcmp(name, ".") || !strcmp(name, ".."))
1131                 return false;
1132
1133         while (*name) {
1134                 if (*name == '/' || *name == ':' || isspace(*name))
1135                         return false;
1136                 name++;
1137         }
1138         return true;
1139 }
1140 EXPORT_SYMBOL(dev_valid_name);
1141
1142 /**
1143  *      __dev_alloc_name - allocate a name for a device
1144  *      @net: network namespace to allocate the device name in
1145  *      @name: name format string
1146  *      @buf:  scratch buffer and result name string
1147  *
1148  *      Passed a format string - eg "lt%d" it will try and find a suitable
1149  *      id. It scans list of devices to build up a free map, then chooses
1150  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1151  *      while allocating the name and adding the device in order to avoid
1152  *      duplicates.
1153  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1154  *      Returns the number of the unit assigned or a negative errno code.
1155  */
1156
1157 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1158 {
1159         int i = 0;
1160         const char *p;
1161         const int max_netdevices = 8*PAGE_SIZE;
1162         unsigned long *inuse;
1163         struct net_device *d;
1164
1165         if (!dev_valid_name(name))
1166                 return -EINVAL;
1167
1168         p = strchr(name, '%');
1169         if (p) {
1170                 /*
1171                  * Verify the string as this thing may have come from
1172                  * the user.  There must be either one "%d" and no other "%"
1173                  * characters.
1174                  */
1175                 if (p[1] != 'd' || strchr(p + 2, '%'))
1176                         return -EINVAL;
1177
1178                 /* Use one page as a bit array of possible slots */
1179                 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1180                 if (!inuse)
1181                         return -ENOMEM;
1182
1183                 for_each_netdev(net, d) {
1184                         if (!sscanf(d->name, name, &i))
1185                                 continue;
1186                         if (i < 0 || i >= max_netdevices)
1187                                 continue;
1188
1189                         /*  avoid cases where sscanf is not exact inverse of printf */
1190                         snprintf(buf, IFNAMSIZ, name, i);
1191                         if (!strncmp(buf, d->name, IFNAMSIZ))
1192                                 set_bit(i, inuse);
1193                 }
1194
1195                 i = find_first_zero_bit(inuse, max_netdevices);
1196                 free_page((unsigned long) inuse);
1197         }
1198
1199         snprintf(buf, IFNAMSIZ, name, i);
1200         if (!__dev_get_by_name(net, buf))
1201                 return i;
1202
1203         /* It is possible to run out of possible slots
1204          * when the name is long and there isn't enough space left
1205          * for the digits, or if all bits are used.
1206          */
1207         return -ENFILE;
1208 }
1209
1210 static int dev_alloc_name_ns(struct net *net,
1211                              struct net_device *dev,
1212                              const char *name)
1213 {
1214         char buf[IFNAMSIZ];
1215         int ret;
1216
1217         BUG_ON(!net);
1218         ret = __dev_alloc_name(net, name, buf);
1219         if (ret >= 0)
1220                 strlcpy(dev->name, buf, IFNAMSIZ);
1221         return ret;
1222 }
1223
1224 /**
1225  *      dev_alloc_name - allocate a name for a device
1226  *      @dev: device
1227  *      @name: name format string
1228  *
1229  *      Passed a format string - eg "lt%d" it will try and find a suitable
1230  *      id. It scans list of devices to build up a free map, then chooses
1231  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1232  *      while allocating the name and adding the device in order to avoid
1233  *      duplicates.
1234  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1235  *      Returns the number of the unit assigned or a negative errno code.
1236  */
1237
1238 int dev_alloc_name(struct net_device *dev, const char *name)
1239 {
1240         return dev_alloc_name_ns(dev_net(dev), dev, name);
1241 }
1242 EXPORT_SYMBOL(dev_alloc_name);
1243
1244 static int dev_get_valid_name(struct net *net, struct net_device *dev,
1245                               const char *name)
1246 {
1247         BUG_ON(!net);
1248
1249         if (!dev_valid_name(name))
1250                 return -EINVAL;
1251
1252         if (strchr(name, '%'))
1253                 return dev_alloc_name_ns(net, dev, name);
1254         else if (__dev_get_by_name(net, name))
1255                 return -EEXIST;
1256         else if (dev->name != name)
1257                 strlcpy(dev->name, name, IFNAMSIZ);
1258
1259         return 0;
1260 }
1261
1262 /**
1263  *      dev_change_name - change name of a device
1264  *      @dev: device
1265  *      @newname: name (or format string) must be at least IFNAMSIZ
1266  *
1267  *      Change name of a device, can pass format strings "eth%d".
1268  *      for wildcarding.
1269  */
1270 int dev_change_name(struct net_device *dev, const char *newname)
1271 {
1272         unsigned char old_assign_type;
1273         char oldname[IFNAMSIZ];
1274         int err = 0;
1275         int ret;
1276         struct net *net;
1277
1278         ASSERT_RTNL();
1279         BUG_ON(!dev_net(dev));
1280
1281         net = dev_net(dev);
1282
1283         /* Some auto-enslaved devices e.g. failover slaves are
1284          * special, as userspace might rename the device after
1285          * the interface had been brought up and running since
1286          * the point kernel initiated auto-enslavement. Allow
1287          * live name change even when these slave devices are
1288          * up and running.
1289          *
1290          * Typically, users of these auto-enslaving devices
1291          * don't actually care about slave name change, as
1292          * they are supposed to operate on master interface
1293          * directly.
1294          */
1295         if (dev->flags & IFF_UP &&
1296             likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
1297                 return -EBUSY;
1298
1299         write_seqcount_begin(&devnet_rename_seq);
1300
1301         if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1302                 write_seqcount_end(&devnet_rename_seq);
1303                 return 0;
1304         }
1305
1306         memcpy(oldname, dev->name, IFNAMSIZ);
1307
1308         err = dev_get_valid_name(net, dev, newname);
1309         if (err < 0) {
1310                 write_seqcount_end(&devnet_rename_seq);
1311                 return err;
1312         }
1313
1314         if (oldname[0] && !strchr(oldname, '%'))
1315                 netdev_info(dev, "renamed from %s\n", oldname);
1316
1317         old_assign_type = dev->name_assign_type;
1318         dev->name_assign_type = NET_NAME_RENAMED;
1319
1320 rollback:
1321         ret = device_rename(&dev->dev, dev->name);
1322         if (ret) {
1323                 memcpy(dev->name, oldname, IFNAMSIZ);
1324                 dev->name_assign_type = old_assign_type;
1325                 write_seqcount_end(&devnet_rename_seq);
1326                 return ret;
1327         }
1328
1329         write_seqcount_end(&devnet_rename_seq);
1330
1331         netdev_adjacent_rename_links(dev, oldname);
1332
1333         write_lock_bh(&dev_base_lock);
1334         netdev_name_node_del(dev->name_node);
1335         write_unlock_bh(&dev_base_lock);
1336
1337         synchronize_rcu();
1338
1339         write_lock_bh(&dev_base_lock);
1340         netdev_name_node_add(net, dev->name_node);
1341         write_unlock_bh(&dev_base_lock);
1342
1343         ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1344         ret = notifier_to_errno(ret);
1345
1346         if (ret) {
1347                 /* err >= 0 after dev_alloc_name() or stores the first errno */
1348                 if (err >= 0) {
1349                         err = ret;
1350                         write_seqcount_begin(&devnet_rename_seq);
1351                         memcpy(dev->name, oldname, IFNAMSIZ);
1352                         memcpy(oldname, newname, IFNAMSIZ);
1353                         dev->name_assign_type = old_assign_type;
1354                         old_assign_type = NET_NAME_RENAMED;
1355                         goto rollback;
1356                 } else {
1357                         pr_err("%s: name change rollback failed: %d\n",
1358                                dev->name, ret);
1359                 }
1360         }
1361
1362         return err;
1363 }
1364
1365 /**
1366  *      dev_set_alias - change ifalias of a device
1367  *      @dev: device
1368  *      @alias: name up to IFALIASZ
1369  *      @len: limit of bytes to copy from info
1370  *
1371  *      Set ifalias for a device,
1372  */
1373 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1374 {
1375         struct dev_ifalias *new_alias = NULL;
1376
1377         if (len >= IFALIASZ)
1378                 return -EINVAL;
1379
1380         if (len) {
1381                 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1382                 if (!new_alias)
1383                         return -ENOMEM;
1384
1385                 memcpy(new_alias->ifalias, alias, len);
1386                 new_alias->ifalias[len] = 0;
1387         }
1388
1389         mutex_lock(&ifalias_mutex);
1390         new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1391                                         mutex_is_locked(&ifalias_mutex));
1392         mutex_unlock(&ifalias_mutex);
1393
1394         if (new_alias)
1395                 kfree_rcu(new_alias, rcuhead);
1396
1397         return len;
1398 }
1399 EXPORT_SYMBOL(dev_set_alias);
1400
1401 /**
1402  *      dev_get_alias - get ifalias of a device
1403  *      @dev: device
1404  *      @name: buffer to store name of ifalias
1405  *      @len: size of buffer
1406  *
1407  *      get ifalias for a device.  Caller must make sure dev cannot go
1408  *      away,  e.g. rcu read lock or own a reference count to device.
1409  */
1410 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1411 {
1412         const struct dev_ifalias *alias;
1413         int ret = 0;
1414
1415         rcu_read_lock();
1416         alias = rcu_dereference(dev->ifalias);
1417         if (alias)
1418                 ret = snprintf(name, len, "%s", alias->ifalias);
1419         rcu_read_unlock();
1420
1421         return ret;
1422 }
1423
1424 /**
1425  *      netdev_features_change - device changes features
1426  *      @dev: device to cause notification
1427  *
1428  *      Called to indicate a device has changed features.
1429  */
1430 void netdev_features_change(struct net_device *dev)
1431 {
1432         call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1433 }
1434 EXPORT_SYMBOL(netdev_features_change);
1435
1436 /**
1437  *      netdev_state_change - device changes state
1438  *      @dev: device to cause notification
1439  *
1440  *      Called to indicate a device has changed state. This function calls
1441  *      the notifier chains for netdev_chain and sends a NEWLINK message
1442  *      to the routing socket.
1443  */
1444 void netdev_state_change(struct net_device *dev)
1445 {
1446         if (dev->flags & IFF_UP) {
1447                 struct netdev_notifier_change_info change_info = {
1448                         .info.dev = dev,
1449                 };
1450
1451                 call_netdevice_notifiers_info(NETDEV_CHANGE,
1452                                               &change_info.info);
1453                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1454         }
1455 }
1456 EXPORT_SYMBOL(netdev_state_change);
1457
1458 /**
1459  * netdev_notify_peers - notify network peers about existence of @dev
1460  * @dev: network device
1461  *
1462  * Generate traffic such that interested network peers are aware of
1463  * @dev, such as by generating a gratuitous ARP. This may be used when
1464  * a device wants to inform the rest of the network about some sort of
1465  * reconfiguration such as a failover event or virtual machine
1466  * migration.
1467  */
1468 void netdev_notify_peers(struct net_device *dev)
1469 {
1470         rtnl_lock();
1471         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1472         call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1473         rtnl_unlock();
1474 }
1475 EXPORT_SYMBOL(netdev_notify_peers);
1476
1477 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1478 {
1479         const struct net_device_ops *ops = dev->netdev_ops;
1480         int ret;
1481
1482         ASSERT_RTNL();
1483
1484         if (!netif_device_present(dev))
1485                 return -ENODEV;
1486
1487         /* Block netpoll from trying to do any rx path servicing.
1488          * If we don't do this there is a chance ndo_poll_controller
1489          * or ndo_poll may be running while we open the device
1490          */
1491         netpoll_poll_disable(dev);
1492
1493         ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1494         ret = notifier_to_errno(ret);
1495         if (ret)
1496                 return ret;
1497
1498         set_bit(__LINK_STATE_START, &dev->state);
1499
1500         if (ops->ndo_validate_addr)
1501                 ret = ops->ndo_validate_addr(dev);
1502
1503         if (!ret && ops->ndo_open)
1504                 ret = ops->ndo_open(dev);
1505
1506         netpoll_poll_enable(dev);
1507
1508         if (ret)
1509                 clear_bit(__LINK_STATE_START, &dev->state);
1510         else {
1511                 dev->flags |= IFF_UP;
1512                 dev_set_rx_mode(dev);
1513                 dev_activate(dev);
1514                 add_device_randomness(dev->dev_addr, dev->addr_len);
1515         }
1516
1517         return ret;
1518 }
1519
1520 /**
1521  *      dev_open        - prepare an interface for use.
1522  *      @dev: device to open
1523  *      @extack: netlink extended ack
1524  *
1525  *      Takes a device from down to up state. The device's private open
1526  *      function is invoked and then the multicast lists are loaded. Finally
1527  *      the device is moved into the up state and a %NETDEV_UP message is
1528  *      sent to the netdev notifier chain.
1529  *
1530  *      Calling this function on an active interface is a nop. On a failure
1531  *      a negative errno code is returned.
1532  */
1533 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1534 {
1535         int ret;
1536
1537         if (dev->flags & IFF_UP)
1538                 return 0;
1539
1540         ret = __dev_open(dev, extack);
1541         if (ret < 0)
1542                 return ret;
1543
1544         rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1545         call_netdevice_notifiers(NETDEV_UP, dev);
1546
1547         return ret;
1548 }
1549 EXPORT_SYMBOL(dev_open);
1550
1551 static void __dev_close_many(struct list_head *head)
1552 {
1553         struct net_device *dev;
1554
1555         ASSERT_RTNL();
1556         might_sleep();
1557
1558         list_for_each_entry(dev, head, close_list) {
1559                 /* Temporarily disable netpoll until the interface is down */
1560                 netpoll_poll_disable(dev);
1561
1562                 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1563
1564                 clear_bit(__LINK_STATE_START, &dev->state);
1565
1566                 /* Synchronize to scheduled poll. We cannot touch poll list, it
1567                  * can be even on different cpu. So just clear netif_running().
1568                  *
1569                  * dev->stop() will invoke napi_disable() on all of it's
1570                  * napi_struct instances on this device.
1571                  */
1572                 smp_mb__after_atomic(); /* Commit netif_running(). */
1573         }
1574
1575         dev_deactivate_many(head);
1576
1577         list_for_each_entry(dev, head, close_list) {
1578                 const struct net_device_ops *ops = dev->netdev_ops;
1579
1580                 /*
1581                  *      Call the device specific close. This cannot fail.
1582                  *      Only if device is UP
1583                  *
1584                  *      We allow it to be called even after a DETACH hot-plug
1585                  *      event.
1586                  */
1587                 if (ops->ndo_stop)
1588                         ops->ndo_stop(dev);
1589
1590                 dev->flags &= ~IFF_UP;
1591                 netpoll_poll_enable(dev);
1592         }
1593 }
1594
1595 static void __dev_close(struct net_device *dev)
1596 {
1597         LIST_HEAD(single);
1598
1599         list_add(&dev->close_list, &single);
1600         __dev_close_many(&single);
1601         list_del(&single);
1602 }
1603
1604 void dev_close_many(struct list_head *head, bool unlink)
1605 {
1606         struct net_device *dev, *tmp;
1607
1608         /* Remove the devices that don't need to be closed */
1609         list_for_each_entry_safe(dev, tmp, head, close_list)
1610                 if (!(dev->flags & IFF_UP))
1611                         list_del_init(&dev->close_list);
1612
1613         __dev_close_many(head);
1614
1615         list_for_each_entry_safe(dev, tmp, head, close_list) {
1616                 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1617                 call_netdevice_notifiers(NETDEV_DOWN, dev);
1618                 if (unlink)
1619                         list_del_init(&dev->close_list);
1620         }
1621 }
1622 EXPORT_SYMBOL(dev_close_many);
1623
1624 /**
1625  *      dev_close - shutdown an interface.
1626  *      @dev: device to shutdown
1627  *
1628  *      This function moves an active device into down state. A
1629  *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1630  *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1631  *      chain.
1632  */
1633 void dev_close(struct net_device *dev)
1634 {
1635         if (dev->flags & IFF_UP) {
1636                 LIST_HEAD(single);
1637
1638                 list_add(&dev->close_list, &single);
1639                 dev_close_many(&single, true);
1640                 list_del(&single);
1641         }
1642 }
1643 EXPORT_SYMBOL(dev_close);
1644
1645
1646 /**
1647  *      dev_disable_lro - disable Large Receive Offload on a device
1648  *      @dev: device
1649  *
1650  *      Disable Large Receive Offload (LRO) on a net device.  Must be
1651  *      called under RTNL.  This is needed if received packets may be
1652  *      forwarded to another interface.
1653  */
1654 void dev_disable_lro(struct net_device *dev)
1655 {
1656         struct net_device *lower_dev;
1657         struct list_head *iter;
1658
1659         dev->wanted_features &= ~NETIF_F_LRO;
1660         netdev_update_features(dev);
1661
1662         if (unlikely(dev->features & NETIF_F_LRO))
1663                 netdev_WARN(dev, "failed to disable LRO!\n");
1664
1665         netdev_for_each_lower_dev(dev, lower_dev, iter)
1666                 dev_disable_lro(lower_dev);
1667 }
1668 EXPORT_SYMBOL(dev_disable_lro);
1669
1670 /**
1671  *      dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1672  *      @dev: device
1673  *
1674  *      Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1675  *      called under RTNL.  This is needed if Generic XDP is installed on
1676  *      the device.
1677  */
1678 static void dev_disable_gro_hw(struct net_device *dev)
1679 {
1680         dev->wanted_features &= ~NETIF_F_GRO_HW;
1681         netdev_update_features(dev);
1682
1683         if (unlikely(dev->features & NETIF_F_GRO_HW))
1684                 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1685 }
1686
1687 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1688 {
1689 #define N(val)                                          \
1690         case NETDEV_##val:                              \
1691                 return "NETDEV_" __stringify(val);
1692         switch (cmd) {
1693         N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1694         N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1695         N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1696         N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1697         N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1698         N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1699         N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1700         N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1701         N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1702         N(PRE_CHANGEADDR)
1703         }
1704 #undef N
1705         return "UNKNOWN_NETDEV_EVENT";
1706 }
1707 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1708
1709 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1710                                    struct net_device *dev)
1711 {
1712         struct netdev_notifier_info info = {
1713                 .dev = dev,
1714         };
1715
1716         return nb->notifier_call(nb, val, &info);
1717 }
1718
1719 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1720                                              struct net_device *dev)
1721 {
1722         int err;
1723
1724         err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1725         err = notifier_to_errno(err);
1726         if (err)
1727                 return err;
1728
1729         if (!(dev->flags & IFF_UP))
1730                 return 0;
1731
1732         call_netdevice_notifier(nb, NETDEV_UP, dev);
1733         return 0;
1734 }
1735
1736 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1737                                                 struct net_device *dev)
1738 {
1739         if (dev->flags & IFF_UP) {
1740                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1741                                         dev);
1742                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1743         }
1744         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1745 }
1746
1747 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1748                                                  struct net *net)
1749 {
1750         struct net_device *dev;
1751         int err;
1752
1753         for_each_netdev(net, dev) {
1754                 err = call_netdevice_register_notifiers(nb, dev);
1755                 if (err)
1756                         goto rollback;
1757         }
1758         return 0;
1759
1760 rollback:
1761         for_each_netdev_continue_reverse(net, dev)
1762                 call_netdevice_unregister_notifiers(nb, dev);
1763         return err;
1764 }
1765
1766 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1767                                                     struct net *net)
1768 {
1769         struct net_device *dev;
1770
1771         for_each_netdev(net, dev)
1772                 call_netdevice_unregister_notifiers(nb, dev);
1773 }
1774
1775 static int dev_boot_phase = 1;
1776
1777 /**
1778  * register_netdevice_notifier - register a network notifier block
1779  * @nb: notifier
1780  *
1781  * Register a notifier to be called when network device events occur.
1782  * The notifier passed is linked into the kernel structures and must
1783  * not be reused until it has been unregistered. A negative errno code
1784  * is returned on a failure.
1785  *
1786  * When registered all registration and up events are replayed
1787  * to the new notifier to allow device to have a race free
1788  * view of the network device list.
1789  */
1790
1791 int register_netdevice_notifier(struct notifier_block *nb)
1792 {
1793         struct net *net;
1794         int err;
1795
1796         /* Close race with setup_net() and cleanup_net() */
1797         down_write(&pernet_ops_rwsem);
1798         rtnl_lock();
1799         err = raw_notifier_chain_register(&netdev_chain, nb);
1800         if (err)
1801                 goto unlock;
1802         if (dev_boot_phase)
1803                 goto unlock;
1804         for_each_net(net) {
1805                 err = call_netdevice_register_net_notifiers(nb, net);
1806                 if (err)
1807                         goto rollback;
1808         }
1809
1810 unlock:
1811         rtnl_unlock();
1812         up_write(&pernet_ops_rwsem);
1813         return err;
1814
1815 rollback:
1816         for_each_net_continue_reverse(net)
1817                 call_netdevice_unregister_net_notifiers(nb, net);
1818
1819         raw_notifier_chain_unregister(&netdev_chain, nb);
1820         goto unlock;
1821 }
1822 EXPORT_SYMBOL(register_netdevice_notifier);
1823
1824 /**
1825  * unregister_netdevice_notifier - unregister a network notifier block
1826  * @nb: notifier
1827  *
1828  * Unregister a notifier previously registered by
1829  * register_netdevice_notifier(). The notifier is unlinked into the
1830  * kernel structures and may then be reused. A negative errno code
1831  * is returned on a failure.
1832  *
1833  * After unregistering unregister and down device events are synthesized
1834  * for all devices on the device list to the removed notifier to remove
1835  * the need for special case cleanup code.
1836  */
1837
1838 int unregister_netdevice_notifier(struct notifier_block *nb)
1839 {
1840         struct net *net;
1841         int err;
1842
1843         /* Close race with setup_net() and cleanup_net() */
1844         down_write(&pernet_ops_rwsem);
1845         rtnl_lock();
1846         err = raw_notifier_chain_unregister(&netdev_chain, nb);
1847         if (err)
1848                 goto unlock;
1849
1850         for_each_net(net)
1851                 call_netdevice_unregister_net_notifiers(nb, net);
1852
1853 unlock:
1854         rtnl_unlock();
1855         up_write(&pernet_ops_rwsem);
1856         return err;
1857 }
1858 EXPORT_SYMBOL(unregister_netdevice_notifier);
1859
1860 static int __register_netdevice_notifier_net(struct net *net,
1861                                              struct notifier_block *nb,
1862                                              bool ignore_call_fail)
1863 {
1864         int err;
1865
1866         err = raw_notifier_chain_register(&net->netdev_chain, nb);
1867         if (err)
1868                 return err;
1869         if (dev_boot_phase)
1870                 return 0;
1871
1872         err = call_netdevice_register_net_notifiers(nb, net);
1873         if (err && !ignore_call_fail)
1874                 goto chain_unregister;
1875
1876         return 0;
1877
1878 chain_unregister:
1879         raw_notifier_chain_unregister(&net->netdev_chain, nb);
1880         return err;
1881 }
1882
1883 static int __unregister_netdevice_notifier_net(struct net *net,
1884                                                struct notifier_block *nb)
1885 {
1886         int err;
1887
1888         err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1889         if (err)
1890                 return err;
1891
1892         call_netdevice_unregister_net_notifiers(nb, net);
1893         return 0;
1894 }
1895
1896 /**
1897  * register_netdevice_notifier_net - register a per-netns network notifier block
1898  * @net: network namespace
1899  * @nb: notifier
1900  *
1901  * Register a notifier to be called when network device events occur.
1902  * The notifier passed is linked into the kernel structures and must
1903  * not be reused until it has been unregistered. A negative errno code
1904  * is returned on a failure.
1905  *
1906  * When registered all registration and up events are replayed
1907  * to the new notifier to allow device to have a race free
1908  * view of the network device list.
1909  */
1910
1911 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1912 {
1913         int err;
1914
1915         rtnl_lock();
1916         err = __register_netdevice_notifier_net(net, nb, false);
1917         rtnl_unlock();
1918         return err;
1919 }
1920 EXPORT_SYMBOL(register_netdevice_notifier_net);
1921
1922 /**
1923  * unregister_netdevice_notifier_net - unregister a per-netns
1924  *                                     network notifier block
1925  * @net: network namespace
1926  * @nb: notifier
1927  *
1928  * Unregister a notifier previously registered by
1929  * register_netdevice_notifier(). The notifier is unlinked into the
1930  * kernel structures and may then be reused. A negative errno code
1931  * is returned on a failure.
1932  *
1933  * After unregistering unregister and down device events are synthesized
1934  * for all devices on the device list to the removed notifier to remove
1935  * the need for special case cleanup code.
1936  */
1937
1938 int unregister_netdevice_notifier_net(struct net *net,
1939                                       struct notifier_block *nb)
1940 {
1941         int err;
1942
1943         rtnl_lock();
1944         err = __unregister_netdevice_notifier_net(net, nb);
1945         rtnl_unlock();
1946         return err;
1947 }
1948 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1949
1950 int register_netdevice_notifier_dev_net(struct net_device *dev,
1951                                         struct notifier_block *nb,
1952                                         struct netdev_net_notifier *nn)
1953 {
1954         int err;
1955
1956         rtnl_lock();
1957         err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1958         if (!err) {
1959                 nn->nb = nb;
1960                 list_add(&nn->list, &dev->net_notifier_list);
1961         }
1962         rtnl_unlock();
1963         return err;
1964 }
1965 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1966
1967 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1968                                           struct notifier_block *nb,
1969                                           struct netdev_net_notifier *nn)
1970 {
1971         int err;
1972
1973         rtnl_lock();
1974         list_del(&nn->list);
1975         err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1976         rtnl_unlock();
1977         return err;
1978 }
1979 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1980
1981 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1982                                              struct net *net)
1983 {
1984         struct netdev_net_notifier *nn;
1985
1986         list_for_each_entry(nn, &dev->net_notifier_list, list) {
1987                 __unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
1988                 __register_netdevice_notifier_net(net, nn->nb, true);
1989         }
1990 }
1991
1992 /**
1993  *      call_netdevice_notifiers_info - call all network notifier blocks
1994  *      @val: value passed unmodified to notifier function
1995  *      @info: notifier information data
1996  *
1997  *      Call all network notifier blocks.  Parameters and return value
1998  *      are as for raw_notifier_call_chain().
1999  */
2000
2001 static int call_netdevice_notifiers_info(unsigned long val,
2002                                          struct netdev_notifier_info *info)
2003 {
2004         struct net *net = dev_net(info->dev);
2005         int ret;
2006
2007         ASSERT_RTNL();
2008
2009         /* Run per-netns notifier block chain first, then run the global one.
2010          * Hopefully, one day, the global one is going to be removed after
2011          * all notifier block registrators get converted to be per-netns.
2012          */
2013         ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
2014         if (ret & NOTIFY_STOP_MASK)
2015                 return ret;
2016         return raw_notifier_call_chain(&netdev_chain, val, info);
2017 }
2018
2019 static int call_netdevice_notifiers_extack(unsigned long val,
2020                                            struct net_device *dev,
2021                                            struct netlink_ext_ack *extack)
2022 {
2023         struct netdev_notifier_info info = {
2024                 .dev = dev,
2025                 .extack = extack,
2026         };
2027
2028         return call_netdevice_notifiers_info(val, &info);
2029 }
2030
2031 /**
2032  *      call_netdevice_notifiers - call all network notifier blocks
2033  *      @val: value passed unmodified to notifier function
2034  *      @dev: net_device pointer passed unmodified to notifier function
2035  *
2036  *      Call all network notifier blocks.  Parameters and return value
2037  *      are as for raw_notifier_call_chain().
2038  */
2039
2040 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2041 {
2042         return call_netdevice_notifiers_extack(val, dev, NULL);
2043 }
2044 EXPORT_SYMBOL(call_netdevice_notifiers);
2045
2046 /**
2047  *      call_netdevice_notifiers_mtu - call all network notifier blocks
2048  *      @val: value passed unmodified to notifier function
2049  *      @dev: net_device pointer passed unmodified to notifier function
2050  *      @arg: additional u32 argument passed to the notifier function
2051  *
2052  *      Call all network notifier blocks.  Parameters and return value
2053  *      are as for raw_notifier_call_chain().
2054  */
2055 static int call_netdevice_notifiers_mtu(unsigned long val,
2056                                         struct net_device *dev, u32 arg)
2057 {
2058         struct netdev_notifier_info_ext info = {
2059                 .info.dev = dev,
2060                 .ext.mtu = arg,
2061         };
2062
2063         BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2064
2065         return call_netdevice_notifiers_info(val, &info.info);
2066 }
2067
2068 #ifdef CONFIG_NET_INGRESS
2069 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2070
2071 void net_inc_ingress_queue(void)
2072 {
2073         static_branch_inc(&ingress_needed_key);
2074 }
2075 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2076
2077 void net_dec_ingress_queue(void)
2078 {
2079         static_branch_dec(&ingress_needed_key);
2080 }
2081 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2082 #endif
2083
2084 #ifdef CONFIG_NET_EGRESS
2085 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2086
2087 void net_inc_egress_queue(void)
2088 {
2089         static_branch_inc(&egress_needed_key);
2090 }
2091 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2092
2093 void net_dec_egress_queue(void)
2094 {
2095         static_branch_dec(&egress_needed_key);
2096 }
2097 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2098 #endif
2099
2100 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2101 #ifdef CONFIG_JUMP_LABEL
2102 static atomic_t netstamp_needed_deferred;
2103 static atomic_t netstamp_wanted;
2104 static void netstamp_clear(struct work_struct *work)
2105 {
2106         int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2107         int wanted;
2108
2109         wanted = atomic_add_return(deferred, &netstamp_wanted);
2110         if (wanted > 0)
2111                 static_branch_enable(&netstamp_needed_key);
2112         else
2113                 static_branch_disable(&netstamp_needed_key);
2114 }
2115 static DECLARE_WORK(netstamp_work, netstamp_clear);
2116 #endif
2117
2118 void net_enable_timestamp(void)
2119 {
2120 #ifdef CONFIG_JUMP_LABEL
2121         int wanted;
2122
2123         while (1) {
2124                 wanted = atomic_read(&netstamp_wanted);
2125                 if (wanted <= 0)
2126                         break;
2127                 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2128                         return;
2129         }
2130         atomic_inc(&netstamp_needed_deferred);
2131         schedule_work(&netstamp_work);
2132 #else
2133         static_branch_inc(&netstamp_needed_key);
2134 #endif
2135 }
2136 EXPORT_SYMBOL(net_enable_timestamp);
2137
2138 void net_disable_timestamp(void)
2139 {
2140 #ifdef CONFIG_JUMP_LABEL
2141         int wanted;
2142
2143         while (1) {
2144                 wanted = atomic_read(&netstamp_wanted);
2145                 if (wanted <= 1)
2146                         break;
2147                 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2148                         return;
2149         }
2150         atomic_dec(&netstamp_needed_deferred);
2151         schedule_work(&netstamp_work);
2152 #else
2153         static_branch_dec(&netstamp_needed_key);
2154 #endif
2155 }
2156 EXPORT_SYMBOL(net_disable_timestamp);
2157
2158 static inline void net_timestamp_set(struct sk_buff *skb)
2159 {
2160         skb->tstamp = 0;
2161         if (static_branch_unlikely(&netstamp_needed_key))
2162                 __net_timestamp(skb);
2163 }
2164
2165 #define net_timestamp_check(COND, SKB)                          \
2166         if (static_branch_unlikely(&netstamp_needed_key)) {     \
2167                 if ((COND) && !(SKB)->tstamp)                   \
2168                         __net_timestamp(SKB);                   \
2169         }                                                       \
2170
2171 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2172 {
2173         unsigned int len;
2174
2175         if (!(dev->flags & IFF_UP))
2176                 return false;
2177
2178         len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
2179         if (skb->len <= len)
2180                 return true;
2181
2182         /* if TSO is enabled, we don't care about the length as the packet
2183          * could be forwarded without being segmented before
2184          */
2185         if (skb_is_gso(skb))
2186                 return true;
2187
2188         return false;
2189 }
2190 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2191
2192 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2193 {
2194         int ret = ____dev_forward_skb(dev, skb);
2195
2196         if (likely(!ret)) {
2197                 skb->protocol = eth_type_trans(skb, dev);
2198                 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2199         }
2200
2201         return ret;
2202 }
2203 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2204
2205 /**
2206  * dev_forward_skb - loopback an skb to another netif
2207  *
2208  * @dev: destination network device
2209  * @skb: buffer to forward
2210  *
2211  * return values:
2212  *      NET_RX_SUCCESS  (no congestion)
2213  *      NET_RX_DROP     (packet was dropped, but freed)
2214  *
2215  * dev_forward_skb can be used for injecting an skb from the
2216  * start_xmit function of one device into the receive queue
2217  * of another device.
2218  *
2219  * The receiving device may be in another namespace, so
2220  * we have to clear all information in the skb that could
2221  * impact namespace isolation.
2222  */
2223 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2224 {
2225         return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2226 }
2227 EXPORT_SYMBOL_GPL(dev_forward_skb);
2228
2229 static inline int deliver_skb(struct sk_buff *skb,
2230                               struct packet_type *pt_prev,
2231                               struct net_device *orig_dev)
2232 {
2233         if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2234                 return -ENOMEM;
2235         refcount_inc(&skb->users);
2236         return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2237 }
2238
2239 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2240                                           struct packet_type **pt,
2241                                           struct net_device *orig_dev,
2242                                           __be16 type,
2243                                           struct list_head *ptype_list)
2244 {
2245         struct packet_type *ptype, *pt_prev = *pt;
2246
2247         list_for_each_entry_rcu(ptype, ptype_list, list) {
2248                 if (ptype->type != type)
2249                         continue;
2250                 if (pt_prev)
2251                         deliver_skb(skb, pt_prev, orig_dev);
2252                 pt_prev = ptype;
2253         }
2254         *pt = pt_prev;
2255 }
2256
2257 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2258 {
2259         if (!ptype->af_packet_priv || !skb->sk)
2260                 return false;
2261
2262         if (ptype->id_match)
2263                 return ptype->id_match(ptype, skb->sk);
2264         else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2265                 return true;
2266
2267         return false;
2268 }
2269
2270 /**
2271  * dev_nit_active - return true if any network interface taps are in use
2272  *
2273  * @dev: network device to check for the presence of taps
2274  */
2275 bool dev_nit_active(struct net_device *dev)
2276 {
2277         return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2278 }
2279 EXPORT_SYMBOL_GPL(dev_nit_active);
2280
2281 /*
2282  *      Support routine. Sends outgoing frames to any network
2283  *      taps currently in use.
2284  */
2285
2286 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2287 {
2288         struct packet_type *ptype;
2289         struct sk_buff *skb2 = NULL;
2290         struct packet_type *pt_prev = NULL;
2291         struct list_head *ptype_list = &ptype_all;
2292
2293         rcu_read_lock();
2294 again:
2295         list_for_each_entry_rcu(ptype, ptype_list, list) {
2296                 if (ptype->ignore_outgoing)
2297                         continue;
2298
2299                 /* Never send packets back to the socket
2300                  * they originated from - MvS (miquels@drinkel.ow.org)
2301                  */
2302                 if (skb_loop_sk(ptype, skb))
2303                         continue;
2304
2305                 if (pt_prev) {
2306                         deliver_skb(skb2, pt_prev, skb->dev);
2307                         pt_prev = ptype;
2308                         continue;
2309                 }
2310
2311                 /* need to clone skb, done only once */
2312                 skb2 = skb_clone(skb, GFP_ATOMIC);
2313                 if (!skb2)
2314                         goto out_unlock;
2315
2316                 net_timestamp_set(skb2);
2317
2318                 /* skb->nh should be correctly
2319                  * set by sender, so that the second statement is
2320                  * just protection against buggy protocols.
2321                  */
2322                 skb_reset_mac_header(skb2);
2323
2324                 if (skb_network_header(skb2) < skb2->data ||
2325                     skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2326                         net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2327                                              ntohs(skb2->protocol),
2328                                              dev->name);
2329                         skb_reset_network_header(skb2);
2330                 }
2331
2332                 skb2->transport_header = skb2->network_header;
2333                 skb2->pkt_type = PACKET_OUTGOING;
2334                 pt_prev = ptype;
2335         }
2336
2337         if (ptype_list == &ptype_all) {
2338                 ptype_list = &dev->ptype_all;
2339                 goto again;
2340         }
2341 out_unlock:
2342         if (pt_prev) {
2343                 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2344                         pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2345                 else
2346                         kfree_skb(skb2);
2347         }
2348         rcu_read_unlock();
2349 }
2350 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2351
2352 /**
2353  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2354  * @dev: Network device
2355  * @txq: number of queues available
2356  *
2357  * If real_num_tx_queues is changed the tc mappings may no longer be
2358  * valid. To resolve this verify the tc mapping remains valid and if
2359  * not NULL the mapping. With no priorities mapping to this
2360  * offset/count pair it will no longer be used. In the worst case TC0
2361  * is invalid nothing can be done so disable priority mappings. If is
2362  * expected that drivers will fix this mapping if they can before
2363  * calling netif_set_real_num_tx_queues.
2364  */
2365 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2366 {
2367         int i;
2368         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2369
2370         /* If TC0 is invalidated disable TC mapping */
2371         if (tc->offset + tc->count > txq) {
2372                 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2373                 dev->num_tc = 0;
2374                 return;
2375         }
2376
2377         /* Invalidated prio to tc mappings set to TC0 */
2378         for (i = 1; i < TC_BITMASK + 1; i++) {
2379                 int q = netdev_get_prio_tc_map(dev, i);
2380
2381                 tc = &dev->tc_to_txq[q];
2382                 if (tc->offset + tc->count > txq) {
2383                         pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2384                                 i, q);
2385                         netdev_set_prio_tc_map(dev, i, 0);
2386                 }
2387         }
2388 }
2389
2390 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2391 {
2392         if (dev->num_tc) {
2393                 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2394                 int i;
2395
2396                 /* walk through the TCs and see if it falls into any of them */
2397                 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2398                         if ((txq - tc->offset) < tc->count)
2399                                 return i;
2400                 }
2401
2402                 /* didn't find it, just return -1 to indicate no match */
2403                 return -1;
2404         }
2405
2406         return 0;
2407 }
2408 EXPORT_SYMBOL(netdev_txq_to_tc);
2409
2410 #ifdef CONFIG_XPS
2411 struct static_key xps_needed __read_mostly;
2412 EXPORT_SYMBOL(xps_needed);
2413 struct static_key xps_rxqs_needed __read_mostly;
2414 EXPORT_SYMBOL(xps_rxqs_needed);
2415 static DEFINE_MUTEX(xps_map_mutex);
2416 #define xmap_dereference(P)             \
2417         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2418
2419 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2420                              int tci, u16 index)
2421 {
2422         struct xps_map *map = NULL;
2423         int pos;
2424
2425         if (dev_maps)
2426                 map = xmap_dereference(dev_maps->attr_map[tci]);
2427         if (!map)
2428                 return false;
2429
2430         for (pos = map->len; pos--;) {
2431                 if (map->queues[pos] != index)
2432                         continue;
2433
2434                 if (map->len > 1) {
2435                         map->queues[pos] = map->queues[--map->len];
2436                         break;
2437                 }
2438
2439                 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2440                 kfree_rcu(map, rcu);
2441                 return false;
2442         }
2443
2444         return true;
2445 }
2446
2447 static bool remove_xps_queue_cpu(struct net_device *dev,
2448                                  struct xps_dev_maps *dev_maps,
2449                                  int cpu, u16 offset, u16 count)
2450 {
2451         int num_tc = dev->num_tc ? : 1;
2452         bool active = false;
2453         int tci;
2454
2455         for (tci = cpu * num_tc; num_tc--; tci++) {
2456                 int i, j;
2457
2458                 for (i = count, j = offset; i--; j++) {
2459                         if (!remove_xps_queue(dev_maps, tci, j))
2460                                 break;
2461                 }
2462
2463                 active |= i < 0;
2464         }
2465
2466         return active;
2467 }
2468
2469 static void reset_xps_maps(struct net_device *dev,
2470                            struct xps_dev_maps *dev_maps,
2471                            bool is_rxqs_map)
2472 {
2473         if (is_rxqs_map) {
2474                 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2475                 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2476         } else {
2477                 RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2478         }
2479         static_key_slow_dec_cpuslocked(&xps_needed);
2480         kfree_rcu(dev_maps, rcu);
2481 }
2482
2483 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2484                            struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2485                            u16 offset, u16 count, bool is_rxqs_map)
2486 {
2487         bool active = false;
2488         int i, j;
2489
2490         for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
2491              j < nr_ids;)
2492                 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2493                                                count);
2494         if (!active)
2495                 reset_xps_maps(dev, dev_maps, is_rxqs_map);
2496
2497         if (!is_rxqs_map) {
2498                 for (i = offset + (count - 1); count--; i--) {
2499                         netdev_queue_numa_node_write(
2500                                 netdev_get_tx_queue(dev, i),
2501                                 NUMA_NO_NODE);
2502                 }
2503         }
2504 }
2505
2506 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2507                                    u16 count)
2508 {
2509         const unsigned long *possible_mask = NULL;
2510         struct xps_dev_maps *dev_maps;
2511         unsigned int nr_ids;
2512
2513         if (!static_key_false(&xps_needed))
2514                 return;
2515
2516         cpus_read_lock();
2517         mutex_lock(&xps_map_mutex);
2518
2519         if (static_key_false(&xps_rxqs_needed)) {
2520                 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2521                 if (dev_maps) {
2522                         nr_ids = dev->num_rx_queues;
2523                         clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
2524                                        offset, count, true);
2525                 }
2526         }
2527
2528         dev_maps = xmap_dereference(dev->xps_cpus_map);
2529         if (!dev_maps)
2530                 goto out_no_maps;
2531
2532         if (num_possible_cpus() > 1)
2533                 possible_mask = cpumask_bits(cpu_possible_mask);
2534         nr_ids = nr_cpu_ids;
2535         clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
2536                        false);
2537
2538 out_no_maps:
2539         mutex_unlock(&xps_map_mutex);
2540         cpus_read_unlock();
2541 }
2542
2543 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2544 {
2545         netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2546 }
2547
2548 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2549                                       u16 index, bool is_rxqs_map)
2550 {
2551         struct xps_map *new_map;
2552         int alloc_len = XPS_MIN_MAP_ALLOC;
2553         int i, pos;
2554
2555         for (pos = 0; map && pos < map->len; pos++) {
2556                 if (map->queues[pos] != index)
2557                         continue;
2558                 return map;
2559         }
2560
2561         /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2562         if (map) {
2563                 if (pos < map->alloc_len)
2564                         return map;
2565
2566                 alloc_len = map->alloc_len * 2;
2567         }
2568
2569         /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2570          *  map
2571          */
2572         if (is_rxqs_map)
2573                 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2574         else
2575                 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2576                                        cpu_to_node(attr_index));
2577         if (!new_map)
2578                 return NULL;
2579
2580         for (i = 0; i < pos; i++)
2581                 new_map->queues[i] = map->queues[i];
2582         new_map->alloc_len = alloc_len;
2583         new_map->len = pos;
2584
2585         return new_map;
2586 }
2587
2588 /* Must be called under cpus_read_lock */
2589 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2590                           u16 index, bool is_rxqs_map)
2591 {
2592         const unsigned long *online_mask = NULL, *possible_mask = NULL;
2593         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2594         int i, j, tci, numa_node_id = -2;
2595         int maps_sz, num_tc = 1, tc = 0;
2596         struct xps_map *map, *new_map;
2597         bool active = false;
2598         unsigned int nr_ids;
2599
2600         if (dev->num_tc) {
2601                 /* Do not allow XPS on subordinate device directly */
2602                 num_tc = dev->num_tc;
2603                 if (num_tc < 0)
2604                         return -EINVAL;
2605
2606                 /* If queue belongs to subordinate dev use its map */
2607                 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2608
2609                 tc = netdev_txq_to_tc(dev, index);
2610                 if (tc < 0)
2611                         return -EINVAL;
2612         }
2613
2614         mutex_lock(&xps_map_mutex);
2615         if (is_rxqs_map) {
2616                 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2617                 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2618                 nr_ids = dev->num_rx_queues;
2619         } else {
2620                 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2621                 if (num_possible_cpus() > 1) {
2622                         online_mask = cpumask_bits(cpu_online_mask);
2623                         possible_mask = cpumask_bits(cpu_possible_mask);
2624                 }
2625                 dev_maps = xmap_dereference(dev->xps_cpus_map);
2626                 nr_ids = nr_cpu_ids;
2627         }
2628
2629         if (maps_sz < L1_CACHE_BYTES)
2630                 maps_sz = L1_CACHE_BYTES;
2631
2632         /* allocate memory for queue storage */
2633         for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2634              j < nr_ids;) {
2635                 if (!new_dev_maps)
2636                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2637                 if (!new_dev_maps) {
2638                         mutex_unlock(&xps_map_mutex);
2639                         return -ENOMEM;
2640                 }
2641
2642                 tci = j * num_tc + tc;
2643                 map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
2644                                  NULL;
2645
2646                 map = expand_xps_map(map, j, index, is_rxqs_map);
2647                 if (!map)
2648                         goto error;
2649
2650                 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2651         }
2652
2653         if (!new_dev_maps)
2654                 goto out_no_new_maps;
2655
2656         if (!dev_maps) {
2657                 /* Increment static keys at most once per type */
2658                 static_key_slow_inc_cpuslocked(&xps_needed);
2659                 if (is_rxqs_map)
2660                         static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2661         }
2662
2663         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2664              j < nr_ids;) {
2665                 /* copy maps belonging to foreign traffic classes */
2666                 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
2667                         /* fill in the new device map from the old device map */
2668                         map = xmap_dereference(dev_maps->attr_map[tci]);
2669                         RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2670                 }
2671
2672                 /* We need to explicitly update tci as prevous loop
2673                  * could break out early if dev_maps is NULL.
2674                  */
2675                 tci = j * num_tc + tc;
2676
2677                 if (netif_attr_test_mask(j, mask, nr_ids) &&
2678                     netif_attr_test_online(j, online_mask, nr_ids)) {
2679                         /* add tx-queue to CPU/rx-queue maps */
2680                         int pos = 0;
2681
2682                         map = xmap_dereference(new_dev_maps->attr_map[tci]);
2683                         while ((pos < map->len) && (map->queues[pos] != index))
2684                                 pos++;
2685
2686                         if (pos == map->len)
2687                                 map->queues[map->len++] = index;
2688 #ifdef CONFIG_NUMA
2689                         if (!is_rxqs_map) {
2690                                 if (numa_node_id == -2)
2691                                         numa_node_id = cpu_to_node(j);
2692                                 else if (numa_node_id != cpu_to_node(j))
2693                                         numa_node_id = -1;
2694                         }
2695 #endif
2696                 } else if (dev_maps) {
2697                         /* fill in the new device map from the old device map */
2698                         map = xmap_dereference(dev_maps->attr_map[tci]);
2699                         RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2700                 }
2701
2702                 /* copy maps belonging to foreign traffic classes */
2703                 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2704                         /* fill in the new device map from the old device map */
2705                         map = xmap_dereference(dev_maps->attr_map[tci]);
2706                         RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2707                 }
2708         }
2709
2710         if (is_rxqs_map)
2711                 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
2712         else
2713                 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
2714
2715         /* Cleanup old maps */
2716         if (!dev_maps)
2717                 goto out_no_old_maps;
2718
2719         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2720              j < nr_ids;) {
2721                 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2722                         new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2723                         map = xmap_dereference(dev_maps->attr_map[tci]);
2724                         if (map && map != new_map)
2725                                 kfree_rcu(map, rcu);
2726                 }
2727         }
2728
2729         kfree_rcu(dev_maps, rcu);
2730
2731 out_no_old_maps:
2732         dev_maps = new_dev_maps;
2733         active = true;
2734
2735 out_no_new_maps:
2736         if (!is_rxqs_map) {
2737                 /* update Tx queue numa node */
2738                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2739                                              (numa_node_id >= 0) ?
2740                                              numa_node_id : NUMA_NO_NODE);
2741         }
2742
2743         if (!dev_maps)
2744                 goto out_no_maps;
2745
2746         /* removes tx-queue from unused CPUs/rx-queues */
2747         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2748              j < nr_ids;) {
2749                 for (i = tc, tci = j * num_tc; i--; tci++)
2750                         active |= remove_xps_queue(dev_maps, tci, index);
2751                 if (!netif_attr_test_mask(j, mask, nr_ids) ||
2752                     !netif_attr_test_online(j, online_mask, nr_ids))
2753                         active |= remove_xps_queue(dev_maps, tci, index);
2754                 for (i = num_tc - tc, tci++; --i; tci++)
2755                         active |= remove_xps_queue(dev_maps, tci, index);
2756         }
2757
2758         /* free map if not active */
2759         if (!active)
2760                 reset_xps_maps(dev, dev_maps, is_rxqs_map);
2761
2762 out_no_maps:
2763         mutex_unlock(&xps_map_mutex);
2764
2765         return 0;
2766 error:
2767         /* remove any maps that we added */
2768         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2769              j < nr_ids;) {
2770                 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2771                         new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2772                         map = dev_maps ?
2773                               xmap_dereference(dev_maps->attr_map[tci]) :
2774                               NULL;
2775                         if (new_map && new_map != map)
2776                                 kfree(new_map);
2777                 }
2778         }
2779
2780         mutex_unlock(&xps_map_mutex);
2781
2782         kfree(new_dev_maps);
2783         return -ENOMEM;
2784 }
2785 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2786
2787 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2788                         u16 index)
2789 {
2790         int ret;
2791
2792         cpus_read_lock();
2793         ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2794         cpus_read_unlock();
2795
2796         return ret;
2797 }
2798 EXPORT_SYMBOL(netif_set_xps_queue);
2799
2800 #endif
2801 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2802 {
2803         struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2804
2805         /* Unbind any subordinate channels */
2806         while (txq-- != &dev->_tx[0]) {
2807                 if (txq->sb_dev)
2808                         netdev_unbind_sb_channel(dev, txq->sb_dev);
2809         }
2810 }
2811
2812 void netdev_reset_tc(struct net_device *dev)
2813 {
2814 #ifdef CONFIG_XPS
2815         netif_reset_xps_queues_gt(dev, 0);
2816 #endif
2817         netdev_unbind_all_sb_channels(dev);
2818
2819         /* Reset TC configuration of device */
2820         dev->num_tc = 0;
2821         memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2822         memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2823 }
2824 EXPORT_SYMBOL(netdev_reset_tc);
2825
2826 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2827 {
2828         if (tc >= dev->num_tc)
2829                 return -EINVAL;
2830
2831 #ifdef CONFIG_XPS
2832         netif_reset_xps_queues(dev, offset, count);
2833 #endif
2834         dev->tc_to_txq[tc].count = count;
2835         dev->tc_to_txq[tc].offset = offset;
2836         return 0;
2837 }
2838 EXPORT_SYMBOL(netdev_set_tc_queue);
2839
2840 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2841 {
2842         if (num_tc > TC_MAX_QUEUE)
2843                 return -EINVAL;
2844
2845 #ifdef CONFIG_XPS
2846         netif_reset_xps_queues_gt(dev, 0);
2847 #endif
2848         netdev_unbind_all_sb_channels(dev);
2849
2850         dev->num_tc = num_tc;
2851         return 0;
2852 }
2853 EXPORT_SYMBOL(netdev_set_num_tc);
2854
2855 void netdev_unbind_sb_channel(struct net_device *dev,
2856                               struct net_device *sb_dev)
2857 {
2858         struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2859
2860 #ifdef CONFIG_XPS
2861         netif_reset_xps_queues_gt(sb_dev, 0);
2862 #endif
2863         memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2864         memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2865
2866         while (txq-- != &dev->_tx[0]) {
2867                 if (txq->sb_dev == sb_dev)
2868                         txq->sb_dev = NULL;
2869         }
2870 }
2871 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2872
2873 int netdev_bind_sb_channel_queue(struct net_device *dev,
2874                                  struct net_device *sb_dev,
2875                                  u8 tc, u16 count, u16 offset)
2876 {
2877         /* Make certain the sb_dev and dev are already configured */
2878         if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2879                 return -EINVAL;
2880
2881         /* We cannot hand out queues we don't have */
2882         if ((offset + count) > dev->real_num_tx_queues)
2883                 return -EINVAL;
2884
2885         /* Record the mapping */
2886         sb_dev->tc_to_txq[tc].count = count;
2887         sb_dev->tc_to_txq[tc].offset = offset;
2888
2889         /* Provide a way for Tx queue to find the tc_to_txq map or
2890          * XPS map for itself.
2891          */
2892         while (count--)
2893                 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2894
2895         return 0;
2896 }
2897 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2898
2899 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2900 {
2901         /* Do not use a multiqueue device to represent a subordinate channel */
2902         if (netif_is_multiqueue(dev))
2903                 return -ENODEV;
2904
2905         /* We allow channels 1 - 32767 to be used for subordinate channels.
2906          * Channel 0 is meant to be "native" mode and used only to represent
2907          * the main root device. We allow writing 0 to reset the device back
2908          * to normal mode after being used as a subordinate channel.
2909          */
2910         if (channel > S16_MAX)
2911                 return -EINVAL;
2912
2913         dev->num_tc = -channel;
2914
2915         return 0;
2916 }
2917 EXPORT_SYMBOL(netdev_set_sb_channel);
2918
2919 /*
2920  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2921  * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
2922  */
2923 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2924 {
2925         bool disabling;
2926         int rc;
2927
2928         disabling = txq < dev->real_num_tx_queues;
2929
2930         if (txq < 1 || txq > dev->num_tx_queues)
2931                 return -EINVAL;
2932
2933         if (dev->reg_state == NETREG_REGISTERED ||
2934             dev->reg_state == NETREG_UNREGISTERING) {
2935                 ASSERT_RTNL();
2936
2937                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2938                                                   txq);
2939                 if (rc)
2940                         return rc;
2941
2942                 if (dev->num_tc)
2943                         netif_setup_tc(dev, txq);
2944
2945                 dev->real_num_tx_queues = txq;
2946
2947                 if (disabling) {
2948                         synchronize_net();
2949                         qdisc_reset_all_tx_gt(dev, txq);
2950 #ifdef CONFIG_XPS
2951                         netif_reset_xps_queues_gt(dev, txq);
2952 #endif
2953                 }
2954         } else {
2955                 dev->real_num_tx_queues = txq;
2956         }
2957
2958         return 0;
2959 }
2960 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2961
2962 #ifdef CONFIG_SYSFS
2963 /**
2964  *      netif_set_real_num_rx_queues - set actual number of RX queues used
2965  *      @dev: Network device
2966  *      @rxq: Actual number of RX queues
2967  *
2968  *      This must be called either with the rtnl_lock held or before
2969  *      registration of the net device.  Returns 0 on success, or a
2970  *      negative error code.  If called before registration, it always
2971  *      succeeds.
2972  */
2973 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2974 {
2975         int rc;
2976
2977         if (rxq < 1 || rxq > dev->num_rx_queues)
2978                 return -EINVAL;
2979
2980         if (dev->reg_state == NETREG_REGISTERED) {
2981                 ASSERT_RTNL();
2982
2983                 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2984                                                   rxq);
2985                 if (rc)
2986                         return rc;
2987         }
2988
2989         dev->real_num_rx_queues = rxq;
2990         return 0;
2991 }
2992 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2993 #endif
2994
2995 /**
2996  * netif_get_num_default_rss_queues - default number of RSS queues
2997  *
2998  * This routine should set an upper limit on the number of RSS queues
2999  * used by default by multiqueue devices.
3000  */
3001 int netif_get_num_default_rss_queues(void)
3002 {
3003         return is_kdump_kernel() ?
3004                 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
3005 }
3006 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
3007
3008 static void __netif_reschedule(struct Qdisc *q)
3009 {
3010         struct softnet_data *sd;
3011         unsigned long flags;
3012
3013         local_irq_save(flags);
3014         sd = this_cpu_ptr(&softnet_data);
3015         q->next_sched = NULL;
3016         *sd->output_queue_tailp = q;
3017         sd->output_queue_tailp = &q->next_sched;
3018         raise_softirq_irqoff(NET_TX_SOFTIRQ);
3019         local_irq_restore(flags);
3020 }
3021
3022 void __netif_schedule(struct Qdisc *q)
3023 {
3024         if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3025                 __netif_reschedule(q);
3026 }
3027 EXPORT_SYMBOL(__netif_schedule);
3028
3029 struct dev_kfree_skb_cb {
3030         enum skb_free_reason reason;
3031 };
3032
3033 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3034 {
3035         return (struct dev_kfree_skb_cb *)skb->cb;
3036 }
3037
3038 void netif_schedule_queue(struct netdev_queue *txq)
3039 {
3040         rcu_read_lock();
3041         if (!netif_xmit_stopped(txq)) {
3042                 struct Qdisc *q = rcu_dereference(txq->qdisc);
3043
3044                 __netif_schedule(q);
3045         }
3046         rcu_read_unlock();
3047 }
3048 EXPORT_SYMBOL(netif_schedule_queue);
3049
3050 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3051 {
3052         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3053                 struct Qdisc *q;
3054
3055                 rcu_read_lock();
3056                 q = rcu_dereference(dev_queue->qdisc);
3057                 __netif_schedule(q);
3058                 rcu_read_unlock();
3059         }
3060 }
3061 EXPORT_SYMBOL(netif_tx_wake_queue);
3062
3063 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
3064 {
3065         unsigned long flags;
3066
3067         if (unlikely(!skb))
3068                 return;
3069
3070         if (likely(refcount_read(&skb->users) == 1)) {
3071                 smp_rmb();
3072                 refcount_set(&skb->users, 0);
3073         } else if (likely(!refcount_dec_and_test(&skb->users))) {
3074                 return;
3075         }
3076         get_kfree_skb_cb(skb)->reason = reason;
3077         local_irq_save(flags);
3078         skb->next = __this_cpu_read(softnet_data.completion_queue);
3079         __this_cpu_write(softnet_data.completion_queue, skb);
3080         raise_softirq_irqoff(NET_TX_SOFTIRQ);
3081         local_irq_restore(flags);
3082 }
3083 EXPORT_SYMBOL(__dev_kfree_skb_irq);
3084
3085 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
3086 {
3087         if (in_irq() || irqs_disabled())
3088                 __dev_kfree_skb_irq(skb, reason);
3089         else
3090                 dev_kfree_skb(skb);
3091 }
3092 EXPORT_SYMBOL(__dev_kfree_skb_any);
3093
3094
3095 /**
3096  * netif_device_detach - mark device as removed
3097  * @dev: network device
3098  *
3099  * Mark device as removed from system and therefore no longer available.
3100  */
3101 void netif_device_detach(struct net_device *dev)
3102 {
3103         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3104             netif_running(dev)) {
3105                 netif_tx_stop_all_queues(dev);
3106         }
3107 }
3108 EXPORT_SYMBOL(netif_device_detach);
3109
3110 /**
3111  * netif_device_attach - mark device as attached
3112  * @dev: network device
3113  *
3114  * Mark device as attached from system and restart if needed.
3115  */
3116 void netif_device_attach(struct net_device *dev)
3117 {
3118         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3119             netif_running(dev)) {
3120                 netif_tx_wake_all_queues(dev);
3121                 __netdev_watchdog_up(dev);
3122         }
3123 }
3124 EXPORT_SYMBOL(netif_device_attach);
3125
3126 /*
3127  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
3128  * to be used as a distribution range.
3129  */
3130 static u16 skb_tx_hash(const struct net_device *dev,
3131                        const struct net_device *sb_dev,
3132                        struct sk_buff *skb)
3133 {
3134         u32 hash;
3135         u16 qoffset = 0;
3136         u16 qcount = dev->real_num_tx_queues;
3137
3138         if (dev->num_tc) {
3139                 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3140
3141                 qoffset = sb_dev->tc_to_txq[tc].offset;
3142                 qcount = sb_dev->tc_to_txq[tc].count;
3143         }
3144
3145         if (skb_rx_queue_recorded(skb)) {
3146                 hash = skb_get_rx_queue(skb);
3147                 if (hash >= qoffset)
3148                         hash -= qoffset;
3149                 while (unlikely(hash >= qcount))
3150                         hash -= qcount;
3151                 return hash + qoffset;
3152         }
3153
3154         return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3155 }
3156
3157 static void skb_warn_bad_offload(const struct sk_buff *skb)
3158 {
3159         static const netdev_features_t null_features;
3160         struct net_device *dev = skb->dev;
3161         const char *name = "";
3162
3163         if (!net_ratelimit())
3164                 return;
3165
3166         if (dev) {
3167                 if (dev->dev.parent)
3168                         name = dev_driver_string(dev->dev.parent);
3169                 else
3170                         name = netdev_name(dev);
3171         }
3172         skb_dump(KERN_WARNING, skb, false);
3173         WARN(1, "%s: caps=(%pNF, %pNF)\n",
3174              name, dev ? &dev->features : &null_features,
3175              skb->sk ? &skb->sk->sk_route_caps : &null_features);
3176 }
3177
3178 /*
3179  * Invalidate hardware checksum when packet is to be mangled, and
3180  * complete checksum manually on outgoing path.
3181  */
3182 int skb_checksum_help(struct sk_buff *skb)
3183 {
3184         __wsum csum;
3185         int ret = 0, offset;
3186
3187         if (skb->ip_summed == CHECKSUM_COMPLETE)
3188                 goto out_set_summed;
3189
3190         if (unlikely(skb_shinfo(skb)->gso_size)) {
3191                 skb_warn_bad_offload(skb);
3192                 return -EINVAL;
3193         }
3194
3195         /* Before computing a checksum, we should make sure no frag could
3196          * be modified by an external entity : checksum could be wrong.
3197          */
3198         if (skb_has_shared_frag(skb)) {
3199                 ret = __skb_linearize(skb);
3200                 if (ret)
3201                         goto out;
3202         }
3203
3204         offset = skb_checksum_start_offset(skb);
3205         BUG_ON(offset >= skb_headlen(skb));
3206         csum = skb_checksum(skb, offset, skb->len - offset, 0);
3207
3208         offset += skb->csum_offset;
3209         BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3210
3211         ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3212         if (ret)
3213                 goto out;
3214
3215         *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3216 out_set_summed:
3217         skb->ip_summed = CHECKSUM_NONE;
3218 out:
3219         return ret;
3220 }
3221 EXPORT_SYMBOL(skb_checksum_help);
3222
3223 int skb_crc32c_csum_help(struct sk_buff *skb)
3224 {
3225         __le32 crc32c_csum;
3226         int ret = 0, offset, start;
3227
3228         if (skb->ip_summed != CHECKSUM_PARTIAL)
3229                 goto out;
3230
3231         if (unlikely(skb_is_gso(skb)))
3232                 goto out;
3233
3234         /* Before computing a checksum, we should make sure no frag could
3235          * be modified by an external entity : checksum could be wrong.
3236          */
3237         if (unlikely(skb_has_shared_frag(skb))) {
3238                 ret = __skb_linearize(skb);
3239                 if (ret)
3240                         goto out;
3241         }
3242         start = skb_checksum_start_offset(skb);
3243         offset = start + offsetof(struct sctphdr, checksum);
3244         if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3245                 ret = -EINVAL;
3246                 goto out;
3247         }
3248
3249         ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3250         if (ret)
3251                 goto out;
3252
3253         crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3254                                                   skb->len - start, ~(__u32)0,
3255                                                   crc32c_csum_stub));
3256         *(__le32 *)(skb->data + offset) = crc32c_csum;
3257         skb->ip_summed = CHECKSUM_NONE;
3258         skb->csum_not_inet = 0;
3259 out:
3260         return ret;
3261 }
3262
3263 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3264 {
3265         __be16 type = skb->protocol;
3266
3267         /* Tunnel gso handlers can set protocol to ethernet. */
3268         if (type == htons(ETH_P_TEB)) {
3269                 struct ethhdr *eth;
3270
3271                 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3272                         return 0;
3273
3274                 eth = (struct ethhdr *)skb->data;
3275                 type = eth->h_proto;
3276         }
3277
3278         return __vlan_get_protocol(skb, type, depth);
3279 }
3280
3281 /**
3282  *      skb_mac_gso_segment - mac layer segmentation handler.
3283  *      @skb: buffer to segment
3284  *      @features: features for the output path (see dev->features)
3285  */
3286 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3287                                     netdev_features_t features)
3288 {
3289         struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3290         struct packet_offload *ptype;
3291         int vlan_depth = skb->mac_len;
3292         __be16 type = skb_network_protocol(skb, &vlan_depth);
3293
3294         if (unlikely(!type))
3295                 return ERR_PTR(-EINVAL);
3296
3297         __skb_pull(skb, vlan_depth);
3298
3299         rcu_read_lock();
3300         list_for_each_entry_rcu(ptype, &offload_base, list) {
3301                 if (ptype->type == type && ptype->callbacks.gso_segment) {
3302                         segs = ptype->callbacks.gso_segment(skb, features);
3303                         break;
3304                 }
3305         }
3306         rcu_read_unlock();
3307
3308         __skb_push(skb, skb->data - skb_mac_header(skb));
3309
3310         return segs;
3311 }
3312 EXPORT_SYMBOL(skb_mac_gso_segment);
3313
3314
3315 /* openvswitch calls this on rx path, so we need a different check.
3316  */
3317 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3318 {
3319         if (tx_path)
3320                 return skb->ip_summed != CHECKSUM_PARTIAL &&
3321                        skb->ip_summed != CHECKSUM_UNNECESSARY;
3322
3323         return skb->ip_summed == CHECKSUM_NONE;
3324 }
3325
3326 /**
3327  *      __skb_gso_segment - Perform segmentation on skb.
3328  *      @skb: buffer to segment
3329  *      @features: features for the output path (see dev->features)
3330  *      @tx_path: whether it is called in TX path
3331  *
3332  *      This function segments the given skb and returns a list of segments.
3333  *
3334  *      It may return NULL if the skb requires no segmentation.  This is
3335  *      only possible when GSO is used for verifying header integrity.
3336  *
3337  *      Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
3338  */
3339 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3340                                   netdev_features_t features, bool tx_path)
3341 {
3342         struct sk_buff *segs;
3343
3344         if (unlikely(skb_needs_check(skb, tx_path))) {
3345                 int err;
3346
3347                 /* We're going to init ->check field in TCP or UDP header */
3348                 err = skb_cow_head(skb, 0);
3349                 if (err < 0)
3350                         return ERR_PTR(err);
3351         }
3352
3353         /* Only report GSO partial support if it will enable us to
3354          * support segmentation on this frame without needing additional
3355          * work.
3356          */
3357         if (features & NETIF_F_GSO_PARTIAL) {
3358                 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3359                 struct net_device *dev = skb->dev;
3360
3361                 partial_features |= dev->features & dev->gso_partial_features;
3362                 if (!skb_gso_ok(skb, features | partial_features))
3363                         features &= ~NETIF_F_GSO_PARTIAL;
3364         }
3365
3366         BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
3367                      sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3368
3369         SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3370         SKB_GSO_CB(skb)->encap_level = 0;
3371
3372         skb_reset_mac_header(skb);
3373         skb_reset_mac_len(skb);
3374
3375         segs = skb_mac_gso_segment(skb, features);
3376
3377         if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3378                 skb_warn_bad_offload(skb);
3379
3380         return segs;
3381 }
3382 EXPORT_SYMBOL(__skb_gso_segment);
3383
3384 /* Take action when hardware reception checksum errors are detected. */
3385 #ifdef CONFIG_BUG
3386 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3387 {
3388         if (net_ratelimit()) {
3389                 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3390                 skb_dump(KERN_ERR, skb, true);
3391                 dump_stack();
3392         }
3393 }
3394 EXPORT_SYMBOL(netdev_rx_csum_fault);
3395 #endif
3396
3397 /* XXX: check that highmem exists at all on the given machine. */
3398 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3399 {
3400 #ifdef CONFIG_HIGHMEM
3401         int i;
3402
3403         if (!(dev->features & NETIF_F_HIGHDMA)) {
3404                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3405                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3406
3407                         if (PageHighMem(skb_frag_page(frag)))
3408                                 return 1;
3409                 }
3410         }
3411 #endif
3412         return 0;
3413 }
3414
3415 /* If MPLS offload request, verify we are testing hardware MPLS features
3416  * instead of standard features for the netdev.
3417  */
3418 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3419 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3420                                            netdev_features_t features,
3421                                            __be16 type)
3422 {
3423         if (eth_p_mpls(type))
3424                 features &= skb->dev->mpls_features;
3425
3426         return features;
3427 }
3428 #else
3429 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3430                                            netdev_features_t features,
3431                                            __be16 type)
3432 {
3433         return features;
3434 }
3435 #endif
3436
3437 static netdev_features_t harmonize_features(struct sk_buff *skb,
3438         netdev_features_t features)
3439 {
3440         int tmp;
3441         __be16 type;
3442
3443         type = skb_network_protocol(skb, &tmp);
3444         features = net_mpls_features(skb, features, type);
3445
3446         if (skb->ip_summed != CHECKSUM_NONE &&
3447             !can_checksum_protocol(features, type)) {
3448                 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3449         }
3450         if (illegal_highdma(skb->dev, skb))
3451                 features &= ~NETIF_F_SG;
3452
3453         return features;
3454 }
3455
3456 netdev_features_t passthru_features_check(struct sk_buff *skb,
3457                                           struct net_device *dev,
3458                                           netdev_features_t features)
3459 {
3460         return features;
3461 }
3462 EXPORT_SYMBOL(passthru_features_check);
3463
3464 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3465                                              struct net_device *dev,
3466                                              netdev_features_t features)
3467 {
3468         return vlan_features_check(skb, features);
3469 }
3470
3471 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3472                                             struct net_device *dev,
3473                                             netdev_features_t features)
3474 {
3475         u16 gso_segs = skb_shinfo(skb)->gso_segs;
3476
3477         if (gso_segs > dev->gso_max_segs)
3478                 return features & ~NETIF_F_GSO_MASK;
3479
3480         /* Support for GSO partial features requires software
3481          * intervention before we can actually process the packets
3482          * so we need to strip support for any partial features now
3483          * and we can pull them back in after we have partially
3484          * segmented the frame.
3485          */
3486         if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3487                 features &= ~dev->gso_partial_features;
3488
3489         /* Make sure to clear the IPv4 ID mangling feature if the
3490          * IPv4 header has the potential to be fragmented.
3491          */
3492         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3493                 struct iphdr *iph = skb->encapsulation ?
3494                                     inner_ip_hdr(skb) : ip_hdr(skb);
3495
3496                 if (!(iph->frag_off & htons(IP_DF)))
3497                         features &= ~NETIF_F_TSO_MANGLEID;
3498         }
3499
3500         return features;
3501 }
3502
3503 netdev_features_t netif_skb_features(struct sk_buff *skb)
3504 {
3505         struct net_device *dev = skb->dev;
3506         netdev_features_t features = dev->features;
3507
3508         if (skb_is_gso(skb))
3509                 features = gso_features_check(skb, dev, features);
3510
3511         /* If encapsulation offload request, verify we are testing
3512          * hardware encapsulation features instead of standard
3513          * features for the netdev
3514          */
3515         if (skb->encapsulation)
3516                 features &= dev->hw_enc_features;
3517
3518         if (skb_vlan_tagged(skb))
3519                 features = netdev_intersect_features(features,
3520                                                      dev->vlan_features |
3521                                                      NETIF_F_HW_VLAN_CTAG_TX |
3522                                                      NETIF_F_HW_VLAN_STAG_TX);
3523
3524         if (dev->netdev_ops->ndo_features_check)
3525                 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3526                                                                 features);
3527         else
3528                 features &= dflt_features_check(skb, dev, features);
3529
3530         return harmonize_features(skb, features);
3531 }
3532 EXPORT_SYMBOL(netif_skb_features);
3533
3534 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3535                     struct netdev_queue *txq, bool more)
3536 {
3537         unsigned int len;
3538         int rc;
3539
3540         if (dev_nit_active(dev))
3541                 dev_queue_xmit_nit(skb, dev);
3542
3543         len = skb->len;
3544         trace_net_dev_start_xmit(skb, dev);
3545         rc = netdev_start_xmit(skb, dev, txq, more);
3546         trace_net_dev_xmit(skb, rc, dev, len);
3547
3548         return rc;
3549 }
3550
3551 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3552                                     struct netdev_queue *txq, int *ret)
3553 {
3554         struct sk_buff *skb = first;
3555         int rc = NETDEV_TX_OK;
3556
3557         while (skb) {
3558                 struct sk_buff *next = skb->next;
3559
3560                 skb_mark_not_on_list(skb);
3561                 rc = xmit_one(skb, dev, txq, next != NULL);
3562                 if (unlikely(!dev_xmit_complete(rc))) {
3563                         skb->next = next;
3564                         goto out;
3565                 }
3566
3567                 skb = next;
3568                 if (netif_tx_queue_stopped(txq) && skb) {
3569                         rc = NETDEV_TX_BUSY;
3570                         break;
3571                 }
3572         }
3573
3574 out:
3575         *ret = rc;
3576         return skb;
3577 }
3578
3579 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3580                                           netdev_features_t features)
3581 {
3582         if (skb_vlan_tag_present(skb) &&
3583             !vlan_hw_offload_capable(features, skb->vlan_proto))
3584                 skb = __vlan_hwaccel_push_inside(skb);
3585         return skb;
3586 }
3587
3588 int skb_csum_hwoffload_help(struct sk_buff *skb,
3589                             const netdev_features_t features)
3590 {
3591         if (unlikely(skb->csum_not_inet))
3592                 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3593                         skb_crc32c_csum_help(skb);
3594
3595         return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3596 }
3597 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3598
3599 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3600 {
3601         netdev_features_t features;
3602
3603         features = netif_skb_features(skb);
3604         skb = validate_xmit_vlan(skb, features);
3605         if (unlikely(!skb))
3606                 goto out_null;
3607
3608         skb = sk_validate_xmit_skb(skb, dev);
3609         if (unlikely(!skb))
3610                 goto out_null;
3611
3612         if (netif_needs_gso(skb, features)) {
3613                 struct sk_buff *segs;
3614
3615                 segs = skb_gso_segment(skb, features);
3616                 if (IS_ERR(segs)) {
3617                         goto out_kfree_skb;
3618                 } else if (segs) {
3619                         consume_skb(skb);
3620                         skb = segs;
3621                 }
3622         } else {
3623                 if (skb_needs_linearize(skb, features) &&
3624                     __skb_linearize(skb))
3625                         goto out_kfree_skb;
3626
3627                 /* If packet is not checksummed and device does not
3628                  * support checksumming for this protocol, complete
3629                  * checksumming here.
3630                  */
3631                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3632                         if (skb->encapsulation)
3633                                 skb_set_inner_transport_header(skb,
3634                                                                skb_checksum_start_offset(skb));
3635                         else
3636                                 skb_set_transport_header(skb,
3637                                                          skb_checksum_start_offset(skb));
3638                         if (skb_csum_hwoffload_help(skb, features))
3639                                 goto out_kfree_skb;
3640                 }
3641         }
3642
3643         skb = validate_xmit_xfrm(skb, features, again);
3644
3645         return skb;
3646
3647 out_kfree_skb:
3648         kfree_skb(skb);
3649 out_null:
3650         atomic_long_inc(&dev->tx_dropped);
3651         return NULL;
3652 }
3653
3654 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3655 {
3656         struct sk_buff *next, *head = NULL, *tail;
3657
3658         for (; skb != NULL; skb = next) {
3659                 next = skb->next;
3660                 skb_mark_not_on_list(skb);
3661
3662                 /* in case skb wont be segmented, point to itself */
3663                 skb->prev = skb;
3664
3665                 skb = validate_xmit_skb(skb, dev, again);
3666                 if (!skb)
3667                         continue;
3668
3669                 if (!head)
3670                         head = skb;
3671                 else
3672                         tail->next = skb;
3673                 /* If skb was segmented, skb->prev points to
3674                  * the last segment. If not, it still contains skb.
3675                  */
3676                 tail = skb->prev;
3677         }
3678         return head;
3679 }
3680 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3681
3682 static void qdisc_pkt_len_init(struct sk_buff *skb)
3683 {
3684         const struct skb_shared_info *shinfo = skb_shinfo(skb);
3685
3686         qdisc_skb_cb(skb)->pkt_len = skb->len;
3687
3688         /* To get more precise estimation of bytes sent on wire,
3689          * we add to pkt_len the headers size of all segments
3690          */
3691         if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3692                 unsigned int hdr_len;
3693                 u16 gso_segs = shinfo->gso_segs;
3694
3695                 /* mac layer + network layer */
3696                 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3697
3698                 /* + transport layer */
3699                 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3700                         const struct tcphdr *th;
3701                         struct tcphdr _tcphdr;
3702
3703                         th = skb_header_pointer(skb, skb_transport_offset(skb),
3704                                                 sizeof(_tcphdr), &_tcphdr);
3705                         if (likely(th))
3706                                 hdr_len += __tcp_hdrlen(th);
3707                 } else {
3708                         struct udphdr _udphdr;
3709
3710                         if (skb_header_pointer(skb, skb_transport_offset(skb),
3711                                                sizeof(_udphdr), &_udphdr))
3712                                 hdr_len += sizeof(struct udphdr);
3713                 }
3714
3715                 if (shinfo->gso_type & SKB_GSO_DODGY)
3716                         gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3717                                                 shinfo->gso_size);
3718
3719                 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3720         }
3721 }
3722
3723 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3724                                  struct net_device *dev,
3725                                  struct netdev_queue *txq)
3726 {
3727         spinlock_t *root_lock = qdisc_lock(q);
3728         struct sk_buff *to_free = NULL;
3729         bool contended;
3730         int rc;
3731
3732         qdisc_calculate_pkt_len(skb, q);
3733
3734         if (q->flags & TCQ_F_NOLOCK) {
3735                 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3736                 qdisc_run(q);
3737
3738                 if (unlikely(to_free))
3739                         kfree_skb_list(to_free);
3740                 return rc;
3741         }
3742
3743         /*
3744          * Heuristic to force contended enqueues to serialize on a
3745          * separate lock before trying to get qdisc main lock.
3746          * This permits qdisc->running owner to get the lock more
3747          * often and dequeue packets faster.
3748          */
3749         contended = qdisc_is_running(q);
3750         if (unlikely(contended))
3751                 spin_lock(&q->busylock);
3752
3753         spin_lock(root_lock);
3754         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3755                 __qdisc_drop(skb, &to_free);
3756                 rc = NET_XMIT_DROP;
3757         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3758                    qdisc_run_begin(q)) {
3759                 /*
3760                  * This is a work-conserving queue; there are no old skbs
3761                  * waiting to be sent out; and the qdisc is not running -
3762                  * xmit the skb directly.
3763                  */
3764
3765                 qdisc_bstats_update(q, skb);
3766
3767                 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3768                         if (unlikely(contended)) {
3769                                 spin_unlock(&q->busylock);
3770                                 contended = false;
3771                         }
3772                         __qdisc_run(q);
3773                 }
3774
3775                 qdisc_run_end(q);
3776                 rc = NET_XMIT_SUCCESS;
3777         } else {
3778                 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3779                 if (qdisc_run_begin(q)) {
3780                         if (unlikely(contended)) {
3781                                 spin_unlock(&q->busylock);
3782                                 contended = false;
3783                         }
3784                         __qdisc_run(q);
3785                         qdisc_run_end(q);
3786                 }
3787         }
3788         spin_unlock(root_lock);
3789         if (unlikely(to_free))
3790                 kfree_skb_list(to_free);
3791         if (unlikely(contended))
3792                 spin_unlock(&q->busylock);
3793         return rc;
3794 }
3795
3796 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3797 static void skb_update_prio(struct sk_buff *skb)
3798 {
3799         const struct netprio_map *map;
3800         const struct sock *sk;
3801         unsigned int prioidx;
3802
3803         if (skb->priority)
3804                 return;
3805         map = rcu_dereference_bh(skb->dev->priomap);
3806         if (!map)
3807                 return;
3808         sk = skb_to_full_sk(skb);
3809         if (!sk)
3810                 return;
3811
3812         prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3813
3814         if (prioidx < map->priomap_len)
3815                 skb->priority = map->priomap[prioidx];
3816 }
3817 #else
3818 #define skb_update_prio(skb)
3819 #endif
3820
3821 /**
3822  *      dev_loopback_xmit - loop back @skb
3823  *      @net: network namespace this loopback is happening in
3824  *      @sk:  sk needed to be a netfilter okfn
3825  *      @skb: buffer to transmit
3826  */
3827 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3828 {
3829         skb_reset_mac_header(skb);
3830         __skb_pull(skb, skb_network_offset(skb));
3831         skb->pkt_type = PACKET_LOOPBACK;
3832         skb->ip_summed = CHECKSUM_UNNECESSARY;
3833         WARN_ON(!skb_dst(skb));
3834         skb_dst_force(skb);
3835         netif_rx_ni(skb);
3836         return 0;
3837 }
3838 EXPORT_SYMBOL(dev_loopback_xmit);
3839
3840 #ifdef CONFIG_NET_EGRESS
3841 static struct sk_buff *
3842 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3843 {
3844         struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3845         struct tcf_result cl_res;
3846
3847         if (!miniq)
3848                 return skb;
3849
3850         /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3851         mini_qdisc_bstats_cpu_update(miniq, skb);
3852
3853         switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
3854         case TC_ACT_OK:
3855         case TC_ACT_RECLASSIFY:
3856                 skb->tc_index = TC_H_MIN(cl_res.classid);
3857                 break;
3858         case TC_ACT_SHOT:
3859                 mini_qdisc_qstats_cpu_drop(miniq);
3860                 *ret = NET_XMIT_DROP;
3861                 kfree_skb(skb);
3862                 return NULL;
3863         case TC_ACT_STOLEN:
3864         case TC_ACT_QUEUED:
3865         case TC_ACT_TRAP:
3866                 *ret = NET_XMIT_SUCCESS;
3867                 consume_skb(skb);
3868                 return NULL;
3869         case TC_ACT_REDIRECT:
3870                 /* No need to push/pop skb's mac_header here on egress! */
3871                 skb_do_redirect(skb);
3872                 *ret = NET_XMIT_SUCCESS;
3873                 return NULL;
3874         default:
3875                 break;
3876         }
3877
3878         return skb;
3879 }
3880 #endif /* CONFIG_NET_EGRESS */
3881
3882 #ifdef CONFIG_XPS
3883 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3884                                struct xps_dev_maps *dev_maps, unsigned int tci)
3885 {
3886         struct xps_map *map;
3887         int queue_index = -1;
3888
3889         if (dev->num_tc) {
3890                 tci *= dev->num_tc;
3891                 tci += netdev_get_prio_tc_map(dev, skb->priority);
3892         }
3893
3894         map = rcu_dereference(dev_maps->attr_map[tci]);
3895         if (map) {
3896                 if (map->len == 1)
3897                         queue_index = map->queues[0];
3898                 else
3899                         queue_index = map->queues[reciprocal_scale(
3900                                                 skb_get_hash(skb), map->len)];
3901                 if (unlikely(queue_index >= dev->real_num_tx_queues))
3902                         queue_index = -1;
3903         }
3904         return queue_index;
3905 }
3906 #endif
3907
3908 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3909                          struct sk_buff *skb)
3910 {
3911 #ifdef CONFIG_XPS
3912         struct xps_dev_maps *dev_maps;
3913         struct sock *sk = skb->sk;
3914         int queue_index = -1;
3915
3916         if (!static_key_false(&xps_needed))
3917                 return -1;
3918
3919         rcu_read_lock();
3920         if (!static_key_false(&xps_rxqs_needed))
3921                 goto get_cpus_map;
3922
3923         dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
3924         if (dev_maps) {
3925                 int tci = sk_rx_queue_get(sk);
3926
3927                 if (tci >= 0 && tci < dev->num_rx_queues)
3928                         queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3929                                                           tci);
3930         }
3931
3932 get_cpus_map:
3933         if (queue_index < 0) {
3934                 dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
3935                 if (dev_maps) {
3936                         unsigned int tci = skb->sender_cpu - 1;
3937
3938                         queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3939                                                           tci);
3940                 }
3941         }
3942         rcu_read_unlock();
3943
3944         return queue_index;
3945 #else
3946         return -1;
3947 #endif
3948 }
3949
3950 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3951                      struct net_device *sb_dev)
3952 {
3953         return 0;
3954 }
3955 EXPORT_SYMBOL(dev_pick_tx_zero);
3956
3957 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3958                        struct net_device *sb_dev)
3959 {
3960         return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3961 }
3962 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
3963
3964 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3965                      struct net_device *sb_dev)
3966 {
3967         struct sock *sk = skb->sk;
3968         int queue_index = sk_tx_queue_get(sk);
3969
3970         sb_dev = sb_dev ? : dev;
3971
3972         if (queue_index < 0 || skb->ooo_okay ||
3973             queue_index >= dev->real_num_tx_queues) {
3974                 int new_index = get_xps_queue(dev, sb_dev, skb);
3975
3976                 if (new_index < 0)
3977                         new_index = skb_tx_hash(dev, sb_dev, skb);
3978
3979                 if (queue_index != new_index && sk &&
3980                     sk_fullsock(sk) &&
3981                     rcu_access_pointer(sk->sk_dst_cache))
3982                         sk_tx_queue_set(sk, new_index);
3983
3984                 queue_index = new_index;
3985         }
3986
3987         return queue_index;
3988 }
3989 EXPORT_SYMBOL(netdev_pick_tx);
3990
3991 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
3992                                          struct sk_buff *skb,
3993                                          struct net_device *sb_dev)
3994 {
3995         int queue_index = 0;
3996
3997 #ifdef CONFIG_XPS
3998         u32 sender_cpu = skb->sender_cpu - 1;
3999
4000         if (sender_cpu >= (u32)NR_CPUS)
4001                 skb->sender_cpu = raw_smp_processor_id() + 1;
4002 #endif
4003
4004         if (dev->real_num_tx_queues != 1) {
4005                 const struct net_device_ops *ops = dev->netdev_ops;
4006
4007                 if (ops->ndo_select_queue)
4008                         queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4009                 else
4010                         queue_index = netdev_pick_tx(dev, skb, sb_dev);
4011
4012                 queue_index = netdev_cap_txqueue(dev, queue_index);
4013         }
4014
4015         skb_set_queue_mapping(skb, queue_index);
4016         return netdev_get_tx_queue(dev, queue_index);
4017 }
4018
4019 /**
4020  *      __dev_queue_xmit - transmit a buffer
4021  *      @skb: buffer to transmit
4022  *      @sb_dev: suboordinate device used for L2 forwarding offload
4023  *
4024  *      Queue a buffer for transmission to a network device. The caller must
4025  *      have set the device and priority and built the buffer before calling
4026  *      this function. The function can be called from an interrupt.
4027  *
4028  *      A negative errno code is returned on a failure. A success does not
4029  *      guarantee the frame will be transmitted as it may be dropped due
4030  *      to congestion or traffic shaping.
4031  *
4032  * -----------------------------------------------------------------------------------
4033  *      I notice this method can also return errors from the queue disciplines,
4034  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
4035  *      be positive.
4036  *
4037  *      Regardless of the return value, the skb is consumed, so it is currently
4038  *      difficult to retry a send to this method.  (You can bump the ref count
4039  *      before sending to hold a reference for retry if you are careful.)
4040  *
4041  *      When calling this method, interrupts MUST be enabled.  This is because
4042  *      the BH enable code must have IRQs enabled so that it will not deadlock.
4043  *          --BLG
4044  */
4045 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4046 {
4047         struct net_device *dev = skb->dev;
4048         struct netdev_queue *txq;
4049         struct Qdisc *q;
4050         int rc = -ENOMEM;
4051         bool again = false;
4052
4053         skb_reset_mac_header(skb);
4054
4055         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4056                 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
4057
4058         /* Disable soft irqs for various locks below. Also
4059          * stops preemption for RCU.
4060          */
4061         rcu_read_lock_bh();
4062
4063         skb_update_prio(skb);
4064
4065         qdisc_pkt_len_init(skb);
4066 #ifdef CONFIG_NET_CLS_ACT
4067         skb->tc_at_ingress = 0;
4068 # ifdef CONFIG_NET_EGRESS
4069         if (static_branch_unlikely(&egress_needed_key)) {
4070                 skb = sch_handle_egress(skb, &rc, dev);
4071                 if (!skb)
4072                         goto out;
4073         }
4074 # endif
4075 #endif
4076         /* If device/qdisc don't need skb->dst, release it right now while
4077          * its hot in this cpu cache.
4078          */
4079         if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4080                 skb_dst_drop(skb);
4081         else
4082                 skb_dst_force(skb);
4083
4084         txq = netdev_core_pick_tx(dev, skb, sb_dev);
4085         q = rcu_dereference_bh(txq->qdisc);
4086
4087         trace_net_dev_queue(skb);
4088         if (q->enqueue) {
4089                 rc = __dev_xmit_skb(skb, q, dev, txq);
4090                 goto out;
4091         }
4092
4093         /* The device has no queue. Common case for software devices:
4094          * loopback, all the sorts of tunnels...
4095
4096          * Really, it is unlikely that netif_tx_lock protection is necessary
4097          * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
4098          * counters.)
4099          * However, it is possible, that they rely on protection
4100          * made by us here.
4101
4102          * Check this and shot the lock. It is not prone from deadlocks.
4103          *Either shot noqueue qdisc, it is even simpler 8)
4104          */
4105         if (dev->flags & IFF_UP) {
4106                 int cpu = smp_processor_id(); /* ok because BHs are off */
4107
4108                 if (txq->xmit_lock_owner != cpu) {
4109                         if (dev_xmit_recursion())
4110                                 goto recursion_alert;
4111
4112                         skb = validate_xmit_skb(skb, dev, &again);
4113                         if (!skb)
4114                                 goto out;
4115
4116                         HARD_TX_LOCK(dev, txq, cpu);
4117
4118                         if (!netif_xmit_stopped(txq)) {
4119                                 dev_xmit_recursion_inc();
4120                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4121                                 dev_xmit_recursion_dec();
4122                                 if (dev_xmit_complete(rc)) {
4123                                         HARD_TX_UNLOCK(dev, txq);
4124                                         goto out;
4125                                 }
4126                         }
4127                         HARD_TX_UNLOCK(dev, txq);
4128                         net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4129                                              dev->name);
4130                 } else {
4131                         /* Recursion is detected! It is possible,
4132                          * unfortunately
4133                          */
4134 recursion_alert:
4135                         net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4136                                              dev->name);
4137                 }
4138         }
4139
4140         rc = -ENETDOWN;
4141         rcu_read_unlock_bh();
4142
4143         atomic_long_inc(&dev->tx_dropped);
4144         kfree_skb_list(skb);
4145         return rc;
4146 out:
4147         rcu_read_unlock_bh();
4148         return rc;
4149 }
4150
4151 int dev_queue_xmit(struct sk_buff *skb)
4152 {
4153         return __dev_queue_xmit(skb, NULL);
4154 }
4155 EXPORT_SYMBOL(dev_queue_xmit);
4156
4157 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
4158 {
4159         return __dev_queue_xmit(skb, sb_dev);
4160 }
4161 EXPORT_SYMBOL(dev_queue_xmit_accel);
4162
4163 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4164 {
4165         struct net_device *dev = skb->dev;
4166         struct sk_buff *orig_skb = skb;
4167         struct netdev_queue *txq;
4168         int ret = NETDEV_TX_BUSY;
4169         bool again = false;
4170
4171         if (unlikely(!netif_running(dev) ||
4172                      !netif_carrier_ok(dev)))
4173                 goto drop;
4174
4175         skb = validate_xmit_skb_list(skb, dev, &again);
4176         if (skb != orig_skb)
4177                 goto drop;
4178
4179         skb_set_queue_mapping(skb, queue_id);
4180         txq = skb_get_tx_queue(dev, skb);
4181
4182         local_bh_disable();
4183
4184         HARD_TX_LOCK(dev, txq, smp_processor_id());
4185         if (!netif_xmit_frozen_or_drv_stopped(txq))
4186                 ret = netdev_start_xmit(skb, dev, txq, false);
4187         HARD_TX_UNLOCK(dev, txq);
4188
4189         local_bh_enable();
4190
4191         if (!dev_xmit_complete(ret))
4192                 kfree_skb(skb);
4193
4194         return ret;
4195 drop:
4196         atomic_long_inc(&dev->tx_dropped);
4197         kfree_skb_list(skb);
4198         return NET_XMIT_DROP;
4199 }
4200 EXPORT_SYMBOL(dev_direct_xmit);
4201
4202 /*************************************************************************
4203  *                      Receiver routines
4204  *************************************************************************/
4205
4206 int netdev_max_backlog __read_mostly = 1000;
4207 EXPORT_SYMBOL(netdev_max_backlog);
4208
4209 int netdev_tstamp_prequeue __read_mostly = 1;
4210 int netdev_budget __read_mostly = 300;
4211 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
4212 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4213 int weight_p __read_mostly = 64;           /* old backlog weight */
4214 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4215 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4216 int dev_rx_weight __read_mostly = 64;
4217 int dev_tx_weight __read_mostly = 64;
4218 /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
4219 int gro_normal_batch __read_mostly = 8;
4220
4221 /* Called with irq disabled */
4222 static inline void ____napi_schedule(struct softnet_data *sd,
4223                                      struct napi_struct *napi)
4224 {
4225         list_add_tail(&napi->poll_list, &sd->poll_list);
4226         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4227 }
4228
4229 #ifdef CONFIG_RPS
4230
4231 /* One global table that all flow-based protocols share. */
4232 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4233 EXPORT_SYMBOL(rps_sock_flow_table);
4234 u32 rps_cpu_mask __read_mostly;
4235 EXPORT_SYMBOL(rps_cpu_mask);
4236
4237 struct static_key_false rps_needed __read_mostly;
4238 EXPORT_SYMBOL(rps_needed);
4239 struct static_key_false rfs_needed __read_mostly;
4240 EXPORT_SYMBOL(rfs_needed);
4241
4242 static struct rps_dev_flow *
4243 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4244             struct rps_dev_flow *rflow, u16 next_cpu)
4245 {
4246         if (next_cpu < nr_cpu_ids) {
4247 #ifdef CONFIG_RFS_ACCEL
4248                 struct netdev_rx_queue *rxqueue;
4249                 struct rps_dev_flow_table *flow_table;
4250                 struct rps_dev_flow *old_rflow;
4251                 u32 flow_id;
4252                 u16 rxq_index;
4253                 int rc;
4254
4255                 /* Should we steer this flow to a different hardware queue? */
4256                 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4257                     !(dev->features & NETIF_F_NTUPLE))
4258                         goto out;
4259                 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4260                 if (rxq_index == skb_get_rx_queue(skb))
4261                         goto out;
4262
4263                 rxqueue = dev->_rx + rxq_index;
4264                 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4265                 if (!flow_table)
4266                         goto out;
4267                 flow_id = skb_get_hash(skb) & flow_table->mask;
4268                 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4269                                                         rxq_index, flow_id);
4270                 if (rc < 0)
4271                         goto out;
4272                 old_rflow = rflow;
4273                 rflow = &flow_table->flows[flow_id];
4274                 rflow->filter = rc;
4275                 if (old_rflow->filter == rflow->filter)
4276                         old_rflow->filter = RPS_NO_FILTER;
4277         out:
4278 #endif
4279                 rflow->last_qtail =
4280                         per_cpu(softnet_data, next_cpu).input_queue_head;
4281         }
4282
4283         rflow->cpu = next_cpu;
4284         return rflow;
4285 }
4286
4287 /*
4288  * get_rps_cpu is called from netif_receive_skb and returns the target
4289  * CPU from the RPS map of the receiving queue for a given skb.
4290  * rcu_read_lock must be held on entry.
4291  */
4292 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4293                        struct rps_dev_flow **rflowp)
4294 {
4295         const struct rps_sock_flow_table *sock_flow_table;
4296         struct netdev_rx_queue *rxqueue = dev->_rx;
4297         struct rps_dev_flow_table *flow_table;
4298         struct rps_map *map;
4299         int cpu = -1;
4300         u32 tcpu;
4301         u32 hash;
4302
4303         if (skb_rx_queue_recorded(skb)) {
4304                 u16 index = skb_get_rx_queue(skb);
4305
4306                 if (unlikely(index >= dev->real_num_rx_queues)) {
4307                         WARN_ONCE(dev->real_num_rx_queues > 1,
4308                                   "%s received packet on queue %u, but number "
4309                                   "of RX queues is %u\n",
4310                                   dev->name, index, dev->real_num_rx_queues);
4311                         goto done;
4312                 }
4313                 rxqueue += index;
4314         }
4315
4316         /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4317
4318         flow_table = rcu_dereference(rxqueue->rps_flow_table);
4319         map = rcu_dereference(rxqueue->rps_map);
4320         if (!flow_table && !map)
4321                 goto done;
4322
4323         skb_reset_network_header(skb);
4324         hash = skb_get_hash(skb);
4325         if (!hash)
4326                 goto done;
4327
4328         sock_flow_table = rcu_dereference(rps_sock_flow_table);
4329         if (flow_table && sock_flow_table) {
4330                 struct rps_dev_flow *rflow;
4331                 u32 next_cpu;
4332                 u32 ident;
4333
4334                 /* First check into global flow table if there is a match */
4335                 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4336                 if ((ident ^ hash) & ~rps_cpu_mask)
4337                         goto try_rps;
4338
4339                 next_cpu = ident & rps_cpu_mask;
4340
4341                 /* OK, now we know there is a match,
4342                  * we can look at the local (per receive queue) flow table
4343                  */
4344                 rflow = &flow_table->flows[hash & flow_table->mask];
4345                 tcpu = rflow->cpu;
4346
4347                 /*
4348                  * If the desired CPU (where last recvmsg was done) is
4349                  * different from current CPU (one in the rx-queue flow
4350                  * table entry), switch if one of the following holds:
4351                  *   - Current CPU is unset (>= nr_cpu_ids).
4352                  *   - Current CPU is offline.
4353                  *   - The current CPU's queue tail has advanced beyond the
4354                  *     last packet that was enqueued using this table entry.
4355                  *     This guarantees that all previous packets for the flow
4356                  *     have been dequeued, thus preserving in order delivery.
4357                  */
4358                 if (unlikely(tcpu != next_cpu) &&
4359                     (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4360                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4361                       rflow->last_qtail)) >= 0)) {
4362                         tcpu = next_cpu;
4363                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4364                 }
4365
4366                 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4367                         *rflowp = rflow;
4368                         cpu = tcpu;
4369                         goto done;
4370                 }
4371         }
4372
4373 try_rps:
4374
4375         if (map) {
4376                 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4377                 if (cpu_online(tcpu)) {
4378                         cpu = tcpu;
4379                         goto done;
4380                 }
4381         }
4382
4383 done:
4384         return cpu;
4385 }
4386
4387 #ifdef CONFIG_RFS_ACCEL
4388
4389 /**
4390  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4391  * @dev: Device on which the filter was set
4392  * @rxq_index: RX queue index
4393  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4394  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4395  *
4396  * Drivers that implement ndo_rx_flow_steer() should periodically call
4397  * this function for each installed filter and remove the filters for
4398  * which it returns %true.
4399  */
4400 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4401                          u32 flow_id, u16 filter_id)
4402 {
4403         struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4404         struct rps_dev_flow_table *flow_table;
4405         struct rps_dev_flow *rflow;
4406         bool expire = true;
4407         unsigned int cpu;
4408
4409         rcu_read_lock();
4410         flow_table = rcu_dereference(rxqueue->rps_flow_table);
4411         if (flow_table && flow_id <= flow_table->mask) {
4412                 rflow = &flow_table->flows[flow_id];
4413                 cpu = READ_ONCE(rflow->cpu);
4414                 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4415                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4416                            rflow->last_qtail) <
4417                      (int)(10 * flow_table->mask)))
4418                         expire = false;
4419         }
4420         rcu_read_unlock();
4421         return expire;
4422 }
4423 EXPORT_SYMBOL(rps_may_expire_flow);
4424
4425 #endif /* CONFIG_RFS_ACCEL */
4426
4427 /* Called from hardirq (IPI) context */
4428 static void rps_trigger_softirq(void *data)
4429 {
4430         struct softnet_data *sd = data;
4431
4432         ____napi_schedule(sd, &sd->backlog);
4433         sd->received_rps++;
4434 }
4435
4436 #endif /* CONFIG_RPS */
4437
4438 /*
4439  * Check if this softnet_data structure is another cpu one
4440  * If yes, queue it to our IPI list and return 1
4441  * If no, return 0
4442  */
4443 static int rps_ipi_queued(struct softnet_data *sd)
4444 {
4445 #ifdef CONFIG_RPS
4446         struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4447
4448         if (sd != mysd) {
4449                 sd->rps_ipi_next = mysd->rps_ipi_list;
4450                 mysd->rps_ipi_list = sd;
4451
4452                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4453                 return 1;
4454         }
4455 #endif /* CONFIG_RPS */
4456         return 0;
4457 }
4458
4459 #ifdef CONFIG_NET_FLOW_LIMIT
4460 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4461 #endif
4462
4463 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4464 {
4465 #ifdef CONFIG_NET_FLOW_LIMIT
4466         struct sd_flow_limit *fl;
4467         struct softnet_data *sd;
4468         unsigned int old_flow, new_flow;
4469
4470         if (qlen < (netdev_max_backlog >> 1))
4471                 return false;
4472
4473         sd = this_cpu_ptr(&softnet_data);
4474
4475         rcu_read_lock();
4476         fl = rcu_dereference(sd->flow_limit);
4477         if (fl) {
4478                 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4479                 old_flow = fl->history[fl->history_head];
4480                 fl->history[fl->history_head] = new_flow;
4481
4482                 fl->history_head++;
4483                 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4484
4485                 if (likely(fl->buckets[old_flow]))
4486                         fl->buckets[old_flow]--;
4487
4488                 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4489                         fl->count++;
4490                         rcu_read_unlock();
4491                         return true;
4492                 }
4493         }
4494         rcu_read_unlock();
4495 #endif
4496         return false;
4497 }
4498
4499 /*
4500  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4501  * queue (may be a remote CPU queue).
4502  */
4503 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4504                               unsigned int *qtail)
4505 {
4506         struct softnet_data *sd;
4507         unsigned long flags;
4508         unsigned int qlen;
4509
4510         sd = &per_cpu(softnet_data, cpu);
4511
4512         local_irq_save(flags);
4513
4514         rps_lock(sd);
4515         if (!netif_running(skb->dev))
4516                 goto drop;
4517         qlen = skb_queue_len(&sd->input_pkt_queue);
4518         if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4519                 if (qlen) {
4520 enqueue:
4521                         __skb_queue_tail(&sd->input_pkt_queue, skb);
4522                         input_queue_tail_incr_save(sd, qtail);
4523                         rps_unlock(sd);
4524                         local_irq_restore(flags);
4525                         return NET_RX_SUCCESS;
4526                 }
4527
4528                 /* Schedule NAPI for backlog device
4529                  * We can use non atomic operation since we own the queue lock
4530                  */
4531                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4532                         if (!rps_ipi_queued(sd))
4533                                 ____napi_schedule(sd, &sd->backlog);
4534                 }
4535                 goto enqueue;
4536         }
4537
4538 drop:
4539         sd->dropped++;
4540         rps_unlock(sd);
4541
4542         local_irq_restore(flags);
4543
4544         atomic_long_inc(&skb->dev->rx_dropped);
4545         kfree_skb(skb);
4546         return NET_RX_DROP;
4547 }
4548
4549 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4550 {
4551         struct net_device *dev = skb->dev;
4552         struct netdev_rx_queue *rxqueue;
4553
4554         rxqueue = dev->_rx;
4555
4556         if (skb_rx_queue_recorded(skb)) {
4557                 u16 index = skb_get_rx_queue(skb);
4558
4559                 if (unlikely(index >= dev->real_num_rx_queues)) {
4560                         WARN_ONCE(dev->real_num_rx_queues > 1,
4561                                   "%s received packet on queue %u, but number "
4562                                   "of RX queues is %u\n",
4563                                   dev->name, index, dev->real_num_rx_queues);
4564
4565                         return rxqueue; /* Return first rxqueue */
4566                 }
4567                 rxqueue += index;
4568         }
4569         return rxqueue;
4570 }
4571
4572 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4573                                      struct xdp_buff *xdp,
4574                                      struct bpf_prog *xdp_prog)
4575 {
4576         struct netdev_rx_queue *rxqueue;
4577         void *orig_data, *orig_data_end;
4578         u32 metalen, act = XDP_DROP;
4579         __be16 orig_eth_type;
4580         struct ethhdr *eth;
4581         bool orig_bcast;
4582         int hlen, off;
4583         u32 mac_len;
4584
4585         /* Reinjected packets coming from act_mirred or similar should
4586          * not get XDP generic processing.
4587          */
4588         if (skb_is_redirected(skb))
4589                 return XDP_PASS;
4590
4591         /* XDP packets must be linear and must have sufficient headroom
4592          * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4593          * native XDP provides, thus we need to do it here as well.
4594          */
4595         if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4596             skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4597                 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4598                 int troom = skb->tail + skb->data_len - skb->end;
4599
4600                 /* In case we have to go down the path and also linearize,
4601                  * then lets do the pskb_expand_head() work just once here.
4602                  */
4603                 if (pskb_expand_head(skb,
4604                                      hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4605                                      troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4606                         goto do_drop;
4607                 if (skb_linearize(skb))
4608                         goto do_drop;
4609         }
4610
4611         /* The XDP program wants to see the packet starting at the MAC
4612          * header.
4613          */
4614         mac_len = skb->data - skb_mac_header(skb);
4615         hlen = skb_headlen(skb) + mac_len;
4616         xdp->data = skb->data - mac_len;
4617         xdp->data_meta = xdp->data;
4618         xdp->data_end = xdp->data + hlen;
4619         xdp->data_hard_start = skb->data - skb_headroom(skb);
4620
4621         /* SKB "head" area always have tailroom for skb_shared_info */
4622         xdp->frame_sz  = (void *)skb_end_pointer(skb) - xdp->data_hard_start;
4623         xdp->frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4624
4625         orig_data_end = xdp->data_end;
4626         orig_data = xdp->data;
4627         eth = (struct ethhdr *)xdp->data;
4628         orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4629         orig_eth_type = eth->h_proto;
4630
4631         rxqueue = netif_get_rxqueue(skb);
4632         xdp->rxq = &rxqueue->xdp_rxq;
4633
4634         act = bpf_prog_run_xdp(xdp_prog, xdp);
4635
4636         /* check if bpf_xdp_adjust_head was used */
4637         off = xdp->data - orig_data;
4638         if (off) {
4639                 if (off > 0)
4640                         __skb_pull(skb, off);
4641                 else if (off < 0)
4642                         __skb_push(skb, -off);
4643
4644                 skb->mac_header += off;
4645                 skb_reset_network_header(skb);
4646         }
4647
4648         /* check if bpf_xdp_adjust_tail was used */
4649         off = xdp->data_end - orig_data_end;
4650         if (off != 0) {
4651                 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4652                 skb->len += off; /* positive on grow, negative on shrink */
4653         }
4654
4655         /* check if XDP changed eth hdr such SKB needs update */
4656         eth = (struct ethhdr *)xdp->data;
4657         if ((orig_eth_type != eth->h_proto) ||
4658             (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4659                 __skb_push(skb, ETH_HLEN);
4660                 skb->protocol = eth_type_trans(skb, skb->dev);
4661         }
4662
4663         switch (act) {
4664         case XDP_REDIRECT:
4665         case XDP_TX:
4666                 __skb_push(skb, mac_len);
4667                 break;
4668         case XDP_PASS:
4669                 metalen = xdp->data - xdp->data_meta;
4670                 if (metalen)
4671                         skb_metadata_set(skb, metalen);
4672                 break;
4673         default:
4674                 bpf_warn_invalid_xdp_action(act);
4675                 /* fall through */
4676         case XDP_ABORTED:
4677                 trace_xdp_exception(skb->dev, xdp_prog, act);
4678                 /* fall through */
4679         case XDP_DROP:
4680         do_drop:
4681                 kfree_skb(skb);
4682                 break;
4683         }
4684
4685         return act;
4686 }
4687
4688 /* When doing generic XDP we have to bypass the qdisc layer and the
4689  * network taps in order to match in-driver-XDP behavior.
4690  */
4691 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4692 {
4693         struct net_device *dev = skb->dev;
4694         struct netdev_queue *txq;
4695         bool free_skb = true;
4696         int cpu, rc;
4697
4698         txq = netdev_core_pick_tx(dev, skb, NULL);
4699         cpu = smp_processor_id();
4700         HARD_TX_LOCK(dev, txq, cpu);
4701         if (!netif_xmit_stopped(txq)) {
4702                 rc = netdev_start_xmit(skb, dev, txq, 0);
4703                 if (dev_xmit_complete(rc))
4704                         free_skb = false;
4705         }
4706         HARD_TX_UNLOCK(dev, txq);
4707         if (free_skb) {
4708                 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4709                 kfree_skb(skb);
4710         }
4711 }
4712
4713 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4714
4715 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4716 {
4717         if (xdp_prog) {
4718                 struct xdp_buff xdp;
4719                 u32 act;
4720                 int err;
4721
4722                 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4723                 if (act != XDP_PASS) {
4724                         switch (act) {
4725                         case XDP_REDIRECT:
4726                                 err = xdp_do_generic_redirect(skb->dev, skb,
4727                                                               &xdp, xdp_prog);
4728                                 if (err)
4729                                         goto out_redir;
4730                                 break;
4731                         case XDP_TX:
4732                                 generic_xdp_tx(skb, xdp_prog);
4733                                 break;
4734                         }
4735                         return XDP_DROP;
4736                 }
4737         }
4738         return XDP_PASS;
4739 out_redir:
4740         kfree_skb(skb);
4741         return XDP_DROP;
4742 }
4743 EXPORT_SYMBOL_GPL(do_xdp_generic);
4744
4745 static int netif_rx_internal(struct sk_buff *skb)
4746 {
4747         int ret;
4748
4749         net_timestamp_check(netdev_tstamp_prequeue, skb);
4750
4751         trace_netif_rx(skb);
4752
4753 #ifdef CONFIG_RPS
4754         if (static_branch_unlikely(&rps_needed)) {
4755                 struct rps_dev_flow voidflow, *rflow = &voidflow;
4756                 int cpu;
4757
4758                 preempt_disable();
4759                 rcu_read_lock();
4760
4761                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
4762                 if (cpu < 0)
4763                         cpu = smp_processor_id();
4764
4765                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4766
4767                 rcu_read_unlock();
4768                 preempt_enable();
4769         } else
4770 #endif
4771         {
4772                 unsigned int qtail;
4773
4774                 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4775                 put_cpu();
4776         }
4777         return ret;
4778 }
4779
4780 /**
4781  *      netif_rx        -       post buffer to the network code
4782  *      @skb: buffer to post
4783  *
4784  *      This function receives a packet from a device driver and queues it for
4785  *      the upper (protocol) levels to process.  It always succeeds. The buffer
4786  *      may be dropped during processing for congestion control or by the
4787  *      protocol layers.
4788  *
4789  *      return values:
4790  *      NET_RX_SUCCESS  (no congestion)
4791  *      NET_RX_DROP     (packet was dropped)
4792  *
4793  */
4794
4795 int netif_rx(struct sk_buff *skb)
4796 {
4797         int ret;
4798
4799         trace_netif_rx_entry(skb);
4800
4801         ret = netif_rx_internal(skb);
4802         trace_netif_rx_exit(ret);
4803
4804         return ret;
4805 }
4806 EXPORT_SYMBOL(netif_rx);
4807
4808 int netif_rx_ni(struct sk_buff *skb)
4809 {
4810         int err;
4811
4812         trace_netif_rx_ni_entry(skb);
4813
4814         preempt_disable();
4815         err = netif_rx_internal(skb);
4816         if (local_softirq_pending())
4817                 do_softirq();
4818         preempt_enable();
4819         trace_netif_rx_ni_exit(err);
4820
4821         return err;
4822 }
4823 EXPORT_SYMBOL(netif_rx_ni);
4824
4825 static __latent_entropy void net_tx_action(struct softirq_action *h)
4826 {
4827         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4828
4829         if (sd->completion_queue) {
4830                 struct sk_buff *clist;
4831
4832                 local_irq_disable();
4833                 clist = sd->completion_queue;
4834                 sd->completion_queue = NULL;
4835                 local_irq_enable();
4836
4837                 while (clist) {
4838                         struct sk_buff *skb = clist;
4839
4840                         clist = clist->next;
4841
4842                         WARN_ON(refcount_read(&skb->users));
4843                         if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4844                                 trace_consume_skb(skb);
4845                         else
4846                                 trace_kfree_skb(skb, net_tx_action);
4847
4848                         if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4849                                 __kfree_skb(skb);
4850                         else
4851                                 __kfree_skb_defer(skb);
4852                 }
4853
4854                 __kfree_skb_flush();
4855         }
4856
4857         if (sd->output_queue) {
4858                 struct Qdisc *head;
4859
4860                 local_irq_disable();
4861                 head = sd->output_queue;
4862                 sd->output_queue = NULL;
4863                 sd->output_queue_tailp = &sd->output_queue;
4864                 local_irq_enable();
4865
4866                 while (head) {
4867                         struct Qdisc *q = head;
4868                         spinlock_t *root_lock = NULL;
4869
4870                         head = head->next_sched;
4871
4872                         if (!(q->flags & TCQ_F_NOLOCK)) {
4873                                 root_lock = qdisc_lock(q);
4874                                 spin_lock(root_lock);
4875                         }
4876                         /* We need to make sure head->next_sched is read
4877                          * before clearing __QDISC_STATE_SCHED
4878                          */
4879                         smp_mb__before_atomic();
4880                         clear_bit(__QDISC_STATE_SCHED, &q->state);
4881                         qdisc_run(q);
4882                         if (root_lock)
4883                                 spin_unlock(root_lock);
4884                 }
4885         }
4886
4887         xfrm_dev_backlog(sd);
4888 }
4889
4890 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4891 /* This hook is defined here for ATM LANE */
4892 int (*br_fdb_test_addr_hook)(struct net_device *dev,
4893                              unsigned char *addr) __read_mostly;
4894 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
4895 #endif
4896
4897 static inline struct sk_buff *
4898 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4899                    struct net_device *orig_dev)
4900 {
4901 #ifdef CONFIG_NET_CLS_ACT
4902         struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
4903         struct tcf_result cl_res;
4904
4905         /* If there's at least one ingress present somewhere (so
4906          * we get here via enabled static key), remaining devices
4907          * that are not configured with an ingress qdisc will bail
4908          * out here.
4909          */
4910         if (!miniq)
4911                 return skb;
4912
4913         if (*pt_prev) {
4914                 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4915                 *pt_prev = NULL;
4916         }
4917
4918         qdisc_skb_cb(skb)->pkt_len = skb->len;
4919         skb->tc_at_ingress = 1;
4920         mini_qdisc_bstats_cpu_update(miniq, skb);
4921
4922         switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list,
4923                                      &cl_res, false)) {
4924         case TC_ACT_OK:
4925         case TC_ACT_RECLASSIFY:
4926                 skb->tc_index = TC_H_MIN(cl_res.classid);
4927                 break;
4928         case TC_ACT_SHOT:
4929                 mini_qdisc_qstats_cpu_drop(miniq);
4930                 kfree_skb(skb);
4931                 return NULL;
4932         case TC_ACT_STOLEN:
4933         case TC_ACT_QUEUED:
4934         case TC_ACT_TRAP:
4935                 consume_skb(skb);
4936                 return NULL;
4937         case TC_ACT_REDIRECT:
4938                 /* skb_mac_header check was done by cls/act_bpf, so
4939                  * we can safely push the L2 header back before
4940                  * redirecting to another netdev
4941                  */
4942                 __skb_push(skb, skb->mac_len);
4943                 skb_do_redirect(skb);
4944                 return NULL;
4945         case TC_ACT_CONSUMED:
4946                 return NULL;
4947         default:
4948                 break;
4949         }
4950 #endif /* CONFIG_NET_CLS_ACT */
4951         return skb;
4952 }
4953
4954 /**
4955  *      netdev_is_rx_handler_busy - check if receive handler is registered
4956  *      @dev: device to check
4957  *
4958  *      Check if a receive handler is already registered for a given device.
4959  *      Return true if there one.
4960  *
4961  *      The caller must hold the rtnl_mutex.
4962  */
4963 bool netdev_is_rx_handler_busy(struct net_device *dev)
4964 {
4965         ASSERT_RTNL();
4966         return dev && rtnl_dereference(dev->rx_handler);
4967 }
4968 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4969
4970 /**
4971  *      netdev_rx_handler_register - register receive handler
4972  *      @dev: device to register a handler for
4973  *      @rx_handler: receive handler to register
4974  *      @rx_handler_data: data pointer that is used by rx handler
4975  *
4976  *      Register a receive handler for a device. This handler will then be
4977  *      called from __netif_receive_skb. A negative errno code is returned
4978  *      on a failure.
4979  *
4980  *      The caller must hold the rtnl_mutex.
4981  *
4982  *      For a general description of rx_handler, see enum rx_handler_result.
4983  */
4984 int netdev_rx_handler_register(struct net_device *dev,
4985                                rx_handler_func_t *rx_handler,
4986                                void *rx_handler_data)
4987 {
4988         if (netdev_is_rx_handler_busy(dev))
4989                 return -EBUSY;
4990
4991         if (dev->priv_flags & IFF_NO_RX_HANDLER)
4992                 return -EINVAL;
4993
4994         /* Note: rx_handler_data must be set before rx_handler */
4995         rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
4996         rcu_assign_pointer(dev->rx_handler, rx_handler);
4997
4998         return 0;
4999 }
5000 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
5001
5002 /**
5003  *      netdev_rx_handler_unregister - unregister receive handler
5004  *      @dev: device to unregister a handler from
5005  *
5006  *      Unregister a receive handler from a device.
5007  *
5008  *      The caller must hold the rtnl_mutex.
5009  */
5010 void netdev_rx_handler_unregister(struct net_device *dev)
5011 {
5012
5013         ASSERT_RTNL();
5014         RCU_INIT_POINTER(dev->rx_handler, NULL);
5015         /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
5016          * section has a guarantee to see a non NULL rx_handler_data
5017          * as well.
5018          */
5019         synchronize_net();
5020         RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5021 }
5022 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5023
5024 /*
5025  * Limit the use of PFMEMALLOC reserves to those protocols that implement
5026  * the special handling of PFMEMALLOC skbs.
5027  */
5028 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5029 {
5030         switch (skb->protocol) {
5031         case htons(ETH_P_ARP):
5032         case htons(ETH_P_IP):
5033         case htons(ETH_P_IPV6):
5034         case htons(ETH_P_8021Q):
5035         case htons(ETH_P_8021AD):
5036                 return true;
5037         default:
5038                 return false;
5039         }
5040 }
5041
5042 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5043                              int *ret, struct net_device *orig_dev)
5044 {
5045         if (nf_hook_ingress_active(skb)) {
5046                 int ingress_retval;
5047
5048                 if (*pt_prev) {
5049                         *ret = deliver_skb(skb, *pt_prev, orig_dev);
5050                         *pt_prev = NULL;
5051                 }
5052
5053                 rcu_read_lock();
5054                 ingress_retval = nf_hook_ingress(skb);
5055                 rcu_read_unlock();
5056                 return ingress_retval;
5057         }
5058         return 0;
5059 }
5060
5061 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5062                                     struct packet_type **ppt_prev)
5063 {
5064         struct packet_type *ptype, *pt_prev;
5065         rx_handler_func_t *rx_handler;
5066         struct sk_buff *skb = *pskb;
5067         struct net_device *orig_dev;
5068         bool deliver_exact = false;
5069         int ret = NET_RX_DROP;
5070         __be16 type;
5071
5072         net_timestamp_check(!netdev_tstamp_prequeue, skb);
5073
5074         trace_netif_receive_skb(skb);
5075
5076         orig_dev = skb->dev;
5077
5078         skb_reset_network_header(skb);
5079         if (!skb_transport_header_was_set(skb))
5080                 skb_reset_transport_header(skb);
5081         skb_reset_mac_len(skb);
5082
5083         pt_prev = NULL;
5084
5085 another_round:
5086         skb->skb_iif = skb->dev->ifindex;
5087
5088         __this_cpu_inc(softnet_data.processed);
5089
5090         if (static_branch_unlikely(&generic_xdp_needed_key)) {
5091                 int ret2;
5092
5093                 preempt_disable();
5094                 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5095                 preempt_enable();
5096
5097                 if (ret2 != XDP_PASS) {
5098                         ret = NET_RX_DROP;
5099                         goto out;
5100                 }
5101                 skb_reset_mac_len(skb);
5102         }
5103
5104         if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
5105             skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
5106                 skb = skb_vlan_untag(skb);
5107                 if (unlikely(!skb))
5108                         goto out;
5109         }
5110
5111         if (skb_skip_tc_classify(skb))
5112                 goto skip_classify;
5113
5114         if (pfmemalloc)
5115                 goto skip_taps;
5116
5117         list_for_each_entry_rcu(ptype, &ptype_all, list) {
5118                 if (pt_prev)
5119                         ret = deliver_skb(skb, pt_prev, orig_dev);
5120                 pt_prev = ptype;
5121         }
5122
5123         list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5124                 if (pt_prev)
5125                         ret = deliver_skb(skb, pt_prev, orig_dev);
5126                 pt_prev = ptype;
5127         }
5128
5129 skip_taps:
5130 #ifdef CONFIG_NET_INGRESS
5131         if (static_branch_unlikely(&ingress_needed_key)) {
5132                 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
5133                 if (!skb)
5134                         goto out;
5135
5136                 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5137                         goto out;
5138         }
5139 #endif
5140         skb_reset_redirect(skb);
5141 skip_classify:
5142         if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5143                 goto drop;
5144
5145         if (skb_vlan_tag_present(skb)) {
5146                 if (pt_prev) {
5147                         ret = deliver_skb(skb, pt_prev, orig_dev);
5148                         pt_prev = NULL;
5149                 }
5150                 if (vlan_do_receive(&skb))
5151                         goto another_round;
5152                 else if (unlikely(!skb))
5153                         goto out;
5154         }
5155
5156         rx_handler = rcu_dereference(skb->dev->rx_handler);
5157         if (rx_handler) {
5158                 if (pt_prev) {
5159                         ret = deliver_skb(skb, pt_prev, orig_dev);
5160                         pt_prev = NULL;
5161                 }
5162                 switch (rx_handler(&skb)) {
5163                 case RX_HANDLER_CONSUMED:
5164                         ret = NET_RX_SUCCESS;
5165                         goto out;
5166                 case RX_HANDLER_ANOTHER:
5167                         goto another_round;
5168                 case RX_HANDLER_EXACT:
5169                         deliver_exact = true;
5170                 case RX_HANDLER_PASS:
5171                         break;
5172                 default:
5173                         BUG();
5174                 }
5175         }
5176
5177         if (unlikely(skb_vlan_tag_present(skb))) {
5178 check_vlan_id:
5179                 if (skb_vlan_tag_get_id(skb)) {
5180                         /* Vlan id is non 0 and vlan_do_receive() above couldn't
5181                          * find vlan device.
5182                          */
5183                         skb->pkt_type = PACKET_OTHERHOST;
5184                 } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
5185                            skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
5186                         /* Outer header is 802.1P with vlan 0, inner header is
5187                          * 802.1Q or 802.1AD and vlan_do_receive() above could
5188                          * not find vlan dev for vlan id 0.
5189                          */
5190                         __vlan_hwaccel_clear_tag(skb);
5191                         skb = skb_vlan_untag(skb);
5192                         if (unlikely(!skb))
5193                                 goto out;
5194                         if (vlan_do_receive(&skb))
5195                                 /* After stripping off 802.1P header with vlan 0
5196                                  * vlan dev is found for inner header.
5197                                  */
5198                                 goto another_round;
5199                         else if (unlikely(!skb))
5200                                 goto out;
5201                         else
5202                                 /* We have stripped outer 802.1P vlan 0 header.
5203                                  * But could not find vlan dev.
5204                                  * check again for vlan id to set OTHERHOST.
5205                                  */
5206                                 goto check_vlan_id;
5207                 }
5208                 /* Note: we might in the future use prio bits
5209                  * and set skb->priority like in vlan_do_receive()
5210                  * For the time being, just ignore Priority Code Point
5211                  */
5212                 __vlan_hwaccel_clear_tag(skb);
5213         }
5214
5215         type = skb->protocol;
5216
5217         /* deliver only exact match when indicated */
5218         if (likely(!deliver_exact)) {
5219                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5220                                        &ptype_base[ntohs(type) &
5221                                                    PTYPE_HASH_MASK]);
5222         }
5223
5224         deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5225                                &orig_dev->ptype_specific);
5226
5227         if (unlikely(skb->dev != orig_dev)) {
5228                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5229                                        &skb->dev->ptype_specific);
5230         }
5231
5232         if (pt_prev) {
5233                 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5234                         goto drop;
5235                 *ppt_prev = pt_prev;
5236         } else {
5237 drop:
5238                 if (!deliver_exact)
5239                         atomic_long_inc(&skb->dev->rx_dropped);
5240                 else
5241                         atomic_long_inc(&skb->dev->rx_nohandler);
5242                 kfree_skb(skb);
5243                 /* Jamal, now you will not able to escape explaining
5244                  * me how you were going to use this. :-)
5245                  */
5246                 ret = NET_RX_DROP;
5247         }
5248
5249 out:
5250         /* The invariant here is that if *ppt_prev is not NULL
5251          * then skb should also be non-NULL.
5252          *
5253          * Apparently *ppt_prev assignment above holds this invariant due to
5254          * skb dereferencing near it.
5255          */
5256         *pskb = skb;
5257         return ret;
5258 }
5259
5260 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5261 {
5262         struct net_device *orig_dev = skb->dev;
5263         struct packet_type *pt_prev = NULL;
5264         int ret;
5265
5266         ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5267         if (pt_prev)
5268                 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5269                                          skb->dev, pt_prev, orig_dev);
5270         return ret;
5271 }
5272
5273 /**
5274  *      netif_receive_skb_core - special purpose version of netif_receive_skb
5275  *      @skb: buffer to process
5276  *
5277  *      More direct receive version of netif_receive_skb().  It should
5278  *      only be used by callers that have a need to skip RPS and Generic XDP.
5279  *      Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5280  *
5281  *      This function may only be called from softirq context and interrupts
5282  *      should be enabled.
5283  *
5284  *      Return values (usually ignored):
5285  *      NET_RX_SUCCESS: no congestion
5286  *      NET_RX_DROP: packet was dropped
5287  */
5288 int netif_receive_skb_core(struct sk_buff *skb)
5289 {
5290         int ret;
5291
5292         rcu_read_lock();
5293         ret = __netif_receive_skb_one_core(skb, false);
5294         rcu_read_unlock();
5295
5296         return ret;
5297 }
5298 EXPORT_SYMBOL(netif_receive_skb_core);
5299
5300 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5301                                                   struct packet_type *pt_prev,
5302                                                   struct net_device *orig_dev)
5303 {
5304         struct sk_buff *skb, *next;
5305
5306         if (!pt_prev)
5307                 return;
5308         if (list_empty(head))
5309                 return;
5310         if (pt_prev->list_func != NULL)
5311                 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5312                                    ip_list_rcv, head, pt_prev, orig_dev);
5313         else
5314                 list_for_each_entry_safe(skb, next, head, list) {
5315                         skb_list_del_init(skb);
5316                         pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5317                 }
5318 }
5319
5320 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5321 {
5322         /* Fast-path assumptions:
5323          * - There is no RX handler.
5324          * - Only one packet_type matches.
5325          * If either of these fails, we will end up doing some per-packet
5326          * processing in-line, then handling the 'last ptype' for the whole
5327          * sublist.  This can't cause out-of-order delivery to any single ptype,
5328          * because the 'last ptype' must be constant across the sublist, and all
5329          * other ptypes are handled per-packet.
5330          */
5331         /* Current (common) ptype of sublist */
5332         struct packet_type *pt_curr = NULL;
5333         /* Current (common) orig_dev of sublist */
5334         struct net_device *od_curr = NULL;
5335         struct list_head sublist;
5336         struct sk_buff *skb, *next;
5337
5338         INIT_LIST_HEAD(&sublist);
5339         list_for_each_entry_safe(skb, next, head, list) {
5340                 struct net_device *orig_dev = skb->dev;
5341                 struct packet_type *pt_prev = NULL;
5342
5343                 skb_list_del_init(skb);
5344                 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5345                 if (!pt_prev)
5346                         continue;
5347                 if (pt_curr != pt_prev || od_curr != orig_dev) {
5348                         /* dispatch old sublist */
5349                         __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5350                         /* start new sublist */
5351                         INIT_LIST_HEAD(&sublist);
5352                         pt_curr = pt_prev;
5353                         od_curr = orig_dev;
5354                 }
5355                 list_add_tail(&skb->list, &sublist);
5356         }
5357
5358         /* dispatch final sublist */
5359         __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5360 }
5361
5362 static int __netif_receive_skb(struct sk_buff *skb)
5363 {
5364         int ret;
5365
5366         if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5367                 unsigned int noreclaim_flag;
5368
5369                 /*
5370                  * PFMEMALLOC skbs are special, they should
5371                  * - be delivered to SOCK_MEMALLOC sockets only
5372                  * - stay away from userspace
5373                  * - have bounded memory usage
5374                  *
5375                  * Use PF_MEMALLOC as this saves us from propagating the allocation
5376                  * context down to all allocation sites.
5377                  */
5378                 noreclaim_flag = memalloc_noreclaim_save();
5379                 ret = __netif_receive_skb_one_core(skb, true);
5380                 memalloc_noreclaim_restore(noreclaim_flag);
5381         } else
5382                 ret = __netif_receive_skb_one_core(skb, false);
5383
5384         return ret;
5385 }
5386
5387 static void __netif_receive_skb_list(struct list_head *head)
5388 {
5389         unsigned long noreclaim_flag = 0;
5390         struct sk_buff *skb, *next;
5391         bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5392
5393         list_for_each_entry_safe(skb, next, head, list) {
5394                 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5395                         struct list_head sublist;
5396
5397                         /* Handle the previous sublist */
5398                         list_cut_before(&sublist, head, &skb->list);
5399                         if (!list_empty(&sublist))
5400                                 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5401                         pfmemalloc = !pfmemalloc;
5402                         /* See comments in __netif_receive_skb */
5403                         if (pfmemalloc)
5404                                 noreclaim_flag = memalloc_noreclaim_save();
5405                         else
5406                                 memalloc_noreclaim_restore(noreclaim_flag);
5407                 }
5408         }
5409         /* Handle the remaining sublist */
5410         if (!list_empty(head))
5411                 __netif_receive_skb_list_core(head, pfmemalloc);
5412         /* Restore pflags */
5413         if (pfmemalloc)
5414                 memalloc_noreclaim_restore(noreclaim_flag);
5415 }
5416
5417 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5418 {
5419         struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5420         struct bpf_prog *new = xdp->prog;
5421         int ret = 0;
5422
5423         if (new) {
5424                 u32 i;
5425
5426                 /* generic XDP does not work with DEVMAPs that can
5427                  * have a bpf_prog installed on an entry
5428                  */
5429                 for (i = 0; i < new->aux->used_map_cnt; i++) {
5430                         if (dev_map_can_have_prog(new->aux->used_maps[i]))
5431                                 return -EINVAL;
5432                 }
5433         }
5434
5435         switch (xdp->command) {
5436         case XDP_SETUP_PROG:
5437                 rcu_assign_pointer(dev->xdp_prog, new);
5438                 if (old)
5439                         bpf_prog_put(old);
5440
5441                 if (old && !new) {
5442                         static_branch_dec(&generic_xdp_needed_key);
5443                 } else if (new && !old) {
5444                         static_branch_inc(&generic_xdp_needed_key);
5445                         dev_disable_lro(dev);
5446                         dev_disable_gro_hw(dev);
5447                 }
5448                 break;
5449
5450         case XDP_QUERY_PROG:
5451                 xdp->prog_id = old ? old->aux->id : 0;
5452                 break;
5453
5454         default:
5455                 ret = -EINVAL;
5456                 break;
5457         }
5458
5459         return ret;
5460 }
5461
5462 static int netif_receive_skb_internal(struct sk_buff *skb)
5463 {
5464         int ret;
5465
5466         net_timestamp_check(netdev_tstamp_prequeue, skb);
5467
5468         if (skb_defer_rx_timestamp(skb))
5469                 return NET_RX_SUCCESS;
5470
5471         rcu_read_lock();
5472 #ifdef CONFIG_RPS
5473         if (static_branch_unlikely(&rps_needed)) {
5474                 struct rps_dev_flow voidflow, *rflow = &voidflow;
5475                 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5476
5477                 if (cpu >= 0) {
5478                         ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5479                         rcu_read_unlock();
5480                         return ret;
5481                 }
5482         }
5483 #endif
5484         ret = __netif_receive_skb(skb);
5485         rcu_read_unlock();
5486         return ret;
5487 }
5488
5489 static void netif_receive_skb_list_internal(struct list_head *head)
5490 {
5491         struct sk_buff *skb, *next;
5492         struct list_head sublist;
5493
5494         INIT_LIST_HEAD(&sublist);
5495         list_for_each_entry_safe(skb, next, head, list) {
5496                 net_timestamp_check(netdev_tstamp_prequeue, skb);
5497                 skb_list_del_init(skb);
5498                 if (!skb_defer_rx_timestamp(skb))
5499                         list_add_tail(&skb->list, &sublist);
5500         }
5501         list_splice_init(&sublist, head);
5502
5503         rcu_read_lock();
5504 #ifdef CONFIG_RPS
5505         if (static_branch_unlikely(&rps_needed)) {
5506                 list_for_each_entry_safe(skb, next, head, list) {
5507                         struct rps_dev_flow voidflow, *rflow = &voidflow;
5508                         int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5509
5510                         if (cpu >= 0) {
5511                                 /* Will be handled, remove from list */
5512                                 skb_list_del_init(skb);
5513                                 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5514                         }
5515                 }
5516         }
5517 #endif
5518         __netif_receive_skb_list(head);
5519         rcu_read_unlock();
5520 }
5521
5522 /**
5523  *      netif_receive_skb - process receive buffer from network
5524  *      @skb: buffer to process
5525  *
5526  *      netif_receive_skb() is the main receive data processing function.
5527  *      It always succeeds. The buffer may be dropped during processing
5528  *      for congestion control or by the protocol layers.
5529  *
5530  *      This function may only be called from softirq context and interrupts
5531  *      should be enabled.
5532  *
5533  *      Return values (usually ignored):
5534  *      NET_RX_SUCCESS: no congestion
5535  *      NET_RX_DROP: packet was dropped
5536  */
5537 int netif_receive_skb(struct sk_buff *skb)
5538 {
5539         int ret;
5540
5541         trace_netif_receive_skb_entry(skb);
5542
5543         ret = netif_receive_skb_internal(skb);
5544         trace_netif_receive_skb_exit(ret);
5545
5546         return ret;
5547 }
5548 EXPORT_SYMBOL(netif_receive_skb);
5549
5550 /**
5551  *      netif_receive_skb_list - process many receive buffers from network
5552  *      @head: list of skbs to process.
5553  *
5554  *      Since return value of netif_receive_skb() is normally ignored, and
5555  *      wouldn't be meaningful for a list, this function returns void.
5556  *
5557  *      This function may only be called from softirq context and interrupts
5558  *      should be enabled.
5559  */
5560 void netif_receive_skb_list(struct list_head *head)
5561 {
5562         struct sk_buff *skb;
5563
5564         if (list_empty(head))
5565                 return;
5566         if (trace_netif_receive_skb_list_entry_enabled()) {
5567                 list_for_each_entry(skb, head, list)
5568                         trace_netif_receive_skb_list_entry(skb);
5569         }
5570         netif_receive_skb_list_internal(head);
5571         trace_netif_receive_skb_list_exit(0);
5572 }
5573 EXPORT_SYMBOL(netif_receive_skb_list);
5574
5575 DEFINE_PER_CPU(struct work_struct, flush_works);
5576
5577 /* Network device is going away, flush any packets still pending */
5578 static void flush_backlog(struct work_struct *work)
5579 {
5580         struct sk_buff *skb, *tmp;
5581         struct softnet_data *sd;
5582
5583         local_bh_disable();
5584         sd = this_cpu_ptr(&softnet_data);
5585
5586         local_irq_disable();
5587         rps_lock(sd);
5588         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5589                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5590                         __skb_unlink(skb, &sd->input_pkt_queue);
5591                         kfree_skb(skb);
5592                         input_queue_head_incr(sd);
5593                 }
5594         }
5595         rps_unlock(sd);
5596         local_irq_enable();
5597
5598         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5599                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5600                         __skb_unlink(skb, &sd->process_queue);
5601                         kfree_skb(skb);
5602                         input_queue_head_incr(sd);
5603                 }
5604         }
5605         local_bh_enable();
5606 }
5607
5608 static void flush_all_backlogs(void)
5609 {
5610         unsigned int cpu;
5611
5612         get_online_cpus();
5613
5614         for_each_online_cpu(cpu)
5615                 queue_work_on(cpu, system_highpri_wq,
5616                               per_cpu_ptr(&flush_works, cpu));
5617
5618         for_each_online_cpu(cpu)
5619                 flush_work(per_cpu_ptr(&flush_works, cpu));
5620
5621         put_online_cpus();
5622 }
5623
5624 /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
5625 static void gro_normal_list(struct napi_struct *napi)
5626 {
5627         if (!napi->rx_count)
5628                 return;
5629         netif_receive_skb_list_internal(&napi->rx_list);
5630         INIT_LIST_HEAD(&napi->rx_list);
5631         napi->rx_count = 0;
5632 }
5633
5634 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
5635  * pass the whole batch up to the stack.
5636  */
5637 static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
5638 {
5639         list_add_tail(&skb->list, &napi->rx_list);
5640         if (++napi->rx_count >= gro_normal_batch)
5641                 gro_normal_list(napi);
5642 }
5643
5644 INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
5645 INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
5646 static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
5647 {
5648         struct packet_offload *ptype;
5649         __be16 type = skb->protocol;
5650         struct list_head *head = &offload_base;
5651         int err = -ENOENT;
5652
5653         BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5654
5655         if (NAPI_GRO_CB(skb)->count == 1) {
5656                 skb_shinfo(skb)->gso_size = 0;
5657                 goto out;
5658         }
5659
5660         rcu_read_lock();
5661         list_for_each_entry_rcu(ptype, head, list) {
5662                 if (ptype->type != type || !ptype->callbacks.gro_complete)
5663                         continue;
5664
5665                 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
5666                                          ipv6_gro_complete, inet_gro_complete,
5667                                          skb, 0);
5668                 break;
5669         }
5670         rcu_read_unlock();
5671
5672         if (err) {
5673                 WARN_ON(&ptype->list == head);
5674                 kfree_skb(skb);
5675                 return NET_RX_SUCCESS;
5676         }
5677
5678 out:
5679         gro_normal_one(napi, skb);
5680         return NET_RX_SUCCESS;
5681 }
5682
5683 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5684                                    bool flush_old)
5685 {
5686         struct list_head *head = &napi->gro_hash[index].list;
5687         struct sk_buff *skb, *p;
5688
5689         list_for_each_entry_safe_reverse(skb, p, head, list) {
5690                 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5691                         return;
5692                 skb_list_del_init(skb);
5693                 napi_gro_complete(napi, skb);
5694                 napi->gro_hash[index].count--;
5695         }
5696
5697         if (!napi->gro_hash[index].count)
5698                 __clear_bit(index, &napi->gro_bitmask);
5699 }
5700
5701 /* napi->gro_hash[].list contains packets ordered by age.
5702  * youngest packets at the head of it.
5703  * Complete skbs in reverse order to reduce latencies.
5704  */
5705 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5706 {
5707         unsigned long bitmask = napi->gro_bitmask;
5708         unsigned int i, base = ~0U;
5709
5710         while ((i = ffs(bitmask)) != 0) {
5711                 bitmask >>= i;
5712                 base += i;
5713                 __napi_gro_flush_chain(napi, base, flush_old);
5714         }
5715 }
5716 EXPORT_SYMBOL(napi_gro_flush);
5717
5718 static struct list_head *gro_list_prepare(struct napi_struct *napi,
5719                                           struct sk_buff *skb)
5720 {
5721         unsigned int maclen = skb->dev->hard_header_len;
5722         u32 hash = skb_get_hash_raw(skb);
5723         struct list_head *head;
5724         struct sk_buff *p;
5725
5726         head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
5727         list_for_each_entry(p, head, list) {
5728                 unsigned long diffs;
5729
5730                 NAPI_GRO_CB(p)->flush = 0;
5731
5732                 if (hash != skb_get_hash_raw(p)) {
5733                         NAPI_GRO_CB(p)->same_flow = 0;
5734                         continue;
5735                 }
5736
5737                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
5738                 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
5739                 if (skb_vlan_tag_present(p))
5740                         diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
5741                 diffs |= skb_metadata_dst_cmp(p, skb);
5742                 diffs |= skb_metadata_differs(p, skb);
5743                 if (maclen == ETH_HLEN)
5744                         diffs |= compare_ether_header(skb_mac_header(p),
5745                                                       skb_mac_header(skb));
5746                 else if (!diffs)
5747                         diffs = memcmp(skb_mac_header(p),
5748                                        skb_mac_header(skb),
5749                                        maclen);
5750                 NAPI_GRO_CB(p)->same_flow = !diffs;
5751         }
5752
5753         return head;
5754 }
5755
5756 static void skb_gro_reset_offset(struct sk_buff *skb)
5757 {
5758         const struct skb_shared_info *pinfo = skb_shinfo(skb);
5759         const skb_frag_t *frag0 = &pinfo->frags[0];
5760
5761         NAPI_GRO_CB(skb)->data_offset = 0;
5762         NAPI_GRO_CB(skb)->frag0 = NULL;
5763         NAPI_GRO_CB(skb)->frag0_len = 0;
5764
5765         if (!skb_headlen(skb) && pinfo->nr_frags &&
5766             !PageHighMem(skb_frag_page(frag0))) {
5767                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
5768                 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5769                                                     skb_frag_size(frag0),
5770                                                     skb->end - skb->tail);
5771         }
5772 }
5773
5774 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5775 {
5776         struct skb_shared_info *pinfo = skb_shinfo(skb);
5777
5778         BUG_ON(skb->end - skb->tail < grow);
5779
5780         memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5781
5782         skb->data_len -= grow;
5783         skb->tail += grow;
5784
5785         skb_frag_off_add(&pinfo->frags[0], grow);
5786         skb_frag_size_sub(&pinfo->frags[0], grow);
5787
5788         if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5789                 skb_frag_unref(skb, 0);
5790                 memmove(pinfo->frags, pinfo->frags + 1,
5791                         --pinfo->nr_frags * sizeof(pinfo->frags[0]));
5792         }
5793 }
5794
5795 static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
5796 {
5797         struct sk_buff *oldest;
5798
5799         oldest = list_last_entry(head, struct sk_buff, list);
5800
5801         /* We are called with head length >= MAX_GRO_SKBS, so this is
5802          * impossible.
5803          */
5804         if (WARN_ON_ONCE(!oldest))
5805                 return;
5806
5807         /* Do not adjust napi->gro_hash[].count, caller is adding a new
5808          * SKB to the chain.
5809          */
5810         skb_list_del_init(oldest);
5811         napi_gro_complete(napi, oldest);
5812 }
5813
5814 INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
5815                                                            struct sk_buff *));
5816 INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
5817                                                            struct sk_buff *));
5818 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5819 {
5820         u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
5821         struct list_head *head = &offload_base;
5822         struct packet_offload *ptype;
5823         __be16 type = skb->protocol;
5824         struct list_head *gro_head;
5825         struct sk_buff *pp = NULL;
5826         enum gro_result ret;
5827         int same_flow;
5828         int grow;
5829
5830         if (netif_elide_gro(skb->dev))
5831                 goto normal;
5832
5833         gro_head = gro_list_prepare(napi, skb);
5834
5835         rcu_read_lock();
5836         list_for_each_entry_rcu(ptype, head, list) {
5837                 if (ptype->type != type || !ptype->callbacks.gro_receive)
5838                         continue;
5839
5840                 skb_set_network_header(skb, skb_gro_offset(skb));
5841                 skb_reset_mac_len(skb);
5842                 NAPI_GRO_CB(skb)->same_flow = 0;
5843                 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5844                 NAPI_GRO_CB(skb)->free = 0;
5845                 NAPI_GRO_CB(skb)->encap_mark = 0;
5846                 NAPI_GRO_CB(skb)->recursion_counter = 0;
5847                 NAPI_GRO_CB(skb)->is_fou = 0;
5848                 NAPI_GRO_CB(skb)->is_atomic = 1;
5849                 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
5850
5851                 /* Setup for GRO checksum validation */
5852                 switch (skb->ip_summed) {
5853                 case CHECKSUM_COMPLETE:
5854                         NAPI_GRO_CB(skb)->csum = skb->csum;
5855                         NAPI_GRO_CB(skb)->csum_valid = 1;
5856                         NAPI_GRO_CB(skb)->csum_cnt = 0;
5857                         break;
5858                 case CHECKSUM_UNNECESSARY:
5859                         NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
5860                         NAPI_GRO_CB(skb)->csum_valid = 0;
5861                         break;
5862                 default:
5863                         NAPI_GRO_CB(skb)->csum_cnt = 0;
5864                         NAPI_GRO_CB(skb)->csum_valid = 0;
5865                 }
5866
5867                 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
5868                                         ipv6_gro_receive, inet_gro_receive,
5869                                         gro_head, skb);
5870                 break;
5871         }
5872         rcu_read_unlock();
5873
5874         if (&ptype->list == head)
5875                 goto normal;
5876
5877         if (PTR_ERR(pp) == -EINPROGRESS) {
5878                 ret = GRO_CONSUMED;
5879                 goto ok;
5880         }
5881
5882         same_flow = NAPI_GRO_CB(skb)->same_flow;
5883         ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
5884
5885         if (pp) {
5886                 skb_list_del_init(pp);
5887                 napi_gro_complete(napi, pp);
5888                 napi->gro_hash[hash].count--;
5889         }
5890
5891         if (same_flow)
5892                 goto ok;
5893
5894         if (NAPI_GRO_CB(skb)->flush)
5895                 goto normal;
5896
5897         if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
5898                 gro_flush_oldest(napi, gro_head);
5899         } else {
5900                 napi->gro_hash[hash].count++;
5901         }
5902         NAPI_GRO_CB(skb)->count = 1;
5903         NAPI_GRO_CB(skb)->age = jiffies;
5904         NAPI_GRO_CB(skb)->last = skb;
5905         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
5906         list_add(&skb->list, gro_head);
5907         ret = GRO_HELD;
5908
5909 pull:
5910         grow = skb_gro_offset(skb) - skb_headlen(skb);
5911         if (grow > 0)
5912                 gro_pull_from_frag0(skb, grow);
5913 ok:
5914         if (napi->gro_hash[hash].count) {
5915                 if (!test_bit(hash, &napi->gro_bitmask))
5916                         __set_bit(hash, &napi->gro_bitmask);
5917         } else if (test_bit(hash, &napi->gro_bitmask)) {
5918                 __clear_bit(hash, &napi->gro_bitmask);
5919         }
5920
5921         return ret;
5922
5923 normal:
5924         ret = GRO_NORMAL;
5925         goto pull;
5926 }
5927
5928 struct packet_offload *gro_find_receive_by_type(__be16 type)
5929 {
5930         struct list_head *offload_head = &offload_base;
5931         struct packet_offload *ptype;
5932
5933         list_for_each_entry_rcu(ptype, offload_head, list) {
5934                 if (ptype->type != type || !ptype->callbacks.gro_receive)
5935                         continue;
5936                 return ptype;
5937         }
5938         return NULL;
5939 }
5940 EXPORT_SYMBOL(gro_find_receive_by_type);
5941
5942 struct packet_offload *gro_find_complete_by_type(__be16 type)
5943 {
5944         struct list_head *offload_head = &offload_base;
5945         struct packet_offload *ptype;
5946
5947         list_for_each_entry_rcu(ptype, offload_head, list) {
5948                 if (ptype->type != type || !ptype->callbacks.gro_complete)
5949                         continue;
5950                 return ptype;
5951         }
5952         return NULL;
5953 }
5954 EXPORT_SYMBOL(gro_find_complete_by_type);
5955
5956 static void napi_skb_free_stolen_head(struct sk_buff *skb)
5957 {
5958         skb_dst_drop(skb);
5959         skb_ext_put(skb);
5960         kmem_cache_free(skbuff_head_cache, skb);
5961 }
5962
5963 static gro_result_t napi_skb_finish(struct napi_struct *napi,
5964                                     struct sk_buff *skb,
5965                                     gro_result_t ret)
5966 {
5967         switch (ret) {
5968         case GRO_NORMAL:
5969                 gro_normal_one(napi, skb);
5970                 break;
5971
5972         case GRO_DROP:
5973                 kfree_skb(skb);
5974                 break;
5975
5976         case GRO_MERGED_FREE:
5977                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5978                         napi_skb_free_stolen_head(skb);
5979                 else
5980                         __kfree_skb(skb);
5981                 break;
5982
5983         case GRO_HELD:
5984         case GRO_MERGED:
5985         case GRO_CONSUMED:
5986                 break;
5987         }
5988
5989         return ret;
5990 }
5991
5992 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5993 {
5994         gro_result_t ret;
5995
5996         skb_mark_napi_id(skb, napi);
5997         trace_napi_gro_receive_entry(skb);
5998
5999         skb_gro_reset_offset(skb);
6000
6001         ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
6002         trace_napi_gro_receive_exit(ret);
6003
6004         return ret;
6005 }
6006 EXPORT_SYMBOL(napi_gro_receive);
6007
6008 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
6009 {
6010         if (unlikely(skb->pfmemalloc)) {
6011                 consume_skb(skb);
6012                 return;
6013         }
6014         __skb_pull(skb, skb_headlen(skb));
6015         /* restore the reserve we had after netdev_alloc_skb_ip_align() */
6016         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
6017         __vlan_hwaccel_clear_tag(skb);
6018         skb->dev = napi->dev;
6019         skb->skb_iif = 0;
6020
6021         /* eth_type_trans() assumes pkt_type is PACKET_HOST */
6022         skb->pkt_type = PACKET_HOST;
6023
6024         skb->encapsulation = 0;
6025         skb_shinfo(skb)->gso_type = 0;
6026         skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6027         skb_ext_reset(skb);
6028
6029         napi->skb = skb;
6030 }
6031
6032 struct sk_buff *napi_get_frags(struct napi_struct *napi)
6033 {
6034         struct sk_buff *skb = napi->skb;
6035
6036         if (!skb) {
6037                 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
6038                 if (skb) {
6039                         napi->skb = skb;
6040                         skb_mark_napi_id(skb, napi);
6041                 }
6042         }
6043         return skb;
6044 }
6045 EXPORT_SYMBOL(napi_get_frags);
6046
6047 static gro_result_t napi_frags_finish(struct napi_struct *napi,
6048                                       struct sk_buff *skb,
6049                                       gro_result_t ret)
6050 {
6051         switch (ret) {
6052         case GRO_NORMAL:
6053         case GRO_HELD:
6054                 __skb_push(skb, ETH_HLEN);
6055                 skb->protocol = eth_type_trans(skb, skb->dev);
6056                 if (ret == GRO_NORMAL)
6057                         gro_normal_one(napi, skb);
6058                 break;
6059
6060         case GRO_DROP:
6061                 napi_reuse_skb(napi, skb);
6062                 break;
6063
6064         case GRO_MERGED_FREE:
6065                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6066                         napi_skb_free_stolen_head(skb);
6067                 else
6068                         napi_reuse_skb(napi, skb);
6069                 break;
6070
6071         case GRO_MERGED:
6072         case GRO_CONSUMED:
6073                 break;
6074         }
6075
6076         return ret;
6077 }
6078
6079 /* Upper GRO stack assumes network header starts at gro_offset=0
6080  * Drivers could call both napi_gro_frags() and napi_gro_receive()
6081  * We copy ethernet header into skb->data to have a common layout.
6082  */
6083 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
6084 {
6085         struct sk_buff *skb = napi->skb;
6086         const struct ethhdr *eth;
6087         unsigned int hlen = sizeof(*eth);
6088
6089         napi->skb = NULL;
6090
6091         skb_reset_mac_header(skb);
6092         skb_gro_reset_offset(skb);
6093
6094         if (unlikely(skb_gro_header_hard(skb, hlen))) {
6095                 eth = skb_gro_header_slow(skb, hlen, 0);
6096                 if (unlikely(!eth)) {
6097                         net_warn_ratelimited("%s: dropping impossible skb from %s\n",
6098                                              __func__, napi->dev->name);
6099                         napi_reuse_skb(napi, skb);
6100                         return NULL;
6101                 }
6102         } else {
6103                 eth = (const struct ethhdr *)skb->data;
6104                 gro_pull_from_frag0(skb, hlen);
6105                 NAPI_GRO_CB(skb)->frag0 += hlen;
6106                 NAPI_GRO_CB(skb)->frag0_len -= hlen;
6107         }
6108         __skb_pull(skb, hlen);
6109
6110         /*
6111          * This works because the only protocols we care about don't require
6112          * special handling.
6113          * We'll fix it up properly in napi_frags_finish()
6114          */
6115         skb->protocol = eth->h_proto;
6116
6117         return skb;
6118 }
6119
6120 gro_result_t napi_gro_frags(struct napi_struct *napi)
6121 {
6122         gro_result_t ret;
6123         struct sk_buff *skb = napi_frags_skb(napi);
6124
6125         if (!skb)
6126                 return GRO_DROP;
6127
6128         trace_napi_gro_frags_entry(skb);
6129
6130         ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
6131         trace_napi_gro_frags_exit(ret);
6132
6133         return ret;
6134 }
6135 EXPORT_SYMBOL(napi_gro_frags);
6136
6137 /* Compute the checksum from gro_offset and return the folded value
6138  * after adding in any pseudo checksum.
6139  */
6140 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
6141 {
6142         __wsum wsum;
6143         __sum16 sum;
6144
6145         wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
6146
6147         /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
6148         sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
6149         /* See comments in __skb_checksum_complete(). */
6150         if (likely(!sum)) {
6151                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
6152                     !skb->csum_complete_sw)
6153                         netdev_rx_csum_fault(skb->dev, skb);
6154         }
6155
6156         NAPI_GRO_CB(skb)->csum = wsum;
6157         NAPI_GRO_CB(skb)->csum_valid = 1;
6158
6159         return sum;
6160 }
6161 EXPORT_SYMBOL(__skb_gro_checksum_complete);
6162
6163 static void net_rps_send_ipi(struct softnet_data *remsd)
6164 {
6165 #ifdef CONFIG_RPS
6166         while (remsd) {
6167                 struct softnet_data *next = remsd->rps_ipi_next;
6168
6169                 if (cpu_online(remsd->cpu))
6170                         smp_call_function_single_async(remsd->cpu, &remsd->csd);
6171                 remsd = next;
6172         }
6173 #endif
6174 }
6175
6176 /*
6177  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
6178  * Note: called with local irq disabled, but exits with local irq enabled.
6179  */
6180 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
6181 {
6182 #ifdef CONFIG_RPS
6183         struct softnet_data *remsd = sd->rps_ipi_list;
6184
6185         if (remsd) {
6186                 sd->rps_ipi_list = NULL;
6187
6188                 local_irq_enable();
6189
6190                 /* Send pending IPI's to kick RPS processing on remote cpus. */
6191                 net_rps_send_ipi(remsd);
6192         } else
6193 #endif
6194                 local_irq_enable();
6195 }
6196
6197 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
6198 {
6199 #ifdef CONFIG_RPS
6200         return sd->rps_ipi_list != NULL;
6201 #else
6202         return false;
6203 #endif
6204 }
6205
6206 static int process_backlog(struct napi_struct *napi, int quota)
6207 {
6208         struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
6209         bool again = true;
6210         int work = 0;
6211
6212         /* Check if we have pending ipi, its better to send them now,
6213          * not waiting net_rx_action() end.
6214          */
6215         if (sd_has_rps_ipi_waiting(sd)) {
6216                 local_irq_disable();
6217                 net_rps_action_and_irq_enable(sd);
6218         }
6219
6220         napi->weight = dev_rx_weight;
6221         while (again) {
6222                 struct sk_buff *skb;
6223
6224                 while ((skb = __skb_dequeue(&sd->process_queue))) {
6225                         rcu_read_lock();
6226                         __netif_receive_skb(skb);
6227                         rcu_read_unlock();
6228                         input_queue_head_incr(sd);
6229                         if (++work >= quota)
6230                                 return work;
6231
6232                 }
6233
6234                 local_irq_disable();
6235                 rps_lock(sd);
6236                 if (skb_queue_empty(&sd->input_pkt_queue)) {
6237                         /*
6238                          * Inline a custom version of __napi_complete().
6239                          * only current cpu owns and manipulates this napi,
6240                          * and NAPI_STATE_SCHED is the only possible flag set
6241                          * on backlog.
6242                          * We can use a plain write instead of clear_bit(),
6243                          * and we dont need an smp_mb() memory barrier.
6244                          */
6245                         napi->state = 0;
6246                         again = false;
6247                 } else {
6248                         skb_queue_splice_tail_init(&sd->input_pkt_queue,
6249                                                    &sd->process_queue);
6250                 }
6251                 rps_unlock(sd);
6252                 local_irq_enable();
6253         }
6254
6255         return work;
6256 }
6257
6258 /**
6259  * __napi_schedule - schedule for receive
6260  * @n: entry to schedule
6261  *
6262  * The entry's receive function will be scheduled to run.
6263  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6264  */
6265 void __napi_schedule(struct napi_struct *n)
6266 {
6267         unsigned long flags;
6268
6269         local_irq_save(flags);
6270         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6271         local_irq_restore(flags);
6272 }
6273 EXPORT_SYMBOL(__napi_schedule);
6274
6275 /**
6276  *      napi_schedule_prep - check if napi can be scheduled
6277  *      @n: napi context
6278  *
6279  * Test if NAPI routine is already running, and if not mark
6280  * it as running.  This is used as a condition variable
6281  * insure only one NAPI poll instance runs.  We also make
6282  * sure there is no pending NAPI disable.
6283  */
6284 bool napi_schedule_prep(struct napi_struct *n)
6285 {
6286         unsigned long val, new;
6287
6288         do {
6289                 val = READ_ONCE(n->state);
6290                 if (unlikely(val & NAPIF_STATE_DISABLE))
6291                         return false;
6292                 new = val | NAPIF_STATE_SCHED;
6293
6294                 /* Sets STATE_MISSED bit if STATE_SCHED was already set
6295                  * This was suggested by Alexander Duyck, as compiler
6296                  * emits better code than :
6297                  * if (val & NAPIF_STATE_SCHED)
6298                  *     new |= NAPIF_STATE_MISSED;
6299                  */
6300                 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6301                                                    NAPIF_STATE_MISSED;
6302         } while (cmpxchg(&n->state, val, new) != val);
6303
6304         return !(val & NAPIF_STATE_SCHED);
6305 }
6306 EXPORT_SYMBOL(napi_schedule_prep);
6307
6308 /**
6309  * __napi_schedule_irqoff - schedule for receive
6310  * @n: entry to schedule
6311  *
6312  * Variant of __napi_schedule() assuming hard irqs are masked
6313  */
6314 void __napi_schedule_irqoff(struct napi_struct *n)
6315 {
6316         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6317 }
6318 EXPORT_SYMBOL(__napi_schedule_irqoff);
6319
6320 bool napi_complete_done(struct napi_struct *n, int work_done)
6321 {
6322         unsigned long flags, val, new, timeout = 0;
6323         bool ret = true;
6324
6325         /*
6326          * 1) Don't let napi dequeue from the cpu poll list
6327          *    just in case its running on a different cpu.
6328          * 2) If we are busy polling, do nothing here, we have
6329          *    the guarantee we will be called later.
6330          */
6331         if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6332                                  NAPIF_STATE_IN_BUSY_POLL)))
6333                 return false;
6334
6335         if (work_done) {
6336                 if (n->gro_bitmask)
6337                         timeout = READ_ONCE(n->dev->gro_flush_timeout);
6338                 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6339         }
6340         if (n->defer_hard_irqs_count > 0) {
6341                 n->defer_hard_irqs_count--;
6342                 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6343                 if (timeout)
6344                         ret = false;
6345         }
6346         if (n->gro_bitmask) {
6347                 /* When the NAPI instance uses a timeout and keeps postponing
6348                  * it, we need to bound somehow the time packets are kept in
6349                  * the GRO layer
6350                  */
6351                 napi_gro_flush(n, !!timeout);
6352         }
6353
6354         gro_normal_list(n);
6355
6356         if (unlikely(!list_empty(&n->poll_list))) {
6357                 /* If n->poll_list is not empty, we need to mask irqs */
6358                 local_irq_save(flags);
6359                 list_del_init(&n->poll_list);
6360                 local_irq_restore(flags);
6361         }
6362
6363         do {
6364                 val = READ_ONCE(n->state);
6365
6366                 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6367
6368                 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
6369
6370                 /* If STATE_MISSED was set, leave STATE_SCHED set,
6371                  * because we will call napi->poll() one more time.
6372                  * This C code was suggested by Alexander Duyck to help gcc.
6373                  */
6374                 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6375                                                     NAPIF_STATE_SCHED;
6376         } while (cmpxchg(&n->state, val, new) != val);
6377
6378         if (unlikely(val & NAPIF_STATE_MISSED)) {
6379                 __napi_schedule(n);
6380                 return false;
6381         }
6382
6383         if (timeout)
6384                 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6385                               HRTIMER_MODE_REL_PINNED);
6386         return ret;
6387 }
6388 EXPORT_SYMBOL(napi_complete_done);
6389
6390 /* must be called under rcu_read_lock(), as we dont take a reference */
6391 static struct napi_struct *napi_by_id(unsigned int napi_id)
6392 {
6393         unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6394         struct napi_struct *napi;
6395
6396         hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6397                 if (napi->napi_id == napi_id)
6398                         return napi;
6399
6400         return NULL;
6401 }
6402
6403 #if defined(CONFIG_NET_RX_BUSY_POLL)
6404
6405 #define BUSY_POLL_BUDGET 8
6406
6407 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
6408 {
6409         int rc;
6410
6411         /* Busy polling means there is a high chance device driver hard irq
6412          * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6413          * set in napi_schedule_prep().
6414          * Since we are about to call napi->poll() once more, we can safely
6415          * clear NAPI_STATE_MISSED.
6416          *
6417          * Note: x86 could use a single "lock and ..." instruction
6418          * to perform these two clear_bit()
6419          */
6420         clear_bit(NAPI_STATE_MISSED, &napi->state);
6421         clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6422
6423         local_bh_disable();
6424
6425         /* All we really want here is to re-enable device interrupts.
6426          * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6427          */
6428         rc = napi->poll(napi, BUSY_POLL_BUDGET);
6429         /* We can't gro_normal_list() here, because napi->poll() might have
6430          * rearmed the napi (napi_complete_done()) in which case it could
6431          * already be running on another CPU.
6432          */
6433         trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
6434         netpoll_poll_unlock(have_poll_lock);
6435         if (rc == BUSY_POLL_BUDGET) {
6436                 /* As the whole budget was spent, we still own the napi so can
6437                  * safely handle the rx_list.
6438                  */
6439                 gro_normal_list(napi);
6440                 __napi_schedule(napi);
6441         }
6442         local_bh_enable();
6443 }
6444
6445 void napi_busy_loop(unsigned int napi_id,
6446                     bool (*loop_end)(void *, unsigned long),
6447                     void *loop_end_arg)
6448 {
6449         unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6450         int (*napi_poll)(struct napi_struct *napi, int budget);
6451         void *have_poll_lock = NULL;
6452         struct napi_struct *napi;
6453
6454 restart:
6455         napi_poll = NULL;
6456
6457         rcu_read_lock();
6458
6459         napi = napi_by_id(napi_id);
6460         if (!napi)
6461                 goto out;
6462
6463         preempt_disable();
6464         for (;;) {
6465                 int work = 0;
6466
6467                 local_bh_disable();
6468                 if (!napi_poll) {
6469                         unsigned long val = READ_ONCE(napi->state);
6470
6471                         /* If multiple threads are competing for this napi,
6472                          * we avoid dirtying napi->state as much as we can.
6473                          */
6474                         if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6475                                    NAPIF_STATE_IN_BUSY_POLL))
6476                                 goto count;
6477                         if (cmpxchg(&napi->state, val,
6478                                     val | NAPIF_STATE_IN_BUSY_POLL |
6479                                           NAPIF_STATE_SCHED) != val)
6480                                 goto count;
6481                         have_poll_lock = netpoll_poll_lock(napi);
6482                         napi_poll = napi->poll;
6483                 }
6484                 work = napi_poll(napi, BUSY_POLL_BUDGET);
6485                 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
6486                 gro_normal_list(napi);
6487 count:
6488                 if (work > 0)
6489                         __NET_ADD_STATS(dev_net(napi->dev),
6490                                         LINUX_MIB_BUSYPOLLRXPACKETS, work);
6491                 local_bh_enable();
6492
6493                 if (!loop_end || loop_end(loop_end_arg, start_time))
6494                         break;
6495
6496                 if (unlikely(need_resched())) {
6497                         if (napi_poll)
6498                                 busy_poll_stop(napi, have_poll_lock);
6499                         preempt_enable();
6500                         rcu_read_unlock();
6501                         cond_resched();
6502                         if (loop_end(loop_end_arg, start_time))
6503                                 return;
6504                         goto restart;
6505                 }
6506                 cpu_relax();
6507         }
6508         if (napi_poll)
6509                 busy_poll_stop(napi, have_poll_lock);
6510         preempt_enable();
6511 out:
6512         rcu_read_unlock();
6513 }
6514 EXPORT_SYMBOL(napi_busy_loop);
6515
6516 #endif /* CONFIG_NET_RX_BUSY_POLL */
6517
6518 static void napi_hash_add(struct napi_struct *napi)
6519 {
6520         if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
6521             test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
6522                 return;
6523
6524         spin_lock(&napi_hash_lock);
6525
6526         /* 0..NR_CPUS range is reserved for sender_cpu use */
6527         do {
6528                 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6529                         napi_gen_id = MIN_NAPI_ID;
6530         } while (napi_by_id(napi_gen_id));
6531         napi->napi_id = napi_gen_id;
6532
6533         hlist_add_head_rcu(&napi->napi_hash_node,
6534                            &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6535
6536         spin_unlock(&napi_hash_lock);
6537 }
6538
6539 /* Warning : caller is responsible to make sure rcu grace period
6540  * is respected before freeing memory containing @napi
6541  */
6542 bool napi_hash_del(struct napi_struct *napi)
6543 {
6544         bool rcu_sync_needed = false;
6545
6546         spin_lock(&napi_hash_lock);
6547
6548         if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
6549                 rcu_sync_needed = true;
6550                 hlist_del_rcu(&napi->napi_hash_node);
6551         }
6552         spin_unlock(&napi_hash_lock);
6553         return rcu_sync_needed;
6554 }
6555 EXPORT_SYMBOL_GPL(napi_hash_del);
6556
6557 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6558 {
6559         struct napi_struct *napi;
6560
6561         napi = container_of(timer, struct napi_struct, timer);
6562
6563         /* Note : we use a relaxed variant of napi_schedule_prep() not setting
6564          * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6565          */
6566         if (!napi_disable_pending(napi) &&
6567             !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
6568                 __napi_schedule_irqoff(napi);
6569
6570         return HRTIMER_NORESTART;
6571 }
6572
6573 static void init_gro_hash(struct napi_struct *napi)
6574 {
6575         int i;
6576
6577         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6578                 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6579                 napi->gro_hash[i].count = 0;
6580         }
6581         napi->gro_bitmask = 0;
6582 }
6583
6584 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6585                     int (*poll)(struct napi_struct *, int), int weight)
6586 {
6587         INIT_LIST_HEAD(&napi->poll_list);
6588         hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6589         napi->timer.function = napi_watchdog;
6590         init_gro_hash(napi);
6591         napi->skb = NULL;
6592         INIT_LIST_HEAD(&napi->rx_list);
6593         napi->rx_count = 0;
6594         napi->poll = poll;
6595         if (weight > NAPI_POLL_WEIGHT)
6596                 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6597                                 weight);
6598         napi->weight = weight;
6599         list_add(&napi->dev_list, &dev->napi_list);
6600         napi->dev = dev;
6601 #ifdef CONFIG_NETPOLL
6602         napi->poll_owner = -1;
6603 #endif
6604         set_bit(NAPI_STATE_SCHED, &napi->state);
6605         napi_hash_add(napi);
6606 }
6607 EXPORT_SYMBOL(netif_napi_add);
6608
6609 void napi_disable(struct napi_struct *n)
6610 {
6611         might_sleep();
6612         set_bit(NAPI_STATE_DISABLE, &n->state);
6613
6614         while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6615                 msleep(1);
6616         while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6617                 msleep(1);
6618
6619         hrtimer_cancel(&n->timer);
6620
6621         clear_bit(NAPI_STATE_DISABLE, &n->state);
6622 }
6623 EXPORT_SYMBOL(napi_disable);
6624
6625 static void flush_gro_hash(struct napi_struct *napi)
6626 {
6627         int i;
6628
6629         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6630                 struct sk_buff *skb, *n;
6631
6632                 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6633                         kfree_skb(skb);
6634                 napi->gro_hash[i].count = 0;
6635         }
6636 }
6637
6638 /* Must be called in process context */
6639 void netif_napi_del(struct napi_struct *napi)
6640 {
6641         might_sleep();
6642         if (napi_hash_del(napi))
6643                 synchronize_net();
6644         list_del_init(&napi->dev_list);
6645         napi_free_frags(napi);
6646
6647         flush_gro_hash(napi);
6648         napi->gro_bitmask = 0;
6649 }
6650 EXPORT_SYMBOL(netif_napi_del);
6651
6652 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6653 {
6654         void *have;
6655         int work, weight;
6656
6657         list_del_init(&n->poll_list);
6658
6659         have = netpoll_poll_lock(n);
6660
6661         weight = n->weight;
6662
6663         /* This NAPI_STATE_SCHED test is for avoiding a race
6664          * with netpoll's poll_napi().  Only the entity which
6665          * obtains the lock and sees NAPI_STATE_SCHED set will
6666          * actually make the ->poll() call.  Therefore we avoid
6667          * accidentally calling ->poll() when NAPI is not scheduled.
6668          */
6669         work = 0;
6670         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6671                 work = n->poll(n, weight);
6672                 trace_napi_poll(n, work, weight);
6673         }
6674
6675         WARN_ON_ONCE(work > weight);
6676
6677         if (likely(work < weight))
6678                 goto out_unlock;
6679
6680         /* Drivers must not modify the NAPI state if they
6681          * consume the entire weight.  In such cases this code
6682          * still "owns" the NAPI instance and therefore can
6683          * move the instance around on the list at-will.
6684          */
6685         if (unlikely(napi_disable_pending(n))) {
6686                 napi_complete(n);
6687                 goto out_unlock;
6688         }
6689
6690         if (n->gro_bitmask) {
6691                 /* flush too old packets
6692                  * If HZ < 1000, flush all packets.
6693                  */
6694                 napi_gro_flush(n, HZ >= 1000);
6695         }
6696
6697         gro_normal_list(n);
6698
6699         /* Some drivers may have called napi_schedule
6700          * prior to exhausting their budget.
6701          */
6702         if (unlikely(!list_empty(&n->poll_list))) {
6703                 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6704                              n->dev ? n->dev->name : "backlog");
6705                 goto out_unlock;
6706         }
6707
6708         list_add_tail(&n->poll_list, repoll);
6709
6710 out_unlock:
6711         netpoll_poll_unlock(have);
6712
6713         return work;
6714 }
6715
6716 static __latent_entropy void net_rx_action(struct softirq_action *h)
6717 {
6718         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6719         unsigned long time_limit = jiffies +
6720                 usecs_to_jiffies(netdev_budget_usecs);
6721         int budget = netdev_budget;
6722         LIST_HEAD(list);
6723         LIST_HEAD(repoll);
6724
6725         local_irq_disable();
6726         list_splice_init(&sd->poll_list, &list);
6727         local_irq_enable();
6728
6729         for (;;) {
6730                 struct napi_struct *n;
6731
6732                 if (list_empty(&list)) {
6733                         if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
6734                                 goto out;
6735                         break;
6736                 }
6737
6738                 n = list_first_entry(&list, struct napi_struct, poll_list);
6739                 budget -= napi_poll(n, &repoll);
6740
6741                 /* If softirq window is exhausted then punt.
6742                  * Allow this to run for 2 jiffies since which will allow
6743                  * an average latency of 1.5/HZ.
6744                  */
6745                 if (unlikely(budget <= 0 ||
6746                              time_after_eq(jiffies, time_limit))) {
6747                         sd->time_squeeze++;
6748                         break;
6749                 }
6750         }
6751
6752         local_irq_disable();
6753
6754         list_splice_tail_init(&sd->poll_list, &list);
6755         list_splice_tail(&repoll, &list);
6756         list_splice(&list, &sd->poll_list);
6757         if (!list_empty(&sd->poll_list))
6758                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6759
6760         net_rps_action_and_irq_enable(sd);
6761 out:
6762         __kfree_skb_flush();
6763 }
6764
6765 struct netdev_adjacent {
6766         struct net_device *dev;
6767
6768         /* upper master flag, there can only be one master device per list */
6769         bool master;
6770
6771         /* lookup ignore flag */
6772         bool ignore;
6773
6774         /* counter for the number of times this device was added to us */
6775         u16 ref_nr;
6776
6777         /* private field for the users */
6778         void *private;
6779
6780         struct list_head list;
6781         struct rcu_head rcu;
6782 };
6783
6784 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6785                                                  struct list_head *adj_list)
6786 {
6787         struct netdev_adjacent *adj;
6788
6789         list_for_each_entry(adj, adj_list, list) {
6790                 if (adj->dev == adj_dev)
6791                         return adj;
6792         }
6793         return NULL;
6794 }
6795
6796 static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
6797 {
6798         struct net_device *dev = data;
6799
6800         return upper_dev == dev;
6801 }
6802
6803 /**
6804  * netdev_has_upper_dev - Check if device is linked to an upper device
6805  * @dev: device
6806  * @upper_dev: upper device to check
6807  *
6808  * Find out if a device is linked to specified upper device and return true
6809  * in case it is. Note that this checks only immediate upper device,
6810  * not through a complete stack of devices. The caller must hold the RTNL lock.
6811  */
6812 bool netdev_has_upper_dev(struct net_device *dev,
6813                           struct net_device *upper_dev)
6814 {
6815         ASSERT_RTNL();
6816
6817         return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6818                                              upper_dev);
6819 }
6820 EXPORT_SYMBOL(netdev_has_upper_dev);
6821
6822 /**
6823  * netdev_has_upper_dev_all - Check if device is linked to an upper device
6824  * @dev: device
6825  * @upper_dev: upper device to check
6826  *
6827  * Find out if a device is linked to specified upper device and return true
6828  * in case it is. Note that this checks the entire upper device chain.
6829  * The caller must hold rcu lock.
6830  */
6831
6832 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6833                                   struct net_device *upper_dev)
6834 {
6835         return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6836                                                upper_dev);
6837 }
6838 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6839
6840 /**
6841  * netdev_has_any_upper_dev - Check if device is linked to some device
6842  * @dev: device
6843  *
6844  * Find out if a device is linked to an upper device and return true in case
6845  * it is. The caller must hold the RTNL lock.
6846  */
6847 bool netdev_has_any_upper_dev(struct net_device *dev)
6848 {
6849         ASSERT_RTNL();
6850
6851         return !list_empty(&dev->adj_list.upper);
6852 }
6853 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6854
6855 /**
6856  * netdev_master_upper_dev_get - Get master upper device
6857  * @dev: device
6858  *
6859  * Find a master upper device and return pointer to it or NULL in case
6860  * it's not there. The caller must hold the RTNL lock.
6861  */
6862 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6863 {
6864         struct netdev_adjacent *upper;
6865
6866         ASSERT_RTNL();
6867
6868         if (list_empty(&dev->adj_list.upper))
6869                 return NULL;
6870
6871         upper = list_first_entry(&dev->adj_list.upper,
6872                                  struct netdev_adjacent, list);
6873         if (likely(upper->master))
6874                 return upper->dev;
6875         return NULL;
6876 }
6877 EXPORT_SYMBOL(netdev_master_upper_dev_get);
6878
6879 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6880 {
6881         struct netdev_adjacent *upper;
6882
6883         ASSERT_RTNL();
6884
6885         if (list_empty(&dev->adj_list.upper))
6886                 return NULL;
6887
6888         upper = list_first_entry(&dev->adj_list.upper,
6889                                  struct netdev_adjacent, list);
6890         if (likely(upper->master) && !upper->ignore)
6891                 return upper->dev;
6892         return NULL;
6893 }
6894
6895 /**
6896  * netdev_has_any_lower_dev - Check if device is linked to some device
6897  * @dev: device
6898  *
6899  * Find out if a device is linked to a lower device and return true in case
6900  * it is. The caller must hold the RTNL lock.
6901  */
6902 static bool netdev_has_any_lower_dev(struct net_device *dev)
6903 {
6904         ASSERT_RTNL();
6905
6906         return !list_empty(&dev->adj_list.lower);
6907 }
6908
6909 void *netdev_adjacent_get_private(struct list_head *adj_list)
6910 {
6911         struct netdev_adjacent *adj;
6912
6913         adj = list_entry(adj_list, struct netdev_adjacent, list);
6914
6915         return adj->private;
6916 }
6917 EXPORT_SYMBOL(netdev_adjacent_get_private);
6918
6919 /**
6920  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6921  * @dev: device
6922  * @iter: list_head ** of the current position
6923  *
6924  * Gets the next device from the dev's upper list, starting from iter
6925  * position. The caller must hold RCU read lock.
6926  */
6927 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6928                                                  struct list_head **iter)
6929 {
6930         struct netdev_adjacent *upper;
6931
6932         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6933
6934         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6935
6936         if (&upper->list == &dev->adj_list.upper)
6937                 return NULL;
6938
6939         *iter = &upper->list;
6940
6941         return upper->dev;
6942 }
6943 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6944
6945 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6946                                                   struct list_head **iter,
6947                                                   bool *ignore)
6948 {
6949         struct netdev_adjacent *upper;
6950
6951         upper = list_entry((*iter)->next, struct netdev_adjacent, list);
6952
6953         if (&upper->list == &dev->adj_list.upper)
6954                 return NULL;
6955
6956         *iter = &upper->list;
6957         *ignore = upper->ignore;
6958
6959         return upper->dev;
6960 }
6961
6962 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6963                                                     struct list_head **iter)
6964 {
6965         struct netdev_adjacent *upper;
6966
6967         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6968
6969         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6970
6971         if (&upper->list == &dev->adj_list.upper)
6972                 return NULL;
6973
6974         *iter = &upper->list;
6975
6976         return upper->dev;
6977 }
6978
6979 static int __netdev_walk_all_upper_dev(struct net_device *dev,
6980                                        int (*fn)(struct net_device *dev,
6981                                                  void *data),
6982                                        void *data)
6983 {
6984         struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6985         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6986         int ret, cur = 0;
6987         bool ignore;
6988
6989         now = dev;
6990         iter = &dev->adj_list.upper;
6991
6992         while (1) {
6993                 if (now != dev) {
6994                         ret = fn(now, data);
6995                         if (ret)
6996                                 return ret;
6997                 }
6998
6999                 next = NULL;
7000                 while (1) {
7001                         udev = __netdev_next_upper_dev(now, &iter, &ignore);
7002                         if (!udev)
7003                                 break;
7004                         if (ignore)
7005                                 continue;
7006
7007                         next = udev;
7008                         niter = &udev->adj_list.upper;
7009                         dev_stack[cur] = now;
7010                         iter_stack[cur++] = iter;
7011                         break;
7012                 }
7013
7014                 if (!next) {
7015                         if (!cur)
7016                                 return 0;
7017                         next = dev_stack[--cur];
7018                         niter = iter_stack[cur];
7019                 }
7020
7021                 now = next;
7022                 iter = niter;
7023         }
7024
7025         return 0;
7026 }
7027
7028 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7029                                   int (*fn)(struct net_device *dev,
7030                                             void *data),
7031                                   void *data)
7032 {
7033         struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7034         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7035         int ret, cur = 0;
7036
7037         now = dev;
7038         iter = &dev->adj_list.upper;
7039
7040         while (1) {
7041                 if (now != dev) {
7042                         ret = fn(now, data);
7043                         if (ret)
7044                                 return ret;
7045                 }
7046
7047                 next = NULL;
7048                 while (1) {
7049                         udev = netdev_next_upper_dev_rcu(now, &iter);
7050                         if (!udev)
7051                                 break;
7052
7053                         next = udev;
7054                         niter = &udev->adj_list.upper;
7055                         dev_stack[cur] = now;
7056                         iter_stack[cur++] = iter;
7057                         break;
7058                 }
7059
7060                 if (!next) {
7061                         if (!cur)
7062                                 return 0;
7063                         next = dev_stack[--cur];
7064                         niter = iter_stack[cur];
7065                 }
7066
7067                 now = next;
7068                 iter = niter;
7069         }
7070
7071         return 0;
7072 }
7073 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
7074
7075 static bool __netdev_has_upper_dev(struct net_device *dev,
7076                                    struct net_device *upper_dev)
7077 {
7078         ASSERT_RTNL();
7079
7080         return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7081                                            upper_dev);
7082 }
7083
7084 /**
7085  * netdev_lower_get_next_private - Get the next ->private from the
7086  *                                 lower neighbour list
7087  * @dev: device
7088  * @iter: list_head ** of the current position
7089  *
7090  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7091  * list, starting from iter position. The caller must hold either hold the
7092  * RTNL lock or its own locking that guarantees that the neighbour lower
7093  * list will remain unchanged.
7094  */
7095 void *netdev_lower_get_next_private(struct net_device *dev,
7096                                     struct list_head **iter)
7097 {
7098         struct netdev_adjacent *lower;
7099
7100         lower = list_entry(*iter, struct netdev_adjacent, list);
7101
7102         if (&lower->list == &dev->adj_list.lower)
7103                 return NULL;
7104
7105         *iter = lower->list.next;
7106
7107         return lower->private;
7108 }
7109 EXPORT_SYMBOL(netdev_lower_get_next_private);
7110
7111 /**
7112  * netdev_lower_get_next_private_rcu - Get the next ->private from the
7113  *                                     lower neighbour list, RCU
7114  *                                     variant
7115  * @dev: device
7116  * @iter: list_head ** of the current position
7117  *
7118  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7119  * list, starting from iter position. The caller must hold RCU read lock.
7120  */
7121 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7122                                         struct list_head **iter)
7123 {
7124         struct netdev_adjacent *lower;
7125
7126         WARN_ON_ONCE(!rcu_read_lock_held());
7127
7128         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7129
7130         if (&lower->list == &dev->adj_list.lower)
7131                 return NULL;
7132
7133         *iter = &lower->list;
7134
7135         return lower->private;
7136 }
7137 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7138
7139 /**
7140  * netdev_lower_get_next - Get the next device from the lower neighbour
7141  *                         list
7142  * @dev: device
7143  * @iter: list_head ** of the current position
7144  *
7145  * Gets the next netdev_adjacent from the dev's lower neighbour
7146  * list, starting from iter position. The caller must hold RTNL lock or
7147  * its own locking that guarantees that the neighbour lower
7148  * list will remain unchanged.
7149  */
7150 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7151 {
7152         struct netdev_adjacent *lower;
7153
7154         lower = list_entry(*iter, struct netdev_adjacent, list);
7155
7156         if (&lower->list == &dev->adj_list.lower)
7157                 return NULL;
7158
7159         *iter = lower->list.next;
7160
7161         return lower->dev;
7162 }
7163 EXPORT_SYMBOL(netdev_lower_get_next);
7164
7165 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7166                                                 struct list_head **iter)
7167 {
7168         struct netdev_adjacent *lower;
7169
7170         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7171
7172         if (&lower->list == &dev->adj_list.lower)
7173                 return NULL;
7174
7175         *iter = &lower->list;
7176
7177         return lower->dev;
7178 }
7179
7180 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7181                                                   struct list_head **iter,
7182                                                   bool *ignore)
7183 {
7184         struct netdev_adjacent *lower;
7185
7186         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7187
7188         if (&lower->list == &dev->adj_list.lower)
7189                 return NULL;
7190
7191         *iter = &lower->list;
7192         *ignore = lower->ignore;
7193
7194         return lower->dev;
7195 }
7196
7197 int netdev_walk_all_lower_dev(struct net_device *dev,
7198                               int (*fn)(struct net_device *dev,
7199                                         void *data),
7200                               void *data)
7201 {
7202         struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7203         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7204         int ret, cur = 0;
7205
7206         now = dev;
7207         iter = &dev->adj_list.lower;
7208
7209         while (1) {
7210                 if (now != dev) {
7211                         ret = fn(now, data);
7212                         if (ret)
7213                                 return ret;
7214                 }
7215
7216                 next = NULL;
7217                 while (1) {
7218                         ldev = netdev_next_lower_dev(now, &iter);
7219                         if (!ldev)
7220                                 break;
7221
7222                         next = ldev;
7223                         niter = &ldev->adj_list.lower;
7224                         dev_stack[cur] = now;
7225                         iter_stack[cur++] = iter;
7226                         break;
7227                 }
7228
7229                 if (!next) {
7230                         if (!cur)
7231                                 return 0;
7232                         next = dev_stack[--cur];
7233                         niter = iter_stack[cur];
7234                 }
7235
7236                 now = next;
7237                 iter = niter;
7238         }
7239
7240         return 0;
7241 }
7242 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7243
7244 static int __netdev_walk_all_lower_dev(struct net_device *dev,
7245                                        int (*fn)(struct net_device *dev,
7246                                                  void *data),
7247                                        void *data)
7248 {
7249         struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7250         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7251         int ret, cur = 0;
7252         bool ignore;
7253
7254         now = dev;
7255         iter = &dev->adj_list.lower;
7256
7257         while (1) {
7258                 if (now != dev) {
7259                         ret = fn(now, data);
7260                         if (ret)
7261                                 return ret;
7262                 }
7263
7264                 next = NULL;
7265                 while (1) {
7266                         ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7267                         if (!ldev)
7268                                 break;
7269                         if (ignore)
7270                                 continue;
7271
7272                         next = ldev;
7273                         niter = &ldev->adj_list.lower;
7274                         dev_stack[cur] = now;
7275                         iter_stack[cur++] = iter;
7276                         break;
7277                 }
7278
7279                 if (!next) {
7280                         if (!cur)
7281                                 return 0;
7282                         next = dev_stack[--cur];
7283                         niter = iter_stack[cur];
7284                 }
7285
7286                 now = next;
7287                 iter = niter;
7288         }
7289
7290         return 0;
7291 }
7292
7293 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7294                                              struct list_head **iter)
7295 {
7296         struct netdev_adjacent *lower;
7297
7298         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7299         if (&lower->list == &dev->adj_list.lower)
7300                 return NULL;
7301
7302         *iter = &lower->list;
7303
7304         return lower->dev;
7305 }
7306 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7307
7308 static u8 __netdev_upper_depth(struct net_device *dev)
7309 {
7310         struct net_device *udev;
7311         struct list_head *iter;
7312         u8 max_depth = 0;
7313         bool ignore;
7314
7315         for (iter = &dev->adj_list.upper,
7316              udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7317              udev;
7318              udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7319                 if (ignore)
7320                         continue;
7321                 if (max_depth < udev->upper_level)
7322                         max_depth = udev->upper_level;
7323         }
7324
7325         return max_depth;
7326 }
7327
7328 static u8 __netdev_lower_depth(struct net_device *dev)
7329 {
7330         struct net_device *ldev;
7331         struct list_head *iter;
7332         u8 max_depth = 0;
7333         bool ignore;
7334
7335         for (iter = &dev->adj_list.lower,
7336              ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7337              ldev;
7338              ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7339                 if (ignore)
7340                         continue;
7341                 if (max_depth < ldev->lower_level)
7342                         max_depth = ldev->lower_level;
7343         }
7344
7345         return max_depth;
7346 }
7347
7348 static int __netdev_update_upper_level(struct net_device *dev, void *data)
7349 {
7350         dev->upper_level = __netdev_upper_depth(dev) + 1;
7351         return 0;
7352 }
7353
7354 static int __netdev_update_lower_level(struct net_device *dev, void *data)
7355 {
7356         dev->lower_level = __netdev_lower_depth(dev) + 1;
7357         return 0;
7358 }
7359
7360 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7361                                   int (*fn)(struct net_device *dev,
7362                                             void *data),
7363                                   void *data)
7364 {
7365         struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7366         struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7367         int ret, cur = 0;
7368
7369         now = dev;
7370         iter = &dev->adj_list.lower;
7371
7372         while (1) {
7373                 if (now != dev) {
7374                         ret = fn(now, data);
7375                         if (ret)
7376                                 return ret;
7377                 }
7378
7379                 next = NULL;
7380                 while (1) {
7381                         ldev = netdev_next_lower_dev_rcu(now, &iter);
7382                         if (!ldev)
7383                                 break;
7384
7385                         next = ldev;
7386                         niter = &ldev->adj_list.lower;
7387                         dev_stack[cur] = now;
7388                         iter_stack[cur++] = iter;
7389                         break;
7390                 }
7391
7392                 if (!next) {
7393                         if (!cur)
7394                                 return 0;
7395                         next = dev_stack[--cur];
7396                         niter = iter_stack[cur];
7397                 }
7398
7399                 now = next;
7400                 iter = niter;
7401         }
7402
7403         return 0;
7404 }
7405 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7406
7407 /**
7408  * netdev_lower_get_first_private_rcu - Get the first ->private from the
7409  *                                     lower neighbour list, RCU
7410  *                                     variant
7411  * @dev: device
7412  *
7413  * Gets the first netdev_adjacent->private from the dev's lower neighbour
7414  * list. The caller must hold RCU read lock.
7415  */
7416 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7417 {
7418         struct netdev_adjacent *lower;
7419
7420         lower = list_first_or_null_rcu(&dev->adj_list.lower,
7421                         struct netdev_adjacent, list);
7422         if (lower)
7423                 return lower->private;
7424         return NULL;
7425 }
7426 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7427
7428 /**
7429  * netdev_master_upper_dev_get_rcu - Get master upper device
7430  * @dev: device
7431  *
7432  * Find a master upper device and return pointer to it or NULL in case
7433  * it's not there. The caller must hold the RCU read lock.
7434  */
7435 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7436 {
7437         struct netdev_adjacent *upper;
7438
7439         upper = list_first_or_null_rcu(&dev->adj_list.upper,
7440                                        struct netdev_adjacent, list);
7441         if (upper && likely(upper->master))
7442                 return upper->dev;
7443         return NULL;
7444 }
7445 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7446
7447 static int netdev_adjacent_sysfs_add(struct net_device *dev,
7448                               struct net_device *adj_dev,
7449                               struct list_head *dev_list)
7450 {
7451         char linkname[IFNAMSIZ+7];
7452
7453         sprintf(linkname, dev_list == &dev->adj_list.upper ?
7454                 "upper_%s" : "lower_%s", adj_dev->name);
7455         return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7456                                  linkname);
7457 }
7458 static void netdev_adjacent_sysfs_del(struct net_device *dev,
7459                                char *name,
7460                                struct list_head *dev_list)
7461 {
7462         char linkname[IFNAMSIZ+7];
7463
7464         sprintf(linkname, dev_list == &dev->adj_list.upper ?
7465                 "upper_%s" : "lower_%s", name);
7466         sysfs_remove_link(&(dev->dev.kobj), linkname);
7467 }
7468
7469 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7470                                                  struct net_device *adj_dev,
7471                                                  struct list_head *dev_list)
7472 {
7473         return (dev_list == &dev->adj_list.upper ||
7474                 dev_list == &dev->adj_list.lower) &&
7475                 net_eq(dev_net(dev), dev_net(adj_dev));
7476 }
7477
7478 static int __netdev_adjacent_dev_insert(struct net_device *dev,
7479                                         struct net_device *adj_dev,
7480                                         struct list_head *dev_list,
7481                                         void *private, bool master)
7482 {
7483         struct netdev_adjacent *adj;
7484         int ret;
7485
7486         adj = __netdev_find_adj(adj_dev, dev_list);
7487
7488         if (adj) {
7489                 adj->ref_nr += 1;
7490                 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7491                          dev->name, adj_dev->name, adj->ref_nr);
7492
7493                 return 0;
7494         }
7495
7496         adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7497         if (!adj)
7498                 return -ENOMEM;
7499
7500         adj->dev = adj_dev;
7501         adj->master = master;
7502         adj->ref_nr = 1;
7503         adj->private = private;
7504         adj->ignore = false;
7505         dev_hold(adj_dev);
7506
7507         pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7508                  dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7509
7510         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7511                 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7512                 if (ret)
7513                         goto free_adj;
7514         }
7515
7516         /* Ensure that master link is always the first item in list. */
7517         if (master) {
7518                 ret = sysfs_create_link(&(dev->dev.kobj),
7519                                         &(adj_dev->dev.kobj), "master");
7520                 if (ret)
7521                         goto remove_symlinks;
7522
7523                 list_add_rcu(&adj->list, dev_list);
7524         } else {
7525                 list_add_tail_rcu(&adj->list, dev_list);
7526         }
7527
7528         return 0;
7529
7530 remove_symlinks:
7531         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7532                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7533 free_adj:
7534         kfree(adj);
7535         dev_put(adj_dev);
7536
7537         return ret;
7538 }
7539
7540 static void __netdev_adjacent_dev_remove(struct net_device *dev,
7541                                          struct net_device *adj_dev,
7542                                          u16 ref_nr,
7543                                          struct list_head *dev_list)
7544 {
7545         struct netdev_adjacent *adj;
7546
7547         pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7548                  dev->name, adj_dev->name, ref_nr);
7549
7550         adj = __netdev_find_adj(adj_dev, dev_list);
7551
7552         if (!adj) {
7553                 pr_err("Adjacency does not exist for device %s from %s\n",
7554                        dev->name, adj_dev->name);
7555                 WARN_ON(1);
7556                 return;
7557         }
7558
7559         if (adj->ref_nr > ref_nr) {
7560                 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7561                          dev->name, adj_dev->name, ref_nr,
7562                          adj->ref_nr - ref_nr);
7563                 adj->ref_nr -= ref_nr;
7564                 return;
7565         }
7566
7567         if (adj->master)
7568                 sysfs_remove_link(&(dev->dev.kobj), "master");
7569
7570         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7571                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7572
7573         list_del_rcu(&adj->list);
7574         pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7575                  adj_dev->name, dev->name, adj_dev->name);
7576         dev_put(adj_dev);
7577         kfree_rcu(adj, rcu);
7578 }
7579
7580 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7581                                             struct net_device *upper_dev,
7582                                             struct list_head *up_list,
7583                                             struct list_head *down_list,
7584                                             void *private, bool master)
7585 {
7586         int ret;
7587
7588         ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7589                                            private, master);
7590         if (ret)
7591                 return ret;
7592
7593         ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7594                                            private, false);
7595         if (ret) {
7596                 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7597                 return ret;
7598         }
7599
7600         return 0;
7601 }
7602
7603 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7604                                                struct net_device *upper_dev,
7605                                                u16 ref_nr,
7606                                                struct list_head *up_list,
7607                                                struct list_head *down_list)
7608 {
7609         __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7610         __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7611 }
7612
7613 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7614                                                 struct net_device *upper_dev,
7615                                                 void *private, bool master)
7616 {
7617         return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7618                                                 &dev->adj_list.upper,
7619                                                 &upper_dev->adj_list.lower,
7620                                                 private, master);
7621 }
7622
7623 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7624                                                    struct net_device *upper_dev)
7625 {
7626         __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7627                                            &dev->adj_list.upper,
7628                                            &upper_dev->adj_list.lower);
7629 }
7630
7631 static int __netdev_upper_dev_link(struct net_device *dev,
7632                                    struct net_device *upper_dev, bool master,
7633                                    void *upper_priv, void *upper_info,
7634                                    struct netlink_ext_ack *extack)
7635 {
7636         struct netdev_notifier_changeupper_info changeupper_info = {
7637                 .info = {
7638                         .dev = dev,
7639                         .extack = extack,
7640                 },
7641                 .upper_dev = upper_dev,
7642                 .master = master,
7643                 .linking = true,
7644                 .upper_info = upper_info,
7645         };
7646         struct net_device *master_dev;
7647         int ret = 0;
7648
7649         ASSERT_RTNL();
7650
7651         if (dev == upper_dev)
7652                 return -EBUSY;
7653
7654         /* To prevent loops, check if dev is not upper device to upper_dev. */
7655         if (__netdev_has_upper_dev(upper_dev, dev))
7656                 return -EBUSY;
7657
7658         if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7659                 return -EMLINK;
7660
7661         if (!master) {
7662                 if (__netdev_has_upper_dev(dev, upper_dev))
7663                         return -EEXIST;
7664         } else {
7665                 master_dev = __netdev_master_upper_dev_get(dev);
7666                 if (master_dev)
7667                         return master_dev == upper_dev ? -EEXIST : -EBUSY;
7668         }
7669
7670         ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7671                                             &changeupper_info.info);
7672         ret = notifier_to_errno(ret);
7673         if (ret)
7674                 return ret;
7675
7676         ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7677                                                    master);
7678         if (ret)
7679                 return ret;
7680
7681         ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7682                                             &changeupper_info.info);
7683         ret = notifier_to_errno(ret);
7684         if (ret)
7685                 goto rollback;
7686
7687         __netdev_update_upper_level(dev, NULL);
7688         __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7689
7690         __netdev_update_lower_level(upper_dev, NULL);
7691         __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7692                                     NULL);
7693
7694         return 0;
7695
7696 rollback:
7697         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7698
7699         return ret;
7700 }
7701
7702 /**
7703  * netdev_upper_dev_link - Add a link to the upper device
7704  * @dev: device
7705  * @upper_dev: new upper device
7706  * @extack: netlink extended ack
7707  *
7708  * Adds a link to device which is upper to this one. The caller must hold
7709  * the RTNL lock. On a failure a negative errno code is returned.
7710  * On success the reference counts are adjusted and the function
7711  * returns zero.
7712  */
7713 int netdev_upper_dev_link(struct net_device *dev,
7714                           struct net_device *upper_dev,
7715                           struct netlink_ext_ack *extack)
7716 {
7717         return __netdev_upper_dev_link(dev, upper_dev, false,
7718                                        NULL, NULL, extack);
7719 }
7720 EXPORT_SYMBOL(netdev_upper_dev_link);
7721
7722 /**
7723  * netdev_master_upper_dev_link - Add a master link to the upper device
7724  * @dev: device
7725  * @upper_dev: new upper device
7726  * @upper_priv: upper device private
7727  * @upper_info: upper info to be passed down via notifier
7728  * @extack: netlink extended ack
7729  *
7730  * Adds a link to device which is upper to this one. In this case, only
7731  * one master upper device can be linked, although other non-master devices
7732  * might be linked as well. The caller must hold the RTNL lock.
7733  * On a failure a negative errno code is returned. On success the reference
7734  * counts are adjusted and the function returns zero.
7735  */
7736 int netdev_master_upper_dev_link(struct net_device *dev,
7737                                  struct net_device *upper_dev,
7738                                  void *upper_priv, void *upper_info,
7739                                  struct netlink_ext_ack *extack)
7740 {
7741         return __netdev_upper_dev_link(dev, upper_dev, true,
7742                                        upper_priv, upper_info, extack);
7743 }
7744 EXPORT_SYMBOL(netdev_master_upper_dev_link);
7745
7746 /**
7747  * netdev_upper_dev_unlink - Removes a link to upper device
7748  * @dev: device
7749  * @upper_dev: new upper device
7750  *
7751  * Removes a link to device which is upper to this one. The caller must hold
7752  * the RTNL lock.
7753  */
7754 void netdev_upper_dev_unlink(struct net_device *dev,
7755                              struct net_device *upper_dev)
7756 {
7757         struct netdev_notifier_changeupper_info changeupper_info = {
7758                 .info = {
7759                         .dev = dev,
7760                 },
7761                 .upper_dev = upper_dev,
7762                 .linking = false,
7763         };
7764
7765         ASSERT_RTNL();
7766
7767         changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7768
7769         call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7770                                       &changeupper_info.info);
7771
7772         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7773
7774         call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7775                                       &changeupper_info.info);
7776
7777         __netdev_update_upper_level(dev, NULL);
7778         __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7779
7780         __netdev_update_lower_level(upper_dev, NULL);
7781         __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7782                                     NULL);
7783 }
7784 EXPORT_SYMBOL(netdev_upper_dev_unlink);
7785
7786 static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7787                                       struct net_device *lower_dev,
7788                                       bool val)
7789 {
7790         struct netdev_adjacent *adj;
7791
7792         adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7793         if (adj)
7794                 adj->ignore = val;
7795
7796         adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7797         if (adj)
7798                 adj->ignore = val;
7799 }
7800
7801 static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7802                                         struct net_device *lower_dev)
7803 {
7804         __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7805 }
7806
7807 static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7808                                        struct net_device *lower_dev)
7809 {
7810         __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7811 }
7812
7813 int netdev_adjacent_change_prepare(struct net_device *old_dev,
7814                                    struct net_device *new_dev,
7815                                    struct net_device *dev,
7816                                    struct netlink_ext_ack *extack)
7817 {
7818         int err;
7819
7820         if (!new_dev)
7821                 return 0;
7822
7823         if (old_dev && new_dev != old_dev)
7824                 netdev_adjacent_dev_disable(dev, old_dev);
7825
7826         err = netdev_upper_dev_link(new_dev, dev, extack);
7827         if (err) {
7828                 if (old_dev && new_dev != old_dev)
7829                         netdev_adjacent_dev_enable(dev, old_dev);
7830                 return err;
7831         }
7832
7833         return 0;
7834 }
7835 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7836
7837 void netdev_adjacent_change_commit(struct net_device *old_dev,
7838                                    struct net_device *new_dev,
7839                                    struct net_device *dev)
7840 {
7841         if (!new_dev || !old_dev)
7842                 return;
7843
7844         if (new_dev == old_dev)
7845                 return;
7846
7847         netdev_adjacent_dev_enable(dev, old_dev);
7848         netdev_upper_dev_unlink(old_dev, dev);
7849 }
7850 EXPORT_SYMBOL(netdev_adjacent_change_commit);
7851
7852 void netdev_adjacent_change_abort(struct net_device *old_dev,
7853                                   struct net_device *new_dev,
7854                                   struct net_device *dev)
7855 {
7856         if (!new_dev)
7857                 return;
7858
7859         if (old_dev && new_dev != old_dev)
7860                 netdev_adjacent_dev_enable(dev, old_dev);
7861
7862         netdev_upper_dev_unlink(new_dev, dev);
7863 }
7864 EXPORT_SYMBOL(netdev_adjacent_change_abort);
7865
7866 /**
7867  * netdev_bonding_info_change - Dispatch event about slave change
7868  * @dev: device
7869  * @bonding_info: info to dispatch
7870  *
7871  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7872  * The caller must hold the RTNL lock.
7873  */
7874 void netdev_bonding_info_change(struct net_device *dev,
7875                                 struct netdev_bonding_info *bonding_info)
7876 {
7877         struct netdev_notifier_bonding_info info = {
7878                 .info.dev = dev,
7879         };
7880
7881         memcpy(&info.bonding_info, bonding_info,
7882                sizeof(struct netdev_bonding_info));
7883         call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7884                                       &info.info);
7885 }
7886 EXPORT_SYMBOL(netdev_bonding_info_change);
7887
7888 /**
7889  * netdev_get_xmit_slave - Get the xmit slave of master device
7890  * @skb: The packet
7891  * @all_slaves: assume all the slaves are active
7892  *
7893  * The reference counters are not incremented so the caller must be
7894  * careful with locks. The caller must hold RCU lock.
7895  * %NULL is returned if no slave is found.
7896  */
7897
7898 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
7899                                          struct sk_buff *skb,
7900                                          bool all_slaves)
7901 {
7902         const struct net_device_ops *ops = dev->netdev_ops;
7903
7904         if (!ops->ndo_get_xmit_slave)
7905                 return NULL;
7906         return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
7907 }
7908 EXPORT_SYMBOL(netdev_get_xmit_slave);
7909
7910 static void netdev_adjacent_add_links(struct net_device *dev)
7911 {
7912         struct netdev_adjacent *iter;
7913
7914         struct net *net = dev_net(dev);
7915
7916         list_for_each_entry(iter, &dev->adj_list.upper, list) {
7917                 if (!net_eq(net, dev_net(iter->dev)))
7918                         continue;
7919                 netdev_adjacent_sysfs_add(iter->dev, dev,
7920                                           &iter->dev->adj_list.lower);
7921                 netdev_adjacent_sysfs_add(dev, iter->dev,
7922                                           &dev->adj_list.upper);
7923         }
7924
7925         list_for_each_entry(iter, &dev->adj_list.lower, list) {
7926                 if (!net_eq(net, dev_net(iter->dev)))
7927                         continue;
7928                 netdev_adjacent_sysfs_add(iter->dev, dev,
7929                                           &iter->dev->adj_list.upper);
7930                 netdev_adjacent_sysfs_add(dev, iter->dev,
7931                                           &dev->adj_list.lower);
7932         }
7933 }
7934
7935 static void netdev_adjacent_del_links(struct net_device *dev)
7936 {
7937         struct netdev_adjacent *iter;
7938
7939         struct net *net = dev_net(dev);
7940
7941         list_for_each_entry(iter, &dev->adj_list.upper, list) {
7942                 if (!net_eq(net, dev_net(iter->dev)))
7943                         continue;
7944                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7945                                           &iter->dev->adj_list.lower);
7946                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7947                                           &dev->adj_list.upper);
7948         }
7949
7950         list_for_each_entry(iter, &dev->adj_list.lower, list) {
7951                 if (!net_eq(net, dev_net(iter->dev)))
7952                         continue;
7953                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7954                                           &iter->dev->adj_list.upper);
7955                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7956                                           &dev->adj_list.lower);
7957         }
7958 }
7959
7960 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
7961 {
7962         struct netdev_adjacent *iter;
7963
7964         struct net *net = dev_net(dev);
7965
7966         list_for_each_entry(iter, &dev->adj_list.upper, list) {
7967                 if (!net_eq(net, dev_net(iter->dev)))
7968                         continue;
7969                 netdev_adjacent_sysfs_del(iter->dev, oldname,
7970                                           &iter->dev->adj_list.lower);
7971                 netdev_adjacent_sysfs_add(iter->dev, dev,
7972                                           &iter->dev->adj_list.lower);
7973         }
7974
7975         list_for_each_entry(iter, &dev->adj_list.lower, list) {
7976                 if (!net_eq(net, dev_net(iter->dev)))
7977                         continue;
7978                 netdev_adjacent_sysfs_del(iter->dev, oldname,
7979                                           &iter->dev->adj_list.upper);
7980                 netdev_adjacent_sysfs_add(iter->dev, dev,
7981                                           &iter->dev->adj_list.upper);
7982         }
7983 }
7984
7985 void *netdev_lower_dev_get_private(struct net_device *dev,
7986                                    struct net_device *lower_dev)
7987 {
7988         struct netdev_adjacent *lower;
7989
7990         if (!lower_dev)
7991                 return NULL;
7992         lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
7993         if (!lower)
7994                 return NULL;
7995
7996         return lower->private;
7997 }
7998 EXPORT_SYMBOL(netdev_lower_dev_get_private);
7999
8000
8001 /**
8002  * netdev_lower_change - Dispatch event about lower device state change
8003  * @lower_dev: device
8004  * @lower_state_info: state to dispatch
8005  *
8006  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8007  * The caller must hold the RTNL lock.
8008  */
8009 void netdev_lower_state_changed(struct net_device *lower_dev,
8010                                 void *lower_state_info)
8011 {
8012         struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8013                 .info.dev = lower_dev,
8014         };
8015
8016         ASSERT_RTNL();
8017         changelowerstate_info.lower_state_info = lower_state_info;
8018         call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8019                                       &changelowerstate_info.info);
8020 }
8021 EXPORT_SYMBOL(netdev_lower_state_changed);
8022
8023 static void dev_change_rx_flags(struct net_device *dev, int flags)
8024 {
8025         const struct net_device_ops *ops = dev->netdev_ops;
8026
8027         if (ops->ndo_change_rx_flags)
8028                 ops->ndo_change_rx_flags(dev, flags);
8029 }
8030
8031 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8032 {
8033         unsigned int old_flags = dev->flags;
8034         kuid_t uid;
8035         kgid_t gid;
8036
8037         ASSERT_RTNL();
8038
8039         dev->flags |= IFF_PROMISC;
8040         dev->promiscuity += inc;
8041         if (dev->promiscuity == 0) {
8042                 /*
8043                  * Avoid overflow.
8044                  * If inc causes overflow, untouch promisc and return error.
8045                  */
8046                 if (inc < 0)
8047                         dev->flags &= ~IFF_PROMISC;
8048                 else {
8049                         dev->promiscuity -= inc;
8050                         pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
8051                                 dev->name);
8052                         return -EOVERFLOW;
8053                 }
8054         }
8055         if (dev->flags != old_flags) {
8056                 pr_info("device %s %s promiscuous mode\n",
8057                         dev->name,
8058                         dev->flags & IFF_PROMISC ? "entered" : "left");
8059                 if (audit_enabled) {
8060                         current_uid_gid(&uid, &gid);
8061                         audit_log(audit_context(), GFP_ATOMIC,
8062                                   AUDIT_ANOM_PROMISCUOUS,
8063                                   "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8064                                   dev->name, (dev->flags & IFF_PROMISC),
8065                                   (old_flags & IFF_PROMISC),
8066                                   from_kuid(&init_user_ns, audit_get_loginuid(current)),
8067                                   from_kuid(&init_user_ns, uid),
8068                                   from_kgid(&init_user_ns, gid),
8069                                   audit_get_sessionid(current));
8070                 }
8071
8072                 dev_change_rx_flags(dev, IFF_PROMISC);
8073         }
8074         if (notify)
8075                 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
8076         return 0;
8077 }
8078
8079 /**
8080  *      dev_set_promiscuity     - update promiscuity count on a device
8081  *      @dev: device
8082  *      @inc: modifier
8083  *
8084  *      Add or remove promiscuity from a device. While the count in the device
8085  *      remains above zero the interface remains promiscuous. Once it hits zero
8086  *      the device reverts back to normal filtering operation. A negative inc
8087  *      value is used to drop promiscuity on the device.
8088  *      Return 0 if successful or a negative errno code on error.
8089  */
8090 int dev_set_promiscuity(struct net_device *dev, int inc)
8091 {
8092         unsigned int old_flags = dev->flags;
8093         int err;
8094
8095         err = __dev_set_promiscuity(dev, inc, true);
8096         if (err < 0)
8097                 return err;
8098         if (dev->flags != old_flags)
8099                 dev_set_rx_mode(dev);
8100         return err;
8101 }
8102 EXPORT_SYMBOL(dev_set_promiscuity);
8103
8104 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8105 {
8106         unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8107
8108         ASSERT_RTNL();
8109
8110         dev->flags |= IFF_ALLMULTI;
8111         dev->allmulti += inc;
8112         if (dev->allmulti == 0) {
8113                 /*
8114                  * Avoid overflow.
8115                  * If inc causes overflow, untouch allmulti and return error.
8116                  */
8117                 if (inc < 0)
8118                         dev->flags &= ~IFF_ALLMULTI;
8119                 else {
8120                         dev->allmulti -= inc;
8121                         pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
8122                                 dev->name);
8123                         return -EOVERFLOW;
8124                 }
8125         }
8126         if (dev->flags ^ old_flags) {
8127                 dev_change_rx_flags(dev, IFF_ALLMULTI);
8128                 dev_set_rx_mode(dev);
8129                 if (notify)
8130                         __dev_notify_flags(dev, old_flags,
8131                                            dev->gflags ^ old_gflags);
8132         }
8133         return 0;
8134 }
8135
8136 /**
8137  *      dev_set_allmulti        - update allmulti count on a device
8138  *      @dev: device
8139  *      @inc: modifier
8140  *
8141  *      Add or remove reception of all multicast frames to a device. While the
8142  *      count in the device remains above zero the interface remains listening
8143  *      to all interfaces. Once it hits zero the device reverts back to normal
8144  *      filtering operation. A negative @inc value is used to drop the counter
8145  *      when releasing a resource needing all multicasts.
8146  *      Return 0 if successful or a negative errno code on error.
8147  */
8148
8149 int dev_set_allmulti(struct net_device *dev, int inc)
8150 {
8151         return __dev_set_allmulti(dev, inc, true);
8152 }
8153 EXPORT_SYMBOL(dev_set_allmulti);
8154
8155 /*
8156  *      Upload unicast and multicast address lists to device and
8157  *      configure RX filtering. When the device doesn't support unicast
8158  *      filtering it is put in promiscuous mode while unicast addresses
8159  *      are present.
8160  */
8161 void __dev_set_rx_mode(struct net_device *dev)
8162 {
8163         const struct net_device_ops *ops = dev->netdev_ops;
8164
8165         /* dev_open will call this function so the list will stay sane. */
8166         if (!(dev->flags&IFF_UP))
8167                 return;
8168
8169         if (!netif_device_present(dev))
8170                 return;
8171
8172         if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8173                 /* Unicast addresses changes may only happen under the rtnl,
8174                  * therefore calling __dev_set_promiscuity here is safe.
8175                  */
8176                 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8177                         __dev_set_promiscuity(dev, 1, false);
8178                         dev->uc_promisc = true;
8179                 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8180                         __dev_set_promiscuity(dev, -1, false);
8181                         dev->uc_promisc = false;
8182                 }
8183         }
8184
8185         if (ops->ndo_set_rx_mode)
8186                 ops->ndo_set_rx_mode(dev);
8187 }
8188
8189 void dev_set_rx_mode(struct net_device *dev)
8190 {
8191         netif_addr_lock_bh(dev);
8192         __dev_set_rx_mode(dev);
8193         netif_addr_unlock_bh(dev);
8194 }
8195
8196 /**
8197  *      dev_get_flags - get flags reported to userspace
8198  *      @dev: device
8199  *
8200  *      Get the combination of flag bits exported through APIs to userspace.
8201  */
8202 unsigned int dev_get_flags(const struct net_device *dev)
8203 {
8204         unsigned int flags;
8205
8206         flags = (dev->flags & ~(IFF_PROMISC |
8207                                 IFF_ALLMULTI |
8208                                 IFF_RUNNING |
8209                                 IFF_LOWER_UP |
8210                                 IFF_DORMANT)) |
8211                 (dev->gflags & (IFF_PROMISC |
8212                                 IFF_ALLMULTI));
8213
8214         if (netif_running(dev)) {
8215                 if (netif_oper_up(dev))
8216                         flags |= IFF_RUNNING;
8217                 if (netif_carrier_ok(dev))
8218                         flags |= IFF_LOWER_UP;
8219                 if (netif_dormant(dev))
8220                         flags |= IFF_DORMANT;
8221         }
8222
8223         return flags;
8224 }
8225 EXPORT_SYMBOL(dev_get_flags);
8226
8227 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8228                        struct netlink_ext_ack *extack)
8229 {
8230         unsigned int old_flags = dev->flags;
8231         int ret;
8232
8233         ASSERT_RTNL();
8234
8235         /*
8236          *      Set the flags on our device.
8237          */
8238
8239         dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8240                                IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8241                                IFF_AUTOMEDIA)) |
8242                      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8243                                     IFF_ALLMULTI));
8244
8245         /*
8246          *      Load in the correct multicast list now the flags have changed.
8247          */
8248
8249         if ((old_flags ^ flags) & IFF_MULTICAST)
8250                 dev_change_rx_flags(dev, IFF_MULTICAST);
8251
8252         dev_set_rx_mode(dev);
8253
8254         /*
8255          *      Have we downed the interface. We handle IFF_UP ourselves
8256          *      according to user attempts to set it, rather than blindly
8257          *      setting it.
8258          */
8259
8260         ret = 0;
8261         if ((old_flags ^ flags) & IFF_UP) {
8262                 if (old_flags & IFF_UP)
8263                         __dev_close(dev);
8264                 else
8265                         ret = __dev_open(dev, extack);
8266         }
8267
8268         if ((flags ^ dev->gflags) & IFF_PROMISC) {
8269                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
8270                 unsigned int old_flags = dev->flags;
8271
8272                 dev->gflags ^= IFF_PROMISC;
8273
8274                 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8275                         if (dev->flags != old_flags)
8276                                 dev_set_rx_mode(dev);
8277         }
8278
8279         /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8280          * is important. Some (broken) drivers set IFF_PROMISC, when
8281          * IFF_ALLMULTI is requested not asking us and not reporting.
8282          */
8283         if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8284                 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8285
8286                 dev->gflags ^= IFF_ALLMULTI;
8287                 __dev_set_allmulti(dev, inc, false);
8288         }
8289
8290         return ret;
8291 }
8292
8293 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8294                         unsigned int gchanges)
8295 {
8296         unsigned int changes = dev->flags ^ old_flags;
8297
8298         if (gchanges)
8299                 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
8300
8301         if (changes & IFF_UP) {
8302                 if (dev->flags & IFF_UP)
8303                         call_netdevice_notifiers(NETDEV_UP, dev);
8304                 else
8305                         call_netdevice_notifiers(NETDEV_DOWN, dev);
8306         }
8307
8308         if (dev->flags & IFF_UP &&
8309             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8310                 struct netdev_notifier_change_info change_info = {
8311                         .info = {
8312                                 .dev = dev,
8313                         },
8314                         .flags_changed = changes,
8315                 };
8316
8317                 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8318         }
8319 }
8320
8321 /**
8322  *      dev_change_flags - change device settings
8323  *      @dev: device
8324  *      @flags: device state flags
8325  *      @extack: netlink extended ack
8326  *
8327  *      Change settings on device based state flags. The flags are
8328  *      in the userspace exported format.
8329  */
8330 int dev_change_flags(struct net_device *dev, unsigned int flags,
8331                      struct netlink_ext_ack *extack)
8332 {
8333         int ret;
8334         unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8335
8336         ret = __dev_change_flags(dev, flags, extack);
8337         if (ret < 0)
8338                 return ret;
8339
8340         changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8341         __dev_notify_flags(dev, old_flags, changes);
8342         return ret;
8343 }
8344 EXPORT_SYMBOL(dev_change_flags);
8345
8346 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8347 {
8348         const struct net_device_ops *ops = dev->netdev_ops;
8349
8350         if (ops->ndo_change_mtu)
8351                 return ops->ndo_change_mtu(dev, new_mtu);
8352
8353         /* Pairs with all the lockless reads of dev->mtu in the stack */
8354         WRITE_ONCE(dev->mtu, new_mtu);
8355         return 0;
8356 }
8357 EXPORT_SYMBOL(__dev_set_mtu);
8358
8359 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8360                      struct netlink_ext_ack *extack)
8361 {
8362         /* MTU must be positive, and in range */
8363         if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8364                 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8365                 return -EINVAL;
8366         }
8367
8368         if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8369                 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8370                 return -EINVAL;
8371         }
8372         return 0;
8373 }
8374
8375 /**
8376  *      dev_set_mtu_ext - Change maximum transfer unit
8377  *      @dev: device
8378  *      @new_mtu: new transfer unit
8379  *      @extack: netlink extended ack
8380  *
8381  *      Change the maximum transfer size of the network device.
8382  */
8383 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8384                     struct netlink_ext_ack *extack)
8385 {
8386         int err, orig_mtu;
8387
8388         if (new_mtu == dev->mtu)
8389                 return 0;
8390
8391         err = dev_validate_mtu(dev, new_mtu, extack);
8392         if (err)
8393                 return err;
8394
8395         if (!netif_device_present(dev))
8396                 return -ENODEV;
8397
8398         err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8399         err = notifier_to_errno(err);
8400         if (err)
8401                 return err;
8402
8403         orig_mtu = dev->mtu;
8404         err = __dev_set_mtu(dev, new_mtu);
8405
8406         if (!err) {
8407                 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8408                                                    orig_mtu);
8409                 err = notifier_to_errno(err);
8410                 if (err) {
8411                         /* setting mtu back and notifying everyone again,
8412                          * so that they have a chance to revert changes.
8413                          */
8414                         __dev_set_mtu(dev, orig_mtu);
8415                         call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8416                                                      new_mtu);
8417                 }
8418         }
8419         return err;
8420 }
8421
8422 int dev_set_mtu(struct net_device *dev, int new_mtu)
8423 {
8424         struct netlink_ext_ack extack;
8425         int err;
8426
8427         memset(&extack, 0, sizeof(extack));
8428         err = dev_set_mtu_ext(dev, new_mtu, &extack);
8429         if (err && extack._msg)
8430                 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8431         return err;
8432 }
8433 EXPORT_SYMBOL(dev_set_mtu);
8434
8435 /**
8436  *      dev_change_tx_queue_len - Change TX queue length of a netdevice
8437  *      @dev: device
8438  *      @new_len: new tx queue length
8439  */
8440 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8441 {
8442         unsigned int orig_len = dev->tx_queue_len;
8443         int res;
8444
8445         if (new_len != (unsigned int)new_len)
8446                 return -ERANGE;
8447
8448         if (new_len != orig_len) {
8449                 dev->tx_queue_len = new_len;
8450                 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8451                 res = notifier_to_errno(res);
8452                 if (res)
8453                         goto err_rollback;
8454                 res = dev_qdisc_change_tx_queue_len(dev);
8455                 if (res)
8456                         goto err_rollback;
8457         }
8458
8459         return 0;
8460
8461 err_rollback:
8462         netdev_err(dev, "refused to change device tx_queue_len\n");
8463         dev->tx_queue_len = orig_len;
8464         return res;
8465 }
8466
8467 /**
8468  *      dev_set_group - Change group this device belongs to
8469  *      @dev: device
8470  *      @new_group: group this device should belong to
8471  */
8472 void dev_set_group(struct net_device *dev, int new_group)
8473 {
8474         dev->group = new_group;
8475 }
8476 EXPORT_SYMBOL(dev_set_group);
8477
8478 /**
8479  *      dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8480  *      @dev: device
8481  *      @addr: new address
8482  *      @extack: netlink extended ack
8483  */
8484 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8485                               struct netlink_ext_ack *extack)
8486 {
8487         struct netdev_notifier_pre_changeaddr_info info = {
8488                 .info.dev = dev,
8489                 .info.extack = extack,
8490                 .dev_addr = addr,
8491         };
8492         int rc;
8493
8494         rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8495         return notifier_to_errno(rc);
8496 }
8497 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8498
8499 /**
8500  *      dev_set_mac_address - Change Media Access Control Address
8501  *      @dev: device
8502  *      @sa: new address
8503  *      @extack: netlink extended ack
8504  *
8505  *      Change the hardware (MAC) address of the device
8506  */
8507 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8508                         struct netlink_ext_ack *extack)
8509 {
8510         const struct net_device_ops *ops = dev->netdev_ops;
8511         int err;
8512
8513         if (!ops->ndo_set_mac_address)
8514                 return -EOPNOTSUPP;
8515         if (sa->sa_family != dev->type)
8516                 return -EINVAL;
8517         if (!netif_device_present(dev))
8518                 return -ENODEV;
8519         err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8520         if (err)
8521                 return err;
8522         err = ops->ndo_set_mac_address(dev, sa);
8523         if (err)
8524                 return err;
8525         dev->addr_assign_type = NET_ADDR_SET;
8526         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8527         add_device_randomness(dev->dev_addr, dev->addr_len);
8528         return 0;
8529 }
8530 EXPORT_SYMBOL(dev_set_mac_address);
8531
8532 /**
8533  *      dev_change_carrier - Change device carrier
8534  *      @dev: device
8535  *      @new_carrier: new value
8536  *
8537  *      Change device carrier
8538  */
8539 int dev_change_carrier(struct net_device *dev, bool new_carrier)
8540 {
8541         const struct net_device_ops *ops = dev->netdev_ops;
8542
8543         if (!ops->ndo_change_carrier)
8544                 return -EOPNOTSUPP;
8545         if (!netif_device_present(dev))
8546                 return -ENODEV;
8547         return ops->ndo_change_carrier(dev, new_carrier);
8548 }
8549 EXPORT_SYMBOL(dev_change_carrier);
8550
8551 /**
8552  *      dev_get_phys_port_id - Get device physical port ID
8553  *      @dev: device
8554  *      @ppid: port ID
8555  *
8556  *      Get device physical port ID
8557  */
8558 int dev_get_phys_port_id(struct net_device *dev,
8559                          struct netdev_phys_item_id *ppid)
8560 {
8561         const struct net_device_ops *ops = dev->netdev_ops;
8562
8563         if (!ops->ndo_get_phys_port_id)
8564                 return -EOPNOTSUPP;
8565         return ops->ndo_get_phys_port_id(dev, ppid);
8566 }
8567 EXPORT_SYMBOL(dev_get_phys_port_id);
8568
8569 /**
8570  *      dev_get_phys_port_name - Get device physical port name
8571  *      @dev: device
8572  *      @name: port name
8573  *      @len: limit of bytes to copy to name
8574  *
8575  *      Get device physical port name
8576  */
8577 int dev_get_phys_port_name(struct net_device *dev,
8578                            char *name, size_t len)
8579 {
8580         const struct net_device_ops *ops = dev->netdev_ops;
8581         int err;
8582
8583         if (ops->ndo_get_phys_port_name) {
8584                 err = ops->ndo_get_phys_port_name(dev, name, len);
8585                 if (err != -EOPNOTSUPP)
8586                         return err;
8587         }
8588         return devlink_compat_phys_port_name_get(dev, name, len);
8589 }
8590 EXPORT_SYMBOL(dev_get_phys_port_name);
8591
8592 /**
8593  *      dev_get_port_parent_id - Get the device's port parent identifier
8594  *      @dev: network device
8595  *      @ppid: pointer to a storage for the port's parent identifier
8596  *      @recurse: allow/disallow recursion to lower devices
8597  *
8598  *      Get the devices's port parent identifier
8599  */
8600 int dev_get_port_parent_id(struct net_device *dev,
8601                            struct netdev_phys_item_id *ppid,
8602                            bool recurse)
8603 {
8604         const struct net_device_ops *ops = dev->netdev_ops;
8605         struct netdev_phys_item_id first = { };
8606         struct net_device *lower_dev;
8607         struct list_head *iter;
8608         int err;
8609
8610         if (ops->ndo_get_port_parent_id) {
8611                 err = ops->ndo_get_port_parent_id(dev, ppid);
8612                 if (err != -EOPNOTSUPP)
8613                         return err;
8614         }
8615
8616         err = devlink_compat_switch_id_get(dev, ppid);
8617         if (!err || err != -EOPNOTSUPP)
8618                 return err;
8619
8620         if (!recurse)
8621                 return -EOPNOTSUPP;
8622
8623         netdev_for_each_lower_dev(dev, lower_dev, iter) {
8624                 err = dev_get_port_parent_id(lower_dev, ppid, recurse);
8625                 if (err)
8626                         break;
8627                 if (!first.id_len)
8628                         first = *ppid;
8629                 else if (memcmp(&first, ppid, sizeof(*ppid)))
8630                         return -ENODATA;
8631         }
8632
8633         return err;
8634 }
8635 EXPORT_SYMBOL(dev_get_port_parent_id);
8636
8637 /**
8638  *      netdev_port_same_parent_id - Indicate if two network devices have
8639  *      the same port parent identifier
8640  *      @a: first network device
8641  *      @b: second network device
8642  */
8643 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
8644 {
8645         struct netdev_phys_item_id a_id = { };
8646         struct netdev_phys_item_id b_id = { };
8647
8648         if (dev_get_port_parent_id(a, &a_id, true) ||
8649             dev_get_port_parent_id(b, &b_id, true))
8650                 return false;
8651
8652         return netdev_phys_item_id_same(&a_id, &b_id);
8653 }
8654 EXPORT_SYMBOL(netdev_port_same_parent_id);
8655
8656 /**
8657  *      dev_change_proto_down - update protocol port state information
8658  *      @dev: device
8659  *      @proto_down: new value
8660  *
8661  *      This info can be used by switch drivers to set the phys state of the
8662  *      port.
8663  */
8664 int dev_change_proto_down(struct net_device *dev, bool proto_down)
8665 {
8666         const struct net_device_ops *ops = dev->netdev_ops;
8667
8668         if (!ops->ndo_change_proto_down)
8669                 return -EOPNOTSUPP;
8670         if (!netif_device_present(dev))
8671                 return -ENODEV;
8672         return ops->ndo_change_proto_down(dev, proto_down);
8673 }
8674 EXPORT_SYMBOL(dev_change_proto_down);
8675
8676 /**
8677  *      dev_change_proto_down_generic - generic implementation for
8678  *      ndo_change_proto_down that sets carrier according to
8679  *      proto_down.
8680  *
8681  *      @dev: device
8682  *      @proto_down: new value
8683  */
8684 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
8685 {
8686         if (proto_down)
8687                 netif_carrier_off(dev);
8688         else
8689                 netif_carrier_on(dev);
8690         dev->proto_down = proto_down;
8691         return 0;
8692 }
8693 EXPORT_SYMBOL(dev_change_proto_down_generic);
8694
8695 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
8696                     enum bpf_netdev_command cmd)
8697 {
8698         struct netdev_bpf xdp;
8699
8700         if (!bpf_op)
8701                 return 0;
8702
8703         memset(&xdp, 0, sizeof(xdp));
8704         xdp.command = cmd;
8705
8706         /* Query must always succeed. */
8707         WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
8708
8709         return xdp.prog_id;
8710 }
8711
8712 static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
8713                            struct netlink_ext_ack *extack, u32 flags,
8714                            struct bpf_prog *prog)
8715 {
8716         bool non_hw = !(flags & XDP_FLAGS_HW_MODE);
8717         struct bpf_prog *prev_prog = NULL;
8718         struct netdev_bpf xdp;
8719         int err;
8720
8721         if (non_hw) {
8722                 prev_prog = bpf_prog_by_id(__dev_xdp_query(dev, bpf_op,
8723                                                            XDP_QUERY_PROG));
8724                 if (IS_ERR(prev_prog))
8725                         prev_prog = NULL;
8726         }
8727
8728         memset(&xdp, 0, sizeof(xdp));
8729         if (flags & XDP_FLAGS_HW_MODE)
8730                 xdp.command = XDP_SETUP_PROG_HW;
8731         else
8732                 xdp.command = XDP_SETUP_PROG;
8733         xdp.extack = extack;
8734         xdp.flags = flags;
8735         xdp.prog = prog;
8736
8737         err = bpf_op(dev, &xdp);
8738         if (!err && non_hw)
8739                 bpf_prog_change_xdp(prev_prog, prog);
8740
8741         if (prev_prog)
8742                 bpf_prog_put(prev_prog);
8743
8744         return err;
8745 }
8746
8747 static void dev_xdp_uninstall(struct net_device *dev)
8748 {
8749         struct netdev_bpf xdp;
8750         bpf_op_t ndo_bpf;
8751
8752         /* Remove generic XDP */
8753         WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
8754
8755         /* Remove from the driver */
8756         ndo_bpf = dev->netdev_ops->ndo_bpf;
8757         if (!ndo_bpf)
8758                 return;
8759
8760         memset(&xdp, 0, sizeof(xdp));
8761         xdp.command = XDP_QUERY_PROG;
8762         WARN_ON(ndo_bpf(dev, &xdp));
8763         if (xdp.prog_id)
8764                 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
8765                                         NULL));
8766
8767         /* Remove HW offload */
8768         memset(&xdp, 0, sizeof(xdp));
8769         xdp.command = XDP_QUERY_PROG_HW;
8770         if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
8771                 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
8772                                         NULL));
8773 }
8774
8775 /**
8776  *      dev_change_xdp_fd - set or clear a bpf program for a device rx path
8777  *      @dev: device
8778  *      @extack: netlink extended ack
8779  *      @fd: new program fd or negative value to clear
8780  *      @expected_fd: old program fd that userspace expects to replace or clear
8781  *      @flags: xdp-related flags
8782  *
8783  *      Set or clear a bpf program for a device
8784  */
8785 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
8786                       int fd, int expected_fd, u32 flags)
8787 {
8788         const struct net_device_ops *ops = dev->netdev_ops;
8789         enum bpf_netdev_command query;
8790         u32 prog_id, expected_id = 0;
8791         bpf_op_t bpf_op, bpf_chk;
8792         struct bpf_prog *prog;
8793         bool offload;
8794         int err;
8795
8796         ASSERT_RTNL();
8797
8798         offload = flags & XDP_FLAGS_HW_MODE;
8799         query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
8800
8801         bpf_op = bpf_chk = ops->ndo_bpf;
8802         if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
8803                 NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
8804                 return -EOPNOTSUPP;
8805         }
8806         if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
8807                 bpf_op = generic_xdp_install;
8808         if (bpf_op == bpf_chk)
8809                 bpf_chk = generic_xdp_install;
8810
8811         prog_id = __dev_xdp_query(dev, bpf_op, query);
8812         if (flags & XDP_FLAGS_REPLACE) {
8813                 if (expected_fd >= 0) {
8814                         prog = bpf_prog_get_type_dev(expected_fd,
8815                                                      BPF_PROG_TYPE_XDP,
8816                                                      bpf_op == ops->ndo_bpf);
8817                         if (IS_ERR(prog))
8818                                 return PTR_ERR(prog);
8819                         expected_id = prog->aux->id;
8820                         bpf_prog_put(prog);
8821                 }
8822
8823                 if (prog_id != expected_id) {
8824                         NL_SET_ERR_MSG(extack, "Active program does not match expected");
8825                         return -EEXIST;
8826                 }
8827         }
8828         if (fd >= 0) {
8829                 if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
8830                         NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
8831                         return -EEXIST;
8832                 }
8833
8834                 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
8835                         NL_SET_ERR_MSG(extack, "XDP program already attached");
8836                         return -EBUSY;
8837                 }
8838
8839                 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
8840                                              bpf_op == ops->ndo_bpf);
8841                 if (IS_ERR(prog))
8842                         return PTR_ERR(prog);
8843
8844                 if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
8845                         NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
8846                         bpf_prog_put(prog);
8847                         return -EINVAL;
8848                 }
8849
8850                 if (prog->expected_attach_type == BPF_XDP_DEVMAP) {
8851                         NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
8852                         bpf_prog_put(prog);
8853                         return -EINVAL;
8854                 }
8855
8856                 /* prog->aux->id may be 0 for orphaned device-bound progs */
8857                 if (prog->aux->id && prog->aux->id == prog_id) {
8858                         bpf_prog_put(prog);
8859                         return 0;
8860                 }
8861         } else {
8862                 if (!prog_id)
8863                         return 0;
8864                 prog = NULL;
8865         }
8866
8867         err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
8868         if (err < 0 && prog)
8869                 bpf_prog_put(prog);
8870
8871         return err;
8872 }
8873
8874 /**
8875  *      dev_new_index   -       allocate an ifindex
8876  *      @net: the applicable net namespace
8877  *
8878  *      Returns a suitable unique value for a new device interface
8879  *      number.  The caller must hold the rtnl semaphore or the
8880  *      dev_base_lock to be sure it remains unique.
8881  */
8882 static int dev_new_index(struct net *net)
8883 {
8884         int ifindex = net->ifindex;
8885
8886         for (;;) {
8887                 if (++ifindex <= 0)
8888                         ifindex = 1;
8889                 if (!__dev_get_by_index(net, ifindex))
8890                         return net->ifindex = ifindex;
8891         }
8892 }
8893
8894 /* Delayed registration/unregisteration */
8895 static LIST_HEAD(net_todo_list);
8896 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
8897
8898 static void net_set_todo(struct net_device *dev)
8899 {
8900         list_add_tail(&dev->todo_list, &net_todo_list);
8901         dev_net(dev)->dev_unreg_count++;
8902 }
8903
8904 static void rollback_registered_many(struct list_head *head)
8905 {
8906         struct net_device *dev, *tmp;
8907         LIST_HEAD(close_head);
8908
8909         BUG_ON(dev_boot_phase);
8910         ASSERT_RTNL();
8911
8912         list_for_each_entry_safe(dev, tmp, head, unreg_list) {
8913                 /* Some devices call without registering
8914                  * for initialization unwind. Remove those
8915                  * devices and proceed with the remaining.
8916                  */
8917                 if (dev->reg_state == NETREG_UNINITIALIZED) {
8918                         pr_debug("unregister_netdevice: device %s/%p never was registered\n",
8919                                  dev->name, dev);
8920
8921                         WARN_ON(1);
8922                         list_del(&dev->unreg_list);
8923                         continue;
8924                 }
8925                 dev->dismantle = true;
8926                 BUG_ON(dev->reg_state != NETREG_REGISTERED);
8927         }
8928
8929         /* If device is running, close it first. */
8930         list_for_each_entry(dev, head, unreg_list)
8931                 list_add_tail(&dev->close_list, &close_head);
8932         dev_close_many(&close_head, true);
8933
8934         list_for_each_entry(dev, head, unreg_list) {
8935                 /* And unlink it from device chain. */
8936                 unlist_netdevice(dev);
8937
8938                 dev->reg_state = NETREG_UNREGISTERING;
8939         }
8940         flush_all_backlogs();
8941
8942         synchronize_net();
8943
8944         list_for_each_entry(dev, head, unreg_list) {
8945                 struct sk_buff *skb = NULL;
8946
8947                 /* Shutdown queueing discipline. */
8948                 dev_shutdown(dev);
8949
8950                 dev_xdp_uninstall(dev);
8951
8952                 /* Notify protocols, that we are about to destroy
8953                  * this device. They should clean all the things.
8954                  */
8955                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8956
8957                 if (!dev->rtnl_link_ops ||
8958                     dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
8959                         skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
8960                                                      GFP_KERNEL, NULL, 0);
8961
8962                 /*
8963                  *      Flush the unicast and multicast chains
8964                  */
8965                 dev_uc_flush(dev);
8966                 dev_mc_flush(dev);
8967
8968                 netdev_name_node_alt_flush(dev);
8969                 netdev_name_node_free(dev->name_node);
8970
8971                 if (dev->netdev_ops->ndo_uninit)
8972                         dev->netdev_ops->ndo_uninit(dev);
8973
8974                 if (skb)
8975                         rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
8976
8977                 /* Notifier chain MUST detach us all upper devices. */
8978                 WARN_ON(netdev_has_any_upper_dev(dev));
8979                 WARN_ON(netdev_has_any_lower_dev(dev));
8980
8981                 /* Remove entries from kobject tree */
8982                 netdev_unregister_kobject(dev);
8983 #ifdef CONFIG_XPS
8984                 /* Remove XPS queueing entries */
8985                 netif_reset_xps_queues_gt(dev, 0);
8986 #endif
8987         }
8988
8989         synchronize_net();
8990
8991         list_for_each_entry(dev, head, unreg_list)
8992                 dev_put(dev);
8993 }
8994
8995 static void rollback_registered(struct net_device *dev)
8996 {
8997         LIST_HEAD(single);
8998
8999         list_add(&dev->unreg_list, &single);
9000         rollback_registered_many(&single);
9001         list_del(&single);
9002 }
9003
9004 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9005         struct net_device *upper, netdev_features_t features)
9006 {
9007         netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9008         netdev_features_t feature;
9009         int feature_bit;
9010
9011         for_each_netdev_feature(upper_disables, feature_bit) {
9012                 feature = __NETIF_F_BIT(feature_bit);
9013                 if (!(upper->wanted_features & feature)
9014                     && (features & feature)) {
9015                         netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9016                                    &feature, upper->name);
9017                         features &= ~feature;
9018                 }
9019         }
9020
9021         return features;
9022 }
9023
9024 static void netdev_sync_lower_features(struct net_device *upper,
9025         struct net_device *lower, netdev_features_t features)
9026 {
9027         netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9028         netdev_features_t feature;
9029         int feature_bit;
9030
9031         for_each_netdev_feature(upper_disables, feature_bit) {
9032                 feature = __NETIF_F_BIT(feature_bit);
9033                 if (!(features & feature) && (lower->features & feature)) {
9034                         netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9035                                    &feature, lower->name);
9036                         lower->wanted_features &= ~feature;
9037                         __netdev_update_features(lower);
9038
9039                         if (unlikely(lower->features & feature))
9040                                 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9041                                             &feature, lower->name);
9042                         else
9043                                 netdev_features_change(lower);
9044                 }
9045         }
9046 }
9047
9048 static netdev_features_t netdev_fix_features(struct net_device *dev,
9049         netdev_features_t features)
9050 {
9051         /* Fix illegal checksum combinations */
9052         if ((features & NETIF_F_HW_CSUM) &&
9053             (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9054                 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9055                 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9056         }
9057
9058         /* TSO requires that SG is present as well. */
9059         if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9060                 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9061                 features &= ~NETIF_F_ALL_TSO;
9062         }
9063
9064         if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9065                                         !(features & NETIF_F_IP_CSUM)) {
9066                 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9067                 features &= ~NETIF_F_TSO;
9068                 features &= ~NETIF_F_TSO_ECN;
9069         }
9070
9071         if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9072                                          !(features & NETIF_F_IPV6_CSUM)) {
9073                 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9074                 features &= ~NETIF_F_TSO6;
9075         }
9076
9077         /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9078         if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9079                 features &= ~NETIF_F_TSO_MANGLEID;
9080
9081         /* TSO ECN requires that TSO is present as well. */
9082         if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9083                 features &= ~NETIF_F_TSO_ECN;
9084
9085         /* Software GSO depends on SG. */
9086         if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9087                 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9088                 features &= ~NETIF_F_GSO;
9089         }
9090
9091         /* GSO partial features require GSO partial be set */
9092         if ((features & dev->gso_partial_features) &&
9093             !(features & NETIF_F_GSO_PARTIAL)) {
9094                 netdev_dbg(dev,
9095                            "Dropping partially supported GSO features since no GSO partial.\n");
9096                 features &= ~dev->gso_partial_features;
9097         }
9098
9099         if (!(features & NETIF_F_RXCSUM)) {
9100                 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9101                  * successfully merged by hardware must also have the
9102                  * checksum verified by hardware.  If the user does not
9103                  * want to enable RXCSUM, logically, we should disable GRO_HW.
9104                  */
9105                 if (features & NETIF_F_GRO_HW) {
9106                         netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9107                         features &= ~NETIF_F_GRO_HW;
9108                 }
9109         }
9110
9111         /* LRO/HW-GRO features cannot be combined with RX-FCS */
9112         if (features & NETIF_F_RXFCS) {
9113                 if (features & NETIF_F_LRO) {
9114                         netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9115                         features &= ~NETIF_F_LRO;
9116                 }
9117
9118                 if (features & NETIF_F_GRO_HW) {
9119                         netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9120                         features &= ~NETIF_F_GRO_HW;
9121                 }
9122         }
9123
9124         return features;
9125 }
9126
9127 int __netdev_update_features(struct net_device *dev)
9128 {
9129         struct net_device *upper, *lower;
9130         netdev_features_t features;
9131         struct list_head *iter;
9132         int err = -1;
9133
9134         ASSERT_RTNL();
9135
9136         features = netdev_get_wanted_features(dev);
9137
9138         if (dev->netdev_ops->ndo_fix_features)
9139                 features = dev->netdev_ops->ndo_fix_features(dev, features);
9140
9141         /* driver might be less strict about feature dependencies */
9142         features = netdev_fix_features(dev, features);
9143
9144         /* some features can't be enabled if they're off an an upper device */
9145         netdev_for_each_upper_dev_rcu(dev, upper, iter)
9146                 features = netdev_sync_upper_features(dev, upper, features);
9147
9148         if (dev->features == features)
9149                 goto sync_lower;
9150
9151         netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9152                 &dev->features, &features);
9153
9154         if (dev->netdev_ops->ndo_set_features)
9155                 err = dev->netdev_ops->ndo_set_features(dev, features);
9156         else
9157                 err = 0;
9158
9159         if (unlikely(err < 0)) {
9160                 netdev_err(dev,
9161                         "set_features() failed (%d); wanted %pNF, left %pNF\n",
9162                         err, &features, &dev->features);
9163                 /* return non-0 since some features might have changed and
9164                  * it's better to fire a spurious notification than miss it
9165                  */
9166                 return -1;
9167         }
9168
9169 sync_lower:
9170         /* some features must be disabled on lower devices when disabled
9171          * on an upper device (think: bonding master or bridge)
9172          */
9173         netdev_for_each_lower_dev(dev, lower, iter)
9174                 netdev_sync_lower_features(dev, lower, features);
9175
9176         if (!err) {
9177                 netdev_features_t diff = features ^ dev->features;
9178
9179                 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9180                         /* udp_tunnel_{get,drop}_rx_info both need
9181                          * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9182                          * device, or they won't do anything.
9183                          * Thus we need to update dev->features
9184                          * *before* calling udp_tunnel_get_rx_info,
9185                          * but *after* calling udp_tunnel_drop_rx_info.
9186                          */
9187                         if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9188                                 dev->features = features;
9189                                 udp_tunnel_get_rx_info(dev);
9190                         } else {
9191                                 udp_tunnel_drop_rx_info(dev);
9192                         }
9193                 }
9194
9195                 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9196                         if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9197                                 dev->features = features;
9198                                 err |= vlan_get_rx_ctag_filter_info(dev);
9199                         } else {
9200                                 vlan_drop_rx_ctag_filter_info(dev);
9201                         }
9202                 }
9203
9204                 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9205                         if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9206                                 dev->features = features;
9207                                 err |= vlan_get_rx_stag_filter_info(dev);
9208                         } else {
9209                                 vlan_drop_rx_stag_filter_info(dev);
9210                         }
9211                 }
9212
9213                 dev->features = features;
9214         }
9215
9216         return err < 0 ? 0 : 1;
9217 }
9218
9219 /**
9220  *      netdev_update_features - recalculate device features
9221  *      @dev: the device to check
9222  *
9223  *      Recalculate dev->features set and send notifications if it
9224  *      has changed. Should be called after driver or hardware dependent
9225  *      conditions might have changed that influence the features.
9226  */
9227 void netdev_update_features(struct net_device *dev)
9228 {
9229         if (__netdev_update_features(dev))
9230                 netdev_features_change(dev);
9231 }
9232 EXPORT_SYMBOL(netdev_update_features);
9233
9234 /**
9235  *      netdev_change_features - recalculate device features
9236  *      @dev: the device to check
9237  *
9238  *      Recalculate dev->features set and send notifications even
9239  *      if they have not changed. Should be called instead of
9240  *      netdev_update_features() if also dev->vlan_features might
9241  *      have changed to allow the changes to be propagated to stacked
9242  *      VLAN devices.
9243  */
9244 void netdev_change_features(struct net_device *dev)
9245 {
9246         __netdev_update_features(dev);
9247         netdev_features_change(dev);
9248 }
9249 EXPORT_SYMBOL(netdev_change_features);
9250
9251 /**
9252  *      netif_stacked_transfer_operstate -      transfer operstate
9253  *      @rootdev: the root or lower level device to transfer state from
9254  *      @dev: the device to transfer operstate to
9255  *
9256  *      Transfer operational state from root to device. This is normally
9257  *      called when a stacking relationship exists between the root
9258  *      device and the device(a leaf device).
9259  */
9260 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9261                                         struct net_device *dev)
9262 {
9263         if (rootdev->operstate == IF_OPER_DORMANT)
9264                 netif_dormant_on(dev);
9265         else
9266                 netif_dormant_off(dev);
9267
9268         if (rootdev->operstate == IF_OPER_TESTING)
9269                 netif_testing_on(dev);
9270         else
9271                 netif_testing_off(dev);
9272
9273         if (netif_carrier_ok(rootdev))
9274                 netif_carrier_on(dev);
9275         else
9276                 netif_carrier_off(dev);
9277 }
9278 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
9279
9280 static int netif_alloc_rx_queues(struct net_device *dev)
9281 {
9282         unsigned int i, count = dev->num_rx_queues;
9283         struct netdev_rx_queue *rx;
9284         size_t sz = count * sizeof(*rx);
9285         int err = 0;
9286
9287         BUG_ON(count < 1);
9288
9289         rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
9290         if (!rx)
9291                 return -ENOMEM;
9292
9293         dev->_rx = rx;
9294
9295         for (i = 0; i < count; i++) {
9296                 rx[i].dev = dev;
9297
9298                 /* XDP RX-queue setup */
9299                 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
9300                 if (err < 0)
9301                         goto err_rxq_info;
9302         }
9303         return 0;
9304
9305 err_rxq_info:
9306         /* Rollback successful reg's and free other resources */
9307         while (i--)
9308                 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
9309         kvfree(dev->_rx);
9310         dev->_rx = NULL;
9311         return err;
9312 }
9313
9314 static void netif_free_rx_queues(struct net_device *dev)
9315 {
9316         unsigned int i, count = dev->num_rx_queues;
9317
9318         /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
9319         if (!dev->_rx)
9320                 return;
9321
9322         for (i = 0; i < count; i++)
9323                 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
9324
9325         kvfree(dev->_rx);
9326 }
9327
9328 static void netdev_init_one_queue(struct net_device *dev,
9329                                   struct netdev_queue *queue, void *_unused)
9330 {
9331         /* Initialize queue lock */
9332         spin_lock_init(&queue->_xmit_lock);
9333         netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
9334         queue->xmit_lock_owner = -1;
9335         netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
9336         queue->dev = dev;
9337 #ifdef CONFIG_BQL
9338         dql_init(&queue->dql, HZ);
9339 #endif
9340 }
9341
9342 static void netif_free_tx_queues(struct net_device *dev)
9343 {
9344         kvfree(dev->_tx);
9345 }
9346
9347 static int netif_alloc_netdev_queues(struct net_device *dev)
9348 {
9349         unsigned int count = dev->num_tx_queues;
9350         struct netdev_queue *tx;
9351         size_t sz = count * sizeof(*tx);
9352
9353         if (count < 1 || count > 0xffff)
9354                 return -EINVAL;
9355
9356         tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
9357         if (!tx)
9358                 return -ENOMEM;
9359
9360         dev->_tx = tx;
9361
9362         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
9363         spin_lock_init(&dev->tx_global_lock);
9364
9365         return 0;
9366 }
9367
9368 void netif_tx_stop_all_queues(struct net_device *dev)
9369 {
9370         unsigned int i;
9371
9372         for (i = 0; i < dev->num_tx_queues; i++) {
9373                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
9374
9375                 netif_tx_stop_queue(txq);
9376         }
9377 }
9378 EXPORT_SYMBOL(netif_tx_stop_all_queues);
9379
9380 void netdev_update_lockdep_key(struct net_device *dev)
9381 {
9382         lockdep_unregister_key(&dev->addr_list_lock_key);
9383         lockdep_register_key(&dev->addr_list_lock_key);
9384
9385         lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
9386 }
9387 EXPORT_SYMBOL(netdev_update_lockdep_key);
9388
9389 /**
9390  *      register_netdevice      - register a network device
9391  *      @dev: device to register
9392  *
9393  *      Take a completed network device structure and add it to the kernel
9394  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9395  *      chain. 0 is returned on success. A negative errno code is returned
9396  *      on a failure to set up the device, or if the name is a duplicate.
9397  *
9398  *      Callers must hold the rtnl semaphore. You may want
9399  *      register_netdev() instead of this.
9400  *
9401  *      BUGS:
9402  *      The locking appears insufficient to guarantee two parallel registers
9403  *      will not get the same name.
9404  */
9405
9406 int register_netdevice(struct net_device *dev)
9407 {
9408         int ret;
9409         struct net *net = dev_net(dev);
9410
9411         BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
9412                      NETDEV_FEATURE_COUNT);
9413         BUG_ON(dev_boot_phase);
9414         ASSERT_RTNL();
9415
9416         might_sleep();
9417
9418         /* When net_device's are persistent, this will be fatal. */
9419         BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
9420         BUG_ON(!net);
9421
9422         ret = ethtool_check_ops(dev->ethtool_ops);
9423         if (ret)
9424                 return ret;
9425
9426         spin_lock_init(&dev->addr_list_lock);
9427         lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
9428
9429         ret = dev_get_valid_name(net, dev, dev->name);
9430         if (ret < 0)
9431                 goto out;
9432
9433         ret = -ENOMEM;
9434         dev->name_node = netdev_name_node_head_alloc(dev);
9435         if (!dev->name_node)
9436                 goto out;
9437
9438         /* Init, if this function is available */
9439         if (dev->netdev_ops->ndo_init) {
9440                 ret = dev->netdev_ops->ndo_init(dev);
9441                 if (ret) {
9442                         if (ret > 0)
9443                                 ret = -EIO;
9444                         goto err_free_name;
9445                 }
9446         }
9447
9448         if (((dev->hw_features | dev->features) &
9449              NETIF_F_HW_VLAN_CTAG_FILTER) &&
9450             (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
9451              !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
9452                 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
9453                 ret = -EINVAL;
9454                 goto err_uninit;
9455         }
9456
9457         ret = -EBUSY;
9458         if (!dev->ifindex)
9459                 dev->ifindex = dev_new_index(net);
9460         else if (__dev_get_by_index(net, dev->ifindex))
9461                 goto err_uninit;
9462
9463         /* Transfer changeable features to wanted_features and enable
9464          * software offloads (GSO and GRO).
9465          */
9466         dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
9467         dev->features |= NETIF_F_SOFT_FEATURES;
9468
9469         if (dev->netdev_ops->ndo_udp_tunnel_add) {
9470                 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9471                 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9472         }
9473
9474         dev->wanted_features = dev->features & dev->hw_features;
9475
9476         if (!(dev->flags & IFF_LOOPBACK))
9477                 dev->hw_features |= NETIF_F_NOCACHE_COPY;
9478
9479         /* If IPv4 TCP segmentation offload is supported we should also
9480          * allow the device to enable segmenting the frame with the option
9481          * of ignoring a static IP ID value.  This doesn't enable the
9482          * feature itself but allows the user to enable it later.
9483          */
9484         if (dev->hw_features & NETIF_F_TSO)
9485                 dev->hw_features |= NETIF_F_TSO_MANGLEID;
9486         if (dev->vlan_features & NETIF_F_TSO)
9487                 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
9488         if (dev->mpls_features & NETIF_F_TSO)
9489                 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
9490         if (dev->hw_enc_features & NETIF_F_TSO)
9491                 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9492
9493         /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
9494          */
9495         dev->vlan_features |= NETIF_F_HIGHDMA;
9496
9497         /* Make NETIF_F_SG inheritable to tunnel devices.
9498          */
9499         dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
9500
9501         /* Make NETIF_F_SG inheritable to MPLS.
9502          */
9503         dev->mpls_features |= NETIF_F_SG;
9504
9505         ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
9506         ret = notifier_to_errno(ret);
9507         if (ret)
9508                 goto err_uninit;
9509
9510         ret = netdev_register_kobject(dev);
9511         if (ret) {
9512                 dev->reg_state = NETREG_UNREGISTERED;
9513                 goto err_uninit;
9514         }
9515         dev->reg_state = NETREG_REGISTERED;
9516
9517         __netdev_update_features(dev);
9518
9519         /*
9520          *      Default initial state at registry is that the
9521          *      device is present.
9522          */
9523
9524         set_bit(__LINK_STATE_PRESENT, &dev->state);
9525
9526         linkwatch_init_dev(dev);
9527
9528         dev_init_scheduler(dev);
9529         dev_hold(dev);
9530         list_netdevice(dev);
9531         add_device_randomness(dev->dev_addr, dev->addr_len);
9532
9533         /* If the device has permanent device address, driver should
9534          * set dev_addr and also addr_assign_type should be set to
9535          * NET_ADDR_PERM (default value).
9536          */
9537         if (dev->addr_assign_type == NET_ADDR_PERM)
9538                 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9539
9540         /* Notify protocols, that a new device appeared. */
9541         ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
9542         ret = notifier_to_errno(ret);
9543         if (ret) {
9544                 rollback_registered(dev);
9545                 rcu_barrier();
9546
9547                 dev->reg_state = NETREG_UNREGISTERED;
9548         }
9549         /*
9550          *      Prevent userspace races by waiting until the network
9551          *      device is fully setup before sending notifications.
9552          */
9553         if (!dev->rtnl_link_ops ||
9554             dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
9555                 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
9556
9557 out:
9558         return ret;
9559
9560 err_uninit:
9561         if (dev->netdev_ops->ndo_uninit)
9562                 dev->netdev_ops->ndo_uninit(dev);
9563         if (dev->priv_destructor)
9564                 dev->priv_destructor(dev);
9565 err_free_name:
9566         netdev_name_node_free(dev->name_node);
9567         goto out;
9568 }
9569 EXPORT_SYMBOL(register_netdevice);
9570
9571 /**
9572  *      init_dummy_netdev       - init a dummy network device for NAPI
9573  *      @dev: device to init
9574  *
9575  *      This takes a network device structure and initialize the minimum
9576  *      amount of fields so it can be used to schedule NAPI polls without
9577  *      registering a full blown interface. This is to be used by drivers
9578  *      that need to tie several hardware interfaces to a single NAPI
9579  *      poll scheduler due to HW limitations.
9580  */
9581 int init_dummy_netdev(struct net_device *dev)
9582 {
9583         /* Clear everything. Note we don't initialize spinlocks
9584          * are they aren't supposed to be taken by any of the
9585          * NAPI code and this dummy netdev is supposed to be
9586          * only ever used for NAPI polls
9587          */
9588         memset(dev, 0, sizeof(struct net_device));
9589
9590         /* make sure we BUG if trying to hit standard
9591          * register/unregister code path
9592          */
9593         dev->reg_state = NETREG_DUMMY;
9594
9595         /* NAPI wants this */
9596         INIT_LIST_HEAD(&dev->napi_list);
9597
9598         /* a dummy interface is started by default */
9599         set_bit(__LINK_STATE_PRESENT, &dev->state);
9600         set_bit(__LINK_STATE_START, &dev->state);
9601
9602         /* napi_busy_loop stats accounting wants this */
9603         dev_net_set(dev, &init_net);
9604
9605         /* Note : We dont allocate pcpu_refcnt for dummy devices,
9606          * because users of this 'device' dont need to change
9607          * its refcount.
9608          */
9609
9610         return 0;
9611 }
9612 EXPORT_SYMBOL_GPL(init_dummy_netdev);
9613
9614
9615 /**
9616  *      register_netdev - register a network device
9617  *      @dev: device to register
9618  *
9619  *      Take a completed network device structure and add it to the kernel
9620  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9621  *      chain. 0 is returned on success. A negative errno code is returned
9622  *      on a failure to set up the device, or if the name is a duplicate.
9623  *
9624  *      This is a wrapper around register_netdevice that takes the rtnl semaphore
9625  *      and expands the device name if you passed a format string to
9626  *      alloc_netdev.
9627  */
9628 int register_netdev(struct net_device *dev)
9629 {
9630         int err;
9631
9632         if (rtnl_lock_killable())
9633                 return -EINTR;
9634         err = register_netdevice(dev);
9635         rtnl_unlock();
9636         return err;
9637 }
9638 EXPORT_SYMBOL(register_netdev);
9639
9640 int netdev_refcnt_read(const struct net_device *dev)
9641 {
9642         int i, refcnt = 0;
9643
9644         for_each_possible_cpu(i)
9645                 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
9646         return refcnt;
9647 }
9648 EXPORT_SYMBOL(netdev_refcnt_read);
9649
9650 /**
9651  * netdev_wait_allrefs - wait until all references are gone.
9652  * @dev: target net_device
9653  *
9654  * This is called when unregistering network devices.
9655  *
9656  * Any protocol or device that holds a reference should register
9657  * for netdevice notification, and cleanup and put back the
9658  * reference if they receive an UNREGISTER event.
9659  * We can get stuck here if buggy protocols don't correctly
9660  * call dev_put.
9661  */
9662 static void netdev_wait_allrefs(struct net_device *dev)
9663 {
9664         unsigned long rebroadcast_time, warning_time;
9665         int refcnt;
9666
9667         linkwatch_forget_dev(dev);
9668
9669         rebroadcast_time = warning_time = jiffies;
9670         refcnt = netdev_refcnt_read(dev);
9671
9672         while (refcnt != 0) {
9673                 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
9674                         rtnl_lock();
9675
9676                         /* Rebroadcast unregister notification */
9677                         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
9678
9679                         __rtnl_unlock();
9680                         rcu_barrier();
9681                         rtnl_lock();
9682
9683                         if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
9684                                      &dev->state)) {
9685                                 /* We must not have linkwatch events
9686                                  * pending on unregister. If this
9687                                  * happens, we simply run the queue
9688                                  * unscheduled, resulting in a noop
9689                                  * for this device.
9690                                  */
9691                                 linkwatch_run_queue();
9692                         }
9693
9694                         __rtnl_unlock();
9695
9696                         rebroadcast_time = jiffies;
9697                 }
9698
9699                 msleep(250);
9700
9701                 refcnt = netdev_refcnt_read(dev);
9702
9703                 if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
9704                         pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
9705                                  dev->name, refcnt);
9706                         warning_time = jiffies;
9707                 }
9708         }
9709 }
9710
9711 /* The sequence is:
9712  *
9713  *      rtnl_lock();
9714  *      ...
9715  *      register_netdevice(x1);
9716  *      register_netdevice(x2);
9717  *      ...
9718  *      unregister_netdevice(y1);
9719  *      unregister_netdevice(y2);
9720  *      ...
9721  *      rtnl_unlock();
9722  *      free_netdev(y1);
9723  *      free_netdev(y2);
9724  *
9725  * We are invoked by rtnl_unlock().
9726  * This allows us to deal with problems:
9727  * 1) We can delete sysfs objects which invoke hotplug
9728  *    without deadlocking with linkwatch via keventd.
9729  * 2) Since we run with the RTNL semaphore not held, we can sleep
9730  *    safely in order to wait for the netdev refcnt to drop to zero.
9731  *
9732  * We must not return until all unregister events added during
9733  * the interval the lock was held have been completed.
9734  */
9735 void netdev_run_todo(void)
9736 {
9737         struct list_head list;
9738
9739         /* Snapshot list, allow later requests */
9740         list_replace_init(&net_todo_list, &list);
9741
9742         __rtnl_unlock();
9743
9744
9745         /* Wait for rcu callbacks to finish before next phase */
9746         if (!list_empty(&list))
9747                 rcu_barrier();
9748
9749         while (!list_empty(&list)) {
9750                 struct net_device *dev
9751                         = list_first_entry(&list, struct net_device, todo_list);
9752                 list_del(&dev->todo_list);
9753
9754                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
9755                         pr_err("network todo '%s' but state %d\n",
9756                                dev->name, dev->reg_state);
9757                         dump_stack();
9758                         continue;
9759                 }
9760
9761                 dev->reg_state = NETREG_UNREGISTERED;
9762
9763                 netdev_wait_allrefs(dev);
9764
9765                 /* paranoia */
9766                 BUG_ON(netdev_refcnt_read(dev));
9767                 BUG_ON(!list_empty(&dev->ptype_all));
9768                 BUG_ON(!list_empty(&dev->ptype_specific));
9769                 WARN_ON(rcu_access_pointer(dev->ip_ptr));
9770                 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
9771 #if IS_ENABLED(CONFIG_DECNET)
9772                 WARN_ON(dev->dn_ptr);
9773 #endif
9774                 if (dev->priv_destructor)
9775                         dev->priv_destructor(dev);
9776                 if (dev->needs_free_netdev)
9777                         free_netdev(dev);
9778
9779                 /* Report a network device has been unregistered */
9780                 rtnl_lock();
9781                 dev_net(dev)->dev_unreg_count--;
9782                 __rtnl_unlock();
9783                 wake_up(&netdev_unregistering_wq);
9784
9785                 /* Free network device */
9786                 kobject_put(&dev->dev.kobj);
9787         }
9788 }
9789
9790 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
9791  * all the same fields in the same order as net_device_stats, with only
9792  * the type differing, but rtnl_link_stats64 may have additional fields
9793  * at the end for newer counters.
9794  */
9795 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
9796                              const struct net_device_stats *netdev_stats)
9797 {
9798 #if BITS_PER_LONG == 64
9799         BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
9800         memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
9801         /* zero out counters that only exist in rtnl_link_stats64 */
9802         memset((char *)stats64 + sizeof(*netdev_stats), 0,
9803                sizeof(*stats64) - sizeof(*netdev_stats));
9804 #else
9805         size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
9806         const unsigned long *src = (const unsigned long *)netdev_stats;
9807         u64 *dst = (u64 *)stats64;
9808
9809         BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
9810         for (i = 0; i < n; i++)
9811                 dst[i] = src[i];
9812         /* zero out counters that only exist in rtnl_link_stats64 */
9813         memset((char *)stats64 + n * sizeof(u64), 0,
9814                sizeof(*stats64) - n * sizeof(u64));
9815 #endif
9816 }
9817 EXPORT_SYMBOL(netdev_stats_to_stats64);
9818
9819 /**
9820  *      dev_get_stats   - get network device statistics
9821  *      @dev: device to get statistics from
9822  *      @storage: place to store stats
9823  *
9824  *      Get network statistics from device. Return @storage.
9825  *      The device driver may provide its own method by setting
9826  *      dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
9827  *      otherwise the internal statistics structure is used.
9828  */
9829 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
9830                                         struct rtnl_link_stats64 *storage)
9831 {
9832         const struct net_device_ops *ops = dev->netdev_ops;
9833
9834         if (ops->ndo_get_stats64) {
9835                 memset(storage, 0, sizeof(*storage));
9836                 ops->ndo_get_stats64(dev, storage);
9837         } else if (ops->ndo_get_stats) {
9838                 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
9839         } else {
9840                 netdev_stats_to_stats64(storage, &dev->stats);
9841         }
9842         storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
9843         storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
9844         storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
9845         return storage;
9846 }
9847 EXPORT_SYMBOL(dev_get_stats);
9848
9849 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
9850 {
9851         struct netdev_queue *queue = dev_ingress_queue(dev);
9852
9853 #ifdef CONFIG_NET_CLS_ACT
9854         if (queue)
9855                 return queue;
9856         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
9857         if (!queue)
9858                 return NULL;
9859         netdev_init_one_queue(dev, queue, NULL);
9860         RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
9861         queue->qdisc_sleeping = &noop_qdisc;
9862         rcu_assign_pointer(dev->ingress_queue, queue);
9863 #endif
9864         return queue;
9865 }
9866
9867 static const struct ethtool_ops default_ethtool_ops;
9868
9869 void netdev_set_default_ethtool_ops(struct net_device *dev,
9870                                     const struct ethtool_ops *ops)
9871 {
9872         if (dev->ethtool_ops == &default_ethtool_ops)
9873                 dev->ethtool_ops = ops;
9874 }
9875 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
9876
9877 void netdev_freemem(struct net_device *dev)
9878 {
9879         char *addr = (char *)dev - dev->padded;
9880
9881         kvfree(addr);
9882 }
9883
9884 /**
9885  * alloc_netdev_mqs - allocate network device
9886  * @sizeof_priv: size of private data to allocate space for
9887  * @name: device name format string
9888  * @name_assign_type: origin of device name
9889  * @setup: callback to initialize device
9890  * @txqs: the number of TX subqueues to allocate
9891  * @rxqs: the number of RX subqueues to allocate
9892  *
9893  * Allocates a struct net_device with private data area for driver use
9894  * and performs basic initialization.  Also allocates subqueue structs
9895  * for each queue on the device.
9896  */
9897 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
9898                 unsigned char name_assign_type,
9899                 void (*setup)(struct net_device *),
9900                 unsigned int txqs, unsigned int rxqs)
9901 {
9902         struct net_device *dev;
9903         unsigned int alloc_size;
9904         struct net_device *p;
9905
9906         BUG_ON(strlen(name) >= sizeof(dev->name));
9907
9908         if (txqs < 1) {
9909                 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
9910                 return NULL;
9911         }
9912
9913         if (rxqs < 1) {
9914                 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
9915                 return NULL;
9916         }
9917
9918         alloc_size = sizeof(struct net_device);
9919         if (sizeof_priv) {
9920                 /* ensure 32-byte alignment of private area */
9921                 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
9922                 alloc_size += sizeof_priv;
9923         }
9924         /* ensure 32-byte alignment of whole construct */
9925         alloc_size += NETDEV_ALIGN - 1;
9926
9927         p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
9928         if (!p)
9929                 return NULL;
9930
9931         dev = PTR_ALIGN(p, NETDEV_ALIGN);
9932         dev->padded = (char *)dev - (char *)p;
9933
9934         dev->pcpu_refcnt = alloc_percpu(int);
9935         if (!dev->pcpu_refcnt)
9936                 goto free_dev;
9937
9938         if (dev_addr_init(dev))
9939                 goto free_pcpu;
9940
9941         dev_mc_init(dev);
9942         dev_uc_init(dev);
9943
9944         dev_net_set(dev, &init_net);
9945
9946         lockdep_register_key(&dev->addr_list_lock_key);
9947
9948         dev->gso_max_size = GSO_MAX_SIZE;
9949         dev->gso_max_segs = GSO_MAX_SEGS;
9950         dev->upper_level = 1;
9951         dev->lower_level = 1;
9952
9953         INIT_LIST_HEAD(&dev->napi_list);
9954         INIT_LIST_HEAD(&dev->unreg_list);
9955         INIT_LIST_HEAD(&dev->close_list);
9956         INIT_LIST_HEAD(&dev->link_watch_list);
9957         INIT_LIST_HEAD(&dev->adj_list.upper);
9958         INIT_LIST_HEAD(&dev->adj_list.lower);
9959         INIT_LIST_HEAD(&dev->ptype_all);
9960         INIT_LIST_HEAD(&dev->ptype_specific);
9961         INIT_LIST_HEAD(&dev->net_notifier_list);
9962 #ifdef CONFIG_NET_SCHED
9963         hash_init(dev->qdisc_hash);
9964 #endif
9965         dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
9966         setup(dev);
9967
9968         if (!dev->tx_queue_len) {
9969                 dev->priv_flags |= IFF_NO_QUEUE;
9970                 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
9971         }
9972
9973         dev->num_tx_queues = txqs;
9974         dev->real_num_tx_queues = txqs;
9975         if (netif_alloc_netdev_queues(dev))
9976                 goto free_all;
9977
9978         dev->num_rx_queues = rxqs;
9979         dev->real_num_rx_queues = rxqs;
9980         if (netif_alloc_rx_queues(dev))
9981                 goto free_all;
9982
9983         strcpy(dev->name, name);
9984         dev->name_assign_type = name_assign_type;
9985         dev->group = INIT_NETDEV_GROUP;
9986         if (!dev->ethtool_ops)
9987                 dev->ethtool_ops = &default_ethtool_ops;
9988
9989         nf_hook_ingress_init(dev);
9990
9991         return dev;
9992
9993 free_all:
9994         free_netdev(dev);
9995         return NULL;
9996
9997 free_pcpu:
9998         free_percpu(dev->pcpu_refcnt);
9999 free_dev:
10000         netdev_freemem(dev);
10001         return NULL;
10002 }
10003 EXPORT_SYMBOL(alloc_netdev_mqs);
10004
10005 /**
10006  * free_netdev - free network device
10007  * @dev: device
10008  *
10009  * This function does the last stage of destroying an allocated device
10010  * interface. The reference to the device object is released. If this
10011  * is the last reference then it will be freed.Must be called in process
10012  * context.
10013  */
10014 void free_netdev(struct net_device *dev)
10015 {
10016         struct napi_struct *p, *n;
10017
10018         might_sleep();
10019         netif_free_tx_queues(dev);
10020         netif_free_rx_queues(dev);
10021
10022         kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10023
10024         /* Flush device addresses */
10025         dev_addr_flush(dev);
10026
10027         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10028                 netif_napi_del(p);
10029
10030         free_percpu(dev->pcpu_refcnt);
10031         dev->pcpu_refcnt = NULL;
10032         free_percpu(dev->xdp_bulkq);
10033         dev->xdp_bulkq = NULL;
10034
10035         lockdep_unregister_key(&dev->addr_list_lock_key);
10036
10037         /*  Compatibility with error handling in drivers */
10038         if (dev->reg_state == NETREG_UNINITIALIZED) {
10039                 netdev_freemem(dev);
10040                 return;
10041         }
10042
10043         BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10044         dev->reg_state = NETREG_RELEASED;
10045
10046         /* will free via device release */
10047         put_device(&dev->dev);
10048 }
10049 EXPORT_SYMBOL(free_netdev);
10050
10051 /**
10052  *      synchronize_net -  Synchronize with packet receive processing
10053  *
10054  *      Wait for packets currently being received to be done.
10055  *      Does not block later packets from starting.
10056  */
10057 void synchronize_net(void)
10058 {
10059         might_sleep();
10060         if (rtnl_is_locked())
10061                 synchronize_rcu_expedited();
10062         else
10063                 synchronize_rcu();
10064 }
10065 EXPORT_SYMBOL(synchronize_net);
10066
10067 /**
10068  *      unregister_netdevice_queue - remove device from the kernel
10069  *      @dev: device
10070  *      @head: list
10071  *
10072  *      This function shuts down a device interface and removes it
10073  *      from the kernel tables.
10074  *      If head not NULL, device is queued to be unregistered later.
10075  *
10076  *      Callers must hold the rtnl semaphore.  You may want
10077  *      unregister_netdev() instead of this.
10078  */
10079
10080 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10081 {
10082         ASSERT_RTNL();
10083
10084         if (head) {
10085                 list_move_tail(&dev->unreg_list, head);
10086         } else {
10087                 rollback_registered(dev);
10088                 /* Finish processing unregister after unlock */
10089                 net_set_todo(dev);
10090         }
10091 }
10092 EXPORT_SYMBOL(unregister_netdevice_queue);
10093
10094 /**
10095  *      unregister_netdevice_many - unregister many devices
10096  *      @head: list of devices
10097  *
10098  *  Note: As most callers use a stack allocated list_head,
10099  *  we force a list_del() to make sure stack wont be corrupted later.
10100  */
10101 void unregister_netdevice_many(struct list_head *head)
10102 {
10103         struct net_device *dev;
10104
10105         if (!list_empty(head)) {
10106                 rollback_registered_many(head);
10107                 list_for_each_entry(dev, head, unreg_list)
10108                         net_set_todo(dev);
10109                 list_del(head);
10110         }
10111 }
10112 EXPORT_SYMBOL(unregister_netdevice_many);
10113
10114 /**
10115  *      unregister_netdev - remove device from the kernel
10116  *      @dev: device
10117  *
10118  *      This function shuts down a device interface and removes it
10119  *      from the kernel tables.
10120  *
10121  *      This is just a wrapper for unregister_netdevice that takes
10122  *      the rtnl semaphore.  In general you want to use this and not
10123  *      unregister_netdevice.
10124  */
10125 void unregister_netdev(struct net_device *dev)
10126 {
10127         rtnl_lock();
10128         unregister_netdevice(dev);
10129         rtnl_unlock();
10130 }
10131 EXPORT_SYMBOL(unregister_netdev);
10132
10133 /**
10134  *      dev_change_net_namespace - move device to different nethost namespace
10135  *      @dev: device
10136  *      @net: network namespace
10137  *      @pat: If not NULL name pattern to try if the current device name
10138  *            is already taken in the destination network namespace.
10139  *
10140  *      This function shuts down a device interface and moves it
10141  *      to a new network namespace. On success 0 is returned, on
10142  *      a failure a netagive errno code is returned.
10143  *
10144  *      Callers must hold the rtnl semaphore.
10145  */
10146
10147 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
10148 {
10149         struct net *net_old = dev_net(dev);
10150         int err, new_nsid, new_ifindex;
10151
10152         ASSERT_RTNL();
10153
10154         /* Don't allow namespace local devices to be moved. */
10155         err = -EINVAL;
10156         if (dev->features & NETIF_F_NETNS_LOCAL)
10157                 goto out;
10158
10159         /* Ensure the device has been registrered */
10160         if (dev->reg_state != NETREG_REGISTERED)
10161                 goto out;
10162
10163         /* Get out if there is nothing todo */
10164         err = 0;
10165         if (net_eq(net_old, net))
10166                 goto out;
10167
10168         /* Pick the destination device name, and ensure
10169          * we can use it in the destination network namespace.
10170          */
10171         err = -EEXIST;
10172         if (__dev_get_by_name(net, dev->name)) {
10173                 /* We get here if we can't use the current device name */
10174                 if (!pat)
10175                         goto out;
10176                 err = dev_get_valid_name(net, dev, pat);
10177                 if (err < 0)
10178                         goto out;
10179         }
10180
10181         /*
10182          * And now a mini version of register_netdevice unregister_netdevice.
10183          */
10184
10185         /* If device is running close it first. */
10186         dev_close(dev);
10187
10188         /* And unlink it from device chain */
10189         unlist_netdevice(dev);
10190
10191         synchronize_net();
10192
10193         /* Shutdown queueing discipline. */
10194         dev_shutdown(dev);
10195
10196         /* Notify protocols, that we are about to destroy
10197          * this device. They should clean all the things.
10198          *
10199          * Note that dev->reg_state stays at NETREG_REGISTERED.
10200          * This is wanted because this way 8021q and macvlan know
10201          * the device is just moving and can keep their slaves up.
10202          */
10203         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10204         rcu_barrier();
10205
10206         new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
10207         /* If there is an ifindex conflict assign a new one */
10208         if (__dev_get_by_index(net, dev->ifindex))
10209                 new_ifindex = dev_new_index(net);
10210         else
10211                 new_ifindex = dev->ifindex;
10212
10213         rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
10214                             new_ifindex);
10215
10216         /*
10217          *      Flush the unicast and multicast chains
10218          */
10219         dev_uc_flush(dev);
10220         dev_mc_flush(dev);
10221
10222         /* Send a netdev-removed uevent to the old namespace */
10223         kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
10224         netdev_adjacent_del_links(dev);
10225
10226         /* Move per-net netdevice notifiers that are following the netdevice */
10227         move_netdevice_notifiers_dev_net(dev, net);
10228
10229         /* Actually switch the network namespace */
10230         dev_net_set(dev, net);
10231         dev->ifindex = new_ifindex;
10232
10233         /* Send a netdev-add uevent to the new namespace */
10234         kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
10235         netdev_adjacent_add_links(dev);
10236
10237         /* Fixup kobjects */
10238         err = device_rename(&dev->dev, dev->name);
10239         WARN_ON(err);
10240
10241         /* Adapt owner in case owning user namespace of target network
10242          * namespace is different from the original one.
10243          */
10244         err = netdev_change_owner(dev, net_old, net);
10245         WARN_ON(err);
10246
10247         /* Add the device back in the hashes */
10248         list_netdevice(dev);
10249
10250         /* Notify protocols, that a new device appeared. */
10251         call_netdevice_notifiers(NETDEV_REGISTER, dev);
10252
10253         /*
10254          *      Prevent userspace races by waiting until the network
10255          *      device is fully setup before sending notifications.
10256          */
10257         rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
10258
10259         synchronize_net();
10260         err = 0;
10261 out:
10262         return err;
10263 }
10264 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
10265
10266 static int dev_cpu_dead(unsigned int oldcpu)
10267 {
10268         struct sk_buff **list_skb;
10269         struct sk_buff *skb;
10270         unsigned int cpu;
10271         struct softnet_data *sd, *oldsd, *remsd = NULL;
10272
10273         local_irq_disable();
10274         cpu = smp_processor_id();
10275         sd = &per_cpu(softnet_data, cpu);
10276         oldsd = &per_cpu(softnet_data, oldcpu);
10277
10278         /* Find end of our completion_queue. */
10279         list_skb = &sd->completion_queue;
10280         while (*list_skb)
10281                 list_skb = &(*list_skb)->next;
10282         /* Append completion queue from offline CPU. */
10283         *list_skb = oldsd->completion_queue;
10284         oldsd->completion_queue = NULL;
10285
10286         /* Append output queue from offline CPU. */
10287         if (oldsd->output_queue) {
10288                 *sd->output_queue_tailp = oldsd->output_queue;
10289                 sd->output_queue_tailp = oldsd->output_queue_tailp;
10290                 oldsd->output_queue = NULL;
10291                 oldsd->output_queue_tailp = &oldsd->output_queue;
10292         }
10293         /* Append NAPI poll list from offline CPU, with one exception :
10294          * process_backlog() must be called by cpu owning percpu backlog.
10295          * We properly handle process_queue & input_pkt_queue later.
10296          */
10297         while (!list_empty(&oldsd->poll_list)) {
10298                 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
10299                                                             struct napi_struct,
10300                                                             poll_list);
10301
10302                 list_del_init(&napi->poll_list);
10303                 if (napi->poll == process_backlog)
10304                         napi->state = 0;
10305                 else
10306                         ____napi_schedule(sd, napi);
10307         }
10308
10309         raise_softirq_irqoff(NET_TX_SOFTIRQ);
10310         local_irq_enable();
10311
10312 #ifdef CONFIG_RPS
10313         remsd = oldsd->rps_ipi_list;
10314         oldsd->rps_ipi_list = NULL;
10315 #endif
10316         /* send out pending IPI's on offline CPU */
10317         net_rps_send_ipi(remsd);
10318
10319         /* Process offline CPU's input_pkt_queue */
10320         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
10321                 netif_rx_ni(skb);
10322                 input_queue_head_incr(oldsd);
10323         }
10324         while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
10325                 netif_rx_ni(skb);
10326                 input_queue_head_incr(oldsd);
10327         }
10328
10329         return 0;
10330 }
10331
10332 /**
10333  *      netdev_increment_features - increment feature set by one
10334  *      @all: current feature set
10335  *      @one: new feature set
10336  *      @mask: mask feature set
10337  *
10338  *      Computes a new feature set after adding a device with feature set
10339  *      @one to the master device with current feature set @all.  Will not
10340  *      enable anything that is off in @mask. Returns the new feature set.
10341  */
10342 netdev_features_t netdev_increment_features(netdev_features_t all,
10343         netdev_features_t one, netdev_features_t mask)
10344 {
10345         if (mask & NETIF_F_HW_CSUM)
10346                 mask |= NETIF_F_CSUM_MASK;
10347         mask |= NETIF_F_VLAN_CHALLENGED;
10348
10349         all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
10350         all &= one | ~NETIF_F_ALL_FOR_ALL;
10351
10352         /* If one device supports hw checksumming, set for all. */
10353         if (all & NETIF_F_HW_CSUM)
10354                 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
10355
10356         return all;
10357 }
10358 EXPORT_SYMBOL(netdev_increment_features);
10359
10360 static struct hlist_head * __net_init netdev_create_hash(void)
10361 {
10362         int i;
10363         struct hlist_head *hash;
10364
10365         hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
10366         if (hash != NULL)
10367                 for (i = 0; i < NETDEV_HASHENTRIES; i++)
10368                         INIT_HLIST_HEAD(&hash[i]);
10369
10370         return hash;
10371 }
10372
10373 /* Initialize per network namespace state */
10374 static int __net_init netdev_init(struct net *net)
10375 {
10376         BUILD_BUG_ON(GRO_HASH_BUCKETS >
10377                      8 * sizeof_field(struct napi_struct, gro_bitmask));
10378
10379         if (net != &init_net)
10380                 INIT_LIST_HEAD(&net->dev_base_head);
10381
10382         net->dev_name_head = netdev_create_hash();
10383         if (net->dev_name_head == NULL)
10384                 goto err_name;
10385
10386         net->dev_index_head = netdev_create_hash();
10387         if (net->dev_index_head == NULL)
10388                 goto err_idx;
10389
10390         RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
10391
10392         return 0;
10393
10394 err_idx:
10395         kfree(net->dev_name_head);
10396 err_name:
10397         return -ENOMEM;
10398 }
10399
10400 /**
10401  *      netdev_drivername - network driver for the device
10402  *      @dev: network device
10403  *
10404  *      Determine network driver for device.
10405  */
10406 const char *netdev_drivername(const struct net_device *dev)
10407 {
10408         const struct device_driver *driver;
10409         const struct device *parent;
10410         const char *empty = "";
10411
10412         parent = dev->dev.parent;
10413         if (!parent)
10414                 return empty;
10415
10416         driver = parent->driver;
10417         if (driver && driver->name)
10418                 return driver->name;
10419         return empty;
10420 }
10421
10422 static void __netdev_printk(const char *level, const struct net_device *dev,
10423                             struct va_format *vaf)
10424 {
10425         if (dev && dev->dev.parent) {
10426                 dev_printk_emit(level[1] - '0',
10427                                 dev->dev.parent,
10428                                 "%s %s %s%s: %pV",
10429                                 dev_driver_string(dev->dev.parent),
10430                                 dev_name(dev->dev.parent),
10431                                 netdev_name(dev), netdev_reg_state(dev),
10432                                 vaf);
10433         } else if (dev) {
10434                 printk("%s%s%s: %pV",
10435                        level, netdev_name(dev), netdev_reg_state(dev), vaf);
10436         } else {
10437                 printk("%s(NULL net_device): %pV", level, vaf);
10438         }
10439 }
10440
10441 void netdev_printk(const char *level, const struct net_device *dev,
10442                    const char *format, ...)
10443 {
10444         struct va_format vaf;
10445         va_list args;
10446
10447         va_start(args, format);
10448
10449         vaf.fmt = format;
10450         vaf.va = &args;
10451
10452         __netdev_printk(level, dev, &vaf);
10453
10454         va_end(args);
10455 }
10456 EXPORT_SYMBOL(netdev_printk);
10457
10458 #define define_netdev_printk_level(func, level)                 \
10459 void func(const struct net_device *dev, const char *fmt, ...)   \
10460 {                                                               \
10461         struct va_format vaf;                                   \
10462         va_list args;                                           \
10463                                                                 \
10464         va_start(args, fmt);                                    \
10465                                                                 \
10466         vaf.fmt = fmt;                                          \
10467         vaf.va = &args;                                         \
10468                                                                 \
10469         __netdev_printk(level, dev, &vaf);                      \
10470                                                                 \
10471         va_end(args);                                           \
10472 }                                                               \
10473 EXPORT_SYMBOL(func);
10474
10475 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
10476 define_netdev_printk_level(netdev_alert, KERN_ALERT);
10477 define_netdev_printk_level(netdev_crit, KERN_CRIT);
10478 define_netdev_printk_level(netdev_err, KERN_ERR);
10479 define_netdev_printk_level(netdev_warn, KERN_WARNING);
10480 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
10481 define_netdev_printk_level(netdev_info, KERN_INFO);
10482
10483 static void __net_exit netdev_exit(struct net *net)
10484 {
10485         kfree(net->dev_name_head);
10486         kfree(net->dev_index_head);
10487         if (net != &init_net)
10488                 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
10489 }
10490
10491 static struct pernet_operations __net_initdata netdev_net_ops = {
10492         .init = netdev_init,
10493         .exit = netdev_exit,
10494 };
10495
10496 static void __net_exit default_device_exit(struct net *net)
10497 {
10498         struct net_device *dev, *aux;
10499         /*
10500          * Push all migratable network devices back to the
10501          * initial network namespace
10502          */
10503         rtnl_lock();
10504         for_each_netdev_safe(net, dev, aux) {
10505                 int err;
10506                 char fb_name[IFNAMSIZ];
10507
10508                 /* Ignore unmoveable devices (i.e. loopback) */
10509                 if (dev->features & NETIF_F_NETNS_LOCAL)
10510                         continue;
10511
10512                 /* Leave virtual devices for the generic cleanup */
10513                 if (dev->rtnl_link_ops)
10514                         continue;
10515
10516                 /* Push remaining network devices to init_net */
10517                 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
10518                 if (__dev_get_by_name(&init_net, fb_name))
10519                         snprintf(fb_name, IFNAMSIZ, "dev%%d");
10520                 err = dev_change_net_namespace(dev, &init_net, fb_name);
10521                 if (err) {
10522                         pr_emerg("%s: failed to move %s to init_net: %d\n",
10523                                  __func__, dev->name, err);
10524                         BUG();
10525                 }
10526         }
10527         rtnl_unlock();
10528 }
10529
10530 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
10531 {
10532         /* Return with the rtnl_lock held when there are no network
10533          * devices unregistering in any network namespace in net_list.
10534          */
10535         struct net *net;
10536         bool unregistering;
10537         DEFINE_WAIT_FUNC(wait, woken_wake_function);
10538
10539         add_wait_queue(&netdev_unregistering_wq, &wait);
10540         for (;;) {
10541                 unregistering = false;
10542                 rtnl_lock();
10543                 list_for_each_entry(net, net_list, exit_list) {
10544                         if (net->dev_unreg_count > 0) {
10545                                 unregistering = true;
10546                                 break;
10547                         }
10548                 }
10549                 if (!unregistering)
10550                         break;
10551                 __rtnl_unlock();
10552
10553                 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
10554         }
10555         remove_wait_queue(&netdev_unregistering_wq, &wait);
10556 }
10557
10558 static void __net_exit default_device_exit_batch(struct list_head *net_list)
10559 {
10560         /* At exit all network devices most be removed from a network
10561          * namespace.  Do this in the reverse order of registration.
10562          * Do this across as many network namespaces as possible to
10563          * improve batching efficiency.
10564          */
10565         struct net_device *dev;
10566         struct net *net;
10567         LIST_HEAD(dev_kill_list);
10568
10569         /* To prevent network device cleanup code from dereferencing
10570          * loopback devices or network devices that have been freed
10571          * wait here for all pending unregistrations to complete,
10572          * before unregistring the loopback device and allowing the
10573          * network namespace be freed.
10574          *
10575          * The netdev todo list containing all network devices
10576          * unregistrations that happen in default_device_exit_batch
10577          * will run in the rtnl_unlock() at the end of
10578          * default_device_exit_batch.
10579          */
10580         rtnl_lock_unregistering(net_list);
10581         list_for_each_entry(net, net_list, exit_list) {
10582                 for_each_netdev_reverse(net, dev) {
10583                         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
10584                                 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
10585                         else
10586                                 unregister_netdevice_queue(dev, &dev_kill_list);
10587                 }
10588         }
10589         unregister_netdevice_many(&dev_kill_list);
10590         rtnl_unlock();
10591 }
10592
10593 static struct pernet_operations __net_initdata default_device_ops = {
10594         .exit = default_device_exit,
10595         .exit_batch = default_device_exit_batch,
10596 };
10597
10598 /*
10599  *      Initialize the DEV module. At boot time this walks the device list and
10600  *      unhooks any devices that fail to initialise (normally hardware not
10601  *      present) and leaves us with a valid list of present and active devices.
10602  *
10603  */
10604
10605 /*
10606  *       This is called single threaded during boot, so no need
10607  *       to take the rtnl semaphore.
10608  */
10609 static int __init net_dev_init(void)
10610 {
10611         int i, rc = -ENOMEM;
10612
10613         BUG_ON(!dev_boot_phase);
10614
10615         if (dev_proc_init())
10616                 goto out;
10617
10618         if (netdev_kobject_init())
10619                 goto out;
10620
10621         INIT_LIST_HEAD(&ptype_all);
10622         for (i = 0; i < PTYPE_HASH_SIZE; i++)
10623                 INIT_LIST_HEAD(&ptype_base[i]);
10624
10625         INIT_LIST_HEAD(&offload_base);
10626
10627         if (register_pernet_subsys(&netdev_net_ops))
10628                 goto out;
10629
10630         /*
10631          *      Initialise the packet receive queues.
10632          */
10633
10634         for_each_possible_cpu(i) {
10635                 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
10636                 struct softnet_data *sd = &per_cpu(softnet_data, i);
10637
10638                 INIT_WORK(flush, flush_backlog);
10639
10640                 skb_queue_head_init(&sd->input_pkt_queue);
10641                 skb_queue_head_init(&sd->process_queue);
10642 #ifdef CONFIG_XFRM_OFFLOAD
10643                 skb_queue_head_init(&sd->xfrm_backlog);
10644 #endif
10645                 INIT_LIST_HEAD(&sd->poll_list);
10646                 sd->output_queue_tailp = &sd->output_queue;
10647 #ifdef CONFIG_RPS
10648                 sd->csd.func = rps_trigger_softirq;
10649                 sd->csd.info = sd;
10650                 sd->cpu = i;
10651 #endif
10652
10653                 init_gro_hash(&sd->backlog);
10654                 sd->backlog.poll = process_backlog;
10655                 sd->backlog.weight = weight_p;
10656         }
10657
10658         dev_boot_phase = 0;
10659
10660         /* The loopback device is special if any other network devices
10661          * is present in a network namespace the loopback device must
10662          * be present. Since we now dynamically allocate and free the
10663          * loopback device ensure this invariant is maintained by
10664          * keeping the loopback device as the first device on the
10665          * list of network devices.  Ensuring the loopback devices
10666          * is the first device that appears and the last network device
10667          * that disappears.
10668          */
10669         if (register_pernet_device(&loopback_net_ops))
10670                 goto out;
10671
10672         if (register_pernet_device(&default_device_ops))
10673                 goto out;
10674
10675         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
10676         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
10677
10678         rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
10679                                        NULL, dev_cpu_dead);
10680         WARN_ON(rc < 0);
10681         rc = 0;
10682 out:
10683         return rc;
10684 }
10685
10686 subsys_initcall(net_dev_init);