af_packet: fix data-race in packet_setsockopt / packet_setsockopt
net/packet/af_packet.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              PACKET - implements raw packet sockets.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() now used correctly
 *              Alan Cox        :       new skbuff lists, look ma no backlogs!
 *              Alan Cox        :       tidied skbuff lists.
 *              Alan Cox        :       Now uses generic datagram routines I
 *                                      added. Also fixed the peek/read crash
 *                                      from all old Linux datagram code.
 *              Alan Cox        :       Uses the improved datagram code.
 *              Alan Cox        :       Added NULL's for socket options.
 *              Alan Cox        :       Re-commented the code.
 *              Alan Cox        :       Use new kernel side addressing
 *              Rob Janssen     :       Correct MTU usage.
 *              Dave Platt      :       Counter leaks caused by incorrect
 *                                      interrupt locking and some slightly
 *                                      dubious gcc output. Can you read
 *                                      compiler: it said _VOLATILE_
 *      Richard Kooijman        :       Timestamp fixes.
 *              Alan Cox        :       New buffers. Use sk->mac.raw.
 *              Alan Cox        :       sendmsg/recvmsg support.
 *              Alan Cox        :       Protocol setting support
 *      Alexey Kuznetsov        :       Untied from IPv4 stack.
 *      Cyrus Durgin            :       Fixed kerneld for kmod.
 *      Michal Ostrowski        :       Module initialization cleanup.
 *         Ulises Alonso        :       Frame number limit removal and
 *                                      packet_set_ring memory leak.
 *              Eric Biederman  :       Allow for > 8 byte hardware addresses.
 *                                      The convention is that longer addresses
 *                                      will simply extend the hardware address
 *                                      byte arrays at the end of sockaddr_ll
 *                                      and packet_mreq.
 *              Johann Baudy    :       Added TX RING.
 *              Chetan Loke     :       Implemented TPACKET_V3 block abstraction
 *                                      layer.
 *                                      Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - A packet socket receives packets with the ll header already pulled,
     so SOCK_RAW must push it back.

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However, drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

In summary:
  If dev_has_header(dev) == false we are unable to restore the ll header,
    because it is invisible to us.


On transmit:
------------

dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

dev_has_header(dev) == false (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We should set network_header on output to the correct position;
   the packet classifier depends on it.
 */
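
/*
 * Illustrative user-space sketch: the ll-header rules above are what a
 * packet(7) user observes. With SOCK_RAW the socket sees the link-layer
 * header; with SOCK_DGRAM the kernel strips it and reports the address
 * through sockaddr_ll instead. A minimal capture loop, assuming an
 * Ethernet-like device:
 *
 *	#include <arpa/inet.h>		// htons()
 *	#include <linux/if_packet.h>	// struct sockaddr_ll
 *	#include <net/ethernet.h>	// ETH_P_ALL
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	char buf[2048];
 *	struct sockaddr_ll sll;
 *	socklen_t slen = sizeof(sll);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&sll, &slen);
 *	// With SOCK_RAW, buf starts with the 14-byte Ethernet header;
 *	// with SOCK_DGRAM it would start at the network header.
 */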

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
        int             mr_ifindex;
        unsigned short  mr_type;
        unsigned short  mr_alen;
        unsigned char   mr_address[MAX_ADDR_LEN];
};
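
/*
 * Hedged user-space sketch: packet_mreq_max mirrors the UAPI struct
 * packet_mreq, only with room for hardware addresses longer than 8
 * bytes. A typical use of the UAPI struct is putting one interface into
 * promiscuous mode ("eth0" below is just an example name):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */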

union tpacket_uhdr {
        struct tpacket_hdr  *h1;
        struct tpacket2_hdr *h2;
        struct tpacket3_hdr *h3;
        void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                int closing, int tx_ring);

#define V3_ALIGNMENT    (8)

#define BLK_HDR_LEN     (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
        (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)       ((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)           ((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)            ((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)           ((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
                        struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
                struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
                struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
        union {
                struct sockaddr_pkt pkt;
                union {
                        /* Trick: alias skb original length with
                         * ll.sll_family and ll.protocol in order
                         * to save room.
                         */
                        unsigned int origlen;
                        struct sockaddr_ll ll;
                };
        } sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)    ((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)    ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid) \
        ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)       \
        ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
        (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
        ((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
        return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = rcu_dereference(po->cached_dev);
        dev_hold(dev);
        rcu_read_unlock();

        return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
                                     struct net_device *dev)
{
        rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
        RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
        return po->xmit == packet_direct_xmit;
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        int cpu = raw_smp_processor_id();
        u16 queue_index;

#ifdef CONFIG_XPS
        skb->sender_cpu = cpu + 1;
#endif
        skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
        if (ops->ndo_select_queue) {
                queue_index = ops->ndo_select_queue(dev, skb, NULL);
                queue_index = netdev_cap_txqueue(dev, queue_index);
        } else {
                queue_index = netdev_pick_tx(dev, skb, NULL);
        }

        return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
        struct packet_sock *po = pkt_sk(sk);

        if (!po->running) {
                if (po->fanout)
                        __fanout_link(sk, po);
                else
                        dev_add_pack(&po->prot_hook);

                sock_hold(sk);
                po->running = 1;
        }
}

static void register_prot_hook(struct sock *sk)
{
        lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
        __register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
        struct packet_sock *po = pkt_sk(sk);

        lockdep_assert_held_once(&po->bind_lock);

        po->running = 0;

        if (po->fanout)
                __fanout_unlink(sk, po);
        else
                __dev_remove_pack(&po->prot_hook);

        __sock_put(sk);

        if (sync) {
                spin_unlock(&po->bind_lock);
                synchronize_net();
                spin_lock(&po->bind_lock);
        }
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
        struct packet_sock *po = pkt_sk(sk);

        if (po->running)
                __unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
        union tpacket_uhdr h;

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                break;
        case TPACKET_V2:
                h.h2->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                break;
        case TPACKET_V3:
                h.h3->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h3->tp_status));
                break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
        }

        smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
        union tpacket_uhdr h;

        smp_rmb();

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                return h.h1->tp_status;
        case TPACKET_V2:
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                return h.h2->tp_status;
        case TPACKET_V3:
                flush_dcache_page(pgv_to_page(&h.h3->tp_status));
                return h.h3->tp_status;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
                return 0;
        }
}
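
/*
 * The two helpers above form the kernel half of the lock-free ring
 * handshake: the kernel fills a frame and sets tp_status to
 * TP_STATUS_USER behind a write barrier; user-space consumes the frame
 * and stores TP_STATUS_KERNEL back. A minimal sketch of the matching
 * user-space half, assuming "frame" points into an mmap()ed TPACKET_V2
 * ring (pfd and handle_frame() are hypothetical):
 *
 *	struct tpacket2_hdr *hdr = frame;
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);	// wait for the kernel to flip status
 *	__sync_synchronize();		// pairs with the kernel's smp_wmb()
 *	handle_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *	__sync_synchronize();		// finish reads before releasing
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 */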

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
                                   unsigned int flags)
{
        struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

        if (shhwtstamps &&
            (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
            ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
                return TP_STATUS_TS_RAW_HARDWARE;

        if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
            ktime_to_timespec64_cond(skb->tstamp, ts))
                return TP_STATUS_TS_SOFTWARE;

        return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
                                    struct sk_buff *skb)
{
        union tpacket_uhdr h;
        struct timespec64 ts;
        __u32 ts_status;

        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
                return 0;

        h.raw = frame;
        /*
         * versions 1 through 3 overflow the timestamps in y2106, since they
         * all store the seconds in a 32-bit unsigned integer.
         * If we create a version 4, that should have a 64-bit timestamp,
         * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
         * nanoseconds.
         */
        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_sec = ts.tv_sec;
                h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
                break;
        case TPACKET_V2:
                h.h2->tp_sec = ts.tv_sec;
                h.h2->tp_nsec = ts.tv_nsec;
                break;
        case TPACKET_V3:
                h.h3->tp_sec = ts.tv_sec;
                h.h3->tp_nsec = ts.tv_nsec;
                break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
        }

        /* one flush is safe, as both fields always lie on the same cacheline */
        flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
        smp_wmb();

        return ts_status;
}
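
/*
 * po->tp_tstamp, consulted above, is a mask of SOF_TIMESTAMPING_* flags
 * set per socket via the PACKET_TIMESTAMP option. A hedged user-space
 * sketch:
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * The returned ts_status bit (TP_STATUS_TS_RAW_HARDWARE or
 * TP_STATUS_TS_SOFTWARE) is OR-ed into the frame's tp_status so that
 * user-space can tell which clock produced tp_sec/tp_nsec.
 */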

static void *packet_lookup_frame(const struct packet_sock *po,
                                 const struct packet_ring_buffer *rb,
                                 unsigned int position,
                                 int status)
{
        unsigned int pg_vec_pos, frame_offset;
        union tpacket_uhdr h;

        pg_vec_pos = position / rb->frames_per_block;
        frame_offset = position % rb->frames_per_block;

        h.raw = rb->pg_vec[pg_vec_pos].buffer +
                (frame_offset * rb->frame_size);

        if (status != __packet_get_status(po, h.raw))
                return NULL;

        return h.raw;
}
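
/*
 * Worked example of the arithmetic above: with tp_block_size = 4096 and
 * tp_frame_size = 2048 there are frames_per_block = 2 frames per block,
 * so frame number 5 lives at pg_vec[5 / 2 = 2].buffer plus an offset of
 * (5 % 2 = 1) * 2048 bytes. Frames never straddle a block boundary.
 */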

static void *packet_current_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status)
{
        return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
        del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
                struct sk_buff_head *rb_queue)
{
        struct tpacket_kbdq_core *pkc;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

        spin_lock_bh(&rb_queue->lock);
        pkc->delete_blk_timer = 1;
        spin_unlock_bh(&rb_queue->lock);

        prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
        struct tpacket_kbdq_core *pkc;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
                    0);
        pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
                                int blk_size_in_bytes)
{
        struct net_device *dev;
        unsigned int mbits, div;
        struct ethtool_link_ksettings ecmd;
        int err;

        rtnl_lock();
        dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
        if (unlikely(!dev)) {
                rtnl_unlock();
                return DEFAULT_PRB_RETIRE_TOV;
        }
        err = __ethtool_get_link_ksettings(dev, &ecmd);
        rtnl_unlock();
        if (err)
                return DEFAULT_PRB_RETIRE_TOV;

        /* If the link speed is that slow, you don't really
         * need to worry about performance anyway.
         */
        if (ecmd.base.speed < SPEED_1000 ||
            ecmd.base.speed == SPEED_UNKNOWN)
                return DEFAULT_PRB_RETIRE_TOV;

        div = ecmd.base.speed / 1000;
        mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

        if (div)
                mbits /= div;

        if (div)
                return mbits + 1;
        return mbits;
}
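
/*
 * Worked example of the heuristic above: a 1 MiB block holds 8 Mbit, so
 * mbits = 8. At 1 Gb/s, div = 1 and we return 8 + 1 = 9 (ms), roughly
 * the time needed to fill the block at line rate; at 10 Gb/s, div = 10
 * and we return 8/10 + 1 = 1 ms. Links slower than gigabit (or with an
 * unknown speed) simply get DEFAULT_PRB_RETIRE_TOV.
 */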

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
                        union tpacket_req_u *req_u)
{
        p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
                        struct packet_ring_buffer *rb,
                        struct pgv *pg_vec,
                        union tpacket_req_u *req_u)
{
        struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
        struct tpacket_block_desc *pbd;

        memset(p1, 0x0, sizeof(*p1));

        p1->knxt_seq_num = 1;
        p1->pkbdq = pg_vec;
        pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
        p1->pkblk_start = pg_vec[0].buffer;
        p1->kblk_size = req_u->req3.tp_block_size;
        p1->knum_blocks = req_u->req3.tp_block_nr;
        p1->hdrlen = po->tp_hdrlen;
        p1->version = po->tp_version;
        p1->last_kactive_blk_num = 0;
        po->stats.stats3.tp_freeze_q_cnt = 0;
        if (req_u->req3.tp_retire_blk_tov)
                p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
        else
                p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
                                                req_u->req3.tp_block_size);
        p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
        p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
        rwlock_init(&p1->blk_fill_in_prog_lock);

        p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
        prb_init_ft_ops(p1, req_u);
        prb_setup_retire_blk_timer(po);
        prb_open_block(p1, pbd);
}
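
/*
 * Hedged user-space sketch of what feeds init_prb_bdqc(): a TPACKET_V3
 * receive ring is requested with struct tpacket_req3 and then mapped in
 * one contiguous mmap(). All sizes below are examples only; the hard
 * constraint is tp_frame_nr * tp_frame_size ==
 * tp_block_nr * tp_block_size:
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size     = 1 << 20,	// 1 MiB per block
 *		.tp_block_nr       = 8,
 *		.tp_frame_size     = 2048,
 *		.tp_frame_nr       = 8 * (1 << 20) / 2048,
 *		.tp_retire_blk_tov = 60,	// ms; 0 = derive (see above)
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */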

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
        mod_timer(&pkc->retire_blk_timer,
                        jiffies + pkc->tov_in_jiffies);
        pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
        struct packet_sock *po =
                from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
        struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        unsigned int frozen;
        struct tpacket_block_desc *pbd;

        spin_lock(&po->sk.sk_receive_queue.lock);

        frozen = prb_queue_frozen(pkc);
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        if (unlikely(pkc->delete_blk_timer))
                goto out;

        /* We only need to plug the race when the block is partially filled.
         * tpacket_rcv:
         *              lock(); increment BLOCK_NUM_PKTS; unlock()
         *              copy_bits() is in progress ...
         *              timer fires on other cpu:
         *              we can't retire the current block because copy_bits
         *              is in progress.
         *
         */
        if (BLOCK_NUM_PKTS(pbd)) {
                /* Waiting for skb_copy_bits to finish... */
                write_lock(&pkc->blk_fill_in_prog_lock);
                write_unlock(&pkc->blk_fill_in_prog_lock);
        }

        if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
                if (!frozen) {
                        if (!BLOCK_NUM_PKTS(pbd)) {
                                /* An empty block. Just refresh the timer. */
                                goto refresh_timer;
                        }
                        prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
                        if (!prb_dispatch_next_block(pkc, po))
                                goto refresh_timer;
                        else
                                goto out;
                } else {
                        /* Case 1. Queue was frozen because user-space was
                         *         lagging behind.
                         */
                        if (prb_curr_blk_in_use(pbd)) {
                                /*
                                 * Ok, user-space is still behind.
                                 * So just refresh the timer.
                                 */
                                goto refresh_timer;
                        } else {
                               /* Case 2. The queue was frozen, user-space
                                * caught up, now the link went idle && the
                                * timer fired. We don't have a block to close,
                                * so we open this block and restart the timer.
                                * Opening a block thaws the queue and restarts
                                * the timer; thawing/timer-refresh is a side
                                * effect.
                                */
                                prb_open_block(pkc, pbd);
                                goto out;
                        }
                }
        }

refresh_timer:
        _prb_refresh_rx_retire_blk_timer(pkc);

out:
        spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
                struct tpacket_block_desc *pbd1, __u32 status)
{
        /* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        u8 *start, *end;

        start = (u8 *)pbd1;

        /* Skip the block header (we know the header WILL fit in 4K) */
        start += PAGE_SIZE;

        end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
        for (; start < end; start += PAGE_SIZE)
                flush_dcache_page(pgv_to_page(start));

        smp_wmb();
#endif

        /* Now update the block status. */

        BLOCK_STATUS(pbd1) = status;

        /* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        start = (u8 *)pbd1;
        flush_dcache_page(pgv_to_page(start));

        smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *      because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
                struct tpacket_block_desc *pbd1,
                struct packet_sock *po, unsigned int stat)
{
        __u32 status = TP_STATUS_USER | stat;

        struct tpacket3_hdr *last_pkt;
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
        struct sock *sk = &po->sk;

        if (atomic_read(&po->tp_drops))
                status |= TP_STATUS_LOSING;

        last_pkt = (struct tpacket3_hdr *)pkc1->prev;
        last_pkt->tp_next_offset = 0;

        /* Get the ts of the last pkt */
        if (BLOCK_NUM_PKTS(pbd1)) {
                h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
                h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
        } else {
                /* Ok, we tmo'd - so get the current time.
                 *
                 * It shouldn't really happen as we don't close empty
                 * blocks. See prb_retire_rx_blk_timer_expired().
                 */
                struct timespec64 ts;
                ktime_get_real_ts64(&ts);
                h1->ts_last_pkt.ts_sec = ts.tv_sec;
                h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
        }

        smp_wmb();

        /* Flush the block */
        prb_flush_block(pkc1, pbd1, status);

        sk->sk_data_ready(sk);

        pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
        pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
        struct tpacket_block_desc *pbd1)
{
        struct timespec64 ts;
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

        smp_rmb();

        /* We could have just memset this but we would lose the
         * flexibility of making the priv area sticky
         */

        BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
        BLOCK_NUM_PKTS(pbd1) = 0;
        BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

        ktime_get_real_ts64(&ts);

        h1->ts_first_pkt.ts_sec = ts.tv_sec;
        h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

        pkc1->pkblk_start = (char *)pbd1;
        pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

        BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
        BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

        pbd1->version = pkc1->version;
        pkc1->prev = pkc1->nxt_offset;
        pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

        prb_thaw_queue(pkc1);
        _prb_refresh_rx_retire_blk_timer(pkc1);

        smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
                                  struct packet_sock *po)
{
        pkc->reset_pending_on_curr_blk = 1;
        po->stats.stats3.tp_freeze_q_cnt++;
}
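
/*
 * The thaw in case 6.2 above only happens after user-space returns the
 * block that caused the freeze. A minimal consumer-side sketch
 * (current_block() and process_block() are hypothetical helpers):
 *
 *	struct tpacket_block_desc *pbd = current_block(ring);
 *
 *	process_block(pbd);		// read all frames first
 *	__sync_synchronize();		// finish reads before release
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 *
 * That single status store is all the kernel needs: the next
 * __packet_lookup_frame_in_block() (or the retire timer) sees that the
 * block is free, re-opens it and thaws the queue.
 */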

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
                struct packet_sock *po)
{
        struct tpacket_block_desc *pbd;

        smp_rmb();

        /* 1. Get current block num */
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* 2. If this block is currently in_use then freeze the queue */
        if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
                prb_freeze_queue(pkc, po);
                return NULL;
        }

        /*
         * 3.
         * open this block and return the offset where the first packet
         * needs to get stored.
         */
        prb_open_block(pkc, pbd);
        return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
                struct packet_sock *po, unsigned int status)
{
        struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* retire/close the current block */
        if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
                /*
                 * Plug the case where copy_bits() is in progress on
                 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
                 * have space to copy the pkt in the current block and
                 * called prb_retire_current_block()
                 *
                 * We don't need to worry about the TMO case because
                 * the timer-handler already handled this case.
                 */
                if (!(status & TP_STATUS_BLK_TMO)) {
                        /* Waiting for skb_copy_bits to finish... */
                        write_lock(&pkc->blk_fill_in_prog_lock);
                        write_unlock(&pkc->blk_fill_in_prog_lock);
                }
                prb_close_block(pkc, pbd, po, status);
                return;
        }
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
        return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
        return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
        __releases(&pkc->blk_fill_in_prog_lock)
{
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);

        read_unlock(&pkc->blk_fill_in_prog_lock);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        if (skb_vlan_tag_present(pkc->skb)) {
                ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
                ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
                ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
        } else {
                ppd->hv1.tp_vlan_tci = 0;
                ppd->hv1.tp_vlan_tpid = 0;
                ppd->tp_status = TP_STATUS_AVAILABLE;
        }
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_padding = 0;
        prb_fill_vlan_info(pkc, ppd);

        if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
                prb_fill_rxhash(pkc, ppd);
        else
                prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
                                struct tpacket_kbdq_core *pkc,
                                struct tpacket_block_desc *pbd,
                                unsigned int len)
        __acquires(&pkc->blk_fill_in_prog_lock)
{
        struct tpacket3_hdr *ppd;

        ppd  = (struct tpacket3_hdr *)curr;
        ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
        pkc->prev = curr;
        pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_NUM_PKTS(pbd) += 1;
        read_lock(&pkc->blk_fill_in_prog_lock);
        prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
                                            struct sk_buff *skb,
                                            unsigned int len
                                            )
{
        struct tpacket_kbdq_core *pkc;
        struct tpacket_block_desc *pbd;
        char *curr, *end;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* Queue is frozen when user space is lagging behind */
        if (prb_queue_frozen(pkc)) {
                /*
                 * Check if the last block, which caused the queue to freeze,
                 * is still in use by user-space.
                 */
                if (prb_curr_blk_in_use(pbd)) {
                        /* Can't record this packet */
                        return NULL;
                } else {
                        /*
                         * Ok, the block was released by user-space.
                         * Now let's open that block.
                         * Opening a block also thaws the queue.
                         * Thawing is a side effect.
                         */
                        prb_open_block(pkc, pbd);
                }
        }

        smp_mb();
        curr = pkc->nxt_offset;
        pkc->skb = skb;
        end = (char *)pbd + pkc->kblk_size;

        /* first try the current block */
        if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
                prb_fill_curr_block(curr, pkc, pbd, len);
                return (void *)curr;
        }

        /* Ok, close the current block */
        prb_retire_current_block(pkc, po, 0);

        /* Now, try to dispatch the next block */
        curr = (char *)prb_dispatch_next_block(pkc, po);
        if (curr) {
                pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
                prb_fill_curr_block(curr, pkc, pbd, len);
                return (void *)curr;
        }

        /*
         * No free blocks are available. User-space hasn't caught up yet.
         * The queue was just frozen and now this packet will get dropped.
         */
        return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
                                            struct sk_buff *skb,
                                            int status, unsigned int len)
{
        char *curr = NULL;
        switch (po->tp_version) {
        case TPACKET_V1:
        case TPACKET_V2:
                curr = packet_lookup_frame(po, &po->rx_ring,
                                        po->rx_ring.head, status);
                return curr;
        case TPACKET_V3:
                return __packet_lookup_frame_in_block(po, skb, len);
        default:
                WARN(1, "TPACKET version not supported\n");
                BUG();
                return NULL;
        }
}

static void *prb_lookup_block(const struct packet_sock *po,
                              const struct packet_ring_buffer *rb,
                              unsigned int idx,
                              int status)
{
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
        struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

        if (status != BLOCK_STATUS(pbd))
                return NULL;
        return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
        unsigned int prev;
        if (rb->prb_bdqc.kactive_blk_num)
                prev = rb->prb_bdqc.kactive_blk_num-1;
        else
                prev = rb->prb_bdqc.knum_blocks-1;
        return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
                                         struct packet_ring_buffer *rb,
                                         int status)
{
        unsigned int previous = prb_previous_blk_num(rb);
        return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
                                             struct packet_ring_buffer *rb,
                                             int status)
{
        if (po->tp_version <= TPACKET_V2)
                return packet_previous_frame(po, rb, status);

        return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
                                            struct packet_ring_buffer *rb)
{
        switch (po->tp_version) {
        case TPACKET_V1:
        case TPACKET_V2:
                return packet_increment_head(rb);
        case TPACKET_V3:
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
                return;
        }
}

static void *packet_previous_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status)
{
        unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
        return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
        buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
        this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
        this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
        unsigned int refcnt = 0;
        int cpu;

        /* We don't use pending refcount in rx_ring. */
        if (rb->pending_refcnt == NULL)
                return 0;

        for_each_possible_cpu(cpu)
                refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

        return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
        po->rx_ring.pending_refcnt = NULL;

        po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
        if (unlikely(po->tx_ring.pending_refcnt == NULL))
                return -ENOBUFS;

        return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
        free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF    2
#define ROOM_NONE       0x0
#define ROOM_LOW        0x1
#define ROOM_NORMAL     0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
        int idx, len;

        len = READ_ONCE(po->rx_ring.frame_max) + 1;
        idx = READ_ONCE(po->rx_ring.head);
        if (pow_off)
                idx += len >> pow_off;
        if (idx >= len)
                idx -= len;
        return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
        int idx, len;

        len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
        idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
        if (pow_off)
                idx += len >> pow_off;
        if (idx >= len)
                idx -= len;
        return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(const struct packet_sock *po,
                                 const struct sk_buff *skb)
{
        const struct sock *sk = &po->sk;
        int ret = ROOM_NONE;

        if (po->prot_hook.func != tpacket_rcv) {
                int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
                int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
                                   - (skb ? skb->truesize : 0);

                if (avail > (rcvbuf >> ROOM_POW_OFF))
                        return ROOM_NORMAL;
                else if (avail > 0)
                        return ROOM_LOW;
                else
                        return ROOM_NONE;
        }

        if (po->tp_version == TPACKET_V3) {
                if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
                        ret = ROOM_NORMAL;
                else if (__tpacket_v3_has_room(po, 0))
                        ret = ROOM_LOW;
        } else {
                if (__tpacket_has_room(po, ROOM_POW_OFF))
                        ret = ROOM_NORMAL;
                else if (__tpacket_has_room(po, 0))
                        ret = ROOM_LOW;
        }

        return ret;
}
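
/*
 * Worked example of the thresholds above: with ROOM_POW_OFF = 2, a
 * non-mmap()ed socket reports ROOM_NORMAL only while more than a quarter
 * of sk_rcvbuf is still free (avail > rcvbuf >> 2), ROOM_LOW while any
 * space is left, and ROOM_NONE otherwise. The ring variants ask the same
 * question by probing the frame (or block) a quarter of the ring ahead
 * of the current head.
 */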

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
        int pressure, ret;

        ret = __packet_rcv_has_room(po, skb);
        pressure = ret != ROOM_NORMAL;

        if (READ_ONCE(po->pressure) != pressure)
                WRITE_ONCE(po->pressure, pressure);

        return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
        if (READ_ONCE(po->pressure) &&
            __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
                WRITE_ONCE(po->pressure,  0);
}

static void packet_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_error_queue);

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(refcount_read(&sk->sk_wmem_alloc));

        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive packet socket: %p\n", sk);
                return;
        }

        sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
        u32 *history = po->rollover->history;
        u32 victim, rxhash;
        int i, count = 0;

        rxhash = skb_get_hash(skb);
        for (i = 0; i < ROLLOVER_HLEN; i++)
                if (READ_ONCE(history[i]) == rxhash)
                        count++;

        victim = prandom_u32() % ROLLOVER_HLEN;

        /* Avoid dirtying the cache line if possible */
        if (READ_ONCE(history[victim]) != rxhash)
                WRITE_ONCE(history[victim], rxhash);

        return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
                                      struct sk_buff *skb,
                                      unsigned int num)
{
        return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
                                    struct sk_buff *skb,
                                    unsigned int num)
{
        unsigned int val = atomic_inc_return(&f->rr_cur);

        return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
                                     struct sk_buff *skb,
                                     unsigned int num)
{
        return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
                                     struct sk_buff *skb,
                                     unsigned int num)
{
        return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
                                          struct sk_buff *skb,
                                          unsigned int idx, bool try_self,
                                          unsigned int num)
{
        struct packet_sock *po, *po_next, *po_skip = NULL;
        unsigned int i, j, room = ROOM_NONE;

        po = pkt_sk(rcu_dereference(f->arr[idx]));

        if (try_self) {
                room = packet_rcv_has_room(po, skb);
                if (room == ROOM_NORMAL ||
                    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
                        return idx;
                po_skip = po;
        }

        i = j = min_t(int, po->rollover->sock, num - 1);
        do {
                po_next = pkt_sk(rcu_dereference(f->arr[i]));
                if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
                    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
                        if (i != j)
                                po->rollover->sock = i;
                        atomic_long_inc(&po->rollover->num);
                        if (room == ROOM_LOW)
                                atomic_long_inc(&po->rollover->num_huge);
                        return i;
                }

                if (++i == num)
                        i = 0;
        } while (i != j);

        atomic_long_inc(&po->rollover->num_failed);
        return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
                                    struct sk_buff *skb,
                                    unsigned int num)
{
        return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
                                     struct sk_buff *skb,
                                     unsigned int num)
{
        struct bpf_prog *prog;
        unsigned int ret = 0;

        rcu_read_lock();
        prog = rcu_dereference(f->bpf_prog);
        if (prog)
                ret = bpf_prog_run_clear_cb(prog, skb) % num;
        rcu_read_unlock();

        return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
        return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
                             struct packet_type *pt, struct net_device *orig_dev)
{
        struct packet_fanout *f = pt->af_packet_priv;
        unsigned int num = READ_ONCE(f->num_members);
        struct net *net = read_pnet(&f->net);
        struct packet_sock *po;
        unsigned int idx;

        if (!net_eq(dev_net(dev), net) || !num) {
                kfree_skb(skb);
                return 0;
        }

        if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
                skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
                if (!skb)
                        return 0;
        }
        switch (f->type) {
        case PACKET_FANOUT_HASH:
        default:
                idx = fanout_demux_hash(f, skb, num);
                break;
        case PACKET_FANOUT_LB:
                idx = fanout_demux_lb(f, skb, num);
                break;
        case PACKET_FANOUT_CPU:
                idx = fanout_demux_cpu(f, skb, num);
                break;
        case PACKET_FANOUT_RND:
                idx = fanout_demux_rnd(f, skb, num);
                break;
        case PACKET_FANOUT_QM:
                idx = fanout_demux_qm(f, skb, num);
                break;
        case PACKET_FANOUT_ROLLOVER:
                idx = fanout_demux_rollover(f, skb, 0, false, num);
                break;
        case PACKET_FANOUT_CBPF:
        case PACKET_FANOUT_EBPF:
                idx = fanout_demux_bpf(f, skb, num);
                break;
        }

        if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
                idx = fanout_demux_rollover(f, skb, idx, true, num);

        po = pkt_sk(rcu_dereference(f->arr[idx]));
        return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
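
/*
 * Hedged user-space sketch: each worker opens its own packet socket,
 * binds it to the shared interface and joins the same fanout group;
 * packet_rcv_fanout() then steers every packet to exactly one member
 * according to the demux mode. The group id 0x1234 is an arbitrary
 * example:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	// ... bind(fd, ...) to the shared interface here ...
 *	int arg = 0x1234 | ((PACKET_FANOUT_HASH |
 *			     PACKET_FANOUT_FLAG_ROLLOVER) << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */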
1474
1475 DEFINE_MUTEX(fanout_mutex);
1476 EXPORT_SYMBOL_GPL(fanout_mutex);
1477 static LIST_HEAD(fanout_list);
1478 static u16 fanout_next_id;
1479
1480 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1481 {
1482         struct packet_fanout *f = po->fanout;
1483
1484         spin_lock(&f->lock);
1485         rcu_assign_pointer(f->arr[f->num_members], sk);
1486         smp_wmb();
1487         f->num_members++;
1488         if (f->num_members == 1)
1489                 dev_add_pack(&f->prot_hook);
1490         spin_unlock(&f->lock);
1491 }
1492
1493 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1494 {
1495         struct packet_fanout *f = po->fanout;
1496         int i;
1497
1498         spin_lock(&f->lock);
1499         for (i = 0; i < f->num_members; i++) {
1500                 if (rcu_dereference_protected(f->arr[i],
1501                                               lockdep_is_held(&f->lock)) == sk)
1502                         break;
1503         }
1504         BUG_ON(i >= f->num_members);
1505         rcu_assign_pointer(f->arr[i],
1506                            rcu_dereference_protected(f->arr[f->num_members - 1],
1507                                                      lockdep_is_held(&f->lock)));
1508         f->num_members--;
1509         if (f->num_members == 0)
1510                 __dev_remove_pack(&f->prot_hook);
1511         spin_unlock(&f->lock);
1512 }
1513
1514 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1515 {
1516         if (sk->sk_family != PF_PACKET)
1517                 return false;
1518
1519         return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1520 }
1521
1522 static void fanout_init_data(struct packet_fanout *f)
1523 {
1524         switch (f->type) {
1525         case PACKET_FANOUT_LB:
1526                 atomic_set(&f->rr_cur, 0);
1527                 break;
1528         case PACKET_FANOUT_CBPF:
1529         case PACKET_FANOUT_EBPF:
1530                 RCU_INIT_POINTER(f->bpf_prog, NULL);
1531                 break;
1532         }
1533 }
1534
1535 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1536 {
1537         struct bpf_prog *old;
1538
1539         spin_lock(&f->lock);
1540         old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1541         rcu_assign_pointer(f->bpf_prog, new);
1542         spin_unlock(&f->lock);
1543
1544         if (old) {
1545                 synchronize_net();
1546                 bpf_prog_destroy(old);
1547         }
1548 }
1549
1550 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1551                                 unsigned int len)
1552 {
1553         struct bpf_prog *new;
1554         struct sock_fprog fprog;
1555         int ret;
1556
1557         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1558                 return -EPERM;
1559
1560         ret = copy_bpf_fprog_from_user(&fprog, data, len);
1561         if (ret)
1562                 return ret;
1563
1564         ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1565         if (ret)
1566                 return ret;
1567
1568         __fanout_set_data_bpf(po->fanout, new);
1569         return 0;
1570 }
1571
1572 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1573                                 unsigned int len)
1574 {
1575         struct bpf_prog *new;
1576         u32 fd;
1577
1578         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1579                 return -EPERM;
1580         if (len != sizeof(fd))
1581                 return -EINVAL;
1582         if (copy_from_sockptr(&fd, data, len))
1583                 return -EFAULT;
1584
1585         new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1586         if (IS_ERR(new))
1587                 return PTR_ERR(new);
1588
1589         __fanout_set_data_bpf(po->fanout, new);
1590         return 0;
1591 }
1592
1593 static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1594                            unsigned int len)
1595 {
1596         switch (po->fanout->type) {
1597         case PACKET_FANOUT_CBPF:
1598                 return fanout_set_data_cbpf(po, data, len);
1599         case PACKET_FANOUT_EBPF:
1600                 return fanout_set_data_ebpf(po, data, len);
1601         default:
1602                 return -EINVAL;
1603         }
1604 }
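/* Illustrative sketch (assumes the socket already joined a group created
 * with PACKET_FANOUT_CBPF): fanout_set_data_cbpf() above consumes the same
 * struct sock_fprog layout as SO_ATTACH_FILTER, and the program's return
 * value selects the member socket (modulo the group size):
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD | BPF_B | BPF_ABS, 0, 0, 0 },	// A = first byte
 *		{ BPF_RET | BPF_A, 0, 0, 0 },		// index = A
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= 2,
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		   &fprog, sizeof(fprog));
 */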
1605
1606 static void fanout_release_data(struct packet_fanout *f)
1607 {
1608         switch (f->type) {
1609         case PACKET_FANOUT_CBPF:
1610         case PACKET_FANOUT_EBPF:
1611                 __fanout_set_data_bpf(f, NULL);
1612         }
1613 }
1614
1615 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1616 {
1617         struct packet_fanout *f;
1618
1619         list_for_each_entry(f, &fanout_list, list) {
1620                 if (f->id == candidate_id &&
1621                     read_pnet(&f->net) == sock_net(sk)) {
1622                         return false;
1623                 }
1624         }
1625         return true;
1626 }
1627
1628 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1629 {
1630         u16 id = fanout_next_id;
1631
1632         do {
1633                 if (__fanout_id_is_free(sk, id)) {
1634                         *new_id = id;
1635                         fanout_next_id = id + 1;
1636                         return true;
1637                 }
1638
1639                 id++;
1640         } while (id != fanout_next_id);
1641
1642         return false;
1643 }
1644
1645 static int fanout_add(struct sock *sk, struct fanout_args *args)
1646 {
1647         struct packet_rollover *rollover = NULL;
1648         struct packet_sock *po = pkt_sk(sk);
1649         u16 type_flags = args->type_flags;
1650         struct packet_fanout *f, *match;
1651         u8 type = type_flags & 0xff;
1652         u8 flags = type_flags >> 8;
1653         u16 id = args->id;
1654         int err;
1655
1656         switch (type) {
1657         case PACKET_FANOUT_ROLLOVER:
1658                 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1659                         return -EINVAL;
1660                 break;
1661         case PACKET_FANOUT_HASH:
1662         case PACKET_FANOUT_LB:
1663         case PACKET_FANOUT_CPU:
1664         case PACKET_FANOUT_RND:
1665         case PACKET_FANOUT_QM:
1666         case PACKET_FANOUT_CBPF:
1667         case PACKET_FANOUT_EBPF:
1668                 break;
1669         default:
1670                 return -EINVAL;
1671         }
1672
1673         mutex_lock(&fanout_mutex);
1674
1675         err = -EALREADY;
1676         if (po->fanout)
1677                 goto out;
1678
1679         if (type == PACKET_FANOUT_ROLLOVER ||
1680             (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1681                 err = -ENOMEM;
1682                 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1683                 if (!rollover)
1684                         goto out;
1685                 atomic_long_set(&rollover->num, 0);
1686                 atomic_long_set(&rollover->num_huge, 0);
1687                 atomic_long_set(&rollover->num_failed, 0);
1688         }
1689
1690         if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1691                 if (id != 0) {
1692                         err = -EINVAL;
1693                         goto out;
1694                 }
1695                 if (!fanout_find_new_id(sk, &id)) {
1696                         err = -ENOMEM;
1697                         goto out;
1698                 }
1699                 /* ephemeral flag for the first socket in the group: drop it */
1700                 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1701         }
1702
1703         match = NULL;
1704         list_for_each_entry(f, &fanout_list, list) {
1705                 if (f->id == id &&
1706                     read_pnet(&f->net) == sock_net(sk)) {
1707                         match = f;
1708                         break;
1709                 }
1710         }
1711         err = -EINVAL;
1712         if (match) {
1713                 if (match->flags != flags)
1714                         goto out;
1715                 if (args->max_num_members &&
1716                     args->max_num_members != match->max_num_members)
1717                         goto out;
1718         } else {
1719                 if (args->max_num_members > PACKET_FANOUT_MAX)
1720                         goto out;
1721                 if (!args->max_num_members)
1722                         /* legacy PACKET_FANOUT_MAX */
1723                         args->max_num_members = 256;
1724                 err = -ENOMEM;
1725                 match = kvzalloc(struct_size(match, arr, args->max_num_members),
1726                                  GFP_KERNEL);
1727                 if (!match)
1728                         goto out;
1729                 write_pnet(&match->net, sock_net(sk));
1730                 match->id = id;
1731                 match->type = type;
1732                 match->flags = flags;
1733                 INIT_LIST_HEAD(&match->list);
1734                 spin_lock_init(&match->lock);
1735                 refcount_set(&match->sk_ref, 0);
1736                 fanout_init_data(match);
1737                 match->prot_hook.type = po->prot_hook.type;
1738                 match->prot_hook.dev = po->prot_hook.dev;
1739                 match->prot_hook.func = packet_rcv_fanout;
1740                 match->prot_hook.af_packet_priv = match;
1741                 match->prot_hook.af_packet_net = read_pnet(&match->net);
1742                 match->prot_hook.id_match = match_fanout_group;
1743                 match->max_num_members = args->max_num_members;
1744                 list_add(&match->list, &fanout_list);
1745         }
1746         err = -EINVAL;
1747
1748         spin_lock(&po->bind_lock);
1749         if (po->running &&
1750             match->type == type &&
1751             match->prot_hook.type == po->prot_hook.type &&
1752             match->prot_hook.dev == po->prot_hook.dev) {
1753                 err = -ENOSPC;
1754                 if (refcount_read(&match->sk_ref) < match->max_num_members) {
1755                         __dev_remove_pack(&po->prot_hook);
1756
1757                         /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1758                         WRITE_ONCE(po->fanout, match);
1759
1760                         po->rollover = rollover;
1761                         rollover = NULL;
1762                         refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1763                         __fanout_link(sk, po);
1764                         err = 0;
1765                 }
1766         }
1767         spin_unlock(&po->bind_lock);
1768
1769         if (err && !refcount_read(&match->sk_ref)) {
1770                 list_del(&match->list);
1771                 kvfree(match);
1772         }
1773
1774 out:
1775         kfree(rollover);
1776         mutex_unlock(&fanout_mutex);
1777         return err;
1778 }
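/* Illustrative sketch: with PACKET_FANOUT_FLAG_UNIQUEID the id passed in
 * must be zero and fanout_add() picks a free one via fanout_find_new_id();
 * the chosen id can be read back afterwards and shared with the other
 * group members (hypothetical fd, error handling omitted):
 *
 *	int arg = (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_UNIQUEID) << 16;
 *	socklen_t len = sizeof(arg);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *	getsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, &len);
 *	// the low 16 bits of arg now hold the kernel-chosen group id
 */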
1779
1780 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1781  * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1782  * It is the responsibility of the caller to call fanout_release_data() and
1783  * free the returned packet_fanout (after synchronize_net())
1784  */
1785 static struct packet_fanout *fanout_release(struct sock *sk)
1786 {
1787         struct packet_sock *po = pkt_sk(sk);
1788         struct packet_fanout *f;
1789
1790         mutex_lock(&fanout_mutex);
1791         f = po->fanout;
1792         if (f) {
1793                 po->fanout = NULL;
1794
1795                 if (refcount_dec_and_test(&f->sk_ref))
1796                         list_del(&f->list);
1797                 else
1798                         f = NULL;
1799         }
1800         mutex_unlock(&fanout_mutex);
1801
1802         return f;
1803 }
1804
1805 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1806                                           struct sk_buff *skb)
1807 {
1808         /* Earlier code assumed this would be a VLAN pkt, double-check
1809          * this now that we have the actual packet in hand. We can only
1810          * do this check on Ethernet devices.
1811          */
1812         if (unlikely(dev->type != ARPHRD_ETHER))
1813                 return false;
1814
1815         skb_reset_mac_header(skb);
1816         return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1817 }
1818
1819 static const struct proto_ops packet_ops;
1820
1821 static const struct proto_ops packet_ops_spkt;
1822
1823 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1824                            struct packet_type *pt, struct net_device *orig_dev)
1825 {
1826         struct sock *sk;
1827         struct sockaddr_pkt *spkt;
1828
1829         /*
1830          *      When we registered the protocol we saved the socket in the data
1831          *      field for just this event.
1832          */
1833
1834         sk = pt->af_packet_priv;
1835
1836         /*
1837          *      Yank back the headers [hope the device set this
1838          *      right or kerboom...]
1839          *
1840          *      Incoming packets have the ll header pulled;
1841          *      push it back.
1842          *
1843          *      For outgoing ones skb->data == skb_mac_header(skb),
1844          *      so this procedure is a no-op.
1845          */
1846
1847         if (skb->pkt_type == PACKET_LOOPBACK)
1848                 goto out;
1849
1850         if (!net_eq(dev_net(dev), sock_net(sk)))
1851                 goto out;
1852
1853         skb = skb_share_check(skb, GFP_ATOMIC);
1854         if (skb == NULL)
1855                 goto oom;
1856
1857         /* drop any routing info */
1858         skb_dst_drop(skb);
1859
1860         /* drop conntrack reference */
1861         nf_reset_ct(skb);
1862
1863         spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1864
1865         skb_push(skb, skb->data - skb_mac_header(skb));
1866
1867         /*
1868          *      The SOCK_PACKET socket receives _all_ frames.
1869          */
1870
1871         spkt->spkt_family = dev->type;
1872         strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1873         spkt->spkt_protocol = skb->protocol;
1874
1875         /*
1876          *      Charge the memory to the socket. This is done specifically
1877          *      to prevent sockets from using up all the memory.
1878          */
1879
1880         if (sock_queue_rcv_skb(sk, skb) == 0)
1881                 return 0;
1882
1883 out:
1884         kfree_skb(skb);
1885 oom:
1886         return 0;
1887 }
1888
1889 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1890 {
1891         if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1892             sock->type == SOCK_RAW) {
1893                 skb_reset_mac_header(skb);
1894                 skb->protocol = dev_parse_header_protocol(skb);
1895         }
1896
1897         skb_probe_transport_header(skb);
1898 }
1899
1900 /*
1901  *      Output a raw packet to the device layer. This bypasses all the other
1902  *      protocol layers, and you must therefore supply it with a complete frame.
1903  */
1904
1905 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1906                                size_t len)
1907 {
1908         struct sock *sk = sock->sk;
1909         DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1910         struct sk_buff *skb = NULL;
1911         struct net_device *dev;
1912         struct sockcm_cookie sockc;
1913         __be16 proto = 0;
1914         int err;
1915         int extra_len = 0;
1916
1917         /*
1918          *      Get and verify the address.
1919          */
1920
1921         if (saddr) {
1922                 if (msg->msg_namelen < sizeof(struct sockaddr))
1923                         return -EINVAL;
1924                 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1925                         proto = saddr->spkt_protocol;
1926         } else
1927                 return -ENOTCONN;       /* SOCK_PACKET must be sent giving an address */
1928
1929         /*
1930          *      Find the device first so we can size-check against it
1931          */
1932
1933         saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1934 retry:
1935         rcu_read_lock();
1936         dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1937         err = -ENODEV;
1938         if (dev == NULL)
1939                 goto out_unlock;
1940
1941         err = -ENETDOWN;
1942         if (!(dev->flags & IFF_UP))
1943                 goto out_unlock;
1944
1945         /*
1946          * You may not queue a frame bigger than the MTU. This is the lowest level
1947          * raw protocol and you must do your own fragmentation at this level.
1948          */
1949
1950         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1951                 if (!netif_supports_nofcs(dev)) {
1952                         err = -EPROTONOSUPPORT;
1953                         goto out_unlock;
1954                 }
1955                 extra_len = 4; /* We're doing our own CRC */
1956         }
1957
1958         err = -EMSGSIZE;
1959         if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1960                 goto out_unlock;
1961
1962         if (!skb) {
1963                 size_t reserved = LL_RESERVED_SPACE(dev);
1964                 int tlen = dev->needed_tailroom;
1965                 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1966
1967                 rcu_read_unlock();
1968                 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1969                 if (skb == NULL)
1970                         return -ENOBUFS;
1971                 /* FIXME: Save some space for broken drivers that write a hard
1972                  * header at transmission time by themselves. PPP is the notable
1973                  * one here. This should really be fixed at the driver level.
1974                  */
1975                 skb_reserve(skb, reserved);
1976                 skb_reset_network_header(skb);
1977
1978                 /* Try to align data part correctly */
1979                 if (hhlen) {
1980                         skb->data -= hhlen;
1981                         skb->tail -= hhlen;
1982                         if (len < hhlen)
1983                                 skb_reset_network_header(skb);
1984                 }
1985                 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1986                 if (err)
1987                         goto out_free;
1988                 goto retry;
1989         }
1990
1991         if (!dev_validate_header(dev, skb->data, len)) {
1992                 err = -EINVAL;
1993                 goto out_unlock;
1994         }
1995         if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1996             !packet_extra_vlan_len_allowed(dev, skb)) {
1997                 err = -EMSGSIZE;
1998                 goto out_unlock;
1999         }
2000
2001         sockcm_init(&sockc, sk);
2002         if (msg->msg_controllen) {
2003                 err = sock_cmsg_send(sk, msg, &sockc);
2004                 if (unlikely(err))
2005                         goto out_unlock;
2006         }
2007
2008         skb->protocol = proto;
2009         skb->dev = dev;
2010         skb->priority = sk->sk_priority;
2011         skb->mark = sk->sk_mark;
2012         skb->tstamp = sockc.transmit_time;
2013
2014         skb_setup_tx_timestamp(skb, sockc.tsflags);
2015
2016         if (unlikely(extra_len == 4))
2017                 skb->no_fcs = 1;
2018
2019         packet_parse_headers(skb, sock);
2020
2021         dev_queue_xmit(skb);
2022         rcu_read_unlock();
2023         return len;
2024
2025 out_unlock:
2026         rcu_read_unlock();
2027 out_free:
2028         kfree_skb(skb);
2029         return err;
2030 }
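/* Illustrative sketch: sending through the legacy SOCK_PACKET path above.
 * The interface name is an assumption, and the caller must supply a
 * complete link-layer frame (here a zeroed, minimum-length one):
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	unsigned char frame[ETH_ZLEN] = { 0 };	// dst, src, type, payload
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *
 *	strncpy((char *)spkt.spkt_device, "eth0",
 *		sizeof(spkt.spkt_device) - 1);
 *	sendto(fd, frame, sizeof(frame), 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */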
2031
2032 static unsigned int run_filter(struct sk_buff *skb,
2033                                const struct sock *sk,
2034                                unsigned int res)
2035 {
2036         struct sk_filter *filter;
2037
2038         rcu_read_lock();
2039         filter = rcu_dereference(sk->sk_filter);
2040         if (filter != NULL)
2041                 res = bpf_prog_run_clear_cb(filter->prog, skb);
2042         rcu_read_unlock();
2043
2044         return res;
2045 }
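/* Illustrative sketch: run_filter() above executes whatever classic BPF
 * program was attached with SO_ATTACH_FILTER. A return value of 0 drops
 * the packet; a smaller-than-packet value truncates the snapshot. This
 * one accepts everything but caps the snapshot at 96 bytes:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },
 *	};
 *	struct sock_fprog fprog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		   &fprog, sizeof(fprog));
 */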
2046
2047 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2048                            size_t *len)
2049 {
2050         struct virtio_net_hdr vnet_hdr;
2051
2052         if (*len < sizeof(vnet_hdr))
2053                 return -EINVAL;
2054         *len -= sizeof(vnet_hdr);
2055
2056         if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2057                 return -EINVAL;
2058
2059         return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2060 }
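/* Illustrative sketch: once PACKET_VNET_HDR is enabled on the socket,
 * packet_rcv_vnet() prepends a struct virtio_net_hdr with offload
 * metadata to every packet handed to userspace:
 *
 *	int on = 1;
 *	char buf[sizeof(struct virtio_net_hdr) + 2048];
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	// buf starts with the virtio_net_hdr; the frame follows it
 */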
2061
2062 /*
2063  * This function performs lazy skb cloning, in the hope that most packets
2064  * are discarded by BPF.
2065  *
2066  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2067  * and skb->cb are mangled. It works because (and until) packets
2068  * falling here are owned by the current CPU. Output packets are cloned
2069  * by dev_queue_xmit_nit(), input packets are processed by net_bh
2070  * sequentially, so if we return the skb to its original state on exit,
2071  * we will not harm anyone.
2072  */
2073
2074 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2075                       struct packet_type *pt, struct net_device *orig_dev)
2076 {
2077         struct sock *sk;
2078         struct sockaddr_ll *sll;
2079         struct packet_sock *po;
2080         u8 *skb_head = skb->data;
2081         int skb_len = skb->len;
2082         unsigned int snaplen, res;
2083         bool is_drop_n_account = false;
2084
2085         if (skb->pkt_type == PACKET_LOOPBACK)
2086                 goto drop;
2087
2088         sk = pt->af_packet_priv;
2089         po = pkt_sk(sk);
2090
2091         if (!net_eq(dev_net(dev), sock_net(sk)))
2092                 goto drop;
2093
2094         skb->dev = dev;
2095
2096         if (dev_has_header(dev)) {
2097                 /* The device has an explicit notion of ll header,
2098                  * exported to higher levels.
2099                  *
2100                  * Otherwise, the device hides details of its frame
2101                  * structure, so that corresponding packet head is
2102                  * never delivered to user.
2103                  */
2104                 if (sk->sk_type != SOCK_DGRAM)
2105                         skb_push(skb, skb->data - skb_mac_header(skb));
2106                 else if (skb->pkt_type == PACKET_OUTGOING) {
2107                         /* Special case: outgoing packets have ll header at head */
2108                         skb_pull(skb, skb_network_offset(skb));
2109                 }
2110         }
2111
2112         snaplen = skb->len;
2113
2114         res = run_filter(skb, sk, snaplen);
2115         if (!res)
2116                 goto drop_n_restore;
2117         if (snaplen > res)
2118                 snaplen = res;
2119
2120         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2121                 goto drop_n_acct;
2122
2123         if (skb_shared(skb)) {
2124                 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2125                 if (nskb == NULL)
2126                         goto drop_n_acct;
2127
2128                 if (skb_head != skb->data) {
2129                         skb->data = skb_head;
2130                         skb->len = skb_len;
2131                 }
2132                 consume_skb(skb);
2133                 skb = nskb;
2134         }
2135
2136         sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2137
2138         sll = &PACKET_SKB_CB(skb)->sa.ll;
2139         sll->sll_hatype = dev->type;
2140         sll->sll_pkttype = skb->pkt_type;
2141         if (unlikely(po->origdev))
2142                 sll->sll_ifindex = orig_dev->ifindex;
2143         else
2144                 sll->sll_ifindex = dev->ifindex;
2145
2146         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2147
2148         /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2149          * Use their space for storing the original skb length.
2150          */
2151         PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2152
2153         if (pskb_trim(skb, snaplen))
2154                 goto drop_n_acct;
2155
2156         skb_set_owner_r(skb, sk);
2157         skb->dev = NULL;
2158         skb_dst_drop(skb);
2159
2160         /* drop conntrack reference */
2161         nf_reset_ct(skb);
2162
2163         spin_lock(&sk->sk_receive_queue.lock);
2164         po->stats.stats1.tp_packets++;
2165         sock_skb_set_dropcount(sk, skb);
2166         __skb_queue_tail(&sk->sk_receive_queue, skb);
2167         spin_unlock(&sk->sk_receive_queue.lock);
2168         sk->sk_data_ready(sk);
2169         return 0;
2170
2171 drop_n_acct:
2172         is_drop_n_account = true;
2173         atomic_inc(&po->tp_drops);
2174         atomic_inc(&sk->sk_drops);
2175
2176 drop_n_restore:
2177         if (skb_head != skb->data && skb_shared(skb)) {
2178                 skb->data = skb_head;
2179                 skb->len = skb_len;
2180         }
2181 drop:
2182         if (!is_drop_n_account)
2183                 consume_skb(skb);
2184         else
2185                 kfree_skb(skb);
2186         return 0;
2187 }
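/* Illustrative sketch: frames queued by packet_rcv() are read with plain
 * recvfrom(); the metadata stashed in PACKET_SKB_CB above is returned as
 * the struct sockaddr_ll source address:
 *
 *	struct sockaddr_ll sll;
 *	socklen_t sll_len = sizeof(sll);
 *	unsigned char buf[2048];
 *
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&sll, &sll_len);
 *	// sll.sll_ifindex, sll.sll_pkttype and sll.sll_addr describe
 *	// where and how the frame arrived
 */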
2188
2189 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2190                        struct packet_type *pt, struct net_device *orig_dev)
2191 {
2192         struct sock *sk;
2193         struct packet_sock *po;
2194         struct sockaddr_ll *sll;
2195         union tpacket_uhdr h;
2196         u8 *skb_head = skb->data;
2197         int skb_len = skb->len;
2198         unsigned int snaplen, res;
2199         unsigned long status = TP_STATUS_USER;
2200         unsigned short macoff, hdrlen;
2201         unsigned int netoff;
2202         struct sk_buff *copy_skb = NULL;
2203         struct timespec64 ts;
2204         __u32 ts_status;
2205         bool is_drop_n_account = false;
2206         unsigned int slot_id = 0;
2207         bool do_vnet = false;
2208
2209         /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2210          * New members may be added to them up to the current aligned size without
2211          * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2212          */
2213         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2214         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2215
2216         if (skb->pkt_type == PACKET_LOOPBACK)
2217                 goto drop;
2218
2219         sk = pt->af_packet_priv;
2220         po = pkt_sk(sk);
2221
2222         if (!net_eq(dev_net(dev), sock_net(sk)))
2223                 goto drop;
2224
2225         if (dev_has_header(dev)) {
2226                 if (sk->sk_type != SOCK_DGRAM)
2227                         skb_push(skb, skb->data - skb_mac_header(skb));
2228                 else if (skb->pkt_type == PACKET_OUTGOING) {
2229                         /* Special case: outgoing packets have ll header at head */
2230                         skb_pull(skb, skb_network_offset(skb));
2231                 }
2232         }
2233
2234         snaplen = skb->len;
2235
2236         res = run_filter(skb, sk, snaplen);
2237         if (!res)
2238                 goto drop_n_restore;
2239
2240         /* If we are flooded, just give up */
2241         if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2242                 atomic_inc(&po->tp_drops);
2243                 goto drop_n_restore;
2244         }
2245
2246         if (skb->ip_summed == CHECKSUM_PARTIAL)
2247                 status |= TP_STATUS_CSUMNOTREADY;
2248         else if (skb->pkt_type != PACKET_OUTGOING &&
2249                  (skb->ip_summed == CHECKSUM_COMPLETE ||
2250                   skb_csum_unnecessary(skb)))
2251                 status |= TP_STATUS_CSUM_VALID;
2252
2253         if (snaplen > res)
2254                 snaplen = res;
2255
2256         if (sk->sk_type == SOCK_DGRAM) {
2257                 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2258                                   po->tp_reserve;
2259         } else {
2260                 unsigned int maclen = skb_network_offset(skb);
2261                 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2262                                        (maclen < 16 ? 16 : maclen)) +
2263                                        po->tp_reserve;
2264                 if (po->has_vnet_hdr) {
2265                         netoff += sizeof(struct virtio_net_hdr);
2266                         do_vnet = true;
2267                 }
2268                 macoff = netoff - maclen;
2269         }
2270         if (netoff > USHRT_MAX) {
2271                 atomic_inc(&po->tp_drops);
2272                 goto drop_n_restore;
2273         }
2274         if (po->tp_version <= TPACKET_V2) {
2275                 if (macoff + snaplen > po->rx_ring.frame_size) {
2276                         if (po->copy_thresh &&
2277                             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2278                                 if (skb_shared(skb)) {
2279                                         copy_skb = skb_clone(skb, GFP_ATOMIC);
2280                                 } else {
2281                                         copy_skb = skb_get(skb);
2282                                         skb_head = skb->data;
2283                                 }
2284                                 if (copy_skb)
2285                                         skb_set_owner_r(copy_skb, sk);
2286                         }
2287                         snaplen = po->rx_ring.frame_size - macoff;
2288                         if ((int)snaplen < 0) {
2289                                 snaplen = 0;
2290                                 do_vnet = false;
2291                         }
2292                 }
2293         } else if (unlikely(macoff + snaplen >
2294                             GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2295                 u32 nval;
2296
2297                 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2298                 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2299                             snaplen, nval, macoff);
2300                 snaplen = nval;
2301                 if (unlikely((int)snaplen < 0)) {
2302                         snaplen = 0;
2303                         macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2304                         do_vnet = false;
2305                 }
2306         }
2307         spin_lock(&sk->sk_receive_queue.lock);
2308         h.raw = packet_current_rx_frame(po, skb,
2309                                         TP_STATUS_KERNEL, (macoff+snaplen));
2310         if (!h.raw)
2311                 goto drop_n_account;
2312
2313         if (po->tp_version <= TPACKET_V2) {
2314                 slot_id = po->rx_ring.head;
2315                 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2316                         goto drop_n_account;
2317                 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2318         }
2319
2320         if (do_vnet &&
2321             virtio_net_hdr_from_skb(skb, h.raw + macoff -
2322                                     sizeof(struct virtio_net_hdr),
2323                                     vio_le(), true, 0)) {
2324                 if (po->tp_version == TPACKET_V3)
2325                         prb_clear_blk_fill_status(&po->rx_ring);
2326                 goto drop_n_account;
2327         }
2328
2329         if (po->tp_version <= TPACKET_V2) {
2330                 packet_increment_rx_head(po, &po->rx_ring);
2331         /*
2332          * LOSING will be reported until you read the stats,
2333          * because it's COR - Clear On Read.
2334          * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2335          * at the packet level.
2336          */
2337                 if (atomic_read(&po->tp_drops))
2338                         status |= TP_STATUS_LOSING;
2339         }
2340
2341         po->stats.stats1.tp_packets++;
2342         if (copy_skb) {
2343                 status |= TP_STATUS_COPY;
2344                 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2345         }
2346         spin_unlock(&sk->sk_receive_queue.lock);
2347
2348         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2349
2350         /* Always timestamp; prefer an existing software timestamp taken
2351          * closer to the time of capture.
2352          */
2353         ts_status = tpacket_get_timestamp(skb, &ts,
2354                                           po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
2355         if (!ts_status)
2356                 ktime_get_real_ts64(&ts);
2357
2358         status |= ts_status;
2359
2360         switch (po->tp_version) {
2361         case TPACKET_V1:
2362                 h.h1->tp_len = skb->len;
2363                 h.h1->tp_snaplen = snaplen;
2364                 h.h1->tp_mac = macoff;
2365                 h.h1->tp_net = netoff;
2366                 h.h1->tp_sec = ts.tv_sec;
2367                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2368                 hdrlen = sizeof(*h.h1);
2369                 break;
2370         case TPACKET_V2:
2371                 h.h2->tp_len = skb->len;
2372                 h.h2->tp_snaplen = snaplen;
2373                 h.h2->tp_mac = macoff;
2374                 h.h2->tp_net = netoff;
2375                 h.h2->tp_sec = ts.tv_sec;
2376                 h.h2->tp_nsec = ts.tv_nsec;
2377                 if (skb_vlan_tag_present(skb)) {
2378                         h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2379                         h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2380                         status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2381                 } else {
2382                         h.h2->tp_vlan_tci = 0;
2383                         h.h2->tp_vlan_tpid = 0;
2384                 }
2385                 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2386                 hdrlen = sizeof(*h.h2);
2387                 break;
2388         case TPACKET_V3:
2389                 /* tp_next_offset and vlan are already populated above,
2390                  * so don't clear those fields here.
2391                  */
2392                 h.h3->tp_status |= status;
2393                 h.h3->tp_len = skb->len;
2394                 h.h3->tp_snaplen = snaplen;
2395                 h.h3->tp_mac = macoff;
2396                 h.h3->tp_net = netoff;
2397                 h.h3->tp_sec  = ts.tv_sec;
2398                 h.h3->tp_nsec = ts.tv_nsec;
2399                 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2400                 hdrlen = sizeof(*h.h3);
2401                 break;
2402         default:
2403                 BUG();
2404         }
2405
2406         sll = h.raw + TPACKET_ALIGN(hdrlen);
2407         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2408         sll->sll_family = AF_PACKET;
2409         sll->sll_hatype = dev->type;
2410         sll->sll_protocol = skb->protocol;
2411         sll->sll_pkttype = skb->pkt_type;
2412         if (unlikely(po->origdev))
2413                 sll->sll_ifindex = orig_dev->ifindex;
2414         else
2415                 sll->sll_ifindex = dev->ifindex;
2416
2417         smp_mb();
2418
2419 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2420         if (po->tp_version <= TPACKET_V2) {
2421                 u8 *start, *end;
2422
2423                 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2424                                         macoff + snaplen);
2425
2426                 for (start = h.raw; start < end; start += PAGE_SIZE)
2427                         flush_dcache_page(pgv_to_page(start));
2428         }
2429         smp_wmb();
2430 #endif
2431
2432         if (po->tp_version <= TPACKET_V2) {
2433                 spin_lock(&sk->sk_receive_queue.lock);
2434                 __packet_set_status(po, h.raw, status);
2435                 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2436                 spin_unlock(&sk->sk_receive_queue.lock);
2437                 sk->sk_data_ready(sk);
2438         } else if (po->tp_version == TPACKET_V3) {
2439                 prb_clear_blk_fill_status(&po->rx_ring);
2440         }
2441
2442 drop_n_restore:
2443         if (skb_head != skb->data && skb_shared(skb)) {
2444                 skb->data = skb_head;
2445                 skb->len = skb_len;
2446         }
2447 drop:
2448         if (!is_drop_n_account)
2449                 consume_skb(skb);
2450         else
2451                 kfree_skb(skb);
2452         return 0;
2453
2454 drop_n_account:
2455         spin_unlock(&sk->sk_receive_queue.lock);
2456         atomic_inc(&po->tp_drops);
2457         is_drop_n_account = true;
2458
2459         sk->sk_data_ready(sk);
2460         kfree_skb(copy_skb);
2461         goto drop_n_restore;
2462 }
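/* Illustrative sketch: tpacket_rcv() fills a memory-mapped RX ring that
 * userspace sets up roughly as below (TPACKET_V2; the ring geometry is an
 * arbitrary assumption). Each consumed slot is handed back by resetting
 * its status to TP_STATUS_KERNEL:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size	= 4096,
 *		.tp_block_nr	= 64,
 *		.tp_frame_size	= 2048,
 *		.tp_frame_nr	= 128,	// block_size * block_nr / frame_size
 *	};
 *	int ver = TPACKET_V2;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	struct tpacket2_hdr *hdr = ring;	// first slot
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		// frame data lives at (char *)hdr + hdr->tp_mac
 *		hdr->tp_status = TP_STATUS_KERNEL;	// return the slot
 *	}
 */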
2463
2464 static void tpacket_destruct_skb(struct sk_buff *skb)
2465 {
2466         struct packet_sock *po = pkt_sk(skb->sk);
2467
2468         if (likely(po->tx_ring.pg_vec)) {
2469                 void *ph;
2470                 __u32 ts;
2471
2472                 ph = skb_zcopy_get_nouarg(skb);
2473                 packet_dec_pending(&po->tx_ring);
2474
2475                 ts = __packet_set_timestamp(po, ph, skb);
2476                 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2477
2478                 if (!packet_read_pending(&po->tx_ring))
2479                         complete(&po->skb_completion);
2480         }
2481
2482         sock_wfree(skb);
2483 }
2484
2485 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2486 {
2487         if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2488             (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2489              __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2490               __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2491                 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2492                          __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2493                         __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2494
2495         if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2496                 return -EINVAL;
2497
2498         return 0;
2499 }
2500
2501 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2502                                  struct virtio_net_hdr *vnet_hdr)
2503 {
2504         if (*len < sizeof(*vnet_hdr))
2505                 return -EINVAL;
2506         *len -= sizeof(*vnet_hdr);
2507
2508         if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2509                 return -EFAULT;
2510
2511         return __packet_snd_vnet_parse(vnet_hdr, *len);
2512 }
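/* Illustrative sketch: on the send side, a socket with PACKET_VNET_HDR
 * enabled must prefix each message with a struct virtio_net_hdr, which
 * packet_snd_vnet_parse() peels off (an all-zero header requests no
 * offloads; frame and frame_len are assumed to hold a complete L2 frame):
 *
 *	struct virtio_net_hdr vh = { 0 };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &vh,   .iov_len = sizeof(vh) },
 *		{ .iov_base = frame, .iov_len = frame_len },
 *	};
 *	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };
 *
 *	sendmsg(fd, &msg, 0);
 */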
2513
2514 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2515                 void *frame, struct net_device *dev, void *data, int tp_len,
2516                 __be16 proto, unsigned char *addr, int hlen, int copylen,
2517                 const struct sockcm_cookie *sockc)
2518 {
2519         union tpacket_uhdr ph;
2520         int to_write, offset, len, nr_frags, len_max;
2521         struct socket *sock = po->sk.sk_socket;
2522         struct page *page;
2523         int err;
2524
2525         ph.raw = frame;
2526
2527         skb->protocol = proto;
2528         skb->dev = dev;
2529         skb->priority = po->sk.sk_priority;
2530         skb->mark = po->sk.sk_mark;
2531         skb->tstamp = sockc->transmit_time;
2532         skb_setup_tx_timestamp(skb, sockc->tsflags);
2533         skb_zcopy_set_nouarg(skb, ph.raw);
2534
2535         skb_reserve(skb, hlen);
2536         skb_reset_network_header(skb);
2537
2538         to_write = tp_len;
2539
2540         if (sock->type == SOCK_DGRAM) {
2541                 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2542                                 NULL, tp_len);
2543                 if (unlikely(err < 0))
2544                         return -EINVAL;
2545         } else if (copylen) {
2546                 int hdrlen = min_t(int, copylen, tp_len);
2547
2548                 skb_push(skb, dev->hard_header_len);
2549                 skb_put(skb, copylen - dev->hard_header_len);
2550                 err = skb_store_bits(skb, 0, data, hdrlen);
2551                 if (unlikely(err))
2552                         return err;
2553                 if (!dev_validate_header(dev, skb->data, hdrlen))
2554                         return -EINVAL;
2555
2556                 data += hdrlen;
2557                 to_write -= hdrlen;
2558         }
2559
2560         offset = offset_in_page(data);
2561         len_max = PAGE_SIZE - offset;
2562         len = ((to_write > len_max) ? len_max : to_write);
2563
2564         skb->data_len = to_write;
2565         skb->len += to_write;
2566         skb->truesize += to_write;
2567         refcount_add(to_write, &po->sk.sk_wmem_alloc);
2568
2569         while (likely(to_write)) {
2570                 nr_frags = skb_shinfo(skb)->nr_frags;
2571
2572                 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2573                         pr_err("Packet exceeds the number of skb frags (%lu)\n",
2574                                MAX_SKB_FRAGS);
2575                         return -EFAULT;
2576                 }
2577
2578                 page = pgv_to_page(data);
2579                 data += len;
2580                 flush_dcache_page(page);
2581                 get_page(page);
2582                 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2583                 to_write -= len;
2584                 offset = 0;
2585                 len_max = PAGE_SIZE;
2586                 len = ((to_write > len_max) ? len_max : to_write);
2587         }
2588
2589         packet_parse_headers(skb, sock);
2590
2591         return tp_len;
2592 }
2593
2594 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2595                                 int size_max, void **data)
2596 {
2597         union tpacket_uhdr ph;
2598         int tp_len, off;
2599
2600         ph.raw = frame;
2601
2602         switch (po->tp_version) {
2603         case TPACKET_V3:
2604                 if (ph.h3->tp_next_offset != 0) {
2605                         pr_warn_once("variable sized slot not supported");
2606                         return -EINVAL;
2607                 }
2608                 tp_len = ph.h3->tp_len;
2609                 break;
2610         case TPACKET_V2:
2611                 tp_len = ph.h2->tp_len;
2612                 break;
2613         default:
2614                 tp_len = ph.h1->tp_len;
2615                 break;
2616         }
2617         if (unlikely(tp_len > size_max)) {
2618                 pr_err("packet size too large (%d > %d)\n", tp_len, size_max);
2619                 return -EMSGSIZE;
2620         }
2621
2622         if (unlikely(po->tp_tx_has_off)) {
2623                 int off_min, off_max;
2624
2625                 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2626                 off_max = po->tx_ring.frame_size - tp_len;
2627                 if (po->sk.sk_type == SOCK_DGRAM) {
2628                         switch (po->tp_version) {
2629                         case TPACKET_V3:
2630                                 off = ph.h3->tp_net;
2631                                 break;
2632                         case TPACKET_V2:
2633                                 off = ph.h2->tp_net;
2634                                 break;
2635                         default:
2636                                 off = ph.h1->tp_net;
2637                                 break;
2638                         }
2639                 } else {
2640                         switch (po->tp_version) {
2641                         case TPACKET_V3:
2642                                 off = ph.h3->tp_mac;
2643                                 break;
2644                         case TPACKET_V2:
2645                                 off = ph.h2->tp_mac;
2646                                 break;
2647                         default:
2648                                 off = ph.h1->tp_mac;
2649                                 break;
2650                         }
2651                 }
2652                 if (unlikely((off < off_min) || (off_max < off)))
2653                         return -EINVAL;
2654         } else {
2655                 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2656         }
2657
2658         *data = frame + off;
2659         return tp_len;
2660 }
2661
2662 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2663 {
2664         struct sk_buff *skb = NULL;
2665         struct net_device *dev;
2666         struct virtio_net_hdr *vnet_hdr = NULL;
2667         struct sockcm_cookie sockc;
2668         __be16 proto;
2669         int err, reserve = 0;
2670         void *ph;
2671         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2672         bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2673         unsigned char *addr = NULL;
2674         int tp_len, size_max;
2675         void *data;
2676         int len_sum = 0;
2677         int status = TP_STATUS_AVAILABLE;
2678         int hlen, tlen, copylen = 0;
2679         long timeo = 0;
2680
2681         mutex_lock(&po->pg_vec_lock);
2682
2683         /* The packet_sendmsg() check on tx_ring.pg_vec was lockless;
2684          * we need to re-confirm it under the protection of pg_vec_lock.
2685          */
2686         if (unlikely(!po->tx_ring.pg_vec)) {
2687                 err = -EBUSY;
2688                 goto out;
2689         }
2690         if (likely(saddr == NULL)) {
2691                 dev     = packet_cached_dev_get(po);
2692                 proto   = READ_ONCE(po->num);
2693         } else {
2694                 err = -EINVAL;
2695                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2696                         goto out;
2697                 if (msg->msg_namelen < (saddr->sll_halen
2698                                         + offsetof(struct sockaddr_ll,
2699                                                 sll_addr)))
2700                         goto out;
2701                 proto   = saddr->sll_protocol;
2702                 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2703                 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2704                         if (dev && msg->msg_namelen < dev->addr_len +
2705                                    offsetof(struct sockaddr_ll, sll_addr))
2706                                 goto out_put;
2707                         addr = saddr->sll_addr;
2708                 }
2709         }
2710
2711         err = -ENXIO;
2712         if (unlikely(dev == NULL))
2713                 goto out;
2714         err = -ENETDOWN;
2715         if (unlikely(!(dev->flags & IFF_UP)))
2716                 goto out_put;
2717
2718         sockcm_init(&sockc, &po->sk);
2719         if (msg->msg_controllen) {
2720                 err = sock_cmsg_send(&po->sk, msg, &sockc);
2721                 if (unlikely(err))
2722                         goto out_put;
2723         }
2724
2725         if (po->sk.sk_socket->type == SOCK_RAW)
2726                 reserve = dev->hard_header_len;
2727         size_max = po->tx_ring.frame_size
2728                 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2729
2730         if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2731                 size_max = dev->mtu + reserve + VLAN_HLEN;
2732
2733         reinit_completion(&po->skb_completion);
2734
2735         do {
2736                 ph = packet_current_frame(po, &po->tx_ring,
2737                                           TP_STATUS_SEND_REQUEST);
2738                 if (unlikely(ph == NULL)) {
2739                         if (need_wait && skb) {
2740                                 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2741                                 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2742                                 if (timeo <= 0) {
2743                                         err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2744                                         goto out_put;
2745                                 }
2746                         }
2747                         /* check for additional frames */
2748                         continue;
2749                 }
2750
2751                 skb = NULL;
2752                 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2753                 if (tp_len < 0)
2754                         goto tpacket_error;
2755
2756                 status = TP_STATUS_SEND_REQUEST;
2757                 hlen = LL_RESERVED_SPACE(dev);
2758                 tlen = dev->needed_tailroom;
2759                 if (po->has_vnet_hdr) {
2760                         vnet_hdr = data;
2761                         data += sizeof(*vnet_hdr);
2762                         tp_len -= sizeof(*vnet_hdr);
2763                         if (tp_len < 0 ||
2764                             __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2765                                 tp_len = -EINVAL;
2766                                 goto tpacket_error;
2767                         }
2768                         copylen = __virtio16_to_cpu(vio_le(),
2769                                                     vnet_hdr->hdr_len);
2770                 }
2771                 copylen = max_t(int, copylen, dev->hard_header_len);
2772                 skb = sock_alloc_send_skb(&po->sk,
2773                                 hlen + tlen + sizeof(struct sockaddr_ll) +
2774                                 (copylen - dev->hard_header_len),
2775                                 !need_wait, &err);
2776
2777                 if (unlikely(skb == NULL)) {
2778                         /* we assume the socket was initially writeable ... */
2779                         if (likely(len_sum > 0))
2780                                 err = len_sum;
2781                         goto out_status;
2782                 }
2783                 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2784                                           addr, hlen, copylen, &sockc);
2785                 if (likely(tp_len >= 0) &&
2786                     tp_len > dev->mtu + reserve &&
2787                     !po->has_vnet_hdr &&
2788                     !packet_extra_vlan_len_allowed(dev, skb))
2789                         tp_len = -EMSGSIZE;
2790
2791                 if (unlikely(tp_len < 0)) {
2792 tpacket_error:
2793                         if (po->tp_loss) {
2794                                 __packet_set_status(po, ph,
2795                                                 TP_STATUS_AVAILABLE);
2796                                 packet_increment_head(&po->tx_ring);
2797                                 kfree_skb(skb);
2798                                 continue;
2799                         } else {
2800                                 status = TP_STATUS_WRONG_FORMAT;
2801                                 err = tp_len;
2802                                 goto out_status;
2803                         }
2804                 }
2805
2806                 if (po->has_vnet_hdr) {
2807                         if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2808                                 tp_len = -EINVAL;
2809                                 goto tpacket_error;
2810                         }
2811                         virtio_net_hdr_set_proto(skb, vnet_hdr);
2812                 }
2813
2814                 skb->destructor = tpacket_destruct_skb;
2815                 __packet_set_status(po, ph, TP_STATUS_SENDING);
2816                 packet_inc_pending(&po->tx_ring);
2817
2818                 status = TP_STATUS_SEND_REQUEST;
2819                 err = po->xmit(skb);
2820                 if (unlikely(err > 0)) {
2821                         err = net_xmit_errno(err);
2822                         if (err && __packet_get_status(po, ph) ==
2823                                    TP_STATUS_AVAILABLE) {
2824                                 /* skb was destructed already */
2825                                 skb = NULL;
2826                                 goto out_status;
2827                         }
2828                         /*
2829                          * skb was dropped but not destructed yet;
2830                          * let's treat it like congestion or err < 0
2831                          */
2832                         err = 0;
2833                 }
2834                 packet_increment_head(&po->tx_ring);
2835                 len_sum += tp_len;
2836         } while (likely((ph != NULL) ||
2837                 /* Note: packet_read_pending() might be slow if we have
2838                  * to call it, as it's a per-cpu variable, but in the
2839                  * fast path we already short-circuit the loop with the
2840                  * first condition and luckily don't have to go down
2841                  * that path anyway.
2842                  */
2843                  (need_wait && packet_read_pending(&po->tx_ring))));
2844
2845         err = len_sum;
2846         goto out_put;
2847
2848 out_status:
2849         __packet_set_status(po, ph, status);
2850         kfree_skb(skb);
2851 out_put:
2852         dev_put(dev);
2853 out:
2854         mutex_unlock(&po->pg_vec_lock);
2855         return err;
2856 }
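/* Illustrative sketch: the TX ring drained by tpacket_snd() is filled by
 * userspace marking a mapped slot TP_STATUS_SEND_REQUEST and kicking the
 * kernel with an empty send(). tx_slot, eth_frame and frame_len are
 * assumptions; without PACKET_TX_HAS_OFF the data starts right after the
 * aligned header:
 *
 *	struct tpacket2_hdr *hdr = tx_slot;
 *	unsigned char *data = (unsigned char *)hdr +
 *			      TPACKET_ALIGN(sizeof(struct tpacket2_hdr));
 *
 *	memcpy(data, eth_frame, frame_len);	// complete link-layer frame
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *
 *	send(fd, NULL, 0, 0);			// triggers tpacket_snd()
 */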
2857
2858 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2859                                         size_t reserve, size_t len,
2860                                         size_t linear, int noblock,
2861                                         int *err)
2862 {
2863         struct sk_buff *skb;
2864
2865         /* Under a page?  Don't bother with paged skb. */
2866         if (prepad + len < PAGE_SIZE || !linear)
2867                 linear = len;
2868
2869         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2870                                    err, 0);
2871         if (!skb)
2872                 return NULL;
2873
2874         skb_reserve(skb, reserve);
2875         skb_put(skb, linear);
2876         skb->data_len = len - linear;
2877         skb->len += len - linear;
2878
2879         return skb;
2880 }
2881
2882 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2883 {
2884         struct sock *sk = sock->sk;
2885         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2886         struct sk_buff *skb;
2887         struct net_device *dev;
2888         __be16 proto;
2889         unsigned char *addr = NULL;
2890         int err, reserve = 0;
2891         struct sockcm_cookie sockc;
2892         struct virtio_net_hdr vnet_hdr = { 0 };
2893         int offset = 0;
2894         struct packet_sock *po = pkt_sk(sk);
2895         bool has_vnet_hdr = false;
2896         int hlen, tlen, linear;
2897         int extra_len = 0;
2898
2899         /*
2900          *      Get and verify the address.
2901          */
2902
2903         if (likely(saddr == NULL)) {
2904                 dev     = packet_cached_dev_get(po);
2905                 proto   = READ_ONCE(po->num);
2906         } else {
2907                 err = -EINVAL;
2908                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2909                         goto out;
2910                 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2911                         goto out;
2912                 proto   = saddr->sll_protocol;
2913                 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2914                 if (sock->type == SOCK_DGRAM) {
2915                         if (dev && msg->msg_namelen < dev->addr_len +
2916                                    offsetof(struct sockaddr_ll, sll_addr))
2917                                 goto out_unlock;
2918                         addr = saddr->sll_addr;
2919                 }
2920         }
2921
2922         err = -ENXIO;
2923         if (unlikely(dev == NULL))
2924                 goto out_unlock;
2925         err = -ENETDOWN;
2926         if (unlikely(!(dev->flags & IFF_UP)))
2927                 goto out_unlock;
2928
2929         sockcm_init(&sockc, sk);
2930         sockc.mark = sk->sk_mark;
2931         if (msg->msg_controllen) {
2932                 err = sock_cmsg_send(sk, msg, &sockc);
2933                 if (unlikely(err))
2934                         goto out_unlock;
2935         }
2936
2937         if (sock->type == SOCK_RAW)
2938                 reserve = dev->hard_header_len;
2939         if (po->has_vnet_hdr) {
2940                 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2941                 if (err)
2942                         goto out_unlock;
2943                 has_vnet_hdr = true;
2944         }
2945
2946         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2947                 if (!netif_supports_nofcs(dev)) {
2948                         err = -EPROTONOSUPPORT;
2949                         goto out_unlock;
2950                 }
2951                 extra_len = 4; /* We're doing our own CRC */
2952         }
2953
2954         err = -EMSGSIZE;
2955         if (!vnet_hdr.gso_type &&
2956             (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2957                 goto out_unlock;
2958
2959         err = -ENOBUFS;
2960         hlen = LL_RESERVED_SPACE(dev);
2961         tlen = dev->needed_tailroom;
2962         linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2963         linear = max(linear, min_t(int, len, dev->hard_header_len));
2964         skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2965                                msg->msg_flags & MSG_DONTWAIT, &err);
2966         if (skb == NULL)
2967                 goto out_unlock;
2968
2969         skb_reset_network_header(skb);
2970
2971         err = -EINVAL;
2972         if (sock->type == SOCK_DGRAM) {
2973                 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2974                 if (unlikely(offset < 0))
2975                         goto out_free;
2976         } else if (reserve) {
2977                 skb_reserve(skb, -reserve);
2978                 if (len < reserve + sizeof(struct ipv6hdr) &&
2979                     dev->min_header_len != dev->hard_header_len)
2980                         skb_reset_network_header(skb);
2981         }
2982
2983         /* Returns -EFAULT on error */
2984         err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2985         if (err)
2986                 goto out_free;
2987
2988         if (sock->type == SOCK_RAW &&
2989             !dev_validate_header(dev, skb->data, len)) {
2990                 err = -EINVAL;
2991                 goto out_free;
2992         }
2993
2994         skb_setup_tx_timestamp(skb, sockc.tsflags);
2995
2996         if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2997             !packet_extra_vlan_len_allowed(dev, skb)) {
2998                 err = -EMSGSIZE;
2999                 goto out_free;
3000         }
3001
3002         skb->protocol = proto;
3003         skb->dev = dev;
3004         skb->priority = sk->sk_priority;
3005         skb->mark = sockc.mark;
3006         skb->tstamp = sockc.transmit_time;
3007
3008         if (has_vnet_hdr) {
3009                 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3010                 if (err)
3011                         goto out_free;
3012                 len += sizeof(vnet_hdr);
3013                 virtio_net_hdr_set_proto(skb, &vnet_hdr);
3014         }
3015
3016         packet_parse_headers(skb, sock);
3017
3018         if (unlikely(extra_len == 4))
3019                 skb->no_fcs = 1;
3020
3021         err = po->xmit(skb);
3022         if (err > 0 && (err = net_xmit_errno(err)) != 0)
3023                 goto out_unlock;
3024
3025         dev_put(dev);
3026
3027         return len;
3028
3029 out_free:
3030         kfree_skb(skb);
3031 out_unlock:
3032         dev_put(dev);
3033 out:
3034         return err;
3035 }
3036
3037 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3038 {
3039         struct sock *sk = sock->sk;
3040         struct packet_sock *po = pkt_sk(sk);
3041
3042         /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3043          * tpacket_snd() will redo the check safely.
3044          */
3045         if (data_race(po->tx_ring.pg_vec))
3046                 return tpacket_snd(po, msg);
3047
3048         return packet_snd(sock, msg, len);
3049 }
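
/* Example (userspace, illustrative only, not part of this file): a minimal
 * sendto() that reaches packet_snd() above via the saddr branch. The
 * interface name "eth0" and the frame contents are assumptions made for
 * the sketch; a SOCK_RAW socket expects a complete frame, link-layer
 * header first.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	unsigned char frame[ETH_ZLEN] = { 0 };	 dst MAC, src MAC, type, data
 *
 *	sendto(fd, frame, sizeof(frame), 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */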
3050
3051 /*
3052  *      Close a PACKET socket. This is fairly simple. We immediately go
3053  *      to 'closed' state and remove our protocol entry in the device list.
3054  */
3055
3056 static int packet_release(struct socket *sock)
3057 {
3058         struct sock *sk = sock->sk;
3059         struct packet_sock *po;
3060         struct packet_fanout *f;
3061         struct net *net;
3062         union tpacket_req_u req_u;
3063
3064         if (!sk)
3065                 return 0;
3066
3067         net = sock_net(sk);
3068         po = pkt_sk(sk);
3069
3070         mutex_lock(&net->packet.sklist_lock);
3071         sk_del_node_init_rcu(sk);
3072         mutex_unlock(&net->packet.sklist_lock);
3073
3074         preempt_disable();
3075         sock_prot_inuse_add(net, sk->sk_prot, -1);
3076         preempt_enable();
3077
3078         spin_lock(&po->bind_lock);
3079         unregister_prot_hook(sk, false);
3080         packet_cached_dev_reset(po);
3081
3082         if (po->prot_hook.dev) {
3083                 dev_put(po->prot_hook.dev);
3084                 po->prot_hook.dev = NULL;
3085         }
3086         spin_unlock(&po->bind_lock);
3087
3088         packet_flush_mclist(sk);
3089
3090         lock_sock(sk);
3091         if (po->rx_ring.pg_vec) {
3092                 memset(&req_u, 0, sizeof(req_u));
3093                 packet_set_ring(sk, &req_u, 1, 0);
3094         }
3095
3096         if (po->tx_ring.pg_vec) {
3097                 memset(&req_u, 0, sizeof(req_u));
3098                 packet_set_ring(sk, &req_u, 1, 1);
3099         }
3100         release_sock(sk);
3101
3102         f = fanout_release(sk);
3103
3104         synchronize_net();
3105
3106         kfree(po->rollover);
3107         if (f) {
3108                 fanout_release_data(f);
3109                 kvfree(f);
3110         }
3111         /*
3112          *      Now the socket is dead. No more input will appear.
3113          */
3114         sock_orphan(sk);
3115         sock->sk = NULL;
3116
3117         /* Purge queues */
3118
3119         skb_queue_purge(&sk->sk_receive_queue);
3120         packet_free_pending(po);
3121         sk_refcnt_debug_release(sk);
3122
3123         sock_put(sk);
3124         return 0;
3125 }
3126
3127 /*
3128  *      Attach a packet hook.
3129  */
3130
3131 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3132                           __be16 proto)
3133 {
3134         struct packet_sock *po = pkt_sk(sk);
3135         struct net_device *dev_curr;
3136         __be16 proto_curr;
3137         bool need_rehook;
3138         struct net_device *dev = NULL;
3139         int ret = 0;
3140         bool unlisted = false;
3141
3142         lock_sock(sk);
3143         spin_lock(&po->bind_lock);
3144         rcu_read_lock();
3145
3146         if (po->fanout) {
3147                 ret = -EINVAL;
3148                 goto out_unlock;
3149         }
3150
3151         if (name) {
3152                 dev = dev_get_by_name_rcu(sock_net(sk), name);
3153                 if (!dev) {
3154                         ret = -ENODEV;
3155                         goto out_unlock;
3156                 }
3157         } else if (ifindex) {
3158                 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3159                 if (!dev) {
3160                         ret = -ENODEV;
3161                         goto out_unlock;
3162                 }
3163         }
3164
3165         dev_hold(dev);
3166
3167         proto_curr = po->prot_hook.type;
3168         dev_curr = po->prot_hook.dev;
3169
3170         need_rehook = proto_curr != proto || dev_curr != dev;
3171
3172         if (need_rehook) {
3173                 if (po->running) {
3174                         rcu_read_unlock();
3175                         /* prevents packet_notifier() from calling
3176                          * register_prot_hook()
3177                          */
3178                         WRITE_ONCE(po->num, 0);
3179                         __unregister_prot_hook(sk, true);
3180                         rcu_read_lock();
3181                         dev_curr = po->prot_hook.dev;
3182                         if (dev)
3183                                 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3184                                                                  dev->ifindex);
3185                 }
3186
3187                 BUG_ON(po->running);
3188                 WRITE_ONCE(po->num, proto);
3189                 po->prot_hook.type = proto;
3190
3191                 if (unlikely(unlisted)) {
3192                         dev_put(dev);
3193                         po->prot_hook.dev = NULL;
3194                         WRITE_ONCE(po->ifindex, -1);
3195                         packet_cached_dev_reset(po);
3196                 } else {
3197                         po->prot_hook.dev = dev;
3198                         WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3199                         packet_cached_dev_assign(po, dev);
3200                 }
3201         }
3202         dev_put(dev_curr);
3203
3204         if (proto == 0 || !need_rehook)
3205                 goto out_unlock;
3206
3207         if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3208                 register_prot_hook(sk);
3209         } else {
3210                 sk->sk_err = ENETDOWN;
3211                 if (!sock_flag(sk, SOCK_DEAD))
3212                         sk_error_report(sk);
3213         }
3214
3215 out_unlock:
3216         rcu_read_unlock();
3217         spin_unlock(&po->bind_lock);
3218         release_sock(sk);
3219         return ret;
3220 }
3221
3222 /*
3223  *      Bind a packet socket to a device
3224  */
3225
3226 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3227                             int addr_len)
3228 {
3229         struct sock *sk = sock->sk;
3230         char name[sizeof(uaddr->sa_data) + 1];
3231
3232         /*
3233          *      Check legality
3234          */
3235
3236         if (addr_len != sizeof(struct sockaddr))
3237                 return -EINVAL;
3238         /* uaddr->sa_data comes from userspace; it is not guaranteed to be
3239          * NUL-terminated.
3240          */
3241         memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3242         name[sizeof(uaddr->sa_data)] = 0;
3243
3244         return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3245 }
3246
3247 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3248 {
3249         struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3250         struct sock *sk = sock->sk;
3251
3252         /*
3253          *      Check legality
3254          */
3255
3256         if (addr_len < sizeof(struct sockaddr_ll))
3257                 return -EINVAL;
3258         if (sll->sll_family != AF_PACKET)
3259                 return -EINVAL;
3260
3261         return packet_do_bind(sk, NULL, sll->sll_ifindex,
3262                               sll->sll_protocol ? : pkt_sk(sk)->num);
3263 }
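
/* Example (userspace, illustrative only): binding to one interface and one
 * protocol; "eth0" is an assumption. A zero sll_protocol keeps the protocol
 * the socket was created with, matching the ?: fallback above.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */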
3264
3265 static struct proto packet_proto = {
3266         .name     = "PACKET",
3267         .owner    = THIS_MODULE,
3268         .obj_size = sizeof(struct packet_sock),
3269 };
3270
3271 /*
3272  *      Create a packet of type SOCK_PACKET.
3273  */
3274
3275 static int packet_create(struct net *net, struct socket *sock, int protocol,
3276                          int kern)
3277 {
3278         struct sock *sk;
3279         struct packet_sock *po;
3280         __be16 proto = (__force __be16)protocol; /* weird, but documented */
3281         int err;
3282
3283         if (!ns_capable(net->user_ns, CAP_NET_RAW))
3284                 return -EPERM;
3285         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3286             sock->type != SOCK_PACKET)
3287                 return -ESOCKTNOSUPPORT;
3288
3289         sock->state = SS_UNCONNECTED;
3290
3291         err = -ENOBUFS;
3292         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3293         if (sk == NULL)
3294                 goto out;
3295
3296         sock->ops = &packet_ops;
3297         if (sock->type == SOCK_PACKET)
3298                 sock->ops = &packet_ops_spkt;
3299
3300         sock_init_data(sock, sk);
3301
3302         po = pkt_sk(sk);
3303         init_completion(&po->skb_completion);
3304         sk->sk_family = PF_PACKET;
3305         po->num = proto;
3306         po->xmit = dev_queue_xmit;
3307
3308         err = packet_alloc_pending(po);
3309         if (err)
3310                 goto out2;
3311
3312         packet_cached_dev_reset(po);
3313
3314         sk->sk_destruct = packet_sock_destruct;
3315         sk_refcnt_debug_inc(sk);
3316
3317         /*
3318          *      Attach a protocol block
3319          */
3320
3321         spin_lock_init(&po->bind_lock);
3322         mutex_init(&po->pg_vec_lock);
3323         po->rollover = NULL;
3324         po->prot_hook.func = packet_rcv;
3325
3326         if (sock->type == SOCK_PACKET)
3327                 po->prot_hook.func = packet_rcv_spkt;
3328
3329         po->prot_hook.af_packet_priv = sk;
3330         po->prot_hook.af_packet_net = sock_net(sk);
3331
3332         if (proto) {
3333                 po->prot_hook.type = proto;
3334                 __register_prot_hook(sk);
3335         }
3336
3337         mutex_lock(&net->packet.sklist_lock);
3338         sk_add_node_tail_rcu(sk, &net->packet.sklist);
3339         mutex_unlock(&net->packet.sklist_lock);
3340
3341         preempt_disable();
3342         sock_prot_inuse_add(net, &packet_proto, 1);
3343         preempt_enable();
3344
3345         return 0;
3346 out2:
3347         sk_free(sk);
3348 out:
3349         return err;
3350 }
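
/* Example (userspace, illustrative only): the three socket types accepted
 * by packet_create() above; all of them require CAP_NET_RAW in the socket's
 * user namespace. A non-zero protocol registers the prot hook immediately.
 *
 *	socket(AF_PACKET, SOCK_RAW,    htons(ETH_P_ALL));   caller handles the link-layer header
 *	socket(AF_PACKET, SOCK_DGRAM,  htons(ETH_P_IP));    kernel handles the link-layer header
 *	socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));   obsolete SOCK_PACKET interface
 */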
3351
3352 /*
3353  *      Pull a packet from our receive queue and hand it to the user.
3354  *      If necessary we block.
3355  */
3356
3357 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3358                           int flags)
3359 {
3360         struct sock *sk = sock->sk;
3361         struct sk_buff *skb;
3362         int copied, err;
3363         int vnet_hdr_len = 0;
3364         unsigned int origlen = 0;
3365
3366         err = -EINVAL;
3367         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3368                 goto out;
3369
3370 #if 0
3371         /* What error should we return now? EUNATTACH? */
3372         if (pkt_sk(sk)->ifindex < 0)
3373                 return -ENODEV;
3374 #endif
3375
3376         if (flags & MSG_ERRQUEUE) {
3377                 err = sock_recv_errqueue(sk, msg, len,
3378                                          SOL_PACKET, PACKET_TX_TIMESTAMP);
3379                 goto out;
3380         }
3381
3382         /*
3383          *      Call the generic datagram receiver. This handles all sorts
3384          *      of horrible races and re-entrancy so we can forget about it
3385          *      in the protocol layers.
3386          *
3387          *      Now it will return ENETDOWN if the device has just gone
3388          *      down, but then it will block.
3389          */
3390
3391         skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3392
3393         /*
3394          *      An error occurred, so return it. Because skb_recv_datagram()
3395          *      handles the blocking for us, we don't need to see or worry
3396          *      about blocking retries.
3397          */
3398
3399         if (skb == NULL)
3400                 goto out;
3401
3402         packet_rcv_try_clear_pressure(pkt_sk(sk));
3403
3404         if (pkt_sk(sk)->has_vnet_hdr) {
3405                 err = packet_rcv_vnet(msg, skb, &len);
3406                 if (err)
3407                         goto out_free;
3408                 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3409         }
3410
3411         /* You lose any data beyond the buffer you gave. If that worries
3412          * a user program, it can ask the device for its MTU
3413          * anyway.
3414          */
3415         copied = skb->len;
3416         if (copied > len) {
3417                 copied = len;
3418                 msg->msg_flags |= MSG_TRUNC;
3419         }
3420
3421         err = skb_copy_datagram_msg(skb, 0, msg, copied);
3422         if (err)
3423                 goto out_free;
3424
3425         if (sock->type != SOCK_PACKET) {
3426                 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3427
3428                 /* Original length was stored in sockaddr_ll fields */
3429                 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3430                 sll->sll_family = AF_PACKET;
3431                 sll->sll_protocol = skb->protocol;
3432         }
3433
3434         sock_recv_ts_and_drops(msg, sk, skb);
3435
3436         if (msg->msg_name) {
3437                 int copy_len;
3438
3439                 /* If the address length field is there to be filled
3440                  * in, we fill it in now.
3441                  */
3442                 if (sock->type == SOCK_PACKET) {
3443                         __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3444                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
3445                         copy_len = msg->msg_namelen;
3446                 } else {
3447                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3448
3449                         msg->msg_namelen = sll->sll_halen +
3450                                 offsetof(struct sockaddr_ll, sll_addr);
3451                         copy_len = msg->msg_namelen;
3452                         if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3453                                 memset(msg->msg_name +
3454                                        offsetof(struct sockaddr_ll, sll_addr),
3455                                        0, sizeof(sll->sll_addr));
3456                                 msg->msg_namelen = sizeof(struct sockaddr_ll);
3457                         }
3458                 }
3459                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3460         }
3461
3462         if (pkt_sk(sk)->auxdata) {
3463                 struct tpacket_auxdata aux;
3464
3465                 aux.tp_status = TP_STATUS_USER;
3466                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3467                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3468                 else if (skb->pkt_type != PACKET_OUTGOING &&
3469                          (skb->ip_summed == CHECKSUM_COMPLETE ||
3470                           skb_csum_unnecessary(skb)))
3471                         aux.tp_status |= TP_STATUS_CSUM_VALID;
3472
3473                 aux.tp_len = origlen;
3474                 aux.tp_snaplen = skb->len;
3475                 aux.tp_mac = 0;
3476                 aux.tp_net = skb_network_offset(skb);
3477                 if (skb_vlan_tag_present(skb)) {
3478                         aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3479                         aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3480                         aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3481                 } else {
3482                         aux.tp_vlan_tci = 0;
3483                         aux.tp_vlan_tpid = 0;
3484                 }
3485                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3486         }
3487
3488         /*
3489          *      Free or return the buffer as appropriate. Again this
3490          *      hides all the races and re-entrancy issues from us.
3491          */
3492         err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3493
3494 out_free:
3495         skb_free_datagram(sk, skb);
3496 out:
3497         return err;
3498 }
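
/* Example (userspace, illustrative only): receiving one frame together with
 * the PACKET_AUXDATA control message filled in above; assumes PACKET_AUXDATA
 * was already enabled with setsockopt().
 *
 *	unsigned char buf[2048];
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	ssize_t n = recvmsg(fd, &mh, 0);
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&mh);
 *
 *	if (n >= 0 && cmsg && cmsg->cmsg_level == SOL_PACKET &&
 *	    cmsg->cmsg_type == PACKET_AUXDATA) {
 *		struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *	}
 *
 * aux->tp_len carries the untruncated length (origlen above), even when
 * MSG_TRUNC shortened the copy.
 */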
3499
3500 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3501                                int peer)
3502 {
3503         struct net_device *dev;
3504         struct sock *sk = sock->sk;
3505
3506         if (peer)
3507                 return -EOPNOTSUPP;
3508
3509         uaddr->sa_family = AF_PACKET;
3510         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3511         rcu_read_lock();
3512         dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3513         if (dev)
3514                 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3515         rcu_read_unlock();
3516
3517         return sizeof(*uaddr);
3518 }
3519
3520 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3521                           int peer)
3522 {
3523         struct net_device *dev;
3524         struct sock *sk = sock->sk;
3525         struct packet_sock *po = pkt_sk(sk);
3526         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3527         int ifindex;
3528
3529         if (peer)
3530                 return -EOPNOTSUPP;
3531
3532         ifindex = READ_ONCE(po->ifindex);
3533         sll->sll_family = AF_PACKET;
3534         sll->sll_ifindex = ifindex;
3535         sll->sll_protocol = READ_ONCE(po->num);
3536         sll->sll_pkttype = 0;
3537         rcu_read_lock();
3538         dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3539         if (dev) {
3540                 sll->sll_hatype = dev->type;
3541                 sll->sll_halen = dev->addr_len;
3542                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3543         } else {
3544                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
3545                 sll->sll_halen = 0;
3546         }
3547         rcu_read_unlock();
3548
3549         return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3550 }
3551
3552 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3553                          int what)
3554 {
3555         switch (i->type) {
3556         case PACKET_MR_MULTICAST:
3557                 if (i->alen != dev->addr_len)
3558                         return -EINVAL;
3559                 if (what > 0)
3560                         return dev_mc_add(dev, i->addr);
3561                 else
3562                         return dev_mc_del(dev, i->addr);
3563                 break;
3564         case PACKET_MR_PROMISC:
3565                 return dev_set_promiscuity(dev, what);
3566         case PACKET_MR_ALLMULTI:
3567                 return dev_set_allmulti(dev, what);
3568         case PACKET_MR_UNICAST:
3569                 if (i->alen != dev->addr_len)
3570                         return -EINVAL;
3571                 if (what > 0)
3572                         return dev_uc_add(dev, i->addr);
3573                 else
3574                         return dev_uc_del(dev, i->addr);
3575                 break;
3576         default:
3577                 break;
3578         }
3579         return 0;
3580 }
3581
3582 static void packet_dev_mclist_delete(struct net_device *dev,
3583                                      struct packet_mclist **mlp)
3584 {
3585         struct packet_mclist *ml;
3586
3587         while ((ml = *mlp) != NULL) {
3588                 if (ml->ifindex == dev->ifindex) {
3589                         packet_dev_mc(dev, ml, -1);
3590                         *mlp = ml->next;
3591                         kfree(ml);
3592                 } else
3593                         mlp = &ml->next;
3594         }
3595 }
3596
3597 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3598 {
3599         struct packet_sock *po = pkt_sk(sk);
3600         struct packet_mclist *ml, *i;
3601         struct net_device *dev;
3602         int err;
3603
3604         rtnl_lock();
3605
3606         err = -ENODEV;
3607         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3608         if (!dev)
3609                 goto done;
3610
3611         err = -EINVAL;
3612         if (mreq->mr_alen > dev->addr_len)
3613                 goto done;
3614
3615         err = -ENOBUFS;
3616         i = kmalloc(sizeof(*i), GFP_KERNEL);
3617         if (i == NULL)
3618                 goto done;
3619
3620         err = 0;
3621         for (ml = po->mclist; ml; ml = ml->next) {
3622                 if (ml->ifindex == mreq->mr_ifindex &&
3623                     ml->type == mreq->mr_type &&
3624                     ml->alen == mreq->mr_alen &&
3625                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3626                         ml->count++;
3627                         /* Free the new element ... */
3628                         kfree(i);
3629                         goto done;
3630                 }
3631         }
3632
3633         i->type = mreq->mr_type;
3634         i->ifindex = mreq->mr_ifindex;
3635         i->alen = mreq->mr_alen;
3636         memcpy(i->addr, mreq->mr_address, i->alen);
3637         memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3638         i->count = 1;
3639         i->next = po->mclist;
3640         po->mclist = i;
3641         err = packet_dev_mc(dev, i, 1);
3642         if (err) {
3643                 po->mclist = i->next;
3644                 kfree(i);
3645         }
3646
3647 done:
3648         rtnl_unlock();
3649         return err;
3650 }
3651
3652 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3653 {
3654         struct packet_mclist *ml, **mlp;
3655
3656         rtnl_lock();
3657
3658         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3659                 if (ml->ifindex == mreq->mr_ifindex &&
3660                     ml->type == mreq->mr_type &&
3661                     ml->alen == mreq->mr_alen &&
3662                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3663                         if (--ml->count == 0) {
3664                                 struct net_device *dev;
3665                                 *mlp = ml->next;
3666                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3667                                 if (dev)
3668                                         packet_dev_mc(dev, ml, -1);
3669                                 kfree(ml);
3670                         }
3671                         break;
3672                 }
3673         }
3674         rtnl_unlock();
3675         return 0;
3676 }
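
/* Example (userspace, illustrative only): putting "eth0" into promiscuous
 * mode through the membership interface above, which is refcounted per
 * socket and unwound automatically by packet_flush_mclist() on close.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */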
3677
3678 static void packet_flush_mclist(struct sock *sk)
3679 {
3680         struct packet_sock *po = pkt_sk(sk);
3681         struct packet_mclist *ml;
3682
3683         if (!po->mclist)
3684                 return;
3685
3686         rtnl_lock();
3687         while ((ml = po->mclist) != NULL) {
3688                 struct net_device *dev;
3689
3690                 po->mclist = ml->next;
3691                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3692                 if (dev != NULL)
3693                         packet_dev_mc(dev, ml, -1);
3694                 kfree(ml);
3695         }
3696         rtnl_unlock();
3697 }
3698
3699 static int
3700 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3701                   unsigned int optlen)
3702 {
3703         struct sock *sk = sock->sk;
3704         struct packet_sock *po = pkt_sk(sk);
3705         int ret;
3706
3707         if (level != SOL_PACKET)
3708                 return -ENOPROTOOPT;
3709
3710         switch (optname) {
3711         case PACKET_ADD_MEMBERSHIP:
3712         case PACKET_DROP_MEMBERSHIP:
3713         {
3714                 struct packet_mreq_max mreq;
3715                 int len = optlen;
3716                 memset(&mreq, 0, sizeof(mreq));
3717                 if (len < sizeof(struct packet_mreq))
3718                         return -EINVAL;
3719                 if (len > sizeof(mreq))
3720                         len = sizeof(mreq);
3721                 if (copy_from_sockptr(&mreq, optval, len))
3722                         return -EFAULT;
3723                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3724                         return -EINVAL;
3725                 if (optname == PACKET_ADD_MEMBERSHIP)
3726                         ret = packet_mc_add(sk, &mreq);
3727                 else
3728                         ret = packet_mc_drop(sk, &mreq);
3729                 return ret;
3730         }
3731
3732         case PACKET_RX_RING:
3733         case PACKET_TX_RING:
3734         {
3735                 union tpacket_req_u req_u;
3736                 int len;
3737
3738                 lock_sock(sk);
3739                 switch (po->tp_version) {
3740                 case TPACKET_V1:
3741                 case TPACKET_V2:
3742                         len = sizeof(req_u.req);
3743                         break;
3744                 case TPACKET_V3:
3745                 default:
3746                         len = sizeof(req_u.req3);
3747                         break;
3748                 }
3749                 if (optlen < len) {
3750                         ret = -EINVAL;
3751                 } else {
3752                         if (copy_from_sockptr(&req_u.req, optval, len))
3753                                 ret = -EFAULT;
3754                         else
3755                                 ret = packet_set_ring(sk, &req_u, 0,
3756                                                     optname == PACKET_TX_RING);
3757                 }
3758                 release_sock(sk);
3759                 return ret;
3760         }
3761         case PACKET_COPY_THRESH:
3762         {
3763                 int val;
3764
3765                 if (optlen != sizeof(val))
3766                         return -EINVAL;
3767                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3768                         return -EFAULT;
3769
3770                 pkt_sk(sk)->copy_thresh = val;
3771                 return 0;
3772         }
3773         case PACKET_VERSION:
3774         {
3775                 int val;
3776
3777                 if (optlen != sizeof(val))
3778                         return -EINVAL;
3779                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3780                         return -EFAULT;
3781                 switch (val) {
3782                 case TPACKET_V1:
3783                 case TPACKET_V2:
3784                 case TPACKET_V3:
3785                         break;
3786                 default:
3787                         return -EINVAL;
3788                 }
3789                 lock_sock(sk);
3790                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3791                         ret = -EBUSY;
3792                 } else {
3793                         po->tp_version = val;
3794                         ret = 0;
3795                 }
3796                 release_sock(sk);
3797                 return ret;
3798         }
3799         case PACKET_RESERVE:
3800         {
3801                 unsigned int val;
3802
3803                 if (optlen != sizeof(val))
3804                         return -EINVAL;
3805                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3806                         return -EFAULT;
3807                 if (val > INT_MAX)
3808                         return -EINVAL;
3809                 lock_sock(sk);
3810                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3811                         ret = -EBUSY;
3812                 } else {
3813                         po->tp_reserve = val;
3814                         ret = 0;
3815                 }
3816                 release_sock(sk);
3817                 return ret;
3818         }
3819         case PACKET_LOSS:
3820         {
3821                 unsigned int val;
3822
3823                 if (optlen != sizeof(val))
3824                         return -EINVAL;
3825                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3826                         return -EFAULT;
3827
3828                 lock_sock(sk);
3829                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3830                         ret = -EBUSY;
3831                 } else {
3832                         po->tp_loss = !!val;
3833                         ret = 0;
3834                 }
3835                 release_sock(sk);
3836                 return ret;
3837         }
3838         case PACKET_AUXDATA:
3839         {
3840                 int val;
3841
3842                 if (optlen < sizeof(val))
3843                         return -EINVAL;
3844                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3845                         return -EFAULT;
3846
3847                 lock_sock(sk);
3848                 po->auxdata = !!val;
3849                 release_sock(sk);
3850                 return 0;
3851         }
3852         case PACKET_ORIGDEV:
3853         {
3854                 int val;
3855
3856                 if (optlen < sizeof(val))
3857                         return -EINVAL;
3858                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3859                         return -EFAULT;
3860
3861                 lock_sock(sk);
3862                 po->origdev = !!val;
3863                 release_sock(sk);
3864                 return 0;
3865         }
3866         case PACKET_VNET_HDR:
3867         {
3868                 int val;
3869
3870                 if (sock->type != SOCK_RAW)
3871                         return -EINVAL;
3872                 if (optlen < sizeof(val))
3873                         return -EINVAL;
3874                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3875                         return -EFAULT;
3876
3877                 lock_sock(sk);
3878                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3879                         ret = -EBUSY;
3880                 } else {
3881                         po->has_vnet_hdr = !!val;
3882                         ret = 0;
3883                 }
3884                 release_sock(sk);
3885                 return ret;
3886         }
3887         case PACKET_TIMESTAMP:
3888         {
3889                 int val;
3890
3891                 if (optlen != sizeof(val))
3892                         return -EINVAL;
3893                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3894                         return -EFAULT;
3895
3896                 po->tp_tstamp = val;
3897                 return 0;
3898         }
3899         case PACKET_FANOUT:
3900         {
3901                 struct fanout_args args = { 0 };
3902
3903                 if (optlen != sizeof(int) && optlen != sizeof(args))
3904                         return -EINVAL;
3905                 if (copy_from_sockptr(&args, optval, optlen))
3906                         return -EFAULT;
3907
3908                 return fanout_add(sk, &args);
3909         }
3910         case PACKET_FANOUT_DATA:
3911         {
3912                 /* Paired with the WRITE_ONCE() in fanout_add() */
3913                 if (!READ_ONCE(po->fanout))
3914                         return -EINVAL;
3915
3916                 return fanout_set_data(po, optval, optlen);
3917         }
3918         case PACKET_IGNORE_OUTGOING:
3919         {
3920                 int val;
3921
3922                 if (optlen != sizeof(val))
3923                         return -EINVAL;
3924                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3925                         return -EFAULT;
3926                 if (val < 0 || val > 1)
3927                         return -EINVAL;
3928
3929                 po->prot_hook.ignore_outgoing = !!val;
3930                 return 0;
3931         }
3932         case PACKET_TX_HAS_OFF:
3933         {
3934                 unsigned int val;
3935
3936                 if (optlen != sizeof(val))
3937                         return -EINVAL;
3938                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3939                         return -EFAULT;
3940
3941                 lock_sock(sk);
3942                 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
3943                         po->tp_tx_has_off = !!val;
3944
3945                 release_sock(sk);
3946                 return 0;
3947         }
3948         case PACKET_QDISC_BYPASS:
3949         {
3950                 int val;
3951
3952                 if (optlen != sizeof(val))
3953                         return -EINVAL;
3954                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3955                         return -EFAULT;
3956
3957                 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3958                 return 0;
3959         }
3960         default:
3961                 return -ENOPROTOOPT;
3962         }
3963 }
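
/* Example (userspace, illustrative only): the ordering packet_setsockopt()
 * enforces above. PACKET_VERSION must precede ring creation (-EBUSY once a
 * ring exists), and the PACKET_RX_RING geometry must pass the checks in
 * packet_set_ring(); the block/frame sizes here are assumptions.
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 22,	 4 MiB, page aligned
 *		.tp_block_nr	   = 64,
 *		.tp_frame_size	   = 1 << 11,	 2 KiB, 16-byte aligned
 *		.tp_frame_nr	   = ((1 << 22) / (1 << 11)) * 64,
 *		.tp_retire_blk_tov = 60,	 block retire timeout, ms
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */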
3964
3965 static int packet_getsockopt(struct socket *sock, int level, int optname,
3966                              char __user *optval, int __user *optlen)
3967 {
3968         int len;
3969         int val, lv = sizeof(val);
3970         struct sock *sk = sock->sk;
3971         struct packet_sock *po = pkt_sk(sk);
3972         void *data = &val;
3973         union tpacket_stats_u st;
3974         struct tpacket_rollover_stats rstats;
3975         int drops;
3976
3977         if (level != SOL_PACKET)
3978                 return -ENOPROTOOPT;
3979
3980         if (get_user(len, optlen))
3981                 return -EFAULT;
3982
3983         if (len < 0)
3984                 return -EINVAL;
3985
3986         switch (optname) {
3987         case PACKET_STATISTICS:
3988                 spin_lock_bh(&sk->sk_receive_queue.lock);
3989                 memcpy(&st, &po->stats, sizeof(st));
3990                 memset(&po->stats, 0, sizeof(po->stats));
3991                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3992                 drops = atomic_xchg(&po->tp_drops, 0);
3993
3994                 if (po->tp_version == TPACKET_V3) {
3995                         lv = sizeof(struct tpacket_stats_v3);
3996                         st.stats3.tp_drops = drops;
3997                         st.stats3.tp_packets += drops;
3998                         data = &st.stats3;
3999                 } else {
4000                         lv = sizeof(struct tpacket_stats);
4001                         st.stats1.tp_drops = drops;
4002                         st.stats1.tp_packets += drops;
4003                         data = &st.stats1;
4004                 }
4005
4006                 break;
4007         case PACKET_AUXDATA:
4008                 val = po->auxdata;
4009                 break;
4010         case PACKET_ORIGDEV:
4011                 val = po->origdev;
4012                 break;
4013         case PACKET_VNET_HDR:
4014                 val = po->has_vnet_hdr;
4015                 break;
4016         case PACKET_VERSION:
4017                 val = po->tp_version;
4018                 break;
4019         case PACKET_HDRLEN:
4020                 if (len > sizeof(int))
4021                         len = sizeof(int);
4022                 if (len < sizeof(int))
4023                         return -EINVAL;
4024                 if (copy_from_user(&val, optval, len))
4025                         return -EFAULT;
4026                 switch (val) {
4027                 case TPACKET_V1:
4028                         val = sizeof(struct tpacket_hdr);
4029                         break;
4030                 case TPACKET_V2:
4031                         val = sizeof(struct tpacket2_hdr);
4032                         break;
4033                 case TPACKET_V3:
4034                         val = sizeof(struct tpacket3_hdr);
4035                         break;
4036                 default:
4037                         return -EINVAL;
4038                 }
4039                 break;
4040         case PACKET_RESERVE:
4041                 val = po->tp_reserve;
4042                 break;
4043         case PACKET_LOSS:
4044                 val = po->tp_loss;
4045                 break;
4046         case PACKET_TIMESTAMP:
4047                 val = po->tp_tstamp;
4048                 break;
4049         case PACKET_FANOUT:
4050                 val = (po->fanout ?
4051                        ((u32)po->fanout->id |
4052                         ((u32)po->fanout->type << 16) |
4053                         ((u32)po->fanout->flags << 24)) :
4054                        0);
4055                 break;
4056         case PACKET_IGNORE_OUTGOING:
4057                 val = po->prot_hook.ignore_outgoing;
4058                 break;
4059         case PACKET_ROLLOVER_STATS:
4060                 if (!po->rollover)
4061                         return -EINVAL;
4062                 rstats.tp_all = atomic_long_read(&po->rollover->num);
4063                 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4064                 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4065                 data = &rstats;
4066                 lv = sizeof(rstats);
4067                 break;
4068         case PACKET_TX_HAS_OFF:
4069                 val = po->tp_tx_has_off;
4070                 break;
4071         case PACKET_QDISC_BYPASS:
4072                 val = packet_use_direct_xmit(po);
4073                 break;
4074         default:
4075                 return -ENOPROTOOPT;
4076         }
4077
4078         if (len > lv)
4079                 len = lv;
4080         if (put_user(len, optlen))
4081                 return -EFAULT;
4082         if (copy_to_user(optval, data, len))
4083                 return -EFAULT;
4084         return 0;
4085 }
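
/* Example (userspace, illustrative only): PACKET_STATISTICS is
 * read-and-reset, per the memset() under the receive queue lock above.
 * With TPACKET_V3 negotiated the v3 layout is returned, and tp_packets
 * already includes tp_drops.
 *
 *	struct tpacket_stats_v3 st;
 *	socklen_t len = sizeof(st);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 */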
4086
4087 static int packet_notifier(struct notifier_block *this,
4088                            unsigned long msg, void *ptr)
4089 {
4090         struct sock *sk;
4091         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4092         struct net *net = dev_net(dev);
4093
4094         rcu_read_lock();
4095         sk_for_each_rcu(sk, &net->packet.sklist) {
4096                 struct packet_sock *po = pkt_sk(sk);
4097
4098                 switch (msg) {
4099                 case NETDEV_UNREGISTER:
4100                         if (po->mclist)
4101                                 packet_dev_mclist_delete(dev, &po->mclist);
4102                         fallthrough;
4103
4104                 case NETDEV_DOWN:
4105                         if (dev->ifindex == po->ifindex) {
4106                                 spin_lock(&po->bind_lock);
4107                                 if (po->running) {
4108                                         __unregister_prot_hook(sk, false);
4109                                         sk->sk_err = ENETDOWN;
4110                                         if (!sock_flag(sk, SOCK_DEAD))
4111                                                 sk_error_report(sk);
4112                                 }
4113                                 if (msg == NETDEV_UNREGISTER) {
4114                                         packet_cached_dev_reset(po);
4115                                         WRITE_ONCE(po->ifindex, -1);
4116                                         dev_put(po->prot_hook.dev);
4117                                         po->prot_hook.dev = NULL;
4118                                 }
4119                                 spin_unlock(&po->bind_lock);
4120                         }
4121                         break;
4122                 case NETDEV_UP:
4123                         if (dev->ifindex == po->ifindex) {
4124                                 spin_lock(&po->bind_lock);
4125                                 if (po->num)
4126                                         register_prot_hook(sk);
4127                                 spin_unlock(&po->bind_lock);
4128                         }
4129                         break;
4130                 }
4131         }
4132         rcu_read_unlock();
4133         return NOTIFY_DONE;
4134 }
4135
4136
4137 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4138                         unsigned long arg)
4139 {
4140         struct sock *sk = sock->sk;
4141
4142         switch (cmd) {
4143         case SIOCOUTQ:
4144         {
4145                 int amount = sk_wmem_alloc_get(sk);
4146
4147                 return put_user(amount, (int __user *)arg);
4148         }
4149         case SIOCINQ:
4150         {
4151                 struct sk_buff *skb;
4152                 int amount = 0;
4153
4154                 spin_lock_bh(&sk->sk_receive_queue.lock);
4155                 skb = skb_peek(&sk->sk_receive_queue);
4156                 if (skb)
4157                         amount = skb->len;
4158                 spin_unlock_bh(&sk->sk_receive_queue.lock);
4159                 return put_user(amount, (int __user *)arg);
4160         }
4161 #ifdef CONFIG_INET
4162         case SIOCADDRT:
4163         case SIOCDELRT:
4164         case SIOCDARP:
4165         case SIOCGARP:
4166         case SIOCSARP:
4167         case SIOCGIFADDR:
4168         case SIOCSIFADDR:
4169         case SIOCGIFBRDADDR:
4170         case SIOCSIFBRDADDR:
4171         case SIOCGIFNETMASK:
4172         case SIOCSIFNETMASK:
4173         case SIOCGIFDSTADDR:
4174         case SIOCSIFDSTADDR:
4175         case SIOCSIFFLAGS:
4176                 return inet_dgram_ops.ioctl(sock, cmd, arg);
4177 #endif
4178
4179         default:
4180                 return -ENOIOCTLCMD;
4181         }
4182         return 0;
4183 }
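
/* Example (userspace, illustrative only): on a packet socket SIOCINQ
 * reports the length of the next queued frame, not the whole queue.
 *
 *	int next_len = 0;
 *
 *	ioctl(fd, SIOCINQ, &next_len);
 */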
4184
4185 static __poll_t packet_poll(struct file *file, struct socket *sock,
4186                                 poll_table *wait)
4187 {
4188         struct sock *sk = sock->sk;
4189         struct packet_sock *po = pkt_sk(sk);
4190         __poll_t mask = datagram_poll(file, sock, wait);
4191
4192         spin_lock_bh(&sk->sk_receive_queue.lock);
4193         if (po->rx_ring.pg_vec) {
4194                 if (!packet_previous_rx_frame(po, &po->rx_ring,
4195                         TP_STATUS_KERNEL))
4196                         mask |= EPOLLIN | EPOLLRDNORM;
4197         }
4198         packet_rcv_try_clear_pressure(po);
4199         spin_unlock_bh(&sk->sk_receive_queue.lock);
4200         spin_lock_bh(&sk->sk_write_queue.lock);
4201         if (po->tx_ring.pg_vec) {
4202                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4203                         mask |= EPOLLOUT | EPOLLWRNORM;
4204         }
4205         spin_unlock_bh(&sk->sk_write_queue.lock);
4206         return mask;
4207 }
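
/* Example (userspace, illustrative only): with an RX ring mapped, EPOLLIN
 * from packet_poll() above means at least one ring frame has been handed to
 * user space, so a poll()/scan/advance loop needs no per-packet syscall.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 */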
4208
4209
4210 /* Dirty? Well, I still have not learned a better way to account
4211  * for user mmaps.
4212  */
4213
4214 static void packet_mm_open(struct vm_area_struct *vma)
4215 {
4216         struct file *file = vma->vm_file;
4217         struct socket *sock = file->private_data;
4218         struct sock *sk = sock->sk;
4219
4220         if (sk)
4221                 atomic_inc(&pkt_sk(sk)->mapped);
4222 }
4223
4224 static void packet_mm_close(struct vm_area_struct *vma)
4225 {
4226         struct file *file = vma->vm_file;
4227         struct socket *sock = file->private_data;
4228         struct sock *sk = sock->sk;
4229
4230         if (sk)
4231                 atomic_dec(&pkt_sk(sk)->mapped);
4232 }
4233
4234 static const struct vm_operations_struct packet_mmap_ops = {
4235         .open   =       packet_mm_open,
4236         .close  =       packet_mm_close,
4237 };
4238
4239 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4240                         unsigned int len)
4241 {
4242         int i;
4243
4244         for (i = 0; i < len; i++) {
4245                 if (likely(pg_vec[i].buffer)) {
4246                         if (is_vmalloc_addr(pg_vec[i].buffer))
4247                                 vfree(pg_vec[i].buffer);
4248                         else
4249                                 free_pages((unsigned long)pg_vec[i].buffer,
4250                                            order);
4251                         pg_vec[i].buffer = NULL;
4252                 }
4253         }
4254         kfree(pg_vec);
4255 }
4256
4257 static char *alloc_one_pg_vec_page(unsigned long order)
4258 {
4259         char *buffer;
4260         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4261                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4262
4263         buffer = (char *) __get_free_pages(gfp_flags, order);
4264         if (buffer)
4265                 return buffer;
4266
4267         /* __get_free_pages failed, fall back to vmalloc */
4268         buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4269         if (buffer)
4270                 return buffer;
4271
4272         /* vmalloc failed, let's dig into swap here */
4273         gfp_flags &= ~__GFP_NORETRY;
4274         buffer = (char *) __get_free_pages(gfp_flags, order);
4275         if (buffer)
4276                 return buffer;
4277
4278         /* complete and utter failure */
4279         return NULL;
4280 }
4281
4282 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4283 {
4284         unsigned int block_nr = req->tp_block_nr;
4285         struct pgv *pg_vec;
4286         int i;
4287
4288         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4289         if (unlikely(!pg_vec))
4290                 goto out;
4291
4292         for (i = 0; i < block_nr; i++) {
4293                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4294                 if (unlikely(!pg_vec[i].buffer))
4295                         goto out_free_pgvec;
4296         }
4297
4298 out:
4299         return pg_vec;
4300
4301 out_free_pgvec:
4302         free_pg_vec(pg_vec, order, block_nr);
4303         pg_vec = NULL;
4304         goto out;
4305 }
4306
4307 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4308                 int closing, int tx_ring)
4309 {
4310         struct pgv *pg_vec = NULL;
4311         struct packet_sock *po = pkt_sk(sk);
4312         unsigned long *rx_owner_map = NULL;
4313         int was_running, order = 0;
4314         struct packet_ring_buffer *rb;
4315         struct sk_buff_head *rb_queue;
4316         __be16 num;
4317         int err;
4318         /* Aliased to keep code churn minimal */
4319         struct tpacket_req *req = &req_u->req;
4320
4321         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4322         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4323
4324         err = -EBUSY;
4325         if (!closing) {
4326                 if (atomic_read(&po->mapped))
4327                         goto out;
4328                 if (packet_read_pending(rb))
4329                         goto out;
4330         }
4331
4332         if (req->tp_block_nr) {
4333                 unsigned int min_frame_size;
4334
4335                 /* Sanity tests and some calculations */
4336                 err = -EBUSY;
4337                 if (unlikely(rb->pg_vec))
4338                         goto out;
4339
4340                 switch (po->tp_version) {
4341                 case TPACKET_V1:
4342                         po->tp_hdrlen = TPACKET_HDRLEN;
4343                         break;
4344                 case TPACKET_V2:
4345                         po->tp_hdrlen = TPACKET2_HDRLEN;
4346                         break;
4347                 case TPACKET_V3:
4348                         po->tp_hdrlen = TPACKET3_HDRLEN;
4349                         break;
4350                 }
4351
4352                 err = -EINVAL;
4353                 if (unlikely((int)req->tp_block_size <= 0))
4354                         goto out;
4355                 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4356                         goto out;
4357                 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4358                 if (po->tp_version >= TPACKET_V3 &&
4359                     req->tp_block_size <
4360                     BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4361                         goto out;
4362                 if (unlikely(req->tp_frame_size < min_frame_size))
4363                         goto out;
4364                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4365                         goto out;
4366
4367                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4368                 if (unlikely(rb->frames_per_block == 0))
4369                         goto out;
4370                 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4371                         goto out;
4372                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4373                                         req->tp_frame_nr))
4374                         goto out;
4375
4376                 err = -ENOMEM;
4377                 order = get_order(req->tp_block_size);
4378                 pg_vec = alloc_pg_vec(req, order);
4379                 if (unlikely(!pg_vec))
4380                         goto out;
4381                 switch (po->tp_version) {
4382                 case TPACKET_V3:
4383                         /* Block transmit is not supported yet */
4384                         if (!tx_ring) {
4385                                 init_prb_bdqc(po, rb, pg_vec, req_u);
4386                         } else {
4387                                 struct tpacket_req3 *req3 = &req_u->req3;
4388
4389                                 if (req3->tp_retire_blk_tov ||
4390                                     req3->tp_sizeof_priv ||
4391                                     req3->tp_feature_req_word) {
4392                                         err = -EINVAL;
4393                                         goto out_free_pg_vec;
4394                                 }
4395                         }
4396                         break;
4397                 default:
4398                         if (!tx_ring) {
4399                                 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4400                                         GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4401                                 if (!rx_owner_map)
4402                                         goto out_free_pg_vec;
4403                         }
4404                         break;
4405                 }
4406         }
4407         /* Done */
4408         else {
4409                 err = -EINVAL;
4410                 if (unlikely(req->tp_frame_nr))
4411                         goto out;
4412         }
4413
4414
4415         /* Detach socket from network */
4416         spin_lock(&po->bind_lock);
4417         was_running = po->running;
4418         num = po->num;
4419         if (was_running) {
4420                 WRITE_ONCE(po->num, 0);
4421                 __unregister_prot_hook(sk, false);
4422         }
4423         spin_unlock(&po->bind_lock);
4424
4425         synchronize_net();
4426
4427         err = -EBUSY;
4428         mutex_lock(&po->pg_vec_lock);
4429         if (closing || atomic_read(&po->mapped) == 0) {
4430                 err = 0;
4431                 spin_lock_bh(&rb_queue->lock);
4432                 swap(rb->pg_vec, pg_vec);
4433                 if (po->tp_version <= TPACKET_V2)
4434                         swap(rb->rx_owner_map, rx_owner_map);
4435                 rb->frame_max = (req->tp_frame_nr - 1);
4436                 rb->head = 0;
4437                 rb->frame_size = req->tp_frame_size;
4438                 spin_unlock_bh(&rb_queue->lock);
4439
4440                 swap(rb->pg_vec_order, order);
4441                 swap(rb->pg_vec_len, req->tp_block_nr);
4442
4443                 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4444                 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4445                                                 tpacket_rcv : packet_rcv;
4446                 skb_queue_purge(rb_queue);
4447                 if (atomic_read(&po->mapped))
4448                         pr_err("packet_mmap: vma is busy: %d\n",
4449                                atomic_read(&po->mapped));
4450         }
4451         mutex_unlock(&po->pg_vec_lock);
4452
4453         spin_lock(&po->bind_lock);
4454         if (was_running) {
4455                 WRITE_ONCE(po->num, num);
4456                 register_prot_hook(sk);
4457         }
4458         spin_unlock(&po->bind_lock);
4459         if (pg_vec && (po->tp_version > TPACKET_V2)) {
4460                 /* Because we don't support block-based V3 on tx-ring */
4461                 if (!tx_ring)
4462                         prb_shutdown_retire_blk_timer(po, rb_queue);
4463         }
4464
4465 out_free_pg_vec:
4466         if (pg_vec) {
4467                 bitmap_free(rx_owner_map);
4468                 free_pg_vec(pg_vec, order, req->tp_block_nr);
4469         }
4470 out:
4471         return err;
4472 }
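
/* Worked example of the geometry checks above, with illustrative numbers:
 * tp_block_size = 4096 and tp_frame_size = 2048 give frames_per_block = 2,
 * so a request with tp_block_nr = 8 must set tp_frame_nr = 16 exactly;
 * anything else fails the frames_per_block * tp_block_nr != tp_frame_nr
 * test.
 */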
4473
4474 static int packet_mmap(struct file *file, struct socket *sock,
4475                 struct vm_area_struct *vma)
4476 {
4477         struct sock *sk = sock->sk;
4478         struct packet_sock *po = pkt_sk(sk);
4479         unsigned long size, expected_size;
4480         struct packet_ring_buffer *rb;
4481         unsigned long start;
4482         int err = -EINVAL;
4483         int i;
4484
4485         if (vma->vm_pgoff)
4486                 return -EINVAL;
4487
4488         mutex_lock(&po->pg_vec_lock);
4489
4490         expected_size = 0;
4491         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4492                 if (rb->pg_vec) {
4493                         expected_size += rb->pg_vec_len
4494                                                 * rb->pg_vec_pages
4495                                                 * PAGE_SIZE;
4496                 }
4497         }
4498
4499         if (expected_size == 0)
4500                 goto out;
4501
4502         size = vma->vm_end - vma->vm_start;
4503         if (size != expected_size)
4504                 goto out;
4505
4506         start = vma->vm_start;
4507         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4508                 if (rb->pg_vec == NULL)
4509                         continue;
4510
4511                 for (i = 0; i < rb->pg_vec_len; i++) {
4512                         struct page *page;
4513                         void *kaddr = rb->pg_vec[i].buffer;
4514                         int pg_num;
4515
4516                         for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4517                                 page = pgv_to_page(kaddr);
4518                                 err = vm_insert_page(vma, start, page);
4519                                 if (unlikely(err))
4520                                         goto out;
4521                                 start += PAGE_SIZE;
4522                                 kaddr += PAGE_SIZE;
4523                         }
4524                 }
4525         }
4526
4527         atomic_inc(&po->mapped);
4528         vma->vm_ops = &packet_mmap_ops;
4529         err = 0;
4530
4531 out:
4532         mutex_unlock(&po->pg_vec_lock);
4533         return err;
4534 }
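
/* Example (userspace, illustrative only): mapping a single RX ring set up
 * earlier. The length must equal the total ring size and the offset must be
 * zero, per the checks above; when both rings exist, one mapping covers the
 * RX ring followed by the TX ring.
 *
 *	size_t len = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */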
4535
static const struct proto_ops packet_ops_spkt = {
        .family =       PF_PACKET,
        .owner =        THIS_MODULE,
        .release =      packet_release,
        .bind =         packet_bind_spkt,
        .connect =      sock_no_connect,
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname_spkt,
        .poll =         datagram_poll,
        .ioctl =        packet_ioctl,
        .gettstamp =    sock_gettstamp,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
        .sendmsg =      packet_sendmsg_spkt,
        .recvmsg =      packet_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
};

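/*
 * Full operations for SOCK_RAW/SOCK_DGRAM packet sockets, including
 * socket options and the ring-buffer mmap implemented above.
 */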
static const struct proto_ops packet_ops = {
        .family =       PF_PACKET,
        .owner =        THIS_MODULE,
        .release =      packet_release,
        .bind =         packet_bind,
        .connect =      sock_no_connect,
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname,
        .poll =         packet_poll,
        .ioctl =        packet_ioctl,
        .gettstamp =    sock_gettstamp,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
        .setsockopt =   packet_setsockopt,
        .getsockopt =   packet_getsockopt,
        .sendmsg =      packet_sendmsg,
        .recvmsg =      packet_recvmsg,
        .mmap =         packet_mmap,
        .sendpage =     sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
        .family =       PF_PACKET,
        .create =       packet_create,
        .owner  =       THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
        .notifier_call =        packet_notifier,
};

#ifdef CONFIG_PROC_FS

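/*
 * /proc/net/packet: one line per packet socket in the namespace, walked
 * under RCU.  The columns match the header printed below: socket address,
 * refcount, socket type, protocol, interface index, running flag,
 * receive-queue memory, owning uid and inode.
 */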
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        struct net *net = seq_file_net(seq);

        rcu_read_lock();
        return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);

        return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN) {
                seq_printf(seq,
                           "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
                           IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
        } else {
                struct sock *s = sk_entry(v);
                const struct packet_sock *po = pkt_sk(s);

                seq_printf(seq,
                           "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
                           s,
                           refcount_read(&s->sk_refcnt),
                           s->sk_type,
                           ntohs(READ_ONCE(po->num)),
                           READ_ONCE(po->ifindex),
                           po->running,
                           atomic_read(&s->sk_rmem_alloc),
                           from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
                           sock_i_ino(s));
        }

        return 0;
}

static const struct seq_operations packet_seq_ops = {
        .start  = packet_seq_start,
        .next   = packet_seq_next,
        .stop   = packet_seq_stop,
        .show   = packet_seq_show,
};
#endif

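/*
 * Per-namespace setup: initialise the socket list and, when procfs is
 * enabled, create /proc/net/packet; teardown reverses both.
 */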
static int __net_init packet_net_init(struct net *net)
{
        mutex_init(&net->packet.sklist_lock);
        INIT_HLIST_HEAD(&net->packet.sklist);

#ifdef CONFIG_PROC_FS
        if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
                        sizeof(struct seq_net_private)))
                return -ENOMEM;
#endif /* CONFIG_PROC_FS */

        return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
        remove_proc_entry("packet", net->proc_net);
        WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
        .init = packet_net_init,
        .exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
        unregister_netdevice_notifier(&packet_netdev_notifier);
        unregister_pernet_subsys(&packet_net_ops);
        sock_unregister(PF_PACKET);
        proto_unregister(&packet_proto);
}

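/*
 * Module init registers four facilities in order (proto, socket family,
 * pernet subsystem, netdevice notifier); each error path unwinds only the
 * steps that already succeeded, and packet_exit() above reverses the
 * whole sequence in the opposite order.
 */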
static int __init packet_init(void)
{
        int rc;

        rc = proto_register(&packet_proto, 0);
        if (rc)
                goto out;
        rc = sock_register(&packet_family_ops);
        if (rc)
                goto out_proto;
        rc = register_pernet_subsys(&packet_net_ops);
        if (rc)
                goto out_sock;
        rc = register_netdevice_notifier(&packet_netdev_notifier);
        if (rc)
                goto out_pernet;

        return 0;

out_pernet:
        unregister_pernet_subsys(&packet_net_ops);
out_sock:
        sock_unregister(PF_PACKET);
out_proto:
        proto_unregister(&packet_proto);
out:
        return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);