net/packet: convert po->has_vnet_hdr to an atomic flag
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET         An implementation of the TCP/IP protocol suite for the LINUX
4  *              operating system.  INET is implemented using the  BSD Socket
5  *              interface as the means of communication with the user level.
6  *
7  *              PACKET - implements raw packet sockets.
8  *
9  * Authors:     Ross Biro
10  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
12  *
13  * Fixes:
14  *              Alan Cox        :       verify_area() now used correctly
15  *              Alan Cox        :       new skbuff lists, look ma no backlogs!
16  *              Alan Cox        :       tidied skbuff lists.
17  *              Alan Cox        :       Now uses generic datagram routines I
18  *                                      added. Also fixed the peek/read crash
19  *                                      from all old Linux datagram code.
20  *              Alan Cox        :       Uses the improved datagram code.
21  *              Alan Cox        :       Added NULL's for socket options.
22  *              Alan Cox        :       Re-commented the code.
23  *              Alan Cox        :       Use new kernel side addressing
24  *              Rob Janssen     :       Correct MTU usage.
25  *              Dave Platt      :       Counter leaks caused by incorrect
26  *                                      interrupt locking and some slightly
27  *                                      dubious gcc output. Can you read
28  *                                      compiler: it said _VOLATILE_
29  *      Richard Kooijman        :       Timestamp fixes.
30  *              Alan Cox        :       New buffers. Use sk->mac.raw.
31  *              Alan Cox        :       sendmsg/recvmsg support.
32  *              Alan Cox        :       Protocol setting support
33  *      Alexey Kuznetsov        :       Untied from IPv4 stack.
34  *      Cyrus Durgin            :       Fixed kerneld for kmod.
35  *      Michal Ostrowski        :       Module initialization cleanup.
36  *         Ulises Alonso        :       Frame number limit removal and
37  *                                      packet_set_ring memory leak.
38  *              Eric Biederman  :       Allow for > 8 byte hardware addresses.
39  *                                      The convention is that longer addresses
40  *                                      will simply extend the hardware address
41  *                                      byte arrays at the end of sockaddr_ll
42  *                                      and packet_mreq.
43  *              Johann Baudy    :       Added TX RING.
44  *              Chetan Loke     :       Implemented TPACKET_V3 block abstraction
45  *                                      layer.
46  *                                      Copyright (C) 2011, <lokec@ccs.neu.edu>
47  */
48
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50
51 #include <linux/ethtool.h>
52 #include <linux/filter.h>
53 #include <linux/types.h>
54 #include <linux/mm.h>
55 #include <linux/capability.h>
56 #include <linux/fcntl.h>
57 #include <linux/socket.h>
58 #include <linux/in.h>
59 #include <linux/inet.h>
60 #include <linux/netdevice.h>
61 #include <linux/if_packet.h>
62 #include <linux/wireless.h>
63 #include <linux/kernel.h>
64 #include <linux/kmod.h>
65 #include <linux/slab.h>
66 #include <linux/vmalloc.h>
67 #include <net/net_namespace.h>
68 #include <net/ip.h>
69 #include <net/protocol.h>
70 #include <linux/skbuff.h>
71 #include <net/sock.h>
72 #include <linux/errno.h>
73 #include <linux/timer.h>
74 #include <linux/uaccess.h>
75 #include <asm/ioctls.h>
76 #include <asm/page.h>
77 #include <asm/cacheflush.h>
78 #include <asm/io.h>
79 #include <linux/proc_fs.h>
80 #include <linux/seq_file.h>
81 #include <linux/poll.h>
82 #include <linux/module.h>
83 #include <linux/init.h>
84 #include <linux/mutex.h>
85 #include <linux/if_vlan.h>
86 #include <linux/virtio_net.h>
87 #include <linux/errqueue.h>
88 #include <linux/net_tstamp.h>
89 #include <linux/percpu.h>
90 #ifdef CONFIG_INET
91 #include <net/inet_common.h>
92 #endif
93 #include <linux/bpf.h>
94 #include <net/compat.h>
95 #include <linux/netfilter_netdev.h>
96
97 #include "internal.h"
98
99 /*
100    Assumptions:
101    - If the device has no dev->header_ops->create, there is no LL header
102      visible above the device. In this case, its hard_header_len should be 0.
103      The device may prepend its own header internally. In this case, its
104      needed_headroom should be set to the space needed for it to add its
105      internal header.
106      For example, a WiFi driver pretending to be an Ethernet driver should
107      set its hard_header_len to be the Ethernet header length, and set its
108      needed_headroom to be (the real WiFi header length - the fake Ethernet
109      header length).
110    - a packet socket receives packets with the ll header already pulled,
111      so SOCK_RAW should push it back.
112
113 On receive:
114 -----------
115
116 Incoming, dev_has_header(dev) == true
117    mac_header -> ll header
118    data       -> data
119
120 Outgoing, dev_has_header(dev) == true
121    mac_header -> ll header
122    data       -> ll header
123
124 Incoming, dev_has_header(dev) == false
125    mac_header -> data
126      However drivers often make it point to the ll header.
127      This is incorrect because the ll header should be invisible to us.
128    data       -> data
129
130 Outgoing, dev_has_header(dev) == false
131    mac_header -> data. ll header is invisible to us.
132    data       -> data
133
134 In summary
135   If dev_has_header(dev) == false we are unable to restore the ll header,
136     because it is invisible to us.
137
138
139 On transmit:
140 ------------
141
142 dev_has_header(dev) == true
143    mac_header -> ll header
144    data       -> ll header
145
146 dev_has_header(dev) == false (ll header is invisible to us)
147    mac_header -> data
148    data       -> data
149
150    We should set the network_header on output to the correct position,
151    since the packet classifier depends on it.
152  */
153
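/* Illustrative user-space sketch (an assumption, not part of this file):
 * the header visibility described above shows up in what recv() returns
 * on a SOCK_RAW packet socket. The interface name "eth0" is a placeholder.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *	char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	// SOCK_RAW: buf starts with the ll (e.g. Ethernet) header.
 *	// SOCK_DGRAM would deliver the payload only, with the ll header
 *	// reconstructed by the kernel on transmit.
 */
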
154 /* Private packet socket structures. */
155
156 /* identical to struct packet_mreq except it has
157  * a longer address field.
158  */
159 struct packet_mreq_max {
160         int             mr_ifindex;
161         unsigned short  mr_type;
162         unsigned short  mr_alen;
163         unsigned char   mr_address[MAX_ADDR_LEN];
164 };
165
166 union tpacket_uhdr {
167         struct tpacket_hdr  *h1;
168         struct tpacket2_hdr *h2;
169         struct tpacket3_hdr *h3;
170         void *raw;
171 };
172
173 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
174                 int closing, int tx_ring);
175
176 #define V3_ALIGNMENT    (8)
177
178 #define BLK_HDR_LEN     (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
179
180 #define BLK_PLUS_PRIV(sz_of_priv) \
181         (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
182
183 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
184 #define BLOCK_NUM_PKTS(x)       ((x)->hdr.bh1.num_pkts)
185 #define BLOCK_O2FP(x)           ((x)->hdr.bh1.offset_to_first_pkt)
186 #define BLOCK_LEN(x)            ((x)->hdr.bh1.blk_len)
187 #define BLOCK_SNUM(x)           ((x)->hdr.bh1.seq_num)
188 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
189
190 struct packet_sock;
191 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
192                        struct packet_type *pt, struct net_device *orig_dev);
193
194 static void *packet_previous_frame(struct packet_sock *po,
195                 struct packet_ring_buffer *rb,
196                 int status);
197 static void packet_increment_head(struct packet_ring_buffer *buff);
198 static int prb_curr_blk_in_use(struct tpacket_block_desc *);
199 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
200                         struct packet_sock *);
201 static void prb_retire_current_block(struct tpacket_kbdq_core *,
202                 struct packet_sock *, unsigned int status);
203 static int prb_queue_frozen(struct tpacket_kbdq_core *);
204 static void prb_open_block(struct tpacket_kbdq_core *,
205                 struct tpacket_block_desc *);
206 static void prb_retire_rx_blk_timer_expired(struct timer_list *);
207 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
208 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
209 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
210                 struct tpacket3_hdr *);
211 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
212                 struct tpacket3_hdr *);
213 static void packet_flush_mclist(struct sock *sk);
214 static u16 packet_pick_tx_queue(struct sk_buff *skb);
215
216 struct packet_skb_cb {
217         union {
218                 struct sockaddr_pkt pkt;
219                 union {
220                         /* Trick: alias skb original length with
221                          * ll.sll_family and ll.protocol in order
222                          * to save room.
223                          */
224                         unsigned int origlen;
225                         struct sockaddr_ll ll;
226                 };
227         } sa;
228 };
229
230 #define vio_le() virtio_legacy_is_little_endian()
231
232 #define PACKET_SKB_CB(__skb)    ((struct packet_skb_cb *)((__skb)->cb))
233
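/* A hedged sketch of how the aliasing trick above is used on the receive
 * path (the actual stores live later in this file, in packet_rcv()):
 *
 *	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
 *	// ... later, recvmsg() overwrites the same bytes when it fills
 *	// in sll_family/sll_protocol for the caller.
 */
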
234 #define GET_PBDQC_FROM_RB(x)    ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
235 #define GET_PBLOCK_DESC(x, bid) \
236         ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
237 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x)       \
238         ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
239 #define GET_NEXT_PRB_BLK_NUM(x) \
240         (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
241         ((x)->kactive_blk_num+1) : 0)
242
243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
244 static void __fanout_link(struct sock *sk, struct packet_sock *po);
245
246 #ifdef CONFIG_NETFILTER_EGRESS
247 static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
248 {
249         struct sk_buff *next, *head = NULL, *tail;
250         int rc;
251
252         rcu_read_lock();
253         for (; skb != NULL; skb = next) {
254                 next = skb->next;
255                 skb_mark_not_on_list(skb);
256
257                 if (!nf_hook_egress(skb, &rc, skb->dev))
258                         continue;
259
260                 if (!head)
261                         head = skb;
262                 else
263                         tail->next = skb;
264
265                 tail = skb;
266         }
267         rcu_read_unlock();
268
269         return head;
270 }
271 #endif
272
273 static int packet_direct_xmit(struct sk_buff *skb)
274 {
275 #ifdef CONFIG_NETFILTER_EGRESS
276         if (nf_hook_egress_active()) {
277                 skb = nf_hook_direct_egress(skb);
278                 if (!skb)
279                         return NET_XMIT_DROP;
280         }
281 #endif
282         return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
283 }
284
285 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
286 {
287         struct net_device *dev;
288
289         rcu_read_lock();
290         dev = rcu_dereference(po->cached_dev);
291         dev_hold(dev);
292         rcu_read_unlock();
293
294         return dev;
295 }
296
297 static void packet_cached_dev_assign(struct packet_sock *po,
298                                      struct net_device *dev)
299 {
300         rcu_assign_pointer(po->cached_dev, dev);
301 }
302
303 static void packet_cached_dev_reset(struct packet_sock *po)
304 {
305         RCU_INIT_POINTER(po->cached_dev, NULL);
306 }
307
308 static bool packet_use_direct_xmit(const struct packet_sock *po)
309 {
310         /* Paired with WRITE_ONCE() in packet_setsockopt() */
311         return READ_ONCE(po->xmit) == packet_direct_xmit;
312 }
313
314 static u16 packet_pick_tx_queue(struct sk_buff *skb)
315 {
316         struct net_device *dev = skb->dev;
317         const struct net_device_ops *ops = dev->netdev_ops;
318         int cpu = raw_smp_processor_id();
319         u16 queue_index;
320
321 #ifdef CONFIG_XPS
322         skb->sender_cpu = cpu + 1;
323 #endif
324         skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
325         if (ops->ndo_select_queue) {
326                 queue_index = ops->ndo_select_queue(dev, skb, NULL);
327                 queue_index = netdev_cap_txqueue(dev, queue_index);
328         } else {
329                 queue_index = netdev_pick_tx(dev, skb, NULL);
330         }
331
332         return queue_index;
333 }
334
335 /* __register_prot_hook must be invoked through register_prot_hook
336  * or from a context in which asynchronous accesses to the packet
337  * socket are not possible (packet_create()).
338  */
339 static void __register_prot_hook(struct sock *sk)
340 {
341         struct packet_sock *po = pkt_sk(sk);
342
343         if (!po->running) {
344                 if (po->fanout)
345                         __fanout_link(sk, po);
346                 else
347                         dev_add_pack(&po->prot_hook);
348
349                 sock_hold(sk);
350                 po->running = 1;
351         }
352 }
353
354 static void register_prot_hook(struct sock *sk)
355 {
356         lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
357         __register_prot_hook(sk);
358 }
359
360 /* If the sync parameter is true, we will temporarily drop
361  * the po->bind_lock and do a synchronize_net to make sure no
362  * asynchronous packet processing paths still refer to the elements
363  * of po->prot_hook.  If the sync parameter is false, it is the
364  * caller's responsibility to take care of this.
365  */
366 static void __unregister_prot_hook(struct sock *sk, bool sync)
367 {
368         struct packet_sock *po = pkt_sk(sk);
369
370         lockdep_assert_held_once(&po->bind_lock);
371
372         po->running = 0;
373
374         if (po->fanout)
375                 __fanout_unlink(sk, po);
376         else
377                 __dev_remove_pack(&po->prot_hook);
378
379         __sock_put(sk);
380
381         if (sync) {
382                 spin_unlock(&po->bind_lock);
383                 synchronize_net();
384                 spin_lock(&po->bind_lock);
385         }
386 }
387
388 static void unregister_prot_hook(struct sock *sk, bool sync)
389 {
390         struct packet_sock *po = pkt_sk(sk);
391
392         if (po->running)
393                 __unregister_prot_hook(sk, sync);
394 }
395
396 static inline struct page * __pure pgv_to_page(void *addr)
397 {
398         if (is_vmalloc_addr(addr))
399                 return vmalloc_to_page(addr);
400         return virt_to_page(addr);
401 }
402
403 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
404 {
405         union tpacket_uhdr h;
406
407         h.raw = frame;
408         switch (po->tp_version) {
409         case TPACKET_V1:
410                 h.h1->tp_status = status;
411                 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
412                 break;
413         case TPACKET_V2:
414                 h.h2->tp_status = status;
415                 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
416                 break;
417         case TPACKET_V3:
418                 h.h3->tp_status = status;
419                 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
420                 break;
421         default:
422                 WARN(1, "TPACKET version not supported.\n");
423                 BUG();
424         }
425
426         smp_wmb();
427 }
428
429 static int __packet_get_status(const struct packet_sock *po, void *frame)
430 {
431         union tpacket_uhdr h;
432
433         smp_rmb();
434
435         h.raw = frame;
436         switch (po->tp_version) {
437         case TPACKET_V1:
438                 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
439                 return h.h1->tp_status;
440         case TPACKET_V2:
441                 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
442                 return h.h2->tp_status;
443         case TPACKET_V3:
444                 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
445                 return h.h3->tp_status;
446         default:
447                 WARN(1, "TPACKET version not supported.\n");
448                 BUG();
449                 return 0;
450         }
451 }
452
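/* The tp_status word is the kernel/user handshake for ring frames. A
 * minimal user-space consumer loop, as a hedged sketch (TPACKET_V2
 * layout assumed, "frame" pointing into the mmap()ed ring):
 *
 *	struct tpacket2_hdr *hdr = frame;
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	// ... consume the frame ...
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand it back to the kernel
 */
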
453 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
454                                    unsigned int flags)
455 {
456         struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
457
458         if (shhwtstamps &&
459             (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
460             ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
461                 return TP_STATUS_TS_RAW_HARDWARE;
462
463         if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
464             ktime_to_timespec64_cond(skb_tstamp(skb), ts))
465                 return TP_STATUS_TS_SOFTWARE;
466
467         return 0;
468 }
469
470 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
471                                     struct sk_buff *skb)
472 {
473         union tpacket_uhdr h;
474         struct timespec64 ts;
475         __u32 ts_status;
476
477         if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
478                 return 0;
479
480         h.raw = frame;
481         /*
482          * versions 1 through 3 overflow the timestamps in y2106, since they
483          * all store the seconds in a 32-bit unsigned integer.
484          * If we create a version 4, that should have a 64-bit timestamp,
485          * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
486          * nanoseconds.
487          */
488         switch (po->tp_version) {
489         case TPACKET_V1:
490                 h.h1->tp_sec = ts.tv_sec;
491                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
492                 break;
493         case TPACKET_V2:
494                 h.h2->tp_sec = ts.tv_sec;
495                 h.h2->tp_nsec = ts.tv_nsec;
496                 break;
497         case TPACKET_V3:
498                 h.h3->tp_sec = ts.tv_sec;
499                 h.h3->tp_nsec = ts.tv_nsec;
500                 break;
501         default:
502                 WARN(1, "TPACKET version not supported.\n");
503                 BUG();
504         }
505
506         /* one flush is safe, as both fields always lie on the same cacheline */
507         flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
508         smp_wmb();
509
510         return ts_status;
511 }
512
513 static void *packet_lookup_frame(const struct packet_sock *po,
514                                  const struct packet_ring_buffer *rb,
515                                  unsigned int position,
516                                  int status)
517 {
518         unsigned int pg_vec_pos, frame_offset;
519         union tpacket_uhdr h;
520
521         pg_vec_pos = position / rb->frames_per_block;
522         frame_offset = position % rb->frames_per_block;
523
524         h.raw = rb->pg_vec[pg_vec_pos].buffer +
525                 (frame_offset * rb->frame_size);
526
527         if (status != __packet_get_status(po, h.raw))
528                 return NULL;
529
530         return h.raw;
531 }
532
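/* Index math example for packet_lookup_frame(): with 4 KiB blocks and
 * frame_size = 2048 (so frames_per_block = 2), frame 5 lives in
 * pg_vec[5 / 2] = pg_vec[2], at byte offset (5 % 2) * 2048 = 2048
 * within that block.
 */
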
533 static void *packet_current_frame(struct packet_sock *po,
534                 struct packet_ring_buffer *rb,
535                 int status)
536 {
537         return packet_lookup_frame(po, rb, rb->head, status);
538 }
539
540 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
541 {
542         del_timer_sync(&pkc->retire_blk_timer);
543 }
544
545 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
546                 struct sk_buff_head *rb_queue)
547 {
548         struct tpacket_kbdq_core *pkc;
549
550         pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
551
552         spin_lock_bh(&rb_queue->lock);
553         pkc->delete_blk_timer = 1;
554         spin_unlock_bh(&rb_queue->lock);
555
556         prb_del_retire_blk_timer(pkc);
557 }
558
559 static void prb_setup_retire_blk_timer(struct packet_sock *po)
560 {
561         struct tpacket_kbdq_core *pkc;
562
563         pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
564         timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
565                     0);
566         pkc->retire_blk_timer.expires = jiffies;
567 }
568
569 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
570                                 int blk_size_in_bytes)
571 {
572         struct net_device *dev;
573         unsigned int mbits, div;
574         struct ethtool_link_ksettings ecmd;
575         int err;
576
577         rtnl_lock();
578         dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
579         if (unlikely(!dev)) {
580                 rtnl_unlock();
581                 return DEFAULT_PRB_RETIRE_TOV;
582         }
583         err = __ethtool_get_link_ksettings(dev, &ecmd);
584         rtnl_unlock();
585         if (err)
586                 return DEFAULT_PRB_RETIRE_TOV;
587
588         /* If the link speed is so slow you don't really
589          * need to worry about perf anyway
590          */
591         if (ecmd.base.speed < SPEED_1000 ||
592             ecmd.base.speed == SPEED_UNKNOWN)
593                 return DEFAULT_PRB_RETIRE_TOV;
594
595         div = ecmd.base.speed / 1000;
596         mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
597
598         if (div)
599                 mbits /= div;
600
601         if (div)
602                 return mbits + 1;
603         return mbits;
604 }
605
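/* Worked example of the math above: with a 1 MiB block on a 10 Gbps
 * link, div = 10000 / 1000 = 10 and mbits = (1048576 * 8) / (1024 * 1024)
 * = 8, so mbits /= div truncates to 0 and the timeout is 0 + 1 = 1 msec.
 * On a 1 Gbps link, div = 1 and the result is 8 + 1 = 9 msecs.
 */
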
606 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
607                         union tpacket_req_u *req_u)
608 {
609         p1->feature_req_word = req_u->req3.tp_feature_req_word;
610 }
611
612 static void init_prb_bdqc(struct packet_sock *po,
613                         struct packet_ring_buffer *rb,
614                         struct pgv *pg_vec,
615                         union tpacket_req_u *req_u)
616 {
617         struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
618         struct tpacket_block_desc *pbd;
619
620         memset(p1, 0x0, sizeof(*p1));
621
622         p1->knxt_seq_num = 1;
623         p1->pkbdq = pg_vec;
624         pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
625         p1->pkblk_start = pg_vec[0].buffer;
626         p1->kblk_size = req_u->req3.tp_block_size;
627         p1->knum_blocks = req_u->req3.tp_block_nr;
628         p1->hdrlen = po->tp_hdrlen;
629         p1->version = po->tp_version;
630         p1->last_kactive_blk_num = 0;
631         po->stats.stats3.tp_freeze_q_cnt = 0;
632         if (req_u->req3.tp_retire_blk_tov)
633                 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
634         else
635                 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
636                                                 req_u->req3.tp_block_size);
637         p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
638         p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
639         rwlock_init(&p1->blk_fill_in_prog_lock);
640
641         p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
642         prb_init_ft_ops(p1, req_u);
643         prb_setup_retire_blk_timer(po);
644         prb_open_block(p1, pbd);
645 }
646
647 /*  Do NOT update the last_blk_num first.
648  *  Assumes sk_buff_head lock is held.
649  */
650 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
651 {
652         mod_timer(&pkc->retire_blk_timer,
653                         jiffies + pkc->tov_in_jiffies);
654         pkc->last_kactive_blk_num = pkc->kactive_blk_num;
655 }
656
657 /*
658  * Timer logic:
659  * 1) We refresh the timer only when we open a block.
660  *    By doing this we don't waste cycles refreshing the timer
661  *    on a packet-by-packet basis.
662  *
663  * With a 1MB block-size, on a 1Gbps line, it will take
664  * i) ~8 ms to fill a block + ii) memcpy etc.
665  * In this cut we are not accounting for the memcpy time.
666  *
667  * So, if the user sets the 'tmo' to 10ms then the timer
668  * will never fire while the block is still getting filled
669  * (which is what we want). However, the user could choose
670  * to close a block early and that's fine.
671  *
672  * But when the timer does fire, we check whether or not to refresh it.
673  * Since the tmo granularity is in msecs, it is not too expensive
674  * to refresh the timer, let's say every '8' msecs.
675  * Either the user can set the 'tmo' or we can derive it based on
676  * a) line-speed and b) block-size.
677  * prb_calc_retire_blk_tmo() calculates the tmo.
678  *
679  */
680 static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
681 {
682         struct packet_sock *po =
683                 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
684         struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
685         unsigned int frozen;
686         struct tpacket_block_desc *pbd;
687
688         spin_lock(&po->sk.sk_receive_queue.lock);
689
690         frozen = prb_queue_frozen(pkc);
691         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
692
693         if (unlikely(pkc->delete_blk_timer))
694                 goto out;
695
696         /* We only need to plug the race when the block is partially filled.
697          * tpacket_rcv:
698          *              lock(); increment BLOCK_NUM_PKTS; unlock()
699          *              copy_bits() is in progress ...
700          *              timer fires on another cpu:
701          *              we can't retire the current block because copy_bits
702          *              is in progress.
703          *
704          */
705         if (BLOCK_NUM_PKTS(pbd)) {
706                 /* Waiting for skb_copy_bits to finish... */
707                 write_lock(&pkc->blk_fill_in_prog_lock);
708                 write_unlock(&pkc->blk_fill_in_prog_lock);
709         }
710
711         if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
712                 if (!frozen) {
713                         if (!BLOCK_NUM_PKTS(pbd)) {
714                                 /* An empty block. Just refresh the timer. */
715                                 goto refresh_timer;
716                         }
717                         prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
718                         if (!prb_dispatch_next_block(pkc, po))
719                                 goto refresh_timer;
720                         else
721                                 goto out;
722                 } else {
723                         /* Case 1. Queue was frozen because user-space was
724                          *         lagging behind.
725                          */
726                         if (prb_curr_blk_in_use(pbd)) {
727                                 /*
728                                  * Ok, user-space is still behind.
729                                  * So just refresh the timer.
730                                  */
731                                 goto refresh_timer;
732                         } else {
733                                /* Case 2. The queue was frozen, user-space
734                                 * caught up, and now the link went idle &&
735                                 * the timer fired. We don't have a block to
736                                 * close, so we open this block and restart
737                                 * the timer. Opening a block thaws the queue
738                                 * and restarts the timer as a side effect.
739                                 */
740                                 prb_open_block(pkc, pbd);
741                                 goto out;
742                         }
743                 }
744         }
745
746 refresh_timer:
747         _prb_refresh_rx_retire_blk_timer(pkc);
748
749 out:
750         spin_unlock(&po->sk.sk_receive_queue.lock);
751 }
752
753 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
754                 struct tpacket_block_desc *pbd1, __u32 status)
755 {
756         /* Flush everything minus the block header */
757
758 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
759         u8 *start, *end;
760
761         start = (u8 *)pbd1;
762
763         /* Skip the block header (we know the header WILL fit in 4K) */
764         start += PAGE_SIZE;
765
766         end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
767         for (; start < end; start += PAGE_SIZE)
768                 flush_dcache_page(pgv_to_page(start));
769
770         smp_wmb();
771 #endif
772
773         /* Now update the block status. */
774
775         BLOCK_STATUS(pbd1) = status;
776
777         /* Flush the block header */
778
779 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
780         start = (u8 *)pbd1;
781         flush_dcache_page(pgv_to_page(start));
782
783         smp_wmb();
784 #endif
785 }
786
787 /*
788  * Side effect:
789  *
790  * 1) flush the block
791  * 2) Increment active_blk_num
792  *
793  * Note: We DON'T refresh the timer on purpose,
794  *       because almost always the next block will be opened.
795  */
796 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
797                 struct tpacket_block_desc *pbd1,
798                 struct packet_sock *po, unsigned int stat)
799 {
800         __u32 status = TP_STATUS_USER | stat;
801
802         struct tpacket3_hdr *last_pkt;
803         struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
804         struct sock *sk = &po->sk;
805
806         if (atomic_read(&po->tp_drops))
807                 status |= TP_STATUS_LOSING;
808
809         last_pkt = (struct tpacket3_hdr *)pkc1->prev;
810         last_pkt->tp_next_offset = 0;
811
812         /* Get the ts of the last pkt */
813         if (BLOCK_NUM_PKTS(pbd1)) {
814                 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
815                 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
816         } else {
817                 /* Ok, we tmo'd - so get the current time.
818                  *
819                  * It shouldn't really happen as we don't close empty
820                  * blocks. See prb_retire_rx_blk_timer_expired().
821                  */
822                 struct timespec64 ts;
823                 ktime_get_real_ts64(&ts);
824                 h1->ts_last_pkt.ts_sec = ts.tv_sec;
825                 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
826         }
827
828         smp_wmb();
829
830         /* Flush the block */
831         prb_flush_block(pkc1, pbd1, status);
832
833         sk->sk_data_ready(sk);
834
835         pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
836 }
837
838 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
839 {
840         pkc->reset_pending_on_curr_blk = 0;
841 }
842
843 /*
844  * Side effect of opening a block:
845  *
846  * 1) prb_queue is thawed.
847  * 2) retire_blk_timer is refreshed.
848  *
849  */
850 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
851         struct tpacket_block_desc *pbd1)
852 {
853         struct timespec64 ts;
854         struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
855
856         smp_rmb();
857
858         /* We could have just memset this but we would lose the
859          * flexibility of making the priv area sticky
860          */
861
862         BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
863         BLOCK_NUM_PKTS(pbd1) = 0;
864         BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
865
866         ktime_get_real_ts64(&ts);
867
868         h1->ts_first_pkt.ts_sec = ts.tv_sec;
869         h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
870
871         pkc1->pkblk_start = (char *)pbd1;
872         pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
873
874         BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
875         BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
876
877         pbd1->version = pkc1->version;
878         pkc1->prev = pkc1->nxt_offset;
879         pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
880
881         prb_thaw_queue(pkc1);
882         _prb_refresh_rx_retire_blk_timer(pkc1);
883
884         smp_wmb();
885 }
886
887 /*
888  * Queue freeze logic:
889  * 1) Assume tp_block_nr = 8 blocks.
890  * 2) At time 't0', user opens Rx ring.
891  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
892  * 4) user-space is either sleeping or processing block '0'.
893  * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
894  *    it will close block-7, loop around and try to fill block '0'.
895  *    call-flow:
896  *    __packet_lookup_frame_in_block
897  *      prb_retire_current_block()
898  *      prb_dispatch_next_block()
899  *        |->(BLOCK_STATUS == USER) evaluates to true
900  *    5.1) Since block-0 is currently in-use, we just freeze the queue.
901  * 6) Now there are two cases:
902  *    6.1) Link goes idle right after the queue is frozen.
903  *         But remember, the last open_block() refreshed the timer.
904  *         When this timer expires, it will refresh itself so that we can
905  *         re-open block-0 in near future.
906  *    6.2) Link is busy and keeps on receiving packets. This is a simple
907  *         case and __packet_lookup_frame_in_block will check if block-0
908  *         is free and can now be re-used.
909  */
910 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
911                                   struct packet_sock *po)
912 {
913         pkc->reset_pending_on_curr_blk = 1;
914         po->stats.stats3.tp_freeze_q_cnt++;
915 }
916
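/* For context, a hedged user-space sketch of configuring the TPACKET_V3
 * ring whose freeze/thaw logic is described above (eight blocks as in
 * the example; the sizes are assumptions):
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size     = 1 << 20,		// 1 MiB per block
 *		.tp_block_nr       = 8,
 *		.tp_frame_size     = 2048,
 *		.tp_frame_nr       = ((1 << 20) / 2048) * 8,
 *		.tp_retire_blk_tov = 60,		// block timeout, msec
 *	};
 *	int ver = TPACKET_V3;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */
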
917 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
918
919 /*
920  * If the next block is free then we will dispatch it
921  * and return a good offset.
922  * Else, we will freeze the queue.
923  * So, caller must check the return value.
924  */
925 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
926                 struct packet_sock *po)
927 {
928         struct tpacket_block_desc *pbd;
929
930         smp_rmb();
931
932         /* 1. Get current block num */
933         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
934
935         /* 2. If this block is currently in_use then freeze the queue */
936         if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
937                 prb_freeze_queue(pkc, po);
938                 return NULL;
939         }
940
941         /*
942          * 3.
943          * open this block and return the offset where the first packet
944          * needs to get stored.
945          */
946         prb_open_block(pkc, pbd);
947         return (void *)pkc->nxt_offset;
948 }
949
950 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
951                 struct packet_sock *po, unsigned int status)
952 {
953         struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
954
955         /* retire/close the current block */
956         if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
957                 /*
958                  * Plug the case where copy_bits() is in progress on
959                  * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
960                  * have space to copy the pkt in the current block and
961                  * called prb_retire_current_block()
962                  *
963                  * We don't need to worry about the TMO case because
964                  * the timer-handler already handled this case.
965                  */
966                 if (!(status & TP_STATUS_BLK_TMO)) {
967                         /* Waiting for skb_copy_bits to finish... */
968                         write_lock(&pkc->blk_fill_in_prog_lock);
969                         write_unlock(&pkc->blk_fill_in_prog_lock);
970                 }
971                 prb_close_block(pkc, pbd, po, status);
972                 return;
973         }
974 }
975
976 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
977 {
978         return TP_STATUS_USER & BLOCK_STATUS(pbd);
979 }
980
981 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
982 {
983         return pkc->reset_pending_on_curr_blk;
984 }
985
986 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
987         __releases(&pkc->blk_fill_in_prog_lock)
988 {
989         struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
990
991         read_unlock(&pkc->blk_fill_in_prog_lock);
992 }
993
994 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
995                         struct tpacket3_hdr *ppd)
996 {
997         ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
998 }
999
1000 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
1001                         struct tpacket3_hdr *ppd)
1002 {
1003         ppd->hv1.tp_rxhash = 0;
1004 }
1005
1006 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1007                         struct tpacket3_hdr *ppd)
1008 {
1009         if (skb_vlan_tag_present(pkc->skb)) {
1010                 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1011                 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1012                 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1013         } else {
1014                 ppd->hv1.tp_vlan_tci = 0;
1015                 ppd->hv1.tp_vlan_tpid = 0;
1016                 ppd->tp_status = TP_STATUS_AVAILABLE;
1017         }
1018 }
1019
1020 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1021                         struct tpacket3_hdr *ppd)
1022 {
1023         ppd->hv1.tp_padding = 0;
1024         prb_fill_vlan_info(pkc, ppd);
1025
1026         if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1027                 prb_fill_rxhash(pkc, ppd);
1028         else
1029                 prb_clear_rxhash(pkc, ppd);
1030 }
1031
1032 static void prb_fill_curr_block(char *curr,
1033                                 struct tpacket_kbdq_core *pkc,
1034                                 struct tpacket_block_desc *pbd,
1035                                 unsigned int len)
1036         __acquires(&pkc->blk_fill_in_prog_lock)
1037 {
1038         struct tpacket3_hdr *ppd;
1039
1040         ppd  = (struct tpacket3_hdr *)curr;
1041         ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1042         pkc->prev = curr;
1043         pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1044         BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1045         BLOCK_NUM_PKTS(pbd) += 1;
1046         read_lock(&pkc->blk_fill_in_prog_lock);
1047         prb_run_all_ft_ops(pkc, ppd);
1048 }
1049
1050 /* Assumes caller has the sk->rx_queue.lock */
1051 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1052                                             struct sk_buff *skb,
1053                                             unsigned int len
1054                                             )
1055 {
1056         struct tpacket_kbdq_core *pkc;
1057         struct tpacket_block_desc *pbd;
1058         char *curr, *end;
1059
1060         pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1061         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1062
1063         /* Queue is frozen when user space is lagging behind */
1064         if (prb_queue_frozen(pkc)) {
1065                 /*
1066                  * Check if the last block, which caused the queue to
1067                  * freeze, is still in_use by user-space.
1068                  */
1069                 if (prb_curr_blk_in_use(pbd)) {
1070                         /* Can't record this packet */
1071                         return NULL;
1072                 } else {
1073                         /*
1074                          * Ok, the block was released by user-space.
1075                          * Now let's open that block.
1076                          * Opening a block also thaws the queue;
1077                          * thawing is a side effect.
1078                          */
1079                         prb_open_block(pkc, pbd);
1080                 }
1081         }
1082
1083         smp_mb();
1084         curr = pkc->nxt_offset;
1085         pkc->skb = skb;
1086         end = (char *)pbd + pkc->kblk_size;
1087
1088         /* first try the current block */
1089         if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1090                 prb_fill_curr_block(curr, pkc, pbd, len);
1091                 return (void *)curr;
1092         }
1093
1094         /* Ok, close the current block */
1095         prb_retire_current_block(pkc, po, 0);
1096
1097         /* Now, try to dispatch the next block */
1098         curr = (char *)prb_dispatch_next_block(pkc, po);
1099         if (curr) {
1100                 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1101                 prb_fill_curr_block(curr, pkc, pbd, len);
1102                 return (void *)curr;
1103         }
1104
1105         /*
1106          * No free blocks are available. user_space hasn't caught up yet.
1107          * The queue was just frozen and now this packet will get dropped.
1108          */
1109         return NULL;
1110 }
1111
1112 static void *packet_current_rx_frame(struct packet_sock *po,
1113                                             struct sk_buff *skb,
1114                                             int status, unsigned int len)
1115 {
1116         char *curr = NULL;
1117         switch (po->tp_version) {
1118         case TPACKET_V1:
1119         case TPACKET_V2:
1120                 curr = packet_lookup_frame(po, &po->rx_ring,
1121                                         po->rx_ring.head, status);
1122                 return curr;
1123         case TPACKET_V3:
1124                 return __packet_lookup_frame_in_block(po, skb, len);
1125         default:
1126                 WARN(1, "TPACKET version not supported\n");
1127                 BUG();
1128                 return NULL;
1129         }
1130 }
1131
1132 static void *prb_lookup_block(const struct packet_sock *po,
1133                               const struct packet_ring_buffer *rb,
1134                               unsigned int idx,
1135                               int status)
1136 {
1137         struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1138         struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1139
1140         if (status != BLOCK_STATUS(pbd))
1141                 return NULL;
1142         return pbd;
1143 }
1144
1145 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1146 {
1147         unsigned int prev;
1148         if (rb->prb_bdqc.kactive_blk_num)
1149                 prev = rb->prb_bdqc.kactive_blk_num-1;
1150         else
1151                 prev = rb->prb_bdqc.knum_blocks-1;
1152         return prev;
1153 }
1154
1155 /* Assumes caller has held the rx_queue.lock */
1156 static void *__prb_previous_block(struct packet_sock *po,
1157                                          struct packet_ring_buffer *rb,
1158                                          int status)
1159 {
1160         unsigned int previous = prb_previous_blk_num(rb);
1161         return prb_lookup_block(po, rb, previous, status);
1162 }
1163
1164 static void *packet_previous_rx_frame(struct packet_sock *po,
1165                                              struct packet_ring_buffer *rb,
1166                                              int status)
1167 {
1168         if (po->tp_version <= TPACKET_V2)
1169                 return packet_previous_frame(po, rb, status);
1170
1171         return __prb_previous_block(po, rb, status);
1172 }
1173
1174 static void packet_increment_rx_head(struct packet_sock *po,
1175                                             struct packet_ring_buffer *rb)
1176 {
1177         switch (po->tp_version) {
1178         case TPACKET_V1:
1179         case TPACKET_V2:
1180                 return packet_increment_head(rb);
1181         case TPACKET_V3:
1182         default:
1183                 WARN(1, "TPACKET version not supported.\n");
1184                 BUG();
1185                 return;
1186         }
1187 }
1188
1189 static void *packet_previous_frame(struct packet_sock *po,
1190                 struct packet_ring_buffer *rb,
1191                 int status)
1192 {
1193         unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1194         return packet_lookup_frame(po, rb, previous, status);
1195 }
1196
1197 static void packet_increment_head(struct packet_ring_buffer *buff)
1198 {
1199         buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1200 }
1201
1202 static void packet_inc_pending(struct packet_ring_buffer *rb)
1203 {
1204         this_cpu_inc(*rb->pending_refcnt);
1205 }
1206
1207 static void packet_dec_pending(struct packet_ring_buffer *rb)
1208 {
1209         this_cpu_dec(*rb->pending_refcnt);
1210 }
1211
1212 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1213 {
1214         unsigned int refcnt = 0;
1215         int cpu;
1216
1217         /* We don't use pending refcount in rx_ring. */
1218         if (rb->pending_refcnt == NULL)
1219                 return 0;
1220
1221         for_each_possible_cpu(cpu)
1222                 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1223
1224         return refcnt;
1225 }
1226
1227 static int packet_alloc_pending(struct packet_sock *po)
1228 {
1229         po->rx_ring.pending_refcnt = NULL;
1230
1231         po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1232         if (unlikely(po->tx_ring.pending_refcnt == NULL))
1233                 return -ENOBUFS;
1234
1235         return 0;
1236 }
1237
1238 static void packet_free_pending(struct packet_sock *po)
1239 {
1240         free_percpu(po->tx_ring.pending_refcnt);
1241 }
1242
1243 #define ROOM_POW_OFF    2
1244 #define ROOM_NONE       0x0
1245 #define ROOM_LOW        0x1
1246 #define ROOM_NORMAL     0x2
1247
1248 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1249 {
1250         int idx, len;
1251
1252         len = READ_ONCE(po->rx_ring.frame_max) + 1;
1253         idx = READ_ONCE(po->rx_ring.head);
1254         if (pow_off)
1255                 idx += len >> pow_off;
1256         if (idx >= len)
1257                 idx -= len;
1258         return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1259 }
1260
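/* Example of the look-ahead in __tpacket_has_room(): with frame_max + 1
 * = 256 frames and pow_off = ROOM_POW_OFF (2), the probe checks the slot
 * 256 >> 2 = 64 frames ahead of head; if that frame is still
 * TP_STATUS_KERNEL, at least a quarter of the ring is free and the
 * caller can report ROOM_NORMAL.
 */
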
1261 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1262 {
1263         int idx, len;
1264
1265         len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1266         idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1267         if (pow_off)
1268                 idx += len >> pow_off;
1269         if (idx >= len)
1270                 idx -= len;
1271         return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1272 }
1273
1274 static int __packet_rcv_has_room(const struct packet_sock *po,
1275                                  const struct sk_buff *skb)
1276 {
1277         const struct sock *sk = &po->sk;
1278         int ret = ROOM_NONE;
1279
1280         if (po->prot_hook.func != tpacket_rcv) {
1281                 int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1282                 int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1283                                    - (skb ? skb->truesize : 0);
1284
1285                 if (avail > (rcvbuf >> ROOM_POW_OFF))
1286                         return ROOM_NORMAL;
1287                 else if (avail > 0)
1288                         return ROOM_LOW;
1289                 else
1290                         return ROOM_NONE;
1291         }
1292
1293         if (po->tp_version == TPACKET_V3) {
1294                 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1295                         ret = ROOM_NORMAL;
1296                 else if (__tpacket_v3_has_room(po, 0))
1297                         ret = ROOM_LOW;
1298         } else {
1299                 if (__tpacket_has_room(po, ROOM_POW_OFF))
1300                         ret = ROOM_NORMAL;
1301                 else if (__tpacket_has_room(po, 0))
1302                         ret = ROOM_LOW;
1303         }
1304
1305         return ret;
1306 }
1307
1308 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1309 {
1310         int pressure, ret;
1311
1312         ret = __packet_rcv_has_room(po, skb);
1313         pressure = ret != ROOM_NORMAL;
1314
1315         if (READ_ONCE(po->pressure) != pressure)
1316                 WRITE_ONCE(po->pressure, pressure);
1317
1318         return ret;
1319 }
1320
1321 static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1322 {
1323         if (READ_ONCE(po->pressure) &&
1324             __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1325                 WRITE_ONCE(po->pressure,  0);
1326 }
1327
1328 static void packet_sock_destruct(struct sock *sk)
1329 {
1330         skb_queue_purge(&sk->sk_error_queue);
1331
1332         WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1333         WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1334
1335         if (!sock_flag(sk, SOCK_DEAD)) {
1336                 pr_err("Attempt to release alive packet socket: %p\n", sk);
1337                 return;
1338         }
1339 }
1340
1341 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1342 {
1343         u32 *history = po->rollover->history;
1344         u32 victim, rxhash;
1345         int i, count = 0;
1346
1347         rxhash = skb_get_hash(skb);
1348         for (i = 0; i < ROLLOVER_HLEN; i++)
1349                 if (READ_ONCE(history[i]) == rxhash)
1350                         count++;
1351
1352         victim = get_random_u32_below(ROLLOVER_HLEN);
1353
1354         /* Avoid dirtying the cache line if possible */
1355         if (READ_ONCE(history[victim]) != rxhash)
1356                 WRITE_ONCE(history[victim], rxhash);
1357
1358         return count > (ROLLOVER_HLEN >> 1);
1359 }
1360
1361 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1362                                       struct sk_buff *skb,
1363                                       unsigned int num)
1364 {
1365         return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1366 }
1367
1368 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1369                                     struct sk_buff *skb,
1370                                     unsigned int num)
1371 {
1372         unsigned int val = atomic_inc_return(&f->rr_cur);
1373
1374         return val % num;
1375 }
1376
1377 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1378                                      struct sk_buff *skb,
1379                                      unsigned int num)
1380 {
1381         return smp_processor_id() % num;
1382 }
1383
1384 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1385                                      struct sk_buff *skb,
1386                                      unsigned int num)
1387 {
1388         return get_random_u32_below(num);
1389 }
1390
1391 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1392                                           struct sk_buff *skb,
1393                                           unsigned int idx, bool try_self,
1394                                           unsigned int num)
1395 {
1396         struct packet_sock *po, *po_next, *po_skip = NULL;
1397         unsigned int i, j, room = ROOM_NONE;
1398
1399         po = pkt_sk(rcu_dereference(f->arr[idx]));
1400
1401         if (try_self) {
1402                 room = packet_rcv_has_room(po, skb);
1403                 if (room == ROOM_NORMAL ||
1404                     (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1405                         return idx;
1406                 po_skip = po;
1407         }
1408
1409         i = j = min_t(int, po->rollover->sock, num - 1);
1410         do {
1411                 po_next = pkt_sk(rcu_dereference(f->arr[i]));
1412                 if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1413                     packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1414                         if (i != j)
1415                                 po->rollover->sock = i;
1416                         atomic_long_inc(&po->rollover->num);
1417                         if (room == ROOM_LOW)
1418                                 atomic_long_inc(&po->rollover->num_huge);
1419                         return i;
1420                 }
1421
1422                 if (++i == num)
1423                         i = 0;
1424         } while (i != j);
1425
1426         atomic_long_inc(&po->rollover->num_failed);
1427         return idx;
1428 }
1429
1430 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1431                                     struct sk_buff *skb,
1432                                     unsigned int num)
1433 {
1434         return skb_get_queue_mapping(skb) % num;
1435 }
1436
1437 static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1438                                      struct sk_buff *skb,
1439                                      unsigned int num)
1440 {
1441         struct bpf_prog *prog;
1442         unsigned int ret = 0;
1443
1444         rcu_read_lock();
1445         prog = rcu_dereference(f->bpf_prog);
1446         if (prog)
1447                 ret = bpf_prog_run_clear_cb(prog, skb) % num;
1448         rcu_read_unlock();
1449
1450         return ret;
1451 }
1452
1453 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1454 {
1455         return f->flags & (flag >> 8);
1456 }
1457
1458 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1459                              struct packet_type *pt, struct net_device *orig_dev)
1460 {
1461         struct packet_fanout *f = pt->af_packet_priv;
1462         unsigned int num = READ_ONCE(f->num_members);
1463         struct net *net = read_pnet(&f->net);
1464         struct packet_sock *po;
1465         unsigned int idx;
1466
1467         if (!net_eq(dev_net(dev), net) || !num) {
1468                 kfree_skb(skb);
1469                 return 0;
1470         }
1471
1472         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1473                 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1474                 if (!skb)
1475                         return 0;
1476         }
1477         switch (f->type) {
1478         case PACKET_FANOUT_HASH:
1479         default:
1480                 idx = fanout_demux_hash(f, skb, num);
1481                 break;
1482         case PACKET_FANOUT_LB:
1483                 idx = fanout_demux_lb(f, skb, num);
1484                 break;
1485         case PACKET_FANOUT_CPU:
1486                 idx = fanout_demux_cpu(f, skb, num);
1487                 break;
1488         case PACKET_FANOUT_RND:
1489                 idx = fanout_demux_rnd(f, skb, num);
1490                 break;
1491         case PACKET_FANOUT_QM:
1492                 idx = fanout_demux_qm(f, skb, num);
1493                 break;
1494         case PACKET_FANOUT_ROLLOVER:
1495                 idx = fanout_demux_rollover(f, skb, 0, false, num);
1496                 break;
1497         case PACKET_FANOUT_CBPF:
1498         case PACKET_FANOUT_EBPF:
1499                 idx = fanout_demux_bpf(f, skb, num);
1500                 break;
1501         }
1502
1503         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1504                 idx = fanout_demux_rollover(f, skb, idx, true, num);
1505
1506         po = pkt_sk(rcu_dereference(f->arr[idx]));
1507         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1508 }
1509
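/* A hedged user-space sketch of joining two sockets to the fanout group
 * that the demux above serves (the group id 42 is an assumption):
 *
 *	unsigned int arg = 42 | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd1, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *	setsockopt(fd2, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *	// flags such as PACKET_FANOUT_FLAG_ROLLOVER are OR'ed into the
 *	// high 16 bits together with the fanout type
 */
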
1510 DEFINE_MUTEX(fanout_mutex);
1511 EXPORT_SYMBOL_GPL(fanout_mutex);
1512 static LIST_HEAD(fanout_list);
1513 static u16 fanout_next_id;
1514
1515 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1516 {
1517         struct packet_fanout *f = po->fanout;
1518
1519         spin_lock(&f->lock);
1520         rcu_assign_pointer(f->arr[f->num_members], sk);
1521         smp_wmb();
1522         f->num_members++;
1523         if (f->num_members == 1)
1524                 dev_add_pack(&f->prot_hook);
1525         spin_unlock(&f->lock);
1526 }
1527
1528 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1529 {
1530         struct packet_fanout *f = po->fanout;
1531         int i;
1532
1533         spin_lock(&f->lock);
1534         for (i = 0; i < f->num_members; i++) {
1535                 if (rcu_dereference_protected(f->arr[i],
1536                                               lockdep_is_held(&f->lock)) == sk)
1537                         break;
1538         }
1539         BUG_ON(i >= f->num_members);
1540         rcu_assign_pointer(f->arr[i],
1541                            rcu_dereference_protected(f->arr[f->num_members - 1],
1542                                                      lockdep_is_held(&f->lock)));
1543         f->num_members--;
1544         if (f->num_members == 0)
1545                 __dev_remove_pack(&f->prot_hook);
1546         spin_unlock(&f->lock);
1547 }
1548
1549 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1550 {
1551         if (sk->sk_family != PF_PACKET)
1552                 return false;
1553
1554         return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1555 }
1556
1557 static void fanout_init_data(struct packet_fanout *f)
1558 {
1559         switch (f->type) {
1560         case PACKET_FANOUT_LB:
1561                 atomic_set(&f->rr_cur, 0);
1562                 break;
1563         case PACKET_FANOUT_CBPF:
1564         case PACKET_FANOUT_EBPF:
1565                 RCU_INIT_POINTER(f->bpf_prog, NULL);
1566                 break;
1567         }
1568 }
1569
1570 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1571 {
1572         struct bpf_prog *old;
1573
1574         spin_lock(&f->lock);
1575         old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1576         rcu_assign_pointer(f->bpf_prog, new);
1577         spin_unlock(&f->lock);
1578
1579         if (old) {
1580                 synchronize_net();
1581                 bpf_prog_destroy(old);
1582         }
1583 }
1584
1585 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1586                                 unsigned int len)
1587 {
1588         struct bpf_prog *new;
1589         struct sock_fprog fprog;
1590         int ret;
1591
1592         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1593                 return -EPERM;
1594
1595         ret = copy_bpf_fprog_from_user(&fprog, data, len);
1596         if (ret)
1597                 return ret;
1598
1599         ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1600         if (ret)
1601                 return ret;
1602
1603         __fanout_set_data_bpf(po->fanout, new);
1604         return 0;
1605 }
1606
1607 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1608                                 unsigned int len)
1609 {
1610         struct bpf_prog *new;
1611         u32 fd;
1612
1613         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1614                 return -EPERM;
1615         if (len != sizeof(fd))
1616                 return -EINVAL;
1617         if (copy_from_sockptr(&fd, data, len))
1618                 return -EFAULT;
1619
1620         new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1621         if (IS_ERR(new))
1622                 return PTR_ERR(new);
1623
1624         __fanout_set_data_bpf(po->fanout, new);
1625         return 0;
1626 }
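
/* Usage sketch (illustrative): for PACKET_FANOUT_EBPF groups the demux
 * program is installed after joining, by handing a BPF program fd to
 * PACKET_FANOUT_DATA (PACKET_FANOUT_CBPF takes a struct sock_fprog
 * through the same option instead):
 *
 *	// prog_fd: a loaded BPF_PROG_TYPE_SOCKET_FILTER program
 *	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		       &prog_fd, sizeof(prog_fd)))
 *		perror("PACKET_FANOUT_DATA");
 */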
1627
1628 static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1629                            unsigned int len)
1630 {
1631         switch (po->fanout->type) {
1632         case PACKET_FANOUT_CBPF:
1633                 return fanout_set_data_cbpf(po, data, len);
1634         case PACKET_FANOUT_EBPF:
1635                 return fanout_set_data_ebpf(po, data, len);
1636         default:
1637                 return -EINVAL;
1638         }
1639 }
1640
1641 static void fanout_release_data(struct packet_fanout *f)
1642 {
1643         switch (f->type) {
1644         case PACKET_FANOUT_CBPF:
1645         case PACKET_FANOUT_EBPF:
1646                 __fanout_set_data_bpf(f, NULL);
1647         }
1648 }
1649
1650 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1651 {
1652         struct packet_fanout *f;
1653
1654         list_for_each_entry(f, &fanout_list, list) {
1655                 if (f->id == candidate_id &&
1656                     read_pnet(&f->net) == sock_net(sk)) {
1657                         return false;
1658                 }
1659         }
1660         return true;
1661 }
1662
1663 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1664 {
1665         u16 id = fanout_next_id;
1666
1667         do {
1668                 if (__fanout_id_is_free(sk, id)) {
1669                         *new_id = id;
1670                         fanout_next_id = id + 1;
1671                         return true;
1672                 }
1673
1674                 id++;
1675         } while (id != fanout_next_id);
1676
1677         return false;
1678 }
1679
1680 static int fanout_add(struct sock *sk, struct fanout_args *args)
1681 {
1682         struct packet_rollover *rollover = NULL;
1683         struct packet_sock *po = pkt_sk(sk);
1684         u16 type_flags = args->type_flags;
1685         struct packet_fanout *f, *match;
1686         u8 type = type_flags & 0xff;
1687         u8 flags = type_flags >> 8;
1688         u16 id = args->id;
1689         int err;
1690
1691         switch (type) {
1692         case PACKET_FANOUT_ROLLOVER:
1693                 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1694                         return -EINVAL;
1695                 break;
1696         case PACKET_FANOUT_HASH:
1697         case PACKET_FANOUT_LB:
1698         case PACKET_FANOUT_CPU:
1699         case PACKET_FANOUT_RND:
1700         case PACKET_FANOUT_QM:
1701         case PACKET_FANOUT_CBPF:
1702         case PACKET_FANOUT_EBPF:
1703                 break;
1704         default:
1705                 return -EINVAL;
1706         }
1707
1708         mutex_lock(&fanout_mutex);
1709
1710         err = -EALREADY;
1711         if (po->fanout)
1712                 goto out;
1713
1714         if (type == PACKET_FANOUT_ROLLOVER ||
1715             (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1716                 err = -ENOMEM;
1717                 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1718                 if (!rollover)
1719                         goto out;
1720                 atomic_long_set(&rollover->num, 0);
1721                 atomic_long_set(&rollover->num_huge, 0);
1722                 atomic_long_set(&rollover->num_failed, 0);
1723         }
1724
1725         if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1726                 if (id != 0) {
1727                         err = -EINVAL;
1728                         goto out;
1729                 }
1730                 if (!fanout_find_new_id(sk, &id)) {
1731                         err = -ENOMEM;
1732                         goto out;
1733                 }
1734                 /* ephemeral flag for the first socket in the group: drop it */
1735                 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1736         }
1737
1738         match = NULL;
1739         list_for_each_entry(f, &fanout_list, list) {
1740                 if (f->id == id &&
1741                     read_pnet(&f->net) == sock_net(sk)) {
1742                         match = f;
1743                         break;
1744                 }
1745         }
1746         err = -EINVAL;
1747         if (match) {
1748                 if (match->flags != flags)
1749                         goto out;
1750                 if (args->max_num_members &&
1751                     args->max_num_members != match->max_num_members)
1752                         goto out;
1753         } else {
1754                 if (args->max_num_members > PACKET_FANOUT_MAX)
1755                         goto out;
1756                 if (!args->max_num_members)
1757                         /* legacy PACKET_FANOUT_MAX */
1758                         args->max_num_members = 256;
1759                 err = -ENOMEM;
1760                 match = kvzalloc(struct_size(match, arr, args->max_num_members),
1761                                  GFP_KERNEL);
1762                 if (!match)
1763                         goto out;
1764                 write_pnet(&match->net, sock_net(sk));
1765                 match->id = id;
1766                 match->type = type;
1767                 match->flags = flags;
1768                 INIT_LIST_HEAD(&match->list);
1769                 spin_lock_init(&match->lock);
1770                 refcount_set(&match->sk_ref, 0);
1771                 fanout_init_data(match);
1772                 match->prot_hook.type = po->prot_hook.type;
1773                 match->prot_hook.dev = po->prot_hook.dev;
1774                 match->prot_hook.func = packet_rcv_fanout;
1775                 match->prot_hook.af_packet_priv = match;
1776                 match->prot_hook.af_packet_net = read_pnet(&match->net);
1777                 match->prot_hook.id_match = match_fanout_group;
1778                 match->max_num_members = args->max_num_members;
1779                 match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
1780                 list_add(&match->list, &fanout_list);
1781         }
1782         err = -EINVAL;
1783
1784         spin_lock(&po->bind_lock);
1785         if (po->running &&
1786             match->type == type &&
1787             match->prot_hook.type == po->prot_hook.type &&
1788             match->prot_hook.dev == po->prot_hook.dev) {
1789                 err = -ENOSPC;
1790                 if (refcount_read(&match->sk_ref) < match->max_num_members) {
1791                         __dev_remove_pack(&po->prot_hook);
1792
1793                         /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1794                         WRITE_ONCE(po->fanout, match);
1795
1796                         po->rollover = rollover;
1797                         rollover = NULL;
1798                         refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1799                         __fanout_link(sk, po);
1800                         err = 0;
1801                 }
1802         }
1803         spin_unlock(&po->bind_lock);
1804
1805         if (err && !refcount_read(&match->sk_ref)) {
1806                 list_del(&match->list);
1807                 kvfree(match);
1808         }
1809
1810 out:
1811         kfree(rollover);
1812         mutex_unlock(&fanout_mutex);
1813         return err;
1814 }
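
/* Usage sketch (illustrative): with PACKET_FANOUT_FLAG_UNIQUEID the
 * first member passes id 0 and lets the kernel pick a free id, which
 * it can read back and share with the remaining members (they join
 * with the concrete id and without the flag, as it is dropped above):
 *
 *	int arg = (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_UNIQUEID) << 16;
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *	socklen_t len = sizeof(arg);
 *	getsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, &len);
 *	uint16_t group_id = arg & 0xffff;
 */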
1815
1816 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1817  * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1818  * It is the responsibility of the caller to call fanout_release_data() and
1819  * free the returned packet_fanout (after synchronize_net())
1820  */
1821 static struct packet_fanout *fanout_release(struct sock *sk)
1822 {
1823         struct packet_sock *po = pkt_sk(sk);
1824         struct packet_fanout *f;
1825
1826         mutex_lock(&fanout_mutex);
1827         f = po->fanout;
1828         if (f) {
1829                 po->fanout = NULL;
1830
1831                 if (refcount_dec_and_test(&f->sk_ref))
1832                         list_del(&f->list);
1833                 else
1834                         f = NULL;
1835         }
1836         mutex_unlock(&fanout_mutex);
1837
1838         return f;
1839 }
1840
1841 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1842                                           struct sk_buff *skb)
1843 {
1844         /* Earlier code assumed this would be a VLAN pkt; double-check
1845          * this now that we have the actual packet in hand. We can only
1846          * do this check on Ethernet devices.
1847          */
1848         if (unlikely(dev->type != ARPHRD_ETHER))
1849                 return false;
1850
1851         skb_reset_mac_header(skb);
1852         return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1853 }
1854
1855 static const struct proto_ops packet_ops;
1856
1857 static const struct proto_ops packet_ops_spkt;
1858
1859 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1860                            struct packet_type *pt, struct net_device *orig_dev)
1861 {
1862         struct sock *sk;
1863         struct sockaddr_pkt *spkt;
1864
1865         /*
1866          *      When we registered the protocol we saved the socket in the data
1867          *      field for just this event.
1868          */
1869
1870         sk = pt->af_packet_priv;
1871
1872         /*
1873          *      Yank back the headers [hope the device set this
1874          *      right or kerboom...]
1875          *
1876          *      Incoming packets have ll header pulled,
1877          *      push it back.
1878          *
1879          *      For outgoing ones skb->data == skb_mac_header(skb)
1880          *      so that this procedure is a no-op.
1881          */
1882
1883         if (skb->pkt_type == PACKET_LOOPBACK)
1884                 goto out;
1885
1886         if (!net_eq(dev_net(dev), sock_net(sk)))
1887                 goto out;
1888
1889         skb = skb_share_check(skb, GFP_ATOMIC);
1890         if (skb == NULL)
1891                 goto oom;
1892
1893         /* drop any routing info */
1894         skb_dst_drop(skb);
1895
1896         /* drop conntrack reference */
1897         nf_reset_ct(skb);
1898
1899         spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1900
1901         skb_push(skb, skb->data - skb_mac_header(skb));
1902
1903         /*
1904          *      The SOCK_PACKET socket receives _all_ frames.
1905          */
1906
1907         spkt->spkt_family = dev->type;
1908         strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1909         spkt->spkt_protocol = skb->protocol;
1910
1911         /*
1912          *      Charge the memory to the socket. This is done specifically
1913          *      to prevent sockets from using up all the memory.
1914          */
1915
1916         if (sock_queue_rcv_skb(sk, skb) == 0)
1917                 return 0;
1918
1919 out:
1920         kfree_skb(skb);
1921 oom:
1922         return 0;
1923 }
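
/* Usage sketch (illustrative): SOCK_PACKET is the legacy interface
 * serviced by packet_rcv_spkt(); receivers get a struct sockaddr_pkt
 * naming the ingress device:
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt;
 *	socklen_t alen = sizeof(spkt);
 *	char buf[2048];
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&spkt, &alen);
 *	// spkt.spkt_device now names the device, e.g. "eth0"
 */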
1924
1925 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1926 {
1927         int depth;
1928
1929         if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1930             sock->type == SOCK_RAW) {
1931                 skb_reset_mac_header(skb);
1932                 skb->protocol = dev_parse_header_protocol(skb);
1933         }
1934
1935         /* Move network header to the right position for VLAN tagged packets */
1936         if (likely(skb->dev->type == ARPHRD_ETHER) &&
1937             eth_type_vlan(skb->protocol) &&
1938             __vlan_get_protocol(skb, skb->protocol, &depth) != 0) {
1939                 if (pskb_may_pull(skb, depth))
1940                         skb_set_network_header(skb, depth);
1941         }
1942
1943         skb_probe_transport_header(skb);
1944 }
1945
1946 /*
1947  *      Output a raw packet to the device layer. This bypasses all the other
1948  *      protocol layers and you must therefore supply it with a complete frame.
1949  */
1950
1951 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1952                                size_t len)
1953 {
1954         struct sock *sk = sock->sk;
1955         DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1956         struct sk_buff *skb = NULL;
1957         struct net_device *dev;
1958         struct sockcm_cookie sockc;
1959         __be16 proto = 0;
1960         int err;
1961         int extra_len = 0;
1962
1963         /*
1964          *      Get and verify the address.
1965          */
1966
1967         if (saddr) {
1968                 if (msg->msg_namelen < sizeof(struct sockaddr))
1969                         return -EINVAL;
1970                 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1971                         proto = saddr->spkt_protocol;
1972         } else
1973                 return -ENOTCONN;       /* SOCK_PACKET must be sent giving an address */
1974
1975         /*
1976          *      Find the device first to size check it
1977          */
1978
1979         saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1980 retry:
1981         rcu_read_lock();
1982         dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1983         err = -ENODEV;
1984         if (dev == NULL)
1985                 goto out_unlock;
1986
1987         err = -ENETDOWN;
1988         if (!(dev->flags & IFF_UP))
1989                 goto out_unlock;
1990
1991         /*
1992          * You may not queue a frame bigger than the mtu. This is the lowest level
1993          * raw protocol and you must do your own fragmentation at this level.
1994          */
1995
1996         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1997                 if (!netif_supports_nofcs(dev)) {
1998                         err = -EPROTONOSUPPORT;
1999                         goto out_unlock;
2000                 }
2001                 extra_len = 4; /* We're doing our own CRC */
2002         }
2003
2004         err = -EMSGSIZE;
2005         if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
2006                 goto out_unlock;
2007
2008         if (!skb) {
2009                 size_t reserved = LL_RESERVED_SPACE(dev);
2010                 int tlen = dev->needed_tailroom;
2011                 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
2012
2013                 rcu_read_unlock();
2014                 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
2015                 if (skb == NULL)
2016                         return -ENOBUFS;
2017                 /* FIXME: Save some space for broken drivers that write a hard
2018                  * header at transmission time by themselves. PPP is the notable
2019                  * one here. This should really be fixed at the driver level.
2020                  */
2021                 skb_reserve(skb, reserved);
2022                 skb_reset_network_header(skb);
2023
2024                 /* Try to align data part correctly */
2025                 if (hhlen) {
2026                         skb->data -= hhlen;
2027                         skb->tail -= hhlen;
2028                         if (len < hhlen)
2029                                 skb_reset_network_header(skb);
2030                 }
2031                 err = memcpy_from_msg(skb_put(skb, len), msg, len);
2032                 if (err)
2033                         goto out_free;
2034                 goto retry;
2035         }
2036
2037         if (!dev_validate_header(dev, skb->data, len)) {
2038                 err = -EINVAL;
2039                 goto out_unlock;
2040         }
2041         if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
2042             !packet_extra_vlan_len_allowed(dev, skb)) {
2043                 err = -EMSGSIZE;
2044                 goto out_unlock;
2045         }
2046
2047         sockcm_init(&sockc, sk);
2048         if (msg->msg_controllen) {
2049                 err = sock_cmsg_send(sk, msg, &sockc);
2050                 if (unlikely(err))
2051                         goto out_unlock;
2052         }
2053
2054         skb->protocol = proto;
2055         skb->dev = dev;
2056         skb->priority = sk->sk_priority;
2057         skb->mark = sk->sk_mark;
2058         skb->tstamp = sockc.transmit_time;
2059
2060         skb_setup_tx_timestamp(skb, sockc.tsflags);
2061
2062         if (unlikely(extra_len == 4))
2063                 skb->no_fcs = 1;
2064
2065         packet_parse_headers(skb, sock);
2066
2067         dev_queue_xmit(skb);
2068         rcu_read_unlock();
2069         return len;
2070
2071 out_unlock:
2072         rcu_read_unlock();
2073 out_free:
2074         kfree_skb(skb);
2075         return err;
2076 }
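
/* Usage sketch (illustrative): a SOCK_PACKET send must name the target
 * device in the address and must carry a complete link-layer frame;
 * "frame" and "frame_len" below are hypothetical:
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy((char *)spkt.spkt_device, "eth0",
 *		sizeof(spkt.spkt_device) - 1);
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */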
2077
2078 static unsigned int run_filter(struct sk_buff *skb,
2079                                const struct sock *sk,
2080                                unsigned int res)
2081 {
2082         struct sk_filter *filter;
2083
2084         rcu_read_lock();
2085         filter = rcu_dereference(sk->sk_filter);
2086         if (filter != NULL)
2087                 res = bpf_prog_run_clear_cb(filter->prog, skb);
2088         rcu_read_unlock();
2089
2090         return res;
2091 }
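
/* Usage sketch (illustrative): the filter consulted here is whatever
 * the user attached with SO_ATTACH_FILTER; its return value caps how
 * many bytes are kept (0 drops the packet entirely). E.g. truncate
 * every packet to its first 96 bytes:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 96),
 *	};
 *	struct sock_fprog fprog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */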
2092
2093 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2094                            size_t *len)
2095 {
2096         struct virtio_net_hdr vnet_hdr;
2097
2098         if (*len < sizeof(vnet_hdr))
2099                 return -EINVAL;
2100         *len -= sizeof(vnet_hdr);
2101
2102         if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2103                 return -EINVAL;
2104
2105         return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2106 }
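
/* Usage sketch (illustrative): once PACKET_VNET_HDR is enabled, every
 * received packet is preceded by a struct virtio_net_hdr carrying
 * offload metadata (GSO type/size, checksum hints):
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 *	char buf[65536];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
 *	// packet data starts at buf + sizeof(*vh)
 */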
2107
2108 /*
2109  * This function does lazy skb cloning in the hope that most packets
2110  * are discarded by BPF.
2111  *
2112  * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2113  * and skb->cb are mangled. It works because (and until) packets
2114  * falling here are owned by current CPU. Output packets are cloned
2115  * falling here are owned by the current CPU. Output packets are cloned
2116  * sequentially, so that if we return skb to original state on exit,
2117  * we will not harm anyone.
2118  */
2119
2120 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2121                       struct packet_type *pt, struct net_device *orig_dev)
2122 {
2123         struct sock *sk;
2124         struct sockaddr_ll *sll;
2125         struct packet_sock *po;
2126         u8 *skb_head = skb->data;
2127         int skb_len = skb->len;
2128         unsigned int snaplen, res;
2129         bool is_drop_n_account = false;
2130
2131         if (skb->pkt_type == PACKET_LOOPBACK)
2132                 goto drop;
2133
2134         sk = pt->af_packet_priv;
2135         po = pkt_sk(sk);
2136
2137         if (!net_eq(dev_net(dev), sock_net(sk)))
2138                 goto drop;
2139
2140         skb->dev = dev;
2141
2142         if (dev_has_header(dev)) {
2143                 /* The device has an explicit notion of ll header,
2144                  * exported to higher levels.
2145                  *
2146                  * Otherwise, the device hides details of its frame
2147                  * structure, so that corresponding packet head is
2148                  * never delivered to user.
2149                  */
2150                 if (sk->sk_type != SOCK_DGRAM)
2151                         skb_push(skb, skb->data - skb_mac_header(skb));
2152                 else if (skb->pkt_type == PACKET_OUTGOING) {
2153                         /* Special case: outgoing packets have ll header at head */
2154                         skb_pull(skb, skb_network_offset(skb));
2155                 }
2156         }
2157
2158         snaplen = skb->len;
2159
2160         res = run_filter(skb, sk, snaplen);
2161         if (!res)
2162                 goto drop_n_restore;
2163         if (snaplen > res)
2164                 snaplen = res;
2165
2166         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2167                 goto drop_n_acct;
2168
2169         if (skb_shared(skb)) {
2170                 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2171                 if (nskb == NULL)
2172                         goto drop_n_acct;
2173
2174                 if (skb_head != skb->data) {
2175                         skb->data = skb_head;
2176                         skb->len = skb_len;
2177                 }
2178                 consume_skb(skb);
2179                 skb = nskb;
2180         }
2181
2182         sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2183
2184         sll = &PACKET_SKB_CB(skb)->sa.ll;
2185         sll->sll_hatype = dev->type;
2186         sll->sll_pkttype = skb->pkt_type;
2187         if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2188                 sll->sll_ifindex = orig_dev->ifindex;
2189         else
2190                 sll->sll_ifindex = dev->ifindex;
2191
2192         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2193
2194         /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2195          * Use their space for storing the original skb length.
2196          */
2197         PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2198
2199         if (pskb_trim(skb, snaplen))
2200                 goto drop_n_acct;
2201
2202         skb_set_owner_r(skb, sk);
2203         skb->dev = NULL;
2204         skb_dst_drop(skb);
2205
2206         /* drop conntrack reference */
2207         nf_reset_ct(skb);
2208
2209         spin_lock(&sk->sk_receive_queue.lock);
2210         po->stats.stats1.tp_packets++;
2211         sock_skb_set_dropcount(sk, skb);
2212         skb_clear_delivery_time(skb);
2213         __skb_queue_tail(&sk->sk_receive_queue, skb);
2214         spin_unlock(&sk->sk_receive_queue.lock);
2215         sk->sk_data_ready(sk);
2216         return 0;
2217
2218 drop_n_acct:
2219         is_drop_n_account = true;
2220         atomic_inc(&po->tp_drops);
2221         atomic_inc(&sk->sk_drops);
2222
2223 drop_n_restore:
2224         if (skb_head != skb->data && skb_shared(skb)) {
2225                 skb->data = skb_head;
2226                 skb->len = skb_len;
2227         }
2228 drop:
2229         if (!is_drop_n_account)
2230                 consume_skb(skb);
2231         else
2232                 kfree_skb(skb);
2233         return 0;
2234 }
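
/* Usage sketch (illustrative): the sockaddr_ll filled in above is what
 * a plain (non-mmap) receiver gets back, assuming a bound socket "fd"
 * and a buffer "buf":
 *
 *	struct sockaddr_ll sll;
 *	socklen_t alen = sizeof(sll);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&sll, &alen);
 *	// sll.sll_ifindex, sll.sll_pkttype and sll.sll_addr are now set
 */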
2235
2236 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2237                        struct packet_type *pt, struct net_device *orig_dev)
2238 {
2239         struct sock *sk;
2240         struct packet_sock *po;
2241         struct sockaddr_ll *sll;
2242         union tpacket_uhdr h;
2243         u8 *skb_head = skb->data;
2244         int skb_len = skb->len;
2245         unsigned int snaplen, res;
2246         unsigned long status = TP_STATUS_USER;
2247         unsigned short macoff, hdrlen;
2248         unsigned int netoff;
2249         struct sk_buff *copy_skb = NULL;
2250         struct timespec64 ts;
2251         __u32 ts_status;
2252         bool is_drop_n_account = false;
2253         unsigned int slot_id = 0;
2254         bool do_vnet = false;
2255
2256         /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2257          * We may add members to them up to the current aligned size without forcing
2258          * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2259          */
2260         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2261         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2262
2263         if (skb->pkt_type == PACKET_LOOPBACK)
2264                 goto drop;
2265
2266         sk = pt->af_packet_priv;
2267         po = pkt_sk(sk);
2268
2269         if (!net_eq(dev_net(dev), sock_net(sk)))
2270                 goto drop;
2271
2272         if (dev_has_header(dev)) {
2273                 if (sk->sk_type != SOCK_DGRAM)
2274                         skb_push(skb, skb->data - skb_mac_header(skb));
2275                 else if (skb->pkt_type == PACKET_OUTGOING) {
2276                         /* Special case: outgoing packets have ll header at head */
2277                         skb_pull(skb, skb_network_offset(skb));
2278                 }
2279         }
2280
2281         snaplen = skb->len;
2282
2283         res = run_filter(skb, sk, snaplen);
2284         if (!res)
2285                 goto drop_n_restore;
2286
2287         /* If we are flooded, just give up */
2288         if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2289                 atomic_inc(&po->tp_drops);
2290                 goto drop_n_restore;
2291         }
2292
2293         if (skb->ip_summed == CHECKSUM_PARTIAL)
2294                 status |= TP_STATUS_CSUMNOTREADY;
2295         else if (skb->pkt_type != PACKET_OUTGOING &&
2296                  skb_csum_unnecessary(skb))
2297                 status |= TP_STATUS_CSUM_VALID;
2298         if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
2299                 status |= TP_STATUS_GSO_TCP;
2300
2301         if (snaplen > res)
2302                 snaplen = res;
2303
2304         if (sk->sk_type == SOCK_DGRAM) {
2305                 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2306                                   po->tp_reserve;
2307         } else {
2308                 unsigned int maclen = skb_network_offset(skb);
2309                 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2310                                        (maclen < 16 ? 16 : maclen)) +
2311                                        po->tp_reserve;
2312                 if (packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) {
2313                         netoff += sizeof(struct virtio_net_hdr);
2314                         do_vnet = true;
2315                 }
2316                 macoff = netoff - maclen;
2317         }
2318         if (netoff > USHRT_MAX) {
2319                 atomic_inc(&po->tp_drops);
2320                 goto drop_n_restore;
2321         }
2322         if (po->tp_version <= TPACKET_V2) {
2323                 if (macoff + snaplen > po->rx_ring.frame_size) {
2324                         if (po->copy_thresh &&
2325                             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2326                                 if (skb_shared(skb)) {
2327                                         copy_skb = skb_clone(skb, GFP_ATOMIC);
2328                                 } else {
2329                                         copy_skb = skb_get(skb);
2330                                         skb_head = skb->data;
2331                                 }
2332                                 if (copy_skb) {
2333                                         memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2334                                                sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
2335                                         skb_set_owner_r(copy_skb, sk);
2336                                 }
2337                         }
2338                         snaplen = po->rx_ring.frame_size - macoff;
2339                         if ((int)snaplen < 0) {
2340                                 snaplen = 0;
2341                                 do_vnet = false;
2342                         }
2343                 }
2344         } else if (unlikely(macoff + snaplen >
2345                             GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2346                 u32 nval;
2347
2348                 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2349                 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2350                             snaplen, nval, macoff);
2351                 snaplen = nval;
2352                 if (unlikely((int)snaplen < 0)) {
2353                         snaplen = 0;
2354                         macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2355                         do_vnet = false;
2356                 }
2357         }
2358         spin_lock(&sk->sk_receive_queue.lock);
2359         h.raw = packet_current_rx_frame(po, skb,
2360                                         TP_STATUS_KERNEL, (macoff+snaplen));
2361         if (!h.raw)
2362                 goto drop_n_account;
2363
2364         if (po->tp_version <= TPACKET_V2) {
2365                 slot_id = po->rx_ring.head;
2366                 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2367                         goto drop_n_account;
2368                 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2369         }
2370
2371         if (do_vnet &&
2372             virtio_net_hdr_from_skb(skb, h.raw + macoff -
2373                                     sizeof(struct virtio_net_hdr),
2374                                     vio_le(), true, 0)) {
2375                 if (po->tp_version == TPACKET_V3)
2376                         prb_clear_blk_fill_status(&po->rx_ring);
2377                 goto drop_n_account;
2378         }
2379
2380         if (po->tp_version <= TPACKET_V2) {
2381                 packet_increment_rx_head(po, &po->rx_ring);
2382         /*
2383          * LOSING will be reported until you read the stats,
2384          * because it's COR - Clear On Read.
2385          * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2386          * at the packet level.
2387          */
2388                 if (atomic_read(&po->tp_drops))
2389                         status |= TP_STATUS_LOSING;
2390         }
2391
2392         po->stats.stats1.tp_packets++;
2393         if (copy_skb) {
2394                 status |= TP_STATUS_COPY;
2395                 skb_clear_delivery_time(copy_skb);
2396                 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2397         }
2398         spin_unlock(&sk->sk_receive_queue.lock);
2399
2400         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2401
2402         /* Always timestamp; prefer an existing software timestamp taken
2403          * closer to the time of capture.
2404          */
2405         ts_status = tpacket_get_timestamp(skb, &ts,
2406                                           READ_ONCE(po->tp_tstamp) |
2407                                           SOF_TIMESTAMPING_SOFTWARE);
2408         if (!ts_status)
2409                 ktime_get_real_ts64(&ts);
2410
2411         status |= ts_status;
2412
2413         switch (po->tp_version) {
2414         case TPACKET_V1:
2415                 h.h1->tp_len = skb->len;
2416                 h.h1->tp_snaplen = snaplen;
2417                 h.h1->tp_mac = macoff;
2418                 h.h1->tp_net = netoff;
2419                 h.h1->tp_sec = ts.tv_sec;
2420                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2421                 hdrlen = sizeof(*h.h1);
2422                 break;
2423         case TPACKET_V2:
2424                 h.h2->tp_len = skb->len;
2425                 h.h2->tp_snaplen = snaplen;
2426                 h.h2->tp_mac = macoff;
2427                 h.h2->tp_net = netoff;
2428                 h.h2->tp_sec = ts.tv_sec;
2429                 h.h2->tp_nsec = ts.tv_nsec;
2430                 if (skb_vlan_tag_present(skb)) {
2431                         h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2432                         h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2433                         status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2434                 } else {
2435                         h.h2->tp_vlan_tci = 0;
2436                         h.h2->tp_vlan_tpid = 0;
2437                 }
2438                 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2439                 hdrlen = sizeof(*h.h2);
2440                 break;
2441         case TPACKET_V3:
2442                 /* tp_nxt_offset,vlan are already populated above.
2443                 /* tp_next_offset and vlan are already populated above,
2444                  * so DON'T clear those fields here.
2445                 h.h3->tp_status |= status;
2446                 h.h3->tp_len = skb->len;
2447                 h.h3->tp_snaplen = snaplen;
2448                 h.h3->tp_mac = macoff;
2449                 h.h3->tp_net = netoff;
2450                 h.h3->tp_sec  = ts.tv_sec;
2451                 h.h3->tp_nsec = ts.tv_nsec;
2452                 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2453                 hdrlen = sizeof(*h.h3);
2454                 break;
2455         default:
2456                 BUG();
2457         }
2458
2459         sll = h.raw + TPACKET_ALIGN(hdrlen);
2460         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2461         sll->sll_family = AF_PACKET;
2462         sll->sll_hatype = dev->type;
2463         sll->sll_protocol = skb->protocol;
2464         sll->sll_pkttype = skb->pkt_type;
2465         if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2466                 sll->sll_ifindex = orig_dev->ifindex;
2467         else
2468                 sll->sll_ifindex = dev->ifindex;
2469
2470         smp_mb();
2471
2472 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2473         if (po->tp_version <= TPACKET_V2) {
2474                 u8 *start, *end;
2475
2476                 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2477                                         macoff + snaplen);
2478
2479                 for (start = h.raw; start < end; start += PAGE_SIZE)
2480                         flush_dcache_page(pgv_to_page(start));
2481         }
2482         smp_wmb();
2483 #endif
2484
2485         if (po->tp_version <= TPACKET_V2) {
2486                 spin_lock(&sk->sk_receive_queue.lock);
2487                 __packet_set_status(po, h.raw, status);
2488                 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2489                 spin_unlock(&sk->sk_receive_queue.lock);
2490                 sk->sk_data_ready(sk);
2491         } else if (po->tp_version == TPACKET_V3) {
2492                 prb_clear_blk_fill_status(&po->rx_ring);
2493         }
2494
2495 drop_n_restore:
2496         if (skb_head != skb->data && skb_shared(skb)) {
2497                 skb->data = skb_head;
2498                 skb->len = skb_len;
2499         }
2500 drop:
2501         if (!is_drop_n_account)
2502                 consume_skb(skb);
2503         else
2504                 kfree_skb(skb);
2505         return 0;
2506
2507 drop_n_account:
2508         spin_unlock(&sk->sk_receive_queue.lock);
2509         atomic_inc(&po->tp_drops);
2510         is_drop_n_account = true;
2511
2512         sk->sk_data_ready(sk);
2513         kfree_skb(copy_skb);
2514         goto drop_n_restore;
2515 }
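
/* Usage sketch (illustrative, TPACKET_V2): userspace maps the RX ring
 * and watches the per-frame status word that tpacket_rcv() above flips
 * to TP_STATUS_USER:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096, .tp_block_nr = 64,
 *		.tp_frame_size = 2048, .tp_frame_nr  = 128,
 *	};
 *	int ver = TPACKET_V2;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct tpacket2_hdr *hdr = ring;	// first frame slot
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		// frame data at (char *)hdr + hdr->tp_mac, hdr->tp_snaplen long
 *		hdr->tp_status = TP_STATUS_KERNEL;	// return slot to kernel
 *	}
 */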
2516
2517 static void tpacket_destruct_skb(struct sk_buff *skb)
2518 {
2519         struct packet_sock *po = pkt_sk(skb->sk);
2520
2521         if (likely(po->tx_ring.pg_vec)) {
2522                 void *ph;
2523                 __u32 ts;
2524
2525                 ph = skb_zcopy_get_nouarg(skb);
2526                 packet_dec_pending(&po->tx_ring);
2527
2528                 ts = __packet_set_timestamp(po, ph, skb);
2529                 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2530
2531                 if (!packet_read_pending(&po->tx_ring))
2532                         complete(&po->skb_completion);
2533         }
2534
2535         sock_wfree(skb);
2536 }
2537
2538 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2539 {
2540         if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2541             (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2542              __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2543               __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2544                 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2545                          __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2546                         __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2547
2548         if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2549                 return -EINVAL;
2550
2551         return 0;
2552 }
2553
2554 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2555                                  struct virtio_net_hdr *vnet_hdr)
2556 {
2557         if (*len < sizeof(*vnet_hdr))
2558                 return -EINVAL;
2559         *len -= sizeof(*vnet_hdr);
2560
2561         if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2562                 return -EFAULT;
2563
2564         return __packet_snd_vnet_parse(vnet_hdr, *len);
2565 }
2566
2567 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2568                 void *frame, struct net_device *dev, void *data, int tp_len,
2569                 __be16 proto, unsigned char *addr, int hlen, int copylen,
2570                 const struct sockcm_cookie *sockc)
2571 {
2572         union tpacket_uhdr ph;
2573         int to_write, offset, len, nr_frags, len_max;
2574         struct socket *sock = po->sk.sk_socket;
2575         struct page *page;
2576         int err;
2577
2578         ph.raw = frame;
2579
2580         skb->protocol = proto;
2581         skb->dev = dev;
2582         skb->priority = po->sk.sk_priority;
2583         skb->mark = po->sk.sk_mark;
2584         skb->tstamp = sockc->transmit_time;
2585         skb_setup_tx_timestamp(skb, sockc->tsflags);
2586         skb_zcopy_set_nouarg(skb, ph.raw);
2587
2588         skb_reserve(skb, hlen);
2589         skb_reset_network_header(skb);
2590
2591         to_write = tp_len;
2592
2593         if (sock->type == SOCK_DGRAM) {
2594                 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2595                                 NULL, tp_len);
2596                 if (unlikely(err < 0))
2597                         return -EINVAL;
2598         } else if (copylen) {
2599                 int hdrlen = min_t(int, copylen, tp_len);
2600
2601                 skb_push(skb, dev->hard_header_len);
2602                 skb_put(skb, copylen - dev->hard_header_len);
2603                 err = skb_store_bits(skb, 0, data, hdrlen);
2604                 if (unlikely(err))
2605                         return err;
2606                 if (!dev_validate_header(dev, skb->data, hdrlen))
2607                         return -EINVAL;
2608
2609                 data += hdrlen;
2610                 to_write -= hdrlen;
2611         }
2612
2613         offset = offset_in_page(data);
2614         len_max = PAGE_SIZE - offset;
2615         len = ((to_write > len_max) ? len_max : to_write);
2616
2617         skb->data_len = to_write;
2618         skb->len += to_write;
2619         skb->truesize += to_write;
2620         refcount_add(to_write, &po->sk.sk_wmem_alloc);
2621
2622         while (likely(to_write)) {
2623                 nr_frags = skb_shinfo(skb)->nr_frags;
2624
2625                 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2626                         pr_err("Packet exceeds the number of skb frags (%lu)\n",
2627                                MAX_SKB_FRAGS);
2628                         return -EFAULT;
2629                 }
2630
2631                 page = pgv_to_page(data);
2632                 data += len;
2633                 flush_dcache_page(page);
2634                 get_page(page);
2635                 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2636                 to_write -= len;
2637                 offset = 0;
2638                 len_max = PAGE_SIZE;
2639                 len = ((to_write > len_max) ? len_max : to_write);
2640         }
2641
2642         packet_parse_headers(skb, sock);
2643
2644         return tp_len;
2645 }
2646
2647 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2648                                 int size_max, void **data)
2649 {
2650         union tpacket_uhdr ph;
2651         int tp_len, off;
2652
2653         ph.raw = frame;
2654
2655         switch (po->tp_version) {
2656         case TPACKET_V3:
2657                 if (ph.h3->tp_next_offset != 0) {
2658                         pr_warn_once("variable sized slot not supported\n");
2659                         return -EINVAL;
2660                 }
2661                 tp_len = ph.h3->tp_len;
2662                 break;
2663         case TPACKET_V2:
2664                 tp_len = ph.h2->tp_len;
2665                 break;
2666         default:
2667                 tp_len = ph.h1->tp_len;
2668                 break;
2669         }
2670         if (unlikely(tp_len > size_max)) {
2671                 pr_err("packet size is too large (%d > %d)\n", tp_len, size_max);
2672                 return -EMSGSIZE;
2673         }
2674
2675         if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) {
2676                 int off_min, off_max;
2677
2678                 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2679                 off_max = po->tx_ring.frame_size - tp_len;
2680                 if (po->sk.sk_type == SOCK_DGRAM) {
2681                         switch (po->tp_version) {
2682                         case TPACKET_V3:
2683                                 off = ph.h3->tp_net;
2684                                 break;
2685                         case TPACKET_V2:
2686                                 off = ph.h2->tp_net;
2687                                 break;
2688                         default:
2689                                 off = ph.h1->tp_net;
2690                                 break;
2691                         }
2692                 } else {
2693                         switch (po->tp_version) {
2694                         case TPACKET_V3:
2695                                 off = ph.h3->tp_mac;
2696                                 break;
2697                         case TPACKET_V2:
2698                                 off = ph.h2->tp_mac;
2699                                 break;
2700                         default:
2701                                 off = ph.h1->tp_mac;
2702                                 break;
2703                         }
2704                 }
2705                 if (unlikely((off < off_min) || (off_max < off)))
2706                         return -EINVAL;
2707         } else {
2708                 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2709         }
2710
2711         *data = frame + off;
2712         return tp_len;
2713 }
2714
2715 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2716 {
2717         struct sk_buff *skb = NULL;
2718         struct net_device *dev;
2719         struct virtio_net_hdr *vnet_hdr = NULL;
2720         struct sockcm_cookie sockc;
2721         __be16 proto;
2722         int err, reserve = 0;
2723         void *ph;
2724         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2725         bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2726         unsigned char *addr = NULL;
2727         int tp_len, size_max;
2728         void *data;
2729         int len_sum = 0;
2730         int status = TP_STATUS_AVAILABLE;
2731         int hlen, tlen, copylen = 0;
2732         long timeo = 0;
2733
2734         mutex_lock(&po->pg_vec_lock);
2735
2736         /* The packet_sendmsg() check on tx_ring.pg_vec was lockless;
2737          * we need to confirm it under the protection of pg_vec_lock.
2738          */
2739         if (unlikely(!po->tx_ring.pg_vec)) {
2740                 err = -EBUSY;
2741                 goto out;
2742         }
2743         if (likely(saddr == NULL)) {
2744                 dev     = packet_cached_dev_get(po);
2745                 proto   = READ_ONCE(po->num);
2746         } else {
2747                 err = -EINVAL;
2748                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2749                         goto out;
2750                 if (msg->msg_namelen < (saddr->sll_halen
2751                                         + offsetof(struct sockaddr_ll,
2752                                                 sll_addr)))
2753                         goto out;
2754                 proto   = saddr->sll_protocol;
2755                 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2756                 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2757                         if (dev && msg->msg_namelen < dev->addr_len +
2758                                    offsetof(struct sockaddr_ll, sll_addr))
2759                                 goto out_put;
2760                         addr = saddr->sll_addr;
2761                 }
2762         }
2763
2764         err = -ENXIO;
2765         if (unlikely(dev == NULL))
2766                 goto out;
2767         err = -ENETDOWN;
2768         if (unlikely(!(dev->flags & IFF_UP)))
2769                 goto out_put;
2770
2771         sockcm_init(&sockc, &po->sk);
2772         if (msg->msg_controllen) {
2773                 err = sock_cmsg_send(&po->sk, msg, &sockc);
2774                 if (unlikely(err))
2775                         goto out_put;
2776         }
2777
2778         if (po->sk.sk_socket->type == SOCK_RAW)
2779                 reserve = dev->hard_header_len;
2780         size_max = po->tx_ring.frame_size
2781                 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2782
2783         if ((size_max > dev->mtu + reserve + VLAN_HLEN) &&
2784             !packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR))
2785                 size_max = dev->mtu + reserve + VLAN_HLEN;
2786
2787         reinit_completion(&po->skb_completion);
2788
2789         do {
2790                 ph = packet_current_frame(po, &po->tx_ring,
2791                                           TP_STATUS_SEND_REQUEST);
2792                 if (unlikely(ph == NULL)) {
2793                         if (need_wait && skb) {
2794                                 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2795                                 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2796                                 if (timeo <= 0) {
2797                                         err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2798                                         goto out_put;
2799                                 }
2800                         }
2801                         /* check for additional frames */
2802                         continue;
2803                 }
2804
2805                 skb = NULL;
2806                 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2807                 if (tp_len < 0)
2808                         goto tpacket_error;
2809
2810                 status = TP_STATUS_SEND_REQUEST;
2811                 hlen = LL_RESERVED_SPACE(dev);
2812                 tlen = dev->needed_tailroom;
2813                 if (packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) {
2814                         vnet_hdr = data;
2815                         data += sizeof(*vnet_hdr);
2816                         tp_len -= sizeof(*vnet_hdr);
2817                         if (tp_len < 0 ||
2818                             __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2819                                 tp_len = -EINVAL;
2820                                 goto tpacket_error;
2821                         }
2822                         copylen = __virtio16_to_cpu(vio_le(),
2823                                                     vnet_hdr->hdr_len);
2824                 }
2825                 copylen = max_t(int, copylen, dev->hard_header_len);
2826                 skb = sock_alloc_send_skb(&po->sk,
2827                                 hlen + tlen + sizeof(struct sockaddr_ll) +
2828                                 (copylen - dev->hard_header_len),
2829                                 !need_wait, &err);
2830
2831                 if (unlikely(skb == NULL)) {
2832                         /* we assume the socket was initially writeable ... */
2833                         if (likely(len_sum > 0))
2834                                 err = len_sum;
2835                         goto out_status;
2836                 }
2837                 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2838                                           addr, hlen, copylen, &sockc);
2839                 if (likely(tp_len >= 0) &&
2840                     tp_len > dev->mtu + reserve &&
2841                     !packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR) &&
2842                     !packet_extra_vlan_len_allowed(dev, skb))
2843                         tp_len = -EMSGSIZE;
2844
2845                 if (unlikely(tp_len < 0)) {
2846 tpacket_error:
2847                         if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) {
2848                                 __packet_set_status(po, ph,
2849                                                 TP_STATUS_AVAILABLE);
2850                                 packet_increment_head(&po->tx_ring);
2851                                 kfree_skb(skb);
2852                                 continue;
2853                         } else {
2854                                 status = TP_STATUS_WRONG_FORMAT;
2855                                 err = tp_len;
2856                                 goto out_status;
2857                         }
2858                 }
2859
2860                 if (packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) {
2861                         if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2862                                 tp_len = -EINVAL;
2863                                 goto tpacket_error;
2864                         }
2865                         virtio_net_hdr_set_proto(skb, vnet_hdr);
2866                 }
2867
2868                 skb->destructor = tpacket_destruct_skb;
2869                 __packet_set_status(po, ph, TP_STATUS_SENDING);
2870                 packet_inc_pending(&po->tx_ring);
2871
2872                 status = TP_STATUS_SEND_REQUEST;
2873                 /* Paired with WRITE_ONCE() in packet_setsockopt() */
2874                 err = READ_ONCE(po->xmit)(skb);
2875                 if (unlikely(err != 0)) {
2876                         if (err > 0)
2877                                 err = net_xmit_errno(err);
2878                         if (err && __packet_get_status(po, ph) ==
2879                                    TP_STATUS_AVAILABLE) {
2880                                 /* skb was destructed already */
2881                                 skb = NULL;
2882                                 goto out_status;
2883                         }
2884                         /*
2885                          * skb was dropped but not destructed yet;
2886                          * let's treat it like congestion or err < 0
2887                          */
2888                         err = 0;
2889                 }
2890                 packet_increment_head(&po->tx_ring);
2891                 len_sum += tp_len;
2892         } while (likely((ph != NULL) ||
2893                 /* Note: packet_read_pending() might be slow if we have
2894                  * to call it as it's a per-cpu variable, but in the fast path
2895                  * we already short-circuit the loop with the first
2896                  * condition, and luckily don't have to take that path
2897                  * anyway.
2898                  */
2899                  (need_wait && packet_read_pending(&po->tx_ring))));
2900
2901         err = len_sum;
2902         goto out_put;
2903
2904 out_status:
2905         __packet_set_status(po, ph, status);
2906         kfree_skb(skb);
2907 out_put:
2908         dev_put(dev);
2909 out:
2910         mutex_unlock(&po->pg_vec_lock);
2911         return err;
2912 }
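
/* Usage sketch (illustrative, TPACKET_V2 TX ring): userspace fills a
 * mmap'ed slot, marks it TP_STATUS_SEND_REQUEST and kicks the loop
 * above with a plain send(); tpacket_destruct_skb() later flips the
 * slot back to TP_STATUS_AVAILABLE:
 *
 *	struct tpacket2_hdr *hdr = slot;	// current TX slot
 *	char *data = (char *)hdr + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *	memcpy(data, frame, frame_len);		// frame: a complete L2 frame
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);			// or flags = MSG_DONTWAIT
 */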
2913
2914 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2915                                         size_t reserve, size_t len,
2916                                         size_t linear, int noblock,
2917                                         int *err)
2918 {
2919         struct sk_buff *skb;
2920
2921         /* Under a page?  Don't bother with paged skb. */
2922         if (prepad + len < PAGE_SIZE || !linear)
2923                 linear = len;
2924
2925         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2926                                    err, 0);
2927         if (!skb)
2928                 return NULL;
2929
2930         skb_reserve(skb, reserve);
2931         skb_put(skb, linear);
2932         skb->data_len = len - linear;
2933         skb->len += len - linear;
2934
2935         return skb;
2936 }
2937
2938 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2939 {
2940         struct sock *sk = sock->sk;
2941         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2942         struct sk_buff *skb;
2943         struct net_device *dev;
2944         __be16 proto;
2945         unsigned char *addr = NULL;
2946         int err, reserve = 0;
2947         struct sockcm_cookie sockc;
2948         struct virtio_net_hdr vnet_hdr = { 0 };
2949         int offset = 0;
2950         struct packet_sock *po = pkt_sk(sk);
2951         bool has_vnet_hdr = false;
2952         int hlen, tlen, linear;
2953         int extra_len = 0;
2954
2955         /*
2956          *      Get and verify the address.
2957          */
2958
2959         if (likely(saddr == NULL)) {
2960                 dev     = packet_cached_dev_get(po);
2961                 proto   = READ_ONCE(po->num);
2962         } else {
2963                 err = -EINVAL;
2964                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2965                         goto out;
2966                 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2967                         goto out;
2968                 proto   = saddr->sll_protocol;
2969                 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2970                 if (sock->type == SOCK_DGRAM) {
2971                         if (dev && msg->msg_namelen < dev->addr_len +
2972                                    offsetof(struct sockaddr_ll, sll_addr))
2973                                 goto out_unlock;
2974                         addr = saddr->sll_addr;
2975                 }
2976         }
2977
2978         err = -ENXIO;
2979         if (unlikely(dev == NULL))
2980                 goto out_unlock;
2981         err = -ENETDOWN;
2982         if (unlikely(!(dev->flags & IFF_UP)))
2983                 goto out_unlock;
2984
2985         sockcm_init(&sockc, sk);
2986         sockc.mark = sk->sk_mark;
2987         if (msg->msg_controllen) {
2988                 err = sock_cmsg_send(sk, msg, &sockc);
2989                 if (unlikely(err))
2990                         goto out_unlock;
2991         }
2992
2993         if (sock->type == SOCK_RAW)
2994                 reserve = dev->hard_header_len;
2995         if (packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) {
2996                 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2997                 if (err)
2998                         goto out_unlock;
2999                 has_vnet_hdr = true;
3000         }
3001
3002         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
3003                 if (!netif_supports_nofcs(dev)) {
3004                         err = -EPROTONOSUPPORT;
3005                         goto out_unlock;
3006                 }
3007                 extra_len = 4; /* We're doing our own CRC */
3008         }
3009
3010         err = -EMSGSIZE;
3011         if (!vnet_hdr.gso_type &&
3012             (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
3013                 goto out_unlock;
3014
3015         err = -ENOBUFS;
3016         hlen = LL_RESERVED_SPACE(dev);
3017         tlen = dev->needed_tailroom;
3018         linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
3019         linear = max(linear, min_t(int, len, dev->hard_header_len));
3020         skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
3021                                msg->msg_flags & MSG_DONTWAIT, &err);
3022         if (skb == NULL)
3023                 goto out_unlock;
3024
3025         skb_reset_network_header(skb);
3026
3027         err = -EINVAL;
3028         if (sock->type == SOCK_DGRAM) {
3029                 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
3030                 if (unlikely(offset < 0))
3031                         goto out_free;
3032         } else if (reserve) {
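                /* SOCK_RAW: the caller supplies the link-layer header
                 * itself, so walk skb->data back into the reserved
                 * headroom to make room for it.
                 */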
3033                 skb_reserve(skb, -reserve);
3034                 if (len < reserve + sizeof(struct ipv6hdr) &&
3035                     dev->min_header_len != dev->hard_header_len)
3036                         skb_reset_network_header(skb);
3037         }
3038
3039         /* Returns -EFAULT on error */
3040         err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
3041         if (err)
3042                 goto out_free;
3043
3044         if ((sock->type == SOCK_RAW &&
3045              !dev_validate_header(dev, skb->data, len)) || !skb->len) {
3046                 err = -EINVAL;
3047                 goto out_free;
3048         }
3049
3050         skb_setup_tx_timestamp(skb, sockc.tsflags);
3051
3052         if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
3053             !packet_extra_vlan_len_allowed(dev, skb)) {
3054                 err = -EMSGSIZE;
3055                 goto out_free;
3056         }
3057
3058         skb->protocol = proto;
3059         skb->dev = dev;
3060         skb->priority = sk->sk_priority;
3061         skb->mark = sockc.mark;
3062         skb->tstamp = sockc.transmit_time;
3063
3064         if (unlikely(extra_len == 4))
3065                 skb->no_fcs = 1;
3066
3067         packet_parse_headers(skb, sock);
3068
3069         if (has_vnet_hdr) {
3070                 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3071                 if (err)
3072                         goto out_free;
3073                 len += sizeof(vnet_hdr);
3074                 virtio_net_hdr_set_proto(skb, &vnet_hdr);
3075         }
3076
3077         /* Paired with WRITE_ONCE() in packet_setsockopt() */
3078         err = READ_ONCE(po->xmit)(skb);
3079         if (unlikely(err != 0)) {
3080                 if (err > 0)
3081                         err = net_xmit_errno(err);
3082                 if (err)
3083                         goto out_unlock;
3084         }
3085
3086         dev_put(dev);
3087
3088         return len;
3089
3090 out_free:
3091         kfree_skb(skb);
3092 out_unlock:
3093         dev_put(dev);
3094 out:
3095         return err;
3096 }
3097
3098 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3099 {
3100         struct sock *sk = sock->sk;
3101         struct packet_sock *po = pkt_sk(sk);
3102
3103         /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3104          * tpacket_snd() will redo the check safely.
3105          */
3106         if (data_race(po->tx_ring.pg_vec))
3107                 return tpacket_snd(po, msg);
3108
3109         return packet_snd(sock, msg, len);
3110 }
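
/* Illustrative userspace sketch (not kernel code): the non-ring path
 * above is what a plain sendto() on a SOCK_DGRAM packet socket ends up
 * in; the kernel builds the link-layer header from sll_addr. The
 * interface name and broadcast MAC are examples; error handling is
 * elided.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *		.sll_halen    = ETH_ALEN,
 *		.sll_addr     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */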
3111
3112 /*
3113  *      Close a PACKET socket. This is fairly simple. We immediately go
3114  *      to 'closed' state and remove our protocol entry in the device list.
3115  */
3116
3117 static int packet_release(struct socket *sock)
3118 {
3119         struct sock *sk = sock->sk;
3120         struct packet_sock *po;
3121         struct packet_fanout *f;
3122         struct net *net;
3123         union tpacket_req_u req_u;
3124
3125         if (!sk)
3126                 return 0;
3127
3128         net = sock_net(sk);
3129         po = pkt_sk(sk);
3130
3131         mutex_lock(&net->packet.sklist_lock);
3132         sk_del_node_init_rcu(sk);
3133         mutex_unlock(&net->packet.sklist_lock);
3134
3135         sock_prot_inuse_add(net, sk->sk_prot, -1);
3136
3137         spin_lock(&po->bind_lock);
3138         unregister_prot_hook(sk, false);
3139         packet_cached_dev_reset(po);
3140
3141         if (po->prot_hook.dev) {
3142                 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3143                 po->prot_hook.dev = NULL;
3144         }
3145         spin_unlock(&po->bind_lock);
3146
3147         packet_flush_mclist(sk);
3148
3149         lock_sock(sk);
3150         if (po->rx_ring.pg_vec) {
3151                 memset(&req_u, 0, sizeof(req_u));
3152                 packet_set_ring(sk, &req_u, 1, 0);
3153         }
3154
3155         if (po->tx_ring.pg_vec) {
3156                 memset(&req_u, 0, sizeof(req_u));
3157                 packet_set_ring(sk, &req_u, 1, 1);
3158         }
3159         release_sock(sk);
3160
3161         f = fanout_release(sk);
3162
3163         synchronize_net();
3164
3165         kfree(po->rollover);
3166         if (f) {
3167                 fanout_release_data(f);
3168                 kvfree(f);
3169         }
3170         /*
3171          *      Now the socket is dead. No more input will appear.
3172          */
3173         sock_orphan(sk);
3174         sock->sk = NULL;
3175
3176         /* Purge queues */
3177
3178         skb_queue_purge(&sk->sk_receive_queue);
3179         packet_free_pending(po);
3180
3181         sock_put(sk);
3182         return 0;
3183 }
3184
3185 /*
3186  *      Attach a packet hook.
3187  */
3188
3189 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3190                           __be16 proto)
3191 {
3192         struct packet_sock *po = pkt_sk(sk);
3193         struct net_device *dev = NULL;
3194         bool unlisted = false;
3195         bool need_rehook;
3196         int ret = 0;
3197
3198         lock_sock(sk);
3199         spin_lock(&po->bind_lock);
3200         rcu_read_lock();
3201
3202         if (po->fanout) {
3203                 ret = -EINVAL;
3204                 goto out_unlock;
3205         }
3206
3207         if (name) {
3208                 dev = dev_get_by_name_rcu(sock_net(sk), name);
3209                 if (!dev) {
3210                         ret = -ENODEV;
3211                         goto out_unlock;
3212                 }
3213         } else if (ifindex) {
3214                 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3215                 if (!dev) {
3216                         ret = -ENODEV;
3217                         goto out_unlock;
3218                 }
3219         }
3220
3221         need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
3222
3223         if (need_rehook) {
3224                 dev_hold(dev);
3225                 if (po->running) {
3226                         rcu_read_unlock();
3227                         /* prevents packet_notifier() from calling
3228                          * register_prot_hook()
3229                          */
3230                         WRITE_ONCE(po->num, 0);
3231                         __unregister_prot_hook(sk, true);
3232                         rcu_read_lock();
3233                         if (dev)
3234                                 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3235                                                                  dev->ifindex);
3236                 }
3237
3238                 BUG_ON(po->running);
3239                 WRITE_ONCE(po->num, proto);
3240                 po->prot_hook.type = proto;
3241
3242                 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3243
3244                 if (unlikely(unlisted)) {
3245                         po->prot_hook.dev = NULL;
3246                         WRITE_ONCE(po->ifindex, -1);
3247                         packet_cached_dev_reset(po);
3248                 } else {
3249                         netdev_hold(dev, &po->prot_hook.dev_tracker,
3250                                     GFP_ATOMIC);
3251                         po->prot_hook.dev = dev;
3252                         WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3253                         packet_cached_dev_assign(po, dev);
3254                 }
3255                 dev_put(dev);
3256         }
3257
3258         if (proto == 0 || !need_rehook)
3259                 goto out_unlock;
3260
3261         if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3262                 register_prot_hook(sk);
3263         } else {
3264                 sk->sk_err = ENETDOWN;
3265                 if (!sock_flag(sk, SOCK_DEAD))
3266                         sk_error_report(sk);
3267         }
3268
3269 out_unlock:
3270         rcu_read_unlock();
3271         spin_unlock(&po->bind_lock);
3272         release_sock(sk);
3273         return ret;
3274 }
3275
3276 /*
3277  *      Bind a packet socket to a device
3278  */
3279
3280 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3281                             int addr_len)
3282 {
3283         struct sock *sk = sock->sk;
3284         char name[sizeof(uaddr->sa_data_min) + 1];
3285
3286         /*
3287          *      Check legality
3288          */
3289
3290         if (addr_len != sizeof(struct sockaddr))
3291                 return -EINVAL;
3292         /* uaddr->sa_data comes from userspace and is not guaranteed to be
3293          * zero-terminated.
3294          */
3295         memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
3296         name[sizeof(uaddr->sa_data_min)] = 0;
3297
3298         return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3299 }
3300
3301 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3302 {
3303         struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3304         struct sock *sk = sock->sk;
3305
3306         /*
3307          *      Check legality
3308          */
3309
3310         if (addr_len < sizeof(struct sockaddr_ll))
3311                 return -EINVAL;
3312         if (sll->sll_family != AF_PACKET)
3313                 return -EINVAL;
3314
3315         return packet_do_bind(sk, NULL, sll->sll_ifindex,
3316                               sll->sll_protocol ? : pkt_sk(sk)->num);
3317 }
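
/* Illustrative userspace sketch (not kernel code): binding a packet
 * socket to one interface, which lands in packet_do_bind() above. Only
 * sll_family, sll_protocol and sll_ifindex matter here.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */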
3318
3319 static struct proto packet_proto = {
3320         .name     = "PACKET",
3321         .owner    = THIS_MODULE,
3322         .obj_size = sizeof(struct packet_sock),
3323 };
3324
3325 /*
3326  *      Create a packet of type SOCK_PACKET.
3327  */
3328
3329 static int packet_create(struct net *net, struct socket *sock, int protocol,
3330                          int kern)
3331 {
3332         struct sock *sk;
3333         struct packet_sock *po;
3334         __be16 proto = (__force __be16)protocol; /* weird, but documented */
3335         int err;
3336
3337         if (!ns_capable(net->user_ns, CAP_NET_RAW))
3338                 return -EPERM;
3339         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3340             sock->type != SOCK_PACKET)
3341                 return -ESOCKTNOSUPPORT;
3342
3343         sock->state = SS_UNCONNECTED;
3344
3345         err = -ENOBUFS;
3346         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3347         if (sk == NULL)
3348                 goto out;
3349
3350         sock->ops = &packet_ops;
3351         if (sock->type == SOCK_PACKET)
3352                 sock->ops = &packet_ops_spkt;
3353
3354         sock_init_data(sock, sk);
3355
3356         po = pkt_sk(sk);
3357         init_completion(&po->skb_completion);
3358         sk->sk_family = PF_PACKET;
3359         po->num = proto;
3360         po->xmit = dev_queue_xmit;
3361
3362         err = packet_alloc_pending(po);
3363         if (err)
3364                 goto out2;
3365
3366         packet_cached_dev_reset(po);
3367
3368         sk->sk_destruct = packet_sock_destruct;
3369
3370         /*
3371          *      Attach a protocol block
3372          */
3373
3374         spin_lock_init(&po->bind_lock);
3375         mutex_init(&po->pg_vec_lock);
3376         po->rollover = NULL;
3377         po->prot_hook.func = packet_rcv;
3378
3379         if (sock->type == SOCK_PACKET)
3380                 po->prot_hook.func = packet_rcv_spkt;
3381
3382         po->prot_hook.af_packet_priv = sk;
3383         po->prot_hook.af_packet_net = sock_net(sk);
3384
3385         if (proto) {
3386                 po->prot_hook.type = proto;
3387                 __register_prot_hook(sk);
3388         }
3389
3390         mutex_lock(&net->packet.sklist_lock);
3391         sk_add_node_tail_rcu(sk, &net->packet.sklist);
3392         mutex_unlock(&net->packet.sklist_lock);
3393
3394         sock_prot_inuse_add(net, &packet_proto, 1);
3395
3396         return 0;
3397 out2:
3398         sk_free(sk);
3399 out:
3400         return err;
3401 }
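
/* Illustrative userspace sketch (not kernel code): passing protocol 0
 * to socket() creates the socket without registering a protocol hook,
 * so nothing is delivered until a later bind() supplies one:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, 0);  // silent until bound
 */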
3402
3403 /*
3404  *      Pull a packet from our receive queue and hand it to the user.
3405  *      If necessary we block.
3406  */
3407
3408 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3409                           int flags)
3410 {
3411         struct sock *sk = sock->sk;
3412         struct sk_buff *skb;
3413         int copied, err;
3414         int vnet_hdr_len = 0;
3415         unsigned int origlen = 0;
3416
3417         err = -EINVAL;
3418         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3419                 goto out;
3420
3421 #if 0
3422         /* What error should we return now? EUNATTACH? */
3423         if (pkt_sk(sk)->ifindex < 0)
3424                 return -ENODEV;
3425 #endif
3426
3427         if (flags & MSG_ERRQUEUE) {
3428                 err = sock_recv_errqueue(sk, msg, len,
3429                                          SOL_PACKET, PACKET_TX_TIMESTAMP);
3430                 goto out;
3431         }
3432
3433         /*
3434          *      Call the generic datagram receiver. This handles all sorts
3435          *      of horrible races and re-entrancy so we can forget about it
3436          *      in the protocol layers.
3437          *
3438          *      Now it will return ENETDOWN if the device has just gone
3439          *      down, but then it will block.
3440          */
3441
3442         skb = skb_recv_datagram(sk, flags, &err);
3443
3444         /*
3445          *      An error occurred, so return it. Because skb_recv_datagram()
3446          *      handles the blocking for us, we don't need to see or worry
3447          *      about blocking retries.
3448          */
3449
3450         if (skb == NULL)
3451                 goto out;
3452
3453         packet_rcv_try_clear_pressure(pkt_sk(sk));
3454
3455         if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_HAS_VNET_HDR)) {
3456                 err = packet_rcv_vnet(msg, skb, &len);
3457                 if (err)
3458                         goto out_free;
3459                 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3460         }
3461
3462         /* You lose any data beyond the buffer you gave. If this worries
3463          * a user program, it can ask the device for its MTU
3464          * anyway.
3465          */
3466         copied = skb->len;
3467         if (copied > len) {
3468                 copied = len;
3469                 msg->msg_flags |= MSG_TRUNC;
3470         }
3471
3472         err = skb_copy_datagram_msg(skb, 0, msg, copied);
3473         if (err)
3474                 goto out_free;
3475
3476         if (sock->type != SOCK_PACKET) {
3477                 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3478
3479                 /* Original length was stored in sockaddr_ll fields */
3480                 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3481                 sll->sll_family = AF_PACKET;
3482                 sll->sll_protocol = skb->protocol;
3483         }
3484
3485         sock_recv_cmsgs(msg, sk, skb);
3486
3487         if (msg->msg_name) {
3488                 const size_t max_len = min(sizeof(skb->cb),
3489                                            sizeof(struct sockaddr_storage));
3490                 int copy_len;
3491
3492                 /* If the address length field is there to be filled
3493                  * in, we fill it in now.
3494                  */
3495                 if (sock->type == SOCK_PACKET) {
3496                         __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3497                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
3498                         copy_len = msg->msg_namelen;
3499                 } else {
3500                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3501
3502                         msg->msg_namelen = sll->sll_halen +
3503                                 offsetof(struct sockaddr_ll, sll_addr);
3504                         copy_len = msg->msg_namelen;
3505                         if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3506                                 memset(msg->msg_name +
3507                                        offsetof(struct sockaddr_ll, sll_addr),
3508                                        0, sizeof(sll->sll_addr));
3509                                 msg->msg_namelen = sizeof(struct sockaddr_ll);
3510                         }
3511                 }
3512                 if (WARN_ON_ONCE(copy_len > max_len)) {
3513                         copy_len = max_len;
3514                         msg->msg_namelen = copy_len;
3515                 }
3516                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3517         }
3518
3519         if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
3520                 struct tpacket_auxdata aux;
3521
3522                 aux.tp_status = TP_STATUS_USER;
3523                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3524                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3525                 else if (skb->pkt_type != PACKET_OUTGOING &&
3526                          skb_csum_unnecessary(skb))
3527                         aux.tp_status |= TP_STATUS_CSUM_VALID;
3528                 if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
3529                         aux.tp_status |= TP_STATUS_GSO_TCP;
3530
3531                 aux.tp_len = origlen;
3532                 aux.tp_snaplen = skb->len;
3533                 aux.tp_mac = 0;
3534                 aux.tp_net = skb_network_offset(skb);
3535                 if (skb_vlan_tag_present(skb)) {
3536                         aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3537                         aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3538                         aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3539                 } else {
3540                         aux.tp_vlan_tci = 0;
3541                         aux.tp_vlan_tpid = 0;
3542                 }
3543                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3544         }
3545
3546         /*
3547          *      Free or return the buffer as appropriate. Again this
3548          *      hides all the races and re-entrancy issues from us.
3549          */
3550         err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3551
3552 out_free:
3553         skb_free_datagram(sk, skb);
3554 out:
3555         return err;
3556 }
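
/* Illustrative userspace sketch (not kernel code): receiving with the
 * PACKET_AUXDATA control message that packet_recvmsg() above emits.
 * Buffer sizes are arbitrary; error handling is elided.
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *
 *	char buf[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	recvmsg(fd, &msg, 0);
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
 *	     c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(c);
 *			// aux->tp_status, aux->tp_vlan_tci etc. are valid
 *		}
 */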
3557
3558 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3559                                int peer)
3560 {
3561         struct net_device *dev;
3562         struct sock *sk = sock->sk;
3563
3564         if (peer)
3565                 return -EOPNOTSUPP;
3566
3567         uaddr->sa_family = AF_PACKET;
3568         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
3569         rcu_read_lock();
3570         dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3571         if (dev)
3572                 strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
3573         rcu_read_unlock();
3574
3575         return sizeof(*uaddr);
3576 }
3577
3578 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3579                           int peer)
3580 {
3581         struct net_device *dev;
3582         struct sock *sk = sock->sk;
3583         struct packet_sock *po = pkt_sk(sk);
3584         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3585         int ifindex;
3586
3587         if (peer)
3588                 return -EOPNOTSUPP;
3589
3590         ifindex = READ_ONCE(po->ifindex);
3591         sll->sll_family = AF_PACKET;
3592         sll->sll_ifindex = ifindex;
3593         sll->sll_protocol = READ_ONCE(po->num);
3594         sll->sll_pkttype = 0;
3595         rcu_read_lock();
3596         dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3597         if (dev) {
3598                 sll->sll_hatype = dev->type;
3599                 sll->sll_halen = dev->addr_len;
3600                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3601         } else {
3602                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
3603                 sll->sll_halen = 0;
3604         }
3605         rcu_read_unlock();
3606
3607         return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3608 }
3609
3610 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3611                          int what)
3612 {
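        /* 'what' is a reference delta: > 0 adds the address or bumps
         * promiscuity/allmulti, < 0 removes or drops it.
         */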
3613         switch (i->type) {
3614         case PACKET_MR_MULTICAST:
3615                 if (i->alen != dev->addr_len)
3616                         return -EINVAL;
3617                 if (what > 0)
3618                         return dev_mc_add(dev, i->addr);
3619                 else
3620                         return dev_mc_del(dev, i->addr);
3621                 break;
3622         case PACKET_MR_PROMISC:
3623                 return dev_set_promiscuity(dev, what);
3624         case PACKET_MR_ALLMULTI:
3625                 return dev_set_allmulti(dev, what);
3626         case PACKET_MR_UNICAST:
3627                 if (i->alen != dev->addr_len)
3628                         return -EINVAL;
3629                 if (what > 0)
3630                         return dev_uc_add(dev, i->addr);
3631                 else
3632                         return dev_uc_del(dev, i->addr);
3633                 break;
3634         default:
3635                 break;
3636         }
3637         return 0;
3638 }
3639
3640 static void packet_dev_mclist_delete(struct net_device *dev,
3641                                      struct packet_mclist **mlp)
3642 {
3643         struct packet_mclist *ml;
3644
3645         while ((ml = *mlp) != NULL) {
3646                 if (ml->ifindex == dev->ifindex) {
3647                         packet_dev_mc(dev, ml, -1);
3648                         *mlp = ml->next;
3649                         kfree(ml);
3650                 } else
3651                         mlp = &ml->next;
3652         }
3653 }
3654
3655 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3656 {
3657         struct packet_sock *po = pkt_sk(sk);
3658         struct packet_mclist *ml, *i;
3659         struct net_device *dev;
3660         int err;
3661
3662         rtnl_lock();
3663
3664         err = -ENODEV;
3665         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3666         if (!dev)
3667                 goto done;
3668
3669         err = -EINVAL;
3670         if (mreq->mr_alen > dev->addr_len)
3671                 goto done;
3672
3673         err = -ENOBUFS;
3674         i = kmalloc(sizeof(*i), GFP_KERNEL);
3675         if (i == NULL)
3676                 goto done;
3677
3678         err = 0;
3679         for (ml = po->mclist; ml; ml = ml->next) {
3680                 if (ml->ifindex == mreq->mr_ifindex &&
3681                     ml->type == mreq->mr_type &&
3682                     ml->alen == mreq->mr_alen &&
3683                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3684                         ml->count++;
3685                         /* Free the new element ... */
3686                         kfree(i);
3687                         goto done;
3688                 }
3689         }
3690
3691         i->type = mreq->mr_type;
3692         i->ifindex = mreq->mr_ifindex;
3693         i->alen = mreq->mr_alen;
3694         memcpy(i->addr, mreq->mr_address, i->alen);
3695         memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3696         i->count = 1;
3697         i->next = po->mclist;
3698         po->mclist = i;
3699         err = packet_dev_mc(dev, i, 1);
3700         if (err) {
3701                 po->mclist = i->next;
3702                 kfree(i);
3703         }
3704
3705 done:
3706         rtnl_unlock();
3707         return err;
3708 }
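
/* Illustrative userspace sketch (not kernel code): requesting
 * promiscuous mode through the membership API serviced above. The
 * refcount in packet_mclist collapses repeated adds on one socket.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */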
3709
3710 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3711 {
3712         struct packet_mclist *ml, **mlp;
3713
3714         rtnl_lock();
3715
3716         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3717                 if (ml->ifindex == mreq->mr_ifindex &&
3718                     ml->type == mreq->mr_type &&
3719                     ml->alen == mreq->mr_alen &&
3720                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3721                         if (--ml->count == 0) {
3722                                 struct net_device *dev;
3723                                 *mlp = ml->next;
3724                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3725                                 if (dev)
3726                                         packet_dev_mc(dev, ml, -1);
3727                                 kfree(ml);
3728                         }
3729                         break;
3730                 }
3731         }
3732         rtnl_unlock();
3733         return 0;
3734 }
3735
3736 static void packet_flush_mclist(struct sock *sk)
3737 {
3738         struct packet_sock *po = pkt_sk(sk);
3739         struct packet_mclist *ml;
3740
3741         if (!po->mclist)
3742                 return;
3743
3744         rtnl_lock();
3745         while ((ml = po->mclist) != NULL) {
3746                 struct net_device *dev;
3747
3748                 po->mclist = ml->next;
3749                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3750                 if (dev != NULL)
3751                         packet_dev_mc(dev, ml, -1);
3752                 kfree(ml);
3753         }
3754         rtnl_unlock();
3755 }
3756
3757 static int
3758 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3759                   unsigned int optlen)
3760 {
3761         struct sock *sk = sock->sk;
3762         struct packet_sock *po = pkt_sk(sk);
3763         int ret;
3764
3765         if (level != SOL_PACKET)
3766                 return -ENOPROTOOPT;
3767
3768         switch (optname) {
3769         case PACKET_ADD_MEMBERSHIP:
3770         case PACKET_DROP_MEMBERSHIP:
3771         {
3772                 struct packet_mreq_max mreq;
3773                 int len = optlen;
3774                 memset(&mreq, 0, sizeof(mreq));
3775                 if (len < sizeof(struct packet_mreq))
3776                         return -EINVAL;
3777                 if (len > sizeof(mreq))
3778                         len = sizeof(mreq);
3779                 if (copy_from_sockptr(&mreq, optval, len))
3780                         return -EFAULT;
3781                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3782                         return -EINVAL;
3783                 if (optname == PACKET_ADD_MEMBERSHIP)
3784                         ret = packet_mc_add(sk, &mreq);
3785                 else
3786                         ret = packet_mc_drop(sk, &mreq);
3787                 return ret;
3788         }
3789
3790         case PACKET_RX_RING:
3791         case PACKET_TX_RING:
3792         {
3793                 union tpacket_req_u req_u;
3794                 int len;
3795
3796                 lock_sock(sk);
3797                 switch (po->tp_version) {
3798                 case TPACKET_V1:
3799                 case TPACKET_V2:
3800                         len = sizeof(req_u.req);
3801                         break;
3802                 case TPACKET_V3:
3803                 default:
3804                         len = sizeof(req_u.req3);
3805                         break;
3806                 }
3807                 if (optlen < len) {
3808                         ret = -EINVAL;
3809                 } else {
3810                         if (copy_from_sockptr(&req_u.req, optval, len))
3811                                 ret = -EFAULT;
3812                         else
3813                                 ret = packet_set_ring(sk, &req_u, 0,
3814                                                     optname == PACKET_TX_RING);
3815                 }
3816                 release_sock(sk);
3817                 return ret;
3818         }
3819         case PACKET_COPY_THRESH:
3820         {
3821                 int val;
3822
3823                 if (optlen != sizeof(val))
3824                         return -EINVAL;
3825                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3826                         return -EFAULT;
3827
3828                 pkt_sk(sk)->copy_thresh = val;
3829                 return 0;
3830         }
3831         case PACKET_VERSION:
3832         {
3833                 int val;
3834
3835                 if (optlen != sizeof(val))
3836                         return -EINVAL;
3837                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3838                         return -EFAULT;
3839                 switch (val) {
3840                 case TPACKET_V1:
3841                 case TPACKET_V2:
3842                 case TPACKET_V3:
3843                         break;
3844                 default:
3845                         return -EINVAL;
3846                 }
3847                 lock_sock(sk);
3848                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3849                         ret = -EBUSY;
3850                 } else {
3851                         po->tp_version = val;
3852                         ret = 0;
3853                 }
3854                 release_sock(sk);
3855                 return ret;
3856         }
3857         case PACKET_RESERVE:
3858         {
3859                 unsigned int val;
3860
3861                 if (optlen != sizeof(val))
3862                         return -EINVAL;
3863                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3864                         return -EFAULT;
3865                 if (val > INT_MAX)
3866                         return -EINVAL;
3867                 lock_sock(sk);
3868                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3869                         ret = -EBUSY;
3870                 } else {
3871                         po->tp_reserve = val;
3872                         ret = 0;
3873                 }
3874                 release_sock(sk);
3875                 return ret;
3876         }
3877         case PACKET_LOSS:
3878         {
3879                 unsigned int val;
3880
3881                 if (optlen != sizeof(val))
3882                         return -EINVAL;
3883                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3884                         return -EFAULT;
3885
3886                 lock_sock(sk);
3887                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3888                         ret = -EBUSY;
3889                 } else {
3890                         packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val);
3891                         ret = 0;
3892                 }
3893                 release_sock(sk);
3894                 return ret;
3895         }
3896         case PACKET_AUXDATA:
3897         {
3898                 int val;
3899
3900                 if (optlen < sizeof(val))
3901                         return -EINVAL;
3902                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3903                         return -EFAULT;
3904
3905                 packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
3906                 return 0;
3907         }
3908         case PACKET_ORIGDEV:
3909         {
3910                 int val;
3911
3912                 if (optlen < sizeof(val))
3913                         return -EINVAL;
3914                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3915                         return -EFAULT;
3916
3917                 packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
3918                 return 0;
3919         }
3920         case PACKET_VNET_HDR:
3921         {
3922                 int val;
3923
3924                 if (sock->type != SOCK_RAW)
3925                         return -EINVAL;
3926                 if (optlen < sizeof(val))
3927                         return -EINVAL;
3928                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3929                         return -EFAULT;
3930
3931                 lock_sock(sk);
3932                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3933                         ret = -EBUSY;
3934                 } else {
3935                         packet_sock_flag_set(po, PACKET_SOCK_HAS_VNET_HDR, val);
3936                         ret = 0;
3937                 }
3938                 release_sock(sk);
3939                 return ret;
3940         }
3941         case PACKET_TIMESTAMP:
3942         {
3943                 int val;
3944
3945                 if (optlen != sizeof(val))
3946                         return -EINVAL;
3947                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3948                         return -EFAULT;
3949
3950                 WRITE_ONCE(po->tp_tstamp, val);
3951                 return 0;
3952         }
3953         case PACKET_FANOUT:
3954         {
3955                 struct fanout_args args = { 0 };
3956
3957                 if (optlen != sizeof(int) && optlen != sizeof(args))
3958                         return -EINVAL;
3959                 if (copy_from_sockptr(&args, optval, optlen))
3960                         return -EFAULT;
3961
3962                 return fanout_add(sk, &args);
3963         }
3964         case PACKET_FANOUT_DATA:
3965         {
3966                 /* Paired with the WRITE_ONCE() in fanout_add() */
3967                 if (!READ_ONCE(po->fanout))
3968                         return -EINVAL;
3969
3970                 return fanout_set_data(po, optval, optlen);
3971         }
3972         case PACKET_IGNORE_OUTGOING:
3973         {
3974                 int val;
3975
3976                 if (optlen != sizeof(val))
3977                         return -EINVAL;
3978                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3979                         return -EFAULT;
3980                 if (val < 0 || val > 1)
3981                         return -EINVAL;
3982
3983                 po->prot_hook.ignore_outgoing = !!val;
3984                 return 0;
3985         }
3986         case PACKET_TX_HAS_OFF:
3987         {
3988                 unsigned int val;
3989
3990                 if (optlen != sizeof(val))
3991                         return -EINVAL;
3992                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3993                         return -EFAULT;
3994
3995                 lock_sock(sk);
3996                 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
3997                         packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val);
3998
3999                 release_sock(sk);
4000                 return 0;
4001         }
4002         case PACKET_QDISC_BYPASS:
4003         {
4004                 int val;
4005
4006                 if (optlen != sizeof(val))
4007                         return -EINVAL;
4008                 if (copy_from_sockptr(&val, optval, sizeof(val)))
4009                         return -EFAULT;
4010
4011                 /* Paired with all lockless reads of po->xmit */
4012                 WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
4013                 return 0;
4014         }
4015         default:
4016                 return -ENOPROTOOPT;
4017         }
4018 }
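
/* Illustrative userspace sketch (not kernel code): the ordering the
 * -EBUSY checks above enforce -- PACKET_VERSION and PACKET_RESERVE
 * must be set before a ring exists:
 *
 *	int ver = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	struct tpacket_req3 req = { ... };  // sizing: see the sketch
 *	                                    // after packet_set_ring()
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */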
4019
4020 static int packet_getsockopt(struct socket *sock, int level, int optname,
4021                              char __user *optval, int __user *optlen)
4022 {
4023         int len;
4024         int val, lv = sizeof(val);
4025         struct sock *sk = sock->sk;
4026         struct packet_sock *po = pkt_sk(sk);
4027         void *data = &val;
4028         union tpacket_stats_u st;
4029         struct tpacket_rollover_stats rstats;
4030         int drops;
4031
4032         if (level != SOL_PACKET)
4033                 return -ENOPROTOOPT;
4034
4035         if (get_user(len, optlen))
4036                 return -EFAULT;
4037
4038         if (len < 0)
4039                 return -EINVAL;
4040
4041         switch (optname) {
4042         case PACKET_STATISTICS:
4043                 spin_lock_bh(&sk->sk_receive_queue.lock);
4044                 memcpy(&st, &po->stats, sizeof(st));
4045                 memset(&po->stats, 0, sizeof(po->stats));
4046                 spin_unlock_bh(&sk->sk_receive_queue.lock);
4047                 drops = atomic_xchg(&po->tp_drops, 0);
4048
4049                 if (po->tp_version == TPACKET_V3) {
4050                         lv = sizeof(struct tpacket_stats_v3);
4051                         st.stats3.tp_drops = drops;
4052                         st.stats3.tp_packets += drops;
4053                         data = &st.stats3;
4054                 } else {
4055                         lv = sizeof(struct tpacket_stats);
4056                         st.stats1.tp_drops = drops;
4057                         st.stats1.tp_packets += drops;
4058                         data = &st.stats1;
4059                 }
4060
4061                 break;
4062         case PACKET_AUXDATA:
4063                 val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
4064                 break;
4065         case PACKET_ORIGDEV:
4066                 val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
4067                 break;
4068         case PACKET_VNET_HDR:
4069                 val = packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR);
4070                 break;
4071         case PACKET_VERSION:
4072                 val = po->tp_version;
4073                 break;
4074         case PACKET_HDRLEN:
4075                 if (len > sizeof(int))
4076                         len = sizeof(int);
4077                 if (len < sizeof(int))
4078                         return -EINVAL;
4079                 if (copy_from_user(&val, optval, len))
4080                         return -EFAULT;
4081                 switch (val) {
4082                 case TPACKET_V1:
4083                         val = sizeof(struct tpacket_hdr);
4084                         break;
4085                 case TPACKET_V2:
4086                         val = sizeof(struct tpacket2_hdr);
4087                         break;
4088                 case TPACKET_V3:
4089                         val = sizeof(struct tpacket3_hdr);
4090                         break;
4091                 default:
4092                         return -EINVAL;
4093                 }
4094                 break;
4095         case PACKET_RESERVE:
4096                 val = po->tp_reserve;
4097                 break;
4098         case PACKET_LOSS:
4099                 val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
4100                 break;
4101         case PACKET_TIMESTAMP:
4102                 val = READ_ONCE(po->tp_tstamp);
4103                 break;
4104         case PACKET_FANOUT:
4105                 val = (po->fanout ?
4106                        ((u32)po->fanout->id |
4107                         ((u32)po->fanout->type << 16) |
4108                         ((u32)po->fanout->flags << 24)) :
4109                        0);
4110                 break;
4111         case PACKET_IGNORE_OUTGOING:
4112                 val = po->prot_hook.ignore_outgoing;
4113                 break;
4114         case PACKET_ROLLOVER_STATS:
4115                 if (!po->rollover)
4116                         return -EINVAL;
4117                 rstats.tp_all = atomic_long_read(&po->rollover->num);
4118                 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4119                 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4120                 data = &rstats;
4121                 lv = sizeof(rstats);
4122                 break;
4123         case PACKET_TX_HAS_OFF:
4124                 val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
4125                 break;
4126         case PACKET_QDISC_BYPASS:
4127                 val = packet_use_direct_xmit(po);
4128                 break;
4129         default:
4130                 return -ENOPROTOOPT;
4131         }
4132
4133         if (len > lv)
4134                 len = lv;
4135         if (put_user(len, optlen))
4136                 return -EFAULT;
4137         if (copy_to_user(optval, data, len))
4138                 return -EFAULT;
4139         return 0;
4140 }
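
/* Note for users: PACKET_STATISTICS is read-and-reset -- the memset()
 * and atomic_xchg() above clear the counters on every read. An
 * illustrative (not kernel) sketch:
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *	// st.tp_packets includes st.tp_drops, per the code above
 */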
4141
4142 static int packet_notifier(struct notifier_block *this,
4143                            unsigned long msg, void *ptr)
4144 {
4145         struct sock *sk;
4146         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4147         struct net *net = dev_net(dev);
4148
4149         rcu_read_lock();
4150         sk_for_each_rcu(sk, &net->packet.sklist) {
4151                 struct packet_sock *po = pkt_sk(sk);
4152
4153                 switch (msg) {
4154                 case NETDEV_UNREGISTER:
4155                         if (po->mclist)
4156                                 packet_dev_mclist_delete(dev, &po->mclist);
4157                         fallthrough;
4158
4159                 case NETDEV_DOWN:
4160                         if (dev->ifindex == po->ifindex) {
4161                                 spin_lock(&po->bind_lock);
4162                                 if (po->running) {
4163                                         __unregister_prot_hook(sk, false);
4164                                         sk->sk_err = ENETDOWN;
4165                                         if (!sock_flag(sk, SOCK_DEAD))
4166                                                 sk_error_report(sk);
4167                                 }
4168                                 if (msg == NETDEV_UNREGISTER) {
4169                                         packet_cached_dev_reset(po);
4170                                         WRITE_ONCE(po->ifindex, -1);
4171                                         netdev_put(po->prot_hook.dev,
4172                                                    &po->prot_hook.dev_tracker);
4173                                         po->prot_hook.dev = NULL;
4174                                 }
4175                                 spin_unlock(&po->bind_lock);
4176                         }
4177                         break;
4178                 case NETDEV_UP:
4179                         if (dev->ifindex == po->ifindex) {
4180                                 spin_lock(&po->bind_lock);
4181                                 if (po->num)
4182                                         register_prot_hook(sk);
4183                                 spin_unlock(&po->bind_lock);
4184                         }
4185                         break;
4186                 }
4187         }
4188         rcu_read_unlock();
4189         return NOTIFY_DONE;
4190 }
4191
4192
4193 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4194                         unsigned long arg)
4195 {
4196         struct sock *sk = sock->sk;
4197
4198         switch (cmd) {
4199         case SIOCOUTQ:
4200         {
4201                 int amount = sk_wmem_alloc_get(sk);
4202
4203                 return put_user(amount, (int __user *)arg);
4204         }
4205         case SIOCINQ:
4206         {
4207                 struct sk_buff *skb;
4208                 int amount = 0;
4209
4210                 spin_lock_bh(&sk->sk_receive_queue.lock);
4211                 skb = skb_peek(&sk->sk_receive_queue);
4212                 if (skb)
4213                         amount = skb->len;
4214                 spin_unlock_bh(&sk->sk_receive_queue.lock);
4215                 return put_user(amount, (int __user *)arg);
4216         }
4217 #ifdef CONFIG_INET
4218         case SIOCADDRT:
4219         case SIOCDELRT:
4220         case SIOCDARP:
4221         case SIOCGARP:
4222         case SIOCSARP:
4223         case SIOCGIFADDR:
4224         case SIOCSIFADDR:
4225         case SIOCGIFBRDADDR:
4226         case SIOCSIFBRDADDR:
4227         case SIOCGIFNETMASK:
4228         case SIOCSIFNETMASK:
4229         case SIOCGIFDSTADDR:
4230         case SIOCSIFDSTADDR:
4231         case SIOCSIFFLAGS:
4232                 return inet_dgram_ops.ioctl(sock, cmd, arg);
4233 #endif
4234
4235         default:
4236                 return -ENOIOCTLCMD;
4237         }
4238         return 0;
4239 }
4240
4241 static __poll_t packet_poll(struct file *file, struct socket *sock,
4242                                 poll_table *wait)
4243 {
4244         struct sock *sk = sock->sk;
4245         struct packet_sock *po = pkt_sk(sk);
4246         __poll_t mask = datagram_poll(file, sock, wait);
4247
4248         spin_lock_bh(&sk->sk_receive_queue.lock);
4249         if (po->rx_ring.pg_vec) {
4250                 if (!packet_previous_rx_frame(po, &po->rx_ring,
4251                         TP_STATUS_KERNEL))
4252                         mask |= EPOLLIN | EPOLLRDNORM;
4253         }
4254         packet_rcv_try_clear_pressure(po);
4255         spin_unlock_bh(&sk->sk_receive_queue.lock);
4256         spin_lock_bh(&sk->sk_write_queue.lock);
4257         if (po->tx_ring.pg_vec) {
4258                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4259                         mask |= EPOLLOUT | EPOLLWRNORM;
4260         }
4261         spin_unlock_bh(&sk->sk_write_queue.lock);
4262         return mask;
4263 }
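
/* Illustrative userspace sketch (not kernel code): waiting for a
 * mapped RX ring to fill, which is the EPOLLIN case computed above:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	// at least one frame now carries TP_STATUS_USER
 */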
4264
4265
4266 /* Dirty? Well, I still have not found a better way to account
4267  * for user mmaps.
4268  */
4269
4270 static void packet_mm_open(struct vm_area_struct *vma)
4271 {
4272         struct file *file = vma->vm_file;
4273         struct socket *sock = file->private_data;
4274         struct sock *sk = sock->sk;
4275
4276         if (sk)
4277                 atomic_inc(&pkt_sk(sk)->mapped);
4278 }
4279
4280 static void packet_mm_close(struct vm_area_struct *vma)
4281 {
4282         struct file *file = vma->vm_file;
4283         struct socket *sock = file->private_data;
4284         struct sock *sk = sock->sk;
4285
4286         if (sk)
4287                 atomic_dec(&pkt_sk(sk)->mapped);
4288 }
4289
4290 static const struct vm_operations_struct packet_mmap_ops = {
4291         .open   =       packet_mm_open,
4292         .close  =       packet_mm_close,
4293 };
4294
4295 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4296                         unsigned int len)
4297 {
4298         int i;
4299
4300         for (i = 0; i < len; i++) {
4301                 if (likely(pg_vec[i].buffer)) {
4302                         if (is_vmalloc_addr(pg_vec[i].buffer))
4303                                 vfree(pg_vec[i].buffer);
4304                         else
4305                                 free_pages((unsigned long)pg_vec[i].buffer,
4306                                            order);
4307                         pg_vec[i].buffer = NULL;
4308                 }
4309         }
4310         kfree(pg_vec);
4311 }
4312
4313 static char *alloc_one_pg_vec_page(unsigned long order)
4314 {
4315         char *buffer;
4316         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4317                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4318
4319         buffer = (char *) __get_free_pages(gfp_flags, order);
4320         if (buffer)
4321                 return buffer;
4322
4323         /* __get_free_pages failed, fall back to vmalloc */
4324         buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4325         if (buffer)
4326                 return buffer;
4327
4328         /* vmalloc failed, let's dig into swap here */
4329         gfp_flags &= ~__GFP_NORETRY;
4330         buffer = (char *) __get_free_pages(gfp_flags, order);
4331         if (buffer)
4332                 return buffer;
4333
4334         /* complete and utter failure */
4335         return NULL;
4336 }
4337
4338 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4339 {
4340         unsigned int block_nr = req->tp_block_nr;
4341         struct pgv *pg_vec;
4342         int i;
4343
4344         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4345         if (unlikely(!pg_vec))
4346                 goto out;
4347
4348         for (i = 0; i < block_nr; i++) {
4349                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4350                 if (unlikely(!pg_vec[i].buffer))
4351                         goto out_free_pgvec;
4352         }
4353
4354 out:
4355         return pg_vec;
4356
4357 out_free_pgvec:
4358         free_pg_vec(pg_vec, order, block_nr);
4359         pg_vec = NULL;
4360         goto out;
4361 }
4362
4363 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4364                 int closing, int tx_ring)
4365 {
4366         struct pgv *pg_vec = NULL;
4367         struct packet_sock *po = pkt_sk(sk);
4368         unsigned long *rx_owner_map = NULL;
4369         int was_running, order = 0;
4370         struct packet_ring_buffer *rb;
4371         struct sk_buff_head *rb_queue;
4372         __be16 num;
4373         int err;
4374         /* Alias kept to minimize code churn */
4375         struct tpacket_req *req = &req_u->req;
4376
4377         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4378         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4379
4380         err = -EBUSY;
4381         if (!closing) {
4382                 if (atomic_read(&po->mapped))
4383                         goto out;
4384                 if (packet_read_pending(rb))
4385                         goto out;
4386         }
4387
4388         if (req->tp_block_nr) {
4389                 unsigned int min_frame_size;
4390
4391                 /* Sanity tests and some calculations */
4392                 err = -EBUSY;
4393                 if (unlikely(rb->pg_vec))
4394                         goto out;
4395
4396                 switch (po->tp_version) {
4397                 case TPACKET_V1:
4398                         po->tp_hdrlen = TPACKET_HDRLEN;
4399                         break;
4400                 case TPACKET_V2:
4401                         po->tp_hdrlen = TPACKET2_HDRLEN;
4402                         break;
4403                 case TPACKET_V3:
4404                         po->tp_hdrlen = TPACKET3_HDRLEN;
4405                         break;
4406                 }
4407
4408                 err = -EINVAL;
4409                 if (unlikely((int)req->tp_block_size <= 0))
4410                         goto out;
4411                 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4412                         goto out;
4413                 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4414                 if (po->tp_version >= TPACKET_V3 &&
4415                     req->tp_block_size <
4416                     BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4417                         goto out;
4418                 if (unlikely(req->tp_frame_size < min_frame_size))
4419                         goto out;
4420                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4421                         goto out;
4422
4423                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4424                 if (unlikely(rb->frames_per_block == 0))
4425                         goto out;
4426                 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4427                         goto out;
4428                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4429                                         req->tp_frame_nr))
4430                         goto out;
4431
4432                 err = -ENOMEM;
4433                 order = get_order(req->tp_block_size);
4434                 pg_vec = alloc_pg_vec(req, order);
4435                 if (unlikely(!pg_vec))
4436                         goto out;
4437                 switch (po->tp_version) {
4438                 case TPACKET_V3:
4439                         /* Block transmit is not supported yet */
4440                         if (!tx_ring) {
4441                                 init_prb_bdqc(po, rb, pg_vec, req_u);
4442                         } else {
4443                                 struct tpacket_req3 *req3 = &req_u->req3;
4444
4445                                 if (req3->tp_retire_blk_tov ||
4446                                     req3->tp_sizeof_priv ||
4447                                     req3->tp_feature_req_word) {
4448                                         err = -EINVAL;
4449                                         goto out_free_pg_vec;
4450                                 }
4451                         }
4452                         break;
4453                 default:
4454                         if (!tx_ring) {
4455                                 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4456                                         GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4457                                 if (!rx_owner_map)
4458                                         goto out_free_pg_vec;
4459                         }
4460                         break;
4461                 }
4462         }
4463         /* Done */
4464         else {
4465                 err = -EINVAL;
4466                 if (unlikely(req->tp_frame_nr))
4467                         goto out;
4468         }
4469
4470
4471         /* Detach socket from network */
4472         spin_lock(&po->bind_lock);
4473         was_running = po->running;
4474         num = po->num;
4475         if (was_running) {
4476                 WRITE_ONCE(po->num, 0);
4477                 __unregister_prot_hook(sk, false);
4478         }
4479         spin_unlock(&po->bind_lock);
4480
4481         synchronize_net();
4482
	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		if (po->tp_version <= TPACKET_V2)
			swap(rb->rx_owner_map, rx_owner_map);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		WRITE_ONCE(po->num, num);
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

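/* After the swap above, pg_vec and rx_owner_map refer to the resources
 * being torn down: the old ring on success, the new one on failure.
 */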
out_free_pg_vec:
	if (pg_vec) {
		bitmap_free(rx_owner_map);
		free_pg_vec(pg_vec, order, req->tp_block_nr);
	}
out:
	return err;
}

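/*
 * packet_mmap - map the rx and/or tx ring into a single user VMA.
 *
 * A minimal userspace sketch (illustrative only; error handling is
 * omitted and the sizes below are arbitrary example values, not
 * recommendations):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_frame_size = 2048,
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,	// tp_block_nr * frames per block
 *	};
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */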
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

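	/* The VMA must cover every configured ring exactly: rx first,
	 * then tx, back to back.
	 */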
	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

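	/* Insert every page of every block individually; pgv_to_page()
	 * handles both page-allocated and vmalloc'ed blocks.
	 */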
	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

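/* Ops for the legacy SOCK_PACKET interface: no socket options and no
 * ring support, so mmap and {set,get}sockopt stay unimplemented.
 */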
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

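/* Ops for SOCK_RAW/SOCK_DGRAM packet sockets, including memory-mapped
 * rx/tx rings via packet_mmap().
 */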
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS

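/* /proc/net/packet: one line per packet socket, walked under RCU. */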
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);

	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(READ_ONCE(po->num)),
			   READ_ONCE(po->ifindex),
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

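/* Per-namespace state: the socket list plus the /proc/net/packet entry. */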
static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
}

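/* Registration order matters: each step is unwound in reverse on
 * failure, mirroring packet_exit().
 */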
static int __init packet_init(void)
{
	int rc;

	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;
	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out_notifier;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;

	return 0;

out_proto:
	proto_unregister(&packet_proto);
out_notifier:
	unregister_netdevice_notifier(&packet_netdev_notifier);
out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);